Finish 4.2a0
commit 811369d883

@@ -26,7 +26,11 @@ jobs:
windows-2019-cl,
]
build_type: [Debug, Release]
build_type: [
Debug,
#TODO(Varun) The release build takes over 2.5 hours, need to figure out why.
# Release
]
build_unstable: [ON]
include:
#TODO This build fails, need to understand why.

@@ -90,13 +94,18 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
- name: Build
- name: Configuration
run: |
cmake -E remove_directory build
cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib"
cmake --build build --config ${{ matrix.build_type }} --target gtsam
cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable
cmake --build build --config ${{ matrix.build_type }} --target wrap
cmake --build build --config ${{ matrix.build_type }} --target check.base
cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable
cmake --build build --config ${{ matrix.build_type }} --target check.linear
- name: Build
run: |
# Since Visual Studio is a multi-generator, we need to use --config
# https://stackoverflow.com/a/24470998/1236990
cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam
cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam_unstable
cmake --build build -j 4 --config ${{ matrix.build_type }} --target wrap
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base_unstable
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.linear

@@ -9,12 +9,18 @@ endif()
# Set the version number for the library
set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 1)
set (GTSAM_VERSION_PATCH 1)
set (GTSAM_VERSION_MINOR 2)
set (GTSAM_VERSION_PATCH 0)
set (GTSAM_PRERELEASE_VERSION "a0")
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
set (CMAKE_PROJECT_VERSION ${GTSAM_VERSION_STRING})
if (${GTSAM_VERSION_PATCH} EQUAL 0)
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}${GTSAM_PRERELEASE_VERSION}")
else()
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}${GTSAM_PRERELEASE_VERSION}")
endif()
message(STATUS "GTSAM Version: ${GTSAM_VERSION_STRING}")
set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
set (CMAKE_PROJECT_VERSION_MINOR ${GTSAM_VERSION_MINOR})
set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH})

@@ -87,6 +93,13 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
CACHE STRING "The Python version to use for wrapping")
# Set the include directory for matlab.h
set(GTWRAP_INCLUDE_NAME "wrap")
# Copy matlab.h to the correct folder.
configure_file(${PROJECT_SOURCE_DIR}/wrap/matlab.h
${PROJECT_BINARY_DIR}/wrap/matlab.h COPYONLY)
# Add the include directories so that matlab.h can be found
include_directories("${PROJECT_BINARY_DIR}" "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}")
add_subdirectory(wrap)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake")
endif()
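
(For reference, not part of the patch: with the values set above, GTSAM_VERSION_NUMERIC evaluates to 10000*4 + 100*2 + 0 = 40200, and because GTSAM_VERSION_PATCH is 0 the reported version string becomes "4.2a0", matching the commit title.)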

@@ -2,9 +2,9 @@
**Important Note**
As of August 1 2020, the `develop` branch is officially in "Pre 4.1" mode, and features deprecated in 4.0 have been removed. Please use the last [4.0.3 release](https://github.com/borglab/gtsam/releases/tag/4.0.3) if you need those features.
As of Dec 2021, the `develop` branch is officially in "Pre 4.2" mode. A great new feature we will be adding in 4.2 is *hybrid inference* a la DCSLAM (Kevin Doherty et al) and we envision several API-breaking changes will happen in the discrete folder.
However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4`.
In addition, features deprecated in 4.1 will be removed. Please use the last [4.1.1 release](https://github.com/borglab/gtsam/releases/tag/4.1.1) if you need those features. However, most (not all, unfortunately) are easily converted and can be tracked down (in 4.1.1) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V41`.
## What is GTSAM?

@@ -56,8 +56,8 @@ int main(int argc, char **argv) {
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
// solve
DiscreteFactor::sharedValues mpe = chordal->optimize();
GTSAM_PRINT(*mpe);
auto mpe = chordal->optimize();
GTSAM_PRINT(mpe);
// We can also build a Bayes tree (directed junction tree).
// The elimination order above will do fine:

@@ -70,14 +70,14 @@ int main(int argc, char **argv) {
// solve again, now with evidence
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
DiscreteFactor::sharedValues mpe2 = chordal2->optimize();
GTSAM_PRINT(*mpe2);
auto mpe2 = chordal2->optimize();
GTSAM_PRINT(mpe2);
// We can also sample from it
cout << "\n10 samples:" << endl;
for (size_t i = 0; i < 10; i++) {
DiscreteFactor::sharedValues sample = chordal2->sample();
GTSAM_PRINT(*sample);
auto sample = chordal2->sample();
GTSAM_PRINT(sample);
}
return 0;
}

@@ -33,11 +33,11 @@ using namespace gtsam;
int main(int argc, char **argv) {
// Define keys and a print function
Key C(1), S(2), R(3), W(4);
auto print = [=](DiscreteFactor::sharedValues values) {
cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C])
<< " Sprinkler = " << static_cast<bool>((*values)[S])
<< " Rain = " << boolalpha << static_cast<bool>((*values)[R])
<< " WetGrass = " << static_cast<bool>((*values)[W]) << endl;
auto print = [=](const DiscreteFactor::Values& values) {
cout << boolalpha << "Cloudy = " << static_cast<bool>(values.at(C))
<< " Sprinkler = " << static_cast<bool>(values.at(S))
<< " Rain = " << boolalpha << static_cast<bool>(values.at(R))
<< " WetGrass = " << static_cast<bool>(values.at(W)) << endl;
};
// We assume binary state variables

@@ -85,7 +85,7 @@ int main(int argc, char **argv) {
}
// "Most Probable Explanation", i.e., configuration with largest value
DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
auto mpe = graph.eliminateSequential()->optimize();
cout << "\nMost Probable Explanation (MPE):" << endl;
print(mpe);

@@ -97,7 +97,7 @@ int main(int argc, char **argv) {
// solve again, now with evidence
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();
auto mpe_with_evidence = chordal->optimize();
cout << "\nMPE given C=0:" << endl;
print(mpe_with_evidence);

@@ -113,7 +113,7 @@ int main(int argc, char **argv) {
// We can also sample from it
cout << "\n10 samples:" << endl;
for (size_t i = 0; i < 10; i++) {
DiscreteFactor::sharedValues sample = chordal->sample();
auto sample = chordal->sample();
print(sample);
}
return 0;

@@ -122,8 +122,7 @@ int main(int argc, char *argv[]) {
std::cout << "initial error=" << graph.error(initialEstimate) << std::endl;
std::cout << "final error=" << graph.error(result) << std::endl;
std::ofstream os("examples/vio_batch.dot");
graph.saveGraph(os, result);
graph.saveGraph("examples/vio_batch.dot", result);
return 0;
}

@@ -66,14 +66,14 @@ int main(int argc, char **argv) {
chordal->print("Eliminated");
// solve
DiscreteFactor::sharedValues mpe = chordal->optimize();
GTSAM_PRINT(*mpe);
auto mpe = chordal->optimize();
GTSAM_PRINT(mpe);
// We can also sample from it
cout << "\n10 samples:" << endl;
for (size_t k = 0; k < 10; k++) {
DiscreteFactor::sharedValues sample = chordal->sample();
GTSAM_PRINT(*sample);
auto sample = chordal->sample();
GTSAM_PRINT(sample);
}
// Or compute the marginals. This re-eliminates the FG into a Bayes tree

@@ -60,11 +60,10 @@ int main(int argc, char** argv) {
// save factor graph as graphviz dot file
// Render to PDF using "fdp Pose2SLAMExample.dot -Tpdf > graph.pdf"
ofstream os("Pose2SLAMExample.dot");
graph.saveGraph(os, result);
graph.saveGraph("Pose2SLAMExample.dot", result);
// Also print out to console
graph.saveGraph(cout, result);
graph.dot(cout, result);
return 0;
}

@@ -70,8 +70,8 @@ int main(int argc, char** argv) {
// "Decoding", i.e., configuration with largest value
// We use sequential variable elimination
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n");
auto optimalDecoding = chordal->optimize();
optimalDecoding.print("\nMost Probable Explanation (optimalDecoding)\n");
// "Inference" Computing marginals for each node
// Here we'll make use of DiscreteMarginals class, which makes use of

@@ -63,8 +63,8 @@ int main(int argc, char** argv) {
// "Decoding", i.e., configuration with largest value (MPE)
// We use sequential variable elimination
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
optimalDecoding->print("\noptimalDecoding");
auto optimalDecoding = chordal->optimize();
GTSAM_PRINT(optimalDecoding);
// "Inference" Computing marginals
cout << "\nComputing Node Marginals .." << endl;

@@ -15,7 +15,7 @@ set (gtsam_subdirs
sam
sfm
slam
navigation
navigation
)
set(gtsam_srcs)

@@ -38,7 +38,7 @@ class DSFMap {
DSFMap();
KEY find(const KEY& key) const;
void merge(const KEY& x, const KEY& y);
std::map<KEY, Set> sets();
std::map<KEY, This::Set> sets();
};
class IndexPairSet {

@@ -173,7 +173,7 @@ TEST(Matrix, stack )
{
Matrix A = (Matrix(2, 2) << -5.0, 3.0, 00.0, -5.0).finished();
Matrix B = (Matrix(3, 2) << -0.5, 2.1, 1.1, 3.4, 2.6, 7.1).finished();
Matrix AB = stack(2, &A, &B);
Matrix AB = gtsam::stack(2, &A, &B);
Matrix C(5, 2);
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)

@@ -187,7 +187,7 @@ TEST(Matrix, stack )
std::vector<gtsam::Matrix> matrices;
matrices.push_back(A);
matrices.push_back(B);
Matrix AB2 = stack(matrices);
Matrix AB2 = gtsam::stack(matrices);
EQUALITY(C,AB2);
}

@@ -140,7 +140,7 @@ class FitBasis {
static gtsam::GaussianFactorGraph::shared_ptr LinearGraph(
const std::map<double, double>& sequence,
const gtsam::noiseModel::Base* model, size_t N);
Parameters parameters() const;
This::Parameters parameters() const;
};
} // namespace gtsam

@@ -248,8 +248,9 @@ namespace gtsam {
void dot(std::ostream& os, bool showZero) const override {
os << "\"" << this->id() << "\" [shape=circle, label=\"" << label_
<< "\"]\n";
for (size_t i = 0; i < branches_.size(); i++) {
NodePtr branch = branches_[i];
size_t B = branches_.size();
for (size_t i = 0; i < B; i++) {
const NodePtr& branch = branches_[i];
// Check if zero
if (!showZero) {

@@ -258,8 +259,10 @@ namespace gtsam {
}
os << "\"" << this->id() << "\" -> \"" << branch->id() << "\"";
if (i == 0) os << " [style=dashed]";
if (i > 1) os << " [style=bold]";
if (B == 2) {
if (i == 0) os << " [style=dashed]";
if (i > 1) os << " [style=bold]";
}
os << std::endl;
branch->dot(os, showZero);
}

@@ -671,7 +674,14 @@ namespace gtsam {
int result = system(
("dot -Tpdf " + name + ".dot -o " + name + ".pdf >& /dev/null").c_str());
if (result==-1) throw std::runtime_error("DecisionTree::dot system call failed");
}
}
template<typename L, typename Y>
std::string DecisionTree<L, Y>::dot(bool showZero) const {
std::stringstream ss;
dot(ss, showZero);
return ss.str();
}
/*********************************************************************************/

@@ -19,6 +19,8 @@
#pragma once
#include <gtsam/base/types.h>
#include <gtsam/discrete/Assignment.h>
#include <boost/function.hpp>

@@ -35,7 +37,7 @@ namespace gtsam {
* Y = function range (any algebra), e.g., bool, int, double
*/
template<typename L, typename Y>
class DecisionTree {
class GTSAM_EXPORT DecisionTree {
public:

@@ -198,6 +200,9 @@ namespace gtsam {
/** output to graphviz format, open a file */
void dot(const std::string& name, bool showZero = true) const;
/** output to graphviz format string */
std::string dot(bool showZero = true) const;
/// @name Advanced Interface
/// @{

@@ -134,5 +134,52 @@ namespace gtsam {
return boost::make_shared<DecisionTreeFactor>(dkeys, result);
}
/* ************************************************************************* */
/* ************************************************************************* */
std::vector<std::pair<DiscreteValues, double>> DecisionTreeFactor::enumerate() const {
// Get all possible assignments
std::vector<std::pair<Key, size_t>> pairs;
for (auto& key : keys()) {
pairs.emplace_back(key, cardinalities_.at(key));
}
// Reverse to make cartesianProduct output a more natural ordering.
std::vector<std::pair<Key, size_t>> rpairs(pairs.rbegin(), pairs.rend());
const auto assignments = cartesianProduct(rpairs);
// Construct unordered_map with values
std::vector<std::pair<DiscreteValues, double>> result;
for (const auto& assignment : assignments) {
result.emplace_back(assignment, operator()(assignment));
}
return result;
}
/* ************************************************************************* */
std::string DecisionTreeFactor::markdown(
const KeyFormatter& keyFormatter) const {
std::stringstream ss;
// Print out header and construct argument for `cartesianProduct`.
ss << "|";
for (auto& key : keys()) {
ss << keyFormatter(key) << "|";
}
ss << "value|\n";
// Print out separator with alignment hints.
ss << "|";
for (size_t j = 0; j < size(); j++) ss << ":-:|";
ss << ":-:|\n";
// Print out all rows.
auto rows = enumerate();
for (const auto& kv : rows) {
ss << "|";
auto assignment = kv.first;
for (auto& key : keys()) ss << assignment.at(key) << "|";
ss << kv.second << "|\n";
}
return ss.str();
}
/* ************************************************************************* */
} // namespace gtsam
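
A minimal usage sketch of the new DecisionTreeFactor::enumerate()/markdown() members (not part of the patch; the keys and table values below are made up for illustration):

    #include <gtsam/discrete/DecisionTreeFactor.h>
    #include <iostream>

    using namespace gtsam;

    int main() {
      DiscreteKey A(0, 2), B(1, 2);            // two binary variables (hypothetical keys)
      DecisionTreeFactor f(A & B, "1 2 3 4");  // 2x2 potential table
      for (const auto& kv : f.enumerate())     // every assignment paired with its value
        std::cout << kv.second << std::endl;
      std::cout << f.markdown() << std::endl;  // markdown table, e.g. for notebooks
      return 0;
    }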

@@ -61,6 +61,15 @@ namespace gtsam {
DiscreteFactor(keys.indices()), Potentials(keys, table) {
}
/// Single-key specialization
template <class SOURCE>
DecisionTreeFactor(const DiscreteKey& key, SOURCE table)
: DecisionTreeFactor(DiscreteKeys{key}, table) {}
/// Single-key specialization, with vector of doubles.
DecisionTreeFactor(const DiscreteKey& key, const std::vector<double>& row)
: DecisionTreeFactor(DiscreteKeys{key}, row) {}
/** Construct from a DiscreteConditional type */
DecisionTreeFactor(const DiscreteConditional& c);

@@ -80,7 +89,7 @@ namespace gtsam {
/// @{
/// Value is just look up in AlgebraicDecisonTree
double operator()(const Values& values) const override {
double operator()(const DiscreteValues& values) const override {
return Potentials::operator()(values);
}

@@ -162,7 +171,19 @@ namespace gtsam {
// Potentials::reduceWithInverse(inverseReduction);
// }
/// Enumerate all values into a map from values to double.
std::vector<std::pair<DiscreteValues, double>> enumerate() const;
/// @}
/// @name Wrapper support
/// @{
/// Render as markdown table.
std::string markdown(
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override;
/// @}
};
// DecisionTreeFactor
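
A short sketch of the new single-key constructors (not part of the patch; key and numbers are hypothetical):

    #include <gtsam/discrete/DecisionTreeFactor.h>
    #include <vector>

    using namespace gtsam;

    int main() {
      DiscreteKey X(0, 3);                                           // one ternary variable
      DecisionTreeFactor f1(X, "0.1 0.6 0.3");                       // from a spec string
      DecisionTreeFactor f2(X, std::vector<double>{0.1, 0.6, 0.3});  // from a vector of doubles
      DiscreteValues values;
      values[X.first] = 1;
      double p = f1(values);  // look up a single entry via the DiscreteValues overload
      (void)p;
      return 0;
    }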

@@ -35,41 +35,41 @@ namespace gtsam {
}
/* ************************************************************************* */
// void DiscreteBayesNet::add_front(const Signature& s) {
// push_front(boost::make_shared<DiscreteConditional>(s));
// }
/* ************************************************************************* */
void DiscreteBayesNet::add(const Signature& s) {
push_back(boost::make_shared<DiscreteConditional>(s));
}
/* ************************************************************************* */
double DiscreteBayesNet::evaluate(const DiscreteConditional::Values & values) const {
double DiscreteBayesNet::evaluate(const DiscreteValues & values) const {
// evaluate all conditionals and multiply
double result = 1.0;
for(DiscreteConditional::shared_ptr conditional: *this)
for(const DiscreteConditional::shared_ptr& conditional: *this)
result *= (*conditional)(values);
return result;
}
/* ************************************************************************* */
DiscreteFactor::sharedValues DiscreteBayesNet::optimize() const {
DiscreteValues DiscreteBayesNet::optimize() const {
// solve each node in turn in topological sort order (parents first)
DiscreteFactor::sharedValues result(new DiscreteFactor::Values());
DiscreteValues result;
for (auto conditional: boost::adaptors::reverse(*this))
conditional->solveInPlace(*result);
conditional->solveInPlace(&result);
return result;
}
/* ************************************************************************* */
DiscreteFactor::sharedValues DiscreteBayesNet::sample() const {
DiscreteValues DiscreteBayesNet::sample() const {
// sample each node in turn in topological sort order (parents first)
DiscreteFactor::sharedValues result(new DiscreteFactor::Values());
DiscreteValues result;
for (auto conditional: boost::adaptors::reverse(*this))
conditional->sampleInPlace(*result);
conditional->sampleInPlace(&result);
return result;
}
/* ************************************************************************* */
std::string DiscreteBayesNet::markdown(
const KeyFormatter& keyFormatter) const {
using std::endl;
std::stringstream ss;
ss << "`DiscreteBayesNet` of size " << size() << endl << endl;
for(const DiscreteConditional::shared_ptr& conditional: *this)
ss << conditional->markdown(keyFormatter) << endl;
return ss.str();
}
/* ************************************************************************* */
} // namespace

@@ -13,6 +13,7 @@
* @file DiscreteBayesNet.h
* @date Feb 15, 2011
* @author Duy-Nguyen Ta
* @author Frank dellaert
*/
#pragma once

@@ -22,6 +23,7 @@
#include <boost/shared_ptr.hpp>
#include <gtsam/inference/BayesNet.h>
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/discrete/DiscretePrior.h>
#include <gtsam/discrete/DiscreteConditional.h>
namespace gtsam {

@@ -71,24 +73,45 @@ namespace gtsam {
/// @name Standard Interface
/// @{
// Add inherited versions of add.
using Base::add;
/** Add a DiscretePrior using a table or a string */
void add(const DiscreteKey& key, const std::string& spec) {
emplace_shared<DiscretePrior>(key, spec);
}
/** Add a DiscreteCondtional */
void add(const Signature& s);
template <typename... Args>
void add(Args&&... args) {
emplace_shared<DiscreteConditional>(std::forward<Args>(args)...);
}
//** evaluate for given DiscreteValues */
double evaluate(const DiscreteValues & values) const;
// /** Add a DiscreteCondtional in front, when listing parents first*/
// GTSAM_EXPORT void add_front(const Signature& s);
//** evaluate for given Values */
double evaluate(const DiscreteConditional::Values & values) const;
//** (Preferred) sugar for the above for given DiscreteValues */
double operator()(const DiscreteValues & values) const {
return evaluate(values);
}
/**
* Solve the DiscreteBayesNet by back-substitution
*/
DiscreteFactor::sharedValues optimize() const;
DiscreteValues optimize() const;
/** Do ancestral sampling */
DiscreteFactor::sharedValues sample() const;
DiscreteValues sample() const;
///@}
/// @name Wrapper support
/// @{
/// Render as markdown table.
std::string markdown(
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
/// @}
private:
/** Serialization function */
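
A minimal sketch of the updated DiscreteBayesNet interface (not part of the patch; the variables and probability specs below are invented):

    #include <gtsam/discrete/DiscreteBayesNet.h>
    #include <iostream>

    using namespace gtsam;

    int main() {
      DiscreteKey Asia(0, 2), Smoking(1, 2);  // hypothetical binary variables
      DiscreteBayesNet bn;
      bn.add(Asia, "99/1");                   // DiscretePrior from a spec string
      bn.add(Smoking | Asia = "4/1 1/4");     // DiscreteConditional from a Signature
      DiscreteValues mpe = bn.optimize();     // now returns values directly, not a shared_ptr
      DiscreteValues s = bn.sample();         // ancestral sampling
      std::cout << bn(mpe) << " " << bn(s) << std::endl;  // evaluate via operator()
      return 0;
    }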

@@ -31,7 +31,7 @@ namespace gtsam {
/* ************************************************************************* */
double DiscreteBayesTreeClique::evaluate(
const DiscreteConditional::Values& values) const {
const DiscreteValues& values) const {
// evaluate all conditionals and multiply
double result = (*conditional_)(values);
for (const auto& child : children) {

@@ -47,7 +47,7 @@ namespace gtsam {
/* ************************************************************************* */
double DiscreteBayesTree::evaluate(
const DiscreteConditional::Values& values) const {
const DiscreteValues& values) const {
double result = 1.0;
for (const auto& root : roots_) {
result *= root->evaluate(values);

@@ -55,8 +55,21 @@ namespace gtsam {
return result;
}
} // \namespace gtsam
/* **************************************************************************/
std::string DiscreteBayesTree::markdown(
const KeyFormatter& keyFormatter) const {
using std::endl;
std::stringstream ss;
ss << "`DiscreteBayesTree` of size " << nodes_.size() << endl << endl;
auto visitor = [&](const DiscreteBayesTreeClique::shared_ptr& clique,
size_t& indent) {
ss << "\n" << clique->conditional()->markdown(keyFormatter);
return indent + 1;
};
size_t indent;
treeTraversal::DepthFirstForest(*this, indent, visitor);
return ss.str();
}
/* **************************************************************************/
} // namespace gtsam

@@ -57,8 +57,8 @@ class GTSAM_EXPORT DiscreteBayesTreeClique
conditional_->printSignature(s, formatter);
}
//** evaluate conditional probability of subtree for given Values */
double evaluate(const DiscreteConditional::Values& values) const;
//** evaluate conditional probability of subtree for given DiscreteValues */
double evaluate(const DiscreteValues& values) const;
};
/* ************************************************************************* */

@@ -72,14 +72,31 @@ class GTSAM_EXPORT DiscreteBayesTree
typedef DiscreteBayesTree This;
typedef boost::shared_ptr<This> shared_ptr;
/// @name Standard interface
/// @{
/** Default constructor, creates an empty Bayes tree */
DiscreteBayesTree() {}
/** Check equality */
bool equals(const This& other, double tol = 1e-9) const;
//** evaluate probability for given Values */
double evaluate(const DiscreteConditional::Values& values) const;
//** evaluate probability for given DiscreteValues */
double evaluate(const DiscreteValues& values) const;
//** (Preferred) sugar for the above for given DiscreteValues */
double operator()(const DiscreteValues& values) const {
return evaluate(values);
}
/// @}
/// @name Wrapper support
/// @{
/// Render as markdown table.
std::string markdown(
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
/// @}
};
} // namespace gtsam
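
A possible usage sketch for the new DiscreteBayesTree sugar (not part of the patch; it assumes a tiny hypothetical factor graph and the standard multifrontal elimination call):

    #include <gtsam/discrete/DiscreteFactorGraph.h>
    #include <gtsam/discrete/DiscreteBayesTree.h>
    #include <iostream>

    using namespace gtsam;

    int main() {
      DiscreteKey A(0, 2), B(1, 2);                    // made-up binary variables
      DiscreteFactorGraph graph;
      graph.add(A & B, "1 2 3 4");                     // one pairwise factor
      auto bayesTree = graph.eliminateMultifrontal();  // directed junction tree
      DiscreteValues values;
      values[A.first] = 1;
      values[B.first] = 0;
      std::cout << (*bayesTree)(values) << std::endl;  // new operator() sugar
      std::cout << bayesTree->markdown() << std::endl; // new markdown rendering
      return 0;
    }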

@@ -97,32 +97,93 @@ bool DiscreteConditional::equals(const DiscreteFactor& other,
}
/* ******************************************************************************** */
Potentials::ADT DiscreteConditional::choose(const Values& parentsValues) const {
ADT pFS(*this);
Key j; size_t value;
for(Key key: parents()) {
static DiscreteConditional::ADT Choose(const DiscreteConditional& conditional,
const DiscreteValues& parentsValues) {
// Get the big decision tree with all the levels, and then go down the
// branches based on the value of the parent variables.
DiscreteConditional::ADT adt(conditional);
size_t value;
for (Key j : conditional.parents()) {
try {
j = (key);
value = parentsValues.at(j);
pFS = pFS.choose(j, value);
} catch (exception&) {
cout << "Key: " << j << " Value: " << value << endl;
adt = adt.choose(j, value);  // ADT keeps getting smaller.
} catch (std::out_of_range&) {
parentsValues.print("parentsValues: ");
throw runtime_error("DiscreteConditional::choose: parent value missing");
};
}
return adt;
}
/* ******************************************************************************** */
DecisionTreeFactor::shared_ptr DiscreteConditional::choose(
const DiscreteValues& parentsValues) const {
// Get the big decision tree with all the levels, and then go down the
// branches based on the value of the parent variables.
ADT adt(*this);
size_t value;
for (Key j : parents()) {
try {
value = parentsValues.at(j);
adt = adt.choose(j, value);  // ADT keeps getting smaller.
} catch (exception&) {
parentsValues.print("parentsValues: ");
// pFS.print("pFS: ");
throw runtime_error("DiscreteConditional::choose: parent value missing");
};
}
return pFS;
// Convert ADT to factor.
DiscreteKeys discreteKeys;
for (Key j : frontals()) {
discreteKeys.emplace_back(j, this->cardinality(j));
}
return boost::make_shared<DecisionTreeFactor>(discreteKeys, adt);
}
/* ******************************************************************************** */
void DiscreteConditional::solveInPlace(Values& values) const {
DecisionTreeFactor::shared_ptr DiscreteConditional::likelihood(
const DiscreteValues& frontalValues) const {
// Get the big decision tree with all the levels, and then go down the
// branches based on the value of the frontal variables.
ADT adt(*this);
size_t value;
for (Key j : frontals()) {
try {
value = frontalValues.at(j);
adt = adt.choose(j, value);  // ADT keeps getting smaller.
} catch (exception&) {
frontalValues.print("frontalValues: ");
throw runtime_error("DiscreteConditional::choose: frontal value missing");
};
}
// Convert ADT to factor.
DiscreteKeys discreteKeys;
for (Key j : parents()) {
discreteKeys.emplace_back(j, this->cardinality(j));
}
return boost::make_shared<DecisionTreeFactor>(discreteKeys, adt);
}
/* ******************************************************************************** */
DecisionTreeFactor::shared_ptr DiscreteConditional::likelihood(
size_t parent_value) const {
if (nrFrontals() != 1)
throw std::invalid_argument(
"Single value likelihood can only be invoked on single-variable "
"conditional");
DiscreteValues values;
values.emplace(keys_[0], parent_value);
return likelihood(values);
}
/* ******************************************************************************** */
void DiscreteConditional::solveInPlace(DiscreteValues* values) const {
// TODO: Abhijit asks: is this really the fastest way? He thinks it is.
ADT pFS = choose(values);  // P(F|S=parentsValues)
ADT pFS = Choose(*this, *values);  // P(F|S=parentsValues)
// Initialize
Values mpe;
DiscreteValues mpe;
double maxP = 0;
DiscreteKeys keys;

@@ -131,10 +192,10 @@ void DiscreteConditional::solveInPlace(Values& values) const {
keys & dk;
}
// Get all Possible Configurations
vector<Values> allPosbValues = cartesianProduct(keys);
const auto allPosbValues = cartesianProduct(keys);
// Find the MPE
for(Values& frontalVals: allPosbValues) {
for(const auto& frontalVals: allPosbValues) {
double pValueS = pFS(frontalVals);  // P(F=value|S=parentsValues)
// Update MPE solution if better
if (pValueS > maxP) {

@@ -145,28 +206,28 @@ void DiscreteConditional::solveInPlace(Values& values) const {
//set values (inPlace) to mpe
for(Key j: frontals()) {
values[j] = mpe[j];
(*values)[j] = mpe[j];
}
}
/* ******************************************************************************** */
void DiscreteConditional::sampleInPlace(Values& values) const {
void DiscreteConditional::sampleInPlace(DiscreteValues* values) const {
assert(nrFrontals() == 1);
Key j = (firstFrontalKey());
size_t sampled = sample(values);  // Sample variable
values[j] = sampled;  // store result in partial solution
size_t sampled = sample(*values);  // Sample variable given parents
(*values)[j] = sampled;  // store result in partial solution
}
/* ******************************************************************************** */
size_t DiscreteConditional::solve(const Values& parentsValues) const {
size_t DiscreteConditional::solve(const DiscreteValues& parentsValues) const {
// TODO: is this really the fastest way? I think it is.
ADT pFS = choose(parentsValues);  // P(F|S=parentsValues)
ADT pFS = Choose(*this, parentsValues);  // P(F|S=parentsValues)
// Then, find the max over all remaining
// TODO, only works for one key now, seems horribly slow this way
size_t mpe = 0;
Values frontals;
DiscreteValues frontals;
double maxP = 0;
assert(nrFrontals() == 1);
Key j = (firstFrontalKey());

@@ -183,18 +244,22 @@ size_t DiscreteConditional::solve(const Values& parentsValues) const {
}
/* ******************************************************************************** */
size_t DiscreteConditional::sample(const Values& parentsValues) const {
size_t DiscreteConditional::sample(const DiscreteValues& parentsValues) const {
static mt19937 rng(2);  // random number generator
// Get the correct conditional density
ADT pFS = choose(parentsValues);  // P(F|S=parentsValues)
ADT pFS = Choose(*this, parentsValues);  // P(F|S=parentsValues)
// TODO(Duy): only works for one key now, seems horribly slow this way
assert(nrFrontals() == 1);
if (nrFrontals() != 1) {
throw std::invalid_argument(
"DiscreteConditional::sample can only be called on single variable "
"conditionals");
}
Key key = firstFrontalKey();
size_t nj = cardinality(key);
vector<double> p(nj);
Values frontals;
DiscreteValues frontals;
for (size_t value = 0; value < nj; value++) {
frontals[key] = value;
p[value] = pFS(frontals);  // P(F=value|S=parentsValues)

@@ -207,5 +272,91 @@ size_t DiscreteConditional::sample(const Values& parentsValues) const {
}
/* ******************************************************************************** */
size_t DiscreteConditional::sample(size_t parent_value) const {
if (nrParents() != 1)
throw std::invalid_argument(
"Single value sample() can only be invoked on single-parent "
"conditional");
DiscreteValues values;
values.emplace(keys_.back(), parent_value);
return sample(values);
}
}// namespace
/* ************************************************************************* */
std::string DiscreteConditional::markdown(
const KeyFormatter& keyFormatter) const {
std::stringstream ss;
// Print out signature.
ss << " *P(";
bool first = true;
for (Key key : frontals()) {
if (!first) ss << ",";
ss << keyFormatter(key);
first = false;
}
if (nrParents() == 0) {
// We have no parents, call factor method.
ss << ")*:\n" << std::endl;
ss << DecisionTreeFactor::markdown(keyFormatter);
return ss.str();
}
// We have parents, continue signature and do custom print.
ss << "|";
first = true;
for (Key parent : parents()) {
if (!first) ss << ",";
ss << keyFormatter(parent);
first = false;
}
ss << ")*:\n" << std::endl;
// Print out header and construct argument for `cartesianProduct`.
std::vector<std::pair<Key, size_t>> pairs;
ss << "|";
const_iterator it;
for(Key parent: parents()) {
ss << keyFormatter(parent) << "|";
pairs.emplace_back(parent, cardinalities_.at(parent));
}
size_t n = 1;
for(Key key: frontals()) {
size_t k = cardinalities_.at(key);
pairs.emplace_back(key, k);
n *= k;
}
std::vector<std::pair<Key, size_t>> slatnorf(pairs.rbegin(),
pairs.rend() - nrParents());
const auto frontal_assignments = cartesianProduct(slatnorf);
for (const auto& a : frontal_assignments) {
for (it = beginFrontals(); it != endFrontals(); ++it) ss << a.at(*it);
ss << "|";
}
ss << "\n";
// Print out separator with alignment hints.
ss << "|";
for (size_t j = 0; j < nrParents() + n; j++) ss << ":-:|";
ss << "\n";
// Print out all rows.
std::vector<std::pair<Key, size_t>> rpairs(pairs.rbegin(), pairs.rend());
const auto assignments = cartesianProduct(rpairs);
size_t count = 0;
for (const auto& a : assignments) {
if (count == 0) {
ss << "|";
for (it = beginParents(); it != endParents(); ++it)
ss << a.at(*it) << "|";
}
ss << operator()(a) << "|";
count = (count + 1) % n;
if (count == 0) ss << "\n";
}
return ss.str();
}
/* ************************************************************************* */
} // namespace gtsam

@@ -42,10 +42,7 @@ public:
typedef DecisionTreeFactor BaseFactor; ///< Typedef to our factor base class
typedef Conditional<BaseFactor, This> BaseConditional; ///< Typedef to our conditional base class
/** A map from keys to values..
* TODO: Again, do we need this??? */
typedef Assignment<Key> Values;
typedef boost::shared_ptr<Values> sharedValues;
using Values = DiscreteValues; ///< backwards compatibility
/// @name Standard Constructors
/// @{

@@ -60,6 +57,34 @@ public:
/** Construct from signature */
DiscreteConditional(const Signature& signature);
/**
* Construct from key, parents, and a Signature::Table specifying the
* conditional probability table (CPT) in 00 01 10 11 order. For
* three-valued, it would be 00 01 02 10 11 12 20 21 22, etc....
*
* Example: DiscreteConditional P(D, {B,E}, table);
*/
DiscreteConditional(const DiscreteKey& key, const DiscreteKeys& parents,
const Signature::Table& table)
: DiscreteConditional(Signature(key, parents, table)) {}
/**
* Construct from key, parents, and a string specifying the conditional
* probability table (CPT) in 00 01 10 11 order. For three-valued, it would
* be 00 01 02 10 11 12 20 21 22, etc....
*
* The string is parsed into a Signature::Table.
*
* Example: DiscreteConditional P(D, {B,E}, "9/1 2/8 3/7 1/9");
*/
DiscreteConditional(const DiscreteKey& key, const DiscreteKeys& parents,
const std::string& spec)
: DiscreteConditional(Signature(key, parents, spec)) {}
/// No-parent specialization; can also use DiscretePrior.
DiscreteConditional(const DiscreteKey& key, const std::string& spec)
: DiscreteConditional(Signature(key, {}, spec)) {}
/** construct P(X|Y)=P(X,Y)/P(Y) from P(X,Y) and P(Y) */
DiscreteConditional(const DecisionTreeFactor& joint,
const DecisionTreeFactor& marginal);

@@ -102,7 +127,7 @@ public:
}
/// Evaluate, just look up in AlgebraicDecisonTree
double operator()(const Values& values) const override {
double operator()(const DiscreteValues& values) const override {
return Potentials::operator()(values);
}

@@ -111,35 +136,54 @@ public:
return DecisionTreeFactor::shared_ptr(new DecisionTreeFactor(*this));
}
/** Restrict to given parent values, returns AlgebraicDecisionDiagram */
ADT choose(const Assignment<Key>& parentsValues) const;
/** Restrict to given parent values, returns DecisionTreeFactor */
DecisionTreeFactor::shared_ptr choose(
const DiscreteValues& parentsValues) const;
/** Convert to a likelihood factor by providing value before bar. */
DecisionTreeFactor::shared_ptr likelihood(
const DiscreteValues& frontalValues) const;
/** Single variable version of likelihood. */
DecisionTreeFactor::shared_ptr likelihood(size_t parent_value) const;
/**
* solve a conditional
* @param parentsValues Known values of the parents
* @return MPE value of the child (1 frontal variable).
*/
size_t solve(const Values& parentsValues) const;
size_t solve(const DiscreteValues& parentsValues) const;
/**
* sample
* @param parentsValues Known values of the parents
* @return sample from conditional
*/
size_t sample(const Values& parentsValues) const;
size_t sample(const DiscreteValues& parentsValues) const;
/// Single value version.
size_t sample(size_t parent_value) const;
/// @}
/// @name Advanced Interface
/// @{
/// solve a conditional, in place
void solveInPlace(Values& parentsValues) const;
void solveInPlace(DiscreteValues* parentsValues) const;
/// sample in place, stores result in partial solution
void sampleInPlace(Values& parentsValues) const;
void sampleInPlace(DiscreteValues* parentsValues) const;
/// @}
/// @name Wrapper support
/// @{
/// Render as markdown table.
std::string markdown(
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override;
/// @}
};
// DiscreteConditional
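
A minimal sketch of the new DiscreteConditional helpers (not part of the patch; the keys, spec string, and values are hypothetical):

    #include <gtsam/discrete/DiscreteConditional.h>
    #include <iostream>

    using namespace gtsam;

    int main() {
      DiscreteKey X(0, 2), Y(1, 2);                    // made-up binary variables
      DiscreteConditional P_X_given_Y(X | Y = "4/1 1/4");
      DiscreteValues parents;
      parents[Y.first] = 1;
      auto restricted = P_X_given_Y.choose(parents);   // now returns a DecisionTreeFactor
      auto like = P_X_given_Y.likelihood(1);           // likelihood factor for frontal value 1
      size_t x = P_X_given_Y.sample(0);                // sample X given the single parent value 0
      std::cout << x << " " << restricted->size() << " " << like->size() << std::endl;
      return 0;
    }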

@@ -18,7 +18,7 @@
#pragma once
#include <gtsam/discrete/Assignment.h>
#include <gtsam/discrete/DiscreteValues.h>
#include <gtsam/inference/Factor.h>
#include <gtsam/base/Testable.h>

@@ -40,18 +40,7 @@ public:
typedef boost::shared_ptr<DiscreteFactor> shared_ptr; ///< shared_ptr to this class
typedef Factor Base; ///< Our base class
/** A map from keys to values
* TODO: Do we need this? Should we just use gtsam::Values?
* We just need another special DiscreteValue to represent labels,
* However, all other Lie's operators are undefined in this class.
* The good thing is we can have a Hybrid graph of discrete/continuous variables
* together..
* Another good thing is we don't need to have the special DiscreteKey which stores
* cardinality of a Discrete variable. It should be handled naturally in
* the new class DiscreteValue, as the varible's type (domain)
*/
typedef Assignment<Key> Values;
typedef boost::shared_ptr<Values> sharedValues;
using Values = DiscreteValues; ///< backwards compatibility
public:

@@ -92,19 +81,26 @@ public:
/// @{
/// Find value for given assignment of values to variables
virtual double operator()(const Values&) const = 0;
virtual double operator()(const DiscreteValues&) const = 0;
/// Multiply in a DecisionTreeFactor and return the result as DecisionTreeFactor
virtual DecisionTreeFactor operator*(const DecisionTreeFactor&) const = 0;
virtual DecisionTreeFactor toDecisionTreeFactor() const = 0;
/// @}
/// @name Wrapper support
/// @{
/// Render as markdown table.
virtual std::string markdown(
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const = 0;
/// @}
};
// DiscreteFactor
// traits
template<> struct traits<DiscreteFactor> : public Testable<DiscreteFactor> {};
template<> struct traits<DiscreteFactor::Values> : public Testable<DiscreteFactor::Values> {};
}// namespace gtsam

@@ -56,7 +56,7 @@ namespace gtsam {
/* ************************************************************************* */
double DiscreteFactorGraph::operator()(
const DiscreteFactor::Values &values) const {
const DiscreteValues &values) const {
double product = 1.0;
for( const sharedFactor& factor: factors_ )
product *= (*factor)(values);

@@ -94,7 +94,7 @@ namespace gtsam {
// }
/* ************************************************************************* */
DiscreteFactor::sharedValues DiscreteFactorGraph::optimize() const
DiscreteValues DiscreteFactorGraph::optimize() const
{
gttic(DiscreteFactorGraph_optimize);
return BaseEliminateable::eliminateSequential()->optimize();

@@ -129,6 +129,18 @@ namespace gtsam {
return std::make_pair(cond, sum);
}
/* ************************************************************************* */
} // namespace
/* ************************************************************************* */
std::string DiscreteFactorGraph::markdown(
const KeyFormatter& keyFormatter) const {
using std::endl;
std::stringstream ss;
ss << "`DiscreteFactorGraph` of size " << size() << endl << endl;
for (size_t i = 0; i < factors_.size(); i++) {
ss << "factor " << i << ":\n";
ss << factors_[i]->markdown(keyFormatter) << endl;
}
return ss.str();
}
/* ************************************************************************* */
} // namespace gtsam

@@ -71,10 +71,10 @@ public:
typedef EliminateableFactorGraph<This> BaseEliminateable; ///< Typedef to base elimination class
typedef boost::shared_ptr<This> shared_ptr; ///< shared_ptr to this class
using Values = DiscreteValues; ///< backwards compatibility
/** A map from keys to values */
typedef KeyVector Indices;
typedef Assignment<Key> Values;
typedef boost::shared_ptr<Values> sharedValues;
/** Default constructor */
DiscreteFactorGraph() {}

@@ -101,35 +101,23 @@ public:
/// @}
template<class SOURCE>
void add(const DiscreteKey& j, SOURCE table) {
DiscreteKeys keys;
keys.push_back(j);
push_back(boost::make_shared<DecisionTreeFactor>(keys, table));
/** Add a decision-tree factor */
template <typename... Args>
void add(Args&&... args) {
emplace_shared<DecisionTreeFactor>(std::forward<Args>(args)...);
}
template<class SOURCE>
void add(const DiscreteKey& j1, const DiscreteKey& j2, SOURCE table) {
DiscreteKeys keys;
keys.push_back(j1);
keys.push_back(j2);
push_back(boost::make_shared<DecisionTreeFactor>(keys, table));
}
/** add shared discreteFactor immediately from arguments */
template<class SOURCE>
void add(const DiscreteKeys& keys, SOURCE table) {
push_back(boost::make_shared<DecisionTreeFactor>(keys, table));
}
/** Return the set of variables involved in the factors (set union) */
KeySet keys() const;
/** return product of all factors as a single factor */
DecisionTreeFactor product() const;
/** Evaluates the factor graph given values, returns the joint probability of the factor graph given specific instantiation of values*/
double operator()(const DiscreteFactor::Values & values) const;
/**
* Evaluates the factor graph given values, returns the joint probability of
* the factor graph given specific instantiation of values
*/
double operator()(const DiscreteValues& values) const;
/// print
void print(

@@ -140,7 +128,7 @@ public:
* the dense elimination function specified in \c function,
* followed by back-substitution resulting from elimination. Is equivalent
* to calling graph.eliminateSequential()->optimize(). */
DiscreteFactor::sharedValues optimize() const;
DiscreteValues optimize() const;
// /** Permute the variables in the factors */

@@ -149,6 +137,14 @@ public:
// /** Apply a reduction, which is a remapping of variable indices. */
// GTSAM_EXPORT void reduceWithInverse(const internal::Reduction& inverseReduction);
/// @name Wrapper support
/// @{
/// Render as markdown table.
std::string markdown(
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
/// @}
}; // \ DiscreteFactorGraph
/// traits
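
A minimal sketch of the simplified DiscreteFactorGraph::add and the new optimize() return type (not part of the patch; keys and tables are invented):

    #include <gtsam/discrete/DiscreteFactorGraph.h>
    #include <iostream>

    using namespace gtsam;

    int main() {
      DiscreteKey C(0, 2), S(1, 2), R(2, 2);     // made-up binary variables
      DiscreteFactorGraph graph;
      graph.add(C, "0.5 0.5");                   // unary factor, via single-key constructor
      graph.add(C & S, "0.5 0.5 0.9 0.1");       // pairwise factors, forwarded to DecisionTreeFactor
      graph.add(S & R, "0.9 0.1 0.2 0.8");
      DiscreteValues mpe = graph.optimize();     // returns values directly now
      std::cout << "P(mpe) = " << graph(mpe) << std::endl;
      return 0;
    }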

@@ -31,19 +31,19 @@ namespace gtsam {
* Key type for discrete conditionals
* Includes name and cardinality
*/
typedef std::pair<Key,size_t> DiscreteKey;
using DiscreteKey = std::pair<Key,size_t>;
/// DiscreteKeys is a set of keys that can be assembled using the & operator
struct DiscreteKeys: public std::vector<DiscreteKey> {
struct GTSAM_EXPORT DiscreteKeys: public std::vector<DiscreteKey> {
/// Default constructor
DiscreteKeys() {
}
// Forward all constructors.
using std::vector<DiscreteKey>::vector;
/// Constructor for serialization
DiscreteKeys() : std::vector<DiscreteKey>::vector() {}
/// Construct from a key
DiscreteKeys(const DiscreteKey& key) {
push_back(key);
}
explicit DiscreteKeys(const DiscreteKey& key) { push_back(key); }
/// Construct from a vector of keys
DiscreteKeys(const std::vector<DiscreteKey>& keys) :

@@ -51,13 +51,13 @@ namespace gtsam {
}
/// Construct from cardinalities with default names
GTSAM_EXPORT DiscreteKeys(const std::vector<int>& cs);
DiscreteKeys(const std::vector<int>& cs);
/// Return a vector of indices
GTSAM_EXPORT KeyVector indices() const;
KeyVector indices() const;
/// Return a map from index to cardinality
GTSAM_EXPORT std::map<Key,size_t> cardinalities() const;
std::map<Key,size_t> cardinalities() const;
/// Add a key (non-const!)
DiscreteKeys& operator&(const DiscreteKey& key) {

@@ -67,5 +67,5 @@ namespace gtsam {
}; // DiscreteKeys
/// Create a list from two keys
GTSAM_EXPORT DiscreteKeys operator&(const DiscreteKey& key1, const DiscreteKey& key2);
DiscreteKeys operator&(const DiscreteKey& key1, const DiscreteKey& key2);
}
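
A short sketch of what the DiscreteKeys constructor changes allow (not part of the patch; keys are hypothetical):

    #include <gtsam/discrete/DiscreteKey.h>

    using namespace gtsam;

    int main() {
      DiscreteKey A(0, 2), B(1, 3);
      DiscreteKeys both{A, B};       // brace init via the inherited vector constructors
      DiscreteKeys justA(A);         // single-key constructor is now explicit
      DiscreteKeys chained = A & B;  // operator& still assembles keys
      return (both.size() + justA.size() + chained.size() > 0) ? 0 : 1;
    }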

@@ -29,7 +29,7 @@ namespace gtsam {
/**
* A class for computing marginals of variables in a DiscreteFactorGraph
*/
class DiscreteMarginals {
class GTSAM_EXPORT DiscreteMarginals {
protected:

@@ -64,7 +64,7 @@ namespace gtsam {
//Create result
Vector vResult(key.second);
for (size_t state = 0; state < key.second ; ++ state) {
DiscreteFactor::Values values;
DiscreteValues values;
values[key.first] = state;
vResult(state) = (*marginalFactor)(values);
}

@@ -0,0 +1,50 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file DiscretePrior.cpp
* @date December 2021
* @author Frank Dellaert
*/
#include <gtsam/discrete/DiscretePrior.h>
namespace gtsam {
void DiscretePrior::print(const std::string& s,
const KeyFormatter& formatter) const {
Base::print(s, formatter);
}
double DiscretePrior::operator()(size_t value) const {
if (nrFrontals() != 1)
throw std::invalid_argument(
"Single value operator can only be invoked on single-variable "
"priors");
DiscreteValues values;
values.emplace(keys_[0], value);
return Base::operator()(values);
}
std::vector<double> DiscretePrior::pmf() const {
if (nrFrontals() != 1)
throw std::invalid_argument(
"DiscretePrior::pmf only defined for single-variable priors");
const size_t nrValues = cardinalities_.at(keys_[0]);
std::vector<double> array;
array.reserve(nrValues);
for (size_t v = 0; v < nrValues; v++) {
array.push_back(operator()(v));
}
return array;
}
} // namespace gtsam

@@ -0,0 +1,111 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file DiscretePrior.h
* @date December 2021
* @author Frank Dellaert
*/
#pragma once
#include <gtsam/discrete/DiscreteConditional.h>
#include <string>
namespace gtsam {
/**
* A prior probability on a set of discrete variables.
* Derives from DiscreteConditional
*/
class GTSAM_EXPORT DiscretePrior : public DiscreteConditional {
public:
using Base = DiscreteConditional;
/// @name Standard Constructors
/// @{
/// Default constructor needed for serialization.
DiscretePrior() {}
/// Constructor from factor.
DiscretePrior(const DecisionTreeFactor& f) : Base(f.size(), f) {}
/**
* Construct from a Signature.
*
* Example: DiscretePrior P(D % "3/2");
*/
DiscretePrior(const Signature& s) : Base(s) {}
/**
* Construct from key and a Signature::Table specifying the
* conditional probability table (CPT).
*
* Example: DiscretePrior P(D, table);
*/
DiscretePrior(const DiscreteKey& key, const Signature::Table& table)
: Base(Signature(key, {}, table)) {}
/**
* Construct from key and a string specifying the conditional
* probability table (CPT).
*
* Example: DiscretePrior P(D, "9/1 2/8 3/7 1/9");
*/
DiscretePrior(const DiscreteKey& key, const std::string& spec)
: DiscretePrior(Signature(key, {}, spec)) {}
/// @}
/// @name Testable
/// @{
/// GTSAM-style print
void print(
const std::string& s = "Discrete Prior: ",
const KeyFormatter& formatter = DefaultKeyFormatter) const override;
/// @}
/// @name Standard interface
/// @{
/// Evaluate given a single value.
double operator()(size_t value) const;
/// We also want to keep the Base version, taking DiscreteValues:
// TODO(dellaert): does not play well with wrapper!
// using Base::operator();
/// Return entire probability mass function.
std::vector<double> pmf() const;
/**
* solve a conditional
* @return MPE value of the child (1 frontal variable).
*/
size_t solve() const { return Base::solve({}); }
/**
* sample
* @return sample from conditional
*/
size_t sample() const { return Base::sample({}); }
/// @}
};
// DiscretePrior
// traits
template <>
struct traits<DiscretePrior> : public Testable<DiscretePrior> {};
} // namespace gtsam
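
A minimal sketch of how the new DiscretePrior might be used (not part of the patch; the key and spec are made up):

    #include <gtsam/discrete/DiscretePrior.h>
    #include <iostream>

    using namespace gtsam;

    int main() {
      DiscreteKey Coin(0, 2);            // hypothetical binary variable
      DiscretePrior prior(Coin, "3/7");  // P(Coin=0)=0.3, P(Coin=1)=0.7
      std::cout << prior(1) << std::endl;               // evaluate a single value
      for (double p : prior.pmf()) std::cout << p << " ";  // whole probability mass function
      std::cout << "\nMPE: " << prior.solve() << ", sample: " << prior.sample() << std::endl;
      return 0;
    }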
|
||||
|
|
@ -0,0 +1,58 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file DiscreteValues.h
 * @date Dec 13, 2021
 * @author Frank Dellaert
 */

#pragma once

#include <gtsam/discrete/Assignment.h>
#include <gtsam/inference/Key.h>

namespace gtsam {

/** A map from keys to values.
 * TODO(dellaert): Do we need this? Should we just use gtsam::DiscreteValues?
 * We only need a special DiscreteValue type to represent labels; all other
 * Lie operators are undefined for such a class.
 * One benefit is that we can have a hybrid graph of discrete and continuous
 * variables together.
 * Another benefit is that we would no longer need the special DiscreteKey,
 * which stores the cardinality of a discrete variable: that would be handled
 * naturally by the new DiscreteValue class, as the variable's type (domain).
 */
class DiscreteValues : public Assignment<Key> {
 public:
  using Assignment::Assignment;  // all constructors

  // Define the implicit default constructor.
  DiscreteValues() = default;

  // Construct from assignment.
  DiscreteValues(const Assignment<Key>& a) : Assignment<Key>(a) {}

  void print(const std::string& s = "",
             const KeyFormatter& keyFormatter = DefaultKeyFormatter) const {
    std::cout << s << ": ";
    for (const typename Assignment::value_type& keyValue : *this)
      std::cout << "(" << keyFormatter(keyValue.first) << ", "
                << keyValue.second << ")";
    std::cout << std::endl;
  }
};

// traits
template<> struct traits<DiscreteValues> : public Testable<DiscreteValues> {};

} // namespace gtsam
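
A quick sketch of how the new DiscreteValues type is used as a plain key-to-value assignment (illustrative only, not part of the commit; the function name is made up):

#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/discrete/DiscreteValues.h>

void discreteValuesSketch() {
  using namespace gtsam;
  DiscreteKey X(0, 2), Y(1, 3);
  DecisionTreeFactor f(X & Y, "1 2 3 4 5 6");
  DiscreteValues values;  // behaves like std::map<Key, size_t>
  values[0] = 1;          // X = 1
  values[1] = 2;          // Y = 2
  double p = f(values);   // evaluate the factor at this assignment (6 here)
  values.print("assignment");
}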
@ -26,10 +26,6 @@ using namespace std;
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
// explicit instantiation
|
||||
template class DecisionTree<Key, double>;
|
||||
template class AlgebraicDecisionTree<Key>;
|
||||
|
||||
/* ************************************************************************* */
|
||||
double Potentials::safe_div(const double& a, const double& b) {
|
||||
// cout << boost::format("%g / %g = %g\n") % a % b % ((a == 0) ? 0 : (a / b));
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ namespace gtsam {
|
|||
/**
|
||||
* A base class for both DiscreteFactor and DiscreteConditional
|
||||
*/
|
||||
class Potentials: public AlgebraicDecisionTree<Key> {
|
||||
class GTSAM_EXPORT Potentials: public AlgebraicDecisionTree<Key> {
|
||||
|
||||
public:
|
||||
|
||||
|
|
@ -46,7 +46,7 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
// Safe division for probabilities
|
||||
GTSAM_EXPORT static double safe_div(const double& a, const double& b);
|
||||
static double safe_div(const double& a, const double& b);
|
||||
|
||||
// // Apply either a permutation or a reduction
|
||||
// template<class P>
|
||||
|
|
@ -55,10 +55,10 @@ namespace gtsam {
|
|||
public:
|
||||
|
||||
/** Default constructor for I/O */
|
||||
GTSAM_EXPORT Potentials();
|
||||
Potentials();
|
||||
|
||||
/** Constructor from Indices and ADT */
|
||||
GTSAM_EXPORT Potentials(const DiscreteKeys& keys, const ADT& decisionTree);
|
||||
Potentials(const DiscreteKeys& keys, const ADT& decisionTree);
|
||||
|
||||
/** Constructor from Indices and (string or doubles) */
|
||||
template<class SOURCE>
|
||||
|
|
@ -67,8 +67,8 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
// Testable
|
||||
GTSAM_EXPORT bool equals(const Potentials& other, double tol = 1e-9) const;
|
||||
GTSAM_EXPORT void print(const std::string& s = "Potentials: ",
|
||||
bool equals(const Potentials& other, double tol = 1e-9) const;
|
||||
void print(const std::string& s = "Potentials: ",
|
||||
const KeyFormatter& formatter = DefaultKeyFormatter) const;
|
||||
|
||||
size_t cardinality(Key j) const { return cardinalities_.at(j);}
|
||||
|
|
|
|||
|
|
@ -38,19 +38,7 @@ namespace gtsam {
|
|||
using boost::phoenix::push_back;
|
||||
|
||||
// Special rows, true and false
|
||||
Signature::Row createF() {
|
||||
Signature::Row r(2);
|
||||
r[0] = 1;
|
||||
r[1] = 0;
|
||||
return r;
|
||||
}
|
||||
Signature::Row createT() {
|
||||
Signature::Row r(2);
|
||||
r[0] = 0;
|
||||
r[1] = 1;
|
||||
return r;
|
||||
}
|
||||
Signature::Row T = createT(), F = createF();
|
||||
Signature::Row F{1, 0}, T{0, 1};
|
||||
|
||||
// Special tables (inefficient, but do we care for user input?)
|
||||
Signature::Table logic(bool ff, bool ft, bool tf, bool tt) {
|
||||
|
|
@ -69,40 +57,13 @@ namespace gtsam {
|
|||
table = or_ | and_ | rows;
|
||||
or_ = qi::lit("OR")[qi::_val = logic(false, true, true, true)];
|
||||
and_ = qi::lit("AND")[qi::_val = logic(false, false, false, true)];
|
||||
rows = +(row | true_ | false_); // only loads first of the rows under boost 1.42
|
||||
rows = +(row | true_ | false_);
|
||||
row = qi::double_ >> +("/" >> qi::double_);
|
||||
true_ = qi::lit("T")[qi::_val = T];
|
||||
false_ = qi::lit("F")[qi::_val = F];
|
||||
}
|
||||
} grammar;
|
||||
|
||||
// Create simpler parsing function to avoid the issue of only parsing a single row
|
||||
bool parse_table(const string& spec, Signature::Table& table) {
|
||||
// check for OR, AND on whole phrase
|
||||
It f = spec.begin(), l = spec.end();
|
||||
if (qi::parse(f, l,
|
||||
qi::lit("OR")[ph::ref(table) = logic(false, true, true, true)]) ||
|
||||
qi::parse(f, l,
|
||||
qi::lit("AND")[ph::ref(table) = logic(false, false, false, true)]))
|
||||
return true;
|
||||
|
||||
// tokenize into separate rows
|
||||
istringstream iss(spec);
|
||||
string token;
|
||||
while (iss >> token) {
|
||||
Signature::Row values;
|
||||
It tf = token.begin(), tl = token.end();
|
||||
bool r = qi::parse(tf, tl,
|
||||
qi::double_[push_back(ph::ref(values), qi::_1)] >> +("/" >> qi::double_[push_back(ph::ref(values), qi::_1)]) |
|
||||
qi::lit("T")[ph::ref(values) = T] |
|
||||
qi::lit("F")[ph::ref(values) = F] );
|
||||
if (!r)
|
||||
return false;
|
||||
table.push_back(values);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
} // \namespace parser
|
||||
|
||||
ostream& operator <<(ostream &os, const Signature::Row &row) {
|
||||
|
|
@ -118,6 +79,18 @@ namespace gtsam {
|
|||
return os;
|
||||
}
|
||||
|
||||
Signature::Signature(const DiscreteKey& key, const DiscreteKeys& parents,
|
||||
const Table& table)
|
||||
: key_(key), parents_(parents) {
|
||||
operator=(table);
|
||||
}
|
||||
|
||||
Signature::Signature(const DiscreteKey& key, const DiscreteKeys& parents,
|
||||
const std::string& spec)
|
||||
: key_(key), parents_(parents) {
|
||||
operator=(spec);
|
||||
}
|
||||
|
||||
Signature::Signature(const DiscreteKey& key) :
|
||||
key_(key) {
|
||||
}
|
||||
|
|
@ -166,14 +139,11 @@ namespace gtsam {
|
|||
Signature& Signature::operator=(const string& spec) {
|
||||
spec_.reset(spec);
|
||||
Table table;
|
||||
// NOTE: using simpler parse function to ensure boost back compatibility
|
||||
// parser::It f = spec.begin(), l = spec.end();
|
||||
bool success = //
|
||||
// qi::phrase_parse(f, l, parser::grammar.table, qi::space, table); // using full grammar
|
||||
parser::parse_table(spec, table);
|
||||
parser::It f = spec.begin(), l = spec.end();
|
||||
bool success =
|
||||
qi::phrase_parse(f, l, parser::grammar.table, qi::space, table);
|
||||
if (success) {
|
||||
for(Row& row: table)
|
||||
normalize(row);
|
||||
for (Row& row : table) normalize(row);
|
||||
table_.reset(table);
|
||||
}
|
||||
return *this;
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ namespace gtsam {
|
|||
* The format is (Key % string) for nodes with no parents,
|
||||
* and (Key | Key, Key = string) for nodes with parents.
|
||||
*
|
||||
* The string specifies a conditional probability spec in the 00 01 10 11 order.
|
||||
* The string specifies a conditional probability table in 00 01 10 11 order.
|
||||
* For three-valued, it would be 00 01 02 10 11 12 20 21 22, etc...
|
||||
*
|
||||
* For example, given the following keys
|
||||
|
|
@ -45,9 +45,9 @@ namespace gtsam {
|
|||
* T|A = "99/1 95/5"
|
||||
* L|S = "99/1 90/10"
|
||||
* B|S = "70/30 40/60"
|
||||
* E|T,L = "F F F 1"
|
||||
* (E|T,L) = "F F F 1"
|
||||
* X|E = "95/5 2/98"
|
||||
* D|E,B = "9/1 2/8 3/7 1/9"
|
||||
* (D|E,B) = "9/1 2/8 3/7 1/9"
|
||||
*/
|
||||
class GTSAM_EXPORT Signature {
|
||||
|
||||
|
|
@ -72,45 +72,73 @@ namespace gtsam {
|
|||
boost::optional<Table> table_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* Construct from key, parents, and a Signature::Table specifying the
|
||||
* conditional probability table (CPT) in 00 01 10 11 order. For
|
||||
* three-valued, it would be 00 01 02 10 11 12 20 21 22, etc....
|
||||
*
|
||||
* The first string is parsed to add a key and parents.
|
||||
*
|
||||
* Example:
|
||||
* Signature::Table table{{0.9, 0.1}, {0.2, 0.8}, {0.3, 0.7}, {0.1, 0.9}};
|
||||
* Signature sig(D, {E, B}, table);
|
||||
*/
|
||||
Signature(const DiscreteKey& key, const DiscreteKeys& parents,
|
||||
const Table& table);
|
||||
|
||||
/** Constructor from DiscreteKey */
|
||||
Signature(const DiscreteKey& key);
|
||||
/**
|
||||
* Construct from key, parents, and a string specifying the conditional
|
||||
* probability table (CPT) in 00 01 10 11 order. For three-valued, it would
|
||||
* be 00 01 02 10 11 12 20 21 22, etc....
|
||||
*
|
||||
* The first string is parsed to add a key and parents. The second string
|
||||
* parses into a table.
|
||||
*
|
||||
* Example (same CPT as above):
|
||||
* Signature sig(D, {B,E}, "9/1 2/8 3/7 1/9");
|
||||
*/
|
||||
Signature(const DiscreteKey& key, const DiscreteKeys& parents,
|
||||
const std::string& spec);
|
||||
|
||||
/** the variable key */
|
||||
const DiscreteKey& key() const {
|
||||
return key_;
|
||||
}
|
||||
/**
|
||||
* Construct from a single DiscreteKey.
|
||||
*
|
||||
* The resulting signature has no parents or CPT table. Typical use then
|
||||
* either adds parents with | and , operators below, or assigns a table with
|
||||
* operator=().
|
||||
*/
|
||||
Signature(const DiscreteKey& key);
|
||||
|
||||
/** the parent keys */
|
||||
const DiscreteKeys& parents() const {
|
||||
return parents_;
|
||||
}
|
||||
/** the variable key */
|
||||
const DiscreteKey& key() const { return key_; }
|
||||
|
||||
/** All keys, with variable key first */
|
||||
DiscreteKeys discreteKeys() const;
|
||||
/** the parent keys */
|
||||
const DiscreteKeys& parents() const { return parents_; }
|
||||
|
||||
/** All key indices, with variable key first */
|
||||
KeyVector indices() const;
|
||||
/** All keys, with variable key first */
|
||||
DiscreteKeys discreteKeys() const;
|
||||
|
||||
// the CPT as parsed, if successful
|
||||
const boost::optional<Table>& table() const {
|
||||
return table_;
|
||||
}
|
||||
/** All key indices, with variable key first */
|
||||
KeyVector indices() const;
|
||||
|
||||
// the CPT as a vector of doubles, with key's values most rapidly changing
|
||||
std::vector<double> cpt() const;
|
||||
// the CPT as parsed, if successful
|
||||
const boost::optional<Table>& table() const { return table_; }
|
||||
|
||||
/** Add a parent */
|
||||
Signature& operator,(const DiscreteKey& parent);
|
||||
// the CPT as a vector of doubles, with key's values most rapidly changing
|
||||
std::vector<double> cpt() const;
|
||||
|
||||
/** Add the CPT spec - Fails in boost 1.40 */
|
||||
Signature& operator=(const std::string& spec);
|
||||
/** Add a parent */
|
||||
Signature& operator,(const DiscreteKey& parent);
|
||||
|
||||
/** Add the CPT spec directly as a table */
|
||||
Signature& operator=(const Table& table);
|
||||
/** Add the CPT spec */
|
||||
Signature& operator=(const std::string& spec);
|
||||
|
||||
/** provide streaming */
|
||||
GTSAM_EXPORT friend std::ostream& operator <<(std::ostream &os, const Signature &s);
|
||||
/** Add the CPT spec directly as a table */
|
||||
Signature& operator=(const Table& table);
|
||||
|
||||
/** provide streaming */
|
||||
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
|
||||
const Signature& s);
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -122,7 +150,6 @@ namespace gtsam {
|
|||
/**
|
||||
* Helper function to create Signature objects
|
||||
* example: Signature s(D % "99/1");
|
||||
* Uses string parser, which requires BOOST 1.42 or higher
|
||||
*/
|
||||
GTSAM_EXPORT Signature operator%(const DiscreteKey& key, const std::string& parent);
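
Since this signature DSL is used throughout the discrete module and its tests, here is a small sketch of three equivalent ways to build a CPT (illustrative only, not part of the diff; key and function names are made up):

#include <gtsam/discrete/Signature.h>

void signatureSketch() {
  using namespace gtsam;
  DiscreteKey D(0, 2), E(1, 2), B(2, 2);

  // Prior-style signature, parsed from a string:
  Signature s1(D % "99/1");  // P(D) = 0.99 / 0.01

  // Conditional with parents, parsed from a string in 00 01 10 11 order:
  Signature s2((D | E, B) = "9/1 2/8 3/7 1/9");  // P(D|E,B)

  // The same CPT, given directly as a table (no string parsing involved):
  Signature::Table table{{0.9, 0.1}, {0.2, 0.8}, {0.3, 0.7}, {0.1, 0.9}};
  Signature s3(D, {E, B}, table);
}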
@ -0,0 +1,214 @@
|
|||
//*************************************************************************
|
||||
// discrete
|
||||
//*************************************************************************
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
||||
#include<gtsam/discrete/DiscreteKey.h>
|
||||
class DiscreteKey {};
|
||||
|
||||
class DiscreteKeys {
|
||||
DiscreteKeys();
|
||||
size_t size() const;
|
||||
bool empty() const;
|
||||
gtsam::DiscreteKey at(size_t n) const;
|
||||
void push_back(const gtsam::DiscreteKey& point_pair);
|
||||
};
|
||||
|
||||
// DiscreteValues is added in specializations/discrete.h as a std::map
|
||||
|
||||
#include <gtsam/discrete/DiscreteFactor.h>
|
||||
class DiscreteFactor {
|
||||
void print(string s = "DiscreteFactor\n",
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
bool equals(const gtsam::DiscreteFactor& other, double tol = 1e-9) const;
|
||||
bool empty() const;
|
||||
size_t size() const;
|
||||
double operator()(const gtsam::DiscreteValues& values) const;
|
||||
};
|
||||
|
||||
#include <gtsam/discrete/DecisionTreeFactor.h>
|
||||
virtual class DecisionTreeFactor : gtsam::DiscreteFactor {
|
||||
DecisionTreeFactor();
|
||||
|
||||
DecisionTreeFactor(const gtsam::DiscreteKey& key,
|
||||
const std::vector<double>& spec);
|
||||
DecisionTreeFactor(const gtsam::DiscreteKey& key, string table);
|
||||
|
||||
DecisionTreeFactor(const gtsam::DiscreteKeys& keys, string table);
|
||||
DecisionTreeFactor(const std::vector<gtsam::DiscreteKey>& keys, string table);
|
||||
|
||||
DecisionTreeFactor(const gtsam::DiscreteConditional& c);
|
||||
|
||||
void print(string s = "DecisionTreeFactor\n",
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
bool equals(const gtsam::DecisionTreeFactor& other, double tol = 1e-9) const;
|
||||
string dot(bool showZero = false) const;
|
||||
std::vector<std::pair<DiscreteValues, double>> enumerate() const;
|
||||
string markdown(const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
};
|
||||
|
||||
#include <gtsam/discrete/DiscreteConditional.h>
|
||||
virtual class DiscreteConditional : gtsam::DecisionTreeFactor {
|
||||
DiscreteConditional();
|
||||
DiscreteConditional(size_t nFrontals, const gtsam::DecisionTreeFactor& f);
|
||||
DiscreteConditional(const gtsam::DiscreteKey& key, string spec);
|
||||
DiscreteConditional(const gtsam::DiscreteKey& key,
|
||||
const gtsam::DiscreteKeys& parents, string spec);
|
||||
DiscreteConditional(const gtsam::DiscreteKey& key,
|
||||
const std::vector<gtsam::DiscreteKey>& parents, string spec);
|
||||
DiscreteConditional(const gtsam::DecisionTreeFactor& joint,
|
||||
const gtsam::DecisionTreeFactor& marginal);
|
||||
DiscreteConditional(const gtsam::DecisionTreeFactor& joint,
|
||||
const gtsam::DecisionTreeFactor& marginal,
|
||||
const gtsam::Ordering& orderedKeys);
|
||||
void print(string s = "Discrete Conditional\n",
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
bool equals(const gtsam::DiscreteConditional& other, double tol = 1e-9) const;
|
||||
void printSignature(
|
||||
string s = "Discrete Conditional: ",
|
||||
const gtsam::KeyFormatter& formatter = gtsam::DefaultKeyFormatter) const;
|
||||
gtsam::DecisionTreeFactor* toFactor() const;
|
||||
gtsam::DecisionTreeFactor* choose(
|
||||
const gtsam::DiscreteValues& parentsValues) const;
|
||||
gtsam::DecisionTreeFactor* likelihood(
|
||||
const gtsam::DiscreteValues& frontalValues) const;
|
||||
gtsam::DecisionTreeFactor* likelihood(size_t value) const;
|
||||
size_t solve(const gtsam::DiscreteValues& parentsValues) const;
|
||||
size_t sample(const gtsam::DiscreteValues& parentsValues) const;
|
||||
size_t sample(size_t value) const;
|
||||
void solveInPlace(gtsam::DiscreteValues @parentsValues) const;
|
||||
void sampleInPlace(gtsam::DiscreteValues @parentsValues) const;
|
||||
string markdown(const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
};
|
||||
|
||||
#include <gtsam/discrete/DiscretePrior.h>
|
||||
virtual class DiscretePrior : gtsam::DiscreteConditional {
|
||||
DiscretePrior();
|
||||
DiscretePrior(const gtsam::DecisionTreeFactor& f);
|
||||
DiscretePrior(const gtsam::DiscreteKey& key, string spec);
|
||||
void print(string s = "Discrete Prior\n",
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
double operator()(size_t value) const;
|
||||
std::vector<double> pmf() const;
|
||||
size_t solve() const;
|
||||
size_t sample() const;
|
||||
};
|
||||
|
||||
#include <gtsam/discrete/DiscreteBayesNet.h>
|
||||
class DiscreteBayesNet {
|
||||
DiscreteBayesNet();
|
||||
void add(const gtsam::DiscreteConditional& s);
|
||||
void add(const gtsam::DiscreteKey& key, string spec);
|
||||
void add(const gtsam::DiscreteKey& key, const gtsam::DiscreteKeys& parents,
|
||||
string spec);
|
||||
void add(const gtsam::DiscreteKey& key,
|
||||
const std::vector<gtsam::DiscreteKey>& parents, string spec);
|
||||
bool empty() const;
|
||||
size_t size() const;
|
||||
gtsam::KeySet keys() const;
|
||||
const gtsam::DiscreteConditional* at(size_t i) const;
|
||||
void print(string s = "DiscreteBayesNet\n",
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
bool equals(const gtsam::DiscreteBayesNet& other, double tol = 1e-9) const;
|
||||
string dot(const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
void saveGraph(string s, const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
double operator()(const gtsam::DiscreteValues& values) const;
|
||||
gtsam::DiscreteValues optimize() const;
|
||||
gtsam::DiscreteValues sample() const;
|
||||
string markdown(const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
};
|
||||
|
||||
#include <gtsam/discrete/DiscreteBayesTree.h>
|
||||
class DiscreteBayesTreeClique {
|
||||
DiscreteBayesTreeClique();
|
||||
DiscreteBayesTreeClique(const gtsam::DiscreteConditional* conditional);
|
||||
const gtsam::DiscreteConditional* conditional() const;
|
||||
bool isRoot() const;
|
||||
void printSignature(
|
||||
const string& s = "Clique: ",
|
||||
const gtsam::KeyFormatter& formatter = gtsam::DefaultKeyFormatter) const;
|
||||
double evaluate(const gtsam::DiscreteValues& values) const;
|
||||
};
|
||||
|
||||
class DiscreteBayesTree {
|
||||
DiscreteBayesTree();
|
||||
void print(string s = "DiscreteBayesTree\n",
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
bool equals(const gtsam::DiscreteBayesTree& other, double tol = 1e-9) const;
|
||||
|
||||
size_t size() const;
|
||||
bool empty() const;
|
||||
const DiscreteBayesTreeClique* operator[](size_t j) const;
|
||||
|
||||
string dot(const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
void saveGraph(string s,
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
double operator()(const gtsam::DiscreteValues& values) const;
|
||||
|
||||
string markdown(const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
};
|
||||
|
||||
#include <gtsam/inference/DotWriter.h>
|
||||
class DotWriter {
|
||||
DotWriter(double figureWidthInches = 5, double figureHeightInches = 5,
|
||||
bool plotFactorPoints = true, bool connectKeysToFactor = true,
|
||||
bool binaryEdges = true);
|
||||
};
|
||||
|
||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||
class DiscreteFactorGraph {
|
||||
DiscreteFactorGraph();
|
||||
DiscreteFactorGraph(const gtsam::DiscreteBayesNet& bayesNet);
|
||||
|
||||
void add(const gtsam::DiscreteKey& j, string table);
|
||||
void add(const gtsam::DiscreteKey& j, const std::vector<double>& spec);
|
||||
|
||||
void add(const gtsam::DiscreteKeys& keys, string table);
|
||||
void add(const std::vector<gtsam::DiscreteKey>& keys, string table);
|
||||
|
||||
bool empty() const;
|
||||
size_t size() const;
|
||||
gtsam::KeySet keys() const;
|
||||
const gtsam::DiscreteFactor* at(size_t i) const;
|
||||
|
||||
void print(string s = "") const;
|
||||
bool equals(const gtsam::DiscreteFactorGraph& fg, double tol = 1e-9) const;
|
||||
|
||||
string dot(
|
||||
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter,
|
||||
const gtsam::DotWriter& dotWriter = gtsam::DotWriter()) const;
|
||||
void saveGraph(
|
||||
string s,
|
||||
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter,
|
||||
const gtsam::DotWriter& dotWriter = gtsam::DotWriter()) const;
|
||||
|
||||
gtsam::DecisionTreeFactor product() const;
|
||||
double operator()(const gtsam::DiscreteValues& values) const;
|
||||
gtsam::DiscreteValues optimize() const;
|
||||
|
||||
gtsam::DiscreteBayesNet eliminateSequential();
|
||||
gtsam::DiscreteBayesNet eliminateSequential(const gtsam::Ordering& ordering);
|
||||
gtsam::DiscreteBayesTree eliminateMultifrontal();
|
||||
gtsam::DiscreteBayesTree eliminateMultifrontal(const gtsam::Ordering& ordering);
|
||||
|
||||
string markdown(const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
};
|
||||
|
||||
} // namespace gtsam
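
For reference, the C++ equivalent of the wrapped workflow above, building a small discrete factor graph, eliminating it, and reading off the MPE, looks roughly like this (illustrative sketch, not part of the commit; the function name is made up):

#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>

#include <string>

void discreteGraphSketch() {
  using namespace gtsam;
  DiscreteKey C(0, 2), A(1, 2), B(2, 2);

  DiscreteFactorGraph graph;
  graph.add(C & A, "0.2 0.8 0.3 0.7");  // factor on C, A
  graph.add(C & B, "0.1 0.9 0.4 0.6");  // factor on C, B

  // Eliminate into a Bayes net, then find the most probable assignment.
  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
  DiscreteValues mpe = chordal->optimize();

  // Or optimize the graph directly:
  DiscreteValues mpe2 = graph.optimize();

  // Render the factors as markdown tables (the new markdown() API).
  std::string md = graph.markdown(DefaultKeyFormatter);
}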
@ -18,6 +18,7 @@
|
|||
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/discrete/DiscreteKey.h> // make sure we have traits
|
||||
#include <gtsam/discrete/DiscreteValues.h>
|
||||
// headers first to make sure no missing headers
|
||||
//#define DT_NO_PRUNING
|
||||
#include <gtsam/discrete/AlgebraicDecisionTree.h>
|
||||
|
|
@ -445,7 +446,7 @@ TEST(ADT, equality_parser)
|
|||
TEST(ADT, constructor)
|
||||
{
|
||||
DiscreteKey v0(0,2), v1(1,3);
|
||||
Assignment<Key> x00, x01, x02, x10, x11, x12;
|
||||
DiscreteValues x00, x01, x02, x10, x11, x12;
|
||||
x00[0] = 0, x00[1] = 0;
|
||||
x01[0] = 0, x01[1] = 1;
|
||||
x02[0] = 0, x02[1] = 2;
|
||||
|
|
@ -475,7 +476,7 @@ TEST(ADT, constructor)
|
|||
for(double& t: table)
|
||||
t = x++;
|
||||
ADT f3(z0 & z1 & z2 & z3, table);
|
||||
Assignment<Key> assignment;
|
||||
DiscreteValues assignment;
|
||||
assignment[0] = 0;
|
||||
assignment[1] = 0;
|
||||
assignment[2] = 0;
|
||||
|
|
@ -501,7 +502,7 @@ TEST(ADT, conversion)
|
|||
// f2.print("f2");
|
||||
dot(fIndexKey, "conversion-f2");
|
||||
|
||||
Assignment<Key> x00, x01, x02, x10, x11, x12;
|
||||
DiscreteValues x00, x01, x02, x10, x11, x12;
|
||||
x00[5] = 0, x00[2] = 0;
|
||||
x01[5] = 0, x01[2] = 1;
|
||||
x10[5] = 1, x10[2] = 0;
|
||||
|
|
@ -577,7 +578,7 @@ TEST(ADT, zero)
|
|||
ADT notb(B, 1, 0);
|
||||
ADT anotb = a * notb;
|
||||
// GTSAM_PRINT(anotb);
|
||||
Assignment<Key> x00, x01, x10, x11;
|
||||
DiscreteValues x00, x01, x10, x11;
|
||||
x00[0] = 0, x00[1] = 0;
|
||||
x01[0] = 0, x01[1] = 1;
|
||||
x10[0] = 1, x10[1] = 0;
|
||||
|
|
|
|||
|
|
@ -30,20 +30,18 @@ using namespace gtsam;
|
|||
/* ************************************************************************* */
|
||||
TEST( DecisionTreeFactor, constructors)
|
||||
{
|
||||
// Declare a bunch of keys
|
||||
DiscreteKey X(0,2), Y(1,3), Z(2,2);
|
||||
|
||||
DecisionTreeFactor f1(X, "2 8");
|
||||
// Create factors
|
||||
DecisionTreeFactor f1(X, {2, 8});
|
||||
DecisionTreeFactor f2(X & Y, "2 5 3 6 4 7");
|
||||
DecisionTreeFactor f3(X & Y & Z, "2 5 3 6 4 7 25 55 35 65 45 75");
|
||||
EXPECT_LONGS_EQUAL(1,f1.size());
|
||||
EXPECT_LONGS_EQUAL(2,f2.size());
|
||||
EXPECT_LONGS_EQUAL(3,f3.size());
|
||||
|
||||
// f1.print("f1:");
|
||||
// f2.print("f2:");
|
||||
// f3.print("f3:");
|
||||
|
||||
DecisionTreeFactor::Values values;
|
||||
DiscreteValues values;
|
||||
values[0] = 1; // x
|
||||
values[1] = 2; // y
|
||||
values[2] = 1; // z
|
||||
|
|
@ -55,37 +53,26 @@ TEST( DecisionTreeFactor, constructors)
|
|||
/* ************************************************************************* */
|
||||
TEST_UNSAFE( DecisionTreeFactor, multiplication)
|
||||
{
|
||||
// Declare a bunch of keys
|
||||
DiscreteKey v0(0,2), v1(1,2), v2(2,2);
|
||||
|
||||
// Create a factor
|
||||
DecisionTreeFactor f1(v0 & v1, "1 2 3 4");
|
||||
DecisionTreeFactor f2(v1 & v2, "5 6 7 8");
|
||||
// f1.print("f1:");
|
||||
// f2.print("f2:");
|
||||
|
||||
DecisionTreeFactor expected(v0 & v1 & v2, "5 6 14 16 15 18 28 32");
|
||||
|
||||
DecisionTreeFactor actual = f1 * f2;
|
||||
// actual.print("actual: ");
|
||||
CHECK(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( DecisionTreeFactor, sum_max)
|
||||
{
|
||||
// Declare a bunch of keys
|
||||
DiscreteKey v0(0,3), v1(1,2);
|
||||
|
||||
// Create a factor
|
||||
DecisionTreeFactor f1(v0 & v1, "1 2 3 4 5 6");
|
||||
|
||||
DecisionTreeFactor expected(v1, "9 12");
|
||||
DecisionTreeFactor::shared_ptr actual = f1.sum(1);
|
||||
CHECK(assert_equal(expected, *actual, 1e-5));
|
||||
// f1.print("f1:");
|
||||
// actual->print("actual: ");
|
||||
// actual->printCache("actual cache: ");
|
||||
|
||||
DecisionTreeFactor expected2(v1, "5 6");
|
||||
DecisionTreeFactor::shared_ptr actual2 = f1.max(1);
|
||||
|
|
@ -93,9 +80,43 @@ TEST( DecisionTreeFactor, sum_max)
|
|||
|
||||
DecisionTreeFactor f2(v1 & v0, "1 2 3 4 5 6");
|
||||
DecisionTreeFactor::shared_ptr actual22 = f2.sum(1);
|
||||
// f2.print("f2: ");
|
||||
// actual22->print("actual22: ");
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check enumerate yields the correct list of assignment/value pairs.
|
||||
TEST(DecisionTreeFactor, enumerate) {
|
||||
DiscreteKey A(12, 3), B(5, 2);
|
||||
DecisionTreeFactor f(A & B, "1 2 3 4 5 6");
|
||||
auto actual = f.enumerate();
|
||||
std::vector<std::pair<DiscreteValues, double>> expected;
|
||||
DiscreteValues values;
|
||||
for (size_t a : {0, 1, 2}) {
|
||||
for (size_t b : {0, 1}) {
|
||||
values[12] = a;
|
||||
values[5] = b;
|
||||
expected.emplace_back(values, f(values));
|
||||
}
|
||||
}
|
||||
EXPECT(actual == expected);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check markdown representation looks as expected.
|
||||
TEST(DecisionTreeFactor, markdown) {
|
||||
DiscreteKey A(12, 3), B(5, 2);
|
||||
DecisionTreeFactor f(A & B, "1 2 3 4 5 6");
|
||||
string expected =
|
||||
"|A|B|value|\n"
|
||||
"|:-:|:-:|:-:|\n"
|
||||
"|0|0|1|\n"
|
||||
"|0|1|2|\n"
|
||||
"|1|0|3|\n"
|
||||
"|1|1|4|\n"
|
||||
"|2|0|5|\n"
|
||||
"|2|1|6|\n";
|
||||
auto formatter = [](Key key) { return key == 12 ? "A" : "B"; };
|
||||
string actual = f.markdown(formatter);
|
||||
EXPECT(actual == expected);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -38,6 +38,9 @@ using namespace boost::assign;
|
|||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
static const DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2),
|
||||
LungCancer(6, 2), Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DiscreteBayesNet, bayesNet) {
|
||||
DiscreteBayesNet bayesNet;
|
||||
|
|
@ -71,11 +74,9 @@ TEST(DiscreteBayesNet, bayesNet) {
|
|||
/* ************************************************************************* */
|
||||
TEST(DiscreteBayesNet, Asia) {
|
||||
DiscreteBayesNet asia;
|
||||
DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2),
|
||||
Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);
|
||||
|
||||
asia.add(Asia % "99/1");
|
||||
asia.add(Smoking % "50/50");
|
||||
asia.add(Asia, "99/1");
|
||||
asia.add(Smoking % "50/50"); // Signature version
|
||||
|
||||
asia.add(Tuberculosis | Asia = "99/1 95/5");
|
||||
asia.add(LungCancer | Smoking = "99/1 90/10");
|
||||
|
|
@ -104,12 +105,12 @@ TEST(DiscreteBayesNet, Asia) {
|
|||
EXPECT(assert_equal(expected2, *chordal->back()));
|
||||
|
||||
// solve
|
||||
DiscreteFactor::sharedValues actualMPE = chordal->optimize();
|
||||
DiscreteFactor::Values expectedMPE;
|
||||
auto actualMPE = chordal->optimize();
|
||||
DiscreteValues expectedMPE;
|
||||
insert(expectedMPE)(Asia.first, 0)(Dyspnea.first, 0)(XRay.first, 0)(
|
||||
Tuberculosis.first, 0)(Smoking.first, 0)(Either.first, 0)(
|
||||
LungCancer.first, 0)(Bronchitis.first, 0);
|
||||
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
||||
EXPECT(assert_equal(expectedMPE, actualMPE));
|
||||
|
||||
// add evidence, we were in Asia and we have dyspnea
|
||||
fg.add(Asia, "0 1");
|
||||
|
|
@ -117,25 +118,25 @@ TEST(DiscreteBayesNet, Asia) {
|
|||
|
||||
// solve again, now with evidence
|
||||
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
|
||||
DiscreteFactor::sharedValues actualMPE2 = chordal2->optimize();
|
||||
DiscreteFactor::Values expectedMPE2;
|
||||
auto actualMPE2 = chordal2->optimize();
|
||||
DiscreteValues expectedMPE2;
|
||||
insert(expectedMPE2)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 0)(
|
||||
Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 0)(
|
||||
LungCancer.first, 0)(Bronchitis.first, 1);
|
||||
EXPECT(assert_equal(expectedMPE2, *actualMPE2));
|
||||
EXPECT(assert_equal(expectedMPE2, actualMPE2));
|
||||
|
||||
// now sample from it
|
||||
DiscreteFactor::Values expectedSample;
|
||||
DiscreteValues expectedSample;
|
||||
SETDEBUG("DiscreteConditional::sample", false);
|
||||
insert(expectedSample)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 1)(
|
||||
Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 1)(
|
||||
LungCancer.first, 1)(Bronchitis.first, 0);
|
||||
DiscreteFactor::sharedValues actualSample = chordal2->sample();
|
||||
EXPECT(assert_equal(expectedSample, *actualSample));
|
||||
auto actualSample = chordal2->sample();
|
||||
EXPECT(assert_equal(expectedSample, actualSample));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST_UNSAFE(DiscreteBayesNet, Sugar) {
|
||||
TEST(DiscreteBayesNet, Sugar) {
|
||||
DiscreteKey T(0, 2), L(1, 2), E(2, 2), C(8, 3), S(7, 2);
|
||||
|
||||
DiscreteBayesNet bn;
|
||||
|
|
@ -149,6 +150,52 @@ TEST_UNSAFE(DiscreteBayesNet, Sugar) {
|
|||
bn.add(C | S = "1/1/2 5/2/3");
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DiscreteBayesNet, Dot) {
|
||||
DiscreteBayesNet fragment;
|
||||
fragment.add(Asia % "99/1");
|
||||
fragment.add(Smoking % "50/50");
|
||||
|
||||
fragment.add(Tuberculosis | Asia = "99/1 95/5");
|
||||
fragment.add(LungCancer | Smoking = "99/1 90/10");
|
||||
fragment.add((Either | Tuberculosis, LungCancer) = "F T T T");
|
||||
|
||||
string actual = fragment.dot();
|
||||
EXPECT(actual ==
|
||||
"digraph G{\n"
|
||||
"0->3\n"
|
||||
"4->6\n"
|
||||
"3->5\n"
|
||||
"6->5\n"
|
||||
"}");
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check markdown representation looks as expected.
|
||||
TEST(DiscreteBayesNet, markdown) {
|
||||
DiscreteBayesNet fragment;
|
||||
fragment.add(Asia % "99/1");
|
||||
fragment.add(Smoking | Asia = "8/2 7/3");
|
||||
|
||||
string expected =
|
||||
"`DiscreteBayesNet` of size 2\n"
|
||||
"\n"
|
||||
" *P(Asia)*:\n\n"
|
||||
"|Asia|value|\n"
|
||||
"|:-:|:-:|\n"
|
||||
"|0|0.99|\n"
|
||||
"|1|0.01|\n"
|
||||
"\n"
|
||||
" *P(Smoking|Asia)*:\n\n"
|
||||
"|Asia|0|1|\n"
|
||||
"|:-:|:-:|:-:|\n"
|
||||
"|0|0.8|0.2|\n"
|
||||
"|1|0.7|0.3|\n\n";
|
||||
auto formatter = [](Key key) { return key == 0 ? "Asia" : "Smoking"; };
|
||||
string actual = fragment.markdown(formatter);
|
||||
EXPECT(actual == expected);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
|||
|
|
@ -26,88 +26,101 @@ using namespace boost::assign;
|
|||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
static bool debug = false;
|
||||
static constexpr bool debug = false;
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
TEST_UNSAFE(DiscreteBayesTree, ThinTree) {
|
||||
const int nrNodes = 15;
|
||||
const size_t nrStates = 2;
|
||||
|
||||
// define variables
|
||||
vector<DiscreteKey> key;
|
||||
for (int i = 0; i < nrNodes; i++) {
|
||||
DiscreteKey key_i(i, nrStates);
|
||||
key.push_back(key_i);
|
||||
}
|
||||
|
||||
// create a thin-tree Bayesnet, a la Jean-Guillaume
|
||||
struct TestFixture {
|
||||
vector<DiscreteKey> keys;
|
||||
DiscreteBayesNet bayesNet;
|
||||
bayesNet.add(key[14] % "1/3");
|
||||
boost::shared_ptr<DiscreteBayesTree> bayesTree;
|
||||
|
||||
bayesNet.add(key[13] | key[14] = "1/3 3/1");
|
||||
bayesNet.add(key[12] | key[14] = "3/1 3/1");
|
||||
/**
|
||||
* Create a thin-tree Bayesnet, a la Jean-Guillaume Durand (former student),
|
||||
* and then create the Bayes tree from it.
|
||||
*/
|
||||
TestFixture() {
|
||||
// Define variables.
|
||||
for (int i = 0; i < 15; i++) {
|
||||
DiscreteKey key_i(i, 2);
|
||||
keys.push_back(key_i);
|
||||
}
|
||||
|
||||
bayesNet.add((key[11] | key[13], key[14]) = "1/4 2/3 3/2 4/1");
|
||||
bayesNet.add((key[10] | key[13], key[14]) = "1/4 3/2 2/3 4/1");
|
||||
bayesNet.add((key[9] | key[12], key[14]) = "4/1 2/3 F 1/4");
|
||||
bayesNet.add((key[8] | key[12], key[14]) = "T 1/4 3/2 4/1");
|
||||
// Create thin-tree Bayesnet.
|
||||
bayesNet.add(keys[14] % "1/3");
|
||||
|
||||
bayesNet.add((key[7] | key[11], key[13]) = "1/4 2/3 3/2 4/1");
|
||||
bayesNet.add((key[6] | key[11], key[13]) = "1/4 3/2 2/3 4/1");
|
||||
bayesNet.add((key[5] | key[10], key[13]) = "4/1 2/3 3/2 1/4");
|
||||
bayesNet.add((key[4] | key[10], key[13]) = "2/3 1/4 3/2 4/1");
|
||||
bayesNet.add(keys[13] | keys[14] = "1/3 3/1");
|
||||
bayesNet.add(keys[12] | keys[14] = "3/1 3/1");
|
||||
|
||||
bayesNet.add((key[3] | key[9], key[12]) = "1/4 2/3 3/2 4/1");
|
||||
bayesNet.add((key[2] | key[9], key[12]) = "1/4 8/2 2/3 4/1");
|
||||
bayesNet.add((key[1] | key[8], key[12]) = "4/1 2/3 3/2 1/4");
|
||||
bayesNet.add((key[0] | key[8], key[12]) = "2/3 1/4 3/2 4/1");
|
||||
bayesNet.add((keys[11] | keys[13], keys[14]) = "1/4 2/3 3/2 4/1");
|
||||
bayesNet.add((keys[10] | keys[13], keys[14]) = "1/4 3/2 2/3 4/1");
|
||||
bayesNet.add((keys[9] | keys[12], keys[14]) = "4/1 2/3 F 1/4");
|
||||
bayesNet.add((keys[8] | keys[12], keys[14]) = "T 1/4 3/2 4/1");
|
||||
|
||||
bayesNet.add((keys[7] | keys[11], keys[13]) = "1/4 2/3 3/2 4/1");
|
||||
bayesNet.add((keys[6] | keys[11], keys[13]) = "1/4 3/2 2/3 4/1");
|
||||
bayesNet.add((keys[5] | keys[10], keys[13]) = "4/1 2/3 3/2 1/4");
|
||||
bayesNet.add((keys[4] | keys[10], keys[13]) = "2/3 1/4 3/2 4/1");
|
||||
|
||||
bayesNet.add((keys[3] | keys[9], keys[12]) = "1/4 2/3 3/2 4/1");
|
||||
bayesNet.add((keys[2] | keys[9], keys[12]) = "1/4 8/2 2/3 4/1");
|
||||
bayesNet.add((keys[1] | keys[8], keys[12]) = "4/1 2/3 3/2 1/4");
|
||||
bayesNet.add((keys[0] | keys[8], keys[12]) = "2/3 1/4 3/2 4/1");
|
||||
|
||||
// Create a BayesTree out of the Bayes net.
|
||||
bayesTree = DiscreteFactorGraph(bayesNet).eliminateMultifrontal();
|
||||
}
|
||||
};
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DiscreteBayesTree, ThinTree) {
|
||||
const TestFixture self;
|
||||
const auto& keys = self.keys;
|
||||
|
||||
if (debug) {
|
||||
GTSAM_PRINT(bayesNet);
|
||||
bayesNet.saveGraph("/tmp/discreteBayesNet.dot");
|
||||
GTSAM_PRINT(self.bayesNet);
|
||||
self.bayesNet.saveGraph("/tmp/discreteBayesNet.dot");
|
||||
}
|
||||
|
||||
// create a BayesTree out of a Bayes net
|
||||
auto bayesTree = DiscreteFactorGraph(bayesNet).eliminateMultifrontal();
|
||||
if (debug) {
|
||||
GTSAM_PRINT(*bayesTree);
|
||||
bayesTree->saveGraph("/tmp/discreteBayesTree.dot");
|
||||
GTSAM_PRINT(*self.bayesTree);
|
||||
self.bayesTree->saveGraph("/tmp/discreteBayesTree.dot");
|
||||
}
|
||||
|
||||
// Check frontals and parents
|
||||
for (size_t i : {13, 14, 9, 3, 2, 8, 1, 0, 10, 5, 4}) {
|
||||
auto clique_i = (*bayesTree)[i];
|
||||
auto clique_i = (*self.bayesTree)[i];
|
||||
EXPECT_LONGS_EQUAL(i, *(clique_i->conditional_->beginFrontals()));
|
||||
}
|
||||
|
||||
auto R = bayesTree->roots().front();
|
||||
auto R = self.bayesTree->roots().front();
|
||||
|
||||
// Check whether BN and BT give the same answer on all configurations
|
||||
vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
|
||||
key[0] & key[1] & key[2] & key[3] & key[4] & key[5] & key[6] & key[7] &
|
||||
key[8] & key[9] & key[10] & key[11] & key[12] & key[13] & key[14]);
|
||||
auto allPosbValues =
|
||||
cartesianProduct(keys[0] & keys[1] & keys[2] & keys[3] & keys[4] &
|
||||
keys[5] & keys[6] & keys[7] & keys[8] & keys[9] &
|
||||
keys[10] & keys[11] & keys[12] & keys[13] & keys[14]);
|
||||
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
||||
DiscreteFactor::Values x = allPosbValues[i];
|
||||
double expected = bayesNet.evaluate(x);
|
||||
double actual = bayesTree->evaluate(x);
|
||||
DiscreteValues x = allPosbValues[i];
|
||||
double expected = self.bayesNet.evaluate(x);
|
||||
double actual = self.bayesTree->evaluate(x);
|
||||
DOUBLES_EQUAL(expected, actual, 1e-9);
|
||||
}
|
||||
|
||||
// Calculate all some marginals for Values==all1
|
||||
// Calculate all marginals, and some joint marginals, for DiscreteValues == all1
|
||||
Vector marginals = Vector::Zero(15);
|
||||
double joint_12_14 = 0, joint_9_12_14 = 0, joint_8_12_14 = 0, joint_8_12 = 0,
|
||||
joint82 = 0, joint12 = 0, joint24 = 0, joint45 = 0, joint46 = 0,
|
||||
joint_4_11 = 0, joint_11_13 = 0, joint_11_13_14 = 0,
|
||||
joint_11_12_13_14 = 0, joint_9_11_12_13 = 0, joint_8_11_12_13 = 0;
|
||||
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
||||
DiscreteFactor::Values x = allPosbValues[i];
|
||||
double px = bayesTree->evaluate(x);
|
||||
DiscreteValues x = allPosbValues[i];
|
||||
double px = self.bayesTree->evaluate(x);
|
||||
for (size_t i = 0; i < 15; i++)
|
||||
if (x[i]) marginals[i] += px;
|
||||
if (x[12] && x[14]) {
|
||||
|
|
@ -138,49 +151,49 @@ TEST_UNSAFE(DiscreteBayesTree, ThinTree) {
|
|||
}
|
||||
}
|
||||
}
|
||||
DiscreteFactor::Values all1 = allPosbValues.back();
|
||||
DiscreteValues all1 = allPosbValues.back();
|
||||
|
||||
// check separator marginal P(S0)
|
||||
auto clique = (*bayesTree)[0];
|
||||
auto clique = (*self.bayesTree)[0];
|
||||
DiscreteFactorGraph separatorMarginal0 =
|
||||
clique->separatorMarginal(EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint_8_12, separatorMarginal0(all1), 1e-9);
|
||||
|
||||
// check separator marginal P(S9), should be P(14)
|
||||
clique = (*bayesTree)[9];
|
||||
clique = (*self.bayesTree)[9];
|
||||
DiscreteFactorGraph separatorMarginal9 =
|
||||
clique->separatorMarginal(EliminateDiscrete);
|
||||
DOUBLES_EQUAL(marginals[14], separatorMarginal9(all1), 1e-9);
|
||||
|
||||
// check separator marginal of root, should be empty
|
||||
clique = (*bayesTree)[11];
|
||||
clique = (*self.bayesTree)[11];
|
||||
DiscreteFactorGraph separatorMarginal11 =
|
||||
clique->separatorMarginal(EliminateDiscrete);
|
||||
LONGS_EQUAL(0, separatorMarginal11.size());
|
||||
|
||||
// check shortcut P(S9||R) to root
|
||||
clique = (*bayesTree)[9];
|
||||
clique = (*self.bayesTree)[9];
|
||||
DiscreteBayesNet shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||
LONGS_EQUAL(1, shortcut.size());
|
||||
DOUBLES_EQUAL(joint_11_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||
|
||||
// check shortcut P(S8||R) to root
|
||||
clique = (*bayesTree)[8];
|
||||
clique = (*self.bayesTree)[8];
|
||||
shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint_11_12_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||
|
||||
// check shortcut P(S2||R) to root
|
||||
clique = (*bayesTree)[2];
|
||||
clique = (*self.bayesTree)[2];
|
||||
shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint_9_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||
|
||||
// check shortcut P(S0||R) to root
|
||||
clique = (*bayesTree)[0];
|
||||
clique = (*self.bayesTree)[0];
|
||||
shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint_8_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||
|
||||
// calculate all shortcuts to root
|
||||
DiscreteBayesTree::Nodes cliques = bayesTree->nodes();
|
||||
DiscreteBayesTree::Nodes cliques = self.bayesTree->nodes();
|
||||
for (auto clique : cliques) {
|
||||
DiscreteBayesNet shortcut = clique.second->shortcut(R, EliminateDiscrete);
|
||||
if (debug) {
|
||||
|
|
@ -192,7 +205,7 @@ TEST_UNSAFE(DiscreteBayesTree, ThinTree) {
|
|||
// Check all marginals
|
||||
DiscreteFactor::shared_ptr marginalFactor;
|
||||
for (size_t i = 0; i < 15; i++) {
|
||||
marginalFactor = bayesTree->marginalFactor(i, EliminateDiscrete);
|
||||
marginalFactor = self.bayesTree->marginalFactor(i, EliminateDiscrete);
|
||||
double actual = (*marginalFactor)(all1);
|
||||
DOUBLES_EQUAL(marginals[i], actual, 1e-9);
|
||||
}
|
||||
|
|
@ -200,30 +213,60 @@ TEST_UNSAFE(DiscreteBayesTree, ThinTree) {
|
|||
DiscreteBayesNet::shared_ptr actualJoint;
|
||||
|
||||
// Check joint P(8, 2)
|
||||
actualJoint = bayesTree->jointBayesNet(8, 2, EliminateDiscrete);
|
||||
actualJoint = self.bayesTree->jointBayesNet(8, 2, EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint82, actualJoint->evaluate(all1), 1e-9);
|
||||
|
||||
// Check joint P(1, 2)
|
||||
actualJoint = bayesTree->jointBayesNet(1, 2, EliminateDiscrete);
|
||||
actualJoint = self.bayesTree->jointBayesNet(1, 2, EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint12, actualJoint->evaluate(all1), 1e-9);
|
||||
|
||||
// Check joint P(2, 4)
|
||||
actualJoint = bayesTree->jointBayesNet(2, 4, EliminateDiscrete);
|
||||
actualJoint = self.bayesTree->jointBayesNet(2, 4, EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint24, actualJoint->evaluate(all1), 1e-9);
|
||||
|
||||
// Check joint P(4, 5)
|
||||
actualJoint = bayesTree->jointBayesNet(4, 5, EliminateDiscrete);
|
||||
actualJoint = self.bayesTree->jointBayesNet(4, 5, EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint45, actualJoint->evaluate(all1), 1e-9);
|
||||
|
||||
// Check joint P(4, 6)
|
||||
actualJoint = bayesTree->jointBayesNet(4, 6, EliminateDiscrete);
|
||||
actualJoint = self.bayesTree->jointBayesNet(4, 6, EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint46, actualJoint->evaluate(all1), 1e-9);
|
||||
|
||||
// Check joint P(4, 11)
|
||||
actualJoint = bayesTree->jointBayesNet(4, 11, EliminateDiscrete);
|
||||
actualJoint = self.bayesTree->jointBayesNet(4, 11, EliminateDiscrete);
|
||||
DOUBLES_EQUAL(joint_4_11, actualJoint->evaluate(all1), 1e-9);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DiscreteBayesTree, Dot) {
|
||||
const TestFixture self;
|
||||
string actual = self.bayesTree->dot();
|
||||
EXPECT(actual ==
|
||||
"digraph G{\n"
|
||||
"0[label=\"13,11,6,7\"];\n"
|
||||
"0->1\n"
|
||||
"1[label=\"14 : 11,13\"];\n"
|
||||
"1->2\n"
|
||||
"2[label=\"9,12 : 14\"];\n"
|
||||
"2->3\n"
|
||||
"3[label=\"3 : 9,12\"];\n"
|
||||
"2->4\n"
|
||||
"4[label=\"2 : 9,12\"];\n"
|
||||
"2->5\n"
|
||||
"5[label=\"8 : 12,14\"];\n"
|
||||
"5->6\n"
|
||||
"6[label=\"1 : 8,12\"];\n"
|
||||
"5->7\n"
|
||||
"7[label=\"0 : 8,12\"];\n"
|
||||
"1->8\n"
|
||||
"8[label=\"10 : 13,14\"];\n"
|
||||
"8->9\n"
|
||||
"9[label=\"5 : 10,13\"];\n"
|
||||
"8->10\n"
|
||||
"10[label=\"4 : 10,13\"];\n"
|
||||
"}");
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
|||
|
|
@ -10,10 +10,11 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/*
|
||||
* @file testDecisionTreeFactor.cpp
|
||||
* @file testDiscreteConditional.cpp
|
||||
* @brief unit tests for DiscreteConditional
|
||||
* @author Duy-Nguyen Ta
|
||||
* @date Feb 14, 2011
|
||||
* @author Frank Dellaert
|
||||
* @date Feb 14, 2011
|
||||
*/
|
||||
|
||||
#include <boost/assign/std/map.hpp>
|
||||
|
|
@ -24,29 +25,27 @@ using namespace boost::assign;
|
|||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/discrete/DecisionTreeFactor.h>
|
||||
#include <gtsam/discrete/DiscreteConditional.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( DiscreteConditional, constructors)
|
||||
{
|
||||
DiscreteKey X(0, 2), Y(2, 3), Z(1, 2); // watch ordering !
|
||||
TEST(DiscreteConditional, constructors) {
|
||||
DiscreteKey X(0, 2), Y(2, 3), Z(1, 2); // watch ordering !
|
||||
|
||||
DiscreteConditional expected(X | Y = "1/1 2/3 1/4");
|
||||
EXPECT_LONGS_EQUAL(0, *(expected.beginFrontals()));
|
||||
EXPECT_LONGS_EQUAL(2, *(expected.beginParents()));
|
||||
EXPECT(expected.endParents() == expected.end());
|
||||
EXPECT(expected.endFrontals() == expected.beginParents());
|
||||
|
||||
DiscreteConditional::shared_ptr expected1 = //
|
||||
boost::make_shared<DiscreteConditional>(X | Y = "1/1 2/3 1/4");
|
||||
EXPECT(expected1);
|
||||
EXPECT_LONGS_EQUAL(0, *(expected1->beginFrontals()));
|
||||
EXPECT_LONGS_EQUAL(2, *(expected1->beginParents()));
|
||||
EXPECT(expected1->endParents() == expected1->end());
|
||||
EXPECT(expected1->endFrontals() == expected1->beginParents());
|
||||
|
||||
DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
|
||||
DiscreteConditional actual1(1, f1);
|
||||
EXPECT(assert_equal(*expected1, actual1, 1e-9));
|
||||
EXPECT(assert_equal(expected, actual1, 1e-9));
|
||||
|
||||
DecisionTreeFactor f2(X & Y & Z,
|
||||
"0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
|
||||
DecisionTreeFactor f2(
|
||||
X & Y & Z, "0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
|
||||
DiscreteConditional actual2(1, f2);
|
||||
EXPECT(assert_equal(f2 / *f2.sum(1), *actual2.toFactor(), 1e-9));
|
||||
}
|
||||
|
|
@ -61,11 +60,10 @@ TEST(DiscreteConditional, constructors_alt_interface) {
|
|||
r2 += 2.0, 3.0;
|
||||
r3 += 1.0, 4.0;
|
||||
table += r1, r2, r3;
|
||||
auto actual1 = boost::make_shared<DiscreteConditional>(X | Y = table);
|
||||
EXPECT(actual1);
|
||||
DiscreteConditional actual1(X, {Y}, table);
|
||||
DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
|
||||
DiscreteConditional expected1(1, f1);
|
||||
EXPECT(assert_equal(expected1, *actual1, 1e-9));
|
||||
EXPECT(assert_equal(expected1, actual1, 1e-9));
|
||||
|
||||
DecisionTreeFactor f2(
|
||||
X & Y & Z, "0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
|
||||
|
|
@ -102,9 +100,79 @@ TEST(DiscreteConditional, Combine) {
|
|||
c.push_back(boost::make_shared<DiscreteConditional>(A | B = "1/2 2/1"));
|
||||
c.push_back(boost::make_shared<DiscreteConditional>(B % "1/2"));
|
||||
DecisionTreeFactor factor(A & B, "0.111111 0.444444 0.222222 0.222222");
|
||||
DiscreteConditional actual(2, factor);
|
||||
auto expected = DiscreteConditional::Combine(c.begin(), c.end());
|
||||
EXPECT(assert_equal(*expected, actual, 1e-5));
|
||||
DiscreteConditional expected(2, factor);
|
||||
auto actual = DiscreteConditional::Combine(c.begin(), c.end());
|
||||
EXPECT(assert_equal(expected, *actual, 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DiscreteConditional, likelihood) {
|
||||
DiscreteKey X(0, 2), Y(1, 3);
|
||||
DiscreteConditional conditional(X | Y = "2/8 4/6 5/5");
|
||||
|
||||
auto actual0 = conditional.likelihood(0);
|
||||
DecisionTreeFactor expected0(Y, "0.2 0.4 0.5");
|
||||
EXPECT(assert_equal(expected0, *actual0, 1e-9));
|
||||
|
||||
auto actual1 = conditional.likelihood(1);
|
||||
DecisionTreeFactor expected1(Y, "0.8 0.6 0.5");
|
||||
EXPECT(assert_equal(expected1, *actual1, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check markdown representation looks as expected, no parents.
|
||||
TEST(DiscreteConditional, markdown_prior) {
|
||||
DiscreteKey A(Symbol('x', 1), 3);
|
||||
DiscreteConditional conditional(A % "1/2/2");
|
||||
string expected =
|
||||
" *P(x1)*:\n\n"
|
||||
"|x1|value|\n"
|
||||
"|:-:|:-:|\n"
|
||||
"|0|0.2|\n"
|
||||
"|1|0.4|\n"
|
||||
"|2|0.4|\n";
|
||||
string actual = conditional.markdown();
|
||||
EXPECT(actual == expected);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check markdown representation looks as expected, multivalued.
|
||||
TEST(DiscreteConditional, markdown_multivalued) {
|
||||
DiscreteKey A(Symbol('a', 1), 3), B(Symbol('b', 1), 5);
|
||||
DiscreteConditional conditional(
|
||||
A | B = "2/88/10 2/20/78 33/33/34 33/33/34 95/2/3");
|
||||
string expected =
|
||||
" *P(a1|b1)*:\n\n"
|
||||
"|b1|0|1|2|\n"
|
||||
"|:-:|:-:|:-:|:-:|\n"
|
||||
"|0|0.02|0.88|0.1|\n"
|
||||
"|1|0.02|0.2|0.78|\n"
|
||||
"|2|0.33|0.33|0.34|\n"
|
||||
"|3|0.33|0.33|0.34|\n"
|
||||
"|4|0.95|0.02|0.03|\n";
|
||||
string actual = conditional.markdown();
|
||||
EXPECT(actual == expected);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check markdown representation looks as expected, two parents.
|
||||
TEST(DiscreteConditional, markdown) {
|
||||
DiscreteKey A(2, 2), B(1, 2), C(0, 3);
|
||||
DiscreteConditional conditional(A, {B, C}, "0/1 1/3 1/1 3/1 0/1 1/0");
|
||||
string expected =
|
||||
" *P(A|B,C)*:\n\n"
|
||||
"|B|C|0|1|\n"
|
||||
"|:-:|:-:|:-:|:-:|\n"
|
||||
"|0|0|0|1|\n"
|
||||
"|0|1|0.25|0.75|\n"
|
||||
"|0|2|0.5|0.5|\n"
|
||||
"|1|0|0.75|0.25|\n"
|
||||
"|1|1|0|1|\n"
|
||||
"|1|2|1|0|\n";
|
||||
vector<string> names{"C", "B", "A"};
|
||||
auto formatter = [names](Key key) { return names[key]; };
|
||||
string actual = conditional.markdown(formatter);
|
||||
EXPECT(actual == expected);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -81,8 +81,8 @@ TEST_UNSAFE( DiscreteFactorGraph, DiscreteFactorGraphEvaluationTest) {
|
|||
graph.add(P2, "0.9 0.6");
|
||||
graph.add(P1 & P2, "4 1 10 4");
|
||||
|
||||
// Instantiate Values
|
||||
DiscreteFactor::Values values;
|
||||
// Instantiate DiscreteValues
|
||||
DiscreteValues values;
|
||||
values[0] = 1;
|
||||
values[1] = 1;
|
||||
|
||||
|
|
@ -167,10 +167,10 @@ TEST( DiscreteFactorGraph, test)
|
|||
// EXPECT(assert_equal(expected, *actual2));
|
||||
|
||||
// Test optimization
|
||||
DiscreteFactor::Values expectedValues;
|
||||
DiscreteValues expectedValues;
|
||||
insert(expectedValues)(0, 0)(1, 0)(2, 0);
|
||||
DiscreteFactor::sharedValues actualValues = graph.optimize();
|
||||
EXPECT(assert_equal(expectedValues, *actualValues));
|
||||
auto actualValues = graph.optimize();
|
||||
EXPECT(assert_equal(expectedValues, actualValues));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -186,11 +186,11 @@ TEST( DiscreteFactorGraph, testMPE)
|
|||
// graph.product().print();
|
||||
// DiscreteSequentialSolver(graph).eliminate()->print();
|
||||
|
||||
DiscreteFactor::sharedValues actualMPE = graph.optimize();
|
||||
auto actualMPE = graph.optimize();
|
||||
|
||||
DiscreteFactor::Values expectedMPE;
|
||||
DiscreteValues expectedMPE;
|
||||
insert(expectedMPE)(0, 0)(1, 1)(2, 1);
|
||||
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
||||
EXPECT(assert_equal(expectedMPE, actualMPE));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -211,13 +211,13 @@ TEST( DiscreteFactorGraph, testMPE_Darwiche09book_p244)
|
|||
// graph.product().potentials().dot("Darwiche-product");
|
||||
// DiscreteSequentialSolver(graph).eliminate()->print();
|
||||
|
||||
DiscreteFactor::Values expectedMPE;
|
||||
DiscreteValues expectedMPE;
|
||||
insert(expectedMPE)(4, 0)(2, 0)(3, 1)(0, 1)(1, 1);
|
||||
|
||||
// Use the solver machinery.
|
||||
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
|
||||
DiscreteFactor::sharedValues actualMPE = chordal->optimize();
|
||||
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
||||
auto actualMPE = chordal->optimize();
|
||||
EXPECT(assert_equal(expectedMPE, actualMPE));
|
||||
// DiscreteConditional::shared_ptr root = chordal->back();
|
||||
// EXPECT_DOUBLES_EQUAL(0.4, (*root)(*actualMPE), 1e-9);
|
||||
|
||||
|
|
@ -244,8 +244,8 @@ ETree::shared_ptr eTree = ETree::Create(graph, structure);
|
|||
// eliminate normally and check solution
|
||||
DiscreteBayesNet::shared_ptr bayesNet = eTree->eliminate(&EliminateDiscrete);
|
||||
// bayesNet->print(">>>>>>>>>>>>>> Bayes Net <<<<<<<<<<<<<<<<<<");
|
||||
DiscreteFactor::sharedValues actualMPE = optimize(*bayesNet);
|
||||
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
||||
auto actualMPE = optimize(*bayesNet);
|
||||
EXPECT(assert_equal(expectedMPE, actualMPE));
|
||||
|
||||
// Approximate and check solution
|
||||
// DiscreteBayesNet::shared_ptr approximateNet = eTree->approximate();
|
||||
|
|
@ -359,6 +359,67 @@ cout << unicorns;
|
|||
}
|
||||
#endif
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DiscreteFactorGraph, Dot) {
|
||||
// Create Factor graph
|
||||
DiscreteFactorGraph graph;
|
||||
DiscreteKey C(0, 2), A(1, 2), B(2, 2);
|
||||
graph.add(C & A, "0.2 0.8 0.3 0.7");
|
||||
graph.add(C & B, "0.1 0.9 0.4 0.6");
|
||||
|
||||
string actual = graph.dot();
|
||||
string expected =
|
||||
"graph {\n"
|
||||
" size=\"5,5\";\n"
|
||||
"\n"
|
||||
" var0[label=\"0\"];\n"
|
||||
" var1[label=\"1\"];\n"
|
||||
" var2[label=\"2\"];\n"
|
||||
"\n"
|
||||
" var0--var1;\n"
|
||||
" var0--var2;\n"
|
||||
"}\n";
|
||||
EXPECT(actual == expected);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check markdown representation looks as expected.
|
||||
TEST(DiscreteFactorGraph, markdown) {
|
||||
// Create Factor graph
|
||||
DiscreteFactorGraph graph;
|
||||
DiscreteKey C(0, 2), A(1, 2), B(2, 2);
|
||||
graph.add(C & A, "0.2 0.8 0.3 0.7");
|
||||
graph.add(C & B, "0.1 0.9 0.4 0.6");
|
||||
|
||||
string expected =
|
||||
"`DiscreteFactorGraph` of size 2\n"
|
||||
"\n"
|
||||
"factor 0:\n"
|
||||
"|C|A|value|\n"
|
||||
"|:-:|:-:|:-:|\n"
|
||||
"|0|0|0.2|\n"
|
||||
"|0|1|0.8|\n"
|
||||
"|1|0|0.3|\n"
|
||||
"|1|1|0.7|\n"
|
||||
"\n"
|
||||
"factor 1:\n"
|
||||
"|C|B|value|\n"
|
||||
"|:-:|:-:|:-:|\n"
|
||||
"|0|0|0.1|\n"
|
||||
"|0|1|0.9|\n"
|
||||
"|1|0|0.4|\n"
|
||||
"|1|1|0.6|\n\n";
|
||||
vector<string> names{"C", "A", "B"};
|
||||
auto formatter = [names](Key key) { return names[key]; };
|
||||
string actual = graph.markdown(formatter);
|
||||
EXPECT(actual == expected);
|
||||
|
||||
// Make sure values are correctly displayed.
|
||||
DiscreteValues values;
|
||||
values[0] = 1;
|
||||
values[1] = 0;
|
||||
EXPECT_DOUBLES_EQUAL(0.3, graph[0]->operator()(values), 1e-9);
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@ TEST_UNSAFE( DiscreteMarginals, UGM_small ) {
|
|||
|
||||
DiscreteMarginals marginals(graph);
|
||||
DiscreteFactor::shared_ptr actualC = marginals(Cathy.first);
|
||||
DiscreteFactor::Values values;
|
||||
DiscreteValues values;
|
||||
|
||||
values[Cathy.first] = 0;
|
||||
EXPECT_DOUBLES_EQUAL( 0.359631, (*actualC)(values), 1e-6);
|
||||
|
|
@ -94,7 +94,7 @@ TEST_UNSAFE( DiscreteMarginals, UGM_chain ) {
|
|||
|
||||
DiscreteMarginals marginals(graph);
|
||||
DiscreteFactor::shared_ptr actualC = marginals(key[2].first);
|
||||
DiscreteFactor::Values values;
|
||||
DiscreteValues values;
|
||||
|
||||
values[key[2].first] = 0;
|
||||
EXPECT_DOUBLES_EQUAL( 0.03426, (*actualC)(values), 1e-4);
@ -164,11 +164,11 @@ TEST_UNSAFE(DiscreteMarginals, truss2) {
|
|||
graph.add(key[2] & key[3] & key[4], "1 2 3 4 5 6 7 8");
|
||||
|
||||
// Calculate the marginals by brute force
|
||||
vector<DiscreteFactor::Values> allPosbValues =
|
||||
auto allPosbValues =
|
||||
cartesianProduct(key[0] & key[1] & key[2] & key[3] & key[4]);
|
||||
Vector T = Z_5x1, F = Z_5x1;
|
||||
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
||||
DiscreteFactor::Values x = allPosbValues[i];
|
||||
DiscreteValues x = allPosbValues[i];
|
||||
double px = graph(x);
|
||||
for (size_t j = 0; j < 5; j++)
|
||||
if (x[j])
@ -0,0 +1,55 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/*
|
||||
* @file testDiscretePrior.cpp
|
||||
* @brief unit tests for DiscretePrior
|
||||
 * @author Frank Dellaert
|
||||
* @date December 2021
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/discrete/DiscretePrior.h>
|
||||
#include <gtsam/discrete/Signature.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
static const DiscreteKey X(0, 2);
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DiscretePrior, constructors) {
|
||||
DiscretePrior actual(X % "2/3");
|
||||
DecisionTreeFactor f(X, "0.4 0.6");
|
||||
DiscretePrior expected(f);
|
||||
EXPECT(assert_equal(expected, actual, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DiscretePrior, operator) {
|
||||
DiscretePrior prior(X % "2/3");
|
||||
EXPECT_DOUBLES_EQUAL(prior(0), 0.4, 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(prior(1), 0.6, 1e-9);
|
||||
}
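For reference: the signature X % "2/3" specifies unnormalized odds, which DiscretePrior normalizes to P(X=0) = 2/5 = 0.4 and P(X=1) = 3/5 = 0.6; that is why the operator() and pmf() tests expect 0.4 and 0.6.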
/* ************************************************************************* */
|
||||
TEST(DiscretePrior, to_vector) {
|
||||
DiscretePrior prior(X % "2/3");
|
||||
vector<double> expected {0.4, 0.6};
|
||||
EXPECT(prior.pmf() == expected);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
@ -32,22 +32,27 @@ DiscreteKey X(0, 2), Y(1, 3), Z(2, 2);
|
|||
|
||||
/* ************************************************************************* */
|
||||
TEST(testSignature, simple_conditional) {
|
||||
Signature sig(X | Y = "1/1 2/3 1/4");
|
||||
Signature sig(X, {Y}, "1/1 2/3 1/4");
|
||||
CHECK(sig.table());
|
||||
Signature::Table table = *sig.table();
|
||||
vector<double> row[3]{{0.5, 0.5}, {0.4, 0.6}, {0.2, 0.8}};
|
||||
LONGS_EQUAL(3, table.size());
|
||||
CHECK(row[0] == table[0]);
|
||||
CHECK(row[1] == table[1]);
|
||||
CHECK(row[2] == table[2]);
|
||||
DiscreteKey actKey = sig.key();
|
||||
LONGS_EQUAL(X.first, actKey.first);
|
||||
|
||||
DiscreteKeys actKeys = sig.discreteKeys();
|
||||
LONGS_EQUAL(2, actKeys.size());
|
||||
LONGS_EQUAL(X.first, actKeys.front().first);
|
||||
LONGS_EQUAL(Y.first, actKeys.back().first);
|
||||
CHECK(sig.key() == X);
|
||||
|
||||
vector<double> actCpt = sig.cpt();
|
||||
EXPECT_LONGS_EQUAL(6, actCpt.size());
|
||||
DiscreteKeys keys = sig.discreteKeys();
|
||||
LONGS_EQUAL(2, keys.size());
|
||||
CHECK(keys[0] == X);
|
||||
CHECK(keys[1] == Y);
|
||||
|
||||
DiscreteKeys parents = sig.parents();
|
||||
LONGS_EQUAL(1, parents.size());
|
||||
CHECK(parents[0] == Y);
|
||||
|
||||
EXPECT_LONGS_EQUAL(6, sig.cpt().size());
|
||||
}
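For reference: each "a/b" pair in "1/1 2/3 1/4" is normalized per parent assignment, giving the rows {0.5, 0.5}, {0.4, 0.6} and {0.2, 0.8} that the test compares against, and a flattened CPT of 3 x 2 = 6 entries.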
/* ************************************************************************* */
@ -60,16 +65,56 @@ TEST(testSignature, simple_conditional_nonparser) {
|
|||
table += row1, row2, row3;
|
||||
|
||||
Signature sig(X | Y = table);
|
||||
DiscreteKey actKey = sig.key();
|
||||
EXPECT_LONGS_EQUAL(X.first, actKey.first);
|
||||
CHECK(sig.key() == X);
|
||||
|
||||
DiscreteKeys actKeys = sig.discreteKeys();
|
||||
LONGS_EQUAL(2, actKeys.size());
|
||||
LONGS_EQUAL(X.first, actKeys.front().first);
|
||||
LONGS_EQUAL(Y.first, actKeys.back().first);
|
||||
DiscreteKeys keys = sig.discreteKeys();
|
||||
LONGS_EQUAL(2, keys.size());
|
||||
CHECK(keys[0] == X);
|
||||
CHECK(keys[1] == Y);
|
||||
|
||||
vector<double> actCpt = sig.cpt();
|
||||
EXPECT_LONGS_EQUAL(6, actCpt.size());
|
||||
DiscreteKeys parents = sig.parents();
|
||||
LONGS_EQUAL(1, parents.size());
|
||||
CHECK(parents[0] == Y);
|
||||
|
||||
EXPECT_LONGS_EQUAL(6, sig.cpt().size());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
DiscreteKey A(0, 2), S(1, 2), T(2, 2), L(3, 2), B(4, 2), E(5, 2), D(7, 2);
|
||||
|
||||
// Make sure we can create all signatures for Asia network with constructor.
|
||||
TEST(testSignature, all_examples) {
|
||||
DiscreteKey X(6, 2);
|
||||
Signature a(A, {}, "99/1");
|
||||
Signature s(S, {}, "50/50");
|
||||
Signature t(T, {A}, "99/1 95/5");
|
||||
Signature l(L, {S}, "99/1 90/10");
|
||||
Signature b(B, {S}, "70/30 40/60");
|
||||
Signature e(E, {T, L}, "F F F 1");
|
||||
Signature x(X, {E}, "95/5 2/98");
|
||||
}
|
||||
|
||||
// Make sure we can create all signatures for Asia network with operator magic.
|
||||
TEST(testSignature, all_examples_magic) {
|
||||
DiscreteKey X(6, 2);
|
||||
Signature a(A % "99/1");
|
||||
Signature s(S % "50/50");
|
||||
Signature t(T | A = "99/1 95/5");
|
||||
Signature l(L | S = "99/1 90/10");
|
||||
Signature b(B | S = "70/30 40/60");
|
||||
Signature e((E | T, L) = "F F F 1");
|
||||
Signature x(X | E = "95/5 2/98");
|
||||
}
|
||||
|
||||
// Check example from docs.
|
||||
TEST(testSignature, doxygen_example) {
|
||||
Signature::Table table{{0.9, 0.1}, {0.2, 0.8}, {0.3, 0.7}, {0.1, 0.9}};
|
||||
Signature d1(D, {E, B}, table);
|
||||
Signature d2((D | E, B) = "9/1 2/8 3/7 1/9");
|
||||
Signature d3(D, {E, B}, "9/1 2/8 3/7 1/9");
|
||||
EXPECT(*(d1.table()) == table);
|
||||
EXPECT(*(d2.table()) == table);
|
||||
EXPECT(*(d3.table()) == table);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
@ -312,6 +312,16 @@ public:
|
|||
return range(camera.pose(), Dcamera, Dother);
|
||||
}
|
||||
|
||||
/// for Linear Triangulation
|
||||
Matrix34 cameraProjectionMatrix() const {
|
||||
return K_.K() * PinholeBase::pose().inverse().matrix().block(0, 0, 3, 4);
|
||||
}
|
||||
|
||||
/// for Nonlinear Triangulation
|
||||
Vector defaultErrorWhenTriangulatingBehindCamera() const {
|
||||
return Eigen::Matrix<double,traits<Point2>::dimension,1>::Constant(2.0 * K_.fx());
|
||||
}
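The matrix returned by cameraProjectionMatrix() above is the standard 3x4 projection P = K [R^T | -R^T t] used by DLT-style linear triangulation: a homogeneous world point maps through P to a homogeneous pixel. A minimal sketch of that relationship (not part of this commit; the camera, calibration and landmark values below are made up for illustration):

#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
using namespace gtsam;

void projectionMatrixSketch() {
  Pose3 pose;                                 // identity: camera at origin, looking along +z
  Cal3_S2 K(1500, 1200, 0, 640, 480);
  PinholeCamera<Cal3_S2> cam(pose, K);
  Matrix34 P = cam.cameraProjectionMatrix();  // = K.K() * [R^T | -R^T t]
  Point3 landmark(0.5, -0.3, 5.0);            // in front of the camera
  Vector4 Xh; Xh << landmark, 1.0;            // homogeneous world point
  Vector3 xh = P * Xh;                        // homogeneous image point
  Point2 uv(xh(0) / xh(2), xh(1) / xh(2));    // matches cam.project(landmark)
  (void)uv;
}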
private:
|
||||
|
||||
/** Serialization function */
@ -121,6 +121,13 @@ public:
|
|||
return _project(pw, Dpose, Dpoint, Dcal);
|
||||
}
|
||||
|
||||
/// project a 3D point from world coordinates into the image
|
||||
Point2 reprojectionError(const Point3& pw, const Point2& measured, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 3> Dpoint = boost::none,
|
||||
OptionalJacobian<2, DimK> Dcal = boost::none) const {
|
||||
return Point2(_project(pw, Dpose, Dpoint, Dcal) - measured);
|
||||
}
|
||||
|
||||
/// project a point at infinity from world coordinates into the image
|
||||
Point2 project(const Unit3& pw, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 2> Dpoint = boost::none,
@ -159,7 +166,6 @@ public:
|
|||
return result;
|
||||
}
|
||||
|
||||
|
||||
/// backproject a 2-dimensional point to a 3-dimensional point at infinity
|
||||
Unit3 backprojectPointAtInfinity(const Point2& p) const {
|
||||
const Point2 pn = calibration().calibrate(p);
@ -410,6 +416,16 @@ public:
|
|||
return PinholePose(); // assumes that the default constructor is valid
|
||||
}
|
||||
|
||||
/// for Linear Triangulation
|
||||
Matrix34 cameraProjectionMatrix() const {
|
||||
Matrix34 P = Matrix34(PinholeBase::pose().inverse().matrix().block(0, 0, 3, 4));
|
||||
return K_->K() * P;
|
||||
}
|
||||
|
||||
/// for Nonlinear Triangulation
|
||||
Vector defaultErrorWhenTriangulatingBehindCamera() const {
|
||||
return Eigen::Matrix<double,traits<Point2>::dimension,1>::Constant(2.0 * K_->fx());
|
||||
}
|
||||
/// @}
|
||||
|
||||
private:
@ -117,13 +117,23 @@ struct traits<QUATERNION_TYPE> {
|
|||
omega = (-8. / 3. - 2. / 3. * qw) * q.vec();
|
||||
} else {
|
||||
// Normal, away from zero case
|
||||
_Scalar angle = 2 * acos(qw), s = sqrt(1 - qw * qw);
|
||||
// Important: convert to [-pi,pi] to keep error continuous
|
||||
if (angle > M_PI)
|
||||
angle -= twoPi;
|
||||
else if (angle < -M_PI)
|
||||
angle += twoPi;
|
||||
omega = (angle / s) * q.vec();
|
||||
if (qw > 0) {
|
||||
_Scalar angle = 2 * acos(qw), s = sqrt(1 - qw * qw);
|
||||
// Important: convert to [-pi,pi] to keep error continuous
|
||||
if (angle > M_PI)
|
||||
angle -= twoPi;
|
||||
else if (angle < -M_PI)
|
||||
angle += twoPi;
|
||||
omega = (angle / s) * q.vec();
|
||||
} else {
|
||||
// q and -q encode the same rotation: use the canonical representative with
// w > 0 so the recovered angle stays in [-pi, pi] and the error is continuous
|
||||
_Scalar angle = 2 * acos(-qw), s = sqrt(1 - qw * qw);
|
||||
if (angle > M_PI)
|
||||
angle -= twoPi;
|
||||
else if (angle < -M_PI)
|
||||
angle += twoPi;
|
||||
omega = (angle / s) * -q.vec();
|
||||
}
|
||||
}
|
||||
|
||||
if(H) *H = SO3::LogmapDerivative(omega.template cast<double>());
@ -0,0 +1,109 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
 * @file SphericalCamera.cpp
 * @brief Calibrated camera with spherical projection
|
||||
* @date Aug 26, 2021
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/SphericalCamera.h>
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
bool SphericalCamera::equals(const SphericalCamera& camera, double tol) const {
|
||||
return pose_.equals(camera.pose(), tol);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void SphericalCamera::print(const string& s) const { pose_.print(s + ".pose"); }
|
||||
|
||||
/* ************************************************************************* */
|
||||
pair<Unit3, bool> SphericalCamera::projectSafe(const Point3& pw) const {
|
||||
const Point3 pc = pose().transformTo(pw);
|
||||
Unit3 pu = Unit3::FromPoint3(pc);
|
||||
return make_pair(pu, pc.norm() > 1e-8);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Unit3 SphericalCamera::project2(const Point3& pw, OptionalJacobian<2, 6> Dpose,
|
||||
OptionalJacobian<2, 3> Dpoint) const {
|
||||
Matrix36 Dtf_pose;
|
||||
Matrix3 Dtf_point; // calculated by transformTo if needed
|
||||
const Point3 pc =
|
||||
pose().transformTo(pw, Dpose ? &Dtf_pose : 0, Dpoint ? &Dtf_point : 0);
|
||||
|
||||
if (pc.norm() <= 1e-8) throw("point cannot be at the center of the camera");
|
||||
|
||||
Matrix23 Dunit; // calculated by FromPoint3 if needed
|
||||
Unit3 pu = Unit3::FromPoint3(Point3(pc), Dpoint ? &Dunit : 0);
|
||||
|
||||
if (Dpose) *Dpose = Dunit * Dtf_pose; // 2x3 * 3x6 = 2x6
|
||||
if (Dpoint) *Dpoint = Dunit * Dtf_point; // 2x3 * 3x3 = 2x3
|
||||
return pu;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Unit3 SphericalCamera::project2(const Unit3& pwu, OptionalJacobian<2, 6> Dpose,
|
||||
OptionalJacobian<2, 2> Dpoint) const {
|
||||
Matrix23 Dtf_rot;
|
||||
Matrix2 Dtf_point; // calculated by transformTo if needed
|
||||
const Unit3 pu = pose().rotation().unrotate(pwu, Dpose ? &Dtf_rot : 0,
|
||||
Dpoint ? &Dtf_point : 0);
|
||||
|
||||
if (Dpose)
|
||||
*Dpose << Dtf_rot, Matrix::Zero(2, 3); // 2x6 (translation part is zero)
|
||||
if (Dpoint) *Dpoint = Dtf_point; // 2x2
|
||||
return pu;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Point3 SphericalCamera::backproject(const Unit3& pu, const double depth) const {
|
||||
return pose().transformFrom(depth * pu);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Unit3 SphericalCamera::backprojectPointAtInfinity(const Unit3& p) const {
|
||||
return pose().rotation().rotate(p);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Unit3 SphericalCamera::project(const Point3& point,
|
||||
OptionalJacobian<2, 6> Dcamera,
|
||||
OptionalJacobian<2, 3> Dpoint) const {
|
||||
return project2(point, Dcamera, Dpoint);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Vector2 SphericalCamera::reprojectionError(
|
||||
const Point3& point, const Unit3& measured, OptionalJacobian<2, 6> Dpose,
|
||||
OptionalJacobian<2, 3> Dpoint) const {
|
||||
// project point
|
||||
if (Dpose || Dpoint) {
|
||||
Matrix26 H_project_pose;
|
||||
Matrix23 H_project_point;
|
||||
Matrix22 H_error;
|
||||
Unit3 projected = project2(point, H_project_pose, H_project_point);
|
||||
Vector2 error = measured.errorVector(projected, boost::none, H_error);
|
||||
if (Dpose) *Dpose = H_error * H_project_pose;
|
||||
if (Dpoint) *Dpoint = H_error * H_project_point;
|
||||
return error;
|
||||
} else {
|
||||
return measured.errorVector(project2(point, Dpose, Dpoint));
|
||||
}
|
||||
}
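Note on the Jacobians above: the derivatives of the error with respect to the pose and the point are obtained by the chain rule, composing the 2x2 Jacobian of Unit3::errorVector with the 2x6 and 2x3 Jacobians of project2, so the error and its derivatives live in the 2-dimensional tangent space of the measured bearing.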
/* ************************************************************************* */
|
||||
} // namespace gtsam
@ -0,0 +1,241 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SphericalCamera.h
|
||||
* @brief Calibrated camera with spherical projection
|
||||
* @date Aug 26, 2021
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Manifold.h>
|
||||
#include <gtsam/base/ThreadsafeException.h>
|
||||
#include <gtsam/base/concepts.h>
|
||||
#include <gtsam/dllexport.h>
|
||||
#include <gtsam/geometry/BearingRange.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/geometry/Unit3.h>
|
||||
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* Empty calibration. Only needed to play well with other cameras
|
||||
* (e.g., when templating functions wrt cameras), since other cameras
|
||||
 * have constructors in the form 'camera(pose, calibration)'
|
||||
* @addtogroup geometry
|
||||
* \nosubgrouping
|
||||
*/
|
||||
class GTSAM_EXPORT EmptyCal {
|
||||
public:
|
||||
enum { dimension = 0 };
|
||||
EmptyCal() {}
|
||||
virtual ~EmptyCal() = default;
|
||||
using shared_ptr = boost::shared_ptr<EmptyCal>;
|
||||
|
||||
/// return DOF, dimensionality of tangent space
|
||||
inline static size_t Dim() { return dimension; }
|
||||
|
||||
void print(const std::string& s) const {
|
||||
std::cout << "empty calibration: " << s << std::endl;
|
||||
}
|
||||
|
||||
private:
|
||||
/// Serialization function
|
||||
friend class boost::serialization::access;
|
||||
template <class Archive>
|
||||
void serialize(Archive& ar, const unsigned int /*version*/) {
|
||||
ar& boost::serialization::make_nvp(
|
||||
"EmptyCal", boost::serialization::base_object<EmptyCal>(*this));
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* A spherical camera class that has a Pose3 and measures bearing vectors.
|
||||
* The camera has an ‘Empty’ calibration and the only 6 dof are the pose
|
||||
* @addtogroup geometry
|
||||
* \nosubgrouping
|
||||
*/
|
||||
class GTSAM_EXPORT SphericalCamera {
|
||||
public:
|
||||
enum { dimension = 6 };
|
||||
|
||||
using Measurement = Unit3;
|
||||
using MeasurementVector = std::vector<Unit3>;
|
||||
using CalibrationType = EmptyCal;
|
||||
|
||||
private:
|
||||
Pose3 pose_; ///< 3D pose of camera
|
||||
|
||||
protected:
|
||||
EmptyCal::shared_ptr emptyCal_;
|
||||
|
||||
public:
|
||||
/// @}
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/// Default constructor
|
||||
SphericalCamera()
|
||||
: pose_(Pose3::identity()), emptyCal_(boost::make_shared<EmptyCal>()) {}
|
||||
|
||||
/// Constructor with pose
|
||||
explicit SphericalCamera(const Pose3& pose)
|
||||
: pose_(pose), emptyCal_(boost::make_shared<EmptyCal>()) {}
|
||||
|
||||
/// Constructor with empty intrinsics (needed for smart factors)
|
||||
explicit SphericalCamera(const Pose3& pose,
|
||||
const EmptyCal::shared_ptr& cal)
|
||||
: pose_(pose), emptyCal_(cal) {}
|
||||
|
||||
/// @}
|
||||
/// @name Advanced Constructors
|
||||
/// @{
|
||||
explicit SphericalCamera(const Vector& v) : pose_(Pose3::Expmap(v)) {}
|
||||
|
||||
/// Default destructor
|
||||
virtual ~SphericalCamera() = default;
|
||||
|
||||
/// return shared pointer to calibration
|
||||
const EmptyCal::shared_ptr& sharedCalibration() const {
|
||||
return emptyCal_;
|
||||
}
|
||||
|
||||
/// return calibration
|
||||
const EmptyCal& calibration() const { return *emptyCal_; }
|
||||
|
||||
/// @}
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
/// assert equality up to a tolerance
|
||||
bool equals(const SphericalCamera& camera, double tol = 1e-9) const;
|
||||
|
||||
/// print
|
||||
virtual void print(const std::string& s = "SphericalCamera") const;
|
||||
|
||||
/// @}
|
||||
/// @name Standard Interface
|
||||
/// @{
|
||||
|
||||
/// return pose, constant version
|
||||
const Pose3& pose() const { return pose_; }
|
||||
|
||||
/// get rotation
|
||||
const Rot3& rotation() const { return pose_.rotation(); }
|
||||
|
||||
/// get translation
|
||||
const Point3& translation() const { return pose_.translation(); }
|
||||
|
||||
// /// return pose, with derivative
|
||||
// const Pose3& getPose(OptionalJacobian<6, 6> H) const;
|
||||
|
||||
/// @}
|
||||
/// @name Transformations and measurement functions
|
||||
/// @{
|
||||
|
||||
/// Project a point into the image and check depth
|
||||
std::pair<Unit3, bool> projectSafe(const Point3& pw) const;
|
||||
|
||||
/** Project point into the image
|
||||
* (note: there is no CheiralityException for a spherical camera)
|
||||
* @param point 3D point in world coordinates
|
||||
* @return the intrinsic coordinates of the projected point
|
||||
*/
|
||||
Unit3 project2(const Point3& pw, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 3> Dpoint = boost::none) const;
|
||||
|
||||
/** Project point into the image
|
||||
* (note: there is no CheiralityException for a spherical camera)
|
||||
* @param point 3D direction in world coordinates
|
||||
* @return the intrinsic coordinates of the projected point
|
||||
*/
|
||||
Unit3 project2(const Unit3& pwu, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 2> Dpoint = boost::none) const;
|
||||
|
||||
/// backproject a 2-dimensional point to a 3-dimensional point at given depth
|
||||
Point3 backproject(const Unit3& p, const double depth) const;
|
||||
|
||||
/// backproject point at infinity
|
||||
Unit3 backprojectPointAtInfinity(const Unit3& p) const;
|
||||
|
||||
/** Project point into the image
|
||||
* (note: there is no CheiralityException for a spherical camera)
|
||||
* @param point 3D point in world coordinates
|
||||
* @return the intrinsic coordinates of the projected point
|
||||
*/
|
||||
Unit3 project(const Point3& point, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 3> Dpoint = boost::none) const;
|
||||
|
||||
/** Compute reprojection error for a given 3D point in world coordinates
|
||||
* @param point 3D point in world coordinates
|
||||
* @return the tangent space error between the projection and the measurement
|
||||
*/
|
||||
Vector2 reprojectionError(const Point3& point, const Unit3& measured,
|
||||
OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 3> Dpoint = boost::none) const;
|
||||
/// @}
|
||||
|
||||
/// move a camera according to d
|
||||
SphericalCamera retract(const Vector6& d) const {
|
||||
return SphericalCamera(pose().retract(d));
|
||||
}
|
||||
|
||||
/// return canonical coordinate
|
||||
Vector6 localCoordinates(const SphericalCamera& p) const {
|
||||
return pose().localCoordinates(p.pose());
|
||||
}
|
||||
|
||||
/// for Canonical
|
||||
static SphericalCamera identity() {
|
||||
return SphericalCamera(
|
||||
Pose3::identity()); // assumes that the default constructor is valid
|
||||
}
|
||||
|
||||
/// for Linear Triangulation
|
||||
Matrix34 cameraProjectionMatrix() const {
|
||||
return Matrix34(pose_.inverse().matrix().block(0, 0, 3, 4));
|
||||
}
|
||||
|
||||
/// for Nonlinear Triangulation
|
||||
Vector defaultErrorWhenTriangulatingBehindCamera() const {
|
||||
return Eigen::Matrix<double, traits<Point2>::dimension, 1>::Constant(0.0);
|
||||
}
|
||||
|
||||
/// @deprecated
|
||||
size_t dim() const { return 6; }
|
||||
|
||||
/// @deprecated
|
||||
static size_t Dim() { return 6; }
|
||||
|
||||
private:
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
template <class Archive>
|
||||
void serialize(Archive& ar, const unsigned int /*version*/) {
|
||||
ar& BOOST_SERIALIZATION_NVP(pose_);
|
||||
}
|
||||
|
||||
public:
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
// end of class SphericalCamera
|
||||
|
||||
template <>
|
||||
struct traits<SphericalCamera> : public internal::LieGroup<Pose3> {};
|
||||
|
||||
template <>
|
||||
struct traits<const SphericalCamera> : public internal::LieGroup<Pose3> {};
|
||||
|
||||
} // namespace gtsam
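A minimal usage sketch for the class above (illustration only, not part of this commit; the pose and landmark values are made up):

#include <gtsam/geometry/SphericalCamera.h>
using namespace gtsam;

void sphericalCameraSketch() {
  SphericalCamera cam(Pose3(Rot3::Ypr(0.1, 0.2, 0.3), Point3(1, 0, 0)));
  Point3 landmark(3, -1, 2);
  Unit3 bearing = cam.project(landmark);      // bearing vector; no intrinsics involved
  double depth = (landmark - cam.pose().translation()).norm();
  Point3 recovered = cam.backproject(bearing, depth);      // == landmark up to rounding
  Vector2 err = cam.reprojectionError(landmark, bearing);  // ~ (0, 0) for an exact measurement
  (void)recovered; (void)err;
}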
@ -170,6 +170,11 @@ public:
|
|||
OptionalJacobian<3, 3> H2 = boost::none, OptionalJacobian<3, 0> H3 =
|
||||
boost::none) const;
|
||||
|
||||
/// for Nonlinear Triangulation
|
||||
Vector defaultErrorWhenTriangulatingBehindCamera() const {
|
||||
return Eigen::Matrix<double,traits<Measurement>::dimension,1>::Constant(2.0 * K_->fx());
|
||||
}
|
||||
|
||||
/// @}
|
||||
|
||||
private:
@ -27,9 +27,6 @@ class Point2 {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
class Point2Pairs {
@ -104,9 +101,6 @@ class StereoPoint2 {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Point3.h>
@ -131,9 +125,6 @@ class Point3 {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
class Point3Pairs {
@ -191,9 +182,6 @@ class Rot2 {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/SO3.h>
@ -372,9 +360,6 @@ class Rot3 {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Pose2.h>
@ -433,9 +418,6 @@ class Pose2 {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
boost::optional<gtsam::Pose2> align(const gtsam::Point2Pairs& pairs);
@ -502,9 +484,6 @@ class Pose3 {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
class Pose3Pairs {
@ -547,9 +526,6 @@ class Unit3 {
|
|||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
|
||||
// enabling function to compare objects
|
||||
bool equals(const gtsam::Unit3& expected, double tol) const;
|
||||
};
@ -611,9 +587,6 @@ class Cal3_S2 {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Cal3DS2_Base.h>
@ -642,9 +615,6 @@ virtual class Cal3DS2_Base {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Cal3DS2.h>
@ -668,9 +638,6 @@ virtual class Cal3DS2 : gtsam::Cal3DS2_Base {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Cal3Unified.h>
@ -705,9 +672,6 @@ virtual class Cal3Unified : gtsam::Cal3DS2_Base {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Cal3Fisheye.h>
@ -750,9 +714,6 @@ class Cal3Fisheye {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Cal3_S2Stereo.h>
@ -811,9 +772,6 @@ class Cal3Bundler {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/CalibratedCamera.h>
@ -847,9 +805,6 @@ class CalibratedCamera {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/PinholeCamera.h>
@ -889,9 +844,6 @@ class PinholeCamera {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Similarity3.h>
@ -962,9 +914,6 @@ class StereoCamera {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/triangulation.h>
@ -0,0 +1,163 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
 * @file testSphericalCamera.cpp
 * @brief Unit tests for the SphericalCamera class
|
||||
* @date Aug 26, 2021
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/geometry/SphericalCamera.h>
|
||||
|
||||
#include <cmath>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std::placeholders;
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
typedef SphericalCamera Camera;
|
||||
|
||||
// static const Cal3_S2 K(625, 625, 0, 0, 0);
|
||||
//
|
||||
static const Pose3 pose(Rot3(Vector3(1, -1, -1).asDiagonal()),
|
||||
Point3(0, 0, 0.5));
|
||||
static const Camera camera(pose);
|
||||
//
|
||||
static const Pose3 pose1(Rot3(), Point3(0, 1, 0.5));
|
||||
static const Camera camera1(pose1);
|
||||
|
||||
static const Point3 point1(-0.08, -0.08, 0.0);
|
||||
static const Point3 point2(-0.08, 0.08, 0.0);
|
||||
static const Point3 point3(0.08, 0.08, 0.0);
|
||||
static const Point3 point4(0.08, -0.08, 0.0);
|
||||
|
||||
// manually computed in matlab
|
||||
static const Unit3 bearing1(-0.156054862928174, 0.156054862928174,
|
||||
0.975342893301088);
|
||||
static const Unit3 bearing2(-0.156054862928174, -0.156054862928174,
|
||||
0.975342893301088);
|
||||
static const Unit3 bearing3(0.156054862928174, -0.156054862928174,
|
||||
0.975342893301088);
|
||||
static const Unit3 bearing4(0.156054862928174, 0.156054862928174,
|
||||
0.975342893301088);
|
||||
|
||||
static double depth = 0.512640224719052;
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, constructor) {
|
||||
EXPECT(assert_equal(pose, camera.pose()));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, project) {
|
||||
// expected from manual calculation in Matlab
|
||||
EXPECT(assert_equal(camera.project(point1), bearing1));
|
||||
EXPECT(assert_equal(camera.project(point2), bearing2));
|
||||
EXPECT(assert_equal(camera.project(point3), bearing3));
|
||||
EXPECT(assert_equal(camera.project(point4), bearing4));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, backproject) {
|
||||
EXPECT(assert_equal(camera.backproject(bearing1, depth), point1));
|
||||
EXPECT(assert_equal(camera.backproject(bearing2, depth), point2));
|
||||
EXPECT(assert_equal(camera.backproject(bearing3, depth), point3));
|
||||
EXPECT(assert_equal(camera.backproject(bearing4, depth), point4));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, backproject2) {
|
||||
Point3 origin(0, 0, 0);
|
||||
Rot3 rot(1., 0., 0., 0., 0., 1., 0., -1., 0.);  // a camera looking down
|
||||
Camera camera(Pose3(rot, origin));
|
||||
|
||||
Point3 actual = camera.backproject(Unit3(0, 0, 1), 1.);
|
||||
Point3 expected(0., 1., 0.);
|
||||
pair<Unit3, bool> x = camera.projectSafe(expected);
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
EXPECT(assert_equal(Unit3(0, 0, 1), x.first));
|
||||
EXPECT(x.second);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
static Unit3 project3(const Pose3& pose, const Point3& point) {
|
||||
return Camera(pose).project(point);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, Dproject) {
|
||||
Matrix Dpose, Dpoint;
|
||||
Unit3 result = camera.project(point1, Dpose, Dpoint);
|
||||
Matrix numerical_pose = numericalDerivative21(project3, pose, point1);
|
||||
Matrix numerical_point = numericalDerivative22(project3, pose, point1);
|
||||
EXPECT(assert_equal(bearing1, result));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
static Vector2 reprojectionError2(const Pose3& pose, const Point3& point,
|
||||
const Unit3& measured) {
|
||||
return Camera(pose).reprojectionError(point, measured);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, reprojectionError) {
|
||||
Matrix Dpose, Dpoint;
|
||||
Vector2 result = camera.reprojectionError(point1, bearing1, Dpose, Dpoint);
|
||||
Matrix numerical_pose =
|
||||
numericalDerivative31(reprojectionError2, pose, point1, bearing1);
|
||||
Matrix numerical_point =
|
||||
numericalDerivative32(reprojectionError2, pose, point1, bearing1);
|
||||
EXPECT(assert_equal(Vector2(0.0, 0.0), result));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, reprojectionError_noisy) {
|
||||
Matrix Dpose, Dpoint;
|
||||
Unit3 bearing_noisy = bearing1.retract(Vector2(0.01, 0.05));
|
||||
Vector2 result =
|
||||
camera.reprojectionError(point1, bearing_noisy, Dpose, Dpoint);
|
||||
Matrix numerical_pose =
|
||||
numericalDerivative31(reprojectionError2, pose, point1, bearing_noisy);
|
||||
Matrix numerical_point =
|
||||
numericalDerivative32(reprojectionError2, pose, point1, bearing_noisy);
|
||||
EXPECT(assert_equal(Vector2(-0.050282, 0.00833482), result, 1e-5));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Add a test with more arbitrary rotation
|
||||
TEST(SphericalCamera, Dproject2) {
|
||||
static const Pose3 pose1(Rot3::Ypr(0.1, -0.1, 0.4), Point3(0, 0, -10));
|
||||
static const Camera camera(pose1);
|
||||
Matrix Dpose, Dpoint;
|
||||
camera.project2(point1, Dpose, Dpoint);
|
||||
Matrix numerical_pose = numericalDerivative21(project3, pose1, point1);
|
||||
Matrix numerical_point = numericalDerivative22(project3, pose1, point1);
|
||||
CHECK(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
CHECK(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
@ -10,22 +10,23 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* testTriangulation.cpp
|
||||
*
|
||||
* Created on: July 30th, 2013
|
||||
* Author: cbeall3
|
||||
* @file testTriangulation.cpp
|
||||
 * @brief Unit tests for the triangulation utilities
|
||||
* @date July 30th, 2013
|
||||
* @author Chris Beall (cbeall3)
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/triangulation.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/StereoCamera.h>
|
||||
#include <gtsam/geometry/CameraSet.h>
|
||||
#include <gtsam/geometry/Cal3Bundler.h>
|
||||
#include <gtsam/slam/StereoFactor.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/ExpressionFactor.h>
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
||||
#include <gtsam/geometry/Cal3Bundler.h>
|
||||
#include <gtsam/geometry/CameraSet.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/SphericalCamera.h>
|
||||
#include <gtsam/geometry/StereoCamera.h>
|
||||
#include <gtsam/geometry/triangulation.h>
|
||||
#include <gtsam/nonlinear/ExpressionFactor.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/slam/StereoFactor.h>
|
||||
|
||||
#include <boost/assign.hpp>
|
||||
#include <boost/assign/std/vector.hpp>
@ -36,7 +37,7 @@ using namespace boost::assign;
|
|||
|
||||
// Some common constants
|
||||
|
||||
static const boost::shared_ptr<Cal3_S2> sharedCal = //
|
||||
static const boost::shared_ptr<Cal3_S2> sharedCal = //
|
||||
boost::make_shared<Cal3_S2>(1500, 1200, 0, 640, 480);
|
||||
|
||||
// Looking along X-axis, 1 meter above ground plane (x-y)
@ -57,8 +58,7 @@ Point2 z2 = camera2.project(landmark);
|
|||
|
||||
//******************************************************************************
|
||||
// Simple test with a well-behaved two camera situation
|
||||
TEST( triangulation, twoPoses) {
|
||||
|
||||
TEST(triangulation, twoPoses) {
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
@ -69,36 +69,36 @@ TEST( triangulation, twoPoses) {
|
|||
|
||||
// 1. Test simple DLT, perfect in no noise situation
|
||||
bool optimize = false;
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual1, 1e-7));
|
||||
|
||||
// 2. test with optimization on, same answer
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-7));
|
||||
|
||||
// 3. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
|
||||
// 3. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
measurements.at(0) += Point2(0.1, 0.5);
|
||||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
optimize = false;
|
||||
boost::optional<Point3> actual3 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual3 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-4));
|
||||
|
||||
// 4. Now with optimization on
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual4 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual4 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Similar, but now with Bundler calibration
|
||||
TEST( triangulation, twoPosesBundler) {
|
||||
|
||||
boost::shared_ptr<Cal3Bundler> bundlerCal = //
|
||||
TEST(triangulation, twoPosesBundler) {
|
||||
boost::shared_ptr<Cal3Bundler> bundlerCal = //
|
||||
boost::make_shared<Cal3Bundler>(1500, 0, 0, 640, 480);
|
||||
PinholeCamera<Cal3Bundler> camera1(pose1, *bundlerCal);
|
||||
PinholeCamera<Cal3Bundler> camera2(pose2, *bundlerCal);
@ -116,37 +116,38 @@ TEST( triangulation, twoPosesBundler) {
|
|||
bool optimize = true;
|
||||
double rank_tol = 1e-9;
|
||||
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3(poses, bundlerCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-7));
|
||||
|
||||
// Add some noise and try again
|
||||
measurements.at(0) += Point2(0.1, 0.5);
|
||||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3(poses, bundlerCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19847), *actual2, 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, fourPoses) {
|
||||
TEST(triangulation, fourPoses) {
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
|
||||
poses += pose1, pose2;
|
||||
measurements += z1, z2;
|
||||
|
||||
boost::optional<Point3> actual = triangulatePoint3(poses, sharedCal,
|
||||
measurements);
|
||||
boost::optional<Point3> actual =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
|
||||
// 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
|
||||
// 2. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
measurements.at(0) += Point2(0.1, 0.5);
|
||||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements);
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-2));
|
||||
|
||||
// 3. Add a slightly rotated third camera above, again with measurement noise
@ -157,13 +158,13 @@ TEST( triangulation, fourPoses) {
|
|||
poses += pose3;
|
||||
measurements += z3 + Point2(0.1, -0.1);
|
||||
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3(poses, sharedCal, measurements);
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
|
||||
|
||||
// Again with nonlinear optimization
|
||||
boost::optional<Point3> triangulated_3cameras_opt = triangulatePoint3(poses,
|
||||
sharedCal, measurements, 1e-9, true);
|
||||
boost::optional<Point3> triangulated_3cameras_opt =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, 1e-9, true);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
|
||||
|
||||
// 4. Test failure: Add a 4th camera facing the wrong way
@ -176,13 +177,13 @@ TEST( triangulation, fourPoses) {
|
|||
poses += pose4;
|
||||
measurements += Point2(400, 400);
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements),
|
||||
TriangulationCheiralityException);
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
|
||||
TriangulationCheiralityException);
|
||||
#endif
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, fourPoses_distinct_Ks) {
|
||||
TEST(triangulation, fourPoses_distinct_Ks) {
|
||||
Cal3_S2 K1(1500, 1200, 0, 640, 480);
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, K1);
@ -195,22 +196,23 @@ TEST( triangulation, fourPoses_distinct_Ks) {
|
|||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z2 = camera2.project(landmark);
|
||||
|
||||
CameraSet<PinholeCamera<Cal3_S2> > cameras;
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
Point2Vector measurements;
|
||||
|
||||
cameras += camera1, camera2;
|
||||
measurements += z1, z2;
|
||||
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3(cameras, measurements);
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
|
||||
// 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
|
||||
// 2. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
measurements.at(0) += Point2(0.1, 0.5);
|
||||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3(cameras, measurements);
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-2));
|
||||
|
||||
// 3. Add a slightly rotated third camera above, again with measurement noise
@ -222,13 +224,13 @@ TEST( triangulation, fourPoses_distinct_Ks) {
|
|||
cameras += camera3;
|
||||
measurements += z3 + Point2(0.1, -0.1);
|
||||
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3(cameras, measurements);
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
|
||||
|
||||
// Again with nonlinear optimization
|
||||
boost::optional<Point3> triangulated_3cameras_opt = triangulatePoint3(cameras,
|
||||
measurements, 1e-9, true);
|
||||
boost::optional<Point3> triangulated_3cameras_opt =
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements, 1e-9, true);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
|
||||
|
||||
// 4. Test failure: Add a 4th camera facing the wrong way
@ -241,13 +243,13 @@ TEST( triangulation, fourPoses_distinct_Ks) {
|
|||
|
||||
cameras += camera4;
|
||||
measurements += Point2(400, 400);
|
||||
CHECK_EXCEPTION(triangulatePoint3(cameras, measurements),
|
||||
TriangulationCheiralityException);
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(cameras, measurements),
|
||||
TriangulationCheiralityException);
|
||||
#endif
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, outliersAndFarLandmarks) {
|
||||
TEST(triangulation, outliersAndFarLandmarks) {
|
||||
Cal3_S2 K1(1500, 1200, 0, 640, 480);
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, K1);
@ -260,24 +262,29 @@ TEST( triangulation, outliersAndFarLandmarks) {
|
|||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z2 = camera2.project(landmark);
|
||||
|
||||
CameraSet<PinholeCamera<Cal3_S2> > cameras;
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
Point2Vector measurements;
|
||||
|
||||
cameras += camera1, camera2;
|
||||
measurements += z1, z2;
|
||||
|
||||
double landmarkDistanceThreshold = 10; // landmark is closer than that
|
||||
TriangulationParameters params(1.0, false, landmarkDistanceThreshold); // all default except landmarkDistanceThreshold
|
||||
TriangulationResult actual = triangulateSafe(cameras,measurements,params);
|
||||
double landmarkDistanceThreshold = 10; // landmark is closer than that
|
||||
TriangulationParameters params(
|
||||
1.0, false, landmarkDistanceThreshold); // all default except
|
||||
// landmarkDistanceThreshold
|
||||
TriangulationResult actual = triangulateSafe(cameras, measurements, params);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
EXPECT(actual.valid());
|
||||
|
||||
landmarkDistanceThreshold = 4; // landmark is farther than that
|
||||
TriangulationParameters params2(1.0, false, landmarkDistanceThreshold); // all default except landmarkDistanceThreshold
|
||||
actual = triangulateSafe(cameras,measurements,params2);
|
||||
landmarkDistanceThreshold = 4; // landmark is farther than that
|
||||
TriangulationParameters params2(
|
||||
1.0, false, landmarkDistanceThreshold); // all default except
|
||||
// landmarkDistanceThreshold
|
||||
actual = triangulateSafe(cameras, measurements, params2);
|
||||
EXPECT(actual.farPoint());
|
||||
|
||||
// 3. Add a slightly rotated third camera above with a wrong measurement (OUTLIER)
|
||||
// 3. Add a slightly rotated third camera above with a wrong measurement
|
||||
// (OUTLIER)
|
||||
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
Cal3_S2 K3(700, 500, 0, 640, 480);
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, K3);
@ -286,21 +293,23 @@ TEST( triangulation, outliersAndFarLandmarks) {
|
|||
cameras += camera3;
|
||||
measurements += z3 + Point2(10, -10);
|
||||
|
||||
landmarkDistanceThreshold = 10; // landmark is closer than that
|
||||
double outlierThreshold = 100; // loose, the outlier is going to pass
|
||||
TriangulationParameters params3(1.0, false, landmarkDistanceThreshold,outlierThreshold);
|
||||
actual = triangulateSafe(cameras,measurements,params3);
|
||||
landmarkDistanceThreshold = 10; // landmark is closer than that
|
||||
double outlierThreshold = 100; // loose, the outlier is going to pass
|
||||
TriangulationParameters params3(1.0, false, landmarkDistanceThreshold,
|
||||
outlierThreshold);
|
||||
actual = triangulateSafe(cameras, measurements, params3);
|
||||
EXPECT(actual.valid());
|
||||
|
||||
// now set stricter threshold for outlier rejection
|
||||
outlierThreshold = 5; // tighter, the outlier is not going to pass
|
||||
TriangulationParameters params4(1.0, false, landmarkDistanceThreshold,outlierThreshold);
|
||||
actual = triangulateSafe(cameras,measurements,params4);
|
||||
outlierThreshold = 5; // tighter, the outlier is not going to pass
|
||||
TriangulationParameters params4(1.0, false, landmarkDistanceThreshold,
|
||||
outlierThreshold);
|
||||
actual = triangulateSafe(cameras, measurements, params4);
|
||||
EXPECT(actual.outlier());
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, twoIdenticalPoses) {
|
||||
TEST(triangulation, twoIdenticalPoses) {
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, *sharedCal);
@ -313,12 +322,12 @@ TEST( triangulation, twoIdenticalPoses) {
|
|||
poses += pose1, pose1;
|
||||
measurements += z1, z1;
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, onePose) {
|
||||
TEST(triangulation, onePose) {
|
||||
// we expect this test to fail with a TriangulationUnderconstrainedException
|
||||
// because there's only one camera observation
@ -326,28 +335,26 @@ TEST( triangulation, onePose) {
|
|||
Point2Vector measurements;
|
||||
|
||||
poses += Pose3();
|
||||
measurements += Point2(0,0);
|
||||
measurements += Point2(0, 0);
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, StereotriangulateNonlinear ) {
|
||||
|
||||
auto stereoK = boost::make_shared<Cal3_S2Stereo>(1733.75, 1733.75, 0, 689.645, 508.835, 0.0699612);
|
||||
TEST(triangulation, StereotriangulateNonlinear) {
|
||||
auto stereoK = boost::make_shared<Cal3_S2Stereo>(1733.75, 1733.75, 0, 689.645,
|
||||
508.835, 0.0699612);
|
||||
|
||||
// two camera poses m1, m2
|
||||
Matrix4 m1, m2;
|
||||
m1 << 0.796888717, 0.603404026, -0.0295271487, 46.6673779,
|
||||
0.592783835, -0.77156583, 0.230856632, 66.2186159,
|
||||
0.116517574, -0.201470143, -0.9725393, -4.28382528,
|
||||
0, 0, 0, 1;
|
||||
m1 << 0.796888717, 0.603404026, -0.0295271487, 46.6673779, 0.592783835,
|
||||
-0.77156583, 0.230856632, 66.2186159, 0.116517574, -0.201470143,
|
||||
-0.9725393, -4.28382528, 0, 0, 0, 1;
|
||||
|
||||
m2 << -0.955959025, -0.29288915, -0.0189328569, 45.7169799,
|
||||
-0.29277519, 0.947083213, 0.131587097, 65.843136,
|
||||
-0.0206094928, 0.131334858, -0.991123524, -4.3525033,
|
||||
0, 0, 0, 1;
|
||||
m2 << -0.955959025, -0.29288915, -0.0189328569, 45.7169799, -0.29277519,
|
||||
0.947083213, 0.131587097, 65.843136, -0.0206094928, 0.131334858,
|
||||
-0.991123524, -4.3525033, 0, 0, 0, 1;
|
||||
|
||||
typedef CameraSet<StereoCamera> Cameras;
|
||||
Cameras cameras;
@ -358,18 +365,19 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
measurements += StereoPoint2(226.936, 175.212, 424.469);
|
||||
measurements += StereoPoint2(339.571, 285.547, 669.973);
|
||||
|
||||
Point3 initial = Point3(46.0536958, 66.4621179, -6.56285929); // error: 96.5715555191
|
||||
Point3 initial =
|
||||
Point3(46.0536958, 66.4621179, -6.56285929); // error: 96.5715555191
|
||||
|
||||
Point3 actual = triangulateNonlinear(cameras, measurements, initial);
|
||||
Point3 actual = triangulateNonlinear<StereoCamera>(cameras, measurements, initial);
|
||||
|
||||
Point3 expected(46.0484569, 66.4710686, -6.55046613); // error: 0.763510644187
|
||||
Point3 expected(46.0484569, 66.4710686,
|
||||
-6.55046613); // error: 0.763510644187
|
||||
|
||||
EXPECT(assert_equal(expected, actual, 1e-4));
|
||||
|
||||
|
||||
// regular stereo factor comparison - expect very similar result as above
|
||||
{
|
||||
typedef GenericStereoFactor<Pose3,Point3> StereoFactor;
|
||||
typedef GenericStereoFactor<Pose3, Point3> StereoFactor;
|
||||
|
||||
Values values;
|
||||
values.insert(Symbol('x', 1), Pose3(m1));
@ -378,17 +386,19 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
|
||||
NonlinearFactorGraph graph;
|
||||
static SharedNoiseModel unit(noiseModel::Unit::Create(3));
|
||||
graph.emplace_shared<StereoFactor>(measurements[0], unit, Symbol('x',1), Symbol('l',1), stereoK);
|
||||
graph.emplace_shared<StereoFactor>(measurements[1], unit, Symbol('x',2), Symbol('l',1), stereoK);
|
||||
graph.emplace_shared<StereoFactor>(measurements[0], unit, Symbol('x', 1),
|
||||
Symbol('l', 1), stereoK);
|
||||
graph.emplace_shared<StereoFactor>(measurements[1], unit, Symbol('x', 2),
|
||||
Symbol('l', 1), stereoK);
|
||||
|
||||
const SharedDiagonal posePrior = noiseModel::Isotropic::Sigma(6, 1e-9);
|
||||
graph.addPrior(Symbol('x',1), Pose3(m1), posePrior);
|
||||
graph.addPrior(Symbol('x',2), Pose3(m2), posePrior);
|
||||
graph.addPrior(Symbol('x', 1), Pose3(m1), posePrior);
|
||||
graph.addPrior(Symbol('x', 2), Pose3(m2), posePrior);
|
||||
|
||||
LevenbergMarquardtOptimizer optimizer(graph, values);
|
||||
Values result = optimizer.optimize();
|
||||
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l',1)), 1e-4));
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l', 1)), 1e-4));
|
||||
}
|
||||
|
||||
// use Triangulation Factor directly - expect same result as above
@ -399,13 +409,15 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
NonlinearFactorGraph graph;
|
||||
static SharedNoiseModel unit(noiseModel::Unit::Create(3));
|
||||
|
||||
graph.emplace_shared<TriangulationFactor<StereoCamera> >(cameras[0], measurements[0], unit, Symbol('l',1));
|
||||
graph.emplace_shared<TriangulationFactor<StereoCamera> >(cameras[1], measurements[1], unit, Symbol('l',1));
|
||||
graph.emplace_shared<TriangulationFactor<StereoCamera>>(
|
||||
cameras[0], measurements[0], unit, Symbol('l', 1));
|
||||
graph.emplace_shared<TriangulationFactor<StereoCamera>>(
|
||||
cameras[1], measurements[1], unit, Symbol('l', 1));
|
||||
|
||||
LevenbergMarquardtOptimizer optimizer(graph, values);
|
||||
Values result = optimizer.optimize();
|
||||
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l',1)), 1e-4));
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l', 1)), 1e-4));
|
||||
}
|
||||
|
||||
// use ExpressionFactor - expect same result as above
@ -416,11 +428,13 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
NonlinearFactorGraph graph;
|
||||
static SharedNoiseModel unit(noiseModel::Unit::Create(3));
|
||||
|
||||
Expression<Point3> point_(Symbol('l',1));
|
||||
Expression<Point3> point_(Symbol('l', 1));
|
||||
Expression<StereoCamera> camera0_(cameras[0]);
|
||||
Expression<StereoCamera> camera1_(cameras[1]);
|
||||
Expression<StereoPoint2> project0_(camera0_, &StereoCamera::project2, point_);
|
||||
Expression<StereoPoint2> project1_(camera1_, &StereoCamera::project2, point_);
|
||||
Expression<StereoPoint2> project0_(camera0_, &StereoCamera::project2,
|
||||
point_);
|
||||
Expression<StereoPoint2> project1_(camera1_, &StereoCamera::project2,
|
||||
point_);
|
||||
|
||||
graph.addExpressionFactor(unit, measurements[0], project0_);
|
||||
graph.addExpressionFactor(unit, measurements[1], project1_);
|
||||
|
|
@ -428,10 +442,172 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
LevenbergMarquardtOptimizer optimizer(graph, values);
|
||||
Values result = optimizer.optimize();
|
||||
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l',1)), 1e-4));
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l', 1)), 1e-4));
|
||||
}
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Simple test with a well-behaved two camera situation
|
||||
TEST(triangulation, twoPoses_sphericalCamera) {
|
||||
vector<Pose3> poses;
|
||||
std::vector<Unit3> measurements;
|
||||
|
||||
// Project landmark into two cameras and triangulate
|
||||
SphericalCamera cam1(pose1);
|
||||
SphericalCamera cam2(pose2);
|
||||
Unit3 u1 = cam1.project(landmark);
|
||||
Unit3 u2 = cam2.project(landmark);
|
||||
|
||||
poses += pose1, pose2;
|
||||
measurements += u1, u2;
|
||||
|
||||
CameraSet<SphericalCamera> cameras;
|
||||
cameras.push_back(cam1);
|
||||
cameras.push_back(cam2);
|
||||
|
||||
double rank_tol = 1e-9;
|
||||
|
||||
// 1. Test linear triangulation via DLT
|
||||
auto projection_matrices = projectionMatricesFromCameras(cameras);
|
||||
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
|
||||
EXPECT(assert_equal(landmark, point, 1e-7));
|
||||
|
||||
// 2. Test nonlinear triangulation
|
||||
point = triangulateNonlinear<SphericalCamera>(cameras, measurements, point);
|
||||
EXPECT(assert_equal(landmark, point, 1e-7));
|
||||
|
||||
// 3. Test simple DLT, now within triangulatePoint3
|
||||
bool optimize = false;
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmark, *actual1, 1e-7));
|
||||
|
||||
// 4. test with optimization on, same answer
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-7));
|
||||
|
||||
// 5. Add some noise and try again: result should be ~ (5.9432,
|
||||
// 0.654319, 1.48192)
|
||||
measurements.at(0) =
|
||||
u1.retract(Vector2(0.01, 0.05)); // note: perturbation smaller for Unit3
|
||||
measurements.at(1) = u2.retract(Vector2(-0.02, 0.03));
|
||||
optimize = false;
|
||||
boost::optional<Point3> actual3 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(Point3(5.9432, 0.654319, 1.48192), *actual3, 1e-3));
|
||||
|
||||
// 6. Now with optimization on
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual4 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(Point3(5.9432, 0.654334, 1.48192), *actual4, 1e-3));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(triangulation, twoPoses_sphericalCamera_extremeFOV) {
|
||||
vector<Pose3> poses;
|
||||
std::vector<Unit3> measurements;
|
||||
|
||||
// Project landmark into two cameras and triangulate
|
||||
Pose3 poseA = Pose3(
|
||||
Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
|
||||
Point3(0.0, 0.0, 0.0)); // with z pointing along x axis of global frame
|
||||
Pose3 poseB = Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
|
||||
Point3(2.0, 0.0, 0.0)); // 2m in front of poseA
|
||||
Point3 landmarkL(
|
||||
1.0, -1.0,
|
||||
0.0); // 1m to the right of both cameras, in front of poseA, behind poseB
|
||||
SphericalCamera cam1(poseA);
|
||||
SphericalCamera cam2(poseB);
|
||||
Unit3 u1 = cam1.project(landmarkL);
|
||||
Unit3 u2 = cam2.project(landmarkL);
|
||||
|
||||
EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, 1.0)), u1,
|
||||
1e-7)); // in front and to the right of PoseA
|
||||
EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, -1.0)), u2,
|
||||
1e-7)); // behind and to the right of PoseB
|
||||
|
||||
poses += pose1, pose2;
|
||||
measurements += u1, u2;
|
||||
|
||||
CameraSet<SphericalCamera> cameras;
|
||||
cameras.push_back(cam1);
|
||||
cameras.push_back(cam2);
|
||||
|
||||
double rank_tol = 1e-9;
|
||||
|
||||
{
|
||||
// 1. Test simple DLT, when 1 point is behind spherical camera
|
||||
bool optimize = false;
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
CHECK_EXCEPTION(triangulatePoint3<SphericalCamera>(cameras, measurements,
|
||||
rank_tol, optimize),
|
||||
TriangulationCheiralityException);
|
||||
#else // otherwise project should not throw the exception
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmarkL, *actual1, 1e-7));
|
||||
#endif
|
||||
}
|
||||
{
|
||||
// 2. test with optimization on, same answer
|
||||
bool optimize = true;
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
CHECK_EXCEPTION(triangulatePoint3<SphericalCamera>(cameras, measurements,
|
||||
rank_tol, optimize),
|
||||
TriangulationCheiralityException);
|
||||
#else // otherwise project should not throw the exception
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmarkL, *actual1, 1e-7));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(triangulation, reprojectionError_cameraComparison) {
|
||||
Pose3 poseA = Pose3(
|
||||
Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
|
||||
Point3(0.0, 0.0, 0.0)); // with z pointing along x axis of global frame
|
||||
Point3 landmarkL(5.0, 0.0, 0.0);  // 5m in front of poseA
|
||||
SphericalCamera sphericalCamera(poseA);
|
||||
Unit3 u = sphericalCamera.project(landmarkL);
|
||||
|
||||
static Cal3_S2::shared_ptr sharedK(new Cal3_S2(60, 640, 480));
|
||||
PinholePose<Cal3_S2> pinholeCamera(poseA, sharedK);
|
||||
Vector2 px = pinholeCamera.project(landmarkL);
|
||||
|
||||
// add perturbation and compare error in both cameras
|
||||
Vector2 px_noise(1.0, 2.0); // px perturbation vertically and horizontally
|
||||
Vector2 measured_px = px + px_noise;
|
||||
Vector2 measured_px_calibrated = sharedK->calibrate(measured_px);
|
||||
Unit3 measured_u =
|
||||
Unit3(measured_px_calibrated[0], measured_px_calibrated[1], 1.0);
|
||||
Unit3 expected_measured_u =
|
||||
Unit3(px_noise[0] / sharedK->fx(), px_noise[1] / sharedK->fy(), 1.0);
|
||||
EXPECT(assert_equal(expected_measured_u, measured_u, 1e-7));
|
||||
|
||||
Vector2 actualErrorPinhole =
|
||||
pinholeCamera.reprojectionError(landmarkL, measured_px);
|
||||
Vector2 expectedErrorPinhole = Vector2(-px_noise[0], -px_noise[1]);
|
||||
EXPECT(assert_equal(expectedErrorPinhole, actualErrorPinhole,
|
||||
1e-7)); //- sign due to definition of error
|
||||
|
||||
Vector2 actualErrorSpherical =
|
||||
sphericalCamera.reprojectionError(landmarkL, measured_u);
|
||||
// expectedError: not easy to calculate, since it involves the unit3 basis
|
||||
Vector2 expectedErrorSpherical(-0.00360842, 0.00180419);
|
||||
EXPECT(assert_equal(expectedErrorSpherical, actualErrorSpherical, 1e-7));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
|||
|
|
@ -53,15 +53,57 @@ Vector4 triangulateHomogeneousDLT(
|
|||
return v;
|
||||
}
|
||||
|
||||
Point3 triangulateDLT(const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
Vector4 triangulateHomogeneousDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Unit3>& measurements, double rank_tol) {
|
||||
|
||||
// number of cameras
|
||||
size_t m = projection_matrices.size();
|
||||
|
||||
// Allocate DLT matrix
|
||||
Matrix A = Matrix::Zero(m * 2, 4);
|
||||
|
||||
for (size_t i = 0; i < m; i++) {
|
||||
size_t row = i * 2;
|
||||
const Matrix34& projection = projection_matrices.at(i);
|
||||
const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector
|
||||
|
||||
// build system of equations
|
||||
A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
|
||||
A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1);
|
||||
}
|
||||
int rank;
|
||||
double error;
|
||||
Vector v;
|
||||
boost::tie(rank, error, v) = DLT(A, rank_tol);
|
||||
|
||||
if (rank < 3)
|
||||
throw(TriangulationUnderconstrainedException());
|
||||
|
||||
return v;
|
||||
}
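For reference, the linear system assembled above can be written compactly (notation mine, not from the patch): each unit-norm bearing \(u_i = (x_i, y_i, z_i)^\top\) and projection matrix \(P_i\) with rows \(p_i^{1\top}, p_i^{2\top}, p_i^{3\top}\) contributes two rows to \(A\),

\[
A_{2i} = x_i\, p_i^{3\top} - z_i\, p_i^{1\top}, \qquad
A_{2i+1} = y_i\, p_i^{3\top} - z_i\, p_i^{2\top},
\]

and DLT(A, rank_tol) returns the homogeneous point \(v\) as the right singular vector of \(A\) with the smallest singular value; a rank below 3 means the bearings do not constrain the point, hence the underconstrained exception.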
|
||||
|
||||
Point3 triangulateDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const Point2Vector& measurements, double rank_tol) {
|
||||
|
||||
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
|
||||
|
||||
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
|
||||
rank_tol);
|
||||
// Create 3D point from homogeneous coordinates
|
||||
return Point3(v.head<3>() / v[3]);
|
||||
}
|
||||
|
||||
Point3 triangulateDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Unit3>& measurements, double rank_tol) {
|
||||
|
||||
// unlike the previous triangulateDLT overload, this one takes Unit3 inputs
|
||||
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
|
||||
rank_tol);
|
||||
// Create 3D point from homogeneous coordinates
|
||||
return Point3(v.head<3>() / v[3]);
|
||||
}
|
||||
|
||||
///
|
||||
/**
|
||||
* Optimize for triangulation
|
||||
|
|
@ -71,7 +113,7 @@ Point3 triangulateDLT(const std::vector<Matrix34, Eigen::aligned_allocator<Matri
|
|||
* @return refined Point3
|
||||
*/
|
||||
Point3 optimize(const NonlinearFactorGraph& graph, const Values& values,
|
||||
Key landmarkKey) {
|
||||
Key landmarkKey) {
|
||||
// Maybe we should consider Gauss-Newton?
|
||||
LevenbergMarquardtParams params;
|
||||
params.verbosityLM = LevenbergMarquardtParams::TRYLAMBDA;
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@
|
|||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <gtsam/geometry/CameraSet.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/SphericalCamera.h>
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
|
|
@ -59,6 +60,18 @@ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT(
|
|||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const Point2Vector& measurements, double rank_tol = 1e-9);
|
||||
|
||||
/**
|
||||
* Same math as Hartley and Zisserman, 2nd Ed., page 312, but with unit-norm bearing vectors
|
||||
* (unlike the pinhole-projection case, the z entry is not assumed to be 1 as in Hartley and Zisserman)
|
||||
* @param projection_matrices Projection matrices (K*P^-1)
|
||||
* @param measurements Unit3 bearing measurements
|
||||
* @param rank_tol SVD rank tolerance
|
||||
* @return Triangulated point, in homogeneous coordinates
|
||||
*/
|
||||
GTSAM_EXPORT Vector4 triangulateHomogeneousDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Unit3>& measurements, double rank_tol = 1e-9);
|
||||
|
||||
/**
|
||||
* DLT triangulation: See Hartley and Zisserman, 2nd Ed., page 312
|
||||
* @param projection_matrices Projection matrices (K*P^-1)
|
||||
|
|
@ -71,6 +84,14 @@ GTSAM_EXPORT Point3 triangulateDLT(
|
|||
const Point2Vector& measurements,
|
||||
double rank_tol = 1e-9);
|
||||
|
||||
/**
|
||||
* Overload of the previous function that accepts Unit3 measurements (bearings in the canonical camera frame)
|
||||
*/
|
||||
GTSAM_EXPORT Point3 triangulateDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Unit3>& measurements,
|
||||
double rank_tol = 1e-9);
|
||||
|
||||
/**
|
||||
* Create a factor graph with projection factors from poses and one calibration
|
||||
* @param poses Camera poses
|
||||
|
|
@ -180,26 +201,27 @@ Point3 triangulateNonlinear(
|
|||
return optimize(graph, values, Symbol('p', 0));
|
||||
}
|
||||
|
||||
/**
|
||||
* Collect the 3*4 camera projection matrices (K*P^-1) for a set of cameras.
|
||||
* Replaces the former CameraProjectionMatrix functor.
|
||||
* @param cameras The camera set
|
||||
* @return A vector of Matrix34 projection matrices,
|
||||
* one per camera
|
||||
*/
|
||||
template<class CAMERA>
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>
|
||||
projectionMatricesFromCameras(const CameraSet<CAMERA> &cameras) {
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projection_matrices;
|
||||
for (const CAMERA &camera: cameras) {
|
||||
projection_matrices.push_back(camera.cameraProjectionMatrix());
|
||||
}
|
||||
return projection_matrices;
|
||||
}
|
||||
|
||||
// overload, assuming pinholePose
|
||||
template<class CALIBRATION>
|
||||
struct CameraProjectionMatrix {
|
||||
CameraProjectionMatrix(const CALIBRATION& calibration) :
|
||||
K_(calibration.K()) {
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projectionMatricesFromPoses(
|
||||
const std::vector<Pose3> &poses, boost::shared_ptr<CALIBRATION> sharedCal) {
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projection_matrices;
|
||||
for (size_t i = 0; i < poses.size(); i++) {
|
||||
PinholePose<CALIBRATION> camera(poses.at(i), sharedCal);
|
||||
projection_matrices.push_back(camera.cameraProjectionMatrix());
|
||||
}
|
||||
Matrix34 operator()(const Pose3& pose) const {
|
||||
return K_ * (pose.inverse().matrix()).block<3, 4>(0, 0);
|
||||
}
|
||||
private:
|
||||
const Matrix3 K_;
|
||||
public:
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
return projection_matrices;
|
||||
}
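As a side note on what these helpers compute (assuming the usual GTSAM convention that a Pose3 is the camera-to-world transform \(T_{wc}\)), the matrix pushed back for each camera is the same one the removed functor formed with K_ * pose.inverse().matrix().block<3, 4>(0, 0):

\[
P = K \left(T_{wc}^{-1}\right)_{3\times 4} = K \,[\,R^\top \;\; -R^\top t\,],
\qquad T_{wc} = \begin{bmatrix} R & t \\ 0 & 1 \end{bmatrix}.
\]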
|
||||
|
||||
/**
|
||||
* Function to triangulate 3D landmark point from an arbitrary number
|
||||
|
|
@ -224,10 +246,7 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
|
|||
throw(TriangulationUnderconstrainedException());
|
||||
|
||||
// construct projection matrices from poses & calibration
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projection_matrices;
|
||||
CameraProjectionMatrix<CALIBRATION> createP(*sharedCal); // partially apply
|
||||
for(const Pose3& pose: poses)
|
||||
projection_matrices.push_back(createP(pose));
|
||||
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
|
||||
|
||||
// Triangulate linearly
|
||||
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
|
||||
|
|
@ -274,11 +293,7 @@ Point3 triangulatePoint3(
|
|||
throw(TriangulationUnderconstrainedException());
|
||||
|
||||
// construct projection matrices from poses & calibration
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projection_matrices;
|
||||
for(const CAMERA& camera: cameras)
|
||||
projection_matrices.push_back(
|
||||
CameraProjectionMatrix<typename CAMERA::CalibrationType>(camera.calibration())(
|
||||
camera.pose()));
|
||||
auto projection_matrices = projectionMatricesFromCameras(cameras);
|
||||
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
|
||||
|
||||
// Then refine using non-linear optimization
|
||||
|
|
@ -474,8 +489,8 @@ TriangulationResult triangulateSafe(const CameraSet<CAMERA>& cameras,
|
|||
#endif
|
||||
// Check reprojection error
|
||||
if (params.dynamicOutlierRejectionThreshold > 0) {
|
||||
const Point2& zi = measured.at(i);
|
||||
Point2 reprojectionError(camera.project(point) - zi);
|
||||
const typename CAMERA::Measurement& zi = measured.at(i);
|
||||
Point2 reprojectionError = camera.reprojectionError(point, zi);
|
||||
maxReprojError = std::max(maxReprojError, reprojectionError.norm());
|
||||
}
|
||||
i += 1;
|
||||
|
|
@ -503,6 +518,6 @@ using CameraSetCal3Bundler = CameraSet<PinholeCamera<Cal3Bundler>>;
|
|||
using CameraSetCal3_S2 = CameraSet<PinholeCamera<Cal3_S2>>;
|
||||
using CameraSetCal3Fisheye = CameraSet<PinholeCamera<Cal3Fisheye>>;
|
||||
using CameraSetCal3Unified = CameraSet<PinholeCamera<Cal3Unified>>;
|
||||
|
||||
using CameraSetSpherical = CameraSet<SphericalCamera>;
|
||||
} // \namespace gtsam
|
||||
|
||||
|
|
|
|||
|
|
@ -39,9 +39,6 @@ class KeyList {
|
|||
void remove(size_t key);
|
||||
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
// Actually a FastSet<Key>
|
||||
|
|
@ -67,9 +64,6 @@ class KeySet {
|
|||
bool count(size_t key) const; // returns true if value exists
|
||||
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
// Actually a vector<Key>
|
||||
|
|
@ -91,9 +85,6 @@ class KeyVector {
|
|||
void push_back(size_t key) const;
|
||||
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
// Actually a FastMap<Key,int>
|
||||
|
|
@ -165,6 +156,7 @@ gtsam::Values allPose2s(gtsam::Values& values);
|
|||
Matrix extractPose2(const gtsam::Values& values);
|
||||
gtsam::Values allPose3s(gtsam::Values& values);
|
||||
Matrix extractPose3(const gtsam::Values& values);
|
||||
Matrix extractVectors(const gtsam::Values& values, char c);
|
||||
void perturbPoint2(gtsam::Values& values, double sigma, int seed = 42u);
|
||||
void perturbPose2(gtsam::Values& values, double sigmaT, double sigmaR,
|
||||
int seed = 42u);
|
||||
|
|
|
|||
|
|
@ -35,21 +35,39 @@ void BayesNet<CONDITIONAL>::print(
|
|||
|
||||
/* ************************************************************************* */
|
||||
template <class CONDITIONAL>
|
||||
void BayesNet<CONDITIONAL>::saveGraph(const std::string& s,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
std::ofstream of(s.c_str());
|
||||
of << "digraph G{\n";
|
||||
void BayesNet<CONDITIONAL>::dot(std::ostream& os,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
os << "digraph G{\n";
|
||||
|
||||
for (auto conditional : boost::adaptors::reverse(*this)) {
|
||||
typename CONDITIONAL::Frontals frontals = conditional->frontals();
|
||||
Key me = frontals.front();
|
||||
typename CONDITIONAL::Parents parents = conditional->parents();
|
||||
for (Key p : parents)
|
||||
of << keyFormatter(p) << "->" << keyFormatter(me) << std::endl;
|
||||
for (auto conditional : *this) {
|
||||
auto frontals = conditional->frontals();
|
||||
const Key me = frontals.front();
|
||||
auto parents = conditional->parents();
|
||||
for (const Key& p : parents)
|
||||
os << keyFormatter(p) << "->" << keyFormatter(me) << "\n";
|
||||
}
|
||||
|
||||
of << "}";
|
||||
os << "}";
|
||||
std::flush(os);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class CONDITIONAL>
|
||||
std::string BayesNet<CONDITIONAL>::dot(const KeyFormatter& keyFormatter) const {
|
||||
std::stringstream ss;
|
||||
dot(ss, keyFormatter);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class CONDITIONAL>
|
||||
void BayesNet<CONDITIONAL>::saveGraph(const std::string& filename,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
std::ofstream of(filename.c_str());
|
||||
dot(of, keyFormatter);
|
||||
of.close();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
|||
|
|
@ -64,11 +64,21 @@ namespace gtsam {
|
|||
|
||||
/// @}
|
||||
|
||||
/// @name Standard Interface
|
||||
/// @name Graph Display
|
||||
/// @{
|
||||
|
||||
void saveGraph(const std::string& s,
|
||||
/// Output to graphviz format, stream version.
|
||||
void dot(std::ostream& os, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/// Output to graphviz format string.
|
||||
std::string dot(
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/// Output to file in graphviz format.
|
||||
void saveGraph(const std::string& filename,
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/// @}
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -63,20 +63,40 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CLIQUE>
|
||||
void BayesTree<CLIQUE>::saveGraph(const std::string &s, const KeyFormatter& keyFormatter) const {
|
||||
if (roots_.empty()) throw std::invalid_argument("the root of Bayes tree has not been initialized!");
|
||||
std::ofstream of(s.c_str());
|
||||
of<< "digraph G{\n";
|
||||
for(const sharedClique& root: roots_)
|
||||
saveGraph(of, root, keyFormatter);
|
||||
of<<"}";
|
||||
template <class CLIQUE>
|
||||
void BayesTree<CLIQUE>::dot(std::ostream& os,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
if (roots_.empty())
|
||||
throw std::invalid_argument(
|
||||
"the root of Bayes tree has not been initialized!");
|
||||
os << "digraph G{\n";
|
||||
for (const sharedClique& root : roots_) dot(os, root, keyFormatter);
|
||||
os << "}";
|
||||
std::flush(os);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class CLIQUE>
|
||||
std::string BayesTree<CLIQUE>::dot(const KeyFormatter& keyFormatter) const {
|
||||
std::stringstream ss;
|
||||
dot(ss, keyFormatter);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class CLIQUE>
|
||||
void BayesTree<CLIQUE>::saveGraph(const std::string& filename,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
std::ofstream of(filename.c_str());
|
||||
dot(of, keyFormatter);
|
||||
of.close();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class CLIQUE>
|
||||
void BayesTree<CLIQUE>::saveGraph(std::ostream &s, sharedClique clique, const KeyFormatter& indexFormatter, int parentnum) const {
|
||||
template <class CLIQUE>
|
||||
void BayesTree<CLIQUE>::dot(std::ostream& s, sharedClique clique,
|
||||
const KeyFormatter& indexFormatter,
|
||||
int parentnum) const {
|
||||
static int num = 0;
|
||||
bool first = true;
|
||||
std::stringstream out;
|
||||
|
|
@ -107,7 +127,7 @@ namespace gtsam {
|
|||
|
||||
for (sharedClique c : clique->children) {
|
||||
num++;
|
||||
saveGraph(s, c, indexFormatter, parentnum);
|
||||
dot(s, c, indexFormatter, parentnum);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -182,13 +182,20 @@ namespace gtsam {
|
|||
*/
|
||||
sharedBayesNet jointBayesNet(Key j1, Key j2, const Eliminate& function = EliminationTraitsType::DefaultEliminate) const;
|
||||
|
||||
/**
|
||||
* Read only with side effects
|
||||
*/
|
||||
/// @name Graph Display
|
||||
/// @{
|
||||
|
||||
/** saves the Tree to a text file in GraphViz format */
|
||||
void saveGraph(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
/// Output to graphviz format, stream version.
|
||||
void dot(std::ostream& os, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/// Output to graphviz format string.
|
||||
std::string dot(
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/// Output to file in graphviz format.
|
||||
void saveGraph(const std::string& filename,
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/// @}
|
||||
/// @name Advanced Interface
|
||||
/// @{
|
||||
|
|
@ -236,8 +243,8 @@ namespace gtsam {
|
|||
protected:
|
||||
|
||||
/** private helper method for saving the Tree to a text file in GraphViz format */
|
||||
void saveGraph(std::ostream &s, sharedClique clique, const KeyFormatter& keyFormatter,
|
||||
int parentnum = 0) const;
|
||||
void dot(std::ostream &s, sharedClique clique, const KeyFormatter& keyFormatter,
|
||||
int parentnum = 0) const;
|
||||
|
||||
/** Gather data on a single clique */
|
||||
void getCliqueData(sharedClique clique, BayesTreeCliqueData* stats) const;
|
||||
|
|
@ -249,7 +256,7 @@ namespace gtsam {
|
|||
void fillNodesIndex(const sharedClique& subtree);
|
||||
|
||||
// Friend JunctionTree because it directly fills roots and nodes index.
|
||||
template<class BAYESRTEE, class GRAPH> friend class EliminatableClusterTree;
|
||||
template<class BAYESTREE, class GRAPH> friend class EliminatableClusterTree;
|
||||
|
||||
private:
|
||||
/** Serialization function */
|
||||
|
|
|
|||
|
|
@ -0,0 +1,93 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2021, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file DotWriter.cpp
|
||||
* @brief Graphviz formatting for factor graphs.
|
||||
* @author Frank Dellaert
|
||||
* @date December, 2021
|
||||
*/
|
||||
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/inference/DotWriter.h>
|
||||
|
||||
#include <ostream>
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
void DotWriter::writePreamble(ostream* os) const {
|
||||
*os << "graph {\n";
|
||||
*os << " size=\"" << figureWidthInches << "," << figureHeightInches
|
||||
<< "\";\n\n";
|
||||
}
|
||||
|
||||
void DotWriter::DrawVariable(Key key, const KeyFormatter& keyFormatter,
|
||||
const boost::optional<Vector2>& position,
|
||||
ostream* os) {
|
||||
// Label the node with the label from the KeyFormatter
|
||||
*os << " var" << key << "[label=\"" << keyFormatter(key) << "\"";
|
||||
if (position) {
|
||||
*os << ", pos=\"" << position->x() << "," << position->y() << "!\"";
|
||||
}
|
||||
*os << "];\n";
|
||||
}
|
||||
|
||||
void DotWriter::DrawFactor(size_t i, const boost::optional<Vector2>& position,
|
||||
ostream* os) {
|
||||
*os << " factor" << i << "[label=\"\", shape=point";
|
||||
if (position) {
|
||||
*os << ", pos=\"" << position->x() << "," << position->y() << "!\"";
|
||||
}
|
||||
*os << "];\n";
|
||||
}
|
||||
|
||||
void DotWriter::ConnectVariables(Key key1, Key key2, ostream* os) {
|
||||
*os << " var" << key1 << "--"
|
||||
<< "var" << key2 << ";\n";
|
||||
}
|
||||
|
||||
void DotWriter::ConnectVariableFactor(Key key, size_t i, ostream* os) {
|
||||
*os << " var" << key << "--"
|
||||
<< "factor" << i << ";\n";
|
||||
}
|
||||
|
||||
void DotWriter::processFactor(size_t i, const KeyVector& keys,
|
||||
const boost::optional<Vector2>& position,
|
||||
ostream* os) const {
|
||||
if (plotFactorPoints) {
|
||||
if (binaryEdges && keys.size() == 2) {
|
||||
ConnectVariables(keys[0], keys[1], os);
|
||||
} else {
|
||||
// Create dot for the factor.
|
||||
DrawFactor(i, position, os);
|
||||
|
||||
// Make factor-variable connections
|
||||
if (connectKeysToFactor) {
|
||||
for (Key key : keys) {
|
||||
ConnectVariableFactor(key, i, os);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// just connect variables in a clique
|
||||
for (Key key1 : keys) {
|
||||
for (Key key2 : keys) {
|
||||
if (key2 > key1) {
|
||||
ConnectVariables(key1, key2, os);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2021, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file DotWriter.h
|
||||
* @brief Graphviz formatter
|
||||
* @author Frank Dellaert
|
||||
* @date December, 2021
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/FastVector.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/inference/Key.h>
|
||||
|
||||
#include <iosfwd>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/// Graphviz formatter.
|
||||
struct GTSAM_EXPORT DotWriter {
|
||||
double figureWidthInches; ///< The figure width on paper in inches
|
||||
double figureHeightInches; ///< The figure height on paper in inches
|
||||
bool plotFactorPoints; ///< Plots each factor as a dot between the variables
|
||||
bool connectKeysToFactor; ///< Draw a line from each key within a factor to
|
||||
///< the dot of the factor
|
||||
bool binaryEdges;  ///< For binary factors, connect the two variables directly instead of via a factor dot
|
||||
|
||||
explicit DotWriter(double figureWidthInches = 5,
|
||||
double figureHeightInches = 5,
|
||||
bool plotFactorPoints = true,
|
||||
bool connectKeysToFactor = true, bool binaryEdges = true)
|
||||
: figureWidthInches(figureWidthInches),
|
||||
figureHeightInches(figureHeightInches),
|
||||
plotFactorPoints(plotFactorPoints),
|
||||
connectKeysToFactor(connectKeysToFactor),
|
||||
binaryEdges(binaryEdges) {}
|
||||
|
||||
/// Write out preamble, including size.
|
||||
void writePreamble(std::ostream* os) const;
|
||||
|
||||
/// Create a variable dot fragment.
|
||||
static void DrawVariable(Key key, const KeyFormatter& keyFormatter,
|
||||
const boost::optional<Vector2>& position,
|
||||
std::ostream* os);
|
||||
|
||||
/// Create factor dot.
|
||||
static void DrawFactor(size_t i, const boost::optional<Vector2>& position,
|
||||
std::ostream* os);
|
||||
|
||||
/// Connect two variables.
|
||||
static void ConnectVariables(Key key1, Key key2, std::ostream* os);
|
||||
|
||||
/// Connect variable and factor.
|
||||
static void ConnectVariableFactor(Key key, size_t i, std::ostream* os);
|
||||
|
||||
/// Draw a single factor, specified by its index i and its variable keys.
|
||||
void processFactor(size_t i, const KeyVector& keys,
|
||||
const boost::optional<Vector2>& position,
|
||||
std::ostream* os) const;
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
@ -26,6 +26,7 @@
|
|||
#include <stdio.h>
|
||||
#include <algorithm>
|
||||
#include <iostream> // for cout :-(
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
|
||||
|
|
@ -125,4 +126,49 @@ FactorIndices FactorGraph<FACTOR>::add_factors(const CONTAINER& factors,
|
|||
return newFactorIndices;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class FACTOR>
|
||||
void FactorGraph<FACTOR>::dot(std::ostream& os,
|
||||
const KeyFormatter& keyFormatter,
|
||||
const DotWriter& writer) const {
|
||||
writer.writePreamble(&os);
|
||||
|
||||
// Create nodes for each variable in the graph
|
||||
for (Key key : keys()) {
|
||||
writer.DrawVariable(key, keyFormatter, boost::none, &os);
|
||||
}
|
||||
os << "\n";
|
||||
|
||||
// Create factors and variable connections
|
||||
for (size_t i = 0; i < size(); ++i) {
|
||||
const auto& factor = at(i);
|
||||
if (factor) {
|
||||
const KeyVector& factorKeys = factor->keys();
|
||||
writer.processFactor(i, factorKeys, boost::none, &os);
|
||||
}
|
||||
}
|
||||
|
||||
os << "}\n";
|
||||
std::flush(os);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class FACTOR>
|
||||
std::string FactorGraph<FACTOR>::dot(const KeyFormatter& keyFormatter,
|
||||
const DotWriter& writer) const {
|
||||
std::stringstream ss;
|
||||
dot(ss, keyFormatter, writer);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class FACTOR>
|
||||
void FactorGraph<FACTOR>::saveGraph(const std::string& filename,
|
||||
const KeyFormatter& keyFormatter,
|
||||
const DotWriter& writer) const {
|
||||
std::ofstream of(filename.c_str());
|
||||
dot(of, keyFormatter, writer);
|
||||
of.close();
|
||||
}
|
||||
|
||||
} // namespace gtsam
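A minimal usage sketch of the dot/saveGraph entry points implemented above (not part of the patch; the GaussianFactorGraph argument and the output file name are only placeholders, and any FactorGraph instantiation works the same way):

#include <gtsam/inference/DotWriter.h>
#include <gtsam/linear/GaussianFactorGraph.h>

#include <iostream>
#include <string>

void exportToGraphviz(const gtsam::GaussianFactorGraph& graph) {
  // 8x8 inch figure, factor dots drawn, plain edges for binary factors.
  gtsam::DotWriter writer(/*figureWidthInches=*/8, /*figureHeightInches=*/8,
                          /*plotFactorPoints=*/true,
                          /*connectKeysToFactor=*/true,
                          /*binaryEdges=*/true);
  // Stream version: writes the whole "graph { ... }" document to stdout.
  graph.dot(std::cout, gtsam::DefaultKeyFormatter, writer);
  // String version, handy for tests or the Python wrapper.
  const std::string dotSource = graph.dot(gtsam::DefaultKeyFormatter, writer);
  std::cout << dotSource.size() << " characters of dot output\n";
  // File version (placeholder name).
  graph.saveGraph("factor_graph.dot", gtsam::DefaultKeyFormatter, writer);
}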
|
||||
|
|
|
|||
|
|
@ -22,9 +22,10 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/inference/DotWriter.h>
|
||||
#include <gtsam/inference/Key.h>
|
||||
#include <gtsam/base/FastVector.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/inference/Key.h>
|
||||
|
||||
#include <Eigen/Core> // for Eigen::aligned_allocator
|
||||
|
||||
|
|
@ -36,6 +37,7 @@
|
|||
#include <string>
|
||||
#include <type_traits>
|
||||
#include <utility>
|
||||
#include <iosfwd>
|
||||
|
||||
namespace gtsam {
|
||||
/// Define collection type:
|
||||
|
|
@ -371,6 +373,24 @@ class FactorGraph {
|
|||
return factors_.erase(first, last);
|
||||
}
|
||||
|
||||
/// @}
|
||||
/// @name Graph Display
|
||||
/// @{
|
||||
|
||||
/// Output to graphviz format, stream version.
|
||||
void dot(std::ostream& os,
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
const DotWriter& writer = DotWriter()) const;
|
||||
|
||||
/// Output to graphviz format string.
|
||||
std::string dot(const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
const DotWriter& writer = DotWriter()) const;
|
||||
|
||||
/// Output to file in graphviz format.
|
||||
void saveGraph(const std::string& filename,
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
const DotWriter& writer = DotWriter()) const;
|
||||
|
||||
/// @}
|
||||
/// @name Advanced Interface
|
||||
/// @{
|
||||
|
|
|
|||
|
|
@ -110,7 +110,6 @@ double dot(const Errors& a, const Errors& b) {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<>
|
||||
void axpy(double alpha, const Errors& x, Errors& y) {
|
||||
Errors::const_iterator it = x.begin();
|
||||
for(Vector& yi: y)
|
||||
|
|
|
|||
|
|
@ -65,7 +65,6 @@ namespace gtsam {
|
|||
/**
|
||||
* BLAS level 2 style
|
||||
*/
|
||||
template <>
|
||||
GTSAM_EXPORT void axpy(double alpha, const Errors& x, Errors& y);
|
||||
|
||||
/** print with optional string */
|
||||
|
|
|
|||
|
|
@ -379,7 +379,7 @@ namespace gtsam {
|
|||
|
||||
gttic(Compute_minimizing_step_size);
|
||||
// Compute minimizing step size
|
||||
double step = -gradientSqNorm / dot(Rg, Rg);
|
||||
double step = -gradientSqNorm / gtsam::dot(Rg, Rg);
|
||||
gttoc(Compute_minimizing_step_size);
|
||||
|
||||
gttic(Compute_point);
|
||||
|
|
|
|||
|
|
@ -34,9 +34,6 @@ virtual class Gaussian : gtsam::noiseModel::Base {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
virtual class Diagonal : gtsam::noiseModel::Gaussian {
|
||||
|
|
@ -52,9 +49,6 @@ virtual class Diagonal : gtsam::noiseModel::Gaussian {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
virtual class Constrained : gtsam::noiseModel::Diagonal {
|
||||
|
|
@ -72,9 +66,6 @@ virtual class Constrained : gtsam::noiseModel::Diagonal {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
virtual class Isotropic : gtsam::noiseModel::Diagonal {
|
||||
|
|
@ -87,9 +78,6 @@ virtual class Isotropic : gtsam::noiseModel::Diagonal {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
virtual class Unit : gtsam::noiseModel::Isotropic {
|
||||
|
|
@ -97,9 +85,6 @@ virtual class Unit : gtsam::noiseModel::Isotropic {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
namespace mEstimator {
|
||||
|
|
@ -270,9 +255,6 @@ class VectorValues {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/linear/GaussianFactor.h>
|
||||
|
|
@ -344,9 +326,6 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/linear/HessianFactor.h>
|
||||
|
|
@ -379,9 +358,6 @@ virtual class HessianFactor : gtsam::GaussianFactor {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
|
|
@ -463,9 +439,6 @@ class GaussianFactorGraph {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/linear/GaussianConditional.h>
|
||||
|
|
|
|||
|
|
@ -0,0 +1,55 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file BarometricFactor.cpp
|
||||
* @author Peter Milani
|
||||
* @brief Implementation file for Barometric factor
|
||||
* @date December 16, 2021
|
||||
**/
|
||||
|
||||
#include "BarometricFactor.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
//***************************************************************************
|
||||
void BarometricFactor::print(const string& s,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
cout << (s.empty() ? "" : s + " ") << "Barometric Factor on "
|
||||
<< keyFormatter(key1()) << " Barometric Bias on "
|
||||
<< keyFormatter(key2()) << "\n";
|
||||
|
||||
cout << " Baro measurement: " << nT_ << "\n";
|
||||
noiseModel_->print(" noise model: ");
|
||||
}
|
||||
|
||||
//***************************************************************************
|
||||
bool BarometricFactor::equals(const NonlinearFactor& expected,
|
||||
double tol) const {
|
||||
const This* e = dynamic_cast<const This*>(&expected);
|
||||
return e != nullptr && Base::equals(*e, tol) &&
|
||||
traits<double>::Equals(nT_, e->nT_, tol);
|
||||
}
|
||||
|
||||
//***************************************************************************
|
||||
Vector BarometricFactor::evaluateError(const Pose3& p, const double& bias,
|
||||
boost::optional<Matrix&> H,
|
||||
boost::optional<Matrix&> H2) const {
|
||||
Matrix tH;
|
||||
Vector ret = (Vector(1) << (p.translation(tH).z() + bias - nT_)).finished();
|
||||
if (H) (*H) = tH.block<1, 6>(2, 0);
|
||||
if (H2) (*H2) = (Matrix(1, 1) << 1.0).finished();
|
||||
return ret;
|
||||
}
|
||||
|
||||
} // namespace gtsam
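In equation form (notation mine, matching the code above): with the measured standard-atmosphere height \(h_m\) = heightOut(pressure) stored in nT_, the factor evaluates

\[
e(T, b) = t_z(T) + b - h_m, \qquad
\frac{\partial e}{\partial \xi} = \text{third row of } \frac{\partial t(T)}{\partial \xi} \in \mathbb{R}^{1 \times 6}, \qquad
\frac{\partial e}{\partial b} = 1,
\]

which is exactly the block<1, 6>(2, 0) slice of the translation Jacobian and the 1x1 unit matrix written into H and H2.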
|
||||
|
|
@ -0,0 +1,109 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file BarometricFactor.h
|
||||
* @author Peter Milani
|
||||
* @brief Header file for Barometric factor
|
||||
* @date December 16, 2021
|
||||
**/
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/navigation/NavState.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* Prior on height in a cartesian frame.
|
||||
* Receives barometric pressure in kilopascals
|
||||
* and models a slowly moving bias to capture differences
|
||||
* between the measured height and the standard atmosphere
|
||||
* https://www.grc.nasa.gov/www/k-12/airplane/atmosmet.html
|
||||
* @addtogroup Navigation
|
||||
*/
|
||||
class GTSAM_EXPORT BarometricFactor : public NoiseModelFactor2<Pose3, double> {
|
||||
private:
|
||||
typedef NoiseModelFactor2<Pose3, double> Base;
|
||||
|
||||
double nT_; ///< Height Measurement based on a standard atmosphere
|
||||
|
||||
public:
|
||||
/// shorthand for a smart pointer to a factor
|
||||
typedef boost::shared_ptr<BarometricFactor> shared_ptr;
|
||||
|
||||
/// Typedef to this class
|
||||
typedef BarometricFactor This;
|
||||
|
||||
/** default constructor - only use for serialization */
|
||||
BarometricFactor() : nT_(0) {}
|
||||
|
||||
~BarometricFactor() override {}
|
||||
|
||||
/**
|
||||
* @brief Constructor from a measurement of pressure in kPa.
|
||||
* @param key Key of the Pose3 variable that will be constrained
|
||||
* @param baroKey Key of the barometric bias that will be constrained
|
||||
* @param baroIn measurement in kPa
|
||||
* @param model Gaussian noise model 1 dimension
|
||||
*/
|
||||
BarometricFactor(Key key, Key baroKey, const double& baroIn,
|
||||
const SharedNoiseModel& model)
|
||||
: Base(model, key, baroKey), nT_(heightOut(baroIn)) {}
|
||||
|
||||
/// @return a deep copy of this factor
|
||||
gtsam::NonlinearFactor::shared_ptr clone() const override {
|
||||
return boost::static_pointer_cast<gtsam::NonlinearFactor>(
|
||||
gtsam::NonlinearFactor::shared_ptr(new This(*this)));
|
||||
}
|
||||
|
||||
/// print
|
||||
void print(
|
||||
const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override;
|
||||
|
||||
/// equals
|
||||
bool equals(const NonlinearFactor& expected,
|
||||
double tol = 1e-9) const override;
|
||||
|
||||
/// vector of errors
|
||||
Vector evaluateError(
|
||||
const Pose3& p, const double& b,
|
||||
boost::optional<Matrix&> H = boost::none,
|
||||
boost::optional<Matrix&> H2 = boost::none) const override;
|
||||
|
||||
inline const double& measurementIn() const { return nT_; }
|
||||
|
||||
inline double heightOut(double n) const {
|
||||
// From https://www.grc.nasa.gov/www/k-12/airplane/atmosmet.html
|
||||
return (std::pow(n / 101.29, 1. / 5.256) * 288.08 - 273.1 - 15.04) /
|
||||
-0.00649;
|
||||
};
|
||||
|
||||
inline double baroOut(const double& meters) {
|
||||
double temp = 15.04 - 0.00649 * meters;
|
||||
return 101.29 * std::pow(((temp + 273.1) / 288.08), 5.256);
|
||||
};
|
||||
|
||||
private:
|
||||
/// Serialization function
|
||||
friend class boost::serialization::access;
|
||||
template <class ARCHIVE>
|
||||
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
||||
ar& boost::serialization::make_nvp(
|
||||
"NoiseModelFactor1",
|
||||
boost::serialization::base_object<Base>(*this));
|
||||
ar& BOOST_SERIALIZATION_NVP(nT_);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace gtsam
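For reference, the troposphere model behind heightOut and baroOut (from the NASA page cited in the class comment; pressure \(p\) in kPa, height \(h\) in meters, temperature in degrees Celsius):

\[
T(h) = 15.04 - 0.00649\,h, \qquad
p(h) = 101.29 \left(\frac{T(h) + 273.1}{288.08}\right)^{5.256},
\]

so inverting for height gives

\[
h(p) = \frac{288.08\,(p / 101.29)^{1/5.256} - 273.1 - 15.04}{-0.00649},
\]

which is the expression implemented in heightOut.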
|
||||
|
|
@ -44,9 +44,6 @@ class ConstantBias {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
}///\namespace imuBias
|
||||
|
|
@ -73,9 +70,6 @@ class NavState {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/navigation/PreintegratedRotation.h>
|
||||
|
|
@ -121,9 +115,6 @@ virtual class PreintegrationParams : gtsam::PreintegratedRotationParams {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/navigation/ImuFactor.h>
|
||||
|
|
@ -156,9 +147,6 @@ class PreintegratedImuMeasurements {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
virtual class ImuFactor: gtsam::NonlinearFactor {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,129 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testBarometricFactor.cpp
|
||||
* @brief Unit test for BarometricFactor
|
||||
* @author Peter Milani
|
||||
* @date 16 Dec, 2021
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/navigation/BarometricFactor.h>
|
||||
|
||||
#include <boost/bind/bind.hpp>
|
||||
|
||||
using namespace std::placeholders;
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
// *************************************************************************
|
||||
namespace example {}
|
||||
|
||||
double metersToBaro(const double& meters) {
|
||||
double temp = 15.04 - 0.00649 * meters;
|
||||
return 101.29 * std::pow(((temp + 273.1) / 288.08), 5.256);
|
||||
}
|
||||
|
||||
// *************************************************************************
|
||||
TEST(BarometricFactor, Constructor) {
|
||||
using namespace example;
|
||||
|
||||
// meters to barometric.
|
||||
|
||||
double baroMeasurement = metersToBaro(10.);
|
||||
|
||||
// Factor
|
||||
Key key(1);
|
||||
Key key2(2);
|
||||
SharedNoiseModel model = noiseModel::Isotropic::Sigma(1, 0.25);
|
||||
BarometricFactor factor(key, key2, baroMeasurement, model);
|
||||
|
||||
// Create a linearization point at zero error
|
||||
Pose3 T(Rot3::RzRyRx(0., 0., 0.), Point3(0., 0., 10.));
|
||||
double baroBias = 0.;
|
||||
Vector1 zero;
|
||||
zero << 0.;
|
||||
EXPECT(assert_equal(zero, factor.evaluateError(T, baroBias), 1e-5));
|
||||
|
||||
// Calculate numerical derivatives
|
||||
Matrix expectedH = numericalDerivative21<Vector, Pose3, double>(
|
||||
std::bind(&BarometricFactor::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2, boost::none,
|
||||
boost::none),
|
||||
T, baroBias);
|
||||
|
||||
Matrix expectedH2 = numericalDerivative22<Vector, Pose3, double>(
|
||||
std::bind(&BarometricFactor::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2, boost::none,
|
||||
boost::none),
|
||||
T, baroBias);
|
||||
|
||||
// Use the factor to calculate the derivative
|
||||
Matrix actualH, actualH2;
|
||||
factor.evaluateError(T, baroBias, actualH, actualH2);
|
||||
|
||||
// Verify we get the expected error
|
||||
EXPECT(assert_equal(expectedH, actualH, 1e-8));
|
||||
EXPECT(assert_equal(expectedH2, actualH2, 1e-8));
|
||||
}
|
||||
|
||||
// *************************************************************************
|
||||
|
||||
//***************************************************************************
|
||||
TEST(BarometricFactor, nonZero) {
|
||||
using namespace example;
|
||||
|
||||
// meters to barometric.
|
||||
|
||||
double baroMeasurement = metersToBaro(10.);
|
||||
|
||||
// Factor
|
||||
Key key(1);
|
||||
Key key2(2);
|
||||
SharedNoiseModel model = noiseModel::Isotropic::Sigma(1, 0.25);
|
||||
BarometricFactor factor(key, key2, baroMeasurement, model);
|
||||
|
||||
Pose3 T(Rot3::RzRyRx(0.5, 1., 1.), Point3(20., 30., 1.));
|
||||
double baroBias = 5.;
|
||||
|
||||
// Calculate numerical derivatives
|
||||
Matrix expectedH = numericalDerivative21<Vector, Pose3, double>(
|
||||
std::bind(&BarometricFactor::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2, boost::none,
|
||||
boost::none),
|
||||
T, baroBias);
|
||||
|
||||
Matrix expectedH2 = numericalDerivative22<Vector, Pose3, double>(
|
||||
std::bind(&BarometricFactor::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2, boost::none,
|
||||
boost::none),
|
||||
T, baroBias);
|
||||
|
||||
// Use the factor to calculate the derivative and the error
|
||||
Matrix actualH, actualH2;
|
||||
Vector error = factor.evaluateError(T, baroBias, actualH, actualH2);
|
||||
Vector actual = (Vector(1) << -4.0).finished();
|
||||
|
||||
// Verify we get the expected error
|
||||
EXPECT(assert_equal(expectedH, actualH, 1e-8));
|
||||
EXPECT(assert_equal(expectedH2, actualH2, 1e-8));
|
||||
EXPECT(assert_equal(error, actual, 1e-8));
|
||||
}
|
||||
|
||||
// *************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
// *************************************************************************
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2021, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file GraphvizFormatting.cpp
|
||||
* @brief Graphviz formatter for NonlinearFactorGraph
|
||||
* @author Frank Dellaert
|
||||
* @date December, 2021
|
||||
*/
|
||||
|
||||
#include <gtsam/nonlinear/GraphvizFormatting.h>
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
|
||||
// TODO(frank): nonlinear should not depend on geometry:
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
|
||||
#include <limits>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
Vector2 GraphvizFormatting::findBounds(const Values& values,
|
||||
const KeySet& keys) const {
|
||||
Vector2 min;
|
||||
min.x() = std::numeric_limits<double>::infinity();
|
||||
min.y() = std::numeric_limits<double>::infinity();
|
||||
for (const Key& key : keys) {
|
||||
if (values.exists(key)) {
|
||||
boost::optional<Vector2> xy = operator()(values.at(key));
|
||||
if (xy) {
|
||||
if (xy->x() < min.x()) min.x() = xy->x();
|
||||
if (xy->y() < min.y()) min.y() = xy->y();
|
||||
}
|
||||
}
|
||||
}
|
||||
return min;
|
||||
}
|
||||
|
||||
boost::optional<Vector2> GraphvizFormatting::operator()(
|
||||
const Value& value) const {
|
||||
Vector3 t;
|
||||
if (const GenericValue<Pose2>* p =
|
||||
dynamic_cast<const GenericValue<Pose2>*>(&value)) {
|
||||
t << p->value().x(), p->value().y(), 0;
|
||||
} else if (const GenericValue<Vector2>* p =
|
||||
dynamic_cast<const GenericValue<Vector2>*>(&value)) {
|
||||
t << p->value().x(), p->value().y(), 0;
|
||||
} else if (const GenericValue<Pose3>* p =
|
||||
dynamic_cast<const GenericValue<Pose3>*>(&value)) {
|
||||
t = p->value().translation();
|
||||
} else if (const GenericValue<Point3>* p =
|
||||
dynamic_cast<const GenericValue<Point3>*>(&value)) {
|
||||
t = p->value();
|
||||
} else {
|
||||
return boost::none;
|
||||
}
|
||||
double x, y;
|
||||
switch (paperHorizontalAxis) {
|
||||
case X:
|
||||
x = t.x();
|
||||
break;
|
||||
case Y:
|
||||
x = t.y();
|
||||
break;
|
||||
case Z:
|
||||
x = t.z();
|
||||
break;
|
||||
case NEGX:
|
||||
x = -t.x();
|
||||
break;
|
||||
case NEGY:
|
||||
x = -t.y();
|
||||
break;
|
||||
case NEGZ:
|
||||
x = -t.z();
|
||||
break;
|
||||
default:
|
||||
throw std::runtime_error("Invalid enum value");
|
||||
}
|
||||
switch (paperVerticalAxis) {
|
||||
case X:
|
||||
y = t.x();
|
||||
break;
|
||||
case Y:
|
||||
y = t.y();
|
||||
break;
|
||||
case Z:
|
||||
y = t.z();
|
||||
break;
|
||||
case NEGX:
|
||||
y = -t.x();
|
||||
break;
|
||||
case NEGY:
|
||||
y = -t.y();
|
||||
break;
|
||||
case NEGZ:
|
||||
y = -t.z();
|
||||
break;
|
||||
default:
|
||||
throw std::runtime_error("Invalid enum value");
|
||||
}
|
||||
return Vector2(x, y);
|
||||
}
|
||||
|
||||
// Return affinely transformed variable position if it exists.
|
||||
boost::optional<Vector2> GraphvizFormatting::variablePos(const Values& values,
|
||||
const Vector2& min,
|
||||
Key key) const {
|
||||
if (!values.exists(key)) return boost::none;
|
||||
boost::optional<Vector2> xy = operator()(values.at(key));
|
||||
if (xy) {
|
||||
xy->x() = scale * (xy->x() - min.x());
|
||||
xy->y() = scale * (xy->y() - min.y());
|
||||
}
|
||||
return xy;
|
||||
}
|
||||
|
||||
// Return affinely transformed factor position if it exists.
|
||||
boost::optional<Vector2> GraphvizFormatting::factorPos(const Vector2& min,
|
||||
size_t i) const {
|
||||
if (factorPositions.size() == 0) return boost::none;
|
||||
auto it = factorPositions.find(i);
|
||||
if (it == factorPositions.end()) return boost::none;
|
||||
auto pos = it->second;
|
||||
return Vector2(scale * (pos.x() - min.x()), scale * (pos.y() - min.y()));
|
||||
}
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2021, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file GraphvizFormatting.h
|
||||
* @brief Graphviz formatter for NonlinearFactorGraph
|
||||
* @author Frank Dellaert
|
||||
* @date December, 2021
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/inference/DotWriter.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
class Values;
|
||||
class Value;
|
||||
|
||||
/**
|
||||
* Formatting options and functions for saving a NonlinearFactorGraph instance
|
||||
* in GraphViz format.
|
||||
*/
|
||||
struct GTSAM_EXPORT GraphvizFormatting : public DotWriter {
|
||||
/// World axes to be assigned to paper axes
|
||||
enum Axis { X, Y, Z, NEGX, NEGY, NEGZ };
|
||||
|
||||
Axis paperHorizontalAxis; ///< The world axis assigned to the horizontal
|
||||
///< paper axis
|
||||
Axis paperVerticalAxis; ///< The world axis assigned to the vertical paper
|
||||
///< axis
|
||||
double scale; ///< Scale all positions to reduce / increase density
|
||||
bool mergeSimilarFactors; ///< Merge multiple factors that have the same
|
||||
///< connectivity
|
||||
|
||||
/// (optional for each factor) Manually specify factor "dot" positions:
|
||||
std::map<size_t, Vector2> factorPositions;
|
||||
|
||||
/// Default constructor sets up robot coordinates. Paper horizontal is robot
|
||||
/// Y, paper vertical is robot X. Default figure size of 5x5 in.
|
||||
GraphvizFormatting()
|
||||
: paperHorizontalAxis(Y),
|
||||
paperVerticalAxis(X),
|
||||
scale(1),
|
||||
mergeSimilarFactors(false) {}
|
||||
|
||||
// Find bounds
|
||||
Vector2 findBounds(const Values& values, const KeySet& keys) const;
|
||||
|
||||
/// Extract a Vector2 from either Vector2, Pose2, Pose3, or Point3
|
||||
boost::optional<Vector2> operator()(const Value& value) const;
|
||||
|
||||
/// Return affinely transformed variable position if it exists.
|
||||
boost::optional<Vector2> variablePos(const Values& values, const Vector2& min,
|
||||
Key key) const;
|
||||
|
||||
/// Return affinely transformed factor position if it exists.
|
||||
boost::optional<Vector2> factorPos(const Vector2& min, size_t i) const;
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
@ -282,7 +282,7 @@ public:
|
|||
* which are objects in non-linear manifolds (Lie groups).
|
||||
*/
|
||||
template<class VALUE>
|
||||
class NoiseModelFactor1: public NoiseModelFactor {
|
||||
class GTSAM_EXPORT NoiseModelFactor1: public NoiseModelFactor {
|
||||
|
||||
public:
|
||||
|
||||
|
|
@ -366,7 +366,7 @@ private:
|
|||
/** A convenient base class for creating your own NoiseModelFactor with 2
|
||||
* variables. To derive from this class, implement evaluateError(). */
|
||||
template<class VALUE1, class VALUE2>
|
||||
class NoiseModelFactor2: public NoiseModelFactor {
|
||||
class GTSAM_EXPORT NoiseModelFactor2: public NoiseModelFactor {
|
||||
|
||||
public:
|
||||
|
||||
|
|
@ -441,7 +441,7 @@ private:
|
|||
/** A convenient base class for creating your own NoiseModelFactor with 3
|
||||
* variables. To derive from this class, implement evaluateError(). */
|
||||
template<class VALUE1, class VALUE2, class VALUE3>
|
||||
class NoiseModelFactor3: public NoiseModelFactor {
|
||||
class GTSAM_EXPORT NoiseModelFactor3: public NoiseModelFactor {
|
||||
|
||||
public:
|
||||
|
||||
|
|
@ -518,7 +518,7 @@ private:
|
|||
/** A convenient base class for creating your own NoiseModelFactor with 4
|
||||
* variables. To derive from this class, implement evaluateError(). */
|
||||
template<class VALUE1, class VALUE2, class VALUE3, class VALUE4>
|
||||
class NoiseModelFactor4: public NoiseModelFactor {
|
||||
class GTSAM_EXPORT NoiseModelFactor4: public NoiseModelFactor {
|
||||
|
||||
public:
|
||||
|
||||
|
|
@ -599,7 +599,7 @@ private:
|
|||
/** A convenient base class for creating your own NoiseModelFactor with 5
|
||||
* variables. To derive from this class, implement evaluateError(). */
|
||||
template<class VALUE1, class VALUE2, class VALUE3, class VALUE4, class VALUE5>
|
||||
class NoiseModelFactor5: public NoiseModelFactor {
|
||||
class GTSAM_EXPORT NoiseModelFactor5: public NoiseModelFactor {
|
||||
|
||||
public:
|
||||
|
||||
|
|
@ -684,7 +684,7 @@ private:
|
|||
/** A convenient base class for creating your own NoiseModelFactor with 6
|
||||
* variables. To derive from this class, implement evaluateError(). */
|
||||
template<class VALUE1, class VALUE2, class VALUE3, class VALUE4, class VALUE5, class VALUE6>
|
||||
class NoiseModelFactor6: public NoiseModelFactor {
|
||||
class GTSAM_EXPORT NoiseModelFactor6: public NoiseModelFactor {
|
||||
|
||||
public:
|
||||
|
||||
|
|
|
|||
|
|
@ -35,7 +35,6 @@
|
|||
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include <limits>
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
|
@ -91,89 +90,25 @@ bool NonlinearFactorGraph::equals(const NonlinearFactorGraph& other, double tol)
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void NonlinearFactorGraph::saveGraph(std::ostream &stm, const Values& values,
|
||||
const GraphvizFormatting& formatting,
|
||||
const KeyFormatter& keyFormatter) const
|
||||
{
|
||||
stm << "graph {\n";
|
||||
stm << " size=\"" << formatting.figureWidthInches << "," <<
|
||||
formatting.figureHeightInches << "\";\n\n";
|
||||
void NonlinearFactorGraph::dot(std::ostream& os, const Values& values,
|
||||
const KeyFormatter& keyFormatter,
|
||||
const GraphvizFormatting& writer) const {
|
||||
writer.writePreamble(&os);
|
||||
|
||||
// Find bounds (imperative)
|
||||
KeySet keys = this->keys();
|
||||
|
||||
// Local utility function to extract x and y coordinates
|
||||
struct { boost::optional<Point2> operator()(
|
||||
const Value& value, const GraphvizFormatting& graphvizFormatting)
|
||||
{
|
||||
Vector3 t;
|
||||
if (const GenericValue<Pose2>* p = dynamic_cast<const GenericValue<Pose2>*>(&value)) {
|
||||
t << p->value().x(), p->value().y(), 0;
|
||||
} else if (const GenericValue<Point2>* p = dynamic_cast<const GenericValue<Point2>*>(&value)) {
|
||||
t << p->value().x(), p->value().y(), 0;
|
||||
} else if (const GenericValue<Pose3>* p = dynamic_cast<const GenericValue<Pose3>*>(&value)) {
|
||||
t = p->value().translation();
|
||||
} else if (const GenericValue<Point3>* p = dynamic_cast<const GenericValue<Point3>*>(&value)) {
|
||||
t = p->value();
|
||||
} else {
|
||||
return boost::none;
|
||||
}
|
||||
double x, y;
|
||||
switch (graphvizFormatting.paperHorizontalAxis) {
|
||||
case GraphvizFormatting::X: x = t.x(); break;
|
||||
case GraphvizFormatting::Y: x = t.y(); break;
|
||||
case GraphvizFormatting::Z: x = t.z(); break;
|
||||
case GraphvizFormatting::NEGX: x = -t.x(); break;
|
||||
case GraphvizFormatting::NEGY: x = -t.y(); break;
|
||||
case GraphvizFormatting::NEGZ: x = -t.z(); break;
|
||||
default: throw std::runtime_error("Invalid enum value");
|
||||
}
|
||||
switch (graphvizFormatting.paperVerticalAxis) {
|
||||
case GraphvizFormatting::X: y = t.x(); break;
|
||||
case GraphvizFormatting::Y: y = t.y(); break;
|
||||
case GraphvizFormatting::Z: y = t.z(); break;
|
||||
case GraphvizFormatting::NEGX: y = -t.x(); break;
|
||||
case GraphvizFormatting::NEGY: y = -t.y(); break;
|
||||
case GraphvizFormatting::NEGZ: y = -t.z(); break;
|
||||
default: throw std::runtime_error("Invalid enum value");
|
||||
}
|
||||
return Point2(x,y);
|
||||
}} getXY;
|
||||
|
||||
// Find bounds
|
||||
double minX = numeric_limits<double>::infinity(), maxX = -numeric_limits<double>::infinity();
|
||||
double minY = numeric_limits<double>::infinity(), maxY = -numeric_limits<double>::infinity();
|
||||
for (const Key& key : keys) {
|
||||
if (values.exists(key)) {
|
||||
boost::optional<Point2> xy = getXY(values.at(key), formatting);
|
||||
if(xy) {
|
||||
if(xy->x() < minX)
|
||||
minX = xy->x();
|
||||
if(xy->x() > maxX)
|
||||
maxX = xy->x();
|
||||
if(xy->y() < minY)
|
||||
minY = xy->y();
|
||||
if(xy->y() > maxY)
|
||||
maxY = xy->y();
|
||||
}
|
||||
}
|
||||
}
|
||||
Vector2 min = writer.findBounds(values, keys);
|
||||
|
||||
// Create nodes for each variable in the graph
|
||||
for(Key key: keys){
|
||||
// Label the node with the label from the KeyFormatter
|
||||
stm << " var" << key << "[label=\"" << keyFormatter(key) << "\"";
|
||||
if(values.exists(key)) {
|
||||
boost::optional<Point2> xy = getXY(values.at(key), formatting);
|
||||
if(xy)
|
||||
stm << ", pos=\"" << formatting.scale*(xy->x() - minX) << "," << formatting.scale*(xy->y() - minY) << "!\"";
|
||||
}
|
||||
stm << "];\n";
|
||||
for (Key key : keys) {
|
||||
auto position = writer.variablePos(values, min, key);
|
||||
writer.DrawVariable(key, keyFormatter, position, &os);
|
||||
}
|
||||
stm << "\n";
|
||||
os << "\n";
|
||||
|
||||
if (formatting.mergeSimilarFactors) {
|
||||
if (writer.mergeSimilarFactors) {
|
||||
// Remove duplicate factors
|
||||
std::set<KeyVector > structure;
|
||||
std::set<KeyVector> structure;
|
||||
for (const sharedFactor& factor : factors_) {
|
||||
if (factor) {
|
||||
KeyVector factorKeys = factor->keys();
|
||||
|
|
@ -184,86 +119,40 @@ void NonlinearFactorGraph::saveGraph(std::ostream &stm, const Values& values,
|
|||
|
||||
// Create factors and variable connections
|
||||
size_t i = 0;
|
||||
for(const KeyVector& factorKeys: structure){
|
||||
// Make each factor a dot
|
||||
stm << " factor" << i << "[label=\"\", shape=point";
|
||||
{
|
||||
map<size_t, Point2>::const_iterator pos = formatting.factorPositions.find(i);
|
||||
if(pos != formatting.factorPositions.end())
|
||||
stm << ", pos=\"" << formatting.scale*(pos->second.x() - minX) << ","
|
||||
<< formatting.scale*(pos->second.y() - minY) << "!\"";
|
||||
}
|
||||
stm << "];\n";
|
||||
|
||||
// Make factor-variable connections
|
||||
for(Key key: factorKeys) {
|
||||
stm << " var" << key << "--" << "factor" << i << ";\n";
|
||||
}
|
||||
|
||||
++ i;
|
||||
for (const KeyVector& factorKeys : structure) {
|
||||
writer.processFactor(i++, factorKeys, boost::none, &os);
|
||||
}
|
||||
} else {
|
||||
// Create factors and variable connections
|
||||
for(size_t i = 0; i < size(); ++i) {
|
||||
for (size_t i = 0; i < size(); ++i) {
|
||||
const NonlinearFactor::shared_ptr& factor = at(i);
|
||||
// If null pointer, move on to the next
|
||||
if (!factor) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (formatting.plotFactorPoints) {
|
||||
const KeyVector& keys = factor->keys();
|
||||
if (formatting.binaryEdges && keys.size() == 2) {
|
||||
stm << " var" << keys[0] << "--"
|
||||
<< "var" << keys[1] << ";\n";
|
||||
} else {
|
||||
// Make each factor a dot
|
||||
stm << " factor" << i << "[label=\"\", shape=point";
|
||||
{
|
||||
map<size_t, Point2>::const_iterator pos =
|
||||
formatting.factorPositions.find(i);
|
||||
if (pos != formatting.factorPositions.end())
|
||||
stm << ", pos=\"" << formatting.scale * (pos->second.x() - minX)
|
||||
<< "," << formatting.scale * (pos->second.y() - minY)
|
||||
<< "!\"";
|
||||
}
|
||||
stm << "];\n";
|
||||
|
||||
// Make factor-variable connections
|
||||
if (formatting.connectKeysToFactor && factor) {
|
||||
for (Key key : *factor) {
|
||||
stm << " var" << key << "--"
|
||||
<< "factor" << i << ";\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Key k;
|
||||
bool firstTime = true;
|
||||
for (Key key : *this->at(i)) {
|
||||
if (firstTime) {
|
||||
k = key;
|
||||
firstTime = false;
|
||||
continue;
|
||||
}
|
||||
stm << " var" << key << "--"
|
||||
<< "var" << k << ";\n";
|
||||
k = key;
|
||||
}
|
||||
if (factor) {
|
||||
const KeyVector& factorKeys = factor->keys();
|
||||
writer.processFactor(i, factorKeys, writer.factorPos(min, i), &os);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stm << "}\n";
|
||||
os << "}\n";
|
||||
std::flush(os);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void NonlinearFactorGraph::saveGraph(
|
||||
const std::string& file, const Values& values,
|
||||
const GraphvizFormatting& graphvizFormatting,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
std::ofstream of(file);
|
||||
saveGraph(of, values, graphvizFormatting, keyFormatter);
|
||||
std::string NonlinearFactorGraph::dot(const Values& values,
|
||||
const KeyFormatter& keyFormatter,
|
||||
const GraphvizFormatting& writer) const {
|
||||
std::stringstream ss;
|
||||
dot(ss, values, keyFormatter, writer);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void NonlinearFactorGraph::saveGraph(const std::string& filename,
|
||||
const Values& values,
|
||||
const KeyFormatter& keyFormatter,
|
||||
const GraphvizFormatting& writer) const {
|
||||
std::ofstream of(filename);
|
||||
dot(of, values, keyFormatter, writer);
|
||||
of.close();
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@
|
|||
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
#include <gtsam/nonlinear/GraphvizFormatting.h>
|
||||
#include <gtsam/inference/FactorGraph.h>
|
||||
#include <gtsam/nonlinear/PriorFactor.h>
|
||||
|
||||
|
|
@ -41,32 +42,6 @@ namespace gtsam {
|
|||
template<typename T>
|
||||
class ExpressionFactor;
|
||||
|
||||
/**
|
||||
* Formatting options when saving in GraphViz format using
|
||||
* NonlinearFactorGraph::saveGraph.
|
||||
*/
|
||||
struct GTSAM_EXPORT GraphvizFormatting {
|
||||
enum Axis { X, Y, Z, NEGX, NEGY, NEGZ }; ///< World axes to be assigned to paper axes
|
||||
Axis paperHorizontalAxis; ///< The world axis assigned to the horizontal paper axis
|
||||
Axis paperVerticalAxis; ///< The world axis assigned to the vertical paper axis
|
||||
double figureWidthInches; ///< The figure width on paper in inches
|
||||
double figureHeightInches; ///< The figure height on paper in inches
|
||||
double scale; ///< Scale all positions to reduce / increase density
|
||||
bool mergeSimilarFactors; ///< Merge multiple factors that have the same connectivity
|
||||
bool plotFactorPoints; ///< Plots each factor as a dot between the variables
|
||||
bool connectKeysToFactor; ///< Draw a line from each key within a factor to the dot of the factor
|
||||
bool binaryEdges; ///< just use non-dotted edges for binary factors
|
||||
std::map<size_t, Point2> factorPositions; ///< (optional for each factor) Manually specify factor "dot" positions.
|
||||
/// Default constructor sets up robot coordinates. Paper horizontal is robot Y,
|
||||
/// paper vertical is robot X. Default figure size of 5x5 in.
|
||||
GraphvizFormatting() :
|
||||
paperHorizontalAxis(Y), paperVerticalAxis(X),
|
||||
figureWidthInches(5), figureHeightInches(5), scale(1),
|
||||
mergeSimilarFactors(false), plotFactorPoints(true),
|
||||
connectKeysToFactor(true), binaryEdges(true) {}
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* A non-linear factor graph is a graph of non-Gaussian, i.e. non-linear factors,
|
||||
* which derive from NonlinearFactor. The values structures are typically (in SAM) more general
|
||||
|
|
@ -115,21 +90,6 @@ namespace gtsam {
|
|||
/** Test equality */
|
||||
bool equals(const NonlinearFactorGraph& other, double tol = 1e-9) const;
|
||||
|
||||
/// Write the graph in GraphViz format for visualization
|
||||
void saveGraph(std::ostream& stm, const Values& values = Values(),
|
||||
const GraphvizFormatting& graphvizFormatting = GraphvizFormatting(),
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/**
|
||||
* Write the graph in GraphViz format to file for visualization.
|
||||
*
|
||||
* This is a wrapper friendly version since wrapped languages don't have
|
||||
* access to C++ streams.
|
||||
*/
|
||||
void saveGraph(const std::string& file, const Values& values = Values(),
|
||||
const GraphvizFormatting& graphvizFormatting = GraphvizFormatting(),
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/** unnormalized error, \f$ 0.5 \sum_i (h_i(X_i)-z)^2/\sigma^2 \f$ in the most common case */
|
||||
double error(const Values& values) const;
|
||||
|
||||
|
|
@ -246,7 +206,32 @@ namespace gtsam {
|
|||
emplace_shared<PriorFactor<T>>(key, prior, covariance);
|
||||
}
|
||||
|
||||
private:
|
||||
/// @name Graph Display
|
||||
/// @{
|
||||
|
||||
using FactorGraph::dot;
|
||||
using FactorGraph::saveGraph;
|
||||
|
||||
/// Output to graphviz format, stream version, with Values/extra options.
|
||||
void dot(std::ostream& os, const Values& values,
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
const GraphvizFormatting& graphvizFormatting =
|
||||
GraphvizFormatting()) const;
|
||||
|
||||
/// Output to graphviz format string, with Values/extra options.
|
||||
std::string dot(const Values& values,
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
const GraphvizFormatting& graphvizFormatting =
|
||||
GraphvizFormatting()) const;
|
||||
|
||||
/// output to file with graphviz format, with Values/extra options.
|
||||
void saveGraph(const std::string& filename, const Values& values,
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
const GraphvizFormatting& graphvizFormatting =
|
||||
GraphvizFormatting()) const;
|
||||
/// @}
|
||||
|
||||
private:
|
||||
|
||||
/**
|
||||
* Linearize from Scatter rather than from Ordering. Made private because
|
||||
|
|
@ -275,6 +260,21 @@ namespace gtsam {
|
|||
Values GTSAM_DEPRECATED updateCholesky(const Values& values, boost::none_t,
|
||||
const Dampen& dampen = nullptr) const
|
||||
{return updateCholesky(values, dampen);}
|
||||
|
||||
/** \deprecated */
|
||||
void GTSAM_DEPRECATED saveGraph(
|
||||
std::ostream& os, const Values& values = Values(),
|
||||
const GraphvizFormatting& graphvizFormatting = GraphvizFormatting(),
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const {
|
||||
dot(os, values, keyFormatter, graphvizFormatting);
|
||||
}
|
||||
/** \deprecated */
|
||||
void GTSAM_DEPRECATED saveGraph(
|
||||
const std::string& filename, const Values& values,
|
||||
const GraphvizFormatting& graphvizFormatting = GraphvizFormatting(),
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const {
|
||||
saveGraph(filename, values, keyFormatter, graphvizFormatting);
|
||||
}
|
||||
#endif
|
||||
|
||||
};
|
||||
|
|
|
|||
|
|
@ -391,4 +391,10 @@ namespace gtsam {
|
|||
update(j, static_cast<const Value&>(GenericValue<ValueType>(val)));
|
||||
}
|
||||
|
||||
// insert_or_assign with templated value
|
||||
template <typename ValueType>
|
||||
void Values::insert_or_assign(Key j, const ValueType& val) {
|
||||
insert_or_assign(j, static_cast<const Value&>(GenericValue<ValueType>(val)));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -171,6 +171,25 @@ namespace gtsam {
|
|||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************ */
|
||||
void Values::insert_or_assign(Key j, const Value& val) {
|
||||
if (this->exists(j)) {
|
||||
// If key already exists, perform an update.
|
||||
this->update(j, val);
|
||||
} else {
|
||||
// If key does not exist, perform an insert.
|
||||
this->insert(j, val);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************ */
|
||||
void Values::insert_or_assign(const Values& values) {
|
||||
for (const_iterator key_value = values.begin(); key_value != values.end();
|
||||
++key_value) {
|
||||
this->insert_or_assign(key_value->key, key_value->value);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void Values::erase(Key j) {
|
||||
KeyValueMap::iterator item = values_.find(j);
|
||||
|
|
|
|||
|
|
@ -285,6 +285,19 @@ namespace gtsam {
|
|||
/** update the current available values without adding new ones */
|
||||
void update(const Values& values);
|
||||
|
||||
/// If key j exists, update value, else perform an insert.
|
||||
void insert_or_assign(Key j, const Value& val);
|
||||
|
||||
/**
|
||||
* Update a set of variables.
|
||||
* If any variable key doe not exist, then perform an insert.
|
||||
*/
|
||||
void insert_or_assign(const Values& values);
|
||||
|
||||
/// Templated version to insert_or_assign a variable with the given j.
|
||||
template <typename ValueType>
|
||||
void insert_or_assign(Key j, const ValueType& val);
|
||||
|
||||
/** Remove a variable from the config, throws KeyDoesNotExist<J> if j is not present */
|
||||
void erase(Key j);
|
||||
|
||||
|
|
|
|||
|
|
@ -131,9 +131,6 @@ class Ordering {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
|
|
@ -196,10 +193,12 @@ class NonlinearFactorGraph {
|
|||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
|
||||
void saveGraph(const string& s) const;
|
||||
string dot(
|
||||
const gtsam::Values& values,
|
||||
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter);
|
||||
void saveGraph(const string& s, const gtsam::Values& values,
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
|
|
@ -275,6 +274,7 @@ class Values {
|
|||
|
||||
void insert(const gtsam::Values& values);
|
||||
void update(const gtsam::Values& values);
|
||||
void insert_or_assign(const gtsam::Values& values);
|
||||
void erase(size_t j);
|
||||
void swap(gtsam::Values& values);
|
||||
|
||||
|
|
@ -289,9 +289,6 @@ class Values {
|
|||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
|
||||
// New in 4.0, we have to specialize every insert/update/at to generate
|
||||
// wrappers Instead of the old: void insert(size_t j, const gtsam::Value&
|
||||
// value); void update(size_t j, const gtsam::Value& val); gtsam::Value
|
||||
|
|
@ -351,6 +348,32 @@ class Values {
|
|||
void update(size_t j, Matrix matrix);
|
||||
void update(size_t j, double c);
|
||||
|
||||
void insert_or_assign(size_t j, const gtsam::Point2& point2);
|
||||
void insert_or_assign(size_t j, const gtsam::Point3& point3);
|
||||
void insert_or_assign(size_t j, const gtsam::Rot2& rot2);
|
||||
void insert_or_assign(size_t j, const gtsam::Pose2& pose2);
|
||||
void insert_or_assign(size_t j, const gtsam::SO3& R);
|
||||
void insert_or_assign(size_t j, const gtsam::SO4& Q);
|
||||
void insert_or_assign(size_t j, const gtsam::SOn& P);
|
||||
void insert_or_assign(size_t j, const gtsam::Rot3& rot3);
|
||||
void insert_or_assign(size_t j, const gtsam::Pose3& pose3);
|
||||
void insert_or_assign(size_t j, const gtsam::Unit3& unit3);
|
||||
void insert_or_assign(size_t j, const gtsam::Cal3_S2& cal3_s2);
|
||||
void insert_or_assign(size_t j, const gtsam::Cal3DS2& cal3ds2);
|
||||
void insert_or_assign(size_t j, const gtsam::Cal3Bundler& cal3bundler);
|
||||
void insert_or_assign(size_t j, const gtsam::Cal3Fisheye& cal3fisheye);
|
||||
void insert_or_assign(size_t j, const gtsam::Cal3Unified& cal3unified);
|
||||
void insert_or_assign(size_t j, const gtsam::EssentialMatrix& essential_matrix);
|
||||
void insert_or_assign(size_t j, const gtsam::PinholeCamera<gtsam::Cal3_S2>& camera);
|
||||
void insert_or_assign(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Bundler>& camera);
|
||||
void insert_or_assign(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Fisheye>& camera);
|
||||
void insert_or_assign(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Unified>& camera);
|
||||
void insert_or_assign(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
|
||||
void insert_or_assign(size_t j, const gtsam::NavState& nav_state);
|
||||
void insert_or_assign(size_t j, Vector vector);
|
||||
void insert_or_assign(size_t j, Matrix matrix);
|
||||
void insert_or_assign(size_t j, double c);
|
||||
|
||||
template <T = {gtsam::Point2,
|
||||
gtsam::Point3,
|
||||
gtsam::Rot2,
|
||||
|
|
@ -764,6 +787,12 @@ class ISAM2 {
|
|||
|
||||
void printStats() const;
|
||||
gtsam::VectorValues gradientAtZero() const;
|
||||
|
||||
string dot(const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
void saveGraph(string s,
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearISAM.h>
|
||||
|
|
@ -824,9 +853,6 @@ virtual class PriorFactor : gtsam::NoiseModelFactor {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearEquality.h>
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@
|
|||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/utilities.h>
|
||||
|
||||
using namespace gtsam;
|
||||
|
|
@ -55,6 +54,26 @@ TEST(Utilities, ExtractPoint3) {
|
|||
EXPECT_LONGS_EQUAL(2, all_points.rows());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Utilities, ExtractVector) {
|
||||
// Test normal case with 3 vectors and 1 non-vector (ignore non-vector)
|
||||
auto values = Values();
|
||||
values.insert(X(0), (Vector(4) << 1, 2, 3, 4).finished());
|
||||
values.insert(X(2), (Vector(4) << 13, 14, 15, 16).finished());
|
||||
values.insert(X(1), (Vector(4) << 6, 7, 8, 9).finished());
|
||||
values.insert(X(3), Pose3());
|
||||
auto actual = utilities::extractVectors(values, 'x');
|
||||
auto expected =
|
||||
(Matrix(3, 4) << 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16).finished();
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
|
||||
// Check that mis-sized vectors fail
|
||||
values.insert(X(4), (Vector(2) << 1, 2).finished());
|
||||
THROWS_EXCEPTION(utilities::extractVectors(values, 'x'));
|
||||
values.update(X(4), (Vector(6) << 1, 2, 3, 4, 5, 6).finished());
|
||||
THROWS_EXCEPTION(utilities::extractVectors(values, 'x'));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
srand(time(nullptr));
|
||||
|
|
@ -172,6 +172,22 @@ TEST( Values, update_element )
|
|||
CHECK(assert_equal((Vector)v2, cfg.at<Vector3>(key1)));
|
||||
}
|
||||
|
||||
TEST(Values, InsertOrAssign) {
|
||||
Values values;
|
||||
Key X(0);
|
||||
double x = 1;
|
||||
|
||||
CHECK(values.size() == 0);
|
||||
// This should perform an insert.
|
||||
values.insert_or_assign(X, x);
|
||||
EXPECT(assert_equal(values.at<double>(X), x));
|
||||
|
||||
// This should perform an update.
|
||||
double y = 2;
|
||||
values.insert_or_assign(X, y);
|
||||
EXPECT(assert_equal(values.at<double>(X), y));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Values, basic_functions)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@
|
|||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/slam/ProjectionFactor.h>
|
||||
#include <gtsam/linear/Sampler.h>
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
|
|
@ -162,6 +163,34 @@ Matrix extractPose3(const Values& values) {
|
|||
return result;
|
||||
}
|
||||
|
||||
/// Extract all Vector values with a given symbol character into an mxn matrix,
|
||||
/// where m is the number of symbols that match the character and n is the
|
||||
/// dimension of the variables. If not all variables have dimension n, then a
|
||||
/// runtime error will be thrown. The order of returned values are sorted by
|
||||
/// the symbol.
|
||||
/// For example, calling extractVector(values, 'x'), where values contains 200
|
||||
/// variables x1, x2, ..., x200 of type Vector each 5-dimensional, will return a
|
||||
/// 200x5 matrix with row i containing xi.
|
||||
Matrix extractVectors(const Values& values, char c) {
|
||||
Values::ConstFiltered<Vector> vectors =
|
||||
values.filter<Vector>(Symbol::ChrTest(c));
|
||||
if (vectors.size() == 0) {
|
||||
return Matrix();
|
||||
}
|
||||
auto dim = vectors.begin()->value.size();
|
||||
Matrix result(vectors.size(), dim);
|
||||
Eigen::Index rowi = 0;
|
||||
for (const auto& kv : vectors) {
|
||||
if (kv.value.size() != dim) {
|
||||
throw std::runtime_error(
|
||||
"Tried to extract different-sized vectors into a single matrix");
|
||||
}
|
||||
result.row(rowi) = kv.value;
|
||||
++rowi;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/// Perturb all Point2 values using normally distributed noise
|
||||
void perturbPoint2(Values& values, double sigma, int32_t seed = 42u) {
|
||||
noiseModel::Isotropic::shared_ptr model =
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ namespace gtsam {
|
|||
unit translations in a projection direction.
|
||||
@addtogroup SFM
|
||||
*/
|
||||
class MFAS {
|
||||
class GTSAM_EXPORT MFAS {
|
||||
public:
|
||||
// used to represent edges between two nodes in the graph. When used in
|
||||
// translation averaging for global SfM
|
||||
|
|
|
|||
|
|
@ -1,7 +1,20 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2014, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/*
|
||||
* @file EssentialMatrixFactor.cpp
|
||||
* @file EssentialMatrixFactor.h
|
||||
* @brief EssentialMatrixFactor class
|
||||
* @author Frank Dellaert
|
||||
* @author Ayush Baid
|
||||
* @author Akshay Krishnan
|
||||
* @date December 17, 2013
|
||||
*/
|
||||
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@ namespace gtsam {
|
|||
* @tparam CAMERA should behave like a PinholeCamera.
|
||||
*/
|
||||
template<class CAMERA>
|
||||
class SmartFactorBase: public NonlinearFactor {
|
||||
class GTSAM_EXPORT SmartFactorBase: public NonlinearFactor {
|
||||
|
||||
private:
|
||||
typedef NonlinearFactor Base;
|
||||
|
|
|
|||
|
|
@ -41,11 +41,10 @@ namespace gtsam {
|
|||
* If the calibration should be optimized, as well, use SmartProjectionFactor instead!
|
||||
* @addtogroup SLAM
|
||||
*/
|
||||
template<class CALIBRATION>
|
||||
class SmartProjectionPoseFactor: public SmartProjectionFactor<
|
||||
PinholePose<CALIBRATION> > {
|
||||
|
||||
private:
|
||||
template <class CALIBRATION>
|
||||
class GTSAM_EXPORT SmartProjectionPoseFactor
|
||||
: public SmartProjectionFactor<PinholePose<CALIBRATION> > {
|
||||
private:
|
||||
typedef PinholePose<CALIBRATION> Camera;
|
||||
typedef SmartProjectionFactor<Camera> Base;
|
||||
typedef SmartProjectionPoseFactor<CALIBRATION> This;
|
||||
|
|
@ -156,7 +155,6 @@ public:
|
|||
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar & BOOST_SERIALIZATION_NVP(K_);
|
||||
}
|
||||
|
||||
};
|
||||
// end of class declaration
|
||||
|
||||
|
|
|
|||
|
|
@ -54,6 +54,8 @@ class SmartProjectionRigFactor : public SmartProjectionFactor<CAMERA> {
|
|||
typedef SmartProjectionFactor<CAMERA> Base;
|
||||
typedef SmartProjectionRigFactor<CAMERA> This;
|
||||
typedef typename CAMERA::CalibrationType CALIBRATION;
|
||||
typedef typename CAMERA::Measurement MEASUREMENT;
|
||||
typedef typename CAMERA::MeasurementVector MEASUREMENTS;
|
||||
|
||||
static const int DimPose = 6; ///< Pose3 dimension
|
||||
static const int ZDim = 2; ///< Measurement dimension
|
||||
|
|
@ -118,7 +120,7 @@ class SmartProjectionRigFactor : public SmartProjectionFactor<CAMERA> {
|
|||
* @param cameraId ID of the camera in the rig taking the measurement (default
|
||||
* 0)
|
||||
*/
|
||||
void add(const Point2& measured, const Key& poseKey,
|
||||
void add(const MEASUREMENT& measured, const Key& poseKey,
|
||||
const size_t& cameraId = 0) {
|
||||
// store measurement and key
|
||||
this->measured_.push_back(measured);
|
||||
|
|
@ -144,7 +146,7 @@ class SmartProjectionRigFactor : public SmartProjectionFactor<CAMERA> {
|
|||
* @param cameraIds IDs of the cameras in the rig taking each measurement
|
||||
* (same order as the measurements)
|
||||
*/
|
||||
void add(const Point2Vector& measurements, const KeyVector& poseKeys,
|
||||
void add(const MEASUREMENTS& measurements, const KeyVector& poseKeys,
|
||||
const FastVector<size_t>& cameraIds = FastVector<size_t>()) {
|
||||
if (poseKeys.size() != measurements.size() ||
|
||||
(poseKeys.size() != cameraIds.size() && cameraIds.size() != 0)) {
|
||||
|
|
|
|||
|
|
@ -33,18 +33,18 @@ class TriangulationFactor: public NoiseModelFactor1<Point3> {
|
|||
public:
|
||||
|
||||
/// CAMERA type
|
||||
typedef CAMERA Camera;
|
||||
using Camera = CAMERA;
|
||||
|
||||
protected:
|
||||
|
||||
/// shorthand for base class type
|
||||
typedef NoiseModelFactor1<Point3> Base;
|
||||
using Base = NoiseModelFactor1<Point3>;
|
||||
|
||||
/// shorthand for this class
|
||||
typedef TriangulationFactor<CAMERA> This;
|
||||
using This = TriangulationFactor<CAMERA>;
|
||||
|
||||
/// shorthand for measurement type, e.g. Point2 or StereoPoint2
|
||||
typedef typename CAMERA::Measurement Measurement;
|
||||
using Measurement = typename CAMERA::Measurement;
|
||||
|
||||
// Keep a copy of measurement and calibration for I/O
|
||||
const CAMERA camera_; ///< CAMERA in which this landmark was seen
|
||||
|
|
@ -55,9 +55,10 @@ protected:
|
|||
const bool verboseCheirality_; ///< If true, prints text for Cheirality exceptions (default: false)
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
/// shorthand for a smart pointer to a factor
|
||||
typedef boost::shared_ptr<This> shared_ptr;
|
||||
using shared_ptr = boost::shared_ptr<This>;
|
||||
|
||||
/// Default constructor
|
||||
TriangulationFactor() :
|
||||
|
|
@ -129,7 +130,7 @@ public:
|
|||
<< std::endl;
|
||||
if (throwCheirality_)
|
||||
throw e;
|
||||
return Eigen::Matrix<double,traits<Measurement>::dimension,1>::Constant(2.0 * camera_.calibration().fx());
|
||||
return camera_.defaultErrorWhenTriangulatingBehindCamera();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -21,9 +21,6 @@ virtual class BetweenFactor : gtsam::NoiseModelFactor {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
};
|
||||
|
||||
#include <gtsam/slam/ProjectionFactor.h>
|
||||
|
|
@ -168,6 +165,10 @@ template <POSE>
|
|||
virtual class PoseTranslationPrior : gtsam::NoiseModelFactor {
|
||||
PoseTranslationPrior(size_t key, const POSE& pose_z,
|
||||
const gtsam::noiseModel::Base* noiseModel);
|
||||
POSE::Translation measured() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
typedef gtsam::PoseTranslationPrior<gtsam::Pose2> PoseTranslationPrior2D;
|
||||
|
|
@ -178,6 +179,7 @@ template <POSE>
|
|||
virtual class PoseRotationPrior : gtsam::NoiseModelFactor {
|
||||
PoseRotationPrior(size_t key, const POSE& pose_z,
|
||||
const gtsam::noiseModel::Base* noiseModel);
|
||||
POSE::Rotation measured() const;
|
||||
};
|
||||
|
||||
typedef gtsam::PoseRotationPrior<gtsam::Pose2> PoseRotationPrior2D;
|
||||
|
|
@ -188,6 +190,21 @@ virtual class EssentialMatrixFactor : gtsam::NoiseModelFactor {
|
|||
EssentialMatrixFactor(size_t key, const gtsam::Point2& pA,
|
||||
const gtsam::Point2& pB,
|
||||
const gtsam::noiseModel::Base* noiseModel);
|
||||
void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
bool equals(const gtsam::EssentialMatrixFactor& other, double tol) const;
|
||||
Vector evaluateError(const gtsam::EssentialMatrix& E) const;
|
||||
};
|
||||
|
||||
#include <gtsam/slam/EssentialMatrixConstraint.h>
|
||||
virtual class EssentialMatrixConstraint : gtsam::NoiseModelFactor {
|
||||
EssentialMatrixConstraint(size_t key1, size_t key2, const gtsam::EssentialMatrix &measuredE,
|
||||
const gtsam::noiseModel::Base *model);
|
||||
void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
bool equals(const gtsam::EssentialMatrixConstraint& other, double tol) const;
|
||||
Vector evaluateError(const gtsam::Pose3& p1, const gtsam::Pose3& p2) const;
|
||||
const gtsam::EssentialMatrix& measured() const;
|
||||
};
|
||||
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
|
@ -211,9 +228,6 @@ class SfmTrack {
|
|||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
|
||||
// enabling function to compare objects
|
||||
bool equals(const gtsam::SfmTrack& expected, double tol) const;
|
||||
};
|
||||
|
|
@ -230,9 +244,6 @@ class SfmData {
|
|||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// enable pickling in python
|
||||
void pickle() const;
|
||||
|
||||
// enabling function to compare objects
|
||||
bool equals(const gtsam::SfmData& expected, double tol) const;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -17,11 +17,13 @@
|
|||
*/
|
||||
|
||||
#pragma once
|
||||
#include <gtsam/slam/SmartProjectionPoseFactor.h>
|
||||
#include <gtsam/slam/SmartProjectionFactor.h>
|
||||
#include <gtsam/slam/GeneralSFMFactor.h>
|
||||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <gtsam/geometry/Cal3Bundler.h>
|
||||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <gtsam/geometry/SphericalCamera.h>
|
||||
#include <gtsam/slam/GeneralSFMFactor.h>
|
||||
#include <gtsam/slam/SmartProjectionFactor.h>
|
||||
#include <gtsam/slam/SmartProjectionPoseFactor.h>
|
||||
|
||||
#include "../SmartProjectionRigFactor.h"
|
||||
|
||||
using namespace std;
|
||||
|
|
@ -44,7 +46,7 @@ Pose3 pose_above = level_pose * Pose3(Rot3(), Point3(0, -1, 0));
|
|||
// Create a noise unit2 for the pixel error
|
||||
static SharedNoiseModel unit2(noiseModel::Unit::Create(2));
|
||||
|
||||
static double fov = 60; // degrees
|
||||
static double fov = 60; // degrees
|
||||
static size_t w = 640, h = 480;
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -63,7 +65,7 @@ Camera cam2(pose_right, K2);
|
|||
Camera cam3(pose_above, K2);
|
||||
typedef GeneralSFMFactor<Camera, Point3> SFMFactor;
|
||||
SmartProjectionParams params;
|
||||
}
|
||||
} // namespace vanilla
|
||||
|
||||
/* ************************************************************************* */
|
||||
// default Cal3_S2 poses
|
||||
|
|
@ -78,7 +80,7 @@ Camera level_camera_right(pose_right, sharedK);
|
|||
Camera cam1(level_pose, sharedK);
|
||||
Camera cam2(pose_right, sharedK);
|
||||
Camera cam3(pose_above, sharedK);
|
||||
}
|
||||
} // namespace vanillaPose
|
||||
|
||||
/* ************************************************************************* */
|
||||
// default Cal3_S2 poses
|
||||
|
|
@ -93,7 +95,7 @@ Camera level_camera_right(pose_right, sharedK2);
|
|||
Camera cam1(level_pose, sharedK2);
|
||||
Camera cam2(pose_right, sharedK2);
|
||||
Camera cam3(pose_above, sharedK2);
|
||||
}
|
||||
} // namespace vanillaPose2
|
||||
|
||||
/* *************************************************************************/
|
||||
// Cal3Bundler cameras
|
||||
|
|
@ -111,7 +113,8 @@ Camera cam1(level_pose, K);
|
|||
Camera cam2(pose_right, K);
|
||||
Camera cam3(pose_above, K);
|
||||
typedef GeneralSFMFactor<Camera, Point3> SFMFactor;
|
||||
}
|
||||
} // namespace bundler
|
||||
|
||||
/* *************************************************************************/
|
||||
// Cal3Bundler poses
|
||||
namespace bundlerPose {
|
||||
|
|
@ -119,35 +122,50 @@ typedef PinholePose<Cal3Bundler> Camera;
|
|||
typedef CameraSet<Camera> Cameras;
|
||||
typedef SmartProjectionPoseFactor<Cal3Bundler> SmartFactor;
|
||||
typedef SmartProjectionRigFactor<Camera> SmartRigFactor;
|
||||
static boost::shared_ptr<Cal3Bundler> sharedBundlerK(
|
||||
new Cal3Bundler(500, 1e-3, 1e-3, 1000, 2000));
|
||||
static boost::shared_ptr<Cal3Bundler> sharedBundlerK(new Cal3Bundler(500, 1e-3,
|
||||
1e-3, 1000,
|
||||
2000));
|
||||
Camera level_camera(level_pose, sharedBundlerK);
|
||||
Camera level_camera_right(pose_right, sharedBundlerK);
|
||||
Camera cam1(level_pose, sharedBundlerK);
|
||||
Camera cam2(pose_right, sharedBundlerK);
|
||||
Camera cam3(pose_above, sharedBundlerK);
|
||||
}
|
||||
} // namespace bundlerPose
|
||||
|
||||
/* ************************************************************************* */
|
||||
// sphericalCamera
|
||||
namespace sphericalCamera {
|
||||
typedef SphericalCamera Camera;
|
||||
typedef CameraSet<Camera> Cameras;
|
||||
typedef SmartProjectionRigFactor<Camera> SmartFactorP;
|
||||
static EmptyCal::shared_ptr emptyK(new EmptyCal());
|
||||
Camera level_camera(level_pose);
|
||||
Camera level_camera_right(pose_right);
|
||||
Camera cam1(level_pose);
|
||||
Camera cam2(pose_right);
|
||||
Camera cam3(pose_above);
|
||||
} // namespace sphericalCamera
|
||||
/* *************************************************************************/
|
||||
|
||||
template<class CAMERA>
|
||||
template <class CAMERA>
|
||||
CAMERA perturbCameraPose(const CAMERA& camera) {
|
||||
Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 10, 0., -M_PI / 10),
|
||||
Point3(0.5, 0.1, 0.3));
|
||||
Pose3 noise_pose =
|
||||
Pose3(Rot3::Ypr(-M_PI / 10, 0., -M_PI / 10), Point3(0.5, 0.1, 0.3));
|
||||
Pose3 cameraPose = camera.pose();
|
||||
Pose3 perturbedCameraPose = cameraPose.compose(noise_pose);
|
||||
return CAMERA(perturbedCameraPose, camera.calibration());
|
||||
}
|
||||
|
||||
template<class CAMERA>
|
||||
void projectToMultipleCameras(const CAMERA& cam1, const CAMERA& cam2,
|
||||
const CAMERA& cam3, Point3 landmark, typename CAMERA::MeasurementVector& measurements_cam) {
|
||||
Point2 cam1_uv1 = cam1.project(landmark);
|
||||
Point2 cam2_uv1 = cam2.project(landmark);
|
||||
Point2 cam3_uv1 = cam3.project(landmark);
|
||||
template <class CAMERA>
|
||||
void projectToMultipleCameras(
|
||||
const CAMERA& cam1, const CAMERA& cam2, const CAMERA& cam3, Point3 landmark,
|
||||
typename CAMERA::MeasurementVector& measurements_cam) {
|
||||
typename CAMERA::Measurement cam1_uv1 = cam1.project(landmark);
|
||||
typename CAMERA::Measurement cam2_uv1 = cam2.project(landmark);
|
||||
typename CAMERA::Measurement cam3_uv1 = cam3.project(landmark);
|
||||
measurements_cam.push_back(cam1_uv1);
|
||||
measurements_cam.push_back(cam2_uv1);
|
||||
measurements_cam.push_back(cam3_uv1);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testEssentialMatrixConstraint.cpp
|
||||
* @file TestEssentialMatrixConstraint.cpp
|
||||
* @brief Unit tests for EssentialMatrixConstraint Class
|
||||
* @author Frank Dellaert
|
||||
* @author Pablo Alcantarilla
|
||||
|
|
|
|||
|
|
@ -55,8 +55,6 @@ Key cameraId3 = 2;
|
|||
static Point2 measurement1(323.0, 240.0);
|
||||
|
||||
LevenbergMarquardtParams lmParams;
|
||||
// Make more verbose like so (in tests):
|
||||
// params.verbosityLM = LevenbergMarquardtParams::SUMMARY;
|
||||
|
||||
/* ************************************************************************* */
|
||||
// default Cal3_S2 poses with rolling shutter effect
|
||||
|
|
@ -1187,10 +1185,9 @@ TEST(SmartProjectionRigFactor, optimization_3poses_measurementsFromSamePose) {
|
|||
// this factor is slightly slower (but comparable) to original
|
||||
// SmartProjectionPoseFactor
|
||||
//-Total: 0 CPU (0 times, 0 wall, 0.17 children, min: 0 max: 0)
|
||||
//| -SmartRigFactor LINEARIZE: 0.06 CPU
|
||||
//(10000 times, 0.061226 wall, 0.06 children, min: 0 max: 0)
|
||||
//| -SmartPoseFactor LINEARIZE: 0.06 CPU
|
||||
//(10000 times, 0.073037 wall, 0.06 children, min: 0 max: 0)
|
||||
//| -SmartRigFactor LINEARIZE: 0.05 CPU (10000 times, 0.057952 wall, 0.05
|
||||
// children, min: 0 max: 0) | -SmartPoseFactor LINEARIZE: 0.05 CPU (10000
|
||||
// times, 0.069647 wall, 0.05 children, min: 0 max: 0)
|
||||
/* *************************************************************************/
|
||||
TEST(SmartProjectionRigFactor, timing) {
|
||||
using namespace vanillaRig;
|
||||
|
|
@ -1249,6 +1246,355 @@ TEST(SmartProjectionRigFactor, timing) {
|
|||
}
|
||||
#endif
|
||||
|
||||
/* *************************************************************************/
|
||||
TEST(SmartProjectionFactorP, optimization_3poses_sphericalCamera) {
|
||||
using namespace sphericalCamera;
|
||||
Camera::MeasurementVector measurements_lmk1, measurements_lmk2,
|
||||
measurements_lmk3;
|
||||
|
||||
// Project three landmarks into three cameras
|
||||
projectToMultipleCameras<Camera>(cam1, cam2, cam3, landmark1,
|
||||
measurements_lmk1);
|
||||
projectToMultipleCameras<Camera>(cam1, cam2, cam3, landmark2,
|
||||
measurements_lmk2);
|
||||
projectToMultipleCameras<Camera>(cam1, cam2, cam3, landmark3,
|
||||
measurements_lmk3);
|
||||
|
||||
// create inputs
|
||||
KeyVector keys;
|
||||
keys.push_back(x1);
|
||||
keys.push_back(x2);
|
||||
keys.push_back(x3);
|
||||
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(Pose3::identity(), emptyK));
|
||||
|
||||
SmartProjectionParams params(
|
||||
gtsam::HESSIAN,
|
||||
gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors
|
||||
params.setRankTolerance(0.1);
|
||||
|
||||
SmartFactorP::shared_ptr smartFactor1(
|
||||
new SmartFactorP(model, cameraRig, params));
|
||||
smartFactor1->add(measurements_lmk1, keys);
|
||||
|
||||
SmartFactorP::shared_ptr smartFactor2(
|
||||
new SmartFactorP(model, cameraRig, params));
|
||||
smartFactor2->add(measurements_lmk2, keys);
|
||||
|
||||
SmartFactorP::shared_ptr smartFactor3(
|
||||
new SmartFactorP(model, cameraRig, params));
|
||||
smartFactor3->add(measurements_lmk3, keys);
|
||||
|
||||
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.push_back(smartFactor1);
|
||||
graph.push_back(smartFactor2);
|
||||
graph.push_back(smartFactor3);
|
||||
graph.addPrior(x1, level_pose, noisePrior);
|
||||
graph.addPrior(x2, pose_right, noisePrior);
|
||||
|
||||
Values groundTruth;
|
||||
groundTruth.insert(x1, level_pose);
|
||||
groundTruth.insert(x2, pose_right);
|
||||
groundTruth.insert(x3, pose_above);
|
||||
DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);
|
||||
|
||||
Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 10, 0., -M_PI / 100),
|
||||
Point3(0.2, 0.2, 0.2)); // note: larger noise!
|
||||
|
||||
Values values;
|
||||
values.insert(x1, level_pose);
|
||||
values.insert(x2, pose_right);
|
||||
// initialize third pose with some noise, we expect it to move back to
|
||||
// original pose_above
|
||||
values.insert(x3, pose_above * noise_pose);
|
||||
|
||||
DOUBLES_EQUAL(0.94148963675515274, graph.error(values), 1e-9);
|
||||
|
||||
Values result;
|
||||
LevenbergMarquardtOptimizer optimizer(graph, values, lmParams);
|
||||
result = optimizer.optimize();
|
||||
|
||||
EXPECT(assert_equal(pose_above, result.at<Pose3>(x3), 1e-5));
|
||||
}
|
||||
|
||||
#ifndef DISABLE_TIMING
|
||||
#include <gtsam/base/timing.h>
|
||||
// using spherical camera is slightly slower (but comparable) to
|
||||
// PinholePose<Cal3_S2>
|
||||
//| -SmartFactorP spherical LINEARIZE: 0.01 CPU (1000 times, 0.008178 wall,
|
||||
// 0.01 children, min: 0 max: 0) | -SmartFactorP pinhole LINEARIZE: 0.01 CPU
|
||||
//(1000 times, 0.005717 wall, 0.01 children, min: 0 max: 0)
|
||||
/* *************************************************************************/
|
||||
TEST(SmartProjectionFactorP, timing_sphericalCamera) {
|
||||
// create common data
|
||||
Rot3 R = Rot3::identity();
|
||||
Pose3 pose1 = Pose3(R, Point3(0, 0, 0));
|
||||
Pose3 pose2 = Pose3(R, Point3(1, 0, 0));
|
||||
Pose3 body_P_sensorId = Pose3::identity();
|
||||
Point3 landmark1(0, 0, 10);
|
||||
|
||||
// create spherical data
|
||||
EmptyCal::shared_ptr emptyK;
|
||||
SphericalCamera cam1_sphere(pose1, emptyK), cam2_sphere(pose2, emptyK);
|
||||
// Project 2 landmarks into 2 cameras
|
||||
std::vector<Unit3> measurements_lmk1_sphere;
|
||||
measurements_lmk1_sphere.push_back(cam1_sphere.project(landmark1));
|
||||
measurements_lmk1_sphere.push_back(cam2_sphere.project(landmark1));
|
||||
|
||||
// create Cal3_S2 data
|
||||
static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0));
|
||||
PinholePose<Cal3_S2> cam1(pose1, sharedKSimple), cam2(pose2, sharedKSimple);
|
||||
// Project 2 landmarks into 2 cameras
|
||||
std::vector<Point2> measurements_lmk1;
|
||||
measurements_lmk1.push_back(cam1.project(landmark1));
|
||||
measurements_lmk1.push_back(cam2.project(landmark1));
|
||||
|
||||
SmartProjectionParams params(
|
||||
gtsam::HESSIAN,
|
||||
gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors
|
||||
|
||||
size_t nrTests = 1000;
|
||||
|
||||
for (size_t i = 0; i < nrTests; i++) {
|
||||
boost::shared_ptr<CameraSet<SphericalCamera>> cameraRig(
|
||||
new CameraSet<SphericalCamera>()); // single camera in the rig
|
||||
cameraRig->push_back(SphericalCamera(body_P_sensorId, emptyK));
|
||||
|
||||
SmartProjectionRigFactor<SphericalCamera>::shared_ptr smartFactorP(
|
||||
new SmartProjectionRigFactor<SphericalCamera>(model, cameraRig,
|
||||
params));
|
||||
smartFactorP->add(measurements_lmk1_sphere[0], x1);
|
||||
smartFactorP->add(measurements_lmk1_sphere[1], x1);
|
||||
|
||||
Values values;
|
||||
values.insert(x1, pose1);
|
||||
values.insert(x2, pose2);
|
||||
gttic_(SmartFactorP_spherical_LINEARIZE);
|
||||
smartFactorP->linearize(values);
|
||||
gttoc_(SmartFactorP_spherical_LINEARIZE);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < nrTests; i++) {
|
||||
boost::shared_ptr<CameraSet<PinholePose<Cal3_S2>>> cameraRig(
|
||||
new CameraSet<PinholePose<Cal3_S2>>()); // single camera in the rig
|
||||
cameraRig->push_back(PinholePose<Cal3_S2>(body_P_sensorId, sharedKSimple));
|
||||
|
||||
SmartProjectionRigFactor<PinholePose<Cal3_S2>>::shared_ptr smartFactorP2(
|
||||
new SmartProjectionRigFactor<PinholePose<Cal3_S2>>(model, cameraRig,
|
||||
params));
|
||||
smartFactorP2->add(measurements_lmk1[0], x1);
|
||||
smartFactorP2->add(measurements_lmk1[1], x1);
|
||||
|
||||
Values values;
|
||||
values.insert(x1, pose1);
|
||||
values.insert(x2, pose2);
|
||||
gttic_(SmartFactorP_pinhole_LINEARIZE);
|
||||
smartFactorP2->linearize(values);
|
||||
gttoc_(SmartFactorP_pinhole_LINEARIZE);
|
||||
}
|
||||
tictoc_print_();
|
||||
}
|
||||
#endif
|
||||
|
||||
/* *************************************************************************/
|
||||
TEST(SmartProjectionFactorP, 2poses_rankTol) {
|
||||
Pose3 poseA = Pose3(
|
||||
Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
|
||||
Point3(0.0, 0.0, 0.0)); // with z pointing along x axis of global frame
|
||||
Pose3 poseB = Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
|
||||
Point3(0.0, -0.1, 0.0)); // 10cm to the right of poseA
|
||||
Point3 landmarkL = Point3(5.0, 0.0, 0.0); // 5m in front of poseA
|
||||
|
||||
// triangulate from a stereo with 10cm baseline, assuming standard calibration
|
||||
{ // default rankTol = 1 gives a valid point (compare with calibrated and
|
||||
// spherical cameras below)
|
||||
using namespace vanillaPose; // pinhole with Cal3_S2 calibration
|
||||
|
||||
Camera cam1(poseA, sharedK);
|
||||
Camera cam2(poseB, sharedK);
|
||||
|
||||
SmartProjectionParams params(
|
||||
gtsam::HESSIAN,
|
||||
gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors
|
||||
params.setRankTolerance(1);
|
||||
|
||||
boost::shared_ptr<CameraSet<PinholePose<Cal3_S2>>> cameraRig(
|
||||
new CameraSet<PinholePose<Cal3_S2>>()); // single camera in the rig
|
||||
cameraRig->push_back(PinholePose<Cal3_S2>(Pose3::identity(), sharedK));
|
||||
|
||||
SmartRigFactor::shared_ptr smartFactor1(
|
||||
new SmartRigFactor(model, cameraRig, params));
|
||||
smartFactor1->add(cam1.project(landmarkL), x1);
|
||||
smartFactor1->add(cam2.project(landmarkL), x2);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.push_back(smartFactor1);
|
||||
|
||||
Values groundTruth;
|
||||
groundTruth.insert(x1, poseA);
|
||||
groundTruth.insert(x2, poseB);
|
||||
DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);
|
||||
|
||||
// get point
|
||||
TriangulationResult point = smartFactor1->point();
|
||||
EXPECT(point.valid()); // valid triangulation
|
||||
EXPECT(assert_equal(landmarkL, *point, 1e-7));
|
||||
}
|
||||
// triangulate from a stereo with 10cm baseline, assuming canonical
|
||||
// calibration
|
||||
{ // default rankTol = 1 or 0.1 gives a degenerate point, which is
|
||||
// undesirable for a point 5m away and 10cm baseline
|
||||
using namespace vanillaPose; // pinhole with Cal3_S2 calibration
|
||||
static Cal3_S2::shared_ptr canonicalK(
|
||||
new Cal3_S2(1.0, 1.0, 0.0, 0.0, 0.0)); // canonical camera
|
||||
|
||||
Camera cam1(poseA, canonicalK);
|
||||
Camera cam2(poseB, canonicalK);
|
||||
|
||||
SmartProjectionParams params(
|
||||
gtsam::HESSIAN,
|
||||
gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors
|
||||
params.setRankTolerance(0.1);
|
||||
|
||||
boost::shared_ptr<CameraSet<PinholePose<Cal3_S2>>> cameraRig(
|
||||
new CameraSet<PinholePose<Cal3_S2>>()); // single camera in the rig
|
||||
cameraRig->push_back(PinholePose<Cal3_S2>(Pose3::identity(), canonicalK));
|
||||
|
||||
SmartRigFactor::shared_ptr smartFactor1(
|
||||
new SmartRigFactor(model, cameraRig, params));
|
||||
smartFactor1->add(cam1.project(landmarkL), x1);
|
||||
smartFactor1->add(cam2.project(landmarkL), x2);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.push_back(smartFactor1);
|
||||
|
||||
Values groundTruth;
|
||||
groundTruth.insert(x1, poseA);
|
||||
groundTruth.insert(x2, poseB);
|
||||
DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);
|
||||
|
||||
// get point
|
||||
TriangulationResult point = smartFactor1->point();
|
||||
EXPECT(point.degenerate()); // valid triangulation
|
||||
}
|
||||
// triangulate from a stereo with 10cm baseline, assuming canonical
|
||||
// calibration
|
||||
{ // smaller rankTol = 0.01 gives a valid point (compare with calibrated and
|
||||
// spherical cameras below)
|
||||
using namespace vanillaPose; // pinhole with Cal3_S2 calibration
|
||||
static Cal3_S2::shared_ptr canonicalK(
|
||||
new Cal3_S2(1.0, 1.0, 0.0, 0.0, 0.0)); // canonical camera
|
||||
|
||||
Camera cam1(poseA, canonicalK);
|
||||
Camera cam2(poseB, canonicalK);
|
||||
|
||||
SmartProjectionParams params(
|
||||
gtsam::HESSIAN,
|
||||
gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors
|
||||
params.setRankTolerance(0.01);
|
||||
|
||||
boost::shared_ptr<CameraSet<PinholePose<Cal3_S2>>> cameraRig(
|
||||
new CameraSet<PinholePose<Cal3_S2>>()); // single camera in the rig
|
||||
cameraRig->push_back(PinholePose<Cal3_S2>(Pose3::identity(), canonicalK));
|
||||
|
||||
SmartRigFactor::shared_ptr smartFactor1(
|
||||
new SmartRigFactor(model, cameraRig, params));
|
||||
smartFactor1->add(cam1.project(landmarkL), x1);
|
||||
smartFactor1->add(cam2.project(landmarkL), x2);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.push_back(smartFactor1);
|
||||
|
||||
Values groundTruth;
|
||||
groundTruth.insert(x1, poseA);
|
||||
groundTruth.insert(x2, poseB);
|
||||
DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);
|
||||
|
||||
// get point
|
||||
TriangulationResult point = smartFactor1->point();
|
||||
EXPECT(point.valid()); // valid triangulation
|
||||
EXPECT(assert_equal(landmarkL, *point, 1e-7));
|
||||
}
|
||||
}

/* *************************************************************************/
TEST(SmartProjectionFactorP, 2poses_sphericalCamera_rankTol) {
  typedef SphericalCamera Camera;
  typedef SmartProjectionRigFactor<Camera> SmartRigFactor;
  EmptyCal::shared_ptr emptyK(new EmptyCal());
  Pose3 poseA = Pose3(
      Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
      Point3(0.0, 0.0, 0.0));  // with z pointing along x axis of global frame
  Pose3 poseB = Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
                      Point3(0.0, -0.1, 0.0));  // 10cm to the right of poseA
  Point3 landmarkL = Point3(5.0, 0.0, 0.0);     // 5m in front of poseA

  Camera cam1(poseA);
  Camera cam2(poseB);

  boost::shared_ptr<CameraSet<SphericalCamera>> cameraRig(
      new CameraSet<SphericalCamera>());  // single camera in the rig
  cameraRig->push_back(SphericalCamera(Pose3::identity(), emptyK));

  // TRIANGULATION TEST WITH DEFAULT RANK TOL
  {  // rankTol = 1 or 0.1 gives a degenerate point, which is undesirable for a
     // point 5m away and 10cm baseline
    SmartProjectionParams params(
        gtsam::HESSIAN,
        gtsam::ZERO_ON_DEGENERACY);  // only config that works with rig factors
    params.setRankTolerance(0.1);

    SmartRigFactor::shared_ptr smartFactor1(
        new SmartRigFactor(model, cameraRig, params));
    smartFactor1->add(cam1.project(landmarkL), x1);
    smartFactor1->add(cam2.project(landmarkL), x2);

    NonlinearFactorGraph graph;
    graph.push_back(smartFactor1);

    Values groundTruth;
    groundTruth.insert(x1, poseA);
    groundTruth.insert(x2, poseB);
    DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);

    // get point
    TriangulationResult point = smartFactor1->point();
    EXPECT(point.degenerate());  // not enough parallax
  }
  // SAME TEST WITH SMALLER RANK TOL
  {  // rankTol = 0.01 gives a valid point
     // By playing with this test, we can show we can triangulate also with a
     // baseline of 5cm (even for points far away, >100m), but the test fails
     // when the baseline becomes 1cm. This suggests using rankTol = 0.01 and
     // setting a reasonable max landmark distance to obtain best results.
    SmartProjectionParams params(
        gtsam::HESSIAN,
        gtsam::ZERO_ON_DEGENERACY);  // only config that works with rig factors
    params.setRankTolerance(0.01);

    SmartRigFactor::shared_ptr smartFactor1(
        new SmartRigFactor(model, cameraRig, params));
    smartFactor1->add(cam1.project(landmarkL), x1);
    smartFactor1->add(cam2.project(landmarkL), x2);

    NonlinearFactorGraph graph;
    graph.push_back(smartFactor1);

    Values groundTruth;
    groundTruth.insert(x1, poseA);
    groundTruth.insert(x2, poseB);
    DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);

    // get point
    TriangulationResult point = smartFactor1->point();
    EXPECT(point.valid());  // valid triangulation
    EXPECT(assert_equal(landmarkL, *point, 1e-7));
  }
}
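The comment in the second block above recommends rankTol = 0.01 together with a reasonable maximum landmark distance. As a hedged sketch of that configuration (setLandmarkDistanceThreshold is assumed to be the relevant SmartProjectionParams setter, and the 100 m value is illustrative, not from this diff):

// Suggested rig-factor configuration: small rank tolerance plus a distance cap.
SmartProjectionParams params(gtsam::HESSIAN, gtsam::ZERO_ON_DEGENERACY);
params.setRankTolerance(0.01);               // allow small-baseline triangulation
params.setLandmarkDistanceThreshold(100.0);  // discard far, poorly constrained points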

/* ************************************************************************* */
int main() {
  TestResult tr;

@@ -26,7 +26,7 @@ void AllDiff::print(const std::string& s, const KeyFormatter& formatter) const {
}

/* ************************************************************************* */
double AllDiff::operator()(const Values& values) const {
double AllDiff::operator()(const DiscreteValues& values) const {
  std::set<size_t> taken;  // record values taken by keys
  for (Key dkey : keys_) {
    size_t value = values.at(dkey);  // get the value for that key

@@ -57,21 +57,25 @@ DecisionTreeFactor AllDiff::operator*(const DecisionTreeFactor& f) const {
}

/* ************************************************************************* */
bool AllDiff::ensureArcConsistency(size_t j,
                                   std::vector<Domain>& domains) const {
bool AllDiff::ensureArcConsistency(Key j, Domains* domains) const {
  Domain& Dj = domains->at(j);

  // Though strictly not part of allDiff, we check for
  // a value in domains[j] that does not occur in any other connected domain.
  // a value in domains->at(j) that does not occur in any other connected domain.
  // If found, we make this a singleton...
  // TODO: make a new constraint where this really is true
  Domain& Dj = domains[j];
  if (Dj.checkAllDiff(keys_, domains)) return true;
  boost::optional<Domain> maybeChanged = Dj.checkAllDiff(keys_, *domains);
  if (maybeChanged) {
    Dj = *maybeChanged;
    return true;
  }

  // Check all other domains for singletons and erase corresponding values
  // Check all other domains for singletons and erase corresponding values.
  // This is the same as arc-consistency on the equivalent binary constraints
  bool changed = false;
  for (Key k : keys_)
    if (k != j) {
      const Domain& Dk = domains[k];
      const Domain& Dk = domains->at(k);
      if (Dk.isSingleton()) {  // check if singleton
        size_t value = Dk.firstValue();
        if (Dj.contains(value)) {

@@ -84,7 +88,7 @@ bool AllDiff::ensureArcConsistency(size_t j,
}

/* ************************************************************************* */
Constraint::shared_ptr AllDiff::partiallyApply(const Values& values) const {
Constraint::shared_ptr AllDiff::partiallyApply(const DiscreteValues& values) const {
  DiscreteKeys newKeys;
  // loop over keys and add them only if they do not appear in values
  for (Key k : keys_)

@@ -96,10 +100,10 @@ Constraint::shared_ptr AllDiff::partiallyApply(const Values& values) const {

/* ************************************************************************* */
Constraint::shared_ptr AllDiff::partiallyApply(
    const std::vector<Domain>& domains) const {
  DiscreteFactor::Values known;
    const Domains& domains) const {
  DiscreteValues known;
  for (Key k : keys_) {
    const Domain& Dk = domains[k];
    const Domain& Dk = domains.at(k);
    if (Dk.isSingleton()) known[k] = Dk.firstValue();
  }
  return partiallyApply(known);
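For context, the new ensureArcConsistency signature takes all domains by pointer and only mutates the entry for key j. A hedged sketch of how a constraint-propagation loop might drive it: Domains is assumed to map Key to Domain as used above, and csp is a hypothetical container of Constraint::shared_ptr, neither taken from this diff.

// Hypothetical helper: enforce arc-consistency until no domain changes.
void propagate(const std::vector<Constraint::shared_ptr>& csp, Domains* domains) {
  bool anyChanged = true;
  while (anyChanged) {  // iterate to a fixed point
    anyChanged = false;
    for (const Constraint::shared_ptr& constraint : csp) {
      for (Key j : constraint->keys()) {
        // Prune values from domains->at(j) that this constraint rules out.
        anyChanged |= constraint->ensureArcConsistency(j, domains);
      }
    }
  }
}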

@@ -13,11 +13,8 @@
namespace gtsam {

/**
 * General AllDiff constraint
 * Returns 1 if values for all keys are different, 0 otherwise
 * DiscreteFactors are all awkward in that they have to store two types of keys:
 * for each variable we have a Key and an Key. In this factor, we
 * keep the Indices locally, and the Indices are stored in IndexFactor.
 * General AllDiff constraint.
 * Returns 1 if values for all keys are different, 0 otherwise.
 */
class GTSAM_UNSTABLE_EXPORT AllDiff : public Constraint {
  std::map<Key, size_t> cardinalities_;

@@ -28,7 +25,7 @@ class GTSAM_UNSTABLE_EXPORT AllDiff : public Constraint {
  }

 public:
  /// Constructor
  /// Construct from keys.
  AllDiff(const DiscreteKeys& dkeys);

  // print

@@ -48,7 +45,7 @@ class GTSAM_UNSTABLE_EXPORT AllDiff : public Constraint {
  }

  /// Calculate value = expensive !
  double operator()(const Values& values) const override;
  double operator()(const DiscreteValues& values) const override;

  /// Convert into a decisiontree, can be *very* expensive !
  DecisionTreeFactor toDecisionTreeFactor() const override;

@@ -57,21 +54,19 @@ class GTSAM_UNSTABLE_EXPORT AllDiff : public Constraint {
  DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override;

  /*
   * Ensure Arc-consistency
   * Arc-consistency involves creating binaryAllDiff constraints
   * In which case the combinatorial hyper-arc explosion disappears.
   * Ensure Arc-consistency by checking every possible value of domain j.
   * @param j domain to be checked
   * @param domains all other domains
   * @param (in/out) domains all domains, but only domains->at(j) will be checked.
   * @return true if domains->at(j) was changed, false otherwise.
   */
  bool ensureArcConsistency(size_t j,
                            std::vector<Domain>& domains) const override;
  bool ensureArcConsistency(Key j, Domains* domains) const override;

  /// Partially apply known values
  Constraint::shared_ptr partiallyApply(const Values&) const override;
  Constraint::shared_ptr partiallyApply(const DiscreteValues&) const override;

  /// Partially apply known values, domain version
  Constraint::shared_ptr partiallyApply(
      const std::vector<Domain>&) const override;
      const Domains&) const override;
};

}  // namespace gtsam
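A hedged usage sketch of the updated interface, showing the DiscreteValues-based evaluation; the keys and cardinalities below are illustrative and not part of this diff.

// Three variables, each with cardinality 3, must all take different values.
DiscreteKey A(0, 3), B(1, 3), C(2, 3);
DiscreteKeys dkeys;
dkeys.push_back(A);
dkeys.push_back(B);
dkeys.push_back(C);
AllDiff allDiff(dkeys);

DiscreteValues assignment;
assignment[0] = 0;  // A = 0
assignment[1] = 1;  // B = 1
assignment[2] = 2;  // C = 2
double ok = allDiff(assignment);   // 1.0: all values differ

assignment[2] = 1;                 // C now collides with B
double bad = allDiff(assignment);  // 0.0: not all different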

@@ -15,10 +15,7 @@ namespace gtsam {

/**
 * Binary AllDiff constraint
 * Returns 1 if values for two keys are different, 0 otherwise
 * DiscreteFactors are all awkward in that they have to store two types of keys:
 * for each variable we have a Index and an Index. In this factor, we
 * keep the Indices locally, and the Indices are stored in IndexFactor.
 * Returns 1 if values for two keys are different, 0 otherwise.
 */
class BinaryAllDiff : public Constraint {
  size_t cardinality0_, cardinality1_;  /// cardinality

@@ -50,7 +47,7 @@ class BinaryAllDiff : public Constraint {
  }

  /// Calculate value
  double operator()(const Values& values) const override {
  double operator()(const DiscreteValues& values) const override {
    return (double)(values.at(keys_[0]) != values.at(keys_[1]));
  }

@@ -73,25 +70,25 @@ class BinaryAllDiff : public Constraint {
  }

  /*
   * Ensure Arc-consistency
   * Ensure Arc-consistency by checking every possible value of domain j.
   * @param j domain to be checked
   * @param domains all other domains
   * @param (in/out) domains all domains, but only domains->at(j) will be checked.
   * @return true if domains->at(j) was changed, false otherwise.
   */
  bool ensureArcConsistency(size_t j,
                            std::vector<Domain>& domains) const override {
    // throw std::runtime_error(
    //     "BinaryAllDiff::ensureArcConsistency not implemented");
  bool ensureArcConsistency(Key j, Domains* domains) const override {
    throw std::runtime_error(
        "BinaryAllDiff::ensureArcConsistency not implemented");
    return false;
  }

  /// Partially apply known values
  Constraint::shared_ptr partiallyApply(const Values&) const override {
  Constraint::shared_ptr partiallyApply(const DiscreteValues&) const override {
    throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented");
  }

  /// Partially apply known values, domain version
  Constraint::shared_ptr partiallyApply(
      const std::vector<Domain>&) const override {
      const Domains&) const override {
    throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented");
  }
};
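Similarly hedged, a minimal sketch of the binary constraint under the same DiscreteValues interface; the two-key constructor taking DiscreteKey arguments is assumed, as it is not shown in this excerpt.

// Two binary variables that may not take the same value.
DiscreteKey X(0, 2), Y(1, 2);
BinaryAllDiff notEqual(X, Y);

DiscreteValues values;
values[0] = 0;
values[1] = 1;
double satisfied = notEqual(values);  // 1.0: values differ

values[1] = 0;
double violated = notEqual(values);   // 0.0: values are equal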