Merge branch 'develop' into fix/sfmtrack-color
commit d403006dfe
@@ -83,9 +83,3 @@ jobs:
if: runner.os == 'Linux'
run: |
bash .github/scripts/unix.sh -t
- name: Upload build directory
uses: actions/upload-artifact@v2
if: matrix.build_type == 'Release'
with:
name: gtsam-${{ matrix.name }}-${{ matrix.build_type }}
path: ${{ github.workspace }}/build/
@@ -51,9 +51,3 @@ jobs:
if: runner.os == 'macOS'
run: |
bash .github/scripts/unix.sh -t
- name: Upload build directory
uses: actions/upload-artifact@v2
if: matrix.build_type == 'Release'
with:
name: gtsam-${{ matrix.name }}-${{ matrix.build_type }}
path: ${{ github.workspace }}/build/
@@ -76,9 +76,3 @@ jobs:
cmake --build build --config ${{ matrix.build_type }} --target check.base
cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable
cmake --build build --config ${{ matrix.build_type }} --target check.linear
- name: Upload build directory
uses: actions/upload-artifact@v2
if: matrix.build_type == 'Release'
with:
name: gtsam-${{ matrix.name }}-${{ matrix.build_type }}
path: ${{ github.workspace }}/build/
@@ -62,6 +62,11 @@ add_subdirectory(CppUnitLite)

# This is the new wrapper
if(GTSAM_BUILD_PYTHON)
# Need to set this for the wrap package so we don't use the default value.
set(WRAP_PYTHON_VERSION ${GTSAM_PYTHON_VERSION}
CACHE STRING "The Python version to use for wrapping")

add_subdirectory(wrap)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake")
add_subdirectory(python)
endif()
@@ -240,12 +240,16 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex

set(_ignore gtsam::Point2
gtsam::Point3)

# set the matlab wrapping script variable
set(MATLAB_WRAP_SCRIPT "${GTSAM_SOURCE_DIR}/wrap/scripts/matlab_wrap.py")

add_custom_command(
OUTPUT ${generated_cpp_file}
DEPENDS ${interfaceHeader} ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects}
COMMAND
${PYTHON_EXECUTABLE}
${CMAKE_SOURCE_DIR}/wrap/matlab_wrapper.py
${MATLAB_WRAP_SCRIPT}
--src ${interfaceHeader}
--module_name ${moduleName}
--out ${generated_files_path}
@@ -1,22 +1,48 @@
# Set Python version if either Python or MATLAB wrapper is requested.
if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
if(${GTSAM_PYTHON_VERSION} STREQUAL "Default")

if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
# Use older version of cmake's find_python
find_package(PythonInterp)

if(NOT ${PYTHONINTERP_FOUND})
message(
FATAL_ERROR
"Cannot find Python interpreter. Please install Python >= 3.6.")
endif()

find_package(PythonLibs ${PYTHON_VERSION_STRING})

set(Python_VERSION_MAJOR ${PYTHON_VERSION_MAJOR})
set(Python_VERSION_MINOR ${PYTHON_VERSION_MINOR})
set(Python_EXECUTABLE ${PYTHON_EXECUTABLE})

else()
# Get info about the Python3 interpreter
# https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3
find_package(Python3 COMPONENTS Interpreter Development)

if(NOT ${Python3_FOUND})
message(FATAL_ERROR "Cannot find Python3 interpreter. Please install Python >= 3.6.")
message(
FATAL_ERROR
"Cannot find Python3 interpreter. Please install Python >= 3.6.")
endif()

set(GTSAM_PYTHON_VERSION "${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}"
CACHE
STRING
"The version of Python to build the wrappers against."
set(Python_VERSION_MAJOR ${Python3_VERSION_MAJOR})
set(Python_VERSION_MINOR ${Python3_VERSION_MINOR})

endif()

set(GTSAM_PYTHON_VERSION
"${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}"
CACHE STRING "The version of Python to build the wrappers against."
FORCE)

endif()
endif()

# Check for build of Unstable modules
if(GTSAM_BUILD_PYTHON)
if(GTSAM_UNSTABLE_BUILD_PYTHON)
if (NOT GTSAM_BUILD_UNSTABLE)
@@ -43,7 +43,7 @@ int main(const int argc, const char* argv[]) {
auto priorModel = noiseModel::Diagonal::Variances(
(Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
Key firstKey = 0;
for (const Values::ConstKeyValuePair& key_value : *initial) {
for (const auto key_value : *initial) {
std::cout << "Adding prior to g2o file " << std::endl;
firstKey = key_value.key;
graph->addPrior(firstKey, Pose3(), priorModel);

@@ -74,7 +74,7 @@ int main(const int argc, const char* argv[]) {

// Calculate and print marginal covariances for all variables
Marginals marginals(*graph, result);
for (const auto& key_value : result) {
for (const auto key_value : result) {
auto p = dynamic_cast<const GenericValue<Pose3>*>(&key_value.value);
if (!p) continue;
std::cout << marginals.marginalCovariance(key_value.key) << endl;
@@ -55,7 +55,7 @@ int main(const int argc, const char *argv[]) {
std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl;
// Additional: rewrite input with simplified keys 0,1,...
Values simpleInitial;
for(const Values::ConstKeyValuePair& key_value: *initial) {
for(const auto key_value: *initial) {
Key key;
if(add)
key = key_value.key + firstKey;
@@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) {
auto priorModel = noiseModel::Diagonal::Variances(
(Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
Key firstKey = 0;
for (const Values::ConstKeyValuePair& key_value : *initial) {
for (const auto key_value : *initial) {
std::cout << "Adding prior to g2o file " << std::endl;
firstKey = key_value.key;
graph->addPrior(firstKey, Pose3(), priorModel);
@@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) {
auto priorModel = noiseModel::Diagonal::Variances(
(Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
Key firstKey = 0;
for (const Values::ConstKeyValuePair& key_value : *initial) {
for (const auto key_value : *initial) {
std::cout << "Adding prior to g2o file " << std::endl;
firstKey = key_value.key;
graph->addPrior(firstKey, Pose3(), priorModel);
@@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) {
auto priorModel = noiseModel::Diagonal::Variances(
(Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
Key firstKey = 0;
for (const Values::ConstKeyValuePair& key_value : *initial) {
for (const auto key_value : *initial) {
std::cout << "Adding prior to g2o file " << std::endl;
firstKey = key_value.key;
graph->addPrior(firstKey, Pose3(), priorModel);
@@ -559,7 +559,7 @@ void runPerturb()

// Perturb values
VectorValues noise;
for(const Values::KeyValuePair& key_val: initial)
for(const Values::KeyValuePair key_val: initial)
{
Vector noisev(key_val.value.dim());
for(Vector::Index i = 0; i < noisev.size(); ++i)
@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 2.8)
cmake_minimum_required(VERSION 3.0)
project(METIS)

# Add flags for currect directory and below
@@ -12,6 +12,7 @@ endif()
if(WIN32)
set_target_properties(metis-gtsam PROPERTIES
PREFIX ""
COMPILE_FLAGS /w
RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/../../../bin")
endif()
@@ -99,6 +99,9 @@ class GTSAM_EXPORT Cal3 {
*/
Cal3(double fov, int w, int h);

/// Virtual destructor
virtual ~Cal3() {}

/// @}
/// @name Advanced Constructors
/// @{
@@ -106,8 +106,8 @@ Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
}

/* ************************************************************************* */
void Pose3::print(const string& s) const {
cout << (s.empty() ? s : s + " ") << *this << endl;
void Pose3::print(const std::string& s) const {
std::cout << (s.empty() ? s : s + " ") << *this << std::endl;
}

/* ************************************************************************* */
@@ -22,8 +22,6 @@

#include <iostream>

using namespace std;

namespace gtsam {

// Implementation for N>=5 just uses dynamic version

@@ -108,7 +106,7 @@ typename SO<N>::VectorN2 SO<N>::vec(

template <int N>
void SO<N>::print(const std::string& s) const {
cout << s << matrix_ << endl;
std::cout << s << matrix_ << std::endl;
}

} // namespace gtsam
@@ -825,7 +825,7 @@ TEST(Rot3, RQ_derivative) {
const auto R = Rot3::RzRyRx(xyz).matrix();
const auto num = numericalDerivative11(RQ_proxy, R);
Matrix39 calc;
RQ(R, calc).second;
auto dummy = RQ(R, calc).second;

const auto err = vec_err.second;
CHECK(assert_equal(num, calc, err));
@@ -161,7 +161,7 @@ namespace gtsam {
bool VectorValues::equals(const VectorValues& x, double tol) const {
if(this->size() != x.size())
return false;
for(const auto& values: boost::combine(*this, x)) {
for(const auto values: boost::combine(*this, x)) {
if(values.get<0>().first != values.get<1>().first ||
!equal_with_abs_tol(values.get<0>().second, values.get<1>().second, tol))
return false;

@@ -233,7 +233,7 @@ namespace gtsam {
double result = 0.0;
typedef boost::tuple<value_type, value_type> ValuePair;
using boost::adaptors::map_values;
for(const ValuePair& values: boost::combine(*this, v)) {
for(const ValuePair values: boost::combine(*this, v)) {
assert_throw(values.get<0>().first == values.get<1>().first,
invalid_argument("VectorValues::dot called with a VectorValues of different structure"));
assert_throw(values.get<0>().second.size() == values.get<1>().second.size(),
@@ -28,6 +28,8 @@ class GaussNewtonOptimizer;
* NonlinearOptimizationParams.
*/
class GTSAM_EXPORT GaussNewtonParams : public NonlinearOptimizerParams {
public:
using OptimizerType = GaussNewtonOptimizer;
};

/**
@@ -0,0 +1,338 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file GncOptimizer.h
* @brief The GncOptimizer class
* @author Jingnan Shi
* @author Luca Carlone
* @author Frank Dellaert
*
* Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated Non-Convexity for Robust Spatial Perception:
* From Non-Minimal Solvers to Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version: https://arxiv.org/pdf/1909.08605.pdf)
*
* See also:
* Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness, Minimally-Tuned Algorithms, and Applications",
* arxiv: https://arxiv.org/pdf/2007.15109.pdf, 2020.
*/

#pragma once

#include <gtsam/nonlinear/GncParams.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

namespace gtsam {

/* ************************************************************************* */
template<class GncParameters>
class GncOptimizer {
public:
/// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
typedef typename GncParameters::OptimizerType BaseOptimizer;

private:
NonlinearFactorGraph nfg_; ///< Original factor graph to be solved by GNC.
Values state_; ///< Initial values to be used at each iteration by GNC.
GncParameters params_; ///< GNC parameters.
Vector weights_; ///< Weights associated to each factor in GNC (this could be a local variable in optimize, but it is useful to make it accessible from outside).

public:
/// Constructor.
GncOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
const GncParameters& params = GncParameters())
: state_(initialValues),
params_(params) {

// make sure all noiseModels are Gaussian or convert to Gaussian
nfg_.resize(graph.size());
for (size_t i = 0; i < graph.size(); i++) {
if (graph[i]) {
NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<
NoiseModelFactor>(graph[i]);
auto robust = boost::dynamic_pointer_cast<
noiseModel::Robust>(factor->noiseModel());
// if the factor has a robust loss, we remove the robust loss
nfg_[i] = robust ? factor-> cloneWithNewNoiseModel(robust->noise()) : factor;
}
}
}

/// Access a copy of the internal factor graph.
const NonlinearFactorGraph& getFactors() const { return nfg_; }

/// Access a copy of the internal values.
const Values& getState() const { return state_; }

/// Access a copy of the parameters.
const GncParameters& getParams() const { return params_;}

/// Access a copy of the GNC weights.
const Vector& getWeights() const { return weights_;}

/// Compute optimal solution using graduated non-convexity.
Values optimize() {
// start by assuming all measurements are inliers
weights_ = Vector::Ones(nfg_.size());
BaseOptimizer baseOptimizer(nfg_, state_);
Values result = baseOptimizer.optimize();
double mu = initializeMu();
double prev_cost = nfg_.error(result);
double cost = 0.0; // this will be updated in the main loop

// handle the degenerate case that corresponds to small
// maximum residual errors at initialization
// For GM: if residual error is small, mu -> 0
// For TLS: if residual error is small, mu -> -1
if (mu <= 0) {
if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
std::cout << "GNC Optimizer stopped because maximum residual at "
"initialization is small."
<< std::endl;
}
if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
result.print("result\n");
std::cout << "mu: " << mu << std::endl;
}
return result;
}

size_t iter;
for (iter = 0; iter < params_.maxIterations; iter++) {

// display info
if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
std::cout << "iter: " << iter << std::endl;
result.print("result\n");
std::cout << "mu: " << mu << std::endl;
std::cout << "weights: " << weights_ << std::endl;
}
// weights update
weights_ = calculateWeights(result, mu);

// variable/values update
NonlinearFactorGraph graph_iter = this->makeWeightedGraph(weights_);
BaseOptimizer baseOptimizer_iter(graph_iter, state_);
result = baseOptimizer_iter.optimize();

// stopping condition
cost = graph_iter.error(result);
if (checkConvergence(mu, weights_, cost, prev_cost)) {
break;
}

// update mu
mu = updateMu(mu);

// get ready for next iteration
prev_cost = cost;

// display info
if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
std::cout << "previous cost: " << prev_cost << std::endl;
std::cout << "current cost: " << cost << std::endl;
}
}
// display info
if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
std::cout << "final iterations: " << iter << std::endl;
std::cout << "final mu: " << mu << std::endl;
std::cout << "final weights: " << weights_ << std::endl;
std::cout << "previous cost: " << prev_cost << std::endl;
std::cout << "current cost: " << cost << std::endl;
}
return result;
}

/// Initialize the gnc parameter mu such that loss is approximately convex (remark 5 in GNC paper).
double initializeMu() const {
// compute largest error across all factors
double rmax_sq = 0.0;
for (size_t i = 0; i < nfg_.size(); i++) {
if (nfg_[i]) {
rmax_sq = std::max(rmax_sq, nfg_[i]->error(state_));
}
}
// set initial mu
switch (params_.lossType) {
case GncLossType::GM:
// surrogate cost is convex for large mu
return 2 * rmax_sq / params_.barcSq; // initial mu
case GncLossType::TLS:
/* initialize mu to the value specified in Remark 5 in GNC paper.
surrogate cost is convex for mu close to zero
degenerate case: 2 * rmax_sq - params_.barcSq < 0 (handled in the main loop)
according to remark mu = params_.barcSq / (2 * rmax_sq - params_.barcSq) = params_.barcSq/ excessResidual
however, if the denominator is 0 or negative, we return mu = -1 which leads to termination of the main GNC loop
*/
return
(2 * rmax_sq - params_.barcSq) > 0 ?
params_.barcSq / (2 * rmax_sq - params_.barcSq) : -1;
default:
throw std::runtime_error(
"GncOptimizer::initializeMu: called with unknown loss type.");
}
}

/// Update the gnc parameter mu to gradually increase nonconvexity.
double updateMu(const double mu) const {
switch (params_.lossType) {
case GncLossType::GM:
// reduce mu, but saturate at 1 (original cost is recovered for mu -> 1)
return std::max(1.0, mu / params_.muStep);
case GncLossType::TLS:
// increases mu at each iteration (original cost is recovered for mu -> inf)
return mu * params_.muStep;
default:
throw std::runtime_error(
"GncOptimizer::updateMu: called with unknown loss type.");
}
}

/// Check if we have reached the value of mu for which the surrogate loss matches the original loss.
bool checkMuConvergence(const double mu) const {
bool muConverged = false;
switch (params_.lossType) {
case GncLossType::GM:
muConverged = std::fabs(mu - 1.0) < 1e-9; // mu=1 recovers the original GM function
break;
case GncLossType::TLS:
muConverged = false; // for TLS there is no stopping condition on mu (it must tend to infinity)
break;
default:
throw std::runtime_error(
"GncOptimizer::checkMuConvergence: called with unknown loss type.");
}
if (muConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
std::cout << "muConverged = true " << std::endl;
return muConverged;
}

/// Check convergence of relative cost differences.
bool checkCostConvergence(const double cost, const double prev_cost) const {
bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7)
< params_.relativeCostTol;
if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
std::cout << "checkCostConvergence = true " << std::endl;
return costConverged;
}

/// Check convergence of weights to binary values.
bool checkWeightsConvergence(const Vector& weights) const {
bool weightsConverged = false;
switch (params_.lossType) {
case GncLossType::GM:
weightsConverged = false; // for GM, there is no clear binary convergence for the weights
break;
case GncLossType::TLS:
weightsConverged = true;
for (size_t i = 0; i < weights.size(); i++) {
if (std::fabs(weights[i] - std::round(weights[i]))
> params_.weightsTol) {
weightsConverged = false;
break;
}
}
break;
default:
throw std::runtime_error(
"GncOptimizer::checkWeightsConvergence: called with unknown loss type.");
}
if (weightsConverged
&& params_.verbosity >= GncParameters::Verbosity::SUMMARY)
std::cout << "weightsConverged = true " << std::endl;
return weightsConverged;
}

/// Check for convergence between consecutive GNC iterations.
bool checkConvergence(const double mu, const Vector& weights,
const double cost, const double prev_cost) const {
return checkCostConvergence(cost, prev_cost)
|| checkWeightsConvergence(weights) || checkMuConvergence(mu);
}

/// Create a graph where each factor is weighted by the gnc weights.
NonlinearFactorGraph makeWeightedGraph(const Vector& weights) const {
// make sure all noiseModels are Gaussian or convert to Gaussian
NonlinearFactorGraph newGraph;
newGraph.resize(nfg_.size());
for (size_t i = 0; i < nfg_.size(); i++) {
if (nfg_[i]) {
auto factor = boost::dynamic_pointer_cast<
NoiseModelFactor>(nfg_[i]);
auto noiseModel =
boost::dynamic_pointer_cast<noiseModel::Gaussian>(
factor->noiseModel());
if (noiseModel) {
Matrix newInfo = weights[i] * noiseModel->information();
auto newNoiseModel = noiseModel::Gaussian::Information(newInfo);
newGraph[i] = factor->cloneWithNewNoiseModel(newNoiseModel);
} else {
throw std::runtime_error(
"GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model.");
}
}
}
return newGraph;
}

/// Calculate gnc weights.
Vector calculateWeights(const Values& currentEstimate, const double mu) {
Vector weights = Vector::Ones(nfg_.size());

// do not update the weights that the user has decided are known inliers
std::vector<size_t> allWeights;
for (size_t k = 0; k < nfg_.size(); k++) {
allWeights.push_back(k);
}
std::vector<size_t> unknownWeights;
std::set_difference(allWeights.begin(), allWeights.end(),
params_.knownInliers.begin(),
params_.knownInliers.end(),
std::inserter(unknownWeights, unknownWeights.begin()));

// update weights of known inlier/outlier measurements
switch (params_.lossType) {
case GncLossType::GM: { // use eq (12) in GNC paper
for (size_t k : unknownWeights) {
if (nfg_[k]) {
double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
weights[k] = std::pow(
(mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2);
}
}
return weights;
}
case GncLossType::TLS: { // use eq (14) in GNC paper
double upperbound = (mu + 1) / mu * params_.barcSq;
double lowerbound = mu / (mu + 1) * params_.barcSq;
for (size_t k : unknownWeights) {
if (nfg_[k]) {
double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
if (u2_k >= upperbound) {
weights[k] = 0;
} else if (u2_k <= lowerbound) {
weights[k] = 1;
} else {
weights[k] = std::sqrt(params_.barcSq * mu * (mu + 1) / u2_k)
- mu;
}
}
}
return weights;
}
default:
throw std::runtime_error(
"GncOptimizer::calculateWeights: called with unknown loss type.");
}
}
};

}
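For reference, the weight updates implemented in calculateWeights() above reduce to the following closed-form expressions (a direct transcription of the code, writing \bar{c}^2 for params_.barcSq and u_k^2 for the whitened squared residual nfg_[k]->error(currentEstimate)):

Geman-McClure (eq. 12 of the GNC paper):
w_k = \left( \frac{\mu\,\bar{c}^2}{u_k^2 + \mu\,\bar{c}^2} \right)^2

Truncated Least Squares (eq. 14 of the GNC paper):
w_k = \begin{cases} 0 & \text{if } u_k^2 \ge \frac{\mu+1}{\mu}\,\bar{c}^2 \\ 1 & \text{if } u_k^2 \le \frac{\mu}{\mu+1}\,\bar{c}^2 \\ \sqrt{\frac{\bar{c}^2\,\mu(\mu+1)}{u_k^2}} - \mu & \text{otherwise} \end{cases}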
@@ -0,0 +1,164 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file GncOptimizer.h
* @brief The GncOptimizer class
* @author Jingnan Shi
* @author Luca Carlone
* @author Frank Dellaert
*
* Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated Non-Convexity for Robust Spatial Perception:
* From Non-Minimal Solvers to Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version: https://arxiv.org/pdf/1909.08605.pdf)
*
* See also:
* Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness, Minimally-Tuned Algorithms, and Applications",
* arxiv: https://arxiv.org/pdf/2007.15109.pdf, 2020.
*/

#pragma once

#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>

namespace gtsam {

/* ************************************************************************* */
/// Choice of robust loss function for GNC.
enum GncLossType {
GM /*Geman McClure*/,
TLS /*Truncated least squares*/
};

template<class BaseOptimizerParameters>
class GncParams {
public:
/// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
typedef typename BaseOptimizerParameters::OptimizerType OptimizerType;

/// Verbosity levels
enum Verbosity {
SILENT = 0,
SUMMARY,
VALUES
};

/// Constructor.
GncParams(const BaseOptimizerParameters& baseOptimizerParams)
: baseOptimizerParams(baseOptimizerParams) {
}

/// Default constructor.
GncParams()
: baseOptimizerParams() {
}

/// GNC parameters.
BaseOptimizerParameters baseOptimizerParams; ///< Optimization parameters used to solve the weighted least squares problem at each GNC iteration
/// any other specific GNC parameters:
GncLossType lossType = TLS; ///< Default loss
size_t maxIterations = 100; ///< Maximum number of iterations
double barcSq = 1.0; ///< A factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance
double muStep = 1.4; ///< Multiplicative factor to reduce/increase the mu in gnc
double relativeCostTol = 1e-5; ///< If relative cost change is below this threshold, stop iterating
double weightsTol = 1e-4; ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS)
Verbosity verbosity = SILENT; ///< Verbosity level
std::vector<size_t> knownInliers = std::vector<size_t>(); ///< Slots in the factor graph corresponding to measurements that we know are inliers

/// Set the robust loss function to be used in GNC (chosen among the ones in GncLossType).
void setLossType(const GncLossType type) {
lossType = type;
}

/// Set the maximum number of iterations in GNC (changing the max nr of iters might lead to less accurate solutions and is not recommended).
void setMaxIterations(const size_t maxIter) {
std::cout
<< "setMaxIterations: changing the max nr of iters might lead to less accurate solutions and is not recommended! "
<< std::endl;
maxIterations = maxIter;
}

/** Set the maximum weighted residual error for an inlier. For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega,
* the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier.
* In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq.
* Assuming a isotropic measurement covariance sigma^2 * Identity, the cost becomes: 0.5 * 1/sigma^2 || r(x) ||^2 <= barcSq.
* Hence || r(x) ||^2 <= 2 * barcSq * sigma^2.
* */
void setInlierCostThreshold(const double inth) {
barcSq = inth;
}

/// Set the graduated non-convexity step: at each GNC iteration, mu is updated as mu <- mu * muStep.
void setMuStep(const double step) {
muStep = step;
}

/// Set the maximum relative difference in mu values to stop iterating.
void setRelativeCostTol(double value) {
relativeCostTol = value;
}

/// Set the maximum difference between the weights and their rounding in {0,1} to stop iterating.
void setWeightsTol(double value) {
weightsTol = value;
}

/// Set the verbosity level.
void setVerbosityGNC(const Verbosity value) {
verbosity = value;
}

/** (Optional) Provide a vector of measurements that must be considered inliers. The enties in the vector
* corresponds to the slots in the factor graph. For instance, if you have a nonlinear factor graph nfg,
* and you provide knownIn = {0, 2, 15}, GNC will not apply outlier rejection to nfg[0], nfg[2], and nfg[15].
* This functionality is commonly used in SLAM when one may assume the odometry is outlier free, and
* only apply GNC to prune outliers from the loop closures.
* */
void setKnownInliers(const std::vector<size_t>& knownIn) {
for (size_t i = 0; i < knownIn.size(); i++)
knownInliers.push_back(knownIn[i]);
}

/// Equals.
bool equals(const GncParams& other, double tol = 1e-9) const {
return baseOptimizerParams.equals(other.baseOptimizerParams)
&& lossType == other.lossType && maxIterations == other.maxIterations
&& std::fabs(barcSq - other.barcSq) <= tol
&& std::fabs(muStep - other.muStep) <= tol
&& verbosity == other.verbosity && knownInliers == other.knownInliers;
}

/// Print.
void print(const std::string& str) const {
std::cout << str << "\n";
switch (lossType) {
case GM:
std::cout << "lossType: Geman McClure" << "\n";
break;
case TLS:
std::cout << "lossType: Truncated Least-squares" << "\n";
break;
default:
throw std::runtime_error("GncParams::print: unknown loss type.");
}
std::cout << "maxIterations: " << maxIterations << "\n";
std::cout << "barcSq: " << barcSq << "\n";
std::cout << "muStep: " << muStep << "\n";
std::cout << "relativeCostTol: " << relativeCostTol << "\n";
std::cout << "weightsTol: " << weightsTol << "\n";
std::cout << "verbosity: " << verbosity << "\n";
for (size_t i = 0; i < knownInliers.size(); i++)
std::cout << "knownInliers: " << knownInliers[i] << "\n";
baseOptimizerParams.print(str);
}
};

}
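To show how the two new headers fit together, here is a minimal usage sketch based only on the API introduced above; the factor graph and initial values are assumed to exist, the factors are assumed to be NoiseModelFactors, and the known-inlier slot below is purely illustrative:

#include <gtsam/nonlinear/GncOptimizer.h>
#include <iostream>

using namespace gtsam;

Values runGncExample(const NonlinearFactorGraph& graph, const Values& initial) {
  // Wrap the parameters of the base weighted least-squares optimizer.
  GncParams<GaussNewtonParams> params(GaussNewtonParams());
  params.setLossType(GncLossType::TLS);   // or GncLossType::GM
  params.setKnownInliers({0});            // illustrative: never down-weight factor slot 0
  params.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);

  // GncOptimizer picks its inner solver from GncParams::OptimizerType
  // (here GaussNewtonParams -> GaussNewtonOptimizer).
  GncOptimizer<GncParams<GaussNewtonParams>> gnc(graph, initial, params);
  Values estimate = gnc.optimize();

  // Weights close to 0 mark the factors that GNC treated as outliers.
  std::cout << "GNC weights: " << gnc.getWeights().transpose() << std::endl;
  return estimate;
}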
@@ -25,6 +25,8 @@

namespace gtsam {

class LevenbergMarquardtOptimizer;

/** Parameters for Levenberg-Marquardt optimization. Note that this parameters
* class inherits from NonlinearOptimizerParams, which specifies the parameters
* common to all nonlinear optimization algorithms. This class also contains

@@ -40,6 +42,7 @@ public:

static VerbosityLM verbosityLMTranslator(const std::string &s);
static std::string verbosityLMTranslator(VerbosityLM value);
using OptimizerType = LevenbergMarquardtOptimizer;

public:
@@ -76,6 +76,14 @@ bool NoiseModelFactor::equals(const NonlinearFactor& f, double tol) const {
&& noiseModel_->equals(*e->noiseModel_, tol)));
}

/* ************************************************************************* */
NoiseModelFactor::shared_ptr NoiseModelFactor::cloneWithNewNoiseModel(
const SharedNoiseModel newNoise) const {
NoiseModelFactor::shared_ptr new_factor = boost::dynamic_pointer_cast<NoiseModelFactor>(clone());
new_factor->noiseModel_ = newNoise;
return new_factor;
}

/* ************************************************************************* */
static void check(const SharedNoiseModel& noiseModel, size_t m) {
if (noiseModel && m != noiseModel->dim())
@@ -244,6 +244,12 @@ public:
*/
boost::shared_ptr<GaussianFactor> linearize(const Values& x) const override;

/**
* Creates a shared_ptr clone of the
* factor with a new noise model
*/
shared_ptr cloneWithNewNoiseModel(const SharedNoiseModel newNoise) const;

private:
/** Serialization function */
friend class boost::serialization::access;
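A minimal sketch of how the new helper can be called, assuming a NoiseModelFactor::shared_ptr named factor already exists; the isotropic noise model below is only an example:

// Clone the factor, keeping its keys and measurement but swapping the noise model.
auto newNoise = noiseModel::Isotropic::Sigma(factor->dim(), 0.1);
NoiseModelFactor::shared_ptr reweighted = factor->cloneWithNewNoiseModel(newNoise);

This is the same mechanism GncOptimizer uses above, both to strip robust losses in its constructor and to install the per-iteration GNC-weighted Gaussian models in makeWeightedGraph.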
@@ -376,7 +376,7 @@ static Scatter scatterFromValues(const Values& values) {
scatter.reserve(values.size());

// use "natural" ordering with keys taken from the initial values
for (const auto& key_value : values) {
for (const auto key_value : values) {
scatter.add(key_value.key, key_value.value.dim());
}
@@ -113,6 +113,17 @@ public:

virtual void print(const std::string& str = "") const;

bool equals(const NonlinearOptimizerParams& other, double tol = 1e-9) const {
return maxIterations == other.getMaxIterations()
&& std::abs(relativeErrorTol - other.getRelativeErrorTol()) <= tol
&& std::abs(absoluteErrorTol - other.getAbsoluteErrorTol()) <= tol
&& std::abs(errorTol - other.getErrorTol()) <= tol
&& verbosityTranslator(verbosity) == other.getVerbosity();
// && orderingType.equals(other.getOrderingType()_;
// && linearSolverType == other.getLinearSolverType();
// TODO: check ordering, iterativeParams, and iterationsHook
}

inline bool isMultifrontal() const {
return (linearSolverType == MULTIFRONTAL_CHOLESKY)
|| (linearSolverType == MULTIFRONTAL_QR);
@@ -217,7 +217,7 @@ namespace gtsam {
/** Constructor from a Filtered view copies out all values */
template<class ValueType>
Values::Values(const Values::Filtered<ValueType>& view) {
for(const typename Filtered<ValueType>::KeyValuePair& key_value: view) {
for(const auto key_value: view) {
Key key = key_value.key;
insert(key, static_cast<const ValueType&>(key_value.value));
}

@@ -226,7 +226,7 @@ namespace gtsam {
/* ************************************************************************* */
template<class ValueType>
Values::Values(const Values::ConstFiltered<ValueType>& view) {
for(const typename ConstFiltered<ValueType>::KeyValuePair& key_value: view) {
for(const auto key_value: view) {
Key key = key_value.key;
insert(key, static_cast<const ValueType&>(key_value.value));
}
@@ -206,7 +206,7 @@ namespace gtsam {
/* ************************************************************************* */
size_t Values::dim() const {
size_t result = 0;
for(const ConstKeyValuePair& key_value: *this) {
for(const auto key_value: *this) {
result += key_value.value.dim();
}
return result;

@@ -215,7 +215,7 @@ namespace gtsam {
/* ************************************************************************* */
VectorValues Values::zeroVectors() const {
VectorValues result;
for(const ConstKeyValuePair& key_value: *this)
for(const auto key_value: *this)
result.insert(key_value.key, Vector::Zero(key_value.value.dim()));
return result;
}
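Note that the same mechanical change recurs throughout this diff: loops over Values (and over its filtered views) drop the explicitly typed reference and take the loop variable by value, e.g.

// before
for (const Values::ConstKeyValuePair& key_value : values) { /* ... */ }
// after
for (const auto key_value : values) { /* ... */ }

presumably because the Values iterator yields a temporary key/value pair, so binding a const reference to it does not avoid a copy and only triggers range-loop-analysis warnings on recent compilers.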
@@ -126,7 +126,7 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState {
noiseModelCache.resize(0);
// for each of the variables, add a prior
damped.reserve(damped.size() + values.size());
for (const auto& key_value : values) {
for (const auto key_value : values) {
const Key key = key_value.key;
const size_t dim = key_value.value.dim();
const CachedModel* item = getCachedModel(dim);
@@ -175,10 +175,11 @@ TEST(Values, basic_functions)
{
Values values;
const Values& values_c = values;
values.insert(2, Vector3());
values.insert(4, Vector3());
values.insert(6, Matrix23());
values.insert(8, Matrix23());
Matrix23 M1 = Matrix23::Zero(), M2 = Matrix23::Zero();
values.insert(2, Vector3(0, 0, 0));
values.insert(4, Vector3(0, 0, 0));
values.insert(6, M1);
values.insert(8, M2);

// find
EXPECT_LONGS_EQUAL(4, values.find(4)->key);

@@ -335,7 +336,7 @@ TEST(Values, filter) {
int i = 0;
Values::Filtered<Value> filtered = values.filter(boost::bind(std::greater_equal<Key>(), _1, 2));
EXPECT_LONGS_EQUAL(2, (long)filtered.size());
for(const Values::Filtered<>::KeyValuePair& key_value: filtered) {
for(const auto key_value: filtered) {
if(i == 0) {
LONGS_EQUAL(2, (long)key_value.key);
try {key_value.value.cast<Pose2>();} catch (const std::bad_cast& e) { FAIL("can't cast Value to Pose2");}

@@ -370,7 +371,7 @@ TEST(Values, filter) {
i = 0;
Values::ConstFiltered<Pose3> pose_filtered = values.filter<Pose3>();
EXPECT_LONGS_EQUAL(2, (long)pose_filtered.size());
for(const Values::ConstFiltered<Pose3>::KeyValuePair& key_value: pose_filtered) {
for(const auto key_value: pose_filtered) {
if(i == 0) {
EXPECT_LONGS_EQUAL(1, (long)key_value.key);
EXPECT(assert_equal(pose1, key_value.value));

@@ -408,7 +409,7 @@ TEST(Values, Symbol_filter) {
values.insert(Symbol('y', 3), pose3);

int i = 0;
for(const Values::Filtered<Value>::KeyValuePair& key_value: values.filter(Symbol::ChrTest('y'))) {
for(const auto key_value: values.filter(Symbol::ChrTest('y'))) {
if(i == 0) {
LONGS_EQUAL(Symbol('y', 1), (long)key_value.key);
EXPECT(assert_equal(pose1, key_value.value.cast<Pose3>()));
@@ -354,7 +354,7 @@ class ShonanAveraging2 : public ShonanAveraging<2> {
public:
ShonanAveraging2(const Measurements &measurements,
const Parameters &parameters = Parameters());
explicit ShonanAveraging2(string g2oFile,
explicit ShonanAveraging2(std::string g2oFile,
const Parameters &parameters = Parameters());
};

@@ -362,7 +362,7 @@ class ShonanAveraging3 : public ShonanAveraging<3> {
public:
ShonanAveraging3(const Measurements &measurements,
const Parameters &parameters = Parameters());
explicit ShonanAveraging3(string g2oFile,
explicit ShonanAveraging3(std::string g2oFile,
const Parameters &parameters = Parameters());

// TODO(frank): Deprecate after we land pybind wrapper
@@ -86,12 +86,15 @@ public:
* @param cameraKey is the index of the camera
* @param landmarkKey is the index of the landmark
*/
GeneralSFMFactor(const Point2& measured, const SharedNoiseModel& model, Key cameraKey, Key landmarkKey) :
Base(model, cameraKey, landmarkKey), measured_(measured) {}
GeneralSFMFactor(const Point2& measured, const SharedNoiseModel& model,
Key cameraKey, Key landmarkKey)
: Base(model, cameraKey, landmarkKey), measured_(measured) {}

GeneralSFMFactor():measured_(0.0,0.0) {} ///< default constructor
GeneralSFMFactor(const Point2 & p):measured_(p) {} ///< constructor that takes a Point2
GeneralSFMFactor(double x, double y):measured_(x,y) {} ///< constructor that takes doubles x,y to make a Point2
GeneralSFMFactor() : measured_(0.0, 0.0) {} ///< default constructor
///< constructor that takes a Point2
GeneralSFMFactor(const Point2& p) : measured_(p) {}
///< constructor that takes doubles x,y to make a Point2
GeneralSFMFactor(double x, double y) : measured_(x, y) {}

virtual ~GeneralSFMFactor() {} ///< destructor

@@ -127,7 +130,7 @@ public:
catch( CheiralityException& e) {
if (H1) *H1 = JacobianC::Zero();
if (H2) *H2 = JacobianL::Zero();
// TODO warn if verbose output asked for
//TODO Print the exception via logging
return Z_2x1;
}
}

@@ -149,7 +152,7 @@ public:
H1.setZero();
H2.setZero();
b.setZero();
// TODO warn if verbose output asked for
//TODO Print the exception via logging
}

// Whiten the system if needed
@@ -62,7 +62,7 @@ static Values computePoses(const Values& initialRot,

// Upgrade rotations to full poses
Values initialPose;
for (const auto& key_value : initialRot) {
for (const auto key_value : initialRot) {
Key key = key_value.key;
const auto& rot = initialRot.at<typename Pose::Rotation>(key);
Pose initializedPose = Pose(rot, origin);

@@ -86,7 +86,7 @@ static Values computePoses(const Values& initialRot,

// put into Values structure
Values estimate;
for (const auto& key_value : GNresult) {
for (const auto key_value : GNresult) {
Key key = key_value.key;
if (key != kAnchorKey) {
const Pose& pose = GNresult.at<Pose>(key);

@@ -124,7 +124,7 @@ Values InitializePose3::computeOrientationsGradient(
// this works on the inverse rotations, according to Tron&Vidal,2011
Values inverseRot;
inverseRot.insert(initialize::kAnchorKey, Rot3());
for(const auto& key_value: givenGuess) {
for(const auto key_value: givenGuess) {
Key key = key_value.key;
const Pose3& pose = givenGuess.at<Pose3>(key);
inverseRot.insert(key, pose.rotation().inverse());

@@ -139,7 +139,7 @@ Values InitializePose3::computeOrientationsGradient(
// calculate max node degree & allocate gradient
size_t maxNodeDeg = 0;
VectorValues grad;
for(const auto& key_value: inverseRot) {
for(const auto key_value: inverseRot) {
Key key = key_value.key;
grad.insert(key,Z_3x1);
size_t currNodeDeg = (adjEdgesMap.at(key)).size();

@@ -162,7 +162,7 @@ Values InitializePose3::computeOrientationsGradient(
//////////////////////////////////////////////////////////////////////////
// compute the gradient at each node
maxGrad = 0;
for (const auto& key_value : inverseRot) {
for (const auto key_value : inverseRot) {
Key key = key_value.key;
Vector gradKey = Z_3x1;
// collect the gradient for each edge incident on key

@@ -203,7 +203,7 @@ Values InitializePose3::computeOrientationsGradient(
// Return correct rotations
const Rot3& Rref = inverseRot.at<Rot3>(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior
Values estimateRot;
for(const auto& key_value: inverseRot) {
for(const auto key_value: inverseRot) {
Key key = key_value.key;
if (key != initialize::kAnchorKey) {
const Rot3& R = inverseRot.at<Rot3>(key);
@@ -586,7 +586,7 @@ void save2D(const NonlinearFactorGraph &graph, const Values &config,
fstream stream(filename.c_str(), fstream::out);

// save poses
for (const Values::ConstKeyValuePair key_value : config) {
for (const auto key_value : config) {
const Pose2 &pose = key_value.value.cast<Pose2>();
stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y()
<< " " << pose.theta() << endl;
@@ -303,8 +303,8 @@ struct SfmTrack {

/// print
void print(const std::string& s = "") const {
cout << "Track with " << measurements.size();
cout << " measurements of point " << p << "\n";
std::cout << "Track with " << measurements.size();
std::cout << " measurements of point " << p << std::endl;
}
};

@@ -385,8 +385,8 @@ struct SfmData {

/// print
void print(const std::string& s = "") const {
cout << "Number of cameras = " << number_cameras() << "\n";
cout << "Number of tracks = " << number_tracks() << "\n";
std::cout << "Number of cameras = " << number_cameras() << std::endl;
std::cout << "Number of tracks = " << number_tracks() << std::endl;
}
};
@@ -284,7 +284,7 @@ TEST( Lago, largeGraphNoisy_orientations ) {
Values::shared_ptr expected;
boost::tie(gmatlab, expected) = readG2o(matlabFile);

for(const Values::KeyValuePair& key_val: *expected){
for(const auto key_val: *expected){
Key k = key_val.key;
EXPECT(assert_equal(expected->at<Pose2>(k), actual.at<Pose2>(k), 1e-5));
}

@@ -310,7 +310,7 @@ TEST( Lago, largeGraphNoisy ) {
Values::shared_ptr expected;
boost::tie(gmatlab, expected) = readG2o(matlabFile);

for(const Values::KeyValuePair& key_val: *expected){
for(const auto key_val: *expected){
Key k = key_val.key;
EXPECT(assert_equal(expected->at<Pose2>(k), actual.at<Pose2>(k), 1e-2));
}
@@ -308,11 +308,11 @@ int main(int argc, char** argv) {
// And to demonstrate the fixed-lag aspect, print the keys contained in each smoother after 3.0 seconds
cout << "After 15.0 seconds, each version contains to the following keys:" << endl;
cout << " Concurrent Filter Keys: " << endl;
for(const auto& key_value: concurrentFilter.getLinearizationPoint()) {
for(const auto key_value: concurrentFilter.getLinearizationPoint()) {
cout << setprecision(5) << " Key: " << key_value.key << endl;
}
cout << " Concurrent Smoother Keys: " << endl;
for(const auto& key_value: concurrentSmoother.getLinearizationPoint()) {
for(const auto key_value: concurrentSmoother.getLinearizationPoint()) {
cout << setprecision(5) << " Key: " << key_value.key << endl;
}
cout << " Fixed-Lag Smoother Keys: " << endl;
@@ -188,7 +188,8 @@ int main(int argc, char** argv) {
smartFactors[j]->addRange(i, range);
printf("adding range %g for %d",range,(int)j);
} catch (const invalid_argument& e) {
printf("warning: omitting duplicate range %g for %d",range,(int)j);
printf("warning: omitting duplicate range %g for %d: %s", range,
(int)j, e.what());
}
cout << endl;
}

@@ -233,7 +234,7 @@ int main(int argc, char** argv) {
}
}
countK = 0;
for(const Values::ConstFiltered<Point2>::KeyValuePair& it: result.filter<Point2>())
for(const auto it: result.filter<Point2>())
os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1"
<< endl;
if (smart) {

@@ -256,7 +257,7 @@ int main(int argc, char** argv) {
// Write result to file
Values result = isam.calculateEstimate();
ofstream os("rangeResult.txt");
for(const Values::ConstFiltered<Pose2>::KeyValuePair& it: result.filter<Pose2>())
for(const auto it: result.filter<Pose2>())
os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t"
<< it.value.theta() << endl;
exit(0);
@@ -202,11 +202,11 @@ int main(int argc, char** argv) {
// Write result to file
Values result = isam.calculateEstimate();
ofstream os2("rangeResultLM.txt");
for(const Values::ConstFiltered<Point2>::KeyValuePair& it: result.filter<Point2>())
for(const auto it: result.filter<Point2>())
os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1"
<< endl;
ofstream os("rangeResult.txt");
for(const Values::ConstFiltered<Pose2>::KeyValuePair& it: result.filter<Pose2>())
for(const auto it: result.filter<Pose2>())
os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t"
<< it.value.theta() << endl;
exit(0);
@@ -59,7 +59,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update(
// Add the new variables to theta
theta_.insert(newTheta);
// Add new variables to the end of the ordering
for (const auto& key_value : newTheta) {
for (const auto key_value : newTheta) {
ordering_.push_back(key_value.key);
}
// Augment Delta

@@ -267,7 +267,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
// Put the linearization points and deltas back for specific variables
if (enforceConsistency_ && (linearKeys_.size() > 0)) {
theta_.update(linearKeys_);
for(const auto& key_value: linearKeys_) {
for(const auto key_value: linearKeys_) {
delta_.at(key_value.key) = newDelta.at(key_value.key);
}
}
@@ -139,7 +139,7 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::update(const NonlinearFacto
// Add the new variables to theta
theta_.insert(newTheta);
// Add new variables to the end of the ordering
for(const Values::ConstKeyValuePair& key_value: newTheta) {
for(const auto key_value: newTheta) {
ordering_.push_back(key_value.key);
}
// Augment Delta

@@ -222,7 +222,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm

// Find the set of new separator keys
KeySet newSeparatorKeys;
for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
for(const auto key_value: separatorValues_) {
newSeparatorKeys.insert(key_value.key);
}

@@ -236,7 +236,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm
graph.push_back(smootherShortcut_);
Values values;
values.insert(smootherSummarizationValues);
for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
for(const auto key_value: separatorValues_) {
if(!values.exists(key_value.key)) {
values.insert(key_value.key, key_value.value);
}

@@ -471,7 +471,7 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values
// Put the linearization points and deltas back for specific variables
if(linearValues.size() > 0) {
theta.update(linearValues);
for(const Values::ConstKeyValuePair& key_value: linearValues) {
for(const auto key_value: linearValues) {
delta.at(key_value.key) = newDelta.at(key_value.key);
}
}

@@ -574,7 +574,7 @@ void ConcurrentBatchFilter::moveSeparator(const FastList<Key>& keysToMove) {

// Calculate the set of new separator keys: AffectedKeys + PreviousSeparatorKeys - KeysToMove
KeySet newSeparatorKeys = removedFactors.keys();
for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
for(const auto key_value: separatorValues_) {
newSeparatorKeys.insert(key_value.key);
}
for(Key key: keysToMove) {
@@ -61,7 +61,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF
theta_.insert(newTheta);

// Add new variables to the end of the ordering
for(const Values::ConstKeyValuePair& key_value: newTheta) {
for(const auto key_value: newTheta) {
ordering_.push_back(key_value.key);
}

@@ -135,7 +135,7 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa
removeFactors(filterSummarizationSlots_);

// Insert new linpoints into the values, augment the ordering, and store new dims to augment delta
for(const Values::ConstKeyValuePair& key_value: smootherValues) {
for(const auto key_value: smootherValues) {
std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
if(iter_inserted.second) {
// If the insert succeeded

@@ -146,7 +146,7 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa
iter_inserted.first->value = key_value.value;
}
}
for(const Values::ConstKeyValuePair& key_value: separatorValues) {
for(const auto key_value: separatorValues) {
std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
if(iter_inserted.second) {
// If the insert succeeded

@@ -322,7 +322,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
// Put the linearization points and deltas back for specific variables
if(separatorValues_.size() > 0) {
theta_.update(separatorValues_);
for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
for(const auto key_value: separatorValues_) {
delta_.at(key_value.key) = newDelta.at(key_value.key);
}
}

@@ -372,7 +372,7 @@ void ConcurrentBatchSmoother::updateSmootherSummarization() {

// Get the set of separator keys
gtsam::KeySet separatorKeys;
for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
for(const auto key_value: separatorValues_) {
separatorKeys.insert(key_value.key);
}
@@ -69,13 +69,13 @@ ConcurrentIncrementalFilter::Result ConcurrentIncrementalFilter::update(const No
int group = 1;
// Set all existing variables to Group1
if(isam2_.getLinearizationPoint().size() > 0) {
for(const Values::ConstKeyValuePair& key_value: isam2_.getLinearizationPoint()) {
for(const auto key_value: isam2_.getLinearizationPoint()) {
orderingConstraints->operator[](key_value.key) = group;
}
++group;
}
// Assign new variables to the root
for(const Values::ConstKeyValuePair& key_value: newTheta) {
for(const auto key_value: newTheta) {
orderingConstraints->operator[](key_value.key) = group;
}
// Set marginalizable variables to Group0

@@ -201,7 +201,7 @@ void ConcurrentIncrementalFilter::synchronize(const NonlinearFactorGraph& smooth

// Force iSAM2 not to relinearize anything during this iteration
FastList<Key> noRelinKeys;
for(const Values::ConstKeyValuePair& key_value: isam2_.getLinearizationPoint()) {
for(const auto key_value: isam2_.getLinearizationPoint()) {
noRelinKeys.push_back(key_value.key);
}
@@ -66,7 +66,7 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
// Also, mark the separator keys as fixed linearization points
FastMap<Key,int> constrainedKeys;
FastList<Key> noRelinKeys;
for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
for(const auto key_value: separatorValues_) {
constrainedKeys[key_value.key] = 1;
noRelinKeys.push_back(key_value.key);
}

@@ -82,12 +82,12 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
Values values(newTheta);
// Unfortunately, we must be careful here, as some of the smoother values
// and/or separator values may have been added previously
for(const Values::ConstKeyValuePair& key_value: smootherValues_) {
for(const auto key_value: smootherValues_) {
if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
values.insert(key_value.key, smootherValues_.at(key_value.key));
}
}
for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
for(const auto key_value: separatorValues_) {
if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
values.insert(key_value.key, separatorValues_.at(key_value.key));
}

@@ -246,7 +246,7 @@ void ConcurrentIncrementalSmoother::updateSmootherSummarization() {

// Get the set of separator keys
KeySet separatorKeys;
for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
for(const auto key_value: separatorValues_) {
separatorKeys.insert(key_value.key);
}
@ -64,7 +64,7 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,
|
|||
|
||||
|
||||
std::set<Key> KeysToKeep;
|
||||
for(const Values::ConstKeyValuePair& key_value: linPoint) { // we cycle over all the keys of factorGraph
|
||||
for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
|
||||
KeysToKeep.insert(key_value.key);
|
||||
} // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
|
||||
for(Key key: keysToMarginalize) {
|
||||
|
|
|
@ -560,7 +560,7 @@ TEST( ConcurrentBatchSmoother, synchronize_3 )
|
|||
GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues);
|
||||
|
||||
KeySet eliminateKeys = linearFactors->keys();
|
||||
for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
|
||||
for(const auto key_value: filterSeparatorValues) {
|
||||
eliminateKeys.erase(key_value.key);
|
||||
}
|
||||
KeyVector variables(eliminateKeys.begin(), eliminateKeys.end());
|
||||
|
|
|
@ -80,7 +80,7 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,
|
|||
|
||||
|
||||
std::set<Key> KeysToKeep;
|
||||
for(const Values::ConstKeyValuePair& key_value: linPoint) { // we cycle over all the keys of factorGraph
|
||||
for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
|
||||
KeysToKeep.insert(key_value.key);
|
||||
} // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
|
||||
for(Key key: keysToMarginalize) {
|
||||
|
|
|
@ -512,7 +512,7 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_2 )
|
|||
// Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
|
||||
Values expectedLinearizationPoint = filterSeparatorValues;
|
||||
Values actualLinearizationPoint;
|
||||
for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
|
||||
for(const auto key_value: filterSeparatorValues) {
|
||||
actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
|
||||
}
|
||||
CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
|
||||
|
@ -580,7 +580,7 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_3 )
|
|||
// GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();
|
||||
|
||||
KeySet allkeys = LinFactorGraph->keys();
|
||||
for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
|
||||
for(const auto key_value: filterSeparatorValues) {
|
||||
allkeys.erase(key_value.key);
|
||||
}
|
||||
KeyVector variables(allkeys.begin(), allkeys.end());
|
||||
|
|
|
@ -513,7 +513,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_2 )
|
|||
// Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
|
||||
Values expectedLinearizationPoint = filterSeparatorValues;
|
||||
Values actualLinearizationPoint;
|
||||
for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
|
||||
for(const auto key_value: filterSeparatorValues) {
|
||||
actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
|
||||
}
|
||||
CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
|
||||
|
@ -582,7 +582,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_3 )
|
|||
// GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();
|
||||
|
||||
KeySet allkeys = LinFactorGraph->keys();
|
||||
for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues)
|
||||
for(const auto key_value: filterSeparatorValues)
|
||||
allkeys.erase(key_value.key);
|
||||
KeyVector variables(allkeys.begin(), allkeys.end());
|
||||
std::pair<GaussianBayesNet::shared_ptr, GaussianFactorGraph::shared_ptr> result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky);
|
||||
|
|
|
@ -4,20 +4,26 @@ if (NOT GTSAM_BUILD_PYTHON)
|
|||
return()
|
||||
endif()
|
||||
|
||||
# Common directory for storing data/datasets stored with the package.
|
||||
# This will store the data in the Python site package directly.
|
||||
set(GTSAM_PYTHON_DATASET_DIR "./gtsam/Data")
|
||||
|
||||
# Generate setup.py.
|
||||
file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
|
||||
configure_file(${PROJECT_SOURCE_DIR}/python/setup.py.in
|
||||
${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py)
|
||||
|
||||
set(WRAP_USE_CUSTOM_PYTHON_LIBRARY ${GTSAM_USE_CUSTOM_PYTHON_LIBRARY})
|
||||
set(WRAP_PYTHON_VERSION ${GTSAM_PYTHON_VERSION})
|
||||
# Supply MANIFEST.in for older versions of Python
|
||||
file(COPY ${PROJECT_SOURCE_DIR}/python/MANIFEST.in
|
||||
DESTINATION ${GTSAM_PYTHON_BUILD_DIRECTORY})
|
||||
|
||||
include(PybindWrap)
|
||||
|
||||
############################################################
|
||||
## Load the necessary files to compile the wrapper
|
||||
|
||||
# Load the pybind11 code
|
||||
add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11)
|
||||
# Set the wrapping script variable
|
||||
set(PYBIND_WRAP_SCRIPT "${PROJECT_SOURCE_DIR}/wrap/scripts/pybind_wrap.py")
|
||||
############################################################
|
||||
|
||||
add_custom_target(gtsam_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam/gtsam.i")
|
||||
add_custom_target(gtsam_unstable_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i")
|
||||
|
||||
|
@ -67,8 +73,16 @@ set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam)
|
|||
create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
|
||||
"${GTSAM_MODULE_PATH}")
|
||||
|
||||
# Common directory for data/datasets stored with the package.
|
||||
# This will store the data in the Python site package directly.
|
||||
file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}")
|
||||
|
||||
# Add gtsam as a dependency to the install target
|
||||
set(GTSAM_PYTHON_DEPENDENCIES gtsam_py)
|
||||
|
||||
|
||||
if(GTSAM_UNSTABLE_BUILD_PYTHON)
|
||||
set(ignore
|
||||
set(ignore
|
||||
gtsam::Point2
|
||||
gtsam::Point3
|
||||
gtsam::LieVector
|
||||
|
@ -86,7 +100,7 @@ set(ignore
|
|||
gtsam::CameraSetCal3Bundler
|
||||
gtsam::KeyPairDoubleMap)
|
||||
|
||||
pybind_wrap(gtsam_unstable_py # target
|
||||
pybind_wrap(gtsam_unstable_py # target
|
||||
${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header
|
||||
"gtsam_unstable.cpp" # generated_cpp
|
||||
"gtsam_unstable" # module_name
|
||||
|
@ -98,7 +112,7 @@ pybind_wrap(gtsam_unstable_py # target
|
|||
ON # use_boost
|
||||
)
|
||||
|
||||
set_target_properties(gtsam_unstable_py PROPERTIES
|
||||
set_target_properties(gtsam_unstable_py PROPERTIES
|
||||
INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
|
||||
INSTALL_RPATH_USE_LINK_PATH TRUE
|
||||
OUTPUT_NAME "gtsam_unstable"
|
||||
|
@ -107,15 +121,20 @@ set_target_properties(gtsam_unstable_py PROPERTIES
|
|||
RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name
|
||||
)
|
||||
|
||||
set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable)
|
||||
set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable)
|
||||
|
||||
# Symlink all tests .py files to build folder.
|
||||
create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable"
|
||||
# Symlink all tests .py files to build folder.
|
||||
create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable"
|
||||
"${GTSAM_UNSTABLE_MODULE_PATH}")
|
||||
|
||||
# Add gtsam_unstable to the install target
|
||||
list(APPEND GTSAM_PYTHON_DEPENDENCIES gtsam_unstable_py)
|
||||
|
||||
endif()
|
||||
|
||||
# Add custom target so we can install with `make python-install`
|
||||
set(GTSAM_PYTHON_INSTALL_TARGET python-install)
|
||||
add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET}
|
||||
COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py install
|
||||
DEPENDS gtsam_py gtsam_unstable_py
|
||||
DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}
|
||||
WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY})
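# With this target in place, `make python-install` (after a regular build) runs
# setup.py install from the Python build directory for whichever wrapper
# targets were enabled above.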
|
||||
|
|
|
@ -0,0 +1 @@
recursive-include gtsam/Data *

@ -1,6 +1,4 @@
|
|||
import glob
|
||||
import os
|
||||
import sys
|
||||
"""Setup file to install the GTSAM package."""
|
||||
|
||||
try:
|
||||
from setuptools import setup, find_packages
|
||||
|
@ -10,19 +8,17 @@ except ImportError:
|
|||
packages = find_packages(where=".")
|
||||
print("PACKAGES: ", packages)
|
||||
|
||||
data_path = '${GTSAM_SOURCE_DIR}/examples/Data/'
|
||||
data_files_and_directories = glob.glob(data_path + '**', recursive=True)
|
||||
data_files = [x for x in data_files_and_directories if not os.path.isdir(x)]
|
||||
|
||||
package_data = {
|
||||
'': [
|
||||
'./*.so',
|
||||
'./*.dll',
|
||||
"./*.so",
|
||||
"./*.dll",
|
||||
"Data/*" # Add the data files to the package
|
||||
"Data/**/*" # Add the data files in subdirectories
|
||||
]
|
||||
}
|
||||
|
||||
# Cleaner to read in the contents rather than copy them over.
|
||||
readme_contents = open("${PROJECT_SOURCE_DIR}/README.md").read()
|
||||
readme_contents = open("${GTSAM_SOURCE_DIR}/README.md").read()
|
||||
|
||||
setup(
|
||||
name='gtsam',
|
||||
|
@ -49,9 +45,9 @@ setup(
|
|||
'Programming Language :: Python :: 3',
|
||||
],
|
||||
packages=packages,
|
||||
include_package_data=True,
|
||||
package_data=package_data,
|
||||
data_files=[('${GTSAM_PYTHON_DATASET_DIR}', data_files),],
|
||||
test_suite="gtsam.tests",
|
||||
install_requires=["numpy"],
|
||||
install_requires=open("${GTSAM_SOURCE_DIR}/python/requirements.txt").readlines(),
|
||||
zip_safe=False,
|
||||
)
|
||||
|
|
|
@ -342,10 +342,25 @@ struct UnaryFactor: public gtsam::NoiseModelFactor1<Point2> {
|
|||
return (h(x) - z_);
|
||||
}
|
||||
|
||||
gtsam::NonlinearFactor::shared_ptr clone() const override {
|
||||
return boost::static_pointer_cast<gtsam::NonlinearFactor>(
|
||||
gtsam::NonlinearFactor::shared_ptr(new UnaryFactor(*this))); }
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
inline NonlinearFactorGraph nonlinearFactorGraphWithGivenSigma(const double sigma) {
|
||||
using symbol_shorthand::X;
|
||||
using symbol_shorthand::L;
|
||||
boost::shared_ptr<NonlinearFactorGraph> fg(new NonlinearFactorGraph);
|
||||
Point2 z(1.0, 0.0);
|
||||
boost::shared_ptr<smallOptimize::UnaryFactor> factor(
|
||||
new smallOptimize::UnaryFactor(z, noiseModel::Isotropic::Sigma(2,sigma), X(1)));
|
||||
fg->push_back(factor);
|
||||
return *fg;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
inline boost::shared_ptr<const NonlinearFactorGraph> sharedReallyNonlinearFactorGraph() {
|
||||
using symbol_shorthand::X;
|
||||
|
@ -363,6 +378,54 @@ inline NonlinearFactorGraph createReallyNonlinearFactorGraph() {
|
|||
return *sharedReallyNonlinearFactorGraph();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
inline NonlinearFactorGraph sharedNonRobustFactorGraphWithOutliers() {
|
||||
using symbol_shorthand::X;
|
||||
boost::shared_ptr<NonlinearFactorGraph> fg(new NonlinearFactorGraph);
|
||||
Point2 z(0.0, 0.0);
|
||||
double sigma = 0.1;
|
||||
|
||||
boost::shared_ptr<PriorFactor<Point2>> factor(
|
||||
new PriorFactor<Point2>(X(1), z, noiseModel::Isotropic::Sigma(2,sigma)));
|
||||
// 3 noiseless inliers
|
||||
fg->push_back(factor);
|
||||
fg->push_back(factor);
|
||||
fg->push_back(factor);
|
||||
|
||||
// 1 outlier
|
||||
Point2 z_out(1.0, 0.0);
|
||||
boost::shared_ptr<PriorFactor<Point2>> factor_out(
|
||||
new PriorFactor<Point2>(X(1), z_out, noiseModel::Isotropic::Sigma(2,sigma)));
|
||||
fg->push_back(factor_out);
|
||||
|
||||
return *fg;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
inline NonlinearFactorGraph sharedRobustFactorGraphWithOutliers() {
|
||||
using symbol_shorthand::X;
|
||||
boost::shared_ptr<NonlinearFactorGraph> fg(new NonlinearFactorGraph);
|
||||
Point2 z(0.0, 0.0);
|
||||
double sigma = 0.1;
|
||||
auto gmNoise = noiseModel::Robust::Create(
|
||||
noiseModel::mEstimator::GemanMcClure::Create(1.0), noiseModel::Isotropic::Sigma(2,sigma));
|
||||
boost::shared_ptr<PriorFactor<Point2>> factor(
|
||||
new PriorFactor<Point2>(X(1), z, gmNoise));
|
||||
// 3 noiseless inliers
|
||||
fg->push_back(factor);
|
||||
fg->push_back(factor);
|
||||
fg->push_back(factor);
|
||||
|
||||
// 1 outlier
|
||||
Point2 z_out(1.0, 0.0);
|
||||
boost::shared_ptr<PriorFactor<Point2>> factor_out(
|
||||
new PriorFactor<Point2>(X(1), z_out, gmNoise));
|
||||
fg->push_back(factor_out);
|
||||
|
||||
return *fg;
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
inline std::pair<NonlinearFactorGraph, Values> createNonlinearSmoother(int T) {
|
||||
using namespace impl;
|
||||
|
|
|
@ -0,0 +1,640 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testGncOptimizer.cpp
|
||||
* @brief Unit tests for GncOptimizer class
|
||||
* @author Jingnan Shi
|
||||
* @author Luca Carlone
|
||||
* @author Frank Dellaert
|
||||
*
|
||||
* Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated
|
||||
* Non-Convexity for Robust Spatial Perception: From Non-Minimal Solvers to
|
||||
* Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version:
|
||||
* https://arxiv.org/pdf/1909.08605.pdf)
|
||||
*
|
||||
* See also:
|
||||
* Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness,
|
||||
* Minimally-Tuned Algorithms, and Applications", arxiv:
|
||||
* https://arxiv.org/pdf/2007.15109.pdf, 2020.
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/nonlinear/GncOptimizer.h>
|
||||
#include <gtsam/nonlinear/LinearContainerFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <tests/smallExample.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
using symbol_shorthand::L;
|
||||
using symbol_shorthand::X;
|
||||
static double tol = 1e-7;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, gncParamsConstructor) {
|
||||
// check params are correctly parsed
|
||||
LevenbergMarquardtParams lmParams;
|
||||
GncParams<LevenbergMarquardtParams> gncParams1(lmParams);
|
||||
CHECK(lmParams.equals(gncParams1.baseOptimizerParams));
|
||||
|
||||
// check also default constructor
|
||||
GncParams<LevenbergMarquardtParams> gncParams1b;
|
||||
CHECK(lmParams.equals(gncParams1b.baseOptimizerParams));
|
||||
|
||||
// and check params become different if we change lmParams
|
||||
lmParams.setVerbosity("DELTA");
|
||||
CHECK(!lmParams.equals(gncParams1.baseOptimizerParams));
|
||||
|
||||
// and same for GN
|
||||
GaussNewtonParams gnParams;
|
||||
GncParams<GaussNewtonParams> gncParams2(gnParams);
|
||||
CHECK(gnParams.equals(gncParams2.baseOptimizerParams));
|
||||
|
||||
// check default constructor
|
||||
GncParams<GaussNewtonParams> gncParams2b;
|
||||
CHECK(gnParams.equals(gncParams2b.baseOptimizerParams));
|
||||
|
||||
// change something at the gncParams level
|
||||
GncParams<GaussNewtonParams> gncParams2c(gncParams2b);
|
||||
gncParams2c.setLossType(GncLossType::GM);
|
||||
CHECK(!gncParams2c.equals(gncParams2b.baseOptimizerParams));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, gncConstructor) {
|
||||
// has to have Gaussian noise models !
|
||||
auto fg = example::createReallyNonlinearFactorGraph(); // just a unary factor
|
||||
// on a 2D point
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
CHECK(gnc.getFactors().equals(fg));
|
||||
CHECK(gnc.getState().equals(initial));
|
||||
CHECK(gnc.getParams().equals(gncParams));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
|
||||
auto fg = example::sharedNonRobustFactorGraphWithOutliers();
|
||||
// same graph with robust noise model
|
||||
auto fg_robust = example::sharedRobustFactorGraphWithOutliers();
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg_robust,
|
||||
initial,
|
||||
gncParams);
|
||||
|
||||
// make sure that when parsing the graph is transformed into one without
|
||||
// robust loss
|
||||
CHECK(fg.equals(gnc.getFactors()));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, initializeMu) {
|
||||
auto fg = example::createReallyNonlinearFactorGraph();
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
// testing GM mu initialization
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setLossType(GncLossType::GM);
|
||||
auto gnc_gm = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
// according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq
|
||||
// (barcSq=1 in this example)
|
||||
EXPECT_DOUBLES_EQUAL(gnc_gm.initializeMu(), 2 * 198.999, 1e-3);
|
||||
|
||||
// testing TLS mu initialization
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
auto gnc_tls = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
// according to rmk 5 in the gnc paper: m0 = barcSq / (2 * rmax^2 - barcSq)
|
||||
// (barcSq=1 in this example)
|
||||
EXPECT_DOUBLES_EQUAL(gnc_tls.initializeMu(), 1 / (2 * 198.999 - 1), 1e-3);
|
||||
}
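// Note on the two initializations above: GM starts from a large mu (which makes
// the surrogate close to a convex quadratic) and mu is then decreased towards 1,
// while TLS starts from a small mu and mu is then increased (a very large mu
// recovers the original TLS cost, see the comments further below).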
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, updateMuGM) {
|
||||
// has to have Gaussian noise models !
|
||||
auto fg = example::createReallyNonlinearFactorGraph();
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setLossType(GncLossType::GM);
|
||||
gncParams.setMuStep(1.4);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
double mu = 5.0;
|
||||
EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol);
|
||||
|
||||
// check it correctly saturates to 1 for GM
|
||||
mu = 1.2;
|
||||
EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), 1.0, tol);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, updateMuTLS) {
|
||||
// has to have Gaussian noise models !
|
||||
auto fg = example::createReallyNonlinearFactorGraph();
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setMuStep(1.4);
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
double mu = 5.0;
|
||||
EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu * 1.4, tol);
|
||||
}
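// Note the symmetry with updateMuGM above: for GM, mu is divided by muStep each
// outer iteration and saturates at 1, while for TLS it is multiplied by muStep.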
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, checkMuConvergence) {
|
||||
// has to have Gaussian noise models !
|
||||
auto fg = example::createReallyNonlinearFactorGraph();
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
{
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setLossType(GncLossType::GM);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
double mu = 1.0;
|
||||
CHECK(gnc.checkMuConvergence(mu));
|
||||
}
|
||||
{
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setLossType(
|
||||
GncLossType::TLS);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
double mu = 1.0;
|
||||
CHECK(!gnc.checkMuConvergence(mu)); //always false for TLS
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, checkCostConvergence) {
|
||||
// has to have Gaussian noise models !
|
||||
auto fg = example::createReallyNonlinearFactorGraph();
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
{
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setRelativeCostTol(0.49);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
double prev_cost = 1.0;
|
||||
double cost = 0.5;
|
||||
// relative cost reduction = 0.5 > 0.49, hence checkCostConvergence = false
|
||||
CHECK(!gnc.checkCostConvergence(cost, prev_cost));
|
||||
}
|
||||
{
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setRelativeCostTol(0.51);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
double prev_cost = 1.0;
|
||||
double cost = 0.5;
|
||||
// relative cost reduction = 0.5 < 0.51, hence checkCostConvergence = true
|
||||
CHECK(gnc.checkCostConvergence(cost, prev_cost));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, checkWeightsConvergence) {
|
||||
// has to have Gaussian noise models !
|
||||
auto fg = example::createReallyNonlinearFactorGraph();
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
{
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setLossType(GncLossType::GM);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
Vector weights = Vector::Ones(fg.size());
|
||||
CHECK(!gnc.checkWeightsConvergence(weights)); //always false for GM
|
||||
}
|
||||
{
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setLossType(
|
||||
GncLossType::TLS);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
Vector weights = Vector::Ones(fg.size());
|
||||
// weights are binary, so checkWeightsConvergence = true
|
||||
CHECK(gnc.checkWeightsConvergence(weights));
|
||||
}
|
||||
{
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setLossType(
|
||||
GncLossType::TLS);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
Vector weights = Vector::Ones(fg.size());
|
||||
weights[0] = 0.9; // more than weightsTol = 1e-4 from 1, hence checkWeightsConvergence = false
|
||||
CHECK(!gnc.checkWeightsConvergence(weights));
|
||||
}
|
||||
{
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setLossType(
|
||||
GncLossType::TLS);
|
||||
gncParams.setWeightsTol(0.1);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
Vector weights = Vector::Ones(fg.size());
|
||||
weights[0] = 0.9; // exactly weightsTol = 0.1 from 1, hence checkWeightsConvergence = true
|
||||
CHECK(gnc.checkWeightsConvergence(weights));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, checkConvergenceTLS) {
|
||||
// has to have Gaussian noise models !
|
||||
auto fg = example::createReallyNonlinearFactorGraph();
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
GncParams<LevenbergMarquardtParams> gncParams;
|
||||
gncParams.setRelativeCostTol(1e-5);
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
CHECK(gnc.checkCostConvergence(1.0, 1.0));
|
||||
CHECK(!gnc.checkCostConvergence(1.0, 2.0));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, calculateWeightsGM) {
|
||||
auto fg = example::sharedNonRobustFactorGraphWithOutliers();
|
||||
|
||||
Point2 p0(0, 0);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
// we have 4 factors, 3 with zero errors (inliers), 1 with error 50 = 0.5 *
|
||||
// 1/sigma^2 || [1;0] - [0;0] ||^2 (outlier)
|
||||
Vector weights_expected = Vector::Zero(4);
|
||||
weights_expected[0] = 1.0; // zero error
|
||||
weights_expected[1] = 1.0; // zero error
|
||||
weights_expected[2] = 1.0; // zero error
|
||||
weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2); // outlier, error = 50
|
||||
|
||||
GaussNewtonParams gnParams;
|
||||
GncParams<GaussNewtonParams> gncParams(gnParams);
|
||||
gncParams.setLossType(GncLossType::GM);
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
|
||||
double mu = 1.0;
|
||||
Vector weights_actual = gnc.calculateWeights(initial, mu);
|
||||
CHECK(assert_equal(weights_expected, weights_actual, tol));
|
||||
|
||||
mu = 2.0;
|
||||
double barcSq = 5.0;
|
||||
weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2); // outlier, error = 50
|
||||
gncParams.setInlierCostThreshold(barcSq);
|
||||
auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
|
||||
gncParams);
|
||||
weights_actual = gnc2.calculateWeights(initial, mu);
|
||||
CHECK(assert_equal(weights_expected, weights_actual, tol));
|
||||
}
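// For reference, the GM weight exercised above is w = (mu * barcSq / (error + mu * barcSq))^2
// for a factor with the given error, so zero-error inliers get w = 1 and the
// outlier with error 50 gets the down-weighted values checked above.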
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, calculateWeightsTLS) {
|
||||
auto fg = example::sharedNonRobustFactorGraphWithOutliers();
|
||||
|
||||
Point2 p0(0, 0);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
// we have 4 factors, 3 with zero errors (inliers), 1 with error
|
||||
Vector weights_expected = Vector::Zero(4);
|
||||
weights_expected[0] = 1.0; // zero error
|
||||
weights_expected[1] = 1.0; // zero error
|
||||
weights_expected[2] = 1.0; // zero error
|
||||
weights_expected[3] = 0; // outliers
|
||||
|
||||
GaussNewtonParams gnParams;
|
||||
GncParams<GaussNewtonParams> gncParams(gnParams);
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
|
||||
double mu = 1.0;
|
||||
Vector weights_actual = gnc.calculateWeights(initial, mu);
|
||||
CHECK(assert_equal(weights_expected, weights_actual, tol));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, calculateWeightsTLS2) {
|
||||
|
||||
// create values
|
||||
Point2 x_val(0.0, 0.0);
|
||||
Point2 x_prior(1.0, 0.0);
|
||||
Values initial;
|
||||
initial.insert(X(1), x_val);
|
||||
|
||||
// create very simple factor graph with a single factor 0.5 * 1/sigma^2 * || x - [1;0] ||^2
|
||||
double sigma = 1;
|
||||
SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector2(sigma, sigma));
|
||||
NonlinearFactorGraph nfg;
|
||||
nfg.add(PriorFactor<Point2>(X(1), x_prior, noise));
|
||||
|
||||
// cost of the factor:
|
||||
DOUBLES_EQUAL(0.5 * 1 / (sigma * sigma), nfg.error(initial), tol);
|
||||
|
||||
// check the TLS weights are correct: CASE 1: residual below barcsq
|
||||
{
|
||||
// expected:
|
||||
Vector weights_expected = Vector::Zero(1);
|
||||
weights_expected[0] = 1.0; // inlier
|
||||
// actual:
|
||||
GaussNewtonParams gnParams;
|
||||
GncParams<GaussNewtonParams> gncParams(gnParams);
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
gncParams.setInlierCostThreshold(0.51); // if inlier threshold is slightly larger than 0.5, then measurement is inlier
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
|
||||
gncParams);
|
||||
double mu = 1e6;
|
||||
Vector weights_actual = gnc.calculateWeights(initial, mu);
|
||||
CHECK(assert_equal(weights_expected, weights_actual, tol));
|
||||
}
|
||||
// check the TLS weights are correct: CASE 2: residual above barcsq
|
||||
{
|
||||
// expected:
|
||||
Vector weights_expected = Vector::Zero(1);
|
||||
weights_expected[0] = 0.0; // outlier
|
||||
// actual:
|
||||
GaussNewtonParams gnParams;
|
||||
GncParams<GaussNewtonParams> gncParams(gnParams);
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
gncParams.setInlierCostThreshold(0.49); // if inlier threshold is slightly below 0.5, then measurement is outlier
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
|
||||
gncParams);
|
||||
double mu = 1e6; // very large mu recovers original TLS cost
|
||||
Vector weights_actual = gnc.calculateWeights(initial, mu);
|
||||
CHECK(assert_equal(weights_expected, weights_actual, tol));
|
||||
}
|
||||
// check the TLS weights are correct: CASE 3: residual at barcsq
|
||||
{
|
||||
// expected:
|
||||
Vector weights_expected = Vector::Zero(1);
|
||||
weights_expected[0] = 0.5; // undecided
|
||||
// actual:
|
||||
GaussNewtonParams gnParams;
|
||||
GncParams<GaussNewtonParams> gncParams(gnParams);
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
gncParams.setInlierCostThreshold(0.5); // inlier threshold exactly equal to the residual, so the measurement sits on the boundary
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
|
||||
gncParams);
|
||||
double mu = 1e6; // very large mu recovers original TLS cost
|
||||
Vector weights_actual = gnc.calculateWeights(initial, mu);
|
||||
CHECK(assert_equal(weights_expected, weights_actual, 1e-5));
|
||||
}
|
||||
}
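// Summary of the three TLS cases above: with a very large mu the weights become
// essentially binary, 1 for residuals below the inlier threshold and 0 above it,
// and a residual exactly at the threshold receives the intermediate weight 0.5.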
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, makeWeightedGraph) {
|
||||
// create original factor
|
||||
double sigma1 = 0.1;
|
||||
NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(
|
||||
sigma1);
|
||||
|
||||
// create expected
|
||||
double sigma2 = 10;
|
||||
NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(
|
||||
sigma2);
|
||||
|
||||
// create weights
|
||||
Vector weights = Vector::Ones(1); // original info: 1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 1e-4
|
||||
weights[0] = 1e-4;
|
||||
|
||||
// create actual
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
LevenbergMarquardtParams lmParams;
|
||||
GncParams<LevenbergMarquardtParams> gncParams(lmParams);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial,
|
||||
gncParams);
|
||||
NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights);
|
||||
|
||||
// check it's all good
|
||||
CHECK(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, optimizeSimple) {
|
||||
auto fg = example::createReallyNonlinearFactorGraph();
|
||||
|
||||
Point2 p0(3, 3);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
LevenbergMarquardtParams lmParams;
|
||||
GncParams<LevenbergMarquardtParams> gncParams(lmParams);
|
||||
auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
Values actual = gnc.optimize();
|
||||
DOUBLES_EQUAL(0, fg.error(actual), tol);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, optimize) {
|
||||
auto fg = example::sharedNonRobustFactorGraphWithOutliers();
|
||||
|
||||
Point2 p0(1, 0);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
// try with nonrobust cost function and standard GN
|
||||
GaussNewtonParams gnParams;
|
||||
GaussNewtonOptimizer gn(fg, initial, gnParams);
|
||||
Values gn_results = gn.optimize();
|
||||
// converges to incorrect point due to lack of robustness to an outlier, ideal
|
||||
// solution is Point2(0,0)
|
||||
CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3));
|
||||
|
||||
// try with robust loss function and standard GN
|
||||
auto fg_robust = example::sharedRobustFactorGraphWithOutliers(); // same as fg, but with
|
||||
// factors wrapped in
|
||||
// Geman McClure losses
|
||||
GaussNewtonOptimizer gn2(fg_robust, initial, gnParams);
|
||||
Values gn2_results = gn2.optimize();
|
||||
// converges to incorrect point, this time due to the nonconvexity of the loss
|
||||
CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));
|
||||
|
||||
// .. but graduated nonconvexity ensures both robustness and convergence in
|
||||
// the face of nonconvexity
|
||||
GncParams<GaussNewtonParams> gncParams(gnParams);
|
||||
// gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
|
||||
Values gnc_result = gnc.optimize();
|
||||
CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
|
||||
}
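// Quick sanity check on the 0.25 above: the graph has four priors on X(1) with
// the same sigma, three at (0,0) and one at (1,0), so plain least squares
// converges to their mean (0.25, 0), while GNC down-weights the outlier and
// recovers (0,0).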
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, optimizeWithKnownInliers) {
|
||||
auto fg = example::sharedNonRobustFactorGraphWithOutliers();
|
||||
|
||||
Point2 p0(1, 0);
|
||||
Values initial;
|
||||
initial.insert(X(1), p0);
|
||||
|
||||
std::vector<size_t> knownInliers;
|
||||
knownInliers.push_back(0);
|
||||
knownInliers.push_back(1);
|
||||
knownInliers.push_back(2);
|
||||
|
||||
// nonconvexity with known inliers
|
||||
{
|
||||
GncParams<GaussNewtonParams> gncParams;
|
||||
gncParams.setKnownInliers(knownInliers);
|
||||
gncParams.setLossType(GncLossType::GM);
|
||||
//gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
Values gnc_result = gnc.optimize();
|
||||
CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
|
||||
|
||||
// check weights were actually fixed:
|
||||
Vector finalWeights = gnc.getWeights();
|
||||
DOUBLES_EQUAL(1.0, finalWeights[0], tol);
|
||||
DOUBLES_EQUAL(1.0, finalWeights[1], tol);
|
||||
DOUBLES_EQUAL(1.0, finalWeights[2], tol);
|
||||
}
|
||||
{
|
||||
GncParams<GaussNewtonParams> gncParams;
|
||||
gncParams.setKnownInliers(knownInliers);
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
// gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
Values gnc_result = gnc.optimize();
|
||||
CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
|
||||
|
||||
// check weights were actually fixed:
|
||||
Vector finalWeights = gnc.getWeights();
|
||||
DOUBLES_EQUAL(1.0, finalWeights[0], tol);
|
||||
DOUBLES_EQUAL(1.0, finalWeights[1], tol);
|
||||
DOUBLES_EQUAL(1.0, finalWeights[2], tol);
|
||||
DOUBLES_EQUAL(0.0, finalWeights[3], tol);
|
||||
}
|
||||
{
|
||||
// if we set the threshold large, they are all inliers
|
||||
GncParams<GaussNewtonParams> gncParams;
|
||||
gncParams.setKnownInliers(knownInliers);
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
//gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES);
|
||||
gncParams.setInlierCostThreshold(100.0);
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
|
||||
gncParams);
|
||||
|
||||
Values gnc_result = gnc.optimize();
|
||||
CHECK(assert_equal(Point2(0.25, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
|
||||
|
||||
// check weights were actually fixed:
|
||||
Vector finalWeights = gnc.getWeights();
|
||||
DOUBLES_EQUAL(1.0, finalWeights[0], tol);
|
||||
DOUBLES_EQUAL(1.0, finalWeights[1], tol);
|
||||
DOUBLES_EQUAL(1.0, finalWeights[2], tol);
|
||||
DOUBLES_EQUAL(1.0, finalWeights[3], tol);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GncOptimizer, optimizeSmallPoseGraph) {
|
||||
/// load small pose graph
|
||||
const string filename = findExampleDataFile("w100.graph");
|
||||
NonlinearFactorGraph::shared_ptr graph;
|
||||
Values::shared_ptr initial;
|
||||
boost::tie(graph, initial) = load2D(filename);
|
||||
// Add a Gaussian prior on first poses
|
||||
Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
|
||||
SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(
|
||||
Vector3(0.01, 0.01, 0.01));
|
||||
graph->addPrior(0, priorMean, priorNoise);
|
||||
|
||||
/// get expected values by optimizing outlier-free graph
|
||||
Values expected = LevenbergMarquardtOptimizer(*graph, *initial).optimize();
|
||||
|
||||
// add a few outliers
|
||||
SharedDiagonal betweenNoise = noiseModel::Diagonal::Sigmas(
|
||||
Vector3(0.1, 0.1, 0.01));
|
||||
graph->push_back(BetweenFactor<Pose2>(90, 50, Pose2(), betweenNoise)); // some arbitrary and incorrect between factor
|
||||
|
||||
/// get expected values by optimizing outlier-free graph
|
||||
Values expectedWithOutliers = LevenbergMarquardtOptimizer(*graph, *initial)
|
||||
.optimize();
|
||||
// as expected, the following test fails due to the presence of an outlier!
|
||||
// CHECK(assert_equal(expected, expectedWithOutliers, 1e-3));
|
||||
|
||||
// GNC
|
||||
// Note: in difficult instances, we set the odometry measurements to be
// inliers, but this problem is simple enough to succeed even without that
// assumption.
// std::vector<size_t> knownInliers;
|
||||
GncParams<GaussNewtonParams> gncParams;
|
||||
auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(*graph, *initial,
|
||||
gncParams);
|
||||
Values actual = gnc.optimize();
|
||||
|
||||
// compare
|
||||
CHECK(assert_equal(expected, actual, 1e-3)); // yay! we are robust to outliers!
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
|
@ -233,6 +233,26 @@ TEST( NonlinearFactor, linearize_constraint2 )
|
|||
CHECK(assert_equal((const GaussianFactor&)expected, *actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( NonlinearFactor, cloneWithNewNoiseModel )
|
||||
{
|
||||
// create original factor
|
||||
double sigma1 = 0.1;
|
||||
NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(sigma1);
|
||||
|
||||
// create expected
|
||||
double sigma2 = 10;
|
||||
NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(sigma2);
|
||||
|
||||
// create actual
|
||||
NonlinearFactorGraph actual;
|
||||
SharedNoiseModel noise2 = noiseModel::Isotropic::Sigma(2,sigma2);
|
||||
actual.push_back( boost::dynamic_pointer_cast<NoiseModelFactor>(nfg[0])->cloneWithNewNoiseModel(noise2) );
|
||||
|
||||
// check it's all good
|
||||
CHECK(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
class TestFactor4 : public NoiseModelFactor4<double, double, double, double> {
|
||||
public:
|
||||
|
|
|
@ -48,6 +48,19 @@ const double tol = 1e-5;
|
|||
using symbol_shorthand::X;
|
||||
using symbol_shorthand::L;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( NonlinearOptimizer, paramsEquals )
|
||||
{
|
||||
// default constructors lead to two identical params
|
||||
GaussNewtonParams gnParams1;
|
||||
GaussNewtonParams gnParams2;
|
||||
CHECK(gnParams1.equals(gnParams2));
|
||||
|
||||
// but the params become different if we change something in gnParams2
|
||||
gnParams2.setVerbosity("DELTA");
|
||||
CHECK(!gnParams1.equals(gnParams2));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( NonlinearOptimizer, iterateLM )
|
||||
{
|
||||
|
|
|
@ -1,2 +1,5 @@
__pycache__/
.vscode/
*build*
*dist*
*.egg-info

@ -0,0 +1,55 @@
|
|||
cmake_minimum_required(VERSION 3.9)
|
||||
|
||||
# Set the project name and version
|
||||
project(GTwrap VERSION 1.0)
|
||||
|
||||
# ##############################################################################
|
||||
# General configuration
|
||||
|
||||
set(WRAP_PYTHON_VERSION
|
||||
"Default"
|
||||
CACHE STRING "The Python version to use for wrapping")
|
||||
|
||||
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/GtwrapUtils.cmake)
|
||||
gtwrap_get_python_version(${WRAP_PYTHON_VERSION})
|
||||
|
||||
# ##############################################################################
|
||||
# Install the CMake file to be used by other projects
|
||||
if(WIN32 AND NOT CYGWIN)
|
||||
set(SCRIPT_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/CMake")
|
||||
else()
|
||||
set(SCRIPT_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib/cmake")
|
||||
endif()
|
||||
|
||||
# Install scripts to the standard CMake script directory.
|
||||
install(FILES cmake/gtwrapConfig.cmake cmake/PybindWrap.cmake
|
||||
cmake/GtwrapUtils.cmake
|
||||
DESTINATION "${SCRIPT_INSTALL_DIR}/gtwrap")
|
||||
|
||||
# Install wrapping scripts as binaries to `CMAKE_INSTALL_PREFIX/bin` so they can
|
||||
# be invoked for wrapping.
|
||||
install(PROGRAMS scripts/pybind_wrap.py scripts/matlab_wrap.py TYPE BIN)
|
||||
|
||||
# Install pybind11 directory to `CMAKE_INSTALL_PREFIX/lib/pybind11` This will
|
||||
# allow the gtwrapConfig.cmake file to load it later.
|
||||
install(DIRECTORY pybind11 TYPE LIB)
|
||||
|
||||
# ##############################################################################
|
||||
# Install the Python package
|
||||
find_package(
|
||||
Python ${WRAP_PYTHON_VERSION}
|
||||
COMPONENTS Interpreter
|
||||
REQUIRED)
|
||||
|
||||
# Detect virtualenv and set Pip args accordingly
|
||||
# https://www.scivision.dev/cmake-install-python-package/
|
||||
if(DEFINED ENV{VIRTUAL_ENV} OR DEFINED ENV{CONDA_PREFIX})
|
||||
set(_pip_args)
|
||||
else()
|
||||
set(_pip_args "--user")
|
||||
endif()
|
||||
#TODO add correct flags for virtualenv
|
||||
|
||||
# Finally install the gtwrap python package.
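# (Inside a virtualenv or conda environment pip is invoked with no extra flags;
# otherwise the package is installed with `--user`, per the check above.)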
|
||||
execute_process(COMMAND ${Python_EXECUTABLE} -m pip install . ${_pip_args}
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
|
|
@ -0,0 +1,13 @@
|
|||
Copyright (c) 2010, Georgia Tech Research Corporation
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -8,9 +8,40 @@ It was designed to be more general than just wrapping GTSAM. For notes on creati

1. This library uses `pybind11`, which is included as a subdirectory in GTSAM.
2. The `interface_parser.py` in this library uses `pyparsing` to parse the interface file `gtsam.h`. Please install it first in your current Python environment before attempting the build.

```
python3 -m pip install pyparsing
```

## Getting Started

Clone this repository to your local machine and perform the standard CMake install:

```sh
mkdir build && cd build
cmake ..
make install # use sudo if needed
```

Using `wrap` in your project is straightforward from here. In your `CMakeLists.txt` file, you just need to add the following:

```cmake
include(PybindWrap)

pybind_wrap(${PROJECT_NAME}_py # target
            ${PROJECT_SOURCE_DIR}/cpp/${PROJECT_NAME}.h # interface header file
            "${PROJECT_NAME}.cpp" # the generated cpp
            "${PROJECT_NAME}" # module_name
            "gtsam" # top namespace in the cpp file
            "${ignore}" # ignore classes
            ${PROJECT_BINARY_DIR}/${PROJECT_NAME}.tpl
            ${PROJECT_NAME} # libs
            "${PROJECT_NAME}" # dependencies
            ON # use boost
            )
```
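
Once the wrapped target is built, the generated module can be used from Python like any other extension module. Below is a minimal, hypothetical sketch (the project name `my_project` and the class `MyClass` are placeholders; the actual names come from your own interface header and the `pybind_wrap` call above):

```python
# Hypothetical usage of a module generated by the pybind_wrap call above.
# `my_project` is the module_name passed to pybind_wrap; `MyClass` is a class
# declared inside the wrapped top namespace of your interface header.
import my_project

obj = my_project.MyClass()
```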

For more information, please follow our [tutorial](https://github.com/borglab/gtsam-project-python).

## GTSAM Python wrapper

@ -45,32 +76,3 @@ It was designed to be more general than just wrapping GTSAM. For notes on creati
python setup.py install
```
- NOTE: It's a good idea to create a virtual environment, otherwise the package will be installed in your system Python's site-packages.


## Old GTSAM Wrapper

*Outdated note from the original wrap.*

TODO: Update this.

It was designed to be more general than just wrapping GTSAM, but a small amount of GTSAM-specific code exists in `matlab.h`, the include file that is included by the `mex` files. The GTSAM-specific functionality consists primarily of handling of Eigen Matrix and Vector classes.

For notes on creating a wrap interface, see `gtsam.h` for what features can be wrapped into a toolbox, as well as the current state of the toolbox for GTSAM. For more technical details on the interface, please read the comments in `matlab.h`.

Some good things to know:

OBJECT CREATION

- Classes are created by special constructors, e.g., `new_GaussianFactorGraph_.cpp`.
  These constructors are called from the MATLAB class `@GaussianFactorGraph`.
  `new_GaussianFactorGraph_` calls wrap_constructed in `matlab.h`; see the documentation there.

METHOD (AND CONSTRUCTOR) ARGUMENTS

- Simple argument types of methods, such as "double", will be converted in the
  `mex` wrappers by calling unwrap<double>, defined in matlab.h.
- Vector and Matrix arguments are normally passed by reference in GTSAM, but
  in `gtsam.h` you need to pretend they are passed by value, to trigger the
  generation of the correct conversion routines `unwrap<Vector>` and `unwrap<Matrix>`.
- Passing classes as arguments works, provided they are passed by reference.
  This triggers a call to unwrap_shared_ptr.

|
@ -0,0 +1,74 @@
|
|||
# Utilities to help with wrapping.
|
||||
|
||||
macro(get_python_version)
|
||||
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
|
||||
# Use older version of cmake's find_python
|
||||
find_package(PythonInterp)
|
||||
|
||||
if(NOT ${PYTHONINTERP_FOUND})
|
||||
message(
|
||||
FATAL_ERROR
|
||||
"Cannot find Python interpreter. Please install Python >= 3.6.")
|
||||
endif()
|
||||
|
||||
find_package(PythonLibs ${PYTHON_VERSION_STRING})
|
||||
|
||||
set(Python_VERSION_MAJOR
|
||||
${PYTHON_VERSION_MAJOR}
|
||||
PARENT_SCOPE)
|
||||
set(Python_VERSION_MINOR
|
||||
${PYTHON_VERSION_MINOR}
|
||||
PARENT_SCOPE)
|
||||
set(Python_VERSION_PATCH
|
||||
${PYTHON_VERSION_PATCH}
|
||||
PARENT_SCOPE)
|
||||
set(Python_EXECUTABLE
|
||||
${PYTHON_EXECUTABLE}
|
||||
PARENT_SCOPE)
|
||||
|
||||
else()
|
||||
# Get info about the Python interpreter
|
||||
# https://cmake.org/cmake/help/latest/module/FindPython.html#module:FindPython
|
||||
find_package(Python COMPONENTS Interpreter Development)
|
||||
|
||||
if(NOT ${Python_FOUND})
|
||||
message(
|
||||
FATAL_ERROR
|
||||
"Cannot find Python interpreter. Please install Python>=3.6.")
|
||||
endif()
|
||||
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
# Set the Python version for the wrapper and set the paths to the executable and
|
||||
# include/library directories. WRAP_PYTHON_VERSION can be "Default" or a
|
||||
# specific major.minor version.
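# For example, configuring with -DWRAP_PYTHON_VERSION=3.6 selects that exact
# interpreter, while "Default" picks up whatever Python is found first.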
|
||||
macro(gtwrap_get_python_version WRAP_PYTHON_VERSION)
|
||||
# Unset these cached variables to avoid surprises when the python in the
|
||||
# current environment are different from the cached!
|
||||
unset(Python_EXECUTABLE CACHE)
|
||||
unset(Python_INCLUDE_DIRS CACHE)
|
||||
unset(Python_VERSION_MAJOR CACHE)
|
||||
unset(Python_VERSION_MINOR CACHE)
|
||||
unset(Python_VERSION_PATCH CACHE)
|
||||
|
||||
# Allow override
|
||||
if(${WRAP_PYTHON_VERSION} STREQUAL "Default")
|
||||
# Check for Python3 or Python2 in order
|
||||
get_python_version()
|
||||
|
||||
# Set the wrapper python version
|
||||
set(WRAP_PYTHON_VERSION
|
||||
"${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}.${Python_VERSION_PATCH}"
|
||||
CACHE STRING "The version of Python to build the wrappers against."
|
||||
FORCE)
|
||||
|
||||
else()
|
||||
# Find the Python that best matches the python version specified.
|
||||
find_package(
|
||||
Python ${WRAP_PYTHON_VERSION}
|
||||
COMPONENTS Interpreter Development
|
||||
EXACT REQUIRED)
|
||||
endif()
|
||||
|
||||
endmacro()
|
|
@ -1,32 +1,5 @@
|
|||
# Unset these cached variables to avoid surprises when the python in the current
|
||||
# environment are different from the cached!
|
||||
unset(PYTHON_EXECUTABLE CACHE)
|
||||
unset(PYTHON_INCLUDE_DIR CACHE)
|
||||
unset(PYTHON_MAJOR_VERSION CACHE)
|
||||
|
||||
# Allow override from command line
|
||||
if(NOT DEFINED WRAP_USE_CUSTOM_PYTHON_LIBRARY)
|
||||
if(WRAP_PYTHON_VERSION STREQUAL "Default")
|
||||
find_package(PythonInterp REQUIRED)
|
||||
find_package(PythonLibs REQUIRED)
|
||||
else()
|
||||
find_package(PythonInterp
|
||||
${WRAP_PYTHON_VERSION}
|
||||
EXACT
|
||||
REQUIRED)
|
||||
find_package(PythonLibs
|
||||
${WRAP_PYTHON_VERSION}
|
||||
EXACT
|
||||
REQUIRED)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(DIR_OF_WRAP_PYBIND_CMAKE ${CMAKE_CURRENT_LIST_DIR})
|
||||
|
||||
set(PYBIND11_PYTHON_VERSION ${WRAP_PYTHON_VERSION})
|
||||
|
||||
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../pybind11 pybind11)
|
||||
|
||||
# User-friendly Pybind11 wrapping and installing function.
|
||||
# Builds a Pybind11 module from the provided interface_header.
|
||||
# For example, for the interface header gtsam.h, this will
|
||||
|
@ -65,7 +38,7 @@ function(pybind_wrap
|
|||
|
||||
add_custom_command(OUTPUT ${generated_cpp}
|
||||
COMMAND ${PYTHON_EXECUTABLE}
|
||||
${CMAKE_SOURCE_DIR}/wrap/pybind_wrapper.py
|
||||
${PYBIND_WRAP_SCRIPT}
|
||||
--src
|
||||
${interface_header}
|
||||
--out
|
||||
|
@ -89,9 +62,9 @@ function(pybind_wrap
|
|||
# ~~~
|
||||
add_custom_command(OUTPUT ${generated_cpp}
|
||||
DEPENDS ${interface_header}
|
||||
${CMAKE_SOURCE_DIR}/wrap/interface_parser.py
|
||||
${CMAKE_SOURCE_DIR}/wrap/pybind_wrapper.py
|
||||
${CMAKE_SOURCE_DIR}/wrap/template_instantiator.py
|
||||
# @GTWRAP_SOURCE_DIR@/gtwrap/interface_parser.py
|
||||
# @GTWRAP_SOURCE_DIR@/gtwrap/pybind_wrapper.py
|
||||
# @GTWRAP_SOURCE_DIR@/gtwrap/template_instantiator.py
|
||||
APPEND)
|
||||
|
||||
pybind11_add_module(${target} ${generated_cpp})
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
# This config file modifies CMAKE_MODULE_PATH so that the wrap cmake files may
|
||||
# be included This file also allows the use of `find_package(gtwrap)` in CMake.
|
||||
|
||||
set(GTWRAP_DIR "${CMAKE_CURRENT_LIST_DIR}")
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}")
|
||||
|
||||
if(WIN32 AND NOT CYGWIN)
|
||||
set(SCRIPT_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/CMake")
|
||||
else()
|
||||
set(SCRIPT_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib/cmake")
|
||||
endif()
|
||||
|
||||
# Standard includes
|
||||
include(GNUInstallDirs)
|
||||
include(CMakePackageConfigHelpers)
|
||||
include(CMakeDependentOption)
|
||||
|
||||
# Load all the CMake scripts from the standard location
|
||||
include(${SCRIPT_INSTALL_DIR}/gtwrap/PybindWrap.cmake)
|
||||
include(${SCRIPT_INSTALL_DIR}/gtwrap/GtwrapUtils.cmake)
|
||||
|
||||
# Set the variables for the wrapping scripts to be used in the build.
|
||||
set(PYBIND_WRAP_SCRIPT "${CMAKE_INSTALL_FULL_BINDIR}/pybind_wrap.py")
|
||||
set(MATLAB_WRAP_SCRIPT "${CMAKE_INSTALL_FULL_BINDIR}/matlab_wrap.py")
|
||||
|
||||
# Load the pybind11 code from the library installation path
|
||||
add_subdirectory(${CMAKE_INSTALL_FULL_LIBDIR}/pybind11 pybind11)
|
|
@ -2,8 +2,8 @@ import os
|
|||
import argparse
|
||||
import textwrap
|
||||
|
||||
import interface_parser as parser
|
||||
import template_instantiator as instantiator
|
||||
import gtwrap.interface_parser as parser
|
||||
import gtwrap.template_instantiator as instantiator
|
||||
|
||||
from functools import reduce
|
||||
from functools import partial
|
||||
|
@ -1666,7 +1666,7 @@ class MatlabWrapper(object):
|
|||
return self.content
|
||||
|
||||
|
||||
def _generate_content(cc_content, path, verbose=False):
|
||||
def generate_content(cc_content, path, verbose=False):
|
||||
"""Generate files and folders from matlab wrapper content.
|
||||
|
||||
Keyword arguments:
|
||||
|
@ -1698,7 +1698,7 @@ def _generate_content(cc_content, path, verbose=False):
|
|||
for sub_content in c:
|
||||
import sys
|
||||
_debug("sub object: {}".format(sub_content[1][0][0]))
|
||||
_generate_content(sub_content[1], path_to_folder)
|
||||
generate_content(sub_content[1], path_to_folder)
|
||||
elif type(c[1]) == list:
|
||||
path_to_folder = path + '/' + c[0]
|
||||
|
||||
|
@@ -1726,53 +1726,3 @@ def _generate_content(cc_content, path, verbose=False):

            with open(path_to_file, 'w') as f:
                f.write(c[1])


if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument("--src", type=str, required=True, help="Input interface .h file.")
    arg_parser.add_argument("--module_name", type=str, required=True, help="Name of the C++ class being wrapped.")
    arg_parser.add_argument("--out", type=str, required=True, help="Name of the output folder.")
    arg_parser.add_argument("--top_module_namespaces",
                            type=str,
                            default="",
                            help="C++ namespace for the top module, e.g. `ns1::ns2::ns3`. "
                            "Only the content within this namespace and its sub-namespaces "
                            "will be wrapped. The content of this namespace will be available at "
                            "the top module level, and its sub-namespaces' in the submodules.\n"
                            "For example, `import <module_name>` gives you access to a Python "
                            "`<module_name>.Class` of the corresponding C++ `ns1::ns2::ns3::Class`"
                            ", and `from <module_name> import ns4` gives you access to a Python "
                            "`ns4.Class` of the C++ `ns1::ns2::ns3::ns4::Class`. ")
    arg_parser.add_argument("--ignore",
                            nargs='*',
                            type=str,
                            help="A space-separated list of classes to ignore. "
                            "Class names must include their full namespaces.")
    args = arg_parser.parse_args()

    top_module_namespaces = args.top_module_namespaces.split("::")
    if top_module_namespaces[0]:
        top_module_namespaces = [''] + top_module_namespaces

    with open(args.src, 'r') as f:
        content = f.read()

    if not os.path.exists(args.src):
        os.mkdir(args.src)

    module = parser.Module.parseString(content)

    instantiator.instantiate_namespace_inplace(module)

    import sys

    print("Ignoring classes: {}".format(args.ignore), file=sys.stderr)
    wrapper = MatlabWrapper(module=module,
                            module_name=args.module_name,
                            top_module_namespace=top_module_namespaces,
                            ignore_classes=args.ignore)

    cc_content = wrapper.wrap()

    _generate_content(cc_content, args.out)
@@ -9,12 +9,11 @@ See LICENSE for the license information
Code generator for wrapping a C++ module with Pybind11
Author: Duy Nguyen Ta, Fan Jiang, Matthew Sklar and Frank Dellaert
"""
import argparse
import re
import textwrap

import interface_parser as parser
import template_instantiator as instantiator
import gtwrap.interface_parser as parser
import gtwrap.template_instantiator as instantiator


class PybindWrapper(object):
@@ -319,76 +318,3 @@ class PybindWrapper(object):
            wrapped_namespace=wrapped_namespace,
            boost_class_export=boost_class_export,
        )


def main():
    arg_parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument("--src", type=str, required=True, help="Input interface .h file")
    arg_parser.add_argument(
        "--module_name",
        type=str,
        required=True,
        help="Name of the Python module to be generated and "
        "used in the Python `import` statement.",
    )
    arg_parser.add_argument(
        "--out",
        type=str,
        required=True,
        help="Name of the output pybind .cc file",
    )
    arg_parser.add_argument(
        "--use-boost",
        action="store_true",
        help="using boost's shared_ptr instead of std's",
    )
    arg_parser.add_argument(
        "--top_module_namespaces",
        type=str,
        default="",
        help="C++ namespace for the top module, e.g. `ns1::ns2::ns3`. "
        "Only the content within this namespace and its sub-namespaces "
        "will be wrapped. The content of this namespace will be available at "
        "the top module level, and its sub-namespaces' in the submodules.\n"
        "For example, `import <module_name>` gives you access to a Python "
        "`<module_name>.Class` of the corresponding C++ `ns1::ns2::ns3::Class`"
        "and `from <module_name> import ns4` gives you access to a Python "
        "`ns4.Class` of the C++ `ns1::ns2::ns3::ns4::Class`. ",
    )
    arg_parser.add_argument(
        "--ignore",
        nargs='*',
        type=str,
        help="A space-separated list of classes to ignore. "
        "Class names must include their full namespaces.",
    )
    arg_parser.add_argument("--template", type=str, help="The module template file")
    args = arg_parser.parse_args()

    top_module_namespaces = args.top_module_namespaces.split("::")
    if top_module_namespaces[0]:
        top_module_namespaces = [''] + top_module_namespaces

    with open(args.src, "r") as f:
        content = f.read()
    module = parser.Module.parseString(content)
    instantiator.instantiate_namespace_inplace(module)

    with open(args.template, "r") as f:
        template_content = f.read()
    wrapper = PybindWrapper(
        module=module,
        module_name=args.module_name,
        use_boost=args.use_boost,
        top_module_namespaces=top_module_namespaces,
        ignore_classes=args.ignore,
        module_template=template_content,
    )

    cc_content = wrapper.wrap()
    with open(args.out, "w") as f:
        f.write(cc_content)


if __name__ == "__main__":
    main()
@@ -1,4 +1,4 @@
import interface_parser as parser
import gtwrap.interface_parser as parser


def instantiate_type(ctype, template_typenames, instantiations, cpp_typename, instantiated_class=None):
@@ -0,0 +1,68 @@
#!/usr/bin/env python3

"""
Helper script to wrap C++ to Matlab.
This script is installed via CMake to the user's binary directory
and invoked during the wrapping by CMake.
"""

import argparse
import os

import gtwrap.interface_parser as parser
import gtwrap.template_instantiator as instantiator
from gtwrap.matlab_wrapper import MatlabWrapper, generate_content

if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument("--src", type=str, required=True,
                            help="Input interface .h file.")
    arg_parser.add_argument("--module_name", type=str, required=True,
                            help="Name of the C++ class being wrapped.")
    arg_parser.add_argument("--out", type=str, required=True,
                            help="Name of the output folder.")
    arg_parser.add_argument(
        "--top_module_namespaces",
        type=str,
        default="",
        help="C++ namespace for the top module, e.g. `ns1::ns2::ns3`. "
        "Only the content within this namespace and its sub-namespaces "
        "will be wrapped. The content of this namespace will be available at "
        "the top module level, and its sub-namespaces' in the submodules.\n"
        "For example, `import <module_name>` gives you access to a Python "
        "`<module_name>.Class` of the corresponding C++ `ns1::ns2::ns3::Class`"
        ", and `from <module_name> import ns4` gives you access to a Python "
        "`ns4.Class` of the C++ `ns1::ns2::ns3::ns4::Class`. ")
    arg_parser.add_argument("--ignore",
                            nargs='*',
                            type=str,
                            help="A space-separated list of classes to ignore. "
                            "Class names must include their full namespaces.")
    args = arg_parser.parse_args()

    top_module_namespaces = args.top_module_namespaces.split("::")
    if top_module_namespaces[0]:
        top_module_namespaces = [''] + top_module_namespaces

    with open(args.src, 'r') as f:
        content = f.read()

    if not os.path.exists(args.src):
        os.mkdir(args.src)

    module = parser.Module.parseString(content)

    instantiator.instantiate_namespace_inplace(module)

    import sys

    print("Ignoring classes: {}".format(args.ignore), file=sys.stderr)
    wrapper = MatlabWrapper(module=module,
                            module_name=args.module_name,
                            top_module_namespace=top_module_namespaces,
                            ignore_classes=args.ignore)

    cc_content = wrapper.wrap()

    generate_content(cc_content, args.out)
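For reference, a minimal sketch (not part of this diff) of driving the installed matlab_wrap.py helper by hand; in a normal build, CMake issues this call through the MATLAB_WRAP_SCRIPT variable set above. The install prefix, interface header, module name, output folder, and ignored classes below are hypothetical placeholders.

import subprocess
import sys

# Hypothetical paths and arguments; substitute whatever CMAKE_INSTALL_FULL_BINDIR
# and your project actually use.
subprocess.run(
    [sys.executable, "/usr/local/bin/matlab_wrap.py",
     "--src", "gtsam.h",
     "--module_name", "gtsam",
     "--out", "build/matlab_wrapper",
     "--top_module_namespaces", "gtsam",
     "--ignore", "gtsam::Point2", "gtsam::Point3"],
    check=True,
)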
@@ -0,0 +1,93 @@
#!/usr/bin/env python3

"""
Helper script to wrap C++ to Python with Pybind.
This script is installed via CMake to the user's binary directory
and invoked during the wrapping by CMake.
"""

import argparse

import gtwrap.interface_parser as parser
import gtwrap.template_instantiator as instantiator
from gtwrap.pybind_wrapper import PybindWrapper


def main():
    """Main runner."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--src",
        type=str,
        required=True,
        help="Input interface .i/.h file")
    arg_parser.add_argument(
        "--module_name",
        type=str,
        required=True,
        help="Name of the Python module to be generated and "
        "used in the Python `import` statement.",
    )
    arg_parser.add_argument(
        "--out",
        type=str,
        required=True,
        help="Name of the output pybind .cc file",
    )
    arg_parser.add_argument(
        "--use-boost",
        action="store_true",
        help="using boost's shared_ptr instead of std's",
    )
    arg_parser.add_argument(
        "--top_module_namespaces",
        type=str,
        default="",
        help="C++ namespace for the top module, e.g. `ns1::ns2::ns3`. "
        "Only the content within this namespace and its sub-namespaces "
        "will be wrapped. The content of this namespace will be available at "
        "the top module level, and its sub-namespaces' in the submodules.\n"
        "For example, `import <module_name>` gives you access to a Python "
        "`<module_name>.Class` of the corresponding C++ `ns1::ns2::ns3::Class`"
        "and `from <module_name> import ns4` gives you access to a Python "
        "`ns4.Class` of the C++ `ns1::ns2::ns3::ns4::Class`. ",
    )
    arg_parser.add_argument(
        "--ignore",
        nargs='*',
        type=str,
        help="A space-separated list of classes to ignore. "
        "Class names must include their full namespaces.",
    )
    arg_parser.add_argument("--template", type=str,
                            help="The module template file")
    args = arg_parser.parse_args()

    top_module_namespaces = args.top_module_namespaces.split("::")
    if top_module_namespaces[0]:
        top_module_namespaces = [''] + top_module_namespaces

    with open(args.src, "r") as f:
        content = f.read()
    module = parser.Module.parseString(content)
    instantiator.instantiate_namespace_inplace(module)

    with open(args.template, "r") as f:
        template_content = f.read()
    wrapper = PybindWrapper(
        module=module,
        module_name=args.module_name,
        use_boost=args.use_boost,
        top_module_namespaces=top_module_namespaces,
        ignore_classes=args.ignore,
        module_template=template_content,
    )

    cc_content = wrapper.wrap()
    with open(args.out, "w") as f:
        f.write(cc_content)


if __name__ == "__main__":
    main()
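The same pipeline can also be driven as a direct library call, which is all pybind_wrap.py does under the hood. A minimal sketch, assuming the gtwrap package is installed; the interface file, template file, and output name are hypothetical placeholders.

import gtwrap.interface_parser as parser
import gtwrap.template_instantiator as instantiator
from gtwrap.pybind_wrapper import PybindWrapper

# Parse a (hypothetical) interface file and instantiate its templates.
with open("gtsam.i", "r") as f:
    module = parser.Module.parseString(f.read())
instantiator.instantiate_namespace_inplace(module)

# Hypothetical module template file.
with open("pybind_wrapper.tpl", "r") as f:
    template_content = f.read()

wrapper = PybindWrapper(module=module,
                        module_name="gtsam",
                        use_boost=True,
                        top_module_namespaces=["", "gtsam"],
                        ignore_classes=[],
                        module_template=template_content)

# Write the generated pybind11 source to a hypothetical output file.
with open("gtsam_pybind.cc", "w") as f:
    f.write(wrapper.wrap())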
@@ -0,0 +1,36 @@
"""Setup file for the GTwrap package"""

try:
    from setuptools import find_packages, setup
except ImportError:
    from distutils.core import find_packages, setup

packages = find_packages()

setup(
    name='gtwrap',
    description='Library to wrap C++ with Python and Matlab',
    version='1.1.0',
    author="Frank Dellaert et. al.",
    author_email="dellaert@gatech.edu",
    license='BSD',
    keywords="wrap, bindings, cpp, python",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    python_requires=">=3.6",
    # https://pypi.org/classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Education',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries'
    ],
    packages=packages,
    platforms="any",
    install_requires=open("requirements.txt").readlines(),
)
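An illustrative sanity check, assuming the package has been installed (for example with `python -m pip install .` from the wrap directory): the gtwrap modules used by the helper scripts above should then resolve without any sys.path manipulation.

import importlib

# Modules provided by the gtwrap package, as imported by the scripts above.
for name in ("gtwrap.interface_parser",
             "gtwrap.template_instantiator",
             "gtwrap.matlab_wrapper",
             "gtwrap.pybind_wrapper"):
    mod = importlib.import_module(name)
    print(name, "->", mod.__file__)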
@@ -4,7 +4,7 @@ import unittest
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from interface_parser import *
from gtwrap.interface_parser import *


class TestPyparsing(unittest.TestCase):
@@ -10,9 +10,9 @@ import filecmp

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import template_instantiator as instantiator
import interface_parser as parser
from matlab_wrapper import MatlabWrapper
import gtwrap.template_instantiator as instantiator
import gtwrap.interface_parser as parser
from gtwrap.matlab_wrapper import MatlabWrapper


class TestWrap(unittest.TestCase):
@@ -20,7 +20,7 @@ class TestWrap(unittest.TestCase):
    MATLAB_TEST_DIR = TEST_DIR + "expected-matlab/"
    MATLAB_ACTUAL_DIR = TEST_DIR + "actual-matlab/"

    def _generate_content(self, cc_content, path=''):
    def generate_content(self, cc_content, path=''):
        """Generate files and folders from matlab wrapper content.

        Keyword arguments:
@@ -48,7 +48,7 @@ class TestWrap(unittest.TestCase):
                for sub_content in c:
                    import sys
                    print("sub object: {}".format(sub_content[1][0][0]), file=sys.stderr)
                    self._generate_content(sub_content[1], path_to_folder)
                    self.generate_content(sub_content[1], path_to_folder)
            elif type(c[1]) == list:
                path_to_folder = path + '/' + c[0]

@@ -104,7 +104,7 @@ class TestWrap(unittest.TestCase):

        cc_content = wrapper.wrap()

        self._generate_content(cc_content)
        self.generate_content(cc_content)

        def compare_and_diff(file):
            output = self.MATLAB_ACTUAL_DIR + file
@@ -14,9 +14,9 @@ import os.path as path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.normpath(os.path.abspath(os.path.join(__file__, '../../../build/wrap'))))

from pybind_wrapper import PybindWrapper
import interface_parser as parser
import template_instantiator as instantiator
from gtwrap.pybind_wrapper import PybindWrapper
import gtwrap.interface_parser as parser
import gtwrap.template_instantiator as instantiator

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))