Merge pull request #1407 from borglab/feature/deprecate_boost
commit e9e794b7cd

@@ -43,9 +43,9 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const auto key_value : *initial) {
+  for (const auto key : initial->keys()) {
     std::cout << "Adding prior to g2o file " << std::endl;
-    firstKey = key_value.key;
+    firstKey = key;
     graph->addPrior(firstKey, Pose3(), priorModel);
     break;
   }
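
Note: this migration pattern recurs throughout the PR: range-for over a `Values` object (deprecated along with its boost transform iterators, see the Values.h hunks below) becomes a loop over `keys()`. A minimal sketch of the new style, assuming a hypothetical `initial` container holding `Pose3` values:

    #include <gtsam/geometry/Pose3.h>
    #include <gtsam/nonlinear/Values.h>

    void keyLoopSketch(const gtsam::Values& initial) {
      // Old (deprecated): for (const auto key_value : initial) { ... key_value.key ... }
      // New: iterate the keys and fetch typed values on demand.
      for (const gtsam::Key key : initial.keys()) {
        const gtsam::Pose3& pose = initial.at<gtsam::Pose3>(key);
        (void)pose;  // use the pose here
      }
    }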
@@ -74,10 +74,8 @@ int main(const int argc, const char* argv[]) {
 
   // Calculate and print marginal covariances for all variables
   Marginals marginals(*graph, result);
-  for (const auto key_value : result) {
-    auto p = dynamic_cast<const GenericValue<Pose3>*>(&key_value.value);
-    if (!p) continue;
-    std::cout << marginals.marginalCovariance(key_value.key) << endl;
+  for (const auto& key_pose : result.extract<Pose3>()) {
+    std::cout << marginals.marginalCovariance(key_pose.first) << endl;
   }
   return 0;
 }
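
Note: where the old loops screened every entry through a `dynamic_cast`, the new code asks `Values` for all entries of one type at once: `extract<T>()` returns a `std::map<Key, T>` (declared in the Values.h hunks below). Sketch, assuming a solved `result`:

    #include <gtsam/geometry/Pose3.h>
    #include <gtsam/nonlinear/Values.h>

    void extractSketch(const gtsam::Values& result) {
      // extract<Pose3>() returns std::map<Key, Pose3> holding only the Pose3 entries.
      for (const auto& key_pose : result.extract<gtsam::Pose3>()) {
        const gtsam::Key key = key_pose.first;
        const gtsam::Pose3& pose = key_pose.second;
        (void)key; (void)pose;  // e.g. query marginals.marginalCovariance(key)
      }
    }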
@@ -55,14 +55,14 @@ int main(const int argc, const char *argv[]) {
   std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl;
   // Additional: rewrite input with simplified keys 0,1,...
   Values simpleInitial;
-  for(const auto key_value: *initial) {
+  for (const auto k : initial->keys()) {
     Key key;
-    if(add)
-      key = key_value.key + firstKey;
+    if (add)
+      key = k + firstKey;
     else
-      key = key_value.key - firstKey;
+      key = k - firstKey;
 
-    simpleInitial.insert(key, initial->at(key_value.key));
+    simpleInitial.insert(key, initial->at(k));
   }
   NonlinearFactorGraph simpleGraph;
   for(const boost::shared_ptr<NonlinearFactor>& factor: *graph) {

@@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const auto key_value : *initial) {
+  for (const auto key : initial->keys()) {
     std::cout << "Adding prior to g2o file " << std::endl;
-    firstKey = key_value.key;
+    firstKey = key;
     graph->addPrior(firstKey, Pose3(), priorModel);
     break;
   }

@@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const auto key_value : *initial) {
+  for (const auto key : initial->keys()) {
     std::cout << "Adding prior to g2o file " << std::endl;
-    firstKey = key_value.key;
+    firstKey = key;
     graph->addPrior(firstKey, Pose3(), priorModel);
     break;
   }

@@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const auto key_value : *initial) {
+  for (const auto key : initial->keys()) {
     std::cout << "Adding prior to g2o file " << std::endl;
-    firstKey = key_value.key;
+    firstKey = key;
     graph->addPrior(firstKey, Pose3(), priorModel);
     break;
   }

@@ -559,12 +559,12 @@ void runPerturb()
 
   // Perturb values
   VectorValues noise;
-  for(const Values::KeyValuePair key_val: initial)
+  for(const auto& key_dim: initial.dims())
   {
-    Vector noisev(key_val.value.dim());
+    Vector noisev(key_dim.second);
     for(Vector::Index i = 0; i < noisev.size(); ++i)
      noisev(i) = normal(rng);
-    noise.insert(key_val.key, noisev);
+    noise.insert(key_dim.first, noisev);
   }
   Values perturbed = initial.retract(noise);
 
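
Note: when a loop needs only the dimension of each variable, the PR's new `Values::dims()` (declared further down in Values.h) supplies `{Key, size_t}` pairs directly. Sketch of the same idea with zero vectors instead of random noise (this is essentially what the reworked `Values::zeroVectors()` below does internally):

    #include <gtsam/linear/VectorValues.h>
    #include <gtsam/nonlinear/Values.h>

    gtsam::VectorValues zeroDelta(const gtsam::Values& initial) {
      gtsam::VectorValues delta;
      // dims() returns std::map<Key, size_t> of tangent-space dimensions.
      for (const auto& key_dim : initial.dims())
        delta.insert(key_dim.first, gtsam::Vector::Zero(key_dim.second));
      return delta;
    }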
@@ -24,6 +24,12 @@
 
 namespace gtsam {
 
+/* ************************************************************************* */
+SymmetricBlockMatrix::SymmetricBlockMatrix() : blockStart_(0) {
+  variableColOffsets_.push_back(0);
+  assertInvariants();
+}
+
 /* ************************************************************************* */
 SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(
     const SymmetricBlockMatrix& other) {

@@ -61,6 +67,18 @@ Matrix SymmetricBlockMatrix::block(DenseIndex I, DenseIndex J) const {
   }
 }
 
+/* ************************************************************************* */
+void SymmetricBlockMatrix::negate() {
+  full().triangularView<Eigen::Upper>() *= -1.0;
+}
+
+/* ************************************************************************* */
+void SymmetricBlockMatrix::invertInPlace() {
+  const auto identity = Matrix::Identity(rows(), rows());
+  full().triangularView<Eigen::Upper>() =
+      selfadjointView().llt().solve(identity).triangularView<Eigen::Upper>();
+}
+
 /* ************************************************************************* */
 void SymmetricBlockMatrix::choleskyPartial(DenseIndex nFrontals) {
   gttic(VerticalBlockMatrix_choleskyPartial);

@@ -63,12 +63,7 @@ namespace gtsam {
 
  public:
   /// Construct from an empty matrix (asserts that the matrix is empty)
-  SymmetricBlockMatrix() :
-      blockStart_(0)
-  {
-    variableColOffsets_.push_back(0);
-    assertInvariants();
-  }
+  SymmetricBlockMatrix();
 
   /// Construct from a container of the sizes of each block.
   template<typename CONTAINER>

@@ -265,19 +260,10 @@ namespace gtsam {
   }
 
   /// Negate the entire active matrix.
-  void negate() {
-    full().triangularView<Eigen::Upper>() *= -1.0;
-  }
+  void negate();
 
   /// Invert the entire active matrix in place.
-  void invertInPlace() {
-    const auto identity = Matrix::Identity(rows(), rows());
-    full().triangularView<Eigen::Upper>() =
-        selfadjointView()
-            .llt()
-            .solve(identity)
-            .triangularView<Eigen::Upper>();
-  }
+  void invertInPlace();
 
   /// @}
 
@@ -45,7 +45,7 @@ typedef std::vector<Point2, Eigen::aligned_allocator<Point2> > Point2Vector;
 
 /// multiply with scalar
 inline Point2 operator*(double s, const Point2& p) {
-  return p * s;
+  return Point2(s * p.x(), s * p.y());
 }
 
 /*

@@ -158,6 +158,12 @@ bool Pose3::equals(const Pose3& pose, double tol) const {
   return R_.equals(pose.R_, tol) && traits<Point3>::Equals(t_, pose.t_, tol);
 }
 
+/* ************************************************************************* */
+Pose3 Pose3::interpolateRt(const Pose3& T, double t) const {
+  return Pose3(interpolate<Rot3>(R_, T.R_, t),
+               interpolate<Point3>(t_, T.t_, t));
+}
+
 /* ************************************************************************* */
 /** Modified from Murray94book version (which assumes w and v normalized?) */
 Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi) {
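
Note: `interpolateRt` (moved out-of-line here; declaration hunk below) blends rotation and translation independently, which is not the same path as interpolating along the SE(3) geodesic via `Expmap`/`Logmap`. Usage sketch:

    #include <gtsam/geometry/Pose3.h>

    gtsam::Pose3 halfway(const gtsam::Pose3& T1, const gtsam::Pose3& T2) {
      // Rotation interpolated on SO(3), translation interpolated linearly.
      return T1.interpolateRt(T2, 0.5);  // t in [0, 1]
    }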
@@ -129,10 +129,7 @@ public:
    * @param T End point of interpolation.
    * @param t A value in [0, 1].
    */
-  Pose3 interpolateRt(const Pose3& T, double t) const {
-    return Pose3(interpolate<Rot3>(R_, T.R_, t),
-                 interpolate<Point3>(t_, T.t_, t));
-  }
+  Pose3 interpolateRt(const Pose3& T, double t) const;
 
   /// @}
   /// @name Lie Group

@@ -32,6 +32,14 @@ using namespace std;
 
 namespace gtsam {
 
+/* ************************************************************************* */
+Unit3::Unit3(const Vector3& p) : p_(p.normalized()) {}
+
+Unit3::Unit3(double x, double y, double z) : p_(x, y, z) { p_.normalize(); }
+
+Unit3::Unit3(const Point2& p, double f) : p_(p.x(), p.y(), f) {
+  p_.normalize();
+}
 /* ************************************************************************* */
 Unit3 Unit3::FromPoint3(const Point3& point, OptionalJacobian<2, 3> H) {
   // 3*3 Derivative of representation with respect to point is 3*3:

@@ -67,21 +67,14 @@ public:
   }
 
   /// Construct from point
-  explicit Unit3(const Vector3& p) :
-      p_(p.normalized()) {
-  }
+  explicit Unit3(const Vector3& p);
 
   /// Construct from x,y,z
-  Unit3(double x, double y, double z) :
-      p_(x, y, z) {
-    p_.normalize();
-  }
+  Unit3(double x, double y, double z);
 
   /// Construct from 2D point in plane at focal length f
   /// Unit3(p,1) can be viewed as normalized homogeneous coordinates of 2D point
-  explicit Unit3(const Point2& p, double f) : p_(p.x(), p.y(), f) {
-    p_.normalize();
-  }
+  explicit Unit3(const Point2& p, double f);
 
   /// Copy constructor
   Unit3(const Unit3& u) {
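
Note: the three `Unit3` constructors above only moved from the header into Unit3.cpp; each still normalizes its input. Quick usage sketch:

    #include <gtsam/geometry/Unit3.h>

    void unit3Sketch() {
      const gtsam::Unit3 a(gtsam::Vector3(3.0, 0.0, 4.0));  // normalized to unit norm
      const gtsam::Unit3 b(0.0, 0.0, 2.0);                  // likewise
      const gtsam::Unit3 c(gtsam::Point2(1.0, 2.0), 1.0);   // from (x, y, f) homogeneous
      (void)a; (void)b; (void)c;
    }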
@@ -102,7 +102,7 @@ class GTSAM_EXPORT HybridValues {
 
   /// Check whether a variable with key \c j exists in values.
   bool existsNonlinear(Key j) {
-    return (nonlinear_.find(j) != nonlinear_.end());
+    return nonlinear_.exists(j);
   };
 
   /// Check whether a variable with key \c j exists.

@@ -129,10 +129,7 @@ TEST(HybridBayesNet, evaluateHybrid) {
 TEST(HybridBayesNet, Choose) {
   Switching s(4);
 
-  Ordering ordering;
-  for (auto&& kvp : s.linearizationPoint) {
-    ordering += kvp.key;
-  }
+  const Ordering ordering(s.linearizationPoint.keys());
 
   HybridBayesNet::shared_ptr hybridBayesNet;
   HybridGaussianFactorGraph::shared_ptr remainingFactorGraph;
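
Note: `Ordering` accepts a key container directly, so the accumulate-by-`+=` loop collapses to a one-liner. Sketch:

    #include <gtsam/inference/Ordering.h>
    #include <gtsam/nonlinear/Values.h>

    gtsam::Ordering naturalOrdering(const gtsam::Values& values) {
      // keys() returns an ordered KeyVector; Ordering has a container constructor.
      return gtsam::Ordering(values.keys());
    }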
@@ -163,10 +160,7 @@ TEST(HybridBayesNet, Choose) {
 TEST(HybridBayesNet, OptimizeAssignment) {
   Switching s(4);
 
-  Ordering ordering;
-  for (auto&& kvp : s.linearizationPoint) {
-    ordering += kvp.key;
-  }
+  const Ordering ordering(s.linearizationPoint.keys());
 
   HybridBayesNet::shared_ptr hybridBayesNet;
   HybridGaussianFactorGraph::shared_ptr remainingFactorGraph;

@@ -40,6 +40,10 @@ Vector Base::weight(const Vector& error) const {
   return w;
 }
 
+Vector Base::sqrtWeight(const Vector &error) const {
+  return weight(error).cwiseSqrt();
+}
+
 // The following three functions re-weight block matrices and a vector
 // according to their weight implementation
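
Note: `sqrtWeight` is simply the element-wise square root of `weight(error)`; moving it out-of-line (matching header hunk below) does not change behavior. A check of the relation, assuming this `Base` is the m-estimator base from gtsam's loss functions (header path assumed):

    #include <cassert>
    #include <cmath>
    #include <gtsam/linear/LossFunctions.h>

    void checkSqrtWeight(const gtsam::noiseModel::mEstimator::Base& est,
                         const gtsam::Vector& error) {
      const gtsam::Vector w = est.weight(error);
      const gtsam::Vector sw = est.sqrtWeight(error);
      for (int i = 0; i < w.size(); ++i)
        assert(std::abs(sw(i) * sw(i) - w(i)) < 1e-12);  // sw = w.cwiseSqrt()
    }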
@@ -115,9 +115,7 @@ class GTSAM_EXPORT Base {
   Vector weight(const Vector &error) const;
 
   /** square root version of the weight function */
-  Vector sqrtWeight(const Vector &error) const {
-    return weight(error).cwiseSqrt();
-  }
+  Vector sqrtWeight(const Vector &error) const;
 
   /** reweight block matrices and a vector according to their weight
    * implementation */

@@ -239,6 +239,8 @@ void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const
   whitenInPlace(b);
 }
 
+Matrix Gaussian::information() const { return R().transpose() * R(); }
+
 /* ************************************************************************* */
 // Diagonal
 /* ************************************************************************* */
@@ -284,6 +286,11 @@ Diagonal::shared_ptr Diagonal::Sigmas(const Vector& sigmas, bool smart) {
   full: return Diagonal::shared_ptr(new Diagonal(sigmas));
 }
 
+/* ************************************************************************* */
+Diagonal::shared_ptr Diagonal::Precisions(const Vector& precisions,
+                                          bool smart) {
+  return Variances(precisions.array().inverse(), smart);
+}
 /* ************************************************************************* */
 void Diagonal::print(const string& name) const {
   gtsam::print(sigmas_, name + "diagonal sigmas ");
@@ -294,22 +301,18 @@ Vector Diagonal::whiten(const Vector& v) const {
   return v.cwiseProduct(invsigmas_);
 }
 
-/* ************************************************************************* */
 Vector Diagonal::unwhiten(const Vector& v) const {
   return v.cwiseProduct(sigmas_);
 }
 
-/* ************************************************************************* */
 Matrix Diagonal::Whiten(const Matrix& H) const {
   return vector_scale(invsigmas(), H);
 }
 
-/* ************************************************************************* */
 void Diagonal::WhitenInPlace(Matrix& H) const {
   vector_scale_inplace(invsigmas(), H);
 }
 
-/* ************************************************************************* */
 void Diagonal::WhitenInPlace(Eigen::Block<Matrix> H) const {
   H = invsigmas().asDiagonal() * H;
 }
@@ -377,6 +380,32 @@ Vector Constrained::whiten(const Vector& v) const {
   return c;
 }
 
+/* ************************************************************************* */
+Constrained::shared_ptr Constrained::MixedSigmas(const Vector& sigmas) {
+  return MixedSigmas(Vector::Constant(sigmas.size(), 1000.0), sigmas);
+}
+
+Constrained::shared_ptr Constrained::MixedSigmas(double m,
+                                                 const Vector& sigmas) {
+  return MixedSigmas(Vector::Constant(sigmas.size(), m), sigmas);
+}
+
+Constrained::shared_ptr Constrained::MixedVariances(const Vector& mu,
+                                                    const Vector& variances) {
+  return shared_ptr(new Constrained(mu, variances.cwiseSqrt()));
+}
+Constrained::shared_ptr Constrained::MixedVariances(const Vector& variances) {
+  return shared_ptr(new Constrained(variances.cwiseSqrt()));
+}
+
+Constrained::shared_ptr Constrained::MixedPrecisions(const Vector& mu,
+                                                     const Vector& precisions) {
+  return MixedVariances(mu, precisions.array().inverse());
+}
+Constrained::shared_ptr Constrained::MixedPrecisions(const Vector& precisions) {
+  return MixedVariances(precisions.array().inverse());
+}
+
 /* ************************************************************************* */
 double Constrained::squaredMahalanobisDistance(const Vector& v) const {
   Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements
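
Note: the `Constrained::Mixed*` factories above moved verbatim from the header (matching NoiseModel.h hunk below); semantics are unchanged. A zero entry in `sigmas` marks that dimension as hard-constrained. Usage sketch:

    #include <gtsam/linear/NoiseModel.h>

    void mixedSketch() {
      using gtsam::noiseModel::Constrained;
      // First and third dimensions constrained (sigma == 0), middle one free.
      const gtsam::Vector3 sigmas(0.0, 0.1, 0.0);
      const Constrained::shared_ptr model = Constrained::MixedSigmas(sigmas);
      (void)model;
    }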
@@ -623,6 +652,11 @@ void Unit::print(const std::string& name) const {
   cout << name << "unit (" << dim_ << ") " << endl;
 }
 
+/* ************************************************************************* */
+double Unit::squaredMahalanobisDistance(const Vector& v) const {
+  return v.dot(v);
+}
+
 /* ************************************************************************* */
 // Robust
 /* ************************************************************************* */
@@ -663,6 +697,13 @@ void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const{
   robust_->reweight(A1,A2,A3,b);
 }
 
+Vector Robust::unweightedWhiten(const Vector& v) const {
+  return noise_->unweightedWhiten(v);
+}
+double Robust::weight(const Vector& v) const {
+  return robust_->weight(v.norm());
+}
+
 Robust::shared_ptr Robust::Create(
     const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise){
   return shared_ptr(new Robust(robust,noise));

@@ -255,7 +255,7 @@ namespace gtsam {
     virtual Matrix R() const { return thisR();}
 
     /// Compute information matrix
-    virtual Matrix information() const { return R().transpose() * R(); }
+    virtual Matrix information() const;
 
     /// Compute covariance matrix
     virtual Matrix covariance() const;
@@ -319,9 +319,7 @@ namespace gtsam {
     * A diagonal noise model created by specifying a Vector of precisions, i.e.
     * i.e. the diagonal of the information matrix, i.e., weights
     */
-    static shared_ptr Precisions(const Vector& precisions, bool smart = true) {
-      return Variances(precisions.array().inverse(), smart);
-    }
+    static shared_ptr Precisions(const Vector& precisions, bool smart = true);
 
    void print(const std::string& name) const override;
    Vector sigmas() const override { return sigmas_; }
@@ -426,39 +424,27 @@ namespace gtsam {
     * A diagonal noise model created by specifying a Vector of
     * standard devations, some of which might be zero
     */
-    static shared_ptr MixedSigmas(const Vector& sigmas) {
-      return MixedSigmas(Vector::Constant(sigmas.size(), 1000.0), sigmas);
-    }
+    static shared_ptr MixedSigmas(const Vector& sigmas);
 
    /**
     * A diagonal noise model created by specifying a Vector of
     * standard devations, some of which might be zero
     */
-    static shared_ptr MixedSigmas(double m, const Vector& sigmas) {
-      return MixedSigmas(Vector::Constant(sigmas.size(), m), sigmas);
-    }
+    static shared_ptr MixedSigmas(double m, const Vector& sigmas);
 
    /**
     * A diagonal noise model created by specifying a Vector of
     * standard devations, some of which might be zero
     */
-    static shared_ptr MixedVariances(const Vector& mu, const Vector& variances) {
-      return shared_ptr(new Constrained(mu, variances.cwiseSqrt()));
-    }
-    static shared_ptr MixedVariances(const Vector& variances) {
-      return shared_ptr(new Constrained(variances.cwiseSqrt()));
-    }
+    static shared_ptr MixedVariances(const Vector& mu, const Vector& variances);
+    static shared_ptr MixedVariances(const Vector& variances);
 
    /**
     * A diagonal noise model created by specifying a Vector of
     * precisions, some of which might be inf
     */
-    static shared_ptr MixedPrecisions(const Vector& mu, const Vector& precisions) {
-      return MixedVariances(mu, precisions.array().inverse());
-    }
-    static shared_ptr MixedPrecisions(const Vector& precisions) {
-      return MixedVariances(precisions.array().inverse());
-    }
+    static shared_ptr MixedPrecisions(const Vector& mu, const Vector& precisions);
+    static shared_ptr MixedPrecisions(const Vector& precisions);
 
    /**
     * The squaredMahalanobisDistance function for a constrained noisemodel,

@@ -616,7 +602,7 @@ namespace gtsam {
      bool isUnit() const override { return true; }
 
      void print(const std::string& name) const override;
-     double squaredMahalanobisDistance(const Vector& v) const override {return v.dot(v); }
+     double squaredMahalanobisDistance(const Vector& v) const override;
      Vector whiten(const Vector& v) const override { return v; }
      Vector unwhiten(const Vector& v) const override { return v; }
      Matrix Whiten(const Matrix& H) const override { return H; }
@@ -710,12 +696,8 @@ namespace gtsam {
     void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const override;
     void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const override;
 
-    Vector unweightedWhiten(const Vector& v) const override {
-      return noise_->unweightedWhiten(v);
-    }
-    double weight(const Vector& v) const override {
-      return robust_->weight(v.norm());
-    }
+    Vector unweightedWhiten(const Vector& v) const override;
+    double weight(const Vector& v) const override;
 
     static shared_ptr Create(
       const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise);

@@ -136,6 +136,17 @@ void GaussianFactorGraphSystem::rightPrecondition(const Vector &x,
   preconditioner_.transposeSolve(x, y);
 }
 
+/**********************************************************************************/
+void GaussianFactorGraphSystem::scal(const double alpha, Vector &x) const {
+  x *= alpha;
+}
+double GaussianFactorGraphSystem::dot(const Vector &x, const Vector &y) const {
+  return x.dot(y);
+}
+void GaussianFactorGraphSystem::axpy(const double alpha, const Vector &x,
+                                     Vector &y) const {
+  y += alpha * x;
+}
 /**********************************************************************************/
 VectorValues buildVectorValues(const Vector &v, const Ordering &ordering,
                                const map<Key, size_t> & dimensions) {

@@ -102,15 +102,9 @@ public:
   void multiply(const Vector &x, Vector& y) const;
   void leftPrecondition(const Vector &x, Vector &y) const;
   void rightPrecondition(const Vector &x, Vector &y) const;
-  inline void scal(const double alpha, Vector &x) const {
-    x *= alpha;
-  }
-  inline double dot(const Vector &x, const Vector &y) const {
-    return x.dot(y);
-  }
-  inline void axpy(const double alpha, const Vector &x, Vector &y) const {
-    y += alpha * x;
-  }
+  void scal(const double alpha, Vector &x) const;
+  double dot(const Vector &x, const Vector &y) const;
+  void axpy(const double alpha, const Vector &x, Vector &y) const;
 
   void getb(Vector &b) const;
 };

@@ -436,36 +436,6 @@ struct GTSAM_EXPORT UpdateImpl {
     }
   }
 
-  /**
-   * Apply expmap to the given values, but only for indices appearing in
-   * \c mask. Values are expmapped in-place.
-   * \param mask Mask on linear indices, only \c true entries are expmapped
-   */
-  static void ExpmapMasked(const VectorValues& delta, const KeySet& mask,
-                           Values* theta) {
-    gttic(ExpmapMasked);
-    assert(theta->size() == delta.size());
-    Values::iterator key_value;
-    VectorValues::const_iterator key_delta;
-#ifdef GTSAM_USE_TBB
-    for (key_value = theta->begin(); key_value != theta->end(); ++key_value) {
-      key_delta = delta.find(key_value->key);
-#else
-    for (key_value = theta->begin(), key_delta = delta.begin();
-         key_value != theta->end(); ++key_value, ++key_delta) {
-      assert(key_value->key == key_delta->first);
-#endif
-      Key var = key_value->key;
-      assert(static_cast<size_t>(delta[var].size()) == key_value->value.dim());
-      assert(delta[var].allFinite());
-      if (mask.exists(var)) {
-        Value* retracted = key_value->value.retract_(delta[var]);
-        key_value->value = *retracted;
-        retracted->deallocate_();
-      }
-    }
-  }
-
   // Linearize new factors
   void linearizeNewFactors(const NonlinearFactorGraph& newFactors,
                            const Values& theta, size_t numNonlinearFactors,

@@ -454,7 +454,7 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors,
       update.findFluid(roots_, relinKeys, &result.markedKeys, result.details());
       // 6. Update linearization point for marked variables:
       // \Theta_{J}:=\Theta_{J}+\Delta_{J}.
-      UpdateImpl::ExpmapMasked(delta_, relinKeys, &theta_);
+      theta_.retractMasked(delta_, relinKeys);
     }
     result.variablesRelinearized = result.markedKeys.size();
   }

@@ -285,8 +285,8 @@ static Scatter scatterFromValues(const Values& values) {
   scatter.reserve(values.size());
 
   // use "natural" ordering with keys taken from the initial values
-  for (const auto key_value : values) {
-    scatter.add(key_value.key, key_value.value.dim());
+  for (const auto& key_dim : values.dims()) {
+    scatter.add(key_dim.first, key_dim.second);
   }
 
   return scatter;

@@ -25,10 +25,8 @@
 #pragma once
 
 #include <utility>
 
-#include <boost/bind/bind.hpp>
-
-#include <gtsam/nonlinear/Values.h> // Only so Eclipse finds class definition
+#include <gtsam/nonlinear/Values.h>
 
 namespace gtsam {
 
@@ -95,13 +93,13 @@ namespace gtsam {
   std::map<Key, ValueType>
   Values::extract(const std::function<bool(Key)>& filterFcn) const {
     std::map<Key, ValueType> result;
-    for (const auto& key_value : *this) {
+    for (const auto& key_value : values_) {
       // Check if key matches
-      if (filterFcn(key_value.key)) {
+      if (filterFcn(key_value.first)) {
         // Check if type matches (typically does as symbols matched with types)
         if (auto t =
-                dynamic_cast<const GenericValue<ValueType>*>(&key_value.value))
-          result[key_value.key] = t->value();
+                dynamic_cast<const GenericValue<ValueType>*>(key_value.second))
+          result[key_value.first] = t->value();
       }
     }
     return result;
@@ -109,6 +107,8 @@ namespace gtsam {
 
   /* ************************************************************************* */
 #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
+#include <boost/bind/bind.hpp>
+
   template<class ValueType>
   class Values::Filtered {
   public:

@@ -25,8 +25,6 @@
 #include <gtsam/nonlinear/Values.h>
 #include <gtsam/linear/VectorValues.h>
 
-#include <boost/iterator/transform_iterator.hpp>
-
 #include <list>
 #include <memory>
 #include <sstream>
@@ -52,15 +50,15 @@ namespace gtsam {
 
   /* ************************************************************************* */
   Values::Values(const Values& other, const VectorValues& delta) {
-    for (const_iterator key_value = other.begin(); key_value != other.end(); ++key_value) {
-      VectorValues::const_iterator it = delta.find(key_value->key);
-      Key key = key_value->key; // Non-const duplicate to deal with non-const insert argument
+    for (const auto& key_value : other.values_) {
+      VectorValues::const_iterator it = delta.find(key_value.first);
+      Key key = key_value.first; // Non-const duplicate to deal with non-const insert argument
       if (it != delta.end()) {
         const Vector& v = it->second;
-        Value* retractedValue(key_value->value.retract_(v)); // Retract
+        Value* retractedValue(key_value.second->retract_(v)); // Retract
         values_.insert(key, retractedValue); // Add retracted result directly to result values
       } else {
-        values_.insert(key, key_value->value.clone_()); // Add original version to result values
+        values_.insert(key, key_value.second->clone_()); // Add original version to result values
       }
     }
   }
@@ -69,9 +67,9 @@ namespace gtsam {
   void Values::print(const string& str, const KeyFormatter& keyFormatter) const {
     cout << str << (str.empty() ? "" : "\n");
     cout << "Values with " << size() << " values:\n";
-    for(const_iterator key_value = begin(); key_value != end(); ++key_value) {
-      cout << "Value " << keyFormatter(key_value->key) << ": ";
-      key_value->value.print("");
+    for (const auto& key_value : values_) {
+      cout << "Value " << keyFormatter(key_value.first) << ": ";
+      key_value.second->print("");
       cout << "\n";
     }
   }
@@ -80,12 +78,12 @@ namespace gtsam {
   bool Values::equals(const Values& other, double tol) const {
     if (this->size() != other.size())
       return false;
-    for (const_iterator it1 = this->begin(), it2 = other.begin();
-         it1 != this->end(); ++it1, ++it2) {
-      const Value& value1 = it1->value;
-      const Value& value2 = it2->value;
-      if (typeid(value1) != typeid(value2) || it1->key != it2->key
-          || !value1.equals_(value2, tol)) {
+    for (auto it1 = values_.begin(), it2 = other.values_.begin();
+         it1 != values_.end(); ++it1, ++it2) {
+      const Value* value1 = it1->second;
+      const Value* value2 = it2->second;
+      if (typeid(*value1) != typeid(*value2) || it1->first != it2->first
+          || !value1->equals_(*value2, tol)) {
         return false;
       }
     }
@@ -102,17 +100,44 @@ namespace gtsam {
     return Values(*this, delta);
   }
 
+  /* ************************************************************************* */
+  void Values::retractMasked(const VectorValues& delta, const KeySet& mask) {
+    gttic(retractMasked);
+    assert(this->size() == delta.size());
+    auto key_value = values_.begin();
+    VectorValues::const_iterator key_delta;
+#ifdef GTSAM_USE_TBB
+    for (; key_value != values_.end(); ++key_value) {
+      key_delta = delta.find(key_value->first);
+#else
+    for (key_delta = delta.begin(); key_value != values_.end();
+         ++key_value, ++key_delta) {
+      assert(key_value->first == key_delta->first);
+#endif
+      Key var = key_value->first;
+      assert(static_cast<size_t>(delta[var].size()) == key_value->second->dim());
+      assert(delta[var].allFinite());
+      if (mask.exists(var)) {
+        Value* retracted = key_value->second->retract_(delta[var]);
+        // TODO(dellaert): can we use std::move here?
+        *(key_value->second) = *retracted;
+        retracted->deallocate_();
+      }
+    }
+  }
+
   /* ************************************************************************* */
   VectorValues Values::localCoordinates(const Values& cp) const {
     if(this->size() != cp.size())
       throw DynamicValuesMismatched();
     VectorValues result;
-    for(const_iterator it1=this->begin(), it2=cp.begin(); it1!=this->end(); ++it1, ++it2) {
-      if(it1->key != it2->key)
+    for (auto it1 = values_.begin(), it2 = cp.values_.begin();
+         it1 != values_.end(); ++it1, ++it2) {
+      if(it1->first != it2->first)
         throw DynamicValuesMismatched(); // If keys do not match
       // Will throw a dynamic_cast exception if types do not match
       // NOTE: this is separate from localCoordinates(cp, ordering, result) due to at() vs. insert
-      result.insert(it1->key, it1->value.localCoordinates_(it2->value));
+      result.insert(it1->first, it1->second->localCoordinates_(*it2->second));
     }
     return result;
   }
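
Note: `Values::retractMasked` is the in-place successor of the deleted `UpdateImpl::ExpmapMasked` (ISAM2 hunks above): only keys present in `mask` are retracted; every other variable keeps its current value. Sketch, mirroring the new `retract_masked` unit test further down:

    #include <gtsam/linear/VectorValues.h>
    #include <gtsam/nonlinear/Values.h>

    void retractOnly(gtsam::Values& theta, const gtsam::VectorValues& delta,
                     gtsam::Key movedKey) {
      // In-place: only movedKey is updated, everything else is untouched.
      theta.retractMasked(delta, gtsam::KeySet{movedKey});
    }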
@@ -130,24 +155,26 @@ namespace gtsam {
 
   /* ************************************************************************* */
   void Values::insert(Key j, const Value& val) {
-    std::pair<iterator,bool> insertResult = tryInsert(j, val);
+    auto insertResult = values_.insert(j, val.clone_());
     if(!insertResult.second)
       throw ValuesKeyAlreadyExists(j);
   }
 
   /* ************************************************************************* */
-  void Values::insert(const Values& values) {
-    for(const_iterator key_value = values.begin(); key_value != values.end(); ++key_value) {
-      Key key = key_value->key; // Non-const duplicate to deal with non-const insert argument
-      insert(key, key_value->value);
+  void Values::insert(const Values& other) {
+    for (auto key_value = other.values_.begin();
+         key_value != other.values_.end(); ++key_value) {
+      insert(key_value->first, *(key_value->second));
     }
   }
 
   /* ************************************************************************* */
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
   std::pair<Values::iterator, bool> Values::tryInsert(Key j, const Value& value) {
     std::pair<KeyValueMap::iterator, bool> result = values_.insert(j, value.clone_());
     return std::make_pair(boost::make_transform_iterator(result.first, &make_deref_pair), result.second);
   }
+#endif
 
   /* ************************************************************************* */
   void Values::update(Key j, const Value& val) {
@@ -165,9 +192,10 @@ namespace gtsam {
   }
 
   /* ************************************************************************* */
-  void Values::update(const Values& values) {
-    for(const_iterator key_value = values.begin(); key_value != values.end(); ++key_value) {
-      this->update(key_value->key, key_value->value);
+  void Values::update(const Values& other) {
+    for (auto key_value = other.values_.begin();
+         key_value != other.values_.end(); ++key_value) {
+      this->update(key_value->first, *(key_value->second));
     }
   }
 
@@ -183,10 +211,10 @@ namespace gtsam {
   }
 
   /* ************************************************************************ */
-  void Values::insert_or_assign(const Values& values) {
-    for (const_iterator key_value = values.begin(); key_value != values.end();
-         ++key_value) {
-      this->insert_or_assign(key_value->key, key_value->value);
+  void Values::insert_or_assign(const Values& other) {
+    for (auto key_value = other.values_.begin();
+         key_value != other.values_.end(); ++key_value) {
+      this->insert_or_assign(key_value->first, *(key_value->second));
     }
   }
 
@@ -202,8 +230,16 @@ namespace gtsam {
   KeyVector Values::keys() const {
     KeyVector result;
     result.reserve(size());
-    for(const_iterator key_value = begin(); key_value != end(); ++key_value)
-      result.push_back(key_value->key);
+    for(const auto& key_value: values_)
+      result.push_back(key_value.first);
+    return result;
+  }
+
+  /* ************************************************************************* */
+  KeySet Values::keySet() const {
+    KeySet result;
+    for(const auto& key_value: values_)
+      result.insert(key_value.first);
     return result;
   }
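
Note: `keys()` (an ordered `KeyVector`) now has a set-returning sibling `keySet()`; both walk `values_` directly instead of going through the removed transform iterators. Sketch:

    #include <gtsam/nonlinear/Values.h>

    void keyContainers(const gtsam::Values& values) {
      const gtsam::KeyVector v = values.keys();   // ordered vector of keys
      const gtsam::KeySet s = values.keySet();    // same keys as a set
      (void)v; (void)s;
    }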
@@ -217,8 +253,17 @@ namespace gtsam {
   /* ************************************************************************* */
   size_t Values::dim() const {
     size_t result = 0;
-    for(const auto key_value: *this) {
-      result += key_value.value.dim();
+    for (const auto key_value : values_) {
+      result += key_value->second->dim();
     }
     return result;
   }
 
+  /* ************************************************************************* */
+  std::map<Key,size_t> Values::dims() const {
+    std::map<Key,size_t> result;
+    for (const auto key_value : values_) {
+      result.emplace(key_value->first, key_value->second->dim());
+    }
+    return result;
+  }
+
@@ -226,8 +271,8 @@ namespace gtsam {
   /* ************************************************************************* */
   VectorValues Values::zeroVectors() const {
     VectorValues result;
-    for(const auto key_value: *this)
-      result.insert(key_value.key, Vector::Zero(key_value.value.dim()));
+    for (const auto key_value : values_)
+      result.insert(key_value->first, Vector::Zero(key_value->second->dim()));
     return result;
   }
 
@@ -28,10 +28,10 @@
 #include <gtsam/base/GenericValue.h>
 #include <gtsam/base/VectorSpace.h>
 #include <gtsam/inference/Key.h>
-#include <boost/iterator/transform_iterator.hpp>
 #include <boost/ptr_container/serialize_ptr_map.hpp>
 #include <boost/shared_ptr.hpp>
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
+#include <boost/iterator/transform_iterator.hpp>
 #include <boost/iterator/filter_iterator.hpp>
+#endif
 
@@ -81,10 +81,6 @@ namespace gtsam {
     // The member to store the values, see just above
     KeyValueMap values_;
 
-    // Types obtained by iterating
-    typedef KeyValueMap::const_iterator::value_type ConstKeyValuePtrPair;
-    typedef KeyValueMap::iterator::value_type KeyValuePtrPair;
-
   public:
 
     /// A shared_ptr to this class
@@ -110,22 +106,6 @@ namespace gtsam {
       ConstKeyValuePair(const KeyValuePair& kv) : key(kv.key), value(kv.value) {}
     };
 
-    /// Mutable forward iterator, with value type KeyValuePair
-    typedef boost::transform_iterator<
-        std::function<KeyValuePair(const KeyValuePtrPair&)>, KeyValueMap::iterator> iterator;
-
-    /// Const forward iterator, with value type ConstKeyValuePair
-    typedef boost::transform_iterator<
-        std::function<ConstKeyValuePair(const ConstKeyValuePtrPair&)>, KeyValueMap::const_iterator> const_iterator;
-
-    /// Mutable reverse iterator, with value type KeyValuePair
-    typedef boost::transform_iterator<
-        std::function<KeyValuePair(const KeyValuePtrPair&)>, KeyValueMap::reverse_iterator> reverse_iterator;
-
-    /// Const reverse iterator, with value type ConstKeyValuePair
-    typedef boost::transform_iterator<
-        std::function<ConstKeyValuePair(const ConstKeyValuePtrPair&)>, KeyValueMap::const_reverse_iterator> const_reverse_iterator;
-
     typedef KeyValuePair value_type;
 
     /** Default constructor creates an empty Values class */
@@ -191,47 +171,24 @@ namespace gtsam {
     template<typename ValueType>
     boost::optional<const ValueType&> exists(Key j) const;
 
-    /** Find an element by key, returning an iterator, or end() if the key was
-     * not found. */
-    iterator find(Key j) { return boost::make_transform_iterator(values_.find(j), &make_deref_pair); }
-
-    /** Find an element by key, returning an iterator, or end() if the key was
-     * not found. */
-    const_iterator find(Key j) const { return boost::make_transform_iterator(values_.find(j), &make_const_deref_pair); }
-
-    /** Find the element greater than or equal to the specified key. */
-    iterator lower_bound(Key j) { return boost::make_transform_iterator(values_.lower_bound(j), &make_deref_pair); }
-
-    /** Find the element greater than or equal to the specified key. */
-    const_iterator lower_bound(Key j) const { return boost::make_transform_iterator(values_.lower_bound(j), &make_const_deref_pair); }
-
-    /** Find the lowest-ordered element greater than the specified key. */
-    iterator upper_bound(Key j) { return boost::make_transform_iterator(values_.upper_bound(j), &make_deref_pair); }
-
-    /** Find the lowest-ordered element greater than the specified key. */
-    const_iterator upper_bound(Key j) const { return boost::make_transform_iterator(values_.upper_bound(j), &make_const_deref_pair); }
-
     /** The number of variables in this config */
     size_t size() const { return values_.size(); }
 
     /** whether the config is empty */
     bool empty() const { return values_.empty(); }
 
-    const_iterator begin() const { return boost::make_transform_iterator(values_.begin(), &make_const_deref_pair); }
-    const_iterator end() const { return boost::make_transform_iterator(values_.end(), &make_const_deref_pair); }
-    iterator begin() { return boost::make_transform_iterator(values_.begin(), &make_deref_pair); }
-    iterator end() { return boost::make_transform_iterator(values_.end(), &make_deref_pair); }
-    const_reverse_iterator rbegin() const { return boost::make_transform_iterator(values_.rbegin(), &make_const_deref_pair); }
-    const_reverse_iterator rend() const { return boost::make_transform_iterator(values_.rend(), &make_const_deref_pair); }
-    reverse_iterator rbegin() { return boost::make_transform_iterator(values_.rbegin(), &make_deref_pair); }
-    reverse_iterator rend() { return boost::make_transform_iterator(values_.rend(), &make_deref_pair); }
-
     /// @name Manifold Operations
     /// @{
 
     /** Add a delta config to current config and returns a new config */
     Values retract(const VectorValues& delta) const;
 
+    /**
+     * Retract, but only for Keys appearing in \c mask. In-place.
+     * \param mask Mask on Keys where to apply retract.
+     */
+    void retractMasked(const VectorValues& delta, const KeySet& mask);
+
     /** Get a delta config about a linearization point c0 (*this) */
     VectorValues localCoordinates(const Values& cp) const;
 
@@ -252,12 +209,6 @@ namespace gtsam {
     /// version for double
     void insertDouble(Key j, double c) { insert<double>(j,c); }
 
-    /** insert that mimics the STL map insert - if the value already exists, the map is not modified
-     * and an iterator to the existing value is returned, along with 'false'. If the value did not
-     * exist, it is inserted and an iterator pointing to the new element, along with 'true', is
-     * returned. */
-    std::pair<iterator, bool> tryInsert(Key j, const Value& value);
-
     /** single element change of existing element */
     void update(Key j, const Value& val);
 
@@ -288,11 +239,16 @@ namespace gtsam {
     void erase(Key j);
 
     /**
-     * Returns a set of keys in the config
+     * Returns a vector of keys in the config.
      * Note: by construction, the list is ordered
     */
     KeyVector keys() const;
 
+    /**
+     * Returns a set of keys in the config.
+     */
+    KeySet keySet() const;
+
     /** Replace all keys and variables */
     Values& operator=(const Values& rhs);
 
@@ -305,6 +261,9 @@ namespace gtsam {
     /** Compute the total dimensionality of all values (\f$ O(n) \f$) */
     size_t dim() const;
 
+    /** Return all dimensions in a map (\f$ O(n log n) \f$) */
+    std::map<Key,size_t> dims() const;
+
     /** Return a VectorValues of zero vectors for each variable in this Values */
     VectorValues zeroVectors() const;
 
@@ -312,8 +271,8 @@ namespace gtsam {
     template<class ValueType>
     size_t count() const {
       size_t i = 0;
-      for (const auto key_value : *this) {
-        if (dynamic_cast<const GenericValue<ValueType>*>(&key_value.value))
+      for (const auto key_value : values_) {
+        if (dynamic_cast<const GenericValue<ValueType>*>(key_value.second))
           ++i;
       }
       return i;
@@ -343,6 +302,67 @@ namespace gtsam {
     extract(const std::function<bool(Key)>& filterFcn = &_truePredicate<Key>) const;
 
 #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
+    // Types obtained by iterating
+    typedef KeyValueMap::const_iterator::value_type ConstKeyValuePtrPair;
+    typedef KeyValueMap::iterator::value_type KeyValuePtrPair;
+
+    /// Mutable forward iterator, with value type KeyValuePair
+    typedef boost::transform_iterator<
+        std::function<KeyValuePair(const KeyValuePtrPair&)>, KeyValueMap::iterator> iterator;
+
+    /// Const forward iterator, with value type ConstKeyValuePair
+    typedef boost::transform_iterator<
+        std::function<ConstKeyValuePair(const ConstKeyValuePtrPair&)>, KeyValueMap::const_iterator> const_iterator;
+
+    /// Mutable reverse iterator, with value type KeyValuePair
+    typedef boost::transform_iterator<
+        std::function<KeyValuePair(const KeyValuePtrPair&)>, KeyValueMap::reverse_iterator> reverse_iterator;
+
+    /// Const reverse iterator, with value type ConstKeyValuePair
+    typedef boost::transform_iterator<
+        std::function<ConstKeyValuePair(const ConstKeyValuePtrPair&)>, KeyValueMap::const_reverse_iterator> const_reverse_iterator;
+
+    /** insert that mimics the STL map insert - if the value already exists, the map is not modified
+     * and an iterator to the existing value is returned, along with 'false'. If the value did not
+     * exist, it is inserted and an iterator pointing to the new element, along with 'true', is
+     * returned. */
+    std::pair<iterator, bool> tryInsert(Key j, const Value& value);
+
+    static ConstKeyValuePair make_const_deref_pair(const KeyValueMap::const_iterator::value_type& key_value) {
+      return ConstKeyValuePair(key_value.first, *key_value.second); }
+
+    static KeyValuePair make_deref_pair(const KeyValueMap::iterator::value_type& key_value) {
+      return KeyValuePair(key_value.first, *key_value.second); }
+
+    const_iterator begin() const { return boost::make_transform_iterator(values_.begin(), &make_const_deref_pair); }
+    const_iterator end() const { return boost::make_transform_iterator(values_.end(), &make_const_deref_pair); }
+    iterator begin() { return boost::make_transform_iterator(values_.begin(), &make_deref_pair); }
+    iterator end() { return boost::make_transform_iterator(values_.end(), &make_deref_pair); }
+    const_reverse_iterator rbegin() const { return boost::make_transform_iterator(values_.rbegin(), &make_const_deref_pair); }
+    const_reverse_iterator rend() const { return boost::make_transform_iterator(values_.rend(), &make_const_deref_pair); }
+    reverse_iterator rbegin() { return boost::make_transform_iterator(values_.rbegin(), &make_deref_pair); }
+    reverse_iterator rend() { return boost::make_transform_iterator(values_.rend(), &make_deref_pair); }
+
+    /** Find an element by key, returning an iterator, or end() if the key was
+     * not found. */
+    iterator find(Key j) { return boost::make_transform_iterator(values_.find(j), &make_deref_pair); }
+
+    /** Find an element by key, returning an iterator, or end() if the key was
+     * not found. */
+    const_iterator find(Key j) const { return boost::make_transform_iterator(values_.find(j), &make_const_deref_pair); }
+
+    /** Find the element greater than or equal to the specified key. */
+    iterator lower_bound(Key j) { return boost::make_transform_iterator(values_.lower_bound(j), &make_deref_pair); }
+
+    /** Find the element greater than or equal to the specified key. */
+    const_iterator lower_bound(Key j) const { return boost::make_transform_iterator(values_.lower_bound(j), &make_const_deref_pair); }
+
+    /** Find the lowest-ordered element greater than the specified key. */
+    iterator upper_bound(Key j) { return boost::make_transform_iterator(values_.upper_bound(j), &make_deref_pair); }
+
+    /** Find the lowest-ordered element greater than the specified key. */
+    const_iterator upper_bound(Key j) const { return boost::make_transform_iterator(values_.upper_bound(j), &make_const_deref_pair); }
+
     /** A filtered view of a Values, returned from Values::filter. */
     template <class ValueType = Value>
     class Filtered;
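
Note: everything inside this `GTSAM_ALLOW_DEPRECATED_SINCE_V42` block (the boost transform-iterator typedefs, `begin()`/`end()`, `find`, the bound queries, and `tryInsert`) survives only for source compatibility; new code should use `keys()`, `extract<T>()`, and `dims()` instead. Flag-free sketch of the replacement for untyped iteration:

    #include <gtsam/nonlinear/Values.h>

    void flagFree(const gtsam::Values& values) {
      for (const gtsam::Key k : values.keys()) {
        const gtsam::Value& v = values.at(k);  // untyped base-class access
        (void)v;
      }
    }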
@@ -395,12 +415,6 @@ namespace gtsam {
       ar & BOOST_SERIALIZATION_NVP(values_);
     }
 
-    static ConstKeyValuePair make_const_deref_pair(const KeyValueMap::const_iterator::value_type& key_value) {
-      return ConstKeyValuePair(key_value.first, *key_value.second); }
-
-    static KeyValuePair make_deref_pair(const KeyValueMap::iterator::value_type& key_value) {
-      return KeyValuePair(key_value.first, *key_value.second); }
-
   };
 
   /* ************************************************************************* */

@@ -128,9 +128,10 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState {
     noiseModelCache.resize(0);
     // for each of the variables, add a prior
     damped.reserve(damped.size() + values.size());
-    for (const auto key_value : values) {
-      const Key key = key_value.key;
-      const size_t dim = key_value.value.dim();
+    std::map<Key,size_t> dims = values.dims();
+    for (const auto& key_dim : dims) {
+      const Key& key = key_dim.first;
+      const size_t& dim = key_dim.second;
      const CachedModel* item = getCachedModel(dim);
      damped += boost::make_shared<JacobianFactor>(key, item->A, item->b, item->model);
    }

@@ -350,8 +350,8 @@ TEST(TestLinearContainerFactor, Rekey) {
 
   // For extra fun lets try linearizing this LCF
   gtsam::Values linearization_pt_rekeyed;
-  for (auto key_val : linearization_pt) {
-    linearization_pt_rekeyed.insert(key_map.at(key_val.key), key_val.value);
+  for (auto key : linearization_pt.keys()) {
+    linearization_pt_rekeyed.insert(key_map.at(key), linearization_pt.at(key));
   }
 
   // Check independent values since we don't want to unnecessarily sort

@@ -195,6 +195,7 @@ TEST(Values, basic_functions)
   values.insert(6, M1);
   values.insert(8, M2);
 
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
   // find
   EXPECT_LONGS_EQUAL(4, values.find(4)->key);
   EXPECT_LONGS_EQUAL(4, values_c.find(4)->key);
@@ -210,7 +211,7 @@ TEST(Values, basic_functions)
   EXPECT_LONGS_EQUAL(6, values_c.upper_bound(4)->key);
   EXPECT_LONGS_EQUAL(4, values.upper_bound(3)->key);
   EXPECT_LONGS_EQUAL(4, values_c.upper_bound(3)->key);
-
+#endif
 }
 
 /* ************************************************************************* */
@@ -220,8 +221,8 @@ TEST(Values, retract_full)
   config0.insert(key1, Vector3(1.0, 2.0, 3.0));
   config0.insert(key2, Vector3(5.0, 6.0, 7.0));
 
-  VectorValues delta {{key1, Vector3(1.0, 1.1, 1.2)},
-                      {key2, Vector3(1.3, 1.4, 1.5)}};
+  const VectorValues delta{{key1, Vector3(1.0, 1.1, 1.2)},
+                           {key2, Vector3(1.3, 1.4, 1.5)}};
 
   Values expected;
   expected.insert(key1, Vector3(2.0, 3.1, 4.2));
@@ -238,7 +239,7 @@ TEST(Values, retract_partial)
   config0.insert(key1, Vector3(1.0, 2.0, 3.0));
   config0.insert(key2, Vector3(5.0, 6.0, 7.0));
 
-  VectorValues delta {{key2, Vector3(1.3, 1.4, 1.5)}};
+  const VectorValues delta{{key2, Vector3(1.3, 1.4, 1.5)}};
 
   Values expected;
   expected.insert(key1, Vector3(1.0, 2.0, 3.0));
@@ -248,6 +249,24 @@ TEST(Values, retract_partial)
   CHECK(assert_equal(expected, Values(config0, delta)));
 }
 
+/* ************************************************************************* */
+TEST(Values, retract_masked)
+{
+  Values config0;
+  config0.insert(key1, Vector3(1.0, 2.0, 3.0));
+  config0.insert(key2, Vector3(5.0, 6.0, 7.0));
+
+  const VectorValues delta{{key1, Vector3(1.0, 1.1, 1.2)},
+                           {key2, Vector3(1.3, 1.4, 1.5)}};
+
+  Values expected;
+  expected.insert(key1, Vector3(1.0, 2.0, 3.0));
+  expected.insert(key2, Vector3(6.3, 7.4, 8.5));
+
+  config0.retractMasked(delta, {key2});
+  CHECK(assert_equal(expected, config0));
+}
+
 /* ************************************************************************* */
 TEST(Values, equals)
 {

@@ -62,10 +62,10 @@ static Values computePoses(const Values& initialRot,
 
   // Upgrade rotations to full poses
   Values initialPose;
-  for (const auto key_value : initialRot) {
-    Key key = key_value.key;
-    const auto& rot = initialRot.at<typename Pose::Rotation>(key);
-    Pose initializedPose = Pose(rot, origin);
+  for (const auto& key_rot : initialRot.extract<typename Pose::Rotation>()) {
+    const Key& key = key_rot.first;
+    const auto& rot = key_rot.second;
+    const Pose initializedPose(rot, origin);
     initialPose.insert(key, initializedPose);
   }
 
@@ -82,14 +82,14 @@ static Values computePoses(const Values& initialRot,
     params.setVerbosity("TERMINATION");
   }
   GaussNewtonOptimizer optimizer(*posegraph, initialPose, params);
-  Values GNresult = optimizer.optimize();
+  const Values GNresult = optimizer.optimize();
 
   // put into Values structure
   Values estimate;
-  for (const auto key_value : GNresult) {
-    Key key = key_value.key;
+  for (const auto& key_pose : GNresult.extract<Pose>()) {
+    const Key& key = key_pose.first;
     if (key != kAnchorKey) {
-      const Pose& pose = GNresult.at<Pose>(key);
+      const Pose& pose = key_pose.second;
       estimate.insert(key, pose);
     }
   }

@@ -122,12 +122,12 @@ Values InitializePose3::computeOrientationsGradient(
   gttic(InitializePose3_computeOrientationsGradient);
 
   // this works on the inverse rotations, according to Tron&Vidal,2011
-  Values inverseRot;
-  inverseRot.insert(initialize::kAnchorKey, Rot3());
-  for(const auto key_value: givenGuess) {
-    Key key = key_value.key;
-    const Pose3& pose = givenGuess.at<Pose3>(key);
-    inverseRot.insert(key, pose.rotation().inverse());
+  std::map<Key,Rot3> inverseRot;
+  inverseRot.emplace(initialize::kAnchorKey, Rot3());
+  for(const auto& key_pose: givenGuess.extract<Pose3>()) {
+    const Key& key = key_pose.first;
+    const Pose3& pose = key_pose.second;
+    inverseRot.emplace(key, pose.rotation().inverse());
   }
 
   // Create the map of edges incident on each node
@@ -138,10 +138,8 @@ Values InitializePose3::computeOrientationsGradient(
 
   // calculate max node degree & allocate gradient
   size_t maxNodeDeg = 0;
-  VectorValues grad;
-  for(const auto key_value: inverseRot) {
-    Key key = key_value.key;
-    grad.insert(key,Z_3x1);
+  for (const auto& key_R : inverseRot) {
+    const Key& key = key_R.first;
     size_t currNodeDeg = (adjEdgesMap.at(key)).size();
     if(currNodeDeg > maxNodeDeg)
       maxNodeDeg = currNodeDeg;
@@ -162,28 +160,29 @@ Values InitializePose3::computeOrientationsGradient(
     //////////////////////////////////////////////////////////////////////////
     // compute the gradient at each node
     maxGrad = 0;
-    for (const auto key_value : inverseRot) {
-      Key key = key_value.key;
+    VectorValues grad;
+    for (const auto& key_R : inverseRot) {
+      const Key& key = key_R.first;
+      const Rot3& Ri = key_R.second;
       Vector gradKey = Z_3x1;
       // collect the gradient for each edge incident on key
       for (const size_t& factorId : adjEdgesMap.at(key)) {
-        Rot3 Rij = factorId2RotMap.at(factorId);
-        Rot3 Ri = inverseRot.at<Rot3>(key);
+        const Rot3& Rij = factorId2RotMap.at(factorId);
         auto factor = pose3Graph.at(factorId);
         const auto& keys = factor->keys();
         if (key == keys[0]) {
           Key key1 = keys[1];
-          Rot3 Rj = inverseRot.at<Rot3>(key1);
+          const Rot3& Rj = inverseRot.at(key1);
           gradKey = gradKey + gradientTron(Ri, Rij * Rj, a, b);
         } else if (key == keys[1]) {
           Key key0 = keys[0];
-          Rot3 Rj = inverseRot.at<Rot3>(key0);
+          const Rot3& Rj = inverseRot.at(key0);
           gradKey = gradKey + gradientTron(Ri, Rij.between(Rj), a, b);
         } else {
           cout << "Error in gradient computation" << endl;
         }
       } // end of i-th gradient computation
-      grad.at(key) = stepsize * gradKey;
+      grad.insert(key, stepsize * gradKey);
 
       double normGradKey = (gradKey).norm();
       if(normGradKey>maxGrad)

@@ -192,8 +191,12 @@ Values InitializePose3::computeOrientationsGradient(
 
     //////////////////////////////////////////////////////////////////////////
     // update estimates
-    inverseRot = inverseRot.retract(grad);
+    for (auto& key_R : inverseRot) {
+      const Key& key = key_R.first;
+      Rot3& Ri = key_R.second;
+      Ri = Ri.retract(grad.at(key));
+    }
 
     //////////////////////////////////////////////////////////////////////////
     // check stopping condition
     if (it>20 && maxGrad < 5e-3)

@@ -201,12 +204,13 @@ Values InitializePose3::computeOrientationsGradient(
   } // enf of gradient iterations
 
   // Return correct rotations
-  const Rot3& Rref = inverseRot.at<Rot3>(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior
+  const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior
   Values estimateRot;
-  for(const auto key_value: inverseRot) {
-    Key key = key_value.key;
+  for (const auto key_R : inverseRot) {
+    const Key& key = key_R.first;
+    const Rot3& Ri = key_R.second;
     if (key != initialize::kAnchorKey) {
-      const Rot3& R = inverseRot.at<Rot3>(key);
+      const Rot3& R = inverseRot.at(key);
       if(setRefFrame)
         estimateRot.insert(key, Rref.compose(R.inverse()));
       else

@@ -586,9 +586,9 @@ void save2D(const NonlinearFactorGraph &graph, const Values &config,
   fstream stream(filename.c_str(), fstream::out);
 
   // save poses
-  for (const auto key_value : config) {
-    const Pose2 &pose = key_value.value.cast<Pose2>();
-    stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y()
+  for (const auto &key_pose : config.extract<Pose2>()) {
+    const Pose2 &pose = key_pose.second;
+    stream << "VERTEX2 " << key_pose.first << " " << pose.x() << " " << pose.y()
            << " " << pose.theta() << endl;
   }
 
@@ -635,45 +635,33 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate,
   auto index = [](gtsam::Key key) { return Symbol(key).index(); };

   // save 2D poses
-  for (const auto key_value : estimate) {
-    auto p = dynamic_cast<const GenericValue<Pose2> *>(&key_value.value);
-    if (!p)
-      continue;
-    const Pose2 &pose = p->value();
-    stream << "VERTEX_SE2 " << index(key_value.key) << " " << pose.x() << " "
+  for (const auto &pair : estimate.extract<Pose2>()) {
+    const Pose2 &pose = pair.second;
+    stream << "VERTEX_SE2 " << index(pair.first) << " " << pose.x() << " "
            << pose.y() << " " << pose.theta() << endl;
   }

   // save 3D poses
-  for (const auto key_value : estimate) {
-    auto p = dynamic_cast<const GenericValue<Pose3> *>(&key_value.value);
-    if (!p)
-      continue;
-    const Pose3 &pose = p->value();
+  for (const auto &pair : estimate.extract<Pose3>()) {
+    const Pose3 &pose = pair.second;
     const Point3 t = pose.translation();
     const auto q = pose.rotation().toQuaternion();
-    stream << "VERTEX_SE3:QUAT " << index(key_value.key) << " " << t.x() << " "
+    stream << "VERTEX_SE3:QUAT " << index(pair.first) << " " << t.x() << " "
            << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " "
            << q.z() << " " << q.w() << endl;
   }

   // save 2D landmarks
-  for (const auto key_value : estimate) {
-    auto p = dynamic_cast<const GenericValue<Point2> *>(&key_value.value);
-    if (!p)
-      continue;
-    const Point2 &point = p->value();
-    stream << "VERTEX_XY " << index(key_value.key) << " " << point.x() << " "
+  for (const auto &pair : estimate.extract<Point2>()) {
+    const Point2 &point = pair.second;
+    stream << "VERTEX_XY " << index(pair.first) << " " << point.x() << " "
            << point.y() << endl;
   }

   // save 3D landmarks
-  for (const auto key_value : estimate) {
-    auto p = dynamic_cast<const GenericValue<Point3> *>(&key_value.value);
-    if (!p)
-      continue;
-    const Point3 &point = p->value();
-    stream << "VERTEX_TRACKXYZ " << index(key_value.key) << " " << point.x()
+  for (const auto &pair : estimate.extract<Point3>()) {
+    const Point3 &point = pair.second;
+    stream << "VERTEX_TRACKXYZ " << index(pair.first) << " " << point.x()
            << " " << point.y() << " " << point.z() << endl;
   }

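Every loop in the writeG2o hunk now uses the same Values::extract<T>() idiom: it returns a std::map<Key, T> holding only the values of the requested type, replacing the old dynamic_cast filtering. A short sketch of the idiom with a hypothetical two-entry Values (not PR code):

#include <iostream>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;

int main() {
  Values estimate;
  estimate.insert(0, Pose2(1.0, 2.0, 0.3));
  estimate.insert(1, Point2(4.0, 5.0));  // not a Pose2, so extract<Pose2> skips it
  for (const auto& key_pose : estimate.extract<Pose2>()) {
    // key_pose.first is the Key, key_pose.second the Pose2 value.
    std::cout << key_pose.first << ": " << key_pose.second.theta() << "\n";
  }
  return 0;
}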
@@ -283,9 +283,10 @@ TEST( Lago, largeGraphNoisy_orientations ) {
   Values::shared_ptr expected;
   boost::tie(gmatlab, expected) = readG2o(matlabFile);

-  for(const auto key_val: *expected){
-    Key k = key_val.key;
-    EXPECT(assert_equal(expected->at<Pose2>(k), actual.at<Pose2>(k), 1e-5));
+  for(const auto key_pose: expected->extract<Pose2>()){
+    const Key& k = key_pose.first;
+    const Pose2& pose = key_pose.second;
+    EXPECT(assert_equal(pose, actual.at<Pose2>(k), 1e-5));
   }
 }

@@ -309,9 +310,10 @@ TEST( Lago, largeGraphNoisy ) {
   Values::shared_ptr expected;
   boost::tie(gmatlab, expected) = readG2o(matlabFile);

-  for(const auto key_val: *expected){
-    Key k = key_val.key;
-    EXPECT(assert_equal(expected->at<Pose2>(k), actual.at<Pose2>(k), 1e-2));
+  for(const auto key_pose: expected->extract<Pose2>()){
+    const Key& k = key_pose.first;
+    const Pose2& pose = key_pose.second;
+    EXPECT(assert_equal(pose, actual.at<Pose2>(k), 1e-2));
   }
 }

@@ -308,12 +308,12 @@ int main(int argc, char** argv) {
   // And to demonstrate the fixed-lag aspect, print the keys contained in each smoother after 3.0 seconds
   cout << "After 15.0 seconds, each version contains the following keys:" << endl;
   cout << "  Concurrent Filter Keys:  " << endl;
-  for(const auto key_value: concurrentFilter.getLinearizationPoint()) {
-    cout << setprecision(5) << "    Key: " << key_value.key << endl;
+  for(const auto key: concurrentFilter.getLinearizationPoint().keys()) {
+    cout << setprecision(5) << "    Key: " << key << endl;
   }
   cout << "  Concurrent Smoother Keys:  " << endl;
-  for(const auto key_value: concurrentSmoother.getLinearizationPoint()) {
-    cout << setprecision(5) << "    Key: " << key_value.key << endl;
+  for(const auto key: concurrentSmoother.getLinearizationPoint().keys()) {
+    cout << setprecision(5) << "    Key: " << key << endl;
   }
   cout << "  Fixed-Lag Smoother Keys:  " << endl;
   for(const auto& key_timestamp: fixedlagSmoother.timestamps()) {

@@ -59,8 +59,8 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update(
   // Add the new variables to theta
   theta_.insert(newTheta);
   // Add new variables to the end of the ordering
-  for (const auto key_value : newTheta) {
-    ordering_.push_back(key_value.key);
+  for (const auto key : newTheta.keys()) {
+    ordering_.push_back(key);
   }
   // Augment Delta
   delta_.insert(newTheta.zeroVectors());

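The smoother updates above switch from iterating Values entries to iterating Values::keys(), which returns a KeyVector (a std::vector<Key>). A small sketch under those assumptions, with a local vector standing in for the smoother's Ordering member:

#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;

int main() {
  Values newTheta;
  newTheta.insert(0, Pose2());
  newTheta.insert(1, Pose2(1.0, 0.0, 0.0));
  KeyVector ordering;  // stand-in for the Ordering member in the hunk above
  // Append every new key, exactly as the rewritten loop does.
  for (const Key key : newTheta.keys()) {
    ordering.push_back(key);
  }
  return 0;
}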
@@ -161,8 +161,8 @@ void BatchFixedLagSmoother::eraseKeys(const KeyVector& keys) {
     factorIndex_.erase(key);

     // Erase the key from the set of linearized keys
-    if (linearKeys_.exists(key)) {
-      linearKeys_.erase(key);
+    if (linearValues_.exists(key)) {
+      linearValues_.erase(key);
     }

@@ -186,8 +186,8 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {

   // Create output result structure
   Result result;
-  result.nonlinearVariables = theta_.size() - linearKeys_.size();
-  result.linearVariables = linearKeys_.size();
+  result.nonlinearVariables = theta_.size() - linearValues_.size();
+  result.linearVariables = linearValues_.size();

   // Set optimization parameters
   double lambda = parameters_.lambdaInitial;

@@ -265,10 +265,10 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
       // Reset the deltas to zeros
       delta_.setZero();
      // Put the linearization points and deltas back for specific variables
-      if (enforceConsistency_ && (linearKeys_.size() > 0)) {
-        theta_.update(linearKeys_);
-        for(const auto key_value: linearKeys_) {
-          delta_.at(key_value.key) = newDelta.at(key_value.key);
+      if (enforceConsistency_ && (linearValues_.size() > 0)) {
+        theta_.update(linearValues_);
+        for(const auto key: linearValues_.keys()) {
+          delta_.at(key) = newDelta.at(key);
         }
       }
       // Decrease lambda for next time

@@ -136,8 +136,8 @@ protected:
   /** The current linearization point **/
   Values theta_;

-  /** The set of keys involved in current linear factors. These keys should not be relinearized. **/
-  Values linearKeys_;
+  /** The set of values involved in current linear factors. **/
+  Values linearValues_;

   /** The current ordering */
   Ordering ordering_;

@@ -139,8 +139,8 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::update(const NonlinearFacto
   // Add the new variables to theta
   theta_.insert(newTheta);
   // Add new variables to the end of the ordering
-  for(const auto key_value: newTheta) {
-    ordering_.push_back(key_value.key);
+  for (const auto key : newTheta.keys()) {
+    ordering_.push_back(key);
   }
   // Augment Delta
   delta_.insert(newTheta.zeroVectors());

@@ -221,10 +221,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm
   if(debug) { PrintNonlinearFactorGraph(smootherSummarization_, "ConcurrentBatchFilter::synchronize  ", "Updated Smoother Summarization:", DefaultKeyFormatter); }

   // Find the set of new separator keys
-  KeySet newSeparatorKeys;
-  for(const auto key_value: separatorValues_) {
-    newSeparatorKeys.insert(key_value.key);
-  }
+  const KeySet newSeparatorKeys = separatorValues_.keySet();

   if(debug) { PrintKeys(newSeparatorKeys, "ConcurrentBatchFilter::synchronize  ", "Current Separator Keys:"); }

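Where a whole key set is needed rather than a vector, the PR calls Values::keySet() in one line instead of building the set in a loop. A minimal sketch of that call:

#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;

int main() {
  Values separatorValues;
  separatorValues.insert(0, Pose2());
  separatorValues.insert(1, Pose2(1.0, 0.0, 0.0));
  // One call replaces the old insert loop; KeySet is a sorted set of Key.
  const KeySet newSeparatorKeys = separatorValues.keySet();
  return 0;
}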
@@ -236,9 +233,9 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm
     graph.push_back(smootherShortcut_);
     Values values;
     values.insert(smootherSummarizationValues);
-    for(const auto key_value: separatorValues_) {
-      if(!values.exists(key_value.key)) {
-        values.insert(key_value.key, key_value.value);
+    for(const auto key: newSeparatorKeys) {
+      if(!values.exists(key)) {
+        values.insert(key, separatorValues_.at(key));
       }
     }
     // Calculate the summarized factor on just the new separator keys

@@ -471,8 +468,8 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values
       // Put the linearization points and deltas back for specific variables
       if(linearValues.size() > 0) {
         theta.update(linearValues);
-        for(const auto key_value: linearValues) {
-          delta.at(key_value.key) = newDelta.at(key_value.key);
+        for(const auto key: linearValues.keys()) {
+          delta.at(key) = newDelta.at(key);
         }
       }

@@ -574,9 +571,7 @@ void ConcurrentBatchFilter::moveSeparator(const FastList<Key>& keysToMove) {

   // Calculate the set of new separator keys: AffectedKeys + PreviousSeparatorKeys - KeysToMove
   KeySet newSeparatorKeys = removedFactors.keys();
-  for(const auto key_value: separatorValues_) {
-    newSeparatorKeys.insert(key_value.key);
-  }
+  newSeparatorKeys.merge(separatorValues_.keySet());
   for(Key key: keysToMove) {
     newSeparatorKeys.erase(key);
   }

@@ -61,8 +61,8 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF
   theta_.insert(newTheta);

   // Add new variables to the end of the ordering
-  for(const auto key_value: newTheta) {
-    ordering_.push_back(key_value.key);
+  for(const auto key: newTheta.keys()) {
+    ordering_.push_back(key);
   }

   // Augment Delta

@@ -135,26 +135,30 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa
   removeFactors(filterSummarizationSlots_);

   // Insert new linpoints into the values, augment the ordering, and store new dims to augment delta
-  for(const auto key_value: smootherValues) {
-    std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
-    if(iter_inserted.second) {
-      // If the insert succeeded
-      ordering_.push_back(key_value.key);
-      delta_.insert(key_value.key, Vector::Zero(key_value.value.dim()));
+  for(const auto key: smootherValues.keys()) {
+    if(!theta_.exists(key)) {
+      // If this is a new key for theta_, also add to ordering and delta.
+      const auto& value = smootherValues.at(key);
+      delta_.insert(key, Vector::Zero(value.dim()));
+      theta_.insert(key, value);
+      ordering_.push_back(key);
     } else {
-      // If the element already existed in theta_
-      iter_inserted.first->value = key_value.value;
+      // If the key already existed in theta_, just update.
+      const auto& value = smootherValues.at(key);
+      theta_.update(key, value);
     }
   }
-  for(const auto key_value: separatorValues) {
-    std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
-    if(iter_inserted.second) {
-      // If the insert succeeded
-      ordering_.push_back(key_value.key);
-      delta_.insert(key_value.key, Vector::Zero(key_value.value.dim()));
+  for(const auto key: separatorValues.keys()) {
+    if(!theta_.exists(key)) {
+      // If this is a new key for theta_, also add to ordering and delta.
+      const auto& value = separatorValues.at(key);
+      delta_.insert(key, Vector::Zero(value.dim()));
+      theta_.insert(key, value);
+      ordering_.push_back(key);
     } else {
-      // If the element already existed in theta_
-      iter_inserted.first->value = key_value.value;
+      // If the key already existed in theta_, just update.
+      const auto& value = separatorValues.at(key);
+      theta_.update(key, value);
     }
   }

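The synchronize hunk above replaces the removed Values::tryInsert with an explicit exists/insert/update branch. A sketch of that pattern as a free function (the upsert name is illustrative, not from the PR):

#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;

// Insert the value if the key is new, otherwise overwrite the existing entry.
void upsert(Values& theta, Key key, const Pose2& value) {
  if (!theta.exists(key)) {
    theta.insert(key, value);
  } else {
    theta.update(key, value);
  }
}

int main() {
  Values theta;
  upsert(theta, 0, Pose2());               // new key: inserts
  upsert(theta, 0, Pose2(1.0, 0.0, 0.0));  // existing key: updates
  return 0;
}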
@@ -322,8 +326,8 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
       // Put the linearization points and deltas back for specific variables
       if(separatorValues_.size() > 0) {
         theta_.update(separatorValues_);
-        for(const auto key_value: separatorValues_) {
-          delta_.at(key_value.key) = newDelta.at(key_value.key);
+        for(const auto key: separatorValues_.keys()) {
+          delta_.at(key) = newDelta.at(key);
         }
       }

@@ -371,10 +375,7 @@ void ConcurrentBatchSmoother::updateSmootherSummarization() {
   }

   // Get the set of separator keys
-  gtsam::KeySet separatorKeys;
-  for(const auto key_value: separatorValues_) {
-    separatorKeys.insert(key_value.key);
-  }
+  const KeySet separatorKeys = separatorValues_.keySet();

   // Calculate the marginal factors on the separator
   smootherSummarization_ = internal::calculateMarginalFactors(graph, theta_, separatorKeys, parameters_.getEliminationFunction());

@@ -69,14 +69,14 @@ ConcurrentIncrementalFilter::Result ConcurrentIncrementalFilter::update(const No
     int group = 1;
     // Set all existing variables to Group1
     if(isam2_.getLinearizationPoint().size() > 0) {
-      for(const auto key_value: isam2_.getLinearizationPoint()) {
-        orderingConstraints->operator[](key_value.key) = group;
+      for(const auto key: isam2_.getLinearizationPoint().keys()) {
+        orderingConstraints->operator[](key) = group;
       }
       ++group;
     }
     // Assign new variables to the root
-    for(const auto key_value: newTheta) {
-      orderingConstraints->operator[](key_value.key) = group;
+    for(const auto key: newTheta.keys()) {
+      orderingConstraints->operator[](key) = group;
     }
     // Set marginalizable variables to Group0
     for(Key key: *keysToMove){

@@ -201,8 +201,8 @@ void ConcurrentIncrementalFilter::synchronize(const NonlinearFactorGraph& smooth

   // Force iSAM2 not to relinearize anything during this iteration
   FastList<Key> noRelinKeys;
-  for(const auto key_value: isam2_.getLinearizationPoint()) {
-    noRelinKeys.push_back(key_value.key);
+  for(const auto key: isam2_.getLinearizationPoint().keys()) {
+    noRelinKeys.push_back(key);
   }

   // Calculate the summarized factor on just the new separator keys

@@ -66,9 +66,9 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
   // Also, mark the separator keys as fixed linearization points
   FastMap<Key,int> constrainedKeys;
   FastList<Key> noRelinKeys;
-  for(const auto key_value: separatorValues_) {
-    constrainedKeys[key_value.key] = 1;
-    noRelinKeys.push_back(key_value.key);
+  for (const auto key : separatorValues_.keys()) {
+    constrainedKeys[key] = 1;
+    noRelinKeys.push_back(key);
   }

   // Use iSAM2 to perform an update

@@ -82,14 +82,14 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
   Values values(newTheta);
   // Unfortunately, we must be careful here, as some of the smoother values
   // and/or separator values may have been added previously
-  for(const auto key_value: smootherValues_) {
-    if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
-      values.insert(key_value.key, smootherValues_.at(key_value.key));
+  for(const auto key: smootherValues_.keys()) {
+    if(!isam2_.getLinearizationPoint().exists(key)) {
+      values.insert(key, smootherValues_.at(key));
     }
   }
-  for(const auto key_value: separatorValues_) {
-    if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
-      values.insert(key_value.key, separatorValues_.at(key_value.key));
+  for(const auto key: separatorValues_.keys()) {
+    if(!isam2_.getLinearizationPoint().exists(key)) {
+      values.insert(key, separatorValues_.at(key));
     }
   }

@@ -245,10 +245,7 @@ void ConcurrentIncrementalSmoother::updateSmootherSummarization() {
   }

   // Get the set of separator keys
-  KeySet separatorKeys;
-  for(const auto key_value: separatorValues_) {
-    separatorKeys.insert(key_value.key);
-  }
+  const KeySet separatorKeys = separatorValues_.keySet();

   // Calculate the marginal factors on the separator
   smootherSummarization_ = internal::calculateMarginalFactors(graph, isam2_.getLinearizationPoint(), separatorKeys,

@@ -64,8 +64,8 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,

   std::set<Key> KeysToKeep;
-  for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
-    KeysToKeep.insert(key_value.key);
+  for(const auto key: linPoint.keys()) { // we cycle over all the keys of factorGraph
+    KeysToKeep.insert(key);
   } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
   for(Key key: keysToMarginalize) {
     KeysToKeep.erase(key);

@@ -560,8 +560,8 @@ TEST( ConcurrentBatchSmoother, synchronize_3 )
   GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues);

   KeySet eliminateKeys = linearFactors->keys();
-  for(const auto key_value: filterSeparatorValues) {
-    eliminateKeys.erase(key_value.key);
+  for(const auto key: filterSeparatorValues.keys()) {
+    eliminateKeys.erase(key);
   }
   KeyVector variables(eliminateKeys.begin(), eliminateKeys.end());
   GaussianFactorGraph result = *linearFactors->eliminatePartialMultifrontal(variables, EliminateCholesky).second;

@@ -80,8 +80,8 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,

   std::set<Key> KeysToKeep;
-  for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
-    KeysToKeep.insert(key_value.key);
+  for(const auto key: linPoint.keys()) { // we cycle over all the keys of factorGraph
+    KeysToKeep.insert(key);
   } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
   for(Key key: keysToMarginalize) {
     KeysToKeep.erase(key);

@@ -512,8 +512,8 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_2 )
   // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
   Values expectedLinearizationPoint = filterSeparatorValues;
   Values actualLinearizationPoint;
-  for(const auto key_value: filterSeparatorValues) {
-    actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
+  for(const auto key: filterSeparatorValues.keys()) {
+    actualLinearizationPoint.insert(key, smoother.getLinearizationPoint().at(key));
   }
   CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
 }

@@ -580,8 +580,8 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_3 )
   // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();

   KeySet allkeys = LinFactorGraph->keys();
-  for(const auto key_value: filterSeparatorValues) {
-    allkeys.erase(key_value.key);
+  for(const auto key: filterSeparatorValues.keys()) {
+    allkeys.erase(key);
   }
   KeyVector variables(allkeys.begin(), allkeys.end());
   std::pair<GaussianBayesNet::shared_ptr, GaussianFactorGraph::shared_ptr> result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky);

@@ -513,8 +513,8 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_2 )
   // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
   Values expectedLinearizationPoint = filterSeparatorValues;
   Values actualLinearizationPoint;
-  for(const auto key_value: filterSeparatorValues) {
-    actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
+  for(const auto key: filterSeparatorValues.keys()) {
+    actualLinearizationPoint.insert(key, smoother.getLinearizationPoint().at(key));
   }
   CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
 }

@@ -582,8 +582,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_3 )
   // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();

   KeySet allkeys = LinFactorGraph->keys();
-  for(const auto key_value: filterSeparatorValues)
-    allkeys.erase(key_value.key);
+  for (const auto key : filterSeparatorValues.keys()) allkeys.erase(key);
   KeyVector variables(allkeys.begin(), allkeys.end());
   std::pair<GaussianBayesNet::shared_ptr, GaussianFactorGraph::shared_ptr> result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky);

@@ -40,8 +40,7 @@ int main(int argc, char* argv[]) {
   os << "images,points,matches,Base,Map,BTree" << endl;

   // loop over number of images
-  vector<size_t> ms;
-  ms += 10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000;
+  vector<size_t> ms {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000};
   for(size_t m: ms) {
     // We use volatile here to make these appear to the optimizing compiler as
     // if their values are only known at run-time.

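The timing scripts drop boost::assign's overloaded += in favor of C++11 initializer lists and plain push_back. A minimal equivalent of the new container setup:

#include <cstddef>
#include <vector>

int main() {
  // Brace initialization replaces "ms += 10, 20, ...;" from boost::assign.
  std::vector<std::size_t> ms{10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000};
  for (std::size_t m : ms) {
    (void)m;  // loop body elided in this sketch
  }
  return 0;
}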
@@ -100,7 +100,6 @@ int main() {
   // Oct 3, 2014, Macbook Air
   // 9.0 musecs/call
-  typedef PinholeCamera<Cal3_S2> Camera;
   typedef Expression<Camera> Camera_;
   NonlinearFactor::shared_ptr g3 =
       boost::make_shared<ExpressionFactor<Point2> >(model, z,
           Point2_(myProject, x, p));

@@ -101,7 +101,7 @@ void timeAll(size_t m, size_t N) {
   { // Raw memory Version
     FastVector < Key > keys;
     for (size_t i = 0; i < m; i++)
-      keys += i;
+      keys.push_back(i);
     Vector x = xvalues.vector(keys);
     double* xdata = x.data();

@@ -144,7 +144,7 @@ int main(void) {
   //  ms += 20, 30, 40, 50;
   //  ms += 20,30,40,50,60,70,80,90,100;
   for (size_t m = 2; m <= 50; m += 2)
-    ms += m;
+    ms.push_back(m);
   //for (size_t m=10;m<=100;m+=10) ms += m;
   // loop over number of images
   for(size_t m: ms)