make check for entire library now works

parent 07aaf38245
commit 97a82add25
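
This commit renames the Lie-group style expmap/logmap interface to the manifold-style retract/unretract interface and updates the call sites and tests shown in the hunks below. The semantics are unchanged: retract applies a tangent-space increment to a value, and unretract recovers the increment between two values. A minimal sketch of that convention, using a hypothetical 2D point type rather than any of the library's classes:

// Minimal sketch of the retract/unretract naming adopted in this commit.
// Point2Sketch is a hypothetical type, not the library's Point2.
#include <array>
#include <cassert>

struct Point2Sketch {
  double x, y;

  // retract: apply a tangent-space increment to this value
  Point2Sketch retract(const std::array<double, 2>& d) const {
    return Point2Sketch{x + d[0], y + d[1]};
  }

  // unretract: recover the increment that takes *this to other
  std::array<double, 2> unretract(const Point2Sketch& other) const {
    return {other.x - x, other.y - y};
  }
};

int main() {
  Point2Sketch p{1.0, 2.0};
  Point2Sketch q = p.retract({0.5, -0.5});   // q is p "plus" a delta
  std::array<double, 2> d = p.unretract(q);  // recovers that delta
  assert(d[0] == 0.5 && d[1] == -0.5);
  return 0;
}
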
@@ -69,12 +69,6 @@ namespace gtsam {
     /// Return canonical coordinate
     static Vector Unretract(const CalibratedCamera& p);
 
-    // // Manifold requirements - use existing expmap/logmap
-    // inline CalibratedCamera retract(const Vector& v) const { return expmap(v); }
-    // inline static CalibratedCamera Retract(const Vector& v) { return Expmap(v); }
-    // inline Vector unretract(const CalibratedCamera& t2) const { return logmap(t2); }
-    // inline static Vector Unretract(const CalibratedCamera& t) { return Logmap(t); }
-
     /// Lie group dimensionality
     inline size_t dim() const { return 6 ; }
 
@@ -50,11 +50,11 @@ namespace gtsam {
       return CalibratedCameraT( pose_.inverse(), k_ ) ;
     }
 
-    CalibratedCameraT expmap(const Vector& d) const {
-      return CalibratedCameraT(pose().expmap(d), k_) ;
+    CalibratedCameraT retract(const Vector& d) const {
+      return CalibratedCameraT(pose().retract(d), k_) ;
     }
-    Vector logmap(const CalibratedCameraT& T2) const {
-      return pose().logmap(T2.pose()) ;
+    Vector unretract(const CalibratedCameraT& T2) const {
+      return pose().unretract(T2.pose()) ;
     }
 
     void print(const std::string& s = "") const {
@@ -116,31 +116,28 @@ class GeneralCameraT {
     calibration_.print(s + ".calibration.") ;
   }
 
-  GeneralCameraT expmap(const Vector &v) const {
+  GeneralCameraT retract(const Vector &v) const {
     Vector v1 = sub(v,0,Camera::Dim());
     Vector v2 = sub(v,Camera::Dim(),Camera::Dim()+Calibration::Dim());
-    return GeneralCameraT(calibrated_.expmap(v1), calibration_.expmap(v2));
+    return GeneralCameraT(calibrated_.retract(v1), calibration_.retract(v2));
   }
 
-  Vector logmap(const GeneralCameraT &C) const {
-    //std::cout << "logmap" << std::endl;
-    const Vector v1(calibrated().logmap(C.calibrated())),
-                 v2(calibration().logmap(C.calibration()));
+  Vector unretract(const GeneralCameraT &C) const {
+    const Vector v1(calibrated().unretract(C.calibrated())),
+                 v2(calibration().unretract(C.calibration()));
     return concatVectors(2,&v1,&v2) ;
   }
 
-  static GeneralCameraT Expmap(const Vector& v) {
-    //std::cout << "Expmap" << std::endl;
+  static GeneralCameraT Retract(const Vector& v) {
     return GeneralCameraT(
-      Camera::Expmap(sub(v,0,Camera::Dim())),
-      Calibration::Expmap(sub(v,Camera::Dim(), Camera::Dim()+Calibration::Dim()))
+      Camera::Retract(sub(v,0,Camera::Dim())),
+      Calibration::Retract(sub(v,Camera::Dim(), Camera::Dim()+Calibration::Dim()))
     );
   }
 
-  static Vector Logmap(const GeneralCameraT& p) {
-    //std::cout << "Logmap" << std::endl;
-    const Vector v1(Camera::Logmap(p.calibrated())),
-                 v2(Calibration::Logmap(p.calibration()));
+  static Vector Unretract(const GeneralCameraT& p) {
+    const Vector v1(Camera::Unretract(p.calibrated())),
+                 v2(Calibration::Unretract(p.calibration()));
     return concatVectors(2,&v1,&v2);
 
   }
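
For a composite value such as GeneralCameraT, retract splits the incoming delta into one block per component (via the sub helper) and retracts each component with its own block, while unretract concatenates the per-component increments. A self-contained sketch of that splitting pattern, with placeholder component types and std::vector standing in for the library's Vector:

// Sketch: retract on a composite value splits the delta into per-component
// blocks, mirroring the sub(v, 0, Camera::Dim()) pattern in the hunk above.
// All types here are illustrative placeholders.
#include <cstddef>
#include <vector>

using Vector = std::vector<double>;

// Half-open slice [i1, i2) of v, like the sub() helper used above.
static Vector sub(const Vector& v, std::size_t i1, std::size_t i2) {
  return Vector(v.begin() + i1, v.begin() + i2);
}

// Placeholder component with a vector-space retract (state + delta).
struct Part {
  Vector state;
  std::size_t dim() const { return state.size(); }
  Part retract(const Vector& d) const {
    Part r = *this;
    for (std::size_t i = 0; i < d.size(); ++i) r.state[i] += d[i];
    return r;
  }
};

// Composite value: retract hands each component its own block of the delta.
struct CompositeCamera {
  Part camera;       // e.g. a 6-dof pose block
  Part calibration;  // e.g. a 5-dof calibration block
  CompositeCamera retract(const Vector& v) const {
    Vector v1 = sub(v, 0, camera.dim());
    Vector v2 = sub(v, camera.dim(), camera.dim() + calibration.dim());
    return CompositeCamera{camera.retract(v1), calibration.retract(v2)};
  }
};
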
@@ -43,7 +43,7 @@ namespace gtsam {
 
     // Extract the current estimate of x1,P1 from the Bayes Network
     VectorValues result = optimize(*linearBayesNet);
-    T x = linearizationPoints[lastKey].expmap(result[lastIndex]);
+    T x = linearizationPoints[lastKey].retract(result[lastIndex]);
 
     // Create a Jacobian Factor from the root node of the produced Bayes Net.
     // This will act as a prior for the next iteration.
@@ -237,7 +237,7 @@ struct _SelectiveExpmapAndClear {
     assert(delta[var].size() == (int)it_x->second.dim());
     assert(delta[var].unaryExpr(&isfinite<double>).all());
     if(mask[var]) {
-      it_x->second = it_x->second.expmap(delta[var]);
+      it_x->second = it_x->second.retract(delta[var]);
       if(invalidate)
         (*invalidate)[var].operator=(Vector::Constant(delta[var].rows(), numeric_limits<double>::infinity())); // Strange syntax to work with clang++ (bug in clang?)
     }
@@ -548,7 +548,7 @@ template<class Conditional, class Values>
 Values ISAM2<Conditional, Values>::calculateBestEstimate() const {
   VectorValues delta(theta_.dims(ordering_));
   optimize2(this->root(), delta);
-  return theta_.expmap(delta, ordering_);
+  return theta_.retract(delta, ordering_);
 }
 
 }
@@ -136,7 +136,7 @@ namespace gtsam {
   /* ************************************************************************* */
   // todo: insert for every element is inefficient
   template<class J>
-  LieValues<J> LieValues<J>::expmap(const VectorValues& delta, const Ordering& ordering) const {
+  LieValues<J> LieValues<J>::retract(const VectorValues& delta, const Ordering& ordering) const {
     LieValues<J> newValues;
     typedef pair<J,typename J::Value> KeyValue;
     BOOST_FOREACH(const KeyValue& value, this->values_) {
@@ -144,7 +144,7 @@ namespace gtsam {
       const typename J::Value& pj = value.second;
       Index index;
       if(ordering.tryAt(j,index)) {
-        newValues.insert(j, pj.expmap(delta[index]));
+        newValues.insert(j, pj.retract(delta[index]));
       } else
         newValues.insert(j, pj);
     }
@@ -171,7 +171,7 @@ namespace gtsam {
 //  /* ************************************************************************* */
 //  // todo: insert for every element is inefficient
 //  template<class J>
-//  LieValues<J> LieValues<J>::expmap(const Vector& delta) const {
+//  LieValues<J> LieValues<J>::retract(const Vector& delta) const {
 //    if(delta.size() != dim()) {
 //      cout << "LieValues::dim (" << dim() << ") <> delta.size (" << delta.size() << ")" << endl;
 //      throw invalid_argument("Delta vector length does not match config dimensionality.");
@@ -183,7 +183,7 @@ namespace gtsam {
 //      const J& j = value.first;
 //      const typename J::Value& pj = value.second;
 //      int cur_dim = pj.dim();
-//      newValues.insert(j,pj.expmap(sub(delta, delta_offset, delta_offset+cur_dim)));
+//      newValues.insert(j,pj.retract(sub(delta, delta_offset, delta_offset+cur_dim)));
 //      delta_offset += cur_dim;
 //    }
 //    return newValues;
@@ -193,19 +193,19 @@ namespace gtsam {
   // todo: insert for every element is inefficient
   // todo: currently only logmaps elements in both configs
   template<class J>
-  inline VectorValues LieValues<J>::logmap(const LieValues<J>& cp, const Ordering& ordering) const {
+  inline VectorValues LieValues<J>::unretract(const LieValues<J>& cp, const Ordering& ordering) const {
     VectorValues delta(this->dims(ordering));
-    logmap(cp, ordering, delta);
+    unretract(cp, ordering, delta);
     return delta;
   }
 
   /* ************************************************************************* */
   template<class J>
-  void LieValues<J>::logmap(const LieValues<J>& cp, const Ordering& ordering, VectorValues& delta) const {
+  void LieValues<J>::unretract(const LieValues<J>& cp, const Ordering& ordering, VectorValues& delta) const {
     typedef pair<J,typename J::Value> KeyValue;
     BOOST_FOREACH(const KeyValue& value, cp) {
       assert(this->exists(value.first));
-      delta[ordering[value.first]] = this->at(value.first).logmap(value.second);
+      delta[ordering[value.first]] = this->at(value.first).unretract(value.second);
     }
   }
 
@@ -119,13 +119,13 @@ namespace gtsam {
     // Lie operations
 
     /** Add a delta config to current config and returns a new config */
-    LieValues expmap(const VectorValues& delta, const Ordering& ordering) const;
+    LieValues retract(const VectorValues& delta, const Ordering& ordering) const;
 
     /** Get a delta config about a linearization point c0 (*this) */
-    VectorValues logmap(const LieValues& cp, const Ordering& ordering) const;
+    VectorValues unretract(const LieValues& cp, const Ordering& ordering) const;
 
     /** Get a delta config about a linearization point c0 (*this) */
-    void logmap(const LieValues& cp, const Ordering& ordering, VectorValues& delta) const;
+    void unretract(const LieValues& cp, const Ordering& ordering, VectorValues& delta) const;
 
     // imperative methods:
 
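
At the container level, retract walks the stored key/value pairs, looks up each key's slot in the ordering, and retracts each value by its delta block; keys without an entry are copied unchanged, matching the tryAt branch in the implementation above. A rough sketch of that loop with placeholder key, value, and ordering types (not the library's LieValues/VectorValues/Ordering):

// Sketch of a container-level retract: each stored value is retracted with
// the delta block that an ordering assigns to its key. All types here are
// placeholders.
#include <cstddef>
#include <map>
#include <string>
#include <vector>

using Key = std::string;
using Delta = std::vector<double>;

struct Value {                       // placeholder manifold element
  std::vector<double> v;
  Value retract(const Delta& d) const {
    Value r = *this;
    for (std::size_t i = 0; i < d.size(); ++i) r.v[i] += d[i];
    return r;
  }
};

struct ValuesSketch {
  std::map<Key, Value> values;

  // Look up each key's slot in the ordering and retract by that block;
  // keys without a delta are carried over unchanged.
  ValuesSketch retract(const std::vector<Delta>& delta,
                       const std::map<Key, std::size_t>& ordering) const {
    ValuesSketch out;
    for (const auto& kv : values) {
      auto it = ordering.find(kv.first);
      if (it != ordering.end())
        out.values[kv.first] = kv.second.retract(delta[it->second]);
      else
        out.values[kv.first] = kv.second;
    }
    return out;
  }
};
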
@@ -573,7 +573,7 @@ public:
   Vector evaluateError(const X& x1, boost::optional<Matrix&> H1 = boost::none) const {
     const size_t p = X::Dim();
     if (H1) *H1 = eye(p);
-    return value_.logmap(x1);
+    return value_.unretract(x1);
   }
 
   /** Print */
@@ -628,7 +628,7 @@ public:
     const size_t p = X::Dim();
     if (H1) *H1 = -eye(p);
     if (H2) *H2 = eye(p);
-    return x1.logmap(x2);
+    return x1.unretract(x2);
   }
 
 private:
@@ -120,7 +120,7 @@ namespace gtsam {
     size_t nj = feasible_.dim();
     if (allow_error_) {
       if (H) *H = eye(nj); // FIXME: this is not the right linearization for nonlinear compare
-      return xj.logmap(feasible_);
+      return xj.unretract(feasible_);
     } else if (compare_(feasible_,xj)) {
       if (H) *H = eye(nj);
       return zero(nj); // set error to zero if equal
@@ -93,7 +93,7 @@ void NonlinearISAM<Values>::reorder_relinearize() {
 template<class Values>
 Values NonlinearISAM<Values>::estimate() const {
   if(isam_.size() > 0)
-    return linPoint_.expmap(optimize(isam_), ordering_);
+    return linPoint_.retract(optimize(isam_), ordering_);
   else
     return linPoint_;
 }
@@ -89,7 +89,7 @@ namespace gtsam {
     if (verbosity >= Parameters::DELTA) delta.print("delta");
 
     // take old values and update it
-    shared_values newValues(new C(values_->expmap(delta, *ordering_)));
+    shared_values newValues(new C(values_->retract(delta, *ordering_)));
 
     // maybe show output
     if (verbosity >= Parameters::VALUES) newValues->print("newValues");
@@ -179,7 +179,7 @@ namespace gtsam {
     if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
 
     // update values
-    shared_values newValues(new T(values_->expmap(delta, *ordering_)));
+    shared_values newValues(new T(values_->retract(delta, *ordering_)));
 
     // create new optimization state with more adventurous lambda
     double error = graph_->error(*newValues);
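
Both optimizer paths keep the same update shape after the rename: solve the linearized system for a delta, then apply it to the current values with retract and re-evaluate the error. A toy sketch of that iterate-and-retract loop on a one-dimensional problem (nothing here is the library's optimizer):

// Sketch of the update pattern used above: compute a delta in the tangent
// space, apply it with retract, repeat until the step is negligible.
// The "problem" is a toy scalar least-squares fit.
#include <cmath>
#include <cstdio>

struct Scalar {                         // placeholder 1-d manifold value
  double x;
  Scalar retract(double delta) const { return Scalar{x + delta}; }
};

int main() {
  const double target = 5.0;            // minimize 0.5 * (x - target)^2
  Scalar value{0.0};
  for (int iter = 0; iter < 20; ++iter) {
    double residual = value.x - target; // linearize at the current value
    double delta = -residual;           // Gauss-Newton step for this toy case
    if (std::fabs(delta) < 1e-9) break;
    value = value.retract(delta);       // analogue of values_->retract(delta, ordering)
    std::printf("iter %d: x = %f\n", iter, value.x);
  }
  return 0;
}
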
@@ -205,21 +205,21 @@ namespace gtsam {
     }
 
     /** Expmap */
-    TupleValues<VALUES1, VALUES2> expmap(const VectorValues& delta, const Ordering& ordering) const {
-      return TupleValues(first_.expmap(delta, ordering), second_.expmap(delta, ordering));
+    TupleValues<VALUES1, VALUES2> retract(const VectorValues& delta, const Ordering& ordering) const {
+      return TupleValues(first_.retract(delta, ordering), second_.retract(delta, ordering));
     }
 
     /** logmap each element */
-    VectorValues logmap(const TupleValues<VALUES1, VALUES2>& cp, const Ordering& ordering) const {
+    VectorValues unretract(const TupleValues<VALUES1, VALUES2>& cp, const Ordering& ordering) const {
       VectorValues delta(this->dims(ordering));
-      logmap(cp, ordering, delta);
+      unretract(cp, ordering, delta);
       return delta;
     }
 
     /** logmap each element */
-    void logmap(const TupleValues<VALUES1, VALUES2>& cp, const Ordering& ordering, VectorValues& delta) const {
-      first_.logmap(cp.first_, ordering, delta);
-      second_.logmap(cp.second_, ordering, delta);
+    void unretract(const TupleValues<VALUES1, VALUES2>& cp, const Ordering& ordering, VectorValues& delta) const {
+      first_.unretract(cp.first_, ordering, delta);
+      second_.unretract(cp.second_, ordering, delta);
     }
 
     /**
@@ -318,18 +318,18 @@ namespace gtsam {
 
     size_t dim() const { return first_.dim(); }
 
-    TupleValuesEnd<VALUES> expmap(const VectorValues& delta, const Ordering& ordering) const {
-      return TupleValuesEnd(first_.expmap(delta, ordering));
+    TupleValuesEnd<VALUES> retract(const VectorValues& delta, const Ordering& ordering) const {
+      return TupleValuesEnd(first_.retract(delta, ordering));
     }
 
-    VectorValues logmap(const TupleValuesEnd<VALUES>& cp, const Ordering& ordering) const {
+    VectorValues unretract(const TupleValuesEnd<VALUES>& cp, const Ordering& ordering) const {
       VectorValues delta(this->dims(ordering));
-      logmap(cp, ordering, delta);
+      unretract(cp, ordering, delta);
       return delta;
     }
 
-    void logmap(const TupleValuesEnd<VALUES>& cp, const Ordering& ordering, VectorValues& delta) const {
-      first_.logmap(cp.first_, ordering, delta);
+    void unretract(const TupleValuesEnd<VALUES>& cp, const Ordering& ordering, VectorValues& delta) const {
+      first_.unretract(cp.first_, ordering, delta);
     }
 
     /**
@@ -137,7 +137,7 @@ TEST(LieValues, expmap_a)
   expected.insert(key1, Vector_(3, 2.0, 3.1, 4.2));
   expected.insert(key2, Vector_(3, 6.3, 7.4, 8.5));
 
-  CHECK(assert_equal(expected, config0.expmap(increment, ordering)));
+  CHECK(assert_equal(expected, config0.retract(increment, ordering)));
 }
 
 ///* ************************************************************************* */
@@ -155,7 +155,7 @@ TEST(LieValues, expmap_a)
 //  expected.insert(key1, Vector_(3, 1.0, 2.0, 3.0));
 //  expected.insert(key2, Vector_(3, 6.3, 7.4, 8.5));
 //
-//  CHECK(assert_equal(expected, config0.expmap(increment, ordering)));
+//  CHECK(assert_equal(expected, config0.retract(increment, ordering)));
 //}
 
 ///* ************************************************************************* */
@@ -173,7 +173,7 @@ TEST(LieValues, expmap_a)
 //  expected.insert(key1, Vector_(3, 2.0, 3.1, 4.2));
 //  expected.insert(key2, Vector_(3, 6.3, 7.4, 8.5));
 //
-//  CHECK(assert_equal(expected, config0.expmap(increment)));
+//  CHECK(assert_equal(expected, config0.retract(increment)));
 //}
 
 /* ************************************************************************* */
@@ -51,7 +51,7 @@ namespace gtsam {
         boost::optional<Matrix&> H1 = boost::none,
         boost::optional<Matrix&> H2 = boost::none) const {
       X hx = x1.between(x2, H1, H2);
-      return measured_.logmap(hx);
+      return measured_.unretract(hx);
     }
 
     inline const X measured() const {
@@ -83,7 +83,7 @@ namespace gtsam {
         boost::none) const {
       T hx = p1.between(p2, H1, H2); // h(x)
       // manifold equivalent of h(x)-z -> log(z,h(x))
-      return measured_.logmap(hx);
+      return measured_.unretract(hx);
     }
 
     /** return the measured */
@@ -82,7 +82,7 @@ namespace gtsam {
         boost::optional<Matrix&> H1=boost::none,
         boost::optional<Matrix&> H2=boost::none) const {
 
-      Vector error = z_.logmap(camera.project(point,H1,H2));
+      Vector error = z_.unretract(camera.project(point,H1,H2));
       // gtsam::print(error, "error");
       return error;
     }
@@ -118,16 +118,16 @@ namespace gtsam {
     /** vector of errors */
     Vector evaluateError(const T& p, boost::optional<Matrix&> H = boost::none) const {
       if (H) (*H) = zeros(this->dim(), p.dim());
-      Vector full_logmap = T::Logmap(p);
-      Vector masked_logmap = zero(this->dim());
+      Vector full_unretraction = T::Unretract(p);
+      Vector masked_unretraction = zero(this->dim());
       size_t masked_idx=0;
       for (size_t i=0;i<mask_.size();++i)
         if (mask_[i]) {
-          masked_logmap(masked_idx) = full_logmap(i);
+          masked_unretraction(masked_idx) = full_unretraction(i);
           if (H) (*H)(masked_idx, i) = 1.0;
           ++masked_idx;
         }
-      return masked_logmap - prior_;
+      return masked_unretraction - prior_;
     }
 
     // access
@@ -81,7 +81,7 @@ namespace gtsam {
     Vector evaluateError(const T& p, boost::optional<Matrix&> H = boost::none) const {
       if (H) (*H) = eye(p.dim());
       // manifold equivalent of h(x)-z -> log(z,h(x))
-      return prior_.logmap(p);
+      return prior_.unretract(p);
     }
 
   private:
@@ -130,7 +130,7 @@ pair<sharedPose2Graph, sharedPose2Values> load2D(const string& filename,
     }
 
     if (addNoise)
-      l1Xl2 = l1Xl2.expmap((*model)->sample());
+      l1Xl2 = l1Xl2.retract((*model)->sample());
 
     // Insert vertices if pure odometry file
     if (!poses->exists(id1)) poses->insert(id1, Pose2());
@@ -69,7 +69,7 @@ namespace gtsam {
     /// Evaluate error and optionally derivative
     Vector evaluateError(const Pose2& x, boost::optional<Matrix&> H =
         boost::none) const {
-      return z_.logmap(prior(x, H));
+      return z_.unretract(prior(x, H));
     }
 
   };
@@ -93,7 +93,7 @@ namespace gtsam {
     Vector evaluateError(const Pose2& x1, const Pose2& x2,
         boost::optional<Matrix&> H1 = boost::none,
         boost::optional<Matrix&> H2 = boost::none) const {
-      return z_.logmap(odo(x1, x2, H1, H2));
+      return z_.unretract(odo(x1, x2, H1, H2));
     }
 
   };
@@ -333,7 +333,7 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) {
           skew_noise, // s
           trans_noise, trans_noise // ux, uy
           ) ;
-      values->insert((int)i, X[i].expmap(delta)) ;
+      values->insert((int)i, X[i].retract(delta)) ;
     }
   }
 
@@ -327,7 +327,7 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) {
           trans_noise, trans_noise, trans_noise, // translation
           focal_noise, distort_noise, distort_noise // f, k1, k2
           ) ;
-      values->insert((int)i, X[i].expmap(delta)) ;
+      values->insert((int)i, X[i].retract(delta)) ;
     }
   }
 
@@ -146,8 +146,8 @@ TEST(Pose2Graph, optimizeThreePoses) {
   // Create initial config
   boost::shared_ptr<Pose2Values> initial(new Pose2Values());
   initial->insert(0, p0);
-  initial->insert(1, hexagon[1].expmap(Vector_(3,-0.1, 0.1,-0.1)));
-  initial->insert(2, hexagon[2].expmap(Vector_(3, 0.1,-0.1, 0.1)));
+  initial->insert(1, hexagon[1].retract(Vector_(3,-0.1, 0.1,-0.1)));
+  initial->insert(2, hexagon[2].retract(Vector_(3, 0.1,-0.1, 0.1)));
 
   // Choose an ordering
   shared_ptr<Ordering> ordering(new Ordering);
@@ -186,11 +186,11 @@ TEST_UNSAFE(Pose2Graph, optimizeCircle) {
   // Create initial config
   boost::shared_ptr<Pose2Values> initial(new Pose2Values());
   initial->insert(0, p0);
-  initial->insert(1, hexagon[1].expmap(Vector_(3,-0.1, 0.1,-0.1)));
-  initial->insert(2, hexagon[2].expmap(Vector_(3, 0.1,-0.1, 0.1)));
-  initial->insert(3, hexagon[3].expmap(Vector_(3,-0.1, 0.1,-0.1)));
-  initial->insert(4, hexagon[4].expmap(Vector_(3, 0.1,-0.1, 0.1)));
-  initial->insert(5, hexagon[5].expmap(Vector_(3,-0.1, 0.1,-0.1)));
+  initial->insert(1, hexagon[1].retract(Vector_(3,-0.1, 0.1,-0.1)));
+  initial->insert(2, hexagon[2].retract(Vector_(3, 0.1,-0.1, 0.1)));
+  initial->insert(3, hexagon[3].retract(Vector_(3,-0.1, 0.1,-0.1)));
+  initial->insert(4, hexagon[4].retract(Vector_(3, 0.1,-0.1, 0.1)));
+  initial->insert(5, hexagon[5].retract(Vector_(3,-0.1, 0.1,-0.1)));
 
   // Choose an ordering
   shared_ptr<Ordering> ordering(new Ordering);
@@ -327,7 +327,7 @@ TEST( Pose2Values, expmap )
   delta[ordering[Key(1)]] = Vector_(3, -0.1,0.0,0.0);
   delta[ordering[Key(2)]] = Vector_(3, 0.0,0.1,0.0);
   delta[ordering[Key(3)]] = Vector_(3, 0.1,0.0,0.0);
-  Pose2Values actual = circle.expmap(delta, ordering);
+  Pose2Values actual = circle.retract(delta, ordering);
   CHECK(assert_equal(expected,actual));
 }
 
@@ -362,7 +362,7 @@ TEST( Pose2Prior, error )
   VectorValues addition(VectorValues::Zero(x0.dims(ordering)));
   addition[ordering["x1"]] = Vector_(3, 0.1, 0.0, 0.0);
   VectorValues plus = delta + addition;
-  Pose2Values x1 = x0.expmap(plus, ordering);
+  Pose2Values x1 = x0.retract(plus, ordering);
   Vector error_at_plus = Vector_(3,0.1/sx,0.0,0.0); // h(x)-z = 0.1 !
   CHECK(assert_equal(error_at_plus,factor.whitenedError(x1)));
   CHECK(assert_equal(error_at_plus,linear->error_vector(plus)));
@@ -428,7 +428,7 @@ TEST( Pose2Factor, error )
   // Check error after increasing p2
   VectorValues plus = delta;
   plus[ordering["x2"]] = Vector_(3, 0.1, 0.0, 0.0);
-  Pose2Values x1 = x0.expmap(plus, ordering);
+  Pose2Values x1 = x0.retract(plus, ordering);
   Vector error_at_plus = Vector_(3,0.1/sx,0.0,0.0); // h(x)-z = 0.1 !
   CHECK(assert_equal(error_at_plus,factor.whitenedError(x1)));
   CHECK(assert_equal(error_at_plus,linear->error_vector(plus)));
@@ -69,11 +69,11 @@ TEST(Pose3Graph, optimizeCircle) {
   // Create initial config
   boost::shared_ptr<Pose3Values> initial(new Pose3Values());
   initial->insert(0, gT0);
-  initial->insert(1, hexagon[1].expmap(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
-  initial->insert(2, hexagon[2].expmap(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1)));
-  initial->insert(3, hexagon[3].expmap(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
-  initial->insert(4, hexagon[4].expmap(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1)));
-  initial->insert(5, hexagon[5].expmap(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
+  initial->insert(1, hexagon[1].retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
+  initial->insert(2, hexagon[2].retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1)));
+  initial->insert(3, hexagon[3].retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
+  initial->insert(4, hexagon[4].retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1)));
+  initial->insert(5, hexagon[5].retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
 
   // Choose an ordering and optimize
   shared_ptr<Ordering> ordering(new Ordering);
@@ -140,9 +140,9 @@ TEST( Pose3Factor, error )
   x.insert(1,t1);
   x.insert(2,t2);
 
-  // Get error h(x)-z -> logmap(z,h(x)) = logmap(z,between(t1,t2))
+  // Get error h(x)-z -> unretract(z,h(x)) = unretract(z,between(t1,t2))
   Vector actual = factor.unwhitenedError(x);
-  Vector expected = z.logmap(t1.between(t2));
+  Vector expected = z.unretract(t1.between(t2));
   CHECK(assert_equal(expected,actual));
 }
 
@@ -223,7 +223,7 @@ TEST( Pose3Values, expmap )
       0.0,0.0,0.0, 0.1, 0.0, 0.0,
       0.0,0.0,0.0, 0.1, 0.0, 0.0,
       0.0,0.0,0.0, 0.1, 0.0, 0.0);
-  Pose3Values actual = pose3SLAM::circle(4,1.0).expmap(delta, ordering);
+  Pose3Values actual = pose3SLAM::circle(4,1.0).retract(delta, ordering);
   CHECK(assert_equal(expected,actual));
 }
 
@@ -87,7 +87,7 @@ TEST( ProjectionFactor, error )
   VectorValues delta(expected_config.dims(ordering));
   delta[ordering["x1"]] = Vector_(6, 0.,0.,0., 1.,1.,1.);
   delta[ordering["l1"]] = Vector_(3, 1.,2.,3.);
-  Values actual_config = config.expmap(delta, ordering);
+  Values actual_config = config.retract(delta, ordering);
   CHECK(assert_equal(expected_config,actual_config,1e-9));
 }
 
@@ -210,7 +210,7 @@ TEST( Values, update_with_large_delta) {
   delta[largeOrdering["x1"]] = Vector_(6, 0.0, 0.0, 0.0, 0.1, 0.1, 0.1);
   delta[largeOrdering["l1"]] = Vector_(3, 0.1, 0.1, 0.1);
   delta[largeOrdering["x2"]] = Vector_(6, 0.0, 0.0, 0.0, 100.1, 4.1, 9.1);
-  Values actual = init.expmap(delta, largeOrdering);
+  Values actual = init.retract(delta, largeOrdering);
 
   CHECK(assert_equal(expected,actual));
 }
@@ -232,7 +232,7 @@ public:
     }
 
     // Return the error between the prediction and the supplied value of p2
-    return prediction.logmap(p2);
+    return prediction.unretract(p2);
   }
 
 };
@@ -186,7 +186,7 @@ bool isam_check(const planarSLAM::Graph& fullgraph, const planarSLAM::Values& fu
   GaussianBayesNet gbn = *GaussianSequentialSolver(linearized).eliminate();
   // gbn.print("Expected bayesnet: ");
   VectorValues delta = optimize(gbn);
-  planarSLAM::Values expected = fullinit.expmap(delta, ordering);
+  planarSLAM::Values expected = fullinit.retract(delta, ordering);
 
   return assert_equal(expected, actual);
 }
@@ -179,11 +179,11 @@ TEST(GaussianJunctionTree, slamlike) {
 
   GaussianJunctionTree gjt(linearized);
   VectorValues deltaactual = gjt.optimize(&EliminateQR);
-  planarSLAM::Values actual = init.expmap(deltaactual, ordering);
+  planarSLAM::Values actual = init.retract(deltaactual, ordering);
 
   GaussianBayesNet gbn = *GaussianSequentialSolver(linearized).eliminate();
   VectorValues delta = optimize(gbn);
-  planarSLAM::Values expected = init.expmap(delta, ordering);
+  planarSLAM::Values expected = init.retract(delta, ordering);
 
   EXPECT(assert_equal(expected, actual));
 }
@@ -61,7 +61,7 @@ TEST(testNonlinearISAM, markov_chain ) {
     }
 
     cur_pose = cur_pose.compose(z);
-    new_init.insert(key2, cur_pose.expmap(sampler.sample()));
+    new_init.insert(key2, cur_pose.retract(sampler.sample()));
     expected.insert(key2, cur_pose);
     isam.update(new_factors, new_init);
   }
@@ -193,17 +193,17 @@ TEST(TupleValues, zero_expmap_logmap)
   delta[o["l2"]] = Vector_(2, 1.3, 1.4);
 
   Values expected;
-  expected.insert(PoseKey(1), x1.expmap(Vector_(3, 1.0, 1.1, 1.2)));
-  expected.insert(PoseKey(2), x2.expmap(Vector_(3, 1.3, 1.4, 1.5)));
+  expected.insert(PoseKey(1), x1.retract(Vector_(3, 1.0, 1.1, 1.2)));
+  expected.insert(PoseKey(2), x2.retract(Vector_(3, 1.3, 1.4, 1.5)));
   expected.insert(PointKey(1), Point2(5.0, 6.1));
   expected.insert(PointKey(2), Point2(10.3, 11.4));
 
-  Values actual = values1.expmap(delta, o);
+  Values actual = values1.retract(delta, o);
   CHECK(assert_equal(expected, actual));
 
   // Check log
   VectorValues expected_log = delta;
-  VectorValues actual_log = values1.logmap(actual, o);
+  VectorValues actual_log = values1.unretract(actual, o);
   CHECK(assert_equal(expected_log, actual_log));
 }
 
@@ -454,13 +454,13 @@ TEST(TupleValues, expmap)
   delta[o["l2"]] = Vector_(2, 1.3, 1.4);
 
   ValuesA expected;
-  expected.insert(x1k, x1.expmap(Vector_(3, 1.0, 1.1, 1.2)));
-  expected.insert(x2k, x2.expmap(Vector_(3, 1.3, 1.4, 1.5)));
+  expected.insert(x1k, x1.retract(Vector_(3, 1.0, 1.1, 1.2)));
+  expected.insert(x2k, x2.retract(Vector_(3, 1.3, 1.4, 1.5)));
   expected.insert(l1k, Point2(5.0, 6.1));
   expected.insert(l2k, Point2(10.3, 11.4));
 
-  CHECK(assert_equal(expected, values1.expmap(delta, o)));
-  CHECK(assert_equal(delta, values1.logmap(expected, o)));
+  CHECK(assert_equal(expected, values1.retract(delta, o)));
+  CHECK(assert_equal(delta, values1.unretract(expected, o)));
 }
 
 /* ************************************************************************* */
@@ -485,13 +485,13 @@ TEST(TupleValues, expmap_typedefs)
   delta[o["l1"]] = Vector_(2, 1.0, 1.1);
   delta[o["l2"]] = Vector_(2, 1.3, 1.4);
 
-  expected.insert(x1k, x1.expmap(Vector_(3, 1.0, 1.1, 1.2)));
-  expected.insert(x2k, x2.expmap(Vector_(3, 1.3, 1.4, 1.5)));
+  expected.insert(x1k, x1.retract(Vector_(3, 1.0, 1.1, 1.2)));
+  expected.insert(x2k, x2.retract(Vector_(3, 1.3, 1.4, 1.5)));
   expected.insert(l1k, Point2(5.0, 6.1));
   expected.insert(l2k, Point2(10.3, 11.4));
 
-  CHECK(assert_equal(expected, TupleValues2<PoseValues, PointValues>(values1.expmap(delta, o))));
-  //CHECK(assert_equal(delta, values1.logmap(expected)));
+  CHECK(assert_equal(expected, TupleValues2<PoseValues, PointValues>(values1.retract(delta, o))));
+  //CHECK(assert_equal(delta, values1.unretract(expected)));
 }
 
 /* ************************************************************************* */