Merged in feature/RevertConstraintNoiseHack (pull request #21)

release/4.3a0
thduynguyen 2014-10-17 17:29:14 -04:00
commit b4763ea83a
5 changed files with 16 additions and 32 deletions

View File

@@ -543,8 +543,7 @@ Matrix collect(size_t nrMatrices, ...)
 void vector_scale_inplace(const Vector& v, Matrix& A, bool inf_mask) {
   const DenseIndex m = A.rows();
   if (inf_mask) {
-    // only scale the first v.size() rows of A to support augmented Matrix
-    for (DenseIndex i=0; i<v.size(); ++i) {
+    for (DenseIndex i=0; i<m; ++i) {
       const double& vi = v(i);
       if (std::isfinite(vi))
         A.row(i) *= vi;
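For reference, a minimal Eigen-only sketch of the behaviour after this revert (the helper name scaleRowsInPlace and the numbers are illustrative, not GTSAM API): with inf_mask set, the loop runs over all A.rows() rows, so v must have exactly A.rows() entries, and any row whose weight is inf or NaN is left untouched.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>
#include <limits>

// Illustrative stand-in for the reverted vector_scale_inplace.
void scaleRowsInPlace(const Eigen::VectorXd& v, Eigen::MatrixXd& A, bool inf_mask) {
  const Eigen::DenseIndex m = A.rows();  // v.size() must equal A.rows() now
  if (inf_mask) {
    for (Eigen::DenseIndex i = 0; i < m; ++i) {
      const double vi = v(i);
      if (std::isfinite(vi))
        A.row(i) *= vi;  // inf/NaN weights (hard constraints) leave the row unchanged
    }
  } else {
    A = v.asDiagonal() * A;  // plain row scaling
  }
}

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Ones(2, 3);
  Eigen::VectorXd v(2);
  v << std::numeric_limits<double>::infinity(), 2.0;  // row 0 acts as a hard constraint
  scaleRowsInPlace(v, A, true);
  std::cout << A << std::endl;  // row 0 stays all ones, row 1 becomes all twos
  return 0;
}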

View File

@@ -398,7 +398,6 @@ GTSAM_EXPORT Matrix collect(size_t nrMatrices, ...);
  * Arguments (Matrix, Vector) scales the columns,
  * (Vector, Matrix) scales the rows
  * @param inf_mask when true, will not scale with a NaN or inf value.
- * The inplace version also allows v.size()<A.rows() and only scales the first v.size() rows of A.
  */
 GTSAM_EXPORT void vector_scale_inplace(const Vector& v, Matrix& A, bool inf_mask = false); // row
 GTSAM_EXPORT Matrix vector_scale(const Vector& v, const Matrix& A, bool inf_mask = false); // row

View File

@@ -339,12 +339,12 @@ Vector Constrained::whiten(const Vector& v) const {
   // a hard constraint, we don't do anything.
   const Vector& a = v;
   const Vector& b = sigmas_;
-  // Now allows for whiten augmented vector with a new additional part coming
-  // from the Lagrange multiplier. So a.size() >= b.size()
-  Vector c = a;
-  for( DenseIndex i = 0; i < b.size(); i++ ) {
+  size_t n = a.size();
+  assert (b.size()==a.size());
+  Vector c(n);
+  for( size_t i = 0; i < n; i++ ) {
     const double& ai = a(i), &bi = b(i);
-    if (bi!=0) c(i) = ai/bi;
+    c(i) = (bi==0.0) ? ai : ai/bi; // NOTE: not ediv_()
   }
   return c;
 }
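As a reading aid, a self-contained sketch of what the reverted whiten now computes (whitenMixed is a hypothetical Eigen-only mirror, not a GTSAM function): element-wise division by sigma, except that entries with sigma == 0, i.e. hard constraints, pass through unchanged, and the input must match the model's dimension with no Lagrange-multiplier tail.

#include <Eigen/Dense>
#include <cassert>
#include <iostream>

// Hypothetical mirror of Constrained::whiten after the revert.
Eigen::VectorXd whitenMixed(const Eigen::VectorXd& v, const Eigen::VectorXd& sigmas) {
  assert(v.size() == sigmas.size());  // the augmented a.size() >= b.size() case is gone
  Eigen::VectorXd c(v.size());
  for (Eigen::DenseIndex i = 0; i < v.size(); ++i)
    c(i) = (sigmas(i) == 0.0) ? v(i) : v(i) / sigmas(i);
  return c;
}

int main() {
  Eigen::VectorXd v(2), sigmas(2);
  v << 3.0, 4.0;
  sigmas << 0.0, 2.0;  // first entry is a hard constraint
  std::cout << whitenMixed(v, sigmas).transpose() << std::endl;  // prints: 3 2
  return 0;
}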
@@ -362,11 +362,7 @@ double Constrained::distance(const Vector& v) const {
 /* ************************************************************************* */
 Matrix Constrained::Whiten(const Matrix& H) const {
   // selective scaling
-  // Now allow augmented Matrix with a new additional part coming
-  // from the Lagrange multiplier.
-  Matrix M(H.block(0, 0, dim(), H.cols()));
-  Constrained::WhitenInPlace(M);
-  return M;
+  return vector_scale(invsigmas(), H, true);
 }
 /* ************************************************************************* */
@@ -375,8 +371,6 @@ void Constrained::WhitenInPlace(Matrix& H) const {
   // Scale row i of H by sigmas[i], basically multiplying H with diag(sigmas)
   // Set inf_mask flag is true so that if invsigmas[i] is inf, i.e. sigmas[i] = 0,
   // indicating a hard constraint, we leave H's row i in place.
-  // Now allow augmented Matrix with a new additional part coming
-  // from the Lagrange multiplier.
   // Inlined: vector_scale_inplace(invsigmas(), H, true);
   // vector_scale_inplace(v, A, true);
   for (DenseIndex i=0; i<(DenseIndex)dim_; ++i) {
@@ -399,14 +393,12 @@ void Constrained::WhitenInPlace(Eigen::Block<Matrix> H) const {
 }
 /* ************************************************************************* */
-Constrained::shared_ptr Constrained::unit(size_t augmentedDim) const {
-  Vector sigmas = ones(dim()+augmentedDim);
+Constrained::shared_ptr Constrained::unit() const {
+  Vector sigmas = ones(dim());
   for (size_t i=0; i<dim(); ++i)
     if (this->sigmas_(i) == 0.0)
       sigmas(i) = 0.0;
-  Vector augmentedMu = zero(dim()+augmentedDim);
-  subInsert(augmentedMu, mu_, 0);
-  return MixedSigmas(augmentedMu, sigmas);
+  return MixedSigmas(mu_, sigmas);
 }
 /* ************************************************************************* */
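In short, unit() no longer grows the model. A hypothetical Eigen-only mirror of the new sigma handling (unitSigmas is illustrative, not GTSAM API) keeps zero sigmas at zero and sets every other sigma to one, with no augmentedDim padding of sigmas or mu.

#include <Eigen/Dense>
#include <iostream>

// Hypothetical mirror of the sigma handling in the reverted Constrained::unit().
Eigen::VectorXd unitSigmas(const Eigen::VectorXd& sigmas) {
  Eigen::VectorXd out = Eigen::VectorXd::Ones(sigmas.size());  // unit sigma by default
  for (Eigen::DenseIndex i = 0; i < sigmas.size(); ++i)
    if (sigmas(i) == 0.0)
      out(i) = 0.0;  // hard constraints stay constrained
  return out;
}

int main() {
  Eigen::VectorXd sigmas(3);
  sigmas << 0.0, 0.5, 2.0;
  std::cout << unitSigmas(sigmas).transpose() << std::endl;  // prints: 0 1 1
  return 0;
}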

View File

@@ -481,9 +481,8 @@ namespace gtsam {
     /**
      * Returns a Unit version of a constrained noisemodel in which
      * constrained sigmas remain constrained and the rest are unit scaled
-     * Now support augmented part from the Lagrange multiplier.
      */
-    shared_ptr unit(size_t augmentedDim = 0) const;
+    shared_ptr unit() const;
   private:
     /** Serialization function */
@@ -732,7 +731,7 @@ namespace gtsam {
   };
   /// Cauchy implements the "Cauchy" robust error model (Lee2013IROS). Contributed by:
-  /// Dipl.-Inform. Jan Oberl�nder (M.Sc.), FZI Research Center for
+  /// Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
   /// Information Technology, Karlsruhe, Germany.
   /// oberlaender@fzi.de
   /// Thanks Jan!

View File

@@ -319,18 +319,13 @@ public:
       terms[j].second.swap(A[j]);
     }
-    // TODO pass unwhitened + noise model to Gaussian factor
-    // For now, only linearized constrained factors have noise model at linear level!!!
     if(noiseModel_)
     {
+      // TODO pass unwhitened + noise model to Gaussian factor
       noiseModel::Constrained::shared_ptr constrained =
-          boost::dynamic_pointer_cast<noiseModel::Constrained>(this->noiseModel_);
-      if(constrained) {
-        size_t augmentedDim = terms[0].second.rows() - constrained->dim();
-        Vector augmentedZero = zero(augmentedDim);
-        Vector augmentedb = concatVectors(2, &b, &augmentedZero);
-        return GaussianFactor::shared_ptr(new JacobianFactor(terms, augmentedb, constrained->unit(augmentedDim)));
-      }
+        boost::dynamic_pointer_cast<noiseModel::Constrained>(this->noiseModel_);
+      if(constrained)
+        return GaussianFactor::shared_ptr(new JacobianFactor(terms, b, constrained->unit()));
       else
         return GaussianFactor::shared_ptr(new JacobianFactor(terms, b));
     }
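Taken together, the reverted path builds the JacobianFactor directly from b and constrained->unit(), with no zero-padded augmentedb. A usage sketch under the GTSAM API of this release; the key and numeric values are made up for illustration, not taken from the repository.

#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/NoiseModel.h>
#include <utility>
#include <vector>

using namespace gtsam;

int main() {
  // A single 3-dimensional term on the variable with key 1 (illustrative values).
  std::vector<std::pair<Key, Matrix> > terms(1);
  terms[0].first = 1;
  terms[0].second = Matrix::Identity(3, 3);
  Vector b(3);
  b << 1.0, 2.0, 3.0;

  // Mixed model: the first row is a hard constraint (sigma = 0), the rest are soft.
  Vector sigmas(3);
  sigmas << 0.0, 0.1, 0.1;
  noiseModel::Constrained::shared_ptr constrained =
      noiseModel::Constrained::MixedSigmas(sigmas);

  // Same dimension everywhere: b matches the noise model, and unit() takes no argument.
  GaussianFactor::shared_ptr factor(
      new JacobianFactor(terms, b, constrained->unit()));
  factor->print("constrained factor");
  return 0;
}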