From 4123053c8164d236fb6a25a182abd5a34814ed25 Mon Sep 17 00:00:00 2001 From: Luca Date: Tue, 8 Apr 2014 18:55:42 -0400 Subject: [PATCH 01/25] progress on grouping - still seg faults --- gtsam_unstable/slam/SmartFactorBase.h | 42 ++++++++++++++++++--------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/gtsam_unstable/slam/SmartFactorBase.h b/gtsam_unstable/slam/SmartFactorBase.h index ebef221bb..973341477 100644 --- a/gtsam_unstable/slam/SmartFactorBase.h +++ b/gtsam_unstable/slam/SmartFactorBase.h @@ -417,7 +417,7 @@ public: } // **************************************************************************************************** - boost::shared_ptr > updateAugmentedHessian( + void updateAugmentedHessian( const Cameras& cameras, const Point3& point, const double lambda, bool diagonalDamping, SymmetricBlockMatrix& augmentedHessian) const { @@ -435,8 +435,8 @@ public: dims.back() = 1; updateSparseSchurComplement(Fblocks, E, PointCov, b, augmentedHessian); // augmentedHessian.matrix().block (i1,i2) = ... - std::cout << "f "<< f <keys_.size(); // cameras observing current point - size_t aug_numKeys = augmentedHessian.rows() - 1; // all cameras in the group + size_t aug_numKeys = (augmentedHessian.rows() - 1)/D; // all cameras in the group + +// MatrixDD delta = eye(D); +// size_t n1 = numKeys+1; +// for (size_t i1=0; i1 < n1-1; i1++){ +// MatrixDD Z1 = augmentedHessian(i1,i1).selfadjointView(); +// std::cout << i1 << " " << "\n" << Z1 << std::endl; +// augmentedHessian(i1,i1) = Z1 + delta; +// MatrixDD Z2 = augmentedHessian(i1,i1).selfadjointView(); +// std::cout << i1 << " " << "\n" << Z2 << std::endl; +//// for (size_t i2=i1+1; i2 < n1-1; i2++){ +//// Z = augmentedHessian(i1,i2).knownOffDiagonal(); // + delta; +//// std::cout << i1 << " " << i2 << "\n" << Z << std::endl; +//// } +// } + + MatrixDD matrixBlock; + VectorD vectorBlock; // Blockwise Schur complement for (size_t i1 = 0; i1 < numKeys; i1++) { // for each camera @@ -497,27 +514,26 @@ public: // D = (Dx2) * (2) // (augmentedHessian.matrix()).block (i1,numKeys+1) = Fi1.transpose() * b.segment < 2 > (2 * i1); // F' * b size_t aug_i1 = this->keys_[i1]; - std::cout << "i1 "<< i1 < (2 * i1) // F' * b - Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1) // (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) ) - std::cout << "filled 1 " <(2 * i1, 0).transpose() * Fi1); // upper triangular part of the hessian for (size_t i2 = i1+1; i2 < numKeys; i2++) { // for each camera const Matrix2D& Fi2 = Fblocks.at(i2).second; size_t aug_i2 = this->keys_[i2]; - std::cout << "i2 "<< i2 <(2 * i2, 0).transpose() * Fi2); } } // end of for over cameras From d8fafce2245370697b822bae490031371efbde47 Mon Sep 17 00:00:00 2001 From: Luca Date: Wed, 9 Apr 2014 21:00:38 -0400 Subject: [PATCH 02/25] minor changes to cout when verbosity is TRYLAMBDA --- gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 6be69e710..0bf06f21f 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -256,6 +256,9 @@ void LevenbergMarquardtOptimizer::iterate() { double newlinearizedError = linear->error(delta); double linearizedCostChange = state_.error - newlinearizedError; + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) + cout << "newlinearizedError = " << newlinearizedError << + 
"linearizedCostChange = " << linearizedCostChange << endl; if (linearizedCostChange >= 0) { // step is valid // update values @@ -266,10 +269,14 @@ void LevenbergMarquardtOptimizer::iterate() { // compute new error gttic(compute_error); if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) - cout << "calculating error" << endl; + cout << "calculating error:" << endl; newError = graph_.error(newValues); gttoc(compute_error); + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) + cout << "old error (" << state_.error + << ") new (tentative) error (" << newError << ")" << endl; + // cost change in the original, nonlinear system (old - new) double costChange = state_.error - newError; @@ -297,8 +304,7 @@ void LevenbergMarquardtOptimizer::iterate() { break; } else if (!stopSearchingLambda) { // we failed to solved the system or we had no decrease in cost if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) - cout << "increasing lambda: old error (" << state_.error - << ") new error (" << newError << ")" << endl; + cout << "increasing lambda" << endl; increaseLambda(); // check if lambda is too big @@ -310,6 +316,8 @@ void LevenbergMarquardtOptimizer::iterate() { break; } } else { // the change in the cost is very small and it is not worth trying bigger lambdas + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) + cout << "Levenberg-Marquardt: stopping as relative cost reduction is small" << endl; break; } } // end while From dc7b5d58c025c0f089b1d7ef93ae76dc9419d9c1 Mon Sep 17 00:00:00 2001 From: Luca Date: Wed, 9 Apr 2014 21:01:03 -0400 Subject: [PATCH 03/25] implemented efficient update of Hessian matrix via Schur complement --- gtsam_unstable/slam/SmartFactorBase.h | 266 ++++++++++++++------------ 1 file changed, 147 insertions(+), 119 deletions(-) diff --git a/gtsam_unstable/slam/SmartFactorBase.h b/gtsam_unstable/slam/SmartFactorBase.h index 973341477..49242d5ac 100644 --- a/gtsam_unstable/slam/SmartFactorBase.h +++ b/gtsam_unstable/slam/SmartFactorBase.h @@ -192,7 +192,7 @@ public: b[2 * i + 1] = e.y(); } catch (CheiralityException& e) { std::cout << "Cheirality exception " << std::endl; - exit (EXIT_FAILURE); + exit(EXIT_FAILURE); } i += 1; } @@ -219,10 +219,10 @@ public: try { Point2 reprojectionError(camera.project(point) - zi); overallError += 0.5 - * this->noise_.at(i)->distance(reprojectionError.vector()); + * this->noise_.at(i)->distance(reprojectionError.vector()); } catch (CheiralityException& e) { std::cout << "Cheirality exception " << std::endl; - exit (EXIT_FAILURE); + exit(EXIT_FAILURE); } i += 1; } @@ -244,7 +244,7 @@ public: cameras[i].project(point, boost::none, Ei); } catch (CheiralityException& e) { std::cout << "Cheirality exception " << std::endl; - exit (EXIT_FAILURE); + exit(EXIT_FAILURE); } this->noise_.at(i)->WhitenSystem(Ei, b); E.block<2, 3>(2 * i, 0) = Ei; @@ -274,7 +274,7 @@ public: -(cameras[i].project(point, Fi, Ei, Hcali) - this->measured_.at(i)).vector(); } catch (CheiralityException& e) { std::cout << "Cheirality exception " << std::endl; - exit (EXIT_FAILURE); + exit(EXIT_FAILURE); } this->noise_.at(i)->WhitenSystem(Fi, Ei, Hcali, bi); @@ -322,7 +322,7 @@ public: const double lambda = 0.0) const { int numKeys = this->keys_.size(); - std::vector < KeyMatrix2D > Fblocks; + std::vector Fblocks; double f = computeJacobians(Fblocks, E, PointCov, b, cameras, point, lambda); F = zeros(2 * numKeys, D * numKeys); @@ -345,7 +345,7 @@ public: diagonalDamping); // diagonalDamping should have no effect (only on PointCov) // Do SVD on A - 
Eigen::JacobiSVD < Matrix > svd(E, Eigen::ComputeFullU); + Eigen::JacobiSVD svd(E, Eigen::ComputeFullU); Vector s = svd.singularValues(); // Enull = zeros(2 * numKeys, 2 * numKeys - 3); int numKeys = this->keys_.size(); @@ -361,7 +361,7 @@ public: const Cameras& cameras, const Point3& point) const { int numKeys = this->keys_.size(); - std::vector < KeyMatrix2D > Fblocks; + std::vector Fblocks; double f = computeJacobiansSVD(Fblocks, Enull, b, cameras, point); F.resize(2 * numKeys, D * numKeys); F.setZero(); @@ -380,14 +380,14 @@ public: int numKeys = this->keys_.size(); - std::vector < KeyMatrix2D > Fblocks; + std::vector Fblocks; Matrix E; Matrix3 PointCov; Vector b; double f = computeJacobians(Fblocks, E, PointCov, b, cameras, point, lambda, diagonalDamping); -//#define HESSIAN_BLOCKS +//#define HESSIAN_BLOCKS // slower, as internally the Hessian factor will transform the blocks into SymmetricBlockMatrix #ifdef HESSIAN_BLOCKS // Create structures for Hessian Factors std::vector < Matrix > Gs(numKeys * (numKeys + 1) / 2); @@ -400,46 +400,23 @@ public: //std::vector < Vector > gs2(gs.begin(), gs.end()); return boost::make_shared < RegularHessianFactor - > (this->keys_, Gs, gs, f); -#else + > (this->keys_, Gs, gs, f); +#else // we create directly a SymmetricBlockMatrix size_t n1 = D * numKeys + 1; std::vector dims(numKeys + 1); // this also includes the b term std::fill(dims.begin(), dims.end() - 1, D); dims.back() = 1; - SymmetricBlockMatrix augmentedHessian(dims, Matrix::Zero(n1, n1)); // for 10 cameras, size should be (10*D+1 x 10*D+1) + SymmetricBlockMatrix augmentedHessian(dims, Matrix::Zero(n1, n1)); // for 10 cameras, size should be (10*D+1 x 10*D+1) sparseSchurComplement(Fblocks, E, PointCov, b, augmentedHessian); // augmentedHessian.matrix().block (i1,i2) = ... - augmentedHessian(numKeys,numKeys)(0,0) = f; - return boost::make_shared >( - this->keys_, augmentedHessian); - + augmentedHessian(numKeys, numKeys)(0, 0) = f; + return boost::make_shared >(this->keys_, + augmentedHessian); #endif } // **************************************************************************************************** - void updateAugmentedHessian( - const Cameras& cameras, const Point3& point, const double lambda, - bool diagonalDamping, SymmetricBlockMatrix& augmentedHessian) const { - - int numKeys = this->keys_.size(); - - std::vector < KeyMatrix2D > Fblocks; - Matrix E; - Matrix3 PointCov; - Vector b; - double f = computeJacobians(Fblocks, E, PointCov, b, cameras, point, lambda, - diagonalDamping); - - std::vector dims(numKeys + 1); // this also includes the b term - std::fill(dims.begin(), dims.end() - 1, D); - dims.back() = 1; - - updateSparseSchurComplement(Fblocks, E, PointCov, b, augmentedHessian); // augmentedHessian.matrix().block (i1,i2) = ... 
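// (Note: this overload of updateAugmentedHessian is removed here and re-introduced
// further down in this patch with an additional allKeys argument, so that the
// factor's keys can be mapped to slots of a larger group Hessian instead of being
// used directly as block indices.)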
- // std::cout << "f "<< f <& Fblocks, const Matrix& E, const Matrix& PointCov, const Vector& b, /*output ->*/std::vector& Gs, std::vector& gs) const { @@ -466,7 +443,7 @@ public: int GsCount2 = 0; for (DenseIndex i1 = 0; i1 < numKeys; i1++) { // for each camera DenseIndex i1D = i1 * D; - gs.at(i1) = gs_vector.segment < D > (i1D); + gs.at(i1) = gs_vector.segment(i1D); for (DenseIndex i2 = 0; i2 < numKeys; i2++) { if (i2 >= i1) { Gs.at(GsCount2) = H.block(i1D, i2 * D); @@ -476,69 +453,6 @@ public: } } - // **************************************************************************************************** - void updateSparseSchurComplement(const std::vector& Fblocks, - const Matrix& E, const Matrix& P /*Point Covariance*/, const Vector& b, - /*output ->*/SymmetricBlockMatrix& augmentedHessian) const { - // Schur complement trick - // Gs = F' * F - F' * E * P * E' * F - // gs = F' * (b - E * P * E' * b) - - // a single point is observed in numKeys cameras - size_t numKeys = this->keys_.size(); // cameras observing current point - size_t aug_numKeys = (augmentedHessian.rows() - 1)/D; // all cameras in the group - -// MatrixDD delta = eye(D); -// size_t n1 = numKeys+1; -// for (size_t i1=0; i1 < n1-1; i1++){ -// MatrixDD Z1 = augmentedHessian(i1,i1).selfadjointView(); -// std::cout << i1 << " " << "\n" << Z1 << std::endl; -// augmentedHessian(i1,i1) = Z1 + delta; -// MatrixDD Z2 = augmentedHessian(i1,i1).selfadjointView(); -// std::cout << i1 << " " << "\n" << Z2 << std::endl; -//// for (size_t i2=i1+1; i2 < n1-1; i2++){ -//// Z = augmentedHessian(i1,i2).knownOffDiagonal(); // + delta; -//// std::cout << i1 << " " << i2 << "\n" << Z << std::endl; -//// } -// } - - MatrixDD matrixBlock; - VectorD vectorBlock; - - // Blockwise Schur complement - for (size_t i1 = 0; i1 < numKeys; i1++) { // for each camera - - const Matrix2D& Fi1 = Fblocks.at(i1).second; - const Matrix23 Ei1_P = E.block<2, 3>(2 * i1, 0) * P; - - // D = (Dx2) * (2) - // (augmentedHessian.matrix()).block (i1,numKeys+1) = Fi1.transpose() * b.segment < 2 > (2 * i1); // F' * b - size_t aug_i1 = this->keys_[i1]; - - // augmentedHessian(aug_i1,aug_numKeys) - vectorBlock = augmentedHessian(aug_i1,aug_numKeys).knownOffDiagonal(); - augmentedHessian(aug_i1,aug_numKeys) = vectorBlock + - Fi1.transpose() * b.segment < 2 > (2 * i1) // F' * b - - Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1) - - // (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) ) - matrixBlock = augmentedHessian(aug_i1,aug_i1).selfadjointView(); - augmentedHessian(aug_i1,aug_i1) = matrixBlock+ - Fi1.transpose() * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1); - - // upper triangular part of the hessian - for (size_t i2 = i1+1; i2 < numKeys; i2++) { // for each camera - const Matrix2D& Fi2 = Fblocks.at(i2).second; - size_t aug_i2 = this->keys_[i2]; - - // (DxD) = (Dx2) * ( (2x2) * (2xD) ) - matrixBlock = augmentedHessian(aug_i1, aug_i2).knownOffDiagonal(); - augmentedHessian(aug_i1, aug_i2) = matrixBlock - - Fi1.transpose() * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2); - } - } // end of for over cameras - } - // **************************************************************************************************** void sparseSchurComplement(const std::vector& Fblocks, const Matrix& E, const Matrix& P /*Point Covariance*/, const Vector& b, @@ -558,20 +472,20 @@ public: // D = (Dx2) * (2) // (augmentedHessian.matrix()).block (i1,numKeys+1) = Fi1.transpose() * b.segment < 2 > (2 * i1); // F' * b - 
augmentedHessian(i1,numKeys) = Fi1.transpose() * b.segment < 2 > (2 * i1) // F' * b - - Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1) + augmentedHessian(i1, numKeys) = Fi1.transpose() * b.segment<2>(2 * i1) // F' * b + - Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1) // (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) ) - augmentedHessian(i1,i1) = - Fi1.transpose() * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1); + augmentedHessian(i1, i1) = Fi1.transpose() + * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1); // upper triangular part of the hessian - for (size_t i2 = i1+1; i2 < numKeys; i2++) { // for each camera + for (size_t i2 = i1 + 1; i2 < numKeys; i2++) { // for each camera const Matrix2D& Fi2 = Fblocks.at(i2).second; // (DxD) = (Dx2) * ( (2x2) * (2xD) ) - augmentedHessian(i1,i2) = - -Fi1.transpose() * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2); + augmentedHessian(i1, i2) = -Fi1.transpose() + * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2); } } // end of for over cameras } @@ -602,24 +516,138 @@ public: { // for i1 = i2 // D = (Dx2) * (2) - gs.at(i1) = Fi1.transpose() * b.segment < 2 > (2 * i1); // F' * b - // D = (Dx2) * (2x3) * (3*2m) * (2m x 1) - gs.at(i1) -= Fi1.transpose() * (Ei1_P * (E.transpose() * b)); + gs.at(i1) = Fi1.transpose() * b.segment<2>(2 * i1) // F' * b + -Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1) // (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) ) - Gs.at(GsIndex) = Fi1.transpose() * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1); + Gs.at(GsIndex) = Fi1.transpose() + * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1); GsIndex++; } // upper triangular part of the hessian - for (size_t i2 = i1+1; i2 < numKeys; i2++) { // for each camera + for (size_t i2 = i1 + 1; i2 < numKeys; i2++) { // for each camera const Matrix2D& Fi2 = Fblocks.at(i2).second; // (DxD) = (Dx2) * ( (2x2) * (2xD) ) - Gs.at(GsIndex) = -Fi1.transpose() * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2); + Gs.at(GsIndex) = -Fi1.transpose() + * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2); GsIndex++; } } // end of for over cameras } + + // **************************************************************************************************** + void updateAugmentedHessian(const Cameras& cameras, const Point3& point, + const double lambda, bool diagonalDamping, + SymmetricBlockMatrix& augmentedHessian, + const FastVector allKeys) const { + + // int numKeys = this->keys_.size(); + + std::vector Fblocks; + Matrix E; + Matrix3 PointCov; + Vector b; + double f = computeJacobians(Fblocks, E, PointCov, b, cameras, point, lambda, + diagonalDamping); + + updateSparseSchurComplement(Fblocks, E, PointCov, b, f, allKeys, augmentedHessian); // augmentedHessian.matrix().block (i1,i2) = ... 
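// For reference (a summary of the update performed by the call above, following the
// comments in updateSparseSchurComplement): the factor adds its Schur complement
//   H   += F'*F - F'*E*P*E'*F      (camera-camera blocks)
//   eta += F'*b - F'*E*P*E'*b      (information vector)
// where P is the (damped) point covariance inv(E'*E). The factor's keys are mapped
// to slots of the larger group Hessian through allKeys, and the constant term f is
// accumulated into the lower-right corner of augmentedHessian.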
+ } + + // **************************************************************************************************** + void updateSparseSchurComplement(const std::vector& Fblocks, + const Matrix& E, const Matrix& P /*Point Covariance*/, const Vector& b, + const double f, const FastVector allKeys, + /*output ->*/SymmetricBlockMatrix& augmentedHessian) const { + // Schur complement trick + // Gs = F' * F - F' * E * P * E' * F + // gs = F' * (b - E * P * E' * b) + MatrixDD matrixBlock; + VectorD vectorBlock; + + FastMap KeySlotMap; + for (size_t slot=0; slot < allKeys.size(); slot++) + KeySlotMap.insert(std::make_pair(allKeys[slot],slot)); + + bool debug= false; + + // a single point is observed in numKeys cameras + size_t numKeys = this->keys_.size(); // cameras observing current point + size_t aug_numKeys = (augmentedHessian.rows() - 1) / D; // all cameras in the group + + // Blockwise Schur complement + for (size_t i1 = 0; i1 < numKeys; i1++) { // for each camera in the current factor + + const Matrix2D& Fi1 = Fblocks.at(i1).second; + const Matrix23 Ei1_P = E.block<2, 3>(2 * i1, 0) * P; + + // D = (Dx2) * (2) + // allKeys are the list of all camera keys in the group, e.g, (1,3,4,5,7) + // we should map those to a slot in the local (grouped) hessian (0,1,2,3,4) + Key cameraKey_i1 = this->keys_[i1]; + size_t aug_i1 = KeySlotMap[cameraKey_i1]; + + // information vector - store previous vector + vectorBlock = augmentedHessian(aug_i1, aug_numKeys).knownOffDiagonal(); + if(debug) std::cout << "(before) augmentedHessian(" << aug_i1 << "," << aug_numKeys << ")= \n" << + vectorBlock << std::endl; + + // add contribution of current factor + augmentedHessian(aug_i1, aug_numKeys) = vectorBlock + + Fi1.transpose() * b.segment<2>(2 * i1) // F' * b + - Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1) + + vectorBlock = augmentedHessian(aug_i1, aug_numKeys).knownOffDiagonal(); + if(debug) std::cout << "(after) augmentedHessian(" << aug_i1 << "," << aug_numKeys << ")= \n" << + vectorBlock << std::endl; + + // (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) ) + // main block diagonal - store previous block + matrixBlock = augmentedHessian(aug_i1, aug_i1); + if(debug) std::cout << "(before) augmentedHessian(" << aug_i1 << "," << aug_i1 << ")= \n" << + matrixBlock << std::endl; + + // add contribution of current factor + augmentedHessian(aug_i1, aug_i1) = matrixBlock + + Fi1.transpose() + * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1); + + matrixBlock = augmentedHessian(aug_i1, aug_i1); + if(debug) std::cout << "(after) augmentedHessian(" << aug_i1 << "," << aug_i1 << ")= \n" << + matrixBlock << std::endl; + + // upper triangular part of the hessian + for (size_t i2 = i1 + 1; i2 < numKeys; i2++) { // for each camera + const Matrix2D& Fi2 = Fblocks.at(i2).second; + + Key cameraKey_i2 = this->keys_[i2]; + size_t aug_i2 = KeySlotMap[cameraKey_i2]; + + // (DxD) = (Dx2) * ( (2x2) * (2xD) ) + // off diagonal block - store previous block + matrixBlock = augmentedHessian(aug_i1, aug_i2).knownOffDiagonal(); + + if(debug) std::cout << "(aug_i1= " << aug_i1 << ", aug_i2= " << aug_i2 << ") (i2= " <(2 * i2, 0).transpose() * Fi2); + + if(debug) std::cout << "(after) augmentedHessian(" << aug_i1 << "," << aug_i2 << ")= \n" << + augmentedHessian(aug_i1, aug_i2).knownOffDiagonal() << std::endl; + + matrixBlock = augmentedHessian(aug_i1, aug_i2).knownOffDiagonal(); + if(debug) std::cout << "(after, after) augmentedHessian(" << aug_i1 << "," << aug_i2 << ")= \n" << + 
matrixBlock<< std::endl; + } + } // end of for over cameras + + augmentedHessian(aug_numKeys, aug_numKeys)(0, 0) += f; + } + // **************************************************************************************************** boost::shared_ptr > createImplicitSchurFactor( const Cameras& cameras, const Point3& point, double lambda = 0.0, @@ -636,13 +664,13 @@ public: boost::shared_ptr > createJacobianQFactor( const Cameras& cameras, const Point3& point, double lambda = 0.0, bool diagonalDamping = false) const { - std::vector < KeyMatrix2D > Fblocks; + std::vector Fblocks; Matrix E; Matrix3 PointCov; Vector b; computeJacobians(Fblocks, E, PointCov, b, cameras, point, lambda, diagonalDamping); - return boost::make_shared < JacobianFactorQ > (Fblocks, E, PointCov, b); + return boost::make_shared >(Fblocks, E, PointCov, b); } private: From 4bdefc3f702c2182d7d0f96e734b940463b881dd Mon Sep 17 00:00:00 2001 From: Luca Date: Wed, 9 Apr 2014 21:04:52 -0400 Subject: [PATCH 04/25] removed debug bloks --- gtsam_unstable/slam/SmartFactorBase.h | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/gtsam_unstable/slam/SmartFactorBase.h b/gtsam_unstable/slam/SmartFactorBase.h index 49242d5ac..40b8b2b09 100644 --- a/gtsam_unstable/slam/SmartFactorBase.h +++ b/gtsam_unstable/slam/SmartFactorBase.h @@ -597,10 +597,6 @@ public: + Fi1.transpose() * b.segment<2>(2 * i1) // F' * b - Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1) - vectorBlock = augmentedHessian(aug_i1, aug_numKeys).knownOffDiagonal(); - if(debug) std::cout << "(after) augmentedHessian(" << aug_i1 << "," << aug_numKeys << ")= \n" << - vectorBlock << std::endl; - // (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) ) // main block diagonal - store previous block matrixBlock = augmentedHessian(aug_i1, aug_i1); @@ -612,10 +608,6 @@ public: + Fi1.transpose() * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1); - matrixBlock = augmentedHessian(aug_i1, aug_i1); - if(debug) std::cout << "(after) augmentedHessian(" << aug_i1 << "," << aug_i1 << ")= \n" << - matrixBlock << std::endl; - // upper triangular part of the hessian for (size_t i2 = i1 + 1; i2 < numKeys; i2++) { // for each camera const Matrix2D& Fi2 = Fblocks.at(i2).second; @@ -635,13 +627,6 @@ public: augmentedHessian(aug_i1, aug_i2) = matrixBlock - Fi1.transpose() * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2); - - if(debug) std::cout << "(after) augmentedHessian(" << aug_i1 << "," << aug_i2 << ")= \n" << - augmentedHessian(aug_i1, aug_i2).knownOffDiagonal() << std::endl; - - matrixBlock = augmentedHessian(aug_i1, aug_i2).knownOffDiagonal(); - if(debug) std::cout << "(after, after) augmentedHessian(" << aug_i1 << "," << aug_i2 << ")= \n" << - matrixBlock<< std::endl; } } // end of for over cameras From 6ef11bb2975ffe3d3c18c484aeb5ddc0e055fcfa Mon Sep 17 00:00:00 2001 From: Luca Date: Wed, 9 Apr 2014 21:14:38 -0400 Subject: [PATCH 05/25] removed debug statements --- gtsam_unstable/slam/SmartFactorBase.h | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) diff --git a/gtsam_unstable/slam/SmartFactorBase.h b/gtsam_unstable/slam/SmartFactorBase.h index 40b8b2b09..d4d3fd489 100644 --- a/gtsam_unstable/slam/SmartFactorBase.h +++ b/gtsam_unstable/slam/SmartFactorBase.h @@ -569,8 +569,6 @@ public: for (size_t slot=0; slot < allKeys.size(); slot++) KeySlotMap.insert(std::make_pair(allKeys[slot],slot)); - bool debug= false; - // a single point is observed in numKeys cameras size_t numKeys = 
this->keys_.size(); // cameras observing current point size_t aug_numKeys = (augmentedHessian.rows() - 1) / D; // all cameras in the group @@ -588,21 +586,15 @@ public: size_t aug_i1 = KeySlotMap[cameraKey_i1]; // information vector - store previous vector - vectorBlock = augmentedHessian(aug_i1, aug_numKeys).knownOffDiagonal(); - if(debug) std::cout << "(before) augmentedHessian(" << aug_i1 << "," << aug_numKeys << ")= \n" << - vectorBlock << std::endl; - + // vectorBlock = augmentedHessian(aug_i1, aug_numKeys).knownOffDiagonal(); // add contribution of current factor - augmentedHessian(aug_i1, aug_numKeys) = vectorBlock + augmentedHessian(aug_i1, aug_numKeys) = augmentedHessian(aug_i1, aug_numKeys).knownOffDiagonal() + Fi1.transpose() * b.segment<2>(2 * i1) // F' * b - Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1) // (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) ) // main block diagonal - store previous block matrixBlock = augmentedHessian(aug_i1, aug_i1); - if(debug) std::cout << "(before) augmentedHessian(" << aug_i1 << "," << aug_i1 << ")= \n" << - matrixBlock << std::endl; - // add contribution of current factor augmentedHessian(aug_i1, aug_i1) = matrixBlock + Fi1.transpose() @@ -613,18 +605,13 @@ public: const Matrix2D& Fi2 = Fblocks.at(i2).second; Key cameraKey_i2 = this->keys_[i2]; - size_t aug_i2 = KeySlotMap[cameraKey_i2]; + size_t aug_i2 = KeySlotMap[cameraKey_i2]; // (DxD) = (Dx2) * ( (2x2) * (2xD) ) // off diagonal block - store previous block - matrixBlock = augmentedHessian(aug_i1, aug_i2).knownOffDiagonal(); - - if(debug) std::cout << "(aug_i1= " << aug_i1 << ", aug_i2= " << aug_i2 << ") (i2= " <(2 * i2, 0).transpose() * Fi2); } From 926b4c644316833067df7d1b1bf5ec5b0ff2b8da Mon Sep 17 00:00:00 2001 From: Luca Date: Mon, 14 Apr 2014 11:41:55 -0400 Subject: [PATCH 06/25] removed temporary variables - minor --- gtsam_unstable/slam/SmartFactorBase.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/gtsam_unstable/slam/SmartFactorBase.h b/gtsam_unstable/slam/SmartFactorBase.h index d4d3fd489..2852e9d83 100644 --- a/gtsam_unstable/slam/SmartFactorBase.h +++ b/gtsam_unstable/slam/SmartFactorBase.h @@ -562,8 +562,9 @@ public: // Schur complement trick // Gs = F' * F - F' * E * P * E' * F // gs = F' * (b - E * P * E' * b) + MatrixDD matrixBlock; - VectorD vectorBlock; + typedef SymmetricBlockMatrix::Block Block; ///< A block from the Hessian matrix FastMap KeySlotMap; for (size_t slot=0; slot < allKeys.size(); slot++) @@ -582,8 +583,8 @@ public: // D = (Dx2) * (2) // allKeys are the list of all camera keys in the group, e.g, (1,3,4,5,7) // we should map those to a slot in the local (grouped) hessian (0,1,2,3,4) - Key cameraKey_i1 = this->keys_[i1]; - size_t aug_i1 = KeySlotMap[cameraKey_i1]; + // Key cameraKey_i1 = this->keys_[i1]; + DenseIndex aug_i1 = KeySlotMap[this->keys_[i1]]; // information vector - store previous vector // vectorBlock = augmentedHessian(aug_i1, aug_numKeys).knownOffDiagonal(); @@ -596,24 +597,22 @@ public: // main block diagonal - store previous block matrixBlock = augmentedHessian(aug_i1, aug_i1); // add contribution of current factor - augmentedHessian(aug_i1, aug_i1) = matrixBlock - + Fi1.transpose() - * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1); + augmentedHessian(aug_i1, aug_i1) = matrixBlock + + ( Fi1.transpose() * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1) ); // upper triangular part of the hessian for (size_t i2 = i1 + 1; i2 < numKeys; 
i2++) { // for each camera const Matrix2D& Fi2 = Fblocks.at(i2).second; - Key cameraKey_i2 = this->keys_[i2]; - size_t aug_i2 = KeySlotMap[cameraKey_i2]; + //Key cameraKey_i2 = this->keys_[i2]; + DenseIndex aug_i2 = KeySlotMap[this->keys_[i2]]; // (DxD) = (Dx2) * ( (2x2) * (2xD) ) // off diagonal block - store previous block // matrixBlock = augmentedHessian(aug_i1, aug_i2).knownOffDiagonal(); // add contribution of current factor augmentedHessian(aug_i1, aug_i2) = augmentedHessian(aug_i1, aug_i2).knownOffDiagonal() - - Fi1.transpose() - * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2); + - Fi1.transpose() * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2); } } // end of for over cameras From 4552327e54e0bfd1d20cb1479ef844b506b335be Mon Sep 17 00:00:00 2001 From: Luca Date: Wed, 16 Apr 2014 19:41:23 -0400 Subject: [PATCH 07/25] added space :) --- gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 0bf06f21f..3f7e80ed4 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -258,7 +258,7 @@ void LevenbergMarquardtOptimizer::iterate() { double linearizedCostChange = state_.error - newlinearizedError; if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "newlinearizedError = " << newlinearizedError << - "linearizedCostChange = " << linearizedCostChange << endl; + " linearizedCostChange = " << linearizedCostChange << endl; if (linearizedCostChange >= 0) { // step is valid // update values @@ -280,7 +280,7 @@ void LevenbergMarquardtOptimizer::iterate() { // cost change in the original, nonlinear system (old - new) double costChange = state_.error - newError; - if (linearizedCostChange > 1e-15) { // the error has to decrease to satify this condition + if (linearizedCostChange > 1e-15) { // the error has to decrease to satisfy this condition // fidelity of linearized model VS original system between modelFidelity = costChange / linearizedCostChange; // if we decrease the error in the nonlinear system and modelFidelity is above threshold @@ -293,7 +293,6 @@ void LevenbergMarquardtOptimizer::iterate() { // if the change is small we terminate if (fabs(costChange) < minAbsoluteTolerance) stopSearchingLambda = true; - } } From dd780e356cf9aadb15edb2b91832e87972ded919 Mon Sep 17 00:00:00 2001 From: Luca Date: Mon, 21 Apr 2014 18:59:33 -0400 Subject: [PATCH 08/25] fixed computation of linearized error in implicit schur factors --- gtsam_unstable/slam/ImplicitSchurFactor.h | 69 +++++++++++++++++++---- 1 file changed, 59 insertions(+), 10 deletions(-) diff --git a/gtsam_unstable/slam/ImplicitSchurFactor.h b/gtsam_unstable/slam/ImplicitSchurFactor.h index 234d13f1f..d7d1bc21c 100644 --- a/gtsam_unstable/slam/ImplicitSchurFactor.h +++ b/gtsam_unstable/slam/ImplicitSchurFactor.h @@ -235,25 +235,33 @@ public: typedef std::vector Error2s; /** - * @brief Calculate corrected error Q*e = (I - E*P*E')*e + * @brief Calculate corrected error Q*(e-2*b) = (I - E*P*E')*(e-2*b) */ - void projectError(const Error2s& e1, Error2s& e2) const { + void projectError2(const Error2s& e1, Error2s& e2) const { - // d1 = E.transpose() * e1 = (3*2m)*2m + // d1 = E.transpose() * (e1-2*b) = (3*2m)*2m Vector3 d1; d1.setZero(); for (size_t k = 0; k < size(); k++) - d1 += E_.block < 2, 3 > (2 * k, 0).transpose() * e1[k]; + d1 += E_.block < 2, 3 > (2 * k, 0).transpose() * (e1[k] - 2 * 
b_.segment < 2 > (k * 2)); // d2 = E.transpose() * e1 = (3*2m)*2m Vector3 d2 = PointCovariance_ * d1; // e3 = alpha*(e1 - E*d2) = 1*[2m-(2m*3)*3] for (size_t k = 0; k < size(); k++) - e2[k] = e1[k] - E_.block < 2, 3 > (2 * k, 0) * d2; + e2[k] = e1[k] - 2 * b_.segment < 2 > (k * 2) - E_.block < 2, 3 > (2 * k, 0) * d2; } - /// needed to be GaussianFactor - (I - E*P*E')*(F*x - b) + /* + * This definition matches the linearized error in the Hessian Factor: + * LinError(x) = x'*H*x - 2*x'*eta + f + * with: + * H = F' * (I-E'*P*E) * F = F' * Q * F + * eta = F' * (I-E'*P*E) * b = F' * Q * b + * f = nonlinear error + * (x'*H*x - 2*x'*eta + f) = x'*F'*Q*F*x - 2*x'*F'*Q *b + f = x'*F'*Q*(F*x - 2*b) + f + */ virtual double error(const VectorValues& x) const { // resize does not do malloc if correct size @@ -262,15 +270,56 @@ public: // e1 = F * x - b = (2m*dm)*dm for (size_t k = 0; k < size(); ++k) - e1[k] = Fblocks_[k].second * x.at(keys_[k]) - b_.segment < 2 > (k * 2); - projectError(e1, e2); + e1[k] = Fblocks_[k].second * x.at(keys_[k]); + projectError2(e1, e2); double result = 0; for (size_t k = 0; k < size(); ++k) - result += dot(e2[k], e2[k]); - return 0.5 * result; + result += dot(e1[k], e2[k]); + + double f = b_.squaredNorm(); + return 0.5 * (result + f); } + /// needed to be GaussianFactor - (I - E*P*E')*(F*x - b) + // This is wrong and does not match the definition in Hessian + // virtual double error(const VectorValues& x) const { + // + // // resize does not do malloc if correct size + // e1.resize(size()); + // e2.resize(size()); + // + // // e1 = F * x - b = (2m*dm)*dm + // for (size_t k = 0; k < size(); ++k) + // e1[k] = Fblocks_[k].second * x.at(keys_[k]) - b_.segment < 2 > (k * 2); + // projectError(e1, e2); + // + // double result = 0; + // for (size_t k = 0; k < size(); ++k) + // result += dot(e2[k], e2[k]); + // + // std::cout << "implicitFactor::error result " << result << std::endl; + // return 0.5 * result; + // } + /** + * @brief Calculate corrected error Q*e = (I - E*P*E')*e + */ + void projectError(const Error2s& e1, Error2s& e2) const { + + // d1 = E.transpose() * e1 = (3*2m)*2m + Vector3 d1; + d1.setZero(); + for (size_t k = 0; k < size(); k++) + d1 += E_.block < 2, 3 > (2 * k, 0).transpose() * e1[k]; + + // d2 = E.transpose() * e1 = (3*2m)*2m + Vector3 d2 = PointCovariance_ * d1; + + // e3 = alpha*(e1 - E*d2) = 1*[2m-(2m*3)*3] + for (size_t k = 0; k < size(); k++) + e2[k] = e1[k] - E_.block < 2, 3 > (2 * k, 0) * d2; + } + /// Scratch space for multiplyHessianAdd mutable Error2s e1, e2; From e98c4c2ac2ec6e4cb8ef359e508d538e11d75f3b Mon Sep 17 00:00:00 2001 From: Luca Date: Mon, 21 Apr 2014 19:18:21 -0400 Subject: [PATCH 09/25] added output message for TRYLAMBDA verbosity level --- gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 3f7e80ed4..827df7619 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -291,8 +291,12 @@ void LevenbergMarquardtOptimizer::iterate() { double minAbsoluteTolerance = params_.relativeErrorTol * state_.error; // if the change is small we terminate - if (fabs(costChange) < minAbsoluteTolerance) + if (fabs(costChange) < minAbsoluteTolerance){ + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) + cout << "fabs(costChange)="< Date: Tue, 22 Apr 2014 11:55:16 -0400 Subject: [PATCH 10/25] moved logging at the end 
of the iteration --- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 827df7619..e6c24f050 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -214,20 +214,6 @@ void LevenbergMarquardtOptimizer::iterate() { // Build damped system for this lambda (adds prior factors that make it like gradient descent) GaussianFactorGraph dampedSystem = buildDampedSystem(*linear); - // Log current error/lambda to file - if (!params_.logFile.empty()) { - ofstream os(params_.logFile.c_str(), ios::app); - - boost::posix_time::ptime currentTime = - boost::posix_time::microsec_clock::universal_time(); - - os << state_.totalNumberInnerIterations << "," - << 1e-6 * (currentTime - state_.startTime).total_microseconds() << "," - << state_.error << "," << state_.lambda << endl; - } - - ++state_.totalNumberInnerIterations; - // Try solving double modelFidelity = 0.0; bool step_is_successful = false; @@ -300,6 +286,17 @@ void LevenbergMarquardtOptimizer::iterate() { } } + // Log current error/lambda to file + if (!params_.logFile.empty()) { + ofstream os(params_.logFile.c_str(), ios::app); + boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time(); + os << /*inner iterations*/ state_.totalNumberInnerIterations << "," + << 1e-6 * (currentTime - state_.startTime).total_microseconds() << "," + << /*current error*/ state_.error << "," << state_.lambda << "," + << /*outer iterations*/ state_.iterations << endl; + } + ++state_.totalNumberInnerIterations; + if (step_is_successful) { // we have successfully decreased the cost and we have good modelFidelity state_.values.swap(newValues); state_.error = newError; From 37b750411f14d2f1a547fc97f201da0e28615ced Mon Sep 17 00:00:00 2001 From: Luca Date: Tue, 22 Apr 2014 18:38:12 -0400 Subject: [PATCH 11/25] added gradientAtZero with raw memory access --- gtsam/linear/GaussianFactor.h | 3 +++ gtsam/linear/GaussianFactorGraph.h | 2 +- gtsam/linear/HessianFactor.cpp | 17 +++++++++++++++++ gtsam/linear/HessianFactor.h | 2 ++ gtsam/linear/JacobianFactor.cpp | 5 +++++ gtsam/linear/JacobianFactor.h | 3 +++ gtsam_unstable/slam/ImplicitSchurFactor.h | 22 ++++++++++++++++++++++ 7 files changed, 53 insertions(+), 1 deletion(-) diff --git a/gtsam/linear/GaussianFactor.h b/gtsam/linear/GaussianFactor.h index 543822ce0..8c8e2cb2b 100644 --- a/gtsam/linear/GaussianFactor.h +++ b/gtsam/linear/GaussianFactor.h @@ -130,6 +130,9 @@ namespace gtsam { /// A'*b for Jacobian, eta for Hessian virtual VectorValues gradientAtZero() const = 0; + /// A'*b for Jacobian, eta for Hessian (raw memory version) + virtual void gradientAtZero(double* d) const = 0; + private: /** Serialization function */ friend class boost::serialization::access; diff --git a/gtsam/linear/GaussianFactorGraph.h b/gtsam/linear/GaussianFactorGraph.h index 50442ac6b..ae5730b68 100644 --- a/gtsam/linear/GaussianFactorGraph.h +++ b/gtsam/linear/GaussianFactorGraph.h @@ -257,7 +257,7 @@ namespace gtsam { * @param [output] g A VectorValues to store the gradient, which must be preallocated, * see allocateVectorValues * @return The gradient as a VectorValues */ - VectorValues gradientAtZero() const; + virtual VectorValues gradientAtZero() const; /** Optimize along the gradient direction, with a closed-form computation to perform the line * search. 
The gradient is computed about \f$ \delta x=0 \f$. diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp index 18fa44e8b..8d76c7651 100644 --- a/gtsam/linear/HessianFactor.cpp +++ b/gtsam/linear/HessianFactor.cpp @@ -600,6 +600,23 @@ VectorValues HessianFactor::gradientAtZero() const { return g; } +/* ************************************************************************* */ +// TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n +void HessianFactor::gradientAtZero(double* d) const { + + // Use eigen magic to access raw memory + typedef Eigen::Matrix DVector; + typedef Eigen::Map DMap; + + // Loop over all variables in the factor + for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) { + Key j = keys_[pos]; + // Get the diagonal block, and insert its diagonal + DVector dj = -info_(pos,size()).knownOffDiagonal(); + DMap(d + 9 * j) += dj; + } +} + /* ************************************************************************* */ std::pair, boost::shared_ptr > EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys) diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h index faf2ccd33..62d84925c 100644 --- a/gtsam/linear/HessianFactor.h +++ b/gtsam/linear/HessianFactor.h @@ -387,6 +387,8 @@ namespace gtsam { /// eta for Hessian VectorValues gradientAtZero() const; + virtual void gradientAtZero(double* d) const; + /** * Densely partially eliminate with Cholesky factorization. JacobianFactors are * left-multiplied with their transpose to form the Hessian using the conversion constructor diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index c080f75cb..bb9d5afc0 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -573,6 +573,11 @@ VectorValues JacobianFactor::gradientAtZero() const { return g; } +/* ************************************************************************* */ +void JacobianFactor::gradientAtZero(double* d) const { + throw std::runtime_error("gradientAtZero not implemented for Jacobian factor"); +} + /* ************************************************************************* */ pair JacobianFactor::jacobian() const { pair result = jacobianUnweighted(); diff --git a/gtsam/linear/JacobianFactor.h b/gtsam/linear/JacobianFactor.h index 5a567c2c7..b90012822 100644 --- a/gtsam/linear/JacobianFactor.h +++ b/gtsam/linear/JacobianFactor.h @@ -286,6 +286,9 @@ namespace gtsam { /// A'*b for Jacobian VectorValues gradientAtZero() const; + /* ************************************************************************* */ + virtual void gradientAtZero(double* d) const; + /** Return a whitened version of the factor, i.e. with unit diagonal noise model. 
*/ JacobianFactor whiten() const; diff --git a/gtsam_unstable/slam/ImplicitSchurFactor.h b/gtsam_unstable/slam/ImplicitSchurFactor.h index d7d1bc21c..8237b8c07 100644 --- a/gtsam_unstable/slam/ImplicitSchurFactor.h +++ b/gtsam_unstable/slam/ImplicitSchurFactor.h @@ -426,6 +426,28 @@ public: return g; } + /** + * Calculate gradient, which is -F'Q*b, see paper - RAW MEMORY ACCESS + */ + void gradientAtZero(double* d) const { + + // Use eigen magic to access raw memory + typedef Eigen::Matrix DVector; + typedef Eigen::Map DMap; + + // calculate Q*b + e1.resize(size()); + e2.resize(size()); + for (size_t k = 0; k < size(); k++) + e1[k] = b_.segment < 2 > (2 * k); + projectError(e1, e2); + + for (size_t k = 0; k < size(); ++k) { // for each camera in the factor + Key j = keys_[k]; + DMap(d + D * j) += -Fblocks_[k].second.transpose() * e2[k]; + } + } + }; // ImplicitSchurFactor From e56666e85c651ba7ba645fec11e105f7d6686b5d Mon Sep 17 00:00:00 2001 From: Luca Date: Wed, 23 Apr 2014 14:53:01 -0400 Subject: [PATCH 12/25] fixed print function --- gtsam_unstable/slam/ImplicitSchurFactor.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gtsam_unstable/slam/ImplicitSchurFactor.h b/gtsam_unstable/slam/ImplicitSchurFactor.h index 8237b8c07..0afdbcb71 100644 --- a/gtsam_unstable/slam/ImplicitSchurFactor.h +++ b/gtsam_unstable/slam/ImplicitSchurFactor.h @@ -87,7 +87,8 @@ public: } /// print - void print(const std::string& s, const KeyFormatter& formatter) const { + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { std::cout << " ImplicitSchurFactor " << std::endl; Factor::print(s); std::cout << " PointCovariance_ \n" << PointCovariance_ << std::endl; From 2b52bad56950e4ae2966a503d9cba3c0e3a7a0e0 Mon Sep 17 00:00:00 2001 From: Luca Date: Wed, 23 Apr 2014 15:15:48 -0400 Subject: [PATCH 13/25] optimized point damping --- gtsam_unstable/slam/SmartFactorBase.h | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/gtsam_unstable/slam/SmartFactorBase.h b/gtsam_unstable/slam/SmartFactorBase.h index 2852e9d83..79a56ea7f 100644 --- a/gtsam_unstable/slam/SmartFactorBase.h +++ b/gtsam_unstable/slam/SmartFactorBase.h @@ -302,15 +302,18 @@ public: // Point covariance inv(E'*E) Matrix3 EtE = E.transpose() * E; - Matrix3 DMatrix = eye(E.cols()); // damping matrix if (diagonalDamping) { // diagonal of the hessian - DMatrix(0, 0) = EtE(0, 0); - DMatrix(1, 1) = EtE(1, 1); - DMatrix(2, 2) = EtE(2, 2); + EtE(0, 0) += lambda * EtE(0, 0); + EtE(1, 1) += lambda * EtE(1, 1); + EtE(2, 2) += lambda * EtE(2, 2); + }else{ + EtE(0, 0) += lambda; + EtE(1, 1) += lambda; + EtE(2, 2) += lambda; } - PointCov.noalias() = (EtE + lambda * DMatrix).inverse(); + PointCov.noalias() = (EtE).inverse(); return f; } From d9e93f316ada77e687d0853f73332fc4c2e8de2e Mon Sep 17 00:00:00 2001 From: Luca Date: Wed, 23 Apr 2014 18:43:14 -0400 Subject: [PATCH 14/25] error computation now does not include noise model --- gtsam_unstable/slam/SmartFactorBase.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/gtsam_unstable/slam/SmartFactorBase.h b/gtsam_unstable/slam/SmartFactorBase.h index 79a56ea7f..e60b7e930 100644 --- a/gtsam_unstable/slam/SmartFactorBase.h +++ b/gtsam_unstable/slam/SmartFactorBase.h @@ -218,8 +218,7 @@ public: const Point2& zi = this->measured_.at(i); try { Point2 reprojectionError(camera.project(point) - zi); - overallError += 0.5 - * this->noise_.at(i)->distance(reprojectionError.vector()); + overallError += 0.5 * 
reprojectionError.vector().squaredNorm(); } catch (CheiralityException& e) { std::cout << "Cheirality exception " << std::endl; exit(EXIT_FAILURE); From 8c657f88579b1df333ca7fff0e29f4ff8ee83375 Mon Sep 17 00:00:00 2001 From: Luca Date: Wed, 23 Apr 2014 19:00:15 -0400 Subject: [PATCH 15/25] slight change in implementation of hessianBlockDiagonal --- gtsam_unstable/slam/ImplicitSchurFactor.h | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/gtsam_unstable/slam/ImplicitSchurFactor.h b/gtsam_unstable/slam/ImplicitSchurFactor.h index 0afdbcb71..5c797fccd 100644 --- a/gtsam_unstable/slam/ImplicitSchurFactor.h +++ b/gtsam_unstable/slam/ImplicitSchurFactor.h @@ -189,19 +189,24 @@ public: /// Return the block diagonal of the Hessian for this factor virtual std::map hessianBlockDiagonal() const { std::map blocks; + // F'*(I - E*P*E')*F for (size_t pos = 0; pos < size(); ++pos) { Key j = keys_[pos]; - const Matrix2D& Fj = Fblocks_[pos].second; // F'*F - F'*E*P*E'*F (9*2)*(2*9) - (9*2)*(2*3)*(3*3)*(3*2)*(2*9) - Eigen::Matrix FtE = Fj.transpose() - * E_.block<2, 3>(2 * pos, 0); - blocks[j] = Fj.transpose() * Fj - - FtE * PointCovariance_ * FtE.transpose(); + const Matrix2D& Fj = Fblocks_[pos].second; + // Eigen::Matrix FtE = Fj.transpose() + // * E_.block<2, 3>(2 * pos, 0); + // blocks[j] = Fj.transpose() * Fj + // - FtE * PointCovariance_ * FtE.transpose(); + + const Matrix23& Ej = E_.block<2, 3>(2 * pos, 0); + blocks[j] = Fj.transpose() * (Fj - Ej * PointCovariance_ * Ej.transpose() * Fj); + // F'*(I - E*P*E')*F, TODO: this should work, but it does not :-( -// static const Eigen::Matrix I2 = eye(2); -// Eigen::Matrix Q = // -// I2 - E_.block<2, 3>(2 * pos, 0) * PointCovariance_ * E_.block<2, 3>(2 * pos, 0).transpose(); -// blocks[j] = Fj.transpose() * Q * Fj; + // static const Eigen::Matrix I2 = eye(2); + // Eigen::Matrix Q = // + // I2 - E_.block<2, 3>(2 * pos, 0) * PointCovariance_ * E_.block<2, 3>(2 * pos, 0).transpose(); + // blocks[j] = Fj.transpose() * Q * Fj; } return blocks; } From a95126599fd354bda9d941468027dc00a8007103 Mon Sep 17 00:00:00 2001 From: Zsolt Kira Date: Thu, 24 Apr 2014 09:47:48 -0400 Subject: [PATCH 16/25] Fix LM behavior when GaussianFactorGraph is subclassed. Use clone method instead of assignment to local GaussianFactorGraph when creating damped system. 
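For context, a minimal standalone sketch of the slicing problem this commit works around (illustrative BaseGraph/DerivedGraph types only, not the GTSAM API): copy-assigning the linear graph into a local base-class object discards any subclass, whereas cloning through a shared_ptr keeps the dynamic type.

#include <boost/make_shared.hpp>
#include <boost/shared_ptr.hpp>

struct BaseGraph {
  virtual ~BaseGraph() {}
  // virtual clone, analogous to the GaussianFactorGraph::cloneToPtr() added below
  virtual boost::shared_ptr<BaseGraph> cloneToPtr() const {
    return boost::make_shared<BaseGraph>(*this);
  }
};

struct DerivedGraph : public BaseGraph { // stands in for a GaussianFactorGraph subclass
  virtual boost::shared_ptr<BaseGraph> cloneToPtr() const {
    return boost::make_shared<DerivedGraph>(*this);
  }
};

void buildDampedSketch(const BaseGraph& linear) {
  // BaseGraph damped = linear;            // copy-assignment slices off the subclass
  boost::shared_ptr<BaseGraph> dampedPtr = linear.cloneToPtr(); // preserves dynamic type
  BaseGraph& damped = *dampedPtr;          // then work through a reference, as in iterate()
  (void)damped;
}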
--- gtsam/linear/GaussianFactorGraph.cpp | 7 +++++++ gtsam/linear/GaussianFactorGraph.h | 8 +++++++- gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp | 10 ++++++---- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 2 +- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/gtsam/linear/GaussianFactorGraph.cpp b/gtsam/linear/GaussianFactorGraph.cpp index b56270a55..d6e4663e3 100644 --- a/gtsam/linear/GaussianFactorGraph.cpp +++ b/gtsam/linear/GaussianFactorGraph.cpp @@ -75,6 +75,13 @@ namespace gtsam { return dims_accumulated; } + /* ************************************************************************* */ + GaussianFactorGraph::shared_ptr GaussianFactorGraph::cloneToPtr() const { + gtsam::GaussianFactorGraph::shared_ptr result(new GaussianFactorGraph()); + *result = *this; + return result; + } + /* ************************************************************************* */ GaussianFactorGraph GaussianFactorGraph::clone() const { GaussianFactorGraph result; diff --git a/gtsam/linear/GaussianFactorGraph.h b/gtsam/linear/GaussianFactorGraph.h index ae5730b68..692310f26 100644 --- a/gtsam/linear/GaussianFactorGraph.h +++ b/gtsam/linear/GaussianFactorGraph.h @@ -160,7 +160,13 @@ namespace gtsam { * Cloning preserves null factors so indices for the original graph are still * valid for the cloned graph. */ - GaussianFactorGraph clone() const; + virtual GaussianFactorGraph clone() const; + + /** + * CloneToPtr() performs a simple assignment to a new graph and returns it. + * There is no preservation of null factors! + */ + virtual GaussianFactorGraph::shared_ptr cloneToPtr() const; /** * Returns the negation of all factors in this graph - corresponds to antifactors. diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index e6c24f050..187abece6 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -138,7 +138,7 @@ void LevenbergMarquardtOptimizer::decreaseLambda(double stepQuality) { } /* ************************************************************************* */ -GaussianFactorGraph LevenbergMarquardtOptimizer::buildDampedSystem( +GaussianFactorGraph::shared_ptr LevenbergMarquardtOptimizer::buildDampedSystem( const GaussianFactorGraph& linear) { gttic(damp); @@ -159,7 +159,8 @@ GaussianFactorGraph LevenbergMarquardtOptimizer::buildDampedSystem( // for each of the variables, add a prior double sigma = 1.0 / std::sqrt(state_.lambda); - GaussianFactorGraph damped = linear; + GaussianFactorGraph::shared_ptr dampedPtr = linear.cloneToPtr(); + GaussianFactorGraph &damped = (*dampedPtr); damped.reserve(damped.size() + state_.values.size()); if (params_.diagonalDamping) { BOOST_FOREACH(const VectorValues::KeyValuePair& key_vector, state_.hessianDiagonal) { @@ -188,7 +189,7 @@ GaussianFactorGraph LevenbergMarquardtOptimizer::buildDampedSystem( } } gttoc(damp); - return damped; + return dampedPtr; } /* ************************************************************************* */ @@ -212,7 +213,8 @@ void LevenbergMarquardtOptimizer::iterate() { cout << "trying lambda = " << state_.lambda << endl; // Build damped system for this lambda (adds prior factors that make it like gradient descent) - GaussianFactorGraph dampedSystem = buildDampedSystem(*linear); + GaussianFactorGraph::shared_ptr dampedSystemPtr = buildDampedSystem(*linear); + GaussianFactorGraph &dampedSystem = (*dampedSystemPtr); // Try solving double modelFidelity = 0.0; diff --git 
a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 47952f9e4..211830ed8 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -255,7 +255,7 @@ public: } /** Build a damped system for a specific lambda */ - GaussianFactorGraph buildDampedSystem(const GaussianFactorGraph& linear); + GaussianFactorGraph::shared_ptr buildDampedSystem(const GaussianFactorGraph& linear); friend class ::NonlinearOptimizerMoreOptimizationTest; /// @} From 185c572b5a03acf6ac3ee83d132356bae51fbdc0 Mon Sep 17 00:00:00 2001 From: Luca Date: Thu, 24 Apr 2014 11:28:41 -0400 Subject: [PATCH 17/25] removed wrong base class --- gtsam_unstable/slam/ImplicitSchurFactor.h | 1 - 1 file changed, 1 deletion(-) diff --git a/gtsam_unstable/slam/ImplicitSchurFactor.h b/gtsam_unstable/slam/ImplicitSchurFactor.h index 5c797fccd..c0f339b30 100644 --- a/gtsam_unstable/slam/ImplicitSchurFactor.h +++ b/gtsam_unstable/slam/ImplicitSchurFactor.h @@ -24,7 +24,6 @@ class ImplicitSchurFactor: public GaussianFactor { public: typedef ImplicitSchurFactor This; ///< Typedef to this class - typedef JacobianFactor Base; ///< Typedef to base class typedef boost::shared_ptr shared_ptr; ///< shared_ptr to this class protected: From f15f7f38932d7b170e1c54704d4d6634523eab52 Mon Sep 17 00:00:00 2001 From: Luca Date: Thu, 24 Apr 2014 12:02:11 -0400 Subject: [PATCH 18/25] Jacobian factors' gradientAtZero with raw memory access does nothing (to be fixed) --- gtsam/linear/JacobianFactor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index bb9d5afc0..a63bbf473 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -575,7 +575,7 @@ VectorValues JacobianFactor::gradientAtZero() const { /* ************************************************************************* */ void JacobianFactor::gradientAtZero(double* d) const { - throw std::runtime_error("gradientAtZero not implemented for Jacobian factor"); + //throw std::runtime_error("gradientAtZero not implemented for Jacobian factor"); } /* ************************************************************************* */ From 4769e3c3fa10f033db2a263ee6c64a549bd44ddb Mon Sep 17 00:00:00 2001 From: Zsolt Kira Date: Thu, 24 Apr 2014 12:05:37 -0400 Subject: [PATCH 19/25] Fix for unit test based on previous LM change --- tests/testNonlinearOptimizer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index b976cca90..6d8a056fc 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -295,7 +295,7 @@ TEST_UNSAFE(NonlinearOptimizer, MoreOptimization) { // test the diagonal GaussianFactorGraph::shared_ptr linear = optimizer.linearize(); - GaussianFactorGraph damped = optimizer.buildDampedSystem(*linear); + GaussianFactorGraph damped = *optimizer.buildDampedSystem(*linear); VectorValues d = linear->hessianDiagonal(), // expectedDiagonal = d + params.lambdaInitial * d; EXPECT(assert_equal(expectedDiagonal, damped.hessianDiagonal())); @@ -309,7 +309,7 @@ TEST_UNSAFE(NonlinearOptimizer, MoreOptimization) { EXPECT(assert_equal(expectedGradient,linear->gradientAtZero())); // Check that the gradient is zero for damped system (it is not!) 
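// (buildDampedSystem now returns a GaussianFactorGraph::shared_ptr, see PATCH 16
// above, hence the extra dereference added to the test on the next line.)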
- damped = optimizer.buildDampedSystem(*linear); + damped = *optimizer.buildDampedSystem(*linear); VectorValues actualGradient = damped.gradientAtZero(); EXPECT(assert_equal(expectedGradient,actualGradient)); From 15201bda0f741c512218affe68a0c37cf2e31a5d Mon Sep 17 00:00:00 2001 From: Luca Date: Mon, 28 Apr 2014 13:16:25 -0400 Subject: [PATCH 20/25] fixed landmark key --- gtsam/slam/dataset.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 4a7166f38..60c17ec9e 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -706,7 +706,7 @@ bool writeBALfromValues(const string& filename, const SfM_data &data, Values& va } for (size_t j = 0; j < dataValues.number_tracks(); j++){ // for each point - Key pointKey = symbol('l',j); + Key pointKey = P(j); if(values.exists(pointKey)){ Point3 point = values.at(pointKey); dataValues.tracks[j].p = point; From f1fb2c09a4f835700130b04f0b2d57645a720a3b Mon Sep 17 00:00:00 2001 From: Luca Date: Fri, 2 May 2014 15:01:42 -0400 Subject: [PATCH 21/25] bug fix in LM --- gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 187abece6..f3c145ffa 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -268,14 +268,17 @@ void LevenbergMarquardtOptimizer::iterate() { // cost change in the original, nonlinear system (old - new) double costChange = state_.error - newError; - if (linearizedCostChange > 1e-15) { // the error has to decrease to satisfy this condition + if (linearizedCostChange > 1e-20) { // the (linear) error has to decrease to satisfy this condition // fidelity of linearized model VS original system between modelFidelity = costChange / linearizedCostChange; // if we decrease the error in the nonlinear system and modelFidelity is above threshold step_is_successful = modelFidelity > params_.minModelFidelity; - } else { - step_is_successful = true; // linearizedCostChange close to zero + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) + cout << "modelFidelity: " << modelFidelity << endl; } + //else { + // step_is_successful = true; // linearizedCostChange close to zero + //} double minAbsoluteTolerance = params_.relativeErrorTol * state_.error; // if the change is small we terminate From c1650f72fdd80f6a1d1bc6c0b865db83ce2da142 Mon Sep 17 00:00:00 2001 From: Luca Date: Fri, 2 May 2014 15:23:19 -0400 Subject: [PATCH 22/25] added comments --- gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index f3c145ffa..689642e7f 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -275,10 +275,7 @@ void LevenbergMarquardtOptimizer::iterate() { step_is_successful = modelFidelity > params_.minModelFidelity; if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "modelFidelity: " << modelFidelity << endl; - } - //else { - // step_is_successful = true; // linearizedCostChange close to zero - //} + } // else we consider the step non successful and we either increase lambda or stop if error change is small double minAbsoluteTolerance = params_.relativeErrorTol * state_.error; // if the change is small we terminate From 
9f499d2257756c49f45bef6f5bc685944fad4ffd Mon Sep 17 00:00:00 2001 From: Luca Date: Fri, 2 May 2014 16:05:08 -0400 Subject: [PATCH 23/25] fixed key in unit test --- gtsam/slam/tests/testDataset.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index f9a2cb34f..4ae30f508 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -19,10 +19,12 @@ #include +#include #include #include #include +using namespace gtsam::symbol_shorthand; using namespace std; using namespace gtsam; @@ -176,7 +178,7 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ value.insert(poseKey, pose); } for(size_t j=0; j < readData.number_tracks(); j++){ // for each point - Key pointKey = symbol('l',j); + Key pointKey = P(j); Point3 point = poseChange.transform_from( readData.tracks[j].p ); value.insert(pointKey, point); } @@ -208,7 +210,7 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ EXPECT(assert_equal(expectedPose,actualPose, 1e-7)); Point3 expectedPoint = track0.p; - Key pointKey = symbol('l',0); + Key pointKey = P(0); Point3 actualPoint = value.at(pointKey); EXPECT(assert_equal(expectedPoint,actualPoint, 1e-6)); } From e8a47675914f3a8b6910afd6c9c75e459e3f6b1b Mon Sep 17 00:00:00 2001 From: Luca Date: Fri, 2 May 2014 16:10:51 -0400 Subject: [PATCH 24/25] cleaned code to log LM iterations --- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 33 ++++++++++++------- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 3 +- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 689642e7f..2cde6768f 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -192,6 +192,19 @@ GaussianFactorGraph::shared_ptr LevenbergMarquardtOptimizer::buildDampedSystem( return dampedPtr; } +/* ************************************************************************* */ +// Log current error/lambda to file +inline void LevenbergMarquardtOptimizer::writeLogFile(double currentError){ + if (!params_.logFile.empty()) { + ofstream os(params_.logFile.c_str(), ios::app); + boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time(); + os << /*inner iterations*/ state_.totalNumberInnerIterations << "," + << 1e-6 * (currentTime - state_.startTime).total_microseconds() << "," + << /*current error*/ currentError << "," << state_.lambda << "," + << /*outer iterations*/ state_.iterations << endl; + } +} + /* ************************************************************************* */ void LevenbergMarquardtOptimizer::iterate() { @@ -206,6 +219,9 @@ void LevenbergMarquardtOptimizer::iterate() { cout << "linearizing = " << endl; GaussianFactorGraph::shared_ptr linear = linearize(); + if(state_.totalNumberInnerIterations==0) // write initial error + writeLogFile(state_.error); + // Keep increasing lambda until we make make progress while (true) { @@ -288,36 +304,29 @@ void LevenbergMarquardtOptimizer::iterate() { } } - // Log current error/lambda to file - if (!params_.logFile.empty()) { - ofstream os(params_.logFile.c_str(), ios::app); - boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time(); - os << /*inner iterations*/ state_.totalNumberInnerIterations << "," - << 1e-6 * (currentTime - state_.startTime).total_microseconds() << "," - << /*current error*/ state_.error << "," << state_.lambda << "," - << 
/*outer iterations*/ state_.iterations << endl; - } ++state_.totalNumberInnerIterations; if (step_is_successful) { // we have successfully decreased the cost and we have good modelFidelity state_.values.swap(newValues); state_.error = newError; decreaseLambda(modelFidelity); + writeLogFile(state_.error); break; } else if (!stopSearchingLambda) { // we failed to solved the system or we had no decrease in cost if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "increasing lambda" << endl; increaseLambda(); + writeLogFile(state_.error); // check if lambda is too big if (state_.lambda >= params_.lambdaUpperBound) { if (nloVerbosity >= NonlinearOptimizerParams::TERMINATION) - cout - << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" - << endl; + cout << "Warning: Levenberg-Marquardt giving up because " + "cannot decrease error with maximum lambda" << endl; break; } } else { // the change in the cost is very small and it is not worth trying bigger lambdas + writeLogFile(state_.error); if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "Levenberg-Marquardt: stopping as relative cost reduction is small" << endl; break; diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 211830ed8..f365fc524 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -113,7 +113,6 @@ public: inline void setDiagonalDamping(bool flag) { diagonalDamping = flag; } - inline void setUseFixedLambdaFactor(bool flag) { useFixedLambdaFactor_ = flag; } @@ -258,6 +257,8 @@ public: GaussianFactorGraph::shared_ptr buildDampedSystem(const GaussianFactorGraph& linear); friend class ::NonlinearOptimizerMoreOptimizationTest; + void writeLogFile(double currentError); + /// @} protected: From 5c420f1aecb7eaa95056e2a3b7a5b1eb2fb4a1e8 Mon Sep 17 00:00:00 2001 From: Luca Date: Sun, 4 May 2014 18:16:40 -0400 Subject: [PATCH 25/25] added references --- gtsam/navigation/CombinedImuFactor.h | 5 +++++ gtsam/navigation/ImuFactor.h | 8 +++++++- gtsam_unstable/slam/SmartProjectionPoseFactor.h | 10 ++++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index 559568c84..06e79324d 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -34,6 +34,11 @@ namespace gtsam { * * @addtogroup SLAM * + * If you are using the factor, please cite: + * L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert, Eliminating conditionally + * independent sets in factor graphs: a unifying perspective based on smart factors, + * Int. Conf. on Robotics and Automation (ICRA), 2014. + * * REFERENCES: * [1] G.S. Chirikjian, "Stochastic Models, Information Theory, and Lie Groups", Volume 2, 2008. * [2] T. Lupton and S.Sukkarieh, "Visual-Inertial-Aided Navigation for High-Dynamic Motion in Built diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index b1e20297e..f5b5293f8 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -33,7 +33,13 @@ namespace gtsam { /** * * @addtogroup SLAM - * * REFERENCES: + * + * If you are using the factor, please cite: + * L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert, Eliminating conditionally + * independent sets in factor graphs: a unifying perspective based on smart factors, + * Int. Conf. on Robotics and Automation (ICRA), 2014. + * + ** REFERENCES: * [1] G.S. 
Chirikjian, "Stochastic Models, Information Theory, and Lie Groups", Volume 2, 2008. * [2] T. Lupton and S.Sukkarieh, "Visual-Inertial-Aided Navigation for High-Dynamic Motion in Built * Environments Without Initial Conditions", TRO, 28(1):61-76, 2012. diff --git a/gtsam_unstable/slam/SmartProjectionPoseFactor.h b/gtsam_unstable/slam/SmartProjectionPoseFactor.h index 879e5ab80..8f9617561 100644 --- a/gtsam_unstable/slam/SmartProjectionPoseFactor.h +++ b/gtsam_unstable/slam/SmartProjectionPoseFactor.h @@ -22,6 +22,16 @@ #include "SmartProjectionFactor.h" namespace gtsam { +/** + * + * @addtogroup SLAM + * + * If you are using the factor, please cite: + * L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert, Eliminating conditionally + * independent sets in factor graphs: a unifying perspective based on smart factors, + * Int. Conf. on Robotics and Automation (ICRA), 2014. + * + */ /** * The calibration is known here. The factor only constraints poses (variable dimension is 6)
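
Patches 21 and 22 above change when a Levenberg-Marquardt step is accepted: the decrease in the nonlinear cost is compared against the decrease predicted by the linearized system, and the step is taken only when that ratio (the model fidelity) exceeds params_.minModelFidelity; otherwise lambda is increased, or the search stops if the cost change is below tolerance. The snippet below is a minimal standalone sketch of that accept/reject decision in plain C++. It deliberately uses no GTSAM types, and every name, threshold, and the example numbers in main() are illustrative placeholders rather than the library's actual API; the real logic lives in LevenbergMarquardtOptimizer::iterate().

    // Standalone sketch of the LM step-acceptance rule from patches 21/22.
    // Not GTSAM code: names and values are placeholders for illustration.
    #include <iostream>

    struct LMStepDecision {
      bool   accept;         // true: keep new values and decrease lambda
      double modelFidelity;  // nonlinear decrease / linearized (predicted) decrease
    };

    LMStepDecision evaluateStep(double oldError, double newError,
                                double newLinearizedError,
                                double minModelFidelity) {
      LMStepDecision d{false, 0.0};
      const double costChange = oldError - newError;                      // actual decrease
      const double linearizedCostChange = oldError - newLinearizedError;  // predicted decrease
      if (linearizedCostChange > 1e-20) {  // same guard value as in the patched code
        d.modelFidelity = costChange / linearizedCostChange;
        d.accept = d.modelFidelity > minModelFidelity;
      }
      // Otherwise the step counts as unsuccessful: the caller either increases
      // lambda or stops when the cost change falls below the relative tolerance.
      return d;
    }

    int main() {
      const LMStepDecision d = evaluateStep(/*oldError=*/10.0, /*newError=*/8.0,
                                            /*newLinearizedError=*/7.5,
                                            /*minModelFidelity=*/1e-3);
      std::cout << (d.accept ? "accept" : "reject")
                << ", fidelity = " << d.modelFidelity << std::endl;
      return 0;
    }

With the numbers used in main(), the nonlinear cost drops by 2.0 while the linearized model predicted a drop of 2.5, so the fidelity is 0.8 and the step is accepted; if the nonlinear cost had increased, costChange would be negative, the fidelity would fall below the threshold, and the step would be rejected, matching the behavior of the patched optimizer.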