From 42a3963c7eb0fd8ea94206b3afdc5109c3ddff78 Mon Sep 17 00:00:00 2001
From: Alex Cunningham
Date: Sun, 6 Nov 2011 19:40:48 +0000
Subject: [PATCH] Merge branch 'master' into retraction_name

---
 gtsam/base/Makefile.am                  |   2 +-
 gtsam/base/tests/timeVirtual.cpp        |  11 ++
 gtsam/base/tests/timeVirtual2.cpp       | 135 ++++++++++++++++++++++
 gtsam/nonlinear/DoglegOptimizerImpl.cpp |  12 +--
 gtsam/nonlinear/DoglegOptimizerImpl.h   |  16 +--
 tests/Makefile.am                       |   3 +-
 6 files changed, 162 insertions(+), 17 deletions(-)
 create mode 100644 gtsam/base/tests/timeVirtual2.cpp

diff --git a/gtsam/base/Makefile.am b/gtsam/base/Makefile.am
index da4e07bbd..0d4189dd7 100644
--- a/gtsam/base/Makefile.am
+++ b/gtsam/base/Makefile.am
@@ -37,7 +37,7 @@ sources += DSFVector.cpp
 check_PROGRAMS += tests/testBTree tests/testDSF tests/testDSFVector
 
 # Timing tests
-noinst_PROGRAMS = tests/timeMatrix tests/timeVirtual tests/timeTest
+noinst_PROGRAMS = tests/timeMatrix tests/timeVirtual tests/timeVirtual2 tests/timeTest
 noinst_PROGRAMS += tests/timeMatrixOps
 
 #----------------------------------------------------------------------------------------------------
diff --git a/gtsam/base/tests/timeVirtual.cpp b/gtsam/base/tests/timeVirtual.cpp
index ddcaa3b17..a61ce7b77 100644
--- a/gtsam/base/tests/timeVirtual.cpp
+++ b/gtsam/base/tests/timeVirtual.cpp
@@ -1,3 +1,14 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
 /**
  * @file timeVirtual.cpp
  * @brief Time the overhead of using virtual destructors and methods
diff --git a/gtsam/base/tests/timeVirtual2.cpp b/gtsam/base/tests/timeVirtual2.cpp
new file mode 100644
index 000000000..56190b91e
--- /dev/null
+++ b/gtsam/base/tests/timeVirtual2.cpp
@@ -0,0 +1,135 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file timeVirtual2.cpp
+ * @brief Time the overhead of using virtual destructors and methods
+ * @author Richard Roberts
+ * @date Nov 6, 2011
+ */
+
+#include <gtsam/base/timing.h>
+
+#include <boost/shared_ptr.hpp>
+#include <boost/intrusive_ptr.hpp>
+
+#include <iostream>
+
+using namespace std;
+using namespace boost;
+
+struct DtorTestBase {
+  DtorTestBase() { cout << " DtorTestBase" << endl; }
+  virtual ~DtorTestBase() { cout << " ~DtorTestBase" << endl; }
+};
+
+struct DtorTestDerived : public DtorTestBase {
+  DtorTestDerived() { cout << " DtorTestDerived" << endl; }
+  virtual ~DtorTestDerived() { cout << " ~DtorTestDerived" << endl; }
+};
+
+
+struct VirtualBase {
+  VirtualBase() { }
+  virtual void method() = 0;
+  virtual ~VirtualBase() { }
+};
+
+struct VirtualDerived : public VirtualBase {
+  double data;
+  VirtualDerived() { data = rand(); }
+  virtual void method() { data = rand(); }
+  virtual ~VirtualDerived() { }
+};
+
+struct NonVirtualBase {
+  NonVirtualBase() { }
+  ~NonVirtualBase() { }
+};
+
+struct NonVirtualDerived : public NonVirtualBase {
+  double data;
+  NonVirtualDerived() { data = rand(); }
+  void method() { data = rand(); }
+  ~NonVirtualDerived() { }
+};
+
+
+int main(int argc, char *argv[]) {
+
+  // Virtual destructor test
+  cout << "Stack objects:" << endl;
+  cout << "Base:" << endl;
+  { DtorTestBase b; }
+  cout << "Derived:" << endl;
+  { DtorTestDerived d; }
+
+  cout << "Heap objects:" << endl;
+  cout << "Base:" << endl;
+  { DtorTestBase *b = new DtorTestBase(); delete b; }
+  cout << "Derived:" << endl;
+  { DtorTestDerived *d = new DtorTestDerived(); delete d; }
+  cout << "Derived with base pointer:" << endl;
+  { DtorTestBase *b = new DtorTestDerived(); delete b; }
+
+  int n = 10000000;
+
+  VirtualBase** b = new VirtualBase*[n];
+  tic_(0, "Virtual");
+  tic_(0, "new");
+  for(int i=0; i<n; i++)
+    b[i] = new VirtualDerived();
+  toc_(0, "new");
+  tic_(1, "method");
+  for(int i=0; i<n; i++)
+    b[i]->method();
+  toc_(1, "method");
+  tic_(2, "dynamic_cast");
+  for(int i=0; i<n; i++) {
+    VirtualDerived* d = dynamic_cast<VirtualDerived*>(b[i]);
+    if(d)
+      d->method();
+  }
+  toc_(2, "dynamic_cast");
+  tic_(3, "delete");
+  for(int i=0; i<n; i++)
+    delete b[i];
+  toc_(3, "delete");
+  toc_(0, "Virtual");
+  delete[] b;
+
+  NonVirtualDerived** d = new NonVirtualDerived*[n];
+  tic_(1, "Non-virtual");
+  tic_(0, "new");
+  for(int i=0; i<n; i++)
+    d[i] = new NonVirtualDerived();
+  toc_(0, "new");
+  tic_(1, "method");
+  for(int i=0; i<n; i++)
+    d[i]->method();
+  toc_(1, "method");
+  tic_(2, "dynamic_cast (does nothing)");
+  for(int i=0; i<n; i++)
+    d[i]->method();
+  toc_(2, "dynamic_cast (does nothing)");
+  tic_(3, "delete");
+  for(int i=0; i<n; i++)
+    delete d[i];
+  toc_(3, "delete");
+  toc_(1, "Non-virtual");
+  delete[] d;
+
+  return 0;
+}
diff --git a/gtsam/nonlinear/DoglegOptimizerImpl.cpp b/gtsam/nonlinear/DoglegOptimizerImpl.cpp
--- a/gtsam/nonlinear/DoglegOptimizerImpl.cpp
+++ b/gtsam/nonlinear/DoglegOptimizerImpl.cpp
@@ ... @@
-VectorValues DoglegOptimizerImpl::ComputeDoglegPoint(double Delta, const VectorValues& x_u, const VectorValues& x_n) {
+VectorValues DoglegOptimizerImpl::ComputeDoglegPoint(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose) {
   assert(Delta >= 0.0);
   double DeltaSq = Delta*Delta;
   double x_u_norm_sq = x_u.vector().squaredNorm();
   double x_n_norm_sq = x_n.vector().squaredNorm();
-  cout << "Steepest descent magnitude " << sqrt(x_u_norm_sq) << ", Newton's method magnitude " << sqrt(x_n_norm_sq) << endl;
+  if(verbose) cout << "Steepest descent magnitude " << sqrt(x_u_norm_sq) << ", Newton's method magnitude " << sqrt(x_n_norm_sq) << endl;
 
   if(DeltaSq < x_u_norm_sq) {
     // Trust region is smaller than steepest descent update
     VectorValues x_d = VectorValues::SameStructure(x_u);
     x_d.vector() = x_u.vector() * sqrt(DeltaSq / x_u_norm_sq);
-    cout << "In steepest descent region with fraction " << sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
+    if(verbose) cout << "In steepest descent region with fraction " << sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
     return x_d;
   } else if(DeltaSq < x_n_norm_sq) {
     // Trust region boundary is between steepest descent point and Newton's method point
     return ComputeBlend(Delta, x_u, x_n);
   } else {
     assert(DeltaSq >= x_n_norm_sq);
-    cout << "In pure Newton's method region" << endl;
+    if(verbose) cout << "In pure Newton's method region" << endl;
     // Trust region is larger than Newton's method point
     return x_n;
   }
 }
 /* ************************************************************************* */
-VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n) {
+VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose) {
 
   // See doc/trustregion.lyx or doc/trustregion.pdf
 
@@ -64,7 +64,7 @@ VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues&
   }
 
   // Compute blended point
-  cout << "In blend region with fraction " << tau << " of Newton's method point" << endl;
+  if(verbose) cout << "In blend region with fraction " << tau << " of Newton's method point" << endl;
   VectorValues blend = VectorValues::SameStructure(x_u);
   blend.vector() = (1. - tau) * x_u.vector() + tau * x_n.vector();
   return blend;
diff --git a/gtsam/nonlinear/DoglegOptimizerImpl.h b/gtsam/nonlinear/DoglegOptimizerImpl.h
index ca43b25bc..eefa76df1 100644
--- a/gtsam/nonlinear/DoglegOptimizerImpl.h
+++ b/gtsam/nonlinear/DoglegOptimizerImpl.h
@@ -75,7 +75,7 @@ struct DoglegOptimizerImpl {
   template<class M, class F, class VALUES>
   static IterationResult Iterate(
       double Delta, TrustRegionAdaptationMode mode, const M& Rd,
-      const F& f, const VALUES& x0, const Ordering& ordering, double f_error);
+      const F& f, const VALUES& x0, const Ordering& ordering, const double f_error, const bool verbose=false);
 
   /**
    * Compute the dogleg point given a trust region radius \f$ \Delta \f$.  The
@@ -98,7 +98,7 @@ struct DoglegOptimizerImpl {
    * @param bayesNet The Bayes' net \f$ (R,d) \f$ as described above.
    * @return The dogleg point \f$ \delta x_d \f$
    */
-  static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& x_u, const VectorValues& x_n);
+  static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose=false);
 
   /** Compute the minimizer \f$ \delta x_u \f$ of the line search along the gradient direction \f$ g \f$ of
    * the function
@@ -132,7 +132,7 @@ struct DoglegOptimizerImpl {
    * @param xu Steepest descent minimizer
    * @param xn Newton's method minimizer
    */
-  static VectorValues ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n);
+  static VectorValues ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose=false);
 
 };
 
@@ -140,7 +140,7 @@ struct DoglegOptimizerImpl {
 template<class M, class F, class VALUES>
 typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
     double Delta, TrustRegionAdaptationMode mode, const M& Rd,
-    const F& f, const VALUES& x0, const Ordering& ordering, double f_error) {
+    const F& f, const VALUES& x0, const Ordering& ordering, const double f_error, const bool verbose) {
 
   // Compute steepest descent and Newton's method points
   VectorValues dx_u = ComputeSteepestDescentPoint(Rd);
@@ -156,7 +156,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
 
   // Compute dog leg point
   result.dx_d = ComputeDoglegPoint(Delta, dx_u, dx_n);
-  cout << "Delta = " << Delta << ", dx_d_norm = " << result.dx_d.vector().norm() << endl;
+  if(verbose) cout << "Delta = " << Delta << ", dx_d_norm = " << result.dx_d.vector().norm() << endl;
 
   // Compute expmapped solution
   const VALUES x_d(x0.retract(result.dx_d, ordering));
@@ -167,8 +167,8 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
   // Compute decrease in f
   result.f_error = f.error(x_d);
 
   // Compute decrease in M
   const double new_M_error = jfg.error(result.dx_d);
 
-  cout << "f error: " << f_error << " -> " << result.f_error << endl;
-  cout << "M error: " << M_error << " -> " << new_M_error << endl;
+  if(verbose) cout << "f error: " << f_error << " -> " << result.f_error << endl;
+  if(verbose) cout << "M error: " << M_error << " -> " << new_M_error << endl;
 
   // Compute gain ratio. Here we take advantage of the invariant that the
   // Bayes' net error at zero is equal to the nonlinear error
   const double rho = fabs(M_error - new_M_error) < 1e-15 || fabs(f_error - result.f_error) < 1e-15 ?
       0.5 :
       (f_error - result.f_error) / (M_error - new_M_error);
 
-  cout << "rho = " << rho << endl;
+  if(verbose) cout << "rho = " << rho << endl;
 
   if(rho >= 0.75) {
     // M agrees very well with f, so try to increase lambda
diff --git a/tests/Makefile.am b/tests/Makefile.am
index c88aa93a2..39f3bfa5d 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -14,8 +14,7 @@ check_PROGRAMS += testGraph
 check_PROGRAMS += testInference
 check_PROGRAMS += testGaussianJunctionTree
 check_PROGRAMS += testNonlinearEquality testNonlinearFactor testNonlinearFactorGraph
-check_PROGRAMS += testNonlinearOptimizer
-# check_PROGRAMS += testDoglegOptimizer # Uses debugging prints so commented out in SVN
+check_PROGRAMS += testNonlinearOptimizer testDoglegOptimizer
 check_PROGRAMS += testSymbolicBayesNet testSymbolicFactorGraph
 check_PROGRAMS += testTupleValues
 check_PROGRAMS += testNonlinearISAM
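
Note on the DoglegOptimizerImpl hunks above: ComputeDoglegPoint, ComputeBlend, and Iterate now take a verbose flag (default false) so the region-selection debug prints stay silent during tests, which is why testDoglegOptimizer is re-enabled in tests/Makefile.am. For reference, the case split ComputeDoglegPoint performs can be sketched with scalar stand-ins for the step norms; this is an illustrative sketch only (stepLength and its arguments are made-up names, not GTSAM API):

#include <cassert>
#include <cmath>
#include <iostream>

// Length of the dogleg step for trust-region radius Delta, given the norms of
// the steepest-descent step (xuNorm) and the Newton step (xnNorm).
double stepLength(double Delta, double xuNorm, double xnNorm, bool verbose = false) {
  assert(Delta >= 0.0);
  const double DeltaSq = Delta * Delta;
  const double xuSq = xuNorm * xuNorm, xnSq = xnNorm * xnNorm;
  if (DeltaSq < xuSq) {
    // Trust region smaller than the steepest descent step: scale it down.
    if (verbose) std::cout << "steepest descent region" << std::endl;
    return xuNorm * std::sqrt(DeltaSq / xuSq);
  } else if (DeltaSq < xnSq) {
    // Boundary between the two points: the blended step ends on the boundary,
    // so its length equals Delta (ComputeBlend picks tau accordingly).
    if (verbose) std::cout << "blend region" << std::endl;
    return Delta;
  } else {
    // Trust region contains the Newton step: take it unchanged.
    if (verbose) std::cout << "pure Newton's method region" << std::endl;
    return xnNorm;
  }
}

int main() {
  // With ||x_u|| = 2 and ||x_n|| = 5, a radius of 3 falls in the blend region.
  std::cout << stepLength(3.0, 2.0, 5.0, true) << std::endl;  // prints 3
  return 0;
}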