Merge branch 'master' into retraction_name
parent 5798868ab7
commit 42a3963c7e

@@ -37,7 +37,7 @@ sources += DSFVector.cpp
 check_PROGRAMS += tests/testBTree tests/testDSF tests/testDSFVector
 
 # Timing tests
-noinst_PROGRAMS = tests/timeMatrix tests/timeVirtual tests/timeTest
+noinst_PROGRAMS = tests/timeMatrix tests/timeVirtual tests/timeVirtual2 tests/timeTest
 noinst_PROGRAMS += tests/timeMatrixOps
 
 #----------------------------------------------------------------------------------------------------
@@ -1,3 +1,14 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
 /**
  * @file timeVirtual.cpp
  * @brief Time the overhead of using virtual destructors and methods
@@ -0,0 +1,135 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file timeVirtual.cpp
+ * @brief Time the overhead of using virtual destructors and methods
+ * @author Richard Roberts
+ * @date Nov 6, 2011
+ */
+
+#include <gtsam/base/timing.h>
+
+#include <boost/shared_ptr.hpp>
+#include <boost/intrusive_ptr.hpp>
+
+#include <iostream>
+
+using namespace std;
+using namespace boost;
+
+struct DtorTestBase {
+  DtorTestBase() { cout << " DtorTestBase" << endl; }
+  virtual ~DtorTestBase() { cout << " ~DtorTestBase" << endl; }
+};
+
+struct DtorTestDerived : public DtorTestBase {
+  DtorTestDerived() { cout << " DtorTestDerived" << endl; }
+  virtual ~DtorTestDerived() { cout << " ~DtorTestDerived" << endl; }
+};
+
+
+struct VirtualBase {
+  VirtualBase() { }
+  virtual void method() = 0;
+  virtual ~VirtualBase() { }
+};
+
+struct VirtualDerived : public VirtualBase {
+  double data;
+  VirtualDerived() { data = rand(); }
+  virtual void method() { data = rand(); }
+  virtual ~VirtualDerived() { }
+};
+
+struct NonVirtualBase {
+  NonVirtualBase() { }
+  ~NonVirtualBase() { }
+};
+
+struct NonVirtualDerived : public NonVirtualBase {
+  double data;
+  NonVirtualDerived() { data = rand(); }
+  void method() { data = rand(); }
+  ~NonVirtualDerived() { }
+};
+
+
+int main(int argc, char *argv[]) {
+
+  // Virtual destructor test
+  cout << "Stack objects:" << endl;
+  cout << "Base:" << endl;
+  { DtorTestBase b; }
+  cout << "Derived:" << endl;
+  { DtorTestDerived d; }
+
+  cout << "Heap objects:" << endl;
+  cout << "Base:" << endl;
+  { DtorTestBase *b = new DtorTestBase(); delete b; }
+  cout << "Derived:" << endl;
+  { DtorTestDerived *d = new DtorTestDerived(); delete d; }
+  cout << "Derived with base pointer:" << endl;
+  { DtorTestBase *b = new DtorTestDerived(); delete b; }
+
+  int n = 10000000;
+
+  VirtualBase** b = new VirtualBase*[n];
+  tic_(0, "Virtual");
+  tic_(0, "new");
+  for(int i=0; i<n; ++i)
+    b[i] = new VirtualDerived();
+  toc_(0, "new");
+  tic_(1, "method");
+  for(int i=0; i<n; ++i)
+    b[i]->method();
+  toc_(1, "method");
+  tic_(2, "dynamic_cast");
+  for(int i=0; i<n; ++i) {
+    VirtualDerived* d = dynamic_cast<VirtualDerived*>(b[i]);
+    if(d)
+      d->method();
+  }
+  toc_(2, "dynamic_cast");
+  tic_(3, "delete");
+  for(int i=0; i<n; ++i)
+    delete b[i];
+  toc_(3, "delete");
+  toc_(0, "Virtual");
+  delete[] b;
+
+
+  NonVirtualDerived** d = new NonVirtualDerived*[n];
+  tic_(1, "NonVirtual");
+  tic_(0, "new");
+  for(int i=0; i<n; ++i)
+    d[i] = new NonVirtualDerived();
+  toc_(0, "new");
+  tic_(1, "method");
+  for(int i=0; i<n; ++i)
+    d[i]->method();
+  toc_(1, "method");
+  tic_(2, "dynamic_cast (does nothing)");
+  for(int i=0; i<n; ++i)
+    d[i]->method();
+  toc_(2, "dynamic_cast (does nothing)");
+  tic_(3, "delete");
+  for(int i=0; i<n; ++i)
+    delete d[i];
+  toc_(3, "delete");
+  toc_(1, "NonVirtual");
+  delete[] d;
+
+  tictoc_finishedIteration_();
+  tictoc_print_();
+
+  return 0;
+}
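The new benchmark above relies on GTSAM's internal tic_/toc_ macros from gtsam/base/timing.h. As a rough standalone sketch of the same virtual-versus-non-virtual comparison (not part of this commit; it uses std::chrono instead of the GTSAM timing utilities, and all names are illustrative):

// Standalone sketch: times virtual vs. non-virtual method calls
// with std::chrono instead of GTSAM's tic_/toc_ macros.
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <vector>

struct Base { virtual void method() = 0; virtual ~Base() {} };
struct Derived : Base { double data; void method() override { data = std::rand(); } };
struct Plain { double data; void method() { data = std::rand(); } };

template <typename F>
double seconds(F&& f) {
  auto start = std::chrono::steady_clock::now();
  f();
  return std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count();
}

int main() {
  const int n = 10000000;
  std::vector<Base*> b(n);
  for (int i = 0; i < n; ++i) b[i] = new Derived;
  std::vector<Plain> d(n);

  // Virtual dispatch goes through the vtable; the non-virtual call can be
  // resolved, and often inlined away entirely, at compile time.
  std::cout << "virtual:     " << seconds([&] { for (int i = 0; i < n; ++i) b[i]->method(); }) << " s\n";
  std::cout << "non-virtual: " << seconds([&] { for (int i = 0; i < n; ++i) d[i].method(); }) << " s\n";

  for (int i = 0; i < n; ++i) delete b[i];
  return 0;
}

Note that an optimizer may eliminate the non-virtual loop entirely, which is precisely the overhead gap this kind of benchmark tries to expose.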
@@ -9,33 +9,33 @@
 namespace gtsam {
 /* ************************************************************************* */
 VectorValues DoglegOptimizerImpl::ComputeDoglegPoint(
-    double Delta, const VectorValues& x_u, const VectorValues& x_n) {
+    double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose) {
 
   // Get magnitude of each update and find out which segment Delta falls in
   assert(Delta >= 0.0);
   double DeltaSq = Delta*Delta;
   double x_u_norm_sq = x_u.vector().squaredNorm();
   double x_n_norm_sq = x_n.vector().squaredNorm();
-  cout << "Steepest descent magnitude " << sqrt(x_u_norm_sq) << ", Newton's method magnitude " << sqrt(x_n_norm_sq) << endl;
+  if(verbose) cout << "Steepest descent magnitude " << sqrt(x_u_norm_sq) << ", Newton's method magnitude " << sqrt(x_n_norm_sq) << endl;
   if(DeltaSq < x_u_norm_sq) {
     // Trust region is smaller than steepest descent update
     VectorValues x_d = VectorValues::SameStructure(x_u);
     x_d.vector() = x_u.vector() * sqrt(DeltaSq / x_u_norm_sq);
-    cout << "In steepest descent region with fraction " << sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
+    if(verbose) cout << "In steepest descent region with fraction " << sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
     return x_d;
   } else if(DeltaSq < x_n_norm_sq) {
     // Trust region boundary is between steepest descent point and Newton's method point
     return ComputeBlend(Delta, x_u, x_n);
   } else {
     assert(DeltaSq >= x_n_norm_sq);
-    cout << "In pure Newton's method region" << endl;
+    if(verbose) cout << "In pure Newton's method region" << endl;
     // Trust region is larger than Newton's method point
     return x_n;
   }
 }
 
 /* ************************************************************************* */
-VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n) {
+VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose) {
 
   // See doc/trustregion.lyx or doc/trustregion.pdf
 
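In summary (an editor's restatement of the three branches above, in the notation of the surrounding code), the dogleg point is determined by where the trust-region radius \Delta falls relative to the steepest-descent step \delta x_u and the Newton step \delta x_n:

\delta x_d = \begin{cases}
(\Delta / \|\delta x_u\|)\, \delta x_u, & \Delta^2 < \|\delta x_u\|^2 \\
(1-\tau)\,\delta x_u + \tau\,\delta x_n, & \|\delta x_u\|^2 \le \Delta^2 < \|\delta x_n\|^2 \\
\delta x_n, & \Delta^2 \ge \|\delta x_n\|^2
\end{cases}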
@@ -64,7 +64,7 @@ VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues&
   }
 
   // Compute blended point
-  cout << "In blend region with fraction " << tau << " of Newton's method point" << endl;
+  if(verbose) cout << "In blend region with fraction " << tau << " of Newton's method point" << endl;
   VectorValues blend = VectorValues::SameStructure(x_u);
   blend.vector() = (1. - tau) * x_u.vector() + tau * x_n.vector();
   return blend;
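The fraction tau used above is the solution of the standard trust-region boundary condition (restated here from the derivation the code points to in doc/trustregion.pdf, not quoted from it):

\|(1-\tau)\,\delta x_u + \tau\,\delta x_n\|^2 = \Delta^2,

which expands to a quadratic in \tau whose root in [0, 1] gives the blend weight:

\tau^2 \|\delta x_n - \delta x_u\|^2 + 2\tau\, \delta x_u^{\mathsf T}(\delta x_n - \delta x_u) + \|\delta x_u\|^2 - \Delta^2 = 0.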
@@ -75,7 +75,7 @@ struct DoglegOptimizerImpl {
   template<class M, class F, class VALUES>
   static IterationResult Iterate(
       double Delta, TrustRegionAdaptationMode mode, const M& Rd,
-      const F& f, const VALUES& x0, const Ordering& ordering, double f_error);
+      const F& f, const VALUES& x0, const Ordering& ordering, const double f_error, const bool verbose=false);
 
   /**
    * Compute the dogleg point given a trust region radius \f$ \Delta \f$. The
@@ -98,7 +98,7 @@ struct DoglegOptimizerImpl {
    * @param bayesNet The Bayes' net \f$ (R,d) \f$ as described above.
    * @return The dogleg point \f$ \delta x_d \f$
    */
-  static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& x_u, const VectorValues& x_n);
+  static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose=false);
 
   /** Compute the minimizer \f$ \delta x_u \f$ of the line search along the gradient direction \f$ g \f$ of
    * the function
@@ -132,7 +132,7 @@ struct DoglegOptimizerImpl {
    * @param xu Steepest descent minimizer
    * @param xn Newton's method minimizer
    */
-  static VectorValues ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n);
+  static VectorValues ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose=false);
 };
 
 
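Because verbose defaults to false in each declaration above, existing call sites compile unchanged and callers opt in explicitly. A minimal hypothetical call site (variable names mirror the implementation; this line is illustrative, not code from this commit):

// Request the dogleg diagnostics that were previously printed unconditionally:
VectorValues dx_d = DoglegOptimizerImpl::ComputeDoglegPoint(Delta, dx_u, dx_n, true);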
@@ -140,7 +140,7 @@ struct DoglegOptimizerImpl {
 template<class M, class F, class VALUES>
 typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
     double Delta, TrustRegionAdaptationMode mode, const M& Rd,
-    const F& f, const VALUES& x0, const Ordering& ordering, double f_error) {
+    const F& f, const VALUES& x0, const Ordering& ordering, const double f_error, const bool verbose) {
 
   // Compute steepest descent and Newton's method points
   VectorValues dx_u = ComputeSteepestDescentPoint(Rd);
@@ -156,7 +156,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
   // Compute dog leg point
   result.dx_d = ComputeDoglegPoint(Delta, dx_u, dx_n);
 
-  cout << "Delta = " << Delta << ", dx_d_norm = " << result.dx_d.vector().norm() << endl;
+  if(verbose) cout << "Delta = " << Delta << ", dx_d_norm = " << result.dx_d.vector().norm() << endl;
 
   // Compute expmapped solution
   const VALUES x_d(x0.retract(result.dx_d, ordering));
@@ -167,8 +167,8 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
   // Compute decrease in M
   const double new_M_error = jfg.error(result.dx_d);
 
-  cout << "f error: " << f_error << " -> " << result.f_error << endl;
-  cout << "M error: " << M_error << " -> " << new_M_error << endl;
+  if(verbose) cout << "f error: " << f_error << " -> " << result.f_error << endl;
+  if(verbose) cout << "M error: " << M_error << " -> " << new_M_error << endl;
 
   // Compute gain ratio. Here we take advantage of the invariant that the
   // Bayes' net error at zero is equal to the nonlinear error
@@ -176,7 +176,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
       0.5 :
       (f_error - result.f_error) / (M_error - new_M_error);
 
-  cout << "rho = " << rho << endl;
+  if(verbose) cout << "rho = " << rho << endl;
 
   if(rho >= 0.75) {
     // M agrees very well with f, so try to increase lambda
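Written out (an editor's restatement of the code above), the gain ratio compares the actual decrease of the nonlinear objective f with the decrease predicted by the linearized model M:

\rho = \frac{f_{\text{error}} - f_{\text{error}}^{\text{new}}}{M_{\text{error}} - M_{\text{error}}^{\text{new}}}

where \rho falls back to 0.5 in a degenerate case handled by the ternary whose condition lies just above this hunk; \rho \ge 0.75 indicates the model agrees well with f, so the trust region may be enlarged.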
@@ -14,8 +14,7 @@ check_PROGRAMS += testGraph
 check_PROGRAMS += testInference
 check_PROGRAMS += testGaussianJunctionTree
 check_PROGRAMS += testNonlinearEquality testNonlinearFactor testNonlinearFactorGraph
-check_PROGRAMS += testNonlinearOptimizer
-# check_PROGRAMS += testDoglegOptimizer # Uses debugging prints so commented out in SVN
+check_PROGRAMS += testNonlinearOptimizer testDoglegOptimizer
 check_PROGRAMS += testSymbolicBayesNet testSymbolicFactorGraph
 check_PROGRAMS += testTupleValues
 check_PROGRAMS += testNonlinearISAM