diff --git a/.cproject b/.cproject
index 91d9c7689..2439190b6 100644
--- a/.cproject
+++ b/.cproject
@@ -309,6 +309,14 @@
true
true
+
+ make
+ -j2
+ testGaussianFactor.run
+ true
+ true
+ true
+
make
-j2
@@ -335,7 +343,6 @@
make
-
tests/testBayesTree.run
true
false
@@ -343,7 +350,6 @@
make
-
testBinaryBayesNet.run
true
false
@@ -391,7 +397,6 @@
make
-
testSymbolicBayesNet.run
true
false
@@ -399,7 +404,6 @@
make
-
tests/testSymbolicFactor.run
true
false
@@ -407,7 +411,6 @@
make
-
testSymbolicFactorGraph.run
true
false
@@ -423,20 +426,11 @@
make
-
tests/testBayesTree
true
false
true
-
- make
- -j2
- testGaussianFactor.run
- true
- true
- true
-
make
-j5
@@ -525,22 +519,6 @@
false
true
-
- make
- -j2
- tests/testPose2.run
- true
- true
- true
-
-
- make
- -j2
- tests/testPose3.run
- true
- true
- true
-
make
-j2
@@ -557,6 +535,22 @@
true
true
+
+ make
+ -j2
+ tests/testPose2.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ tests/testPose3.run
+ true
+ true
+ true
+
make
-j2
@@ -581,26 +575,26 @@
true
true
-
+
make
- -j2
- all
+ -j5
+ testValues.run
true
true
true
-
+
make
- -j2
- check
+ -j5
+ testOrdering.run
true
true
true
-
+
make
- -j2
- clean
+ -j5
+ testKey.run
true
true
true
@@ -685,26 +679,26 @@
true
true
-
+
make
- -j5
- testValues.run
+ -j2
+ all
true
true
true
-
+
make
- -j5
- testOrdering.run
+ -j2
+ check
true
true
true
-
+
make
- -j5
- testKey.run
+ -j2
+ clean
true
true
true
@@ -959,7 +953,6 @@
make
-
testGraph.run
true
false
@@ -967,7 +960,6 @@
make
-
testJunctionTree.run
true
false
@@ -975,7 +967,6 @@
make
-
testSymbolicBayesNetB.run
true
false
@@ -1111,7 +1102,6 @@
make
-
testErrors.run
true
false
@@ -1575,6 +1565,7 @@
make
+
testSimulated2DOriented.run
true
false
@@ -1614,6 +1605,7 @@
make
+
testSimulated2D.run
true
false
@@ -1621,6 +1613,7 @@
make
+
testSimulated3D.run
true
false
@@ -1836,6 +1829,7 @@
make
+
tests/testGaussianISAM2
true
false
@@ -1857,14 +1851,110 @@
true
true
-
+
make
-j2
- install
+ testRot3.run
true
true
true
+
+ make
+ -j2
+ testRot2.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testPose3.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ timeRot3.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testPose2.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testCal3_S2.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testSimpleCamera.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testHomography2.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testCalibratedCamera.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ check
+ true
+ true
+ true
+
+
+ make
+ -j2
+ clean
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testPoint2.run
+ true
+ true
+ true
+
+
+ make
+ -j1
+ install
+ true
+ false
+ true
+
make
-j2
@@ -1875,10 +1965,10 @@
make
- -j5
+ -j1
check
true
- true
+ false
true
@@ -2058,7 +2148,6 @@
cpack
-
-G DEB
true
false
@@ -2066,7 +2155,6 @@
cpack
-
-G RPM
true
false
@@ -2074,7 +2162,6 @@
cpack
-
-G TGZ
true
false
@@ -2082,7 +2169,6 @@
cpack
-
--config CPackSourceConfig.cmake
true
false
@@ -2224,98 +2310,34 @@
true
true
-
+
make
- -j2
- testRot3.run
+ -j5
+ testSpirit.run
true
true
true
-
+
make
- -j2
- testRot2.run
+ -j5
+ testWrap.run
true
true
true
-
+
make
- -j2
- testPose3.run
+ -j5
+ check.wrap
true
true
true
-
+
make
- -j2
- timeRot3.run
- true
- true
- true
-
-
- make
- -j2
- testPose2.run
- true
- true
- true
-
-
- make
- -j2
- testCal3_S2.run
- true
- true
- true
-
-
- make
- -j2
- testSimpleCamera.run
- true
- true
- true
-
-
- make
- -j2
- testHomography2.run
- true
- true
- true
-
-
- make
- -j2
- testCalibratedCamera.run
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- testPoint2.run
+ -j5
+ wrap
true
true
true
@@ -2359,38 +2381,6 @@
false
true
-
- make
- -j5
- testSpirit.run
- true
- true
- true
-
-
- make
- -j5
- testWrap.run
- true
- true
- true
-
-
- make
- -j5
- check.wrap
- true
- true
- true
-
-
- make
- -j5
- wrap
- true
- true
- true
-
diff --git a/.settings/org.eclipse.cdt.core.prefs b/.settings/org.eclipse.cdt.core.prefs
deleted file mode 100644
index 3278fdadc..000000000
--- a/.settings/org.eclipse.cdt.core.prefs
+++ /dev/null
@@ -1,16 +0,0 @@
-eclipse.preferences.version=1
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.127261216/PATH/delimiter=\:
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.127261216/PATH/operation=append
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.127261216/PATH/value=/home/ydjian/matlab/R2012a/bin
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.127261216/append=true
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.127261216/appendContributed=true
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.1441575890/PATH/delimiter=\:
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.1441575890/PATH/operation=append
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.1441575890/PATH/value=/home/ydjian/matlab/R2012a/bin
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.1441575890/append=true
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.1441575890/appendContributed=true
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/PATH/delimiter=\:
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/PATH/operation=append
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/PATH/value=/home/ydjian/matlab/R2012a/bin
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/append=true
-environment/project/cdt.managedbuild.toolchain.gnu.macosx.base.1359703544/appendContributed=true
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 66fc43c50..d94f259f9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -109,7 +109,7 @@ if(CYGWIN OR MSVC OR WIN32)
set(Boost_USE_STATIC_LIBS 1)
endif()
find_package(Boost 1.43 COMPONENTS serialization system filesystem thread date_time regex REQUIRED)
-set(GTSAM_BOOST_LIBRARIES ${Boost_SERIALIZATION_LIBRARY})
+set(GTSAM_BOOST_LIBRARIES ${Boost_SERIALIZATION_LIBRARY} ${Boost_SYSTEM_LIBRARY})
# General build settings
include_directories(
diff --git a/examples/Pose2SLAMExample_graph.m b/examples/Pose2SLAMExample_graph.m
new file mode 100644
index 000000000..65271c053
--- /dev/null
+++ b/examples/Pose2SLAMExample_graph.m
@@ -0,0 +1,47 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% GTSAM Copyright 2010, Georgia Tech Research Corporation,
+% Atlanta, Georgia 30332-0415
+% All Rights Reserved
+% Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+%
+% See LICENSE for the license information
+%
+% @brief Read graph from file and perform GraphSLAM
+% @author Frank Dellaert
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%% Initialize graph, initial estimate, and odometry noise
+import gtsam.*
+model = noiseModel.Diagonal.Sigmas([0.05; 0.05; 5*pi/180]);
+maxID = 0;
+addNoise = false;
+smart = true;
+[graph,initial]=load2D('Data/w100-odom.graph',model,maxID,addNoise,smart);
+initial.print(sprintf('Initial estimate:\n'));
+
+%% Add a Gaussian prior on pose x_1
+import gtsam.*
+priorMean = Pose2(0.0, 0.0, 0.0); % prior mean is at origin
+priorNoise = noiseModel.Diagonal.Sigmas([0.01; 0.01; 0.01]);
+graph.addPosePrior(0, priorMean, priorNoise); % add directly to graph
+
+%% Plot Initial Estimate
+figure(1);clf
+P=initial.poses;
+plot(P(:,1),P(:,2),'g-*'); axis equal
+
+%% Optimize using Levenberg-Marquardt optimization with an ordering from colamd
+result = graph.optimize(initial,1);
+P=result.poses;
+hold on; plot(P(:,1),P(:,2),'b-*')
+result.print(sprintf('\nFinal result:\n'));
+
+%% Plot Covariance Ellipses
+marginals = graph.marginals(result);
+P={};
+for i=1:result.size()-1
+ pose_i = result.pose(i);
+ P{i}=marginals.marginalCovariance(i);
+ plotPose2(pose_i,'b',P{i})
+end
+fprintf(1,'%.5f %.5f %.5f\n',P{99})
\ No newline at end of file
diff --git a/gtsam.h b/gtsam.h
index e48ced5ee..646e44141 100644
--- a/gtsam.h
+++ b/gtsam.h
@@ -1472,6 +1472,10 @@ virtual class GenericStereoFactor : gtsam::NonlinearFactor {
};
typedef gtsam::GenericStereoFactor GenericStereoFactor3D;
+#include <gtsam/slam/dataset.h>
+pair<gtsam::pose2SLAM::Graph*, gtsam::pose2SLAM::Values*> load2D(string filename,
+    gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise, bool smart);
+
} //\namespace gtsam
//*************************************************************************
diff --git a/gtsam/CMakeLists.txt b/gtsam/CMakeLists.txt
index 39ae67507..56bce727b 100644
--- a/gtsam/CMakeLists.txt
+++ b/gtsam/CMakeLists.txt
@@ -85,7 +85,7 @@ message(STATUS "Install prefix: ${CMAKE_INSTALL_PREFIX}")
if (GTSAM_BUILD_STATIC_LIBRARY)
message(STATUS "Building GTSAM - static")
add_library(gtsam-static STATIC ${gtsam_srcs})
- target_link_libraries(gtsam-static ${Boost_SERIALIZATION_LIBRARY})
+ target_link_libraries(gtsam-static ${GTSAM_BOOST_LIBRARIES})
set_target_properties(gtsam-static PROPERTIES
OUTPUT_NAME gtsam
CLEAN_DIRECT_OUTPUT 1
@@ -100,7 +100,7 @@ endif (GTSAM_BUILD_STATIC_LIBRARY)
if (GTSAM_BUILD_SHARED_LIBRARY)
message(STATUS "Building GTSAM - shared")
add_library(gtsam-shared SHARED ${gtsam_srcs})
- target_link_libraries(gtsam-shared ${Boost_SERIALIZATION_LIBRARY})
+ target_link_libraries(gtsam-shared ${GTSAM_BOOST_LIBRARIES})
set_target_properties(gtsam-shared PROPERTIES
OUTPUT_NAME gtsam
CLEAN_DIRECT_OUTPUT 1
diff --git a/gtsam/geometry/Cal3DS2.cpp b/gtsam/geometry/Cal3DS2.cpp
index dcb6d6d54..8f15f81d2 100644
--- a/gtsam/geometry/Cal3DS2.cpp
+++ b/gtsam/geometry/Cal3DS2.cpp
@@ -78,6 +78,36 @@ Point2 Cal3DS2::uncalibrate(const Point2& p,
return Point2(fx_* x2 + s_ * y2 + u0_, fy_ * y2 + v0_) ;
}
+/* ************************************************************************* */
+Point2 Cal3DS2::calibrate(const Point2& pi, const double tol) const {
+ // Use the following fixed point iteration to invert the radial distortion.
+ // pn_{t+1} = (inv(K)*pi - dp(pn_{t})) / g(pn_{t})
+
+ const Point2 invKPi ((1 / fx_) * (pi.x() - u0_ - (s_ / fy_) * (pi.y() - v0_)),
+ (1 / fy_) * (pi.y() - v0_));
+
+ // initialize by ignoring the distortion entirely; this may be problematic for pixels near the image boundary
+ Point2 pn = invKPi;
+
+ // iterate until the uncalibrate is close to the actual pixel coordinate
+ const int maxIterations = 10;
+ int iteration;
+ for ( iteration = 0 ; iteration < maxIterations ; ++iteration ) {
+ if ( uncalibrate(pn).dist(pi) <= tol ) break;
+ const double x = pn.x(), y = pn.y(), xy = x*y, xx = x*x, yy = y*y ;
+ const double r = xx + yy ;
+ const double g = (1+k1_*r+k2_*r*r) ;
+ const double dx = 2*k3_*xy + k4_*(r+2*xx) ;
+ const double dy = 2*k4_*xy + k3_*(r+2*yy) ;
+ pn = (invKPi - Point2(dx,dy))/g ;
+ }
+
+ if ( iteration >= maxIterations )
+ throw std::runtime_error("Cal3DS2::calibrate fails to converge. need a better initialization");
+
+ return pn;
+}
+
/* ************************************************************************* */
Matrix Cal3DS2::D2d_intrinsic(const Point2& p) const {
//const double fx = fx_, fy = fy_, s = s_ ;
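The calibrate() added above inverts the distortion map numerically, since the polynomial model has no closed-form inverse. A minimal self-contained sketch of the same fixed-point scheme, using a purely radial model for illustration (the coefficients and tolerance below are made up, not GTSAM's):

    #include <cmath>
    #include <cstdio>

    // Forward (distortion) map for a purely radial model: pd = g(r2) * pn.
    static void distort(double k1, double k2, double xn, double yn,
                        double& xd, double& yd) {
      const double r2 = xn * xn + yn * yn;
      const double g = 1.0 + k1 * r2 + k2 * r2 * r2;
      xd = g * xn;
      yd = g * yn;
    }

    int main() {
      const double k1 = 1e-3, k2 = 2e-3, tol = 1e-9;
      double xd, yd;
      distort(k1, k2, 0.5, 0.5, xd, yd);   // synthesize a distorted point

      // Fixed-point iteration: pn_{t+1} = pd / g(pn_t), seeded with pn_0 = pd.
      double xn = xd, yn = yd;
      for (int it = 0; it < 10; ++it) {
        double xc, yc;
        distort(k1, k2, xn, yn, xc, yc);   // re-distort the current estimate
        if (std::hypot(xc - xd, yc - yd) <= tol) break;
        const double r2 = xn * xn + yn * yn;
        const double g = 1.0 + k1 * r2 + k2 * r2 * r2;
        xn = xd / g;
        yn = yd / g;
      }
      std::printf("undistorted: (%.9f, %.9f)\n", xn, yn);  // ~ (0.5, 0.5)
      return 0;
    }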
diff --git a/gtsam/geometry/Cal3DS2.h b/gtsam/geometry/Cal3DS2.h
index d26b9a19d..d33b452c1 100644
--- a/gtsam/geometry/Cal3DS2.h
+++ b/gtsam/geometry/Cal3DS2.h
@@ -96,6 +96,9 @@ public:
boost::optional<Matrix&> H1 = boost::none,
boost::optional<Matrix&> H2 = boost::none) const ;
+ /// Convert a pixel coordinate to an ideal coordinate
+ Point2 calibrate(const Point2& p, const double tol=1e-5) const;
+
/// Derivative of uncalibrate wrpt intrinsic coordinates
Matrix D2d_intrinsic(const Point2& p) const ;
diff --git a/gtsam/geometry/Cal3_S2.h b/gtsam/geometry/Cal3_S2.h
index 9d6cb4fbf..3c88aaeb2 100644
--- a/gtsam/geometry/Cal3_S2.h
+++ b/gtsam/geometry/Cal3_S2.h
@@ -136,8 +136,8 @@ namespace gtsam {
/// convert image coordinates uv to intrinsic coordinates xy
Point2 calibrate(const Point2& p) const {
const double u = p.x(), v = p.y();
- return Point2((1 / fx_) * (u - u0_ - (s_ / fy_) * (v - v0_)), (1 / fy_)
- * (v - v0_));
+ return Point2((1 / fx_) * (u - u0_ - (s_ / fy_) * (v - v0_)),
+ (1 / fy_) * (v - v0_));
}
/// @}
diff --git a/gtsam/geometry/tests/testCal3DS2.cpp b/gtsam/geometry/tests/testCal3DS2.cpp
index 9f8777e49..c73ae1182 100644
--- a/gtsam/geometry/tests/testCal3DS2.cpp
+++ b/gtsam/geometry/tests/testCal3DS2.cpp
@@ -29,7 +29,7 @@ static Cal3DS2 K(500, 100, 0.1, 320, 240, 1e-3, 2.0*1e-3, 3.0*1e-3, 4.0*1e-3);
static Point2 p(2,3);
/* ************************************************************************* */
-TEST( Cal3DS2, calibrate)
+TEST( Cal3DS2, uncalibrate)
{
Vector k = K.k() ;
double r = p.x()*p.x() + p.y()*p.y() ;
@@ -43,6 +43,14 @@ TEST( Cal3DS2, calibrate)
CHECK(assert_equal(q,p_i));
}
+TEST( Cal3DS2, calibrate )
+{
+ Point2 pn(0.5, 0.5);
+ Point2 pi = K.uncalibrate(pn);
+ Point2 pn_hat = K.calibrate(pi);
+ CHECK( pn.equals(pn_hat, 1e-5));
+}
+
Point2 uncalibrate_(const Cal3DS2& k, const Point2& pt) { return k.uncalibrate(pt); }
/* ************************************************************************* */
diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp
index c447a39e3..05ba56a21 100644
--- a/gtsam/nonlinear/GaussNewtonOptimizer.cpp
+++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp
@@ -16,8 +16,6 @@
* @date Feb 26, 2012
*/
-#include <gtsam/linear/GaussianJunctionTree.h>
-#include <gtsam/inference/EliminationTree.h>
#include
using namespace std;
@@ -32,22 +30,8 @@ void GaussNewtonOptimizer::iterate() {
// Linearize graph
GaussianFactorGraph::shared_ptr linear = graph_.linearize(current.values, *params_.ordering);
- // Optimize
- VectorValues delta;
- {
- if ( params_.isMultifrontal() ) {
- delta = GaussianJunctionTree(*linear).optimize(params_.getEliminationFunction());
- }
- else if ( params_.isSequential() ) {
- delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(*linear)->eliminate(params_.getEliminationFunction()));
- }
- else if ( params_.isCG() ) {
- throw runtime_error("todo: ");
- }
- else {
- throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination");
- }
- }
+ // Solve Factor Graph
+ const VectorValues delta = solveGaussianFactorGraph(*linear, params_);
// Maybe show output
if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta.print("delta");
diff --git a/gtsam/nonlinear/GradientDescentOptimizer.cpp b/gtsam/nonlinear/GradientDescentOptimizer.cpp
deleted file mode 100644
index 4a471de4d..000000000
--- a/gtsam/nonlinear/GradientDescentOptimizer.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * @file GradientDescentOptimizer.cpp
- * @brief
- * @author ydjian
- * @date Jun 11, 2012
- */
-
-#include <gtsam/nonlinear/GradientDescentOptimizer.h>
-#include <gtsam/nonlinear/NonlinearFactorGraph.h>
-#include <gtsam/linear/GaussianFactorGraph.h>
-#include <gtsam/linear/JacobianFactorGraph.h>
-#include <gtsam/linear/HessianFactor.h>
-
-#include <cmath>
-
-using namespace std;
-
-namespace gtsam {
-
-/**
- * Return the gradient vector of a nonlinear factor given a linearization point and a variable ordering
- * Can be moved to NonlinearFactorGraph.h if desired
- */
-void gradientInPlace(const NonlinearFactorGraph &nfg, const Values &values, const Ordering &ordering, VectorValues &g) {
-
- // Linearize graph
- GaussianFactorGraph::shared_ptr linear = nfg.linearize(values, ordering);
- FactorGraph<JacobianFactor> jfg; jfg.reserve(linear->size());
- BOOST_FOREACH(const GaussianFactorGraph::sharedFactor& factor, *linear) {
- if(boost::shared_ptr<JacobianFactor> jf = boost::dynamic_pointer_cast<JacobianFactor>(factor))
- jfg.push_back((jf));
- else
- jfg.push_back(boost::make_shared<JacobianFactor>(*factor));
- }
-
- // compute the gradient direction
- gradientAtZero(jfg, g);
-}
-
-
-/* ************************************************************************* */
-void GradientDescentOptimizer::iterate() {
-
-
- // Pull out parameters we'll use
- const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity;
-
- // compute the gradient vector
- gradientInPlace(graph_, state_.values, *ordering_, *gradient_);
-
- /* normalize it such that it becomes a unit vector */
- const double g = gradient_->vector().norm();
- gradient_->vector() /= g;
-
- // perform the golden section search algorithm to decide the the optimal step size
- // detail refer to http://en.wikipedia.org/wiki/Golden_section_search
- VectorValues step = VectorValues::SameStructure(*gradient_);
- const double phi = 0.5*(1.0+std::sqrt(5.0)), resphi = 2.0 - phi, tau = 1e-5;
- double minStep = -1.0, maxStep = 0,
- newStep = minStep + (maxStep-minStep) / (phi+1.0) ;
-
- step.vector() = newStep * gradient_->vector();
- Values newValues = state_.values.retract(step, *ordering_);
- double newError = graph_.error(newValues);
-
- if ( nloVerbosity ) {
- std::cout << "minStep = " << minStep << ", maxStep = " << maxStep << ", newStep = " << newStep << ", newError = " << newError << std::endl;
- }
-
- while (true) {
- const bool flag = (maxStep - newStep > newStep - minStep) ? true : false ;
- const double testStep = flag ?
- newStep + resphi * (maxStep - newStep) : newStep - resphi * (newStep - minStep);
-
- if ( (maxStep- minStep) < tau * (std::fabs(testStep) + std::fabs(newStep)) ) {
- newStep = 0.5*(minStep+maxStep);
- step.vector() = newStep * gradient_->vector();
- newValues = state_.values.retract(step, *ordering_);
- newError = graph_.error(newValues);
-
- if ( newError < state_.error ) {
- state_.values = state_.values.retract(step, *ordering_);
- state_.error = graph_.error(state_.values);
- }
-
- break;
- }
-
- step.vector() = testStep * gradient_->vector();
- const Values testValues = state_.values.retract(step, *ordering_);
- const double testError = graph_.error(testValues);
-
- // update the working range
- if ( testError >= newError ) {
- if ( flag ) maxStep = testStep;
- else minStep = testStep;
- }
- else {
- if ( flag ) {
- minStep = newStep;
- newStep = testStep;
- newError = testError;
- }
- else {
- maxStep = newStep;
- newStep = testStep;
- newError = testError;
- }
- }
-
- if ( nloVerbosity ) {
- std::cout << "minStep = " << minStep << ", maxStep = " << maxStep << ", newStep = " << newStep << ", newError = " << newError << std::endl;
- }
- }
- // Increment the iteration counter
- ++state_.iterations;
-}
-
-double ConjugateGradientOptimizer::System::error(const State &state) const {
- return graph_.error(state);
-}
-
-ConjugateGradientOptimizer::System::Gradient ConjugateGradientOptimizer::System::gradient(const State &state) const {
- Gradient result = state.zeroVectors(ordering_);
- gradientInPlace(graph_, state, ordering_, result);
- return result;
-}
-ConjugateGradientOptimizer::System::State ConjugateGradientOptimizer::System::advance(const State &current, const double alpha, const Gradient &g) const {
- Gradient step = g;
- step.vector() *= alpha;
- return current.retract(step, ordering_);
-}
-
-Values ConjugateGradientOptimizer::optimize() {
- return conjugateGradient(System(graph_, *ordering_), initialEstimate_, params_, !cg_);
-}
-
-} /* namespace gtsam */
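The deleted iterate() above chose the step size with a golden-section line search (the same scheme survives as lineSearch() in the renamed header below). As a reference, here is a standalone sketch of golden-section minimization, assuming only a unimodal 1-D function; the interval and tolerance are illustrative:

    #include <cmath>
    #include <cstdio>

    // Golden-section search for the minimizer of f on [a, b].
    // Assumes f is unimodal on the interval; see the Wikipedia page
    // cited in the deleted code for the derivation.
    template <class F>
    double goldenSection(F f, double a, double b, double tol = 1e-8) {
      const double phi = 0.5 * (1.0 + std::sqrt(5.0));   // golden ratio
      const double resphi = 2.0 - phi;                   // ~0.382
      double x1 = a + resphi * (b - a);
      double x2 = b - resphi * (b - a);
      double f1 = f(x1), f2 = f(x2);
      while (b - a > tol) {
        if (f1 < f2) {           // minimum lies in [a, x2]
          b = x2; x2 = x1; f2 = f1;
          x1 = a + resphi * (b - a); f1 = f(x1);
        } else {                 // minimum lies in [x1, b]
          a = x1; x1 = x2; f1 = f2;
          x2 = b - resphi * (b - a); f2 = f(x2);
        }
      }
      return 0.5 * (a + b);
    }

    int main() {
      // Minimize a simple quadratic; the true minimizer is 0.3.
      const double xmin = goldenSection(
          [](double x) { return (x - 0.3) * (x - 0.3); }, -1.0, 1.0);
      std::printf("argmin ~= %.6f\n", xmin);
      return 0;
    }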
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
index 15c2262fa..256763c59 100644
--- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
+++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
@@ -19,12 +19,9 @@
#include
#include
+#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
#include // For NegativeMatrixException
-#include <gtsam/linear/GaussianJunctionTree.h>
-#include <gtsam/inference/EliminationTree.h>
-#include <gtsam/linear/SimpleSPCGSolver.h>
-#include <gtsam/linear/SubgraphSolver.h>
#include
#include
@@ -106,34 +103,8 @@ void LevenbergMarquardtOptimizer::iterate() {
// Try solving
try {
-
- // Optimize
- VectorValues delta;
- if ( params_.isMultifrontal() ) {
- delta = GaussianJunctionTree(dampedSystem).optimize(params_.getEliminationFunction());
- }
- else if ( params_.isSequential() ) {
- delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(dampedSystem)->eliminate(params_.getEliminationFunction()));
- }
- else if ( params_.isCG() ) {
-
- if ( !params_.iterativeParams ) throw runtime_error("LMSolver: cg parameter has to be assigned ...");
-
- if ( boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params_.iterativeParams)) {
- SimpleSPCGSolver solver (dampedSystem, *boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params_.iterativeParams));
- delta = solver.optimize();
- }
- else if ( boost::dynamic_pointer_cast<SubgraphSolverParameters>(params_.iterativeParams) ) {
- SubgraphSolver solver (dampedSystem, *boost::dynamic_pointer_cast<SubgraphSolverParameters>(params_.iterativeParams));
- delta = solver.optimize();
- }
- else {
- throw runtime_error("LMSolver: special cg parameter type is not handled in LM solver ...");
- }
- }
- else {
- throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination");
- }
+ // Solve Damped Gaussian Factor Graph
+ const VectorValues delta = solveGaussianFactorGraph(dampedSystem, params_);
if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl;
if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta.print("delta");
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
new file mode 100644
index 000000000..f0b3852ef
--- /dev/null
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
@@ -0,0 +1,64 @@
+/**
+ * @file GradientDescentOptimizer.cpp
+ * @brief
+ * @author ydjian
+ * @date Jun 11, 2012
+ */
+
+#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
+#include <gtsam/nonlinear/NonlinearFactorGraph.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
+#include <gtsam/linear/JacobianFactorGraph.h>
+#include <gtsam/linear/HessianFactor.h>
+
+#include <cmath>
+
+using namespace std;
+
+namespace gtsam {
+
+/* Return the gradient vector of a nonlinear factor given a linearization point and a variable ordering
+ * Can be moved to NonlinearFactorGraph.h if desired */
+void gradientInPlace(const NonlinearFactorGraph &nfg, const Values &values, const Ordering &ordering, VectorValues &g) {
+ // Linearize graph
+ GaussianFactorGraph::shared_ptr linear = nfg.linearize(values, ordering);
+ FactorGraph<JacobianFactor> jfg; jfg.reserve(linear->size());
+ BOOST_FOREACH(const GaussianFactorGraph::sharedFactor& factor, *linear) {
+ if(boost::shared_ptr<JacobianFactor> jf = boost::dynamic_pointer_cast<JacobianFactor>(factor))
+ jfg.push_back((jf));
+ else
+ jfg.push_back(boost::make_shared<JacobianFactor>(*factor));
+ }
+ // compute the gradient direction
+ gradientAtZero(jfg, g);
+}
+
+double NonlinearConjugateGradientOptimizer::System::error(const State &state) const {
+ return graph_.error(state);
+}
+
+NonlinearConjugateGradientOptimizer::System::Gradient NonlinearConjugateGradientOptimizer::System::gradient(const State &state) const {
+ Gradient result = state.zeroVectors(ordering_);
+ gradientInPlace(graph_, state, ordering_, result);
+ return result;
+}
+NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOptimizer::System::advance(const State &current, const double alpha, const Gradient &g) const {
+ Gradient step = g;
+ step.vector() *= alpha;
+ return current.retract(step, ordering_);
+}
+
+void NonlinearConjugateGradientOptimizer::iterate() {
+ size_t dummy ;
+ boost::tie(state_.values, dummy) = nonlinearConjugateGradient(System(graph_, *ordering_), state_.values, params_, true /* single iterations */);
+ ++state_.iterations;
+ state_.error = graph_.error(state_.values);
+}
+
+const Values& NonlinearConjugateGradientOptimizer::optimize() {
+ boost::tie(state_.values, state_.iterations) = nonlinearConjugateGradient(System(graph_, *ordering_), state_.values, params_, false /* up to convergent */);
+ state_.error = graph_.error(state_.values);
+ return state_.values;
+}
+
+} /* namespace gtsam */
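gradientInPlace() linearizes the nonlinear graph and hands a graph of JacobianFactors to gradientAtZero, i.e. it evaluates the gradient of the linearized objective at delta = 0. For linear least squares this is the standard identity

    E(\delta) = \tfrac{1}{2}\,\lVert A\delta - b \rVert^2, \qquad
    \nabla E(\delta) = A^\top (A\delta - b), \qquad
    \nabla E(0) = -A^\top b,

which is why the BOOST_FOREACH loop above first converts every factor to a JacobianFactor, the (A, b) form the identity needs.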
diff --git a/gtsam/nonlinear/GradientDescentOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
similarity index 71%
rename from gtsam/nonlinear/GradientDescentOptimizer.h
rename to gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 2407ee39f..5bac11f5e 100644
--- a/gtsam/nonlinear/GradientDescentOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -1,7 +1,7 @@
/**
* @file GradientDescentOptimizer.cpp
* @brief
- * @author ydjian
+ * @author Yong-Dian Jian
* @date Jun 11, 2012
*/
@@ -9,75 +9,31 @@
#include
#include
+#include
namespace gtsam {
-/* an implementation of gradient-descent method using the NLO interface */
-
-class GradientDescentState : public NonlinearOptimizerState {
-
+/** An implementation of the nonlinear cg method using the template below */
+class NonlinearConjugateGradientState : public NonlinearOptimizerState {
public:
-
typedef NonlinearOptimizerState Base;
-
- GradientDescentState(const NonlinearFactorGraph& graph, const Values& values)
+ NonlinearConjugateGradientState(const NonlinearFactorGraph& graph, const Values& values)
: Base(graph, values) {}
};
-class GradientDescentOptimizer : public NonlinearOptimizer {
-
-public:
-
- typedef boost::shared_ptr<GradientDescentOptimizer> shared_ptr;
- typedef NonlinearOptimizer Base;
- typedef GradientDescentState States;
- typedef NonlinearOptimizerParams Parameters;
-
-protected:
-
- Parameters params_;
- States state_;
- Ordering::shared_ptr ordering_;
- VectorValues::shared_ptr gradient_;
-
-public:
-
- GradientDescentOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Parameters& params = Parameters())
- : Base(graph), params_(params), state_(graph, initialValues),
- ordering_(initialValues.orderingArbitrary()),
- gradient_(new VectorValues(initialValues.zeroVectors(*ordering_))) {}
-
- virtual ~GradientDescentOptimizer() {}
-
- virtual void iterate();
-
-protected:
-
- virtual const NonlinearOptimizerState& _state() const { return state_; }
- virtual const NonlinearOptimizerParams& _params() const { return params_; }
-};
-
-
-/**
- * An implementation of the nonlinear cg method using the template below
- */
-
-class ConjugateGradientOptimizer {
-
+class NonlinearConjugateGradientOptimizer : public NonlinearOptimizer {
+ /* a class for the nonlinearConjugateGradient template */
class System {
-
public:
-
typedef Values State;
typedef VectorValues Gradient;
+ typedef NonlinearOptimizerParams Parameters;
protected:
-
- NonlinearFactorGraph graph_;
- Ordering ordering_;
+ const NonlinearFactorGraph &graph_;
+ const Ordering &ordering_;
public:
-
System(const NonlinearFactorGraph &graph, const Ordering &ordering): graph_(graph), ordering_(ordering) {}
double error(const State &state) const ;
Gradient gradient(const State &state) const ;
@@ -85,35 +41,32 @@ class ConjugateGradientOptimizer {
};
public:
-
+ typedef NonlinearOptimizer Base;
+ typedef NonlinearConjugateGradientState States;
typedef NonlinearOptimizerParams Parameters;
- typedef boost::shared_ptr<ConjugateGradientOptimizer> shared_ptr;
+ typedef boost::shared_ptr<NonlinearConjugateGradientOptimizer> shared_ptr;
protected:
-
- NonlinearFactorGraph graph_;
- Values initialEstimate_;
+ States state_;
Parameters params_;
Ordering::shared_ptr ordering_;
VectorValues::shared_ptr gradient_;
- bool cg_;
public:
- ConjugateGradientOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
- const Parameters& params = Parameters(), const bool cg = true)
- : graph_(graph), initialEstimate_(initialValues), params_(params),
- ordering_(initialValues.orderingArbitrary()),
- gradient_(new VectorValues(initialValues.zeroVectors(*ordering_))),
- cg_(cg) {}
+ NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+ const Parameters& params = Parameters())
+ : Base(graph), state_(graph, initialValues), params_(params), ordering_(initialValues.orderingArbitrary()),
+ gradient_(new VectorValues(initialValues.zeroVectors(*ordering_))){}
- virtual ~ConjugateGradientOptimizer() {}
- virtual Values optimize () ;
+ virtual ~NonlinearConjugateGradientOptimizer() {}
+ virtual void iterate();
+ virtual const Values& optimize ();
+ virtual const NonlinearOptimizerState& _state() const { return state_; }
+ virtual const NonlinearOptimizerParams& _params() const { return params_; }
};
-/**
- * Implement the golden-section line search algorithm
- */
+/** Implement the golden-section line search algorithm */
template <class S, class V, class W>
double lineSearch(const S &system, const V currentValues, const W &gradient) {
@@ -171,18 +124,20 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
*
* The last parameter is a switch between gradient-descent and conjugate gradient
*/
-
template <class S, class V>
-V conjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool gradientDescent) {
+boost::tuple<V, Index> nonlinearConjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool singleIteration, const bool gradientDescent = false) {
- GTSAM_CONCEPT_MANIFOLD_TYPE(V);
+ // GTSAM_CONCEPT_MANIFOLD_TYPE(V);
+
+ Index iteration = 0;
// check if we're already close enough
double currentError = system.error(initial);
if(currentError <= params.errorTol) {
- if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+ if (params.verbosity >= NonlinearOptimizerParams::ERROR){
std::cout << "Exiting, as error = " << currentError << " < " << params.errorTol << std::endl;
- return initial;
+ }
+ return boost::tie(initial, iteration);
}
V currentValues = initial;
@@ -194,14 +149,12 @@ V conjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool gradientDescent) {
double alpha = lineSearch(system, currentValues, direction);
currentValues = system.advance(prevValues, alpha, direction);
currentError = system.error(currentValues);
- Index iteration = 0;
// Maybe show output
if (params.verbosity >= NonlinearOptimizerParams::ERROR) std::cout << "Initial error: " << currentError << std::endl;
// Iterative loop
do {
-
if ( gradientDescent == true) {
direction = system.gradient(currentValues);
}
@@ -222,13 +175,14 @@ V conjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool gradientDescent) {
// Maybe show output
if(params.verbosity >= NonlinearOptimizerParams::ERROR) std::cout << "currentError: " << currentError << std::endl;
} while( ++iteration < params.maxIterations &&
+ !singleIteration &&
!checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, params.errorTol, prevError, currentError, params.verbosity));
// Printing if verbose
if (params.verbosity >= NonlinearOptimizerParams::ERROR && iteration >= params.maxIterations)
- std::cout << "Terminating because reached maximum iterations" << std::endl;
+ std::cout << "nonlinearConjugateGradient: Terminating because reached maximum iterations" << std::endl;
- return currentValues;
+ return boost::tie(currentValues, iteration);
}
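The hunks above show the line search and the convergence loop, but the direction update between them sits in unchanged lines and is elided here. For orientation, below is a toy nonlinear CG loop over the same System concept (error/gradient/advance); the Fletcher-Reeves beta and the backtracking search are assumptions for illustration, not necessarily what the elided GTSAM code uses:

    #include <cmath>
    #include <cstdio>

    // Toy 2-D "System" mirroring the error/gradient/advance concept above.
    // Minimizes f(x, y) = (x - 1)^2 + 10 * (y + 2)^2.
    struct State { double x, y; };

    struct System {
      double error(const State& s) const {
        return (s.x - 1) * (s.x - 1) + 10 * (s.y + 2) * (s.y + 2);
      }
      State gradient(const State& s) const {          // State doubles as Gradient
        return State{2 * (s.x - 1), 20 * (s.y + 2)};
      }
      State advance(const State& s, double alpha, const State& d) const {
        return State{s.x - alpha * d.x, s.y - alpha * d.y};  // step against d
      }
    };

    int main() {
      System sys;
      State cur{0, 0};
      State g = sys.gradient(cur), d = g;             // initial direction
      double gg = g.x * g.x + g.y * g.y;
      for (int it = 0; it < 100 && gg > 1e-12; ++it) {
        // crude backtracking line search (stand-in for lineSearch above)
        double alpha = 1.0;
        while (sys.error(sys.advance(cur, alpha, d)) >= sys.error(cur) &&
               alpha > 1e-12)
          alpha *= 0.5;
        cur = sys.advance(cur, alpha, d);
        const State gn = sys.gradient(cur);
        const double ggn = gn.x * gn.x + gn.y * gn.y;
        const double beta = ggn / gg;                 // Fletcher-Reeves (assumed)
        d = State{gn.x + beta * d.x, gn.y + beta * d.y};
        g = gn; gg = ggn;
      }
      std::printf("minimum near (%.4f, %.4f)\n", cur.x, cur.y);  // ~ (1, -2)
      return 0;
    }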
diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp
new file mode 100644
index 000000000..0fe0440ae
--- /dev/null
+++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp
@@ -0,0 +1,81 @@
+/**
+ * @file SuccessiveLinearizationOptimizer.cpp
+ * @brief
+ * @date Jul 24, 2012
+ * @author Yong-Dian Jian
+ */
+
+#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
+#include <gtsam/linear/GaussianJunctionTree.h>
+#include <gtsam/inference/EliminationTree.h>
+#include <gtsam/linear/SimpleSPCGSolver.h>
+#include <gtsam/linear/SubgraphSolver.h>
+#include <iostream>
+#include <stdexcept>
+
+namespace gtsam {
+
+void SuccessiveLinearizationParams::print(const std::string& str) const {
+ NonlinearOptimizerParams::print(str);
+ switch ( linearSolverType ) {
+ case MULTIFRONTAL_CHOLESKY:
+ std::cout << " linear solver type: MULTIFRONTAL CHOLESKY\n";
+ break;
+ case MULTIFRONTAL_QR:
+ std::cout << " linear solver type: MULTIFRONTAL QR\n";
+ break;
+ case SEQUENTIAL_CHOLESKY:
+ std::cout << " linear solver type: SEQUENTIAL CHOLESKY\n";
+ break;
+ case SEQUENTIAL_QR:
+ std::cout << " linear solver type: SEQUENTIAL QR\n";
+ break;
+ case CHOLMOD:
+ std::cout << " linear solver type: CHOLMOD\n";
+ break;
+ case CG:
+ std::cout << " linear solver type: CG\n";
+ break;
+ default:
+ std::cout << " linear solver type: (invalid)\n";
+ break;
+ }
+
+ if(ordering)
+ std::cout << " ordering: custom\n";
+ else
+ std::cout << " ordering: COLAMD\n";
+
+ std::cout.flush();
+}
+
+VectorValues solveGaussianFactorGraph(const GaussianFactorGraph &gfg, const SuccessiveLinearizationParams &params) {
+ VectorValues delta;
+ if ( params.isMultifrontal() ) {
+ delta = GaussianJunctionTree(gfg).optimize(params.getEliminationFunction());
+ }
+ else if ( params.isSequential() ) {
+ delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(gfg)->eliminate(params.getEliminationFunction()));
+ }
+ else if ( params.isCG() ) {
+ if ( !params.iterativeParams ) throw std::runtime_error("solveGaussianFactorGraph: cg parameter has to be assigned ...");
+ if ( boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params.iterativeParams)) {
+ SimpleSPCGSolver solver (gfg, *boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params.iterativeParams));
+ delta = solver.optimize();
+ }
+ else if ( boost::dynamic_pointer_cast<SubgraphSolverParameters>(params.iterativeParams) ) {
+ SubgraphSolver solver (gfg, *boost::dynamic_pointer_cast<SubgraphSolverParameters>(params.iterativeParams));
+ delta = solver.optimize();
+ }
+ else {
+ throw std::runtime_error("solveGaussianFactorGraph: special cg parameter type is not handled in LM solver ...");
+ }
+ }
+ else {
+ throw std::runtime_error("solveGaussianFactorGraph: Optimization parameter is invalid");
+ }
+ return delta;
+}
+
+}
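With this wrapper in place, both GaussNewtonOptimizer::iterate() and LevenbergMarquardtOptimizer::iterate() collapse to a linearize-then-solve pattern. A hedged sketch of such a call site, assuming the GTSAM 2.x API visible in this diff (the helper name solveOnce is hypothetical):

    // Sketch only: a hypothetical call site, assuming the GTSAM 2.x API
    // visible in this diff (solveGaussianFactorGraph, linearSolverType).
    #include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>

    using namespace gtsam;

    VectorValues solveOnce(const NonlinearFactorGraph& graph,
                           const Values& values, const Ordering& ordering) {
      // Linearize at the current estimate ...
      GaussianFactorGraph::shared_ptr linear = graph.linearize(values, ordering);

      // ... then let the wrapper dispatch on the configured solver type.
      SuccessiveLinearizationParams params;
      params.linearSolverType = SuccessiveLinearizationParams::MULTIFRONTAL_CHOLESKY;
      return solveGaussianFactorGraph(*linear, params);
    }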
diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h
index b46686fe7..fc44621f6 100644
--- a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h
+++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h
@@ -40,7 +40,6 @@ public:
IterativeOptimizationParameters::shared_ptr iterativeParams; ///< The container for iterativeOptimization parameters. used in CG Solvers.
SuccessiveLinearizationParams() : linearSolverType(MULTIFRONTAL_CHOLESKY) {}
-
virtual ~SuccessiveLinearizationParams() {}
virtual void print(const std::string& str = "") const {
@@ -62,7 +61,7 @@ public:
std::cout << " linear solver type: CHOLMOD\n";
break;
case CONJUGATE_GRADIENT:
- std::cout << " linear solver type: CG\n";
+ std::cout << " linear solver type: CONJUGATE GRADIENT\n";
break;
default:
std::cout << " linear solver type: (invalid)\n";
@@ -77,7 +76,7 @@ public:
std::cout.flush();
}
- inline bool isMultifrontal() const {
+ inline bool isMultifrontal() const {
return (linearSolverType == MULTIFRONTAL_CHOLESKY) || (linearSolverType == MULTIFRONTAL_QR); }
inline bool isSequential() const {
@@ -87,7 +86,9 @@ public:
inline bool isCG() const { return (linearSolverType == CONJUGATE_GRADIENT); }
- GaussianFactorGraph::Eliminate getEliminationFunction() {
+ virtual void print(const std::string& str) const;
+
+ GaussianFactorGraph::Eliminate getEliminationFunction() const {
switch (linearSolverType) {
case MULTIFRONTAL_CHOLESKY:
case SEQUENTIAL_CHOLESKY:
@@ -132,4 +133,7 @@ private:
}
};
+/* a wrapper for solving a GaussianFactorGraph according to the parameters */
+VectorValues solveGaussianFactorGraph(const GaussianFactorGraph &gfg, const SuccessiveLinearizationParams &params) ;
+
} /* namespace gtsam */
diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp
index e1c282594..0502bb69b 100644
--- a/gtsam/slam/dataset.cpp
+++ b/gtsam/slam/dataset.cpp
@@ -90,10 +90,8 @@ pair<pose2SLAM::Graph::shared_ptr, pose2SLAM::Values::shared_ptr> load2D(
bool addNoise, bool smart) {
cout << "Will try to read " << filename << endl;
ifstream is(filename.c_str());
- if (!is) {
- cout << "load2D: can not find the file!";
- exit(-1);
- }
+ if (!is)
+ throw std::invalid_argument("load2D: can not find the file!");
pose2SLAM::Values::shared_ptr poses(new pose2SLAM::Values);
pose2SLAM::Graph::shared_ptr graph(new pose2SLAM::Graph);
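Replacing exit(-1) with a throw matters because load2D is called from long-lived hosts such as the MATLAB wrapper, where killing the process is unacceptable. A self-contained sketch of the pattern with a stand-in loader (loadFile and missing.graph are hypothetical, not GTSAM API):

    #include <fstream>
    #include <iostream>
    #include <iterator>
    #include <stdexcept>
    #include <string>

    // Stand-in for load2D's new error path: throw instead of exit(-1),
    // so a host process (e.g. the MATLAB wrapper) can recover.
    std::string loadFile(const std::string& filename) {
      std::ifstream is(filename.c_str());
      if (!is) throw std::invalid_argument("loadFile: can not find the file!");
      return std::string((std::istreambuf_iterator<char>(is)),
                         std::istreambuf_iterator<char>());
    }

    int main() {
      try {
        loadFile("missing.graph");  // hypothetical path, triggers the throw
      } catch (const std::invalid_argument& e) {
        std::cerr << "recovered: " << e.what() << '\n';  // process keeps running
      }
      return 0;
    }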
diff --git a/tests/testGradientDescentOptimizer.cpp b/tests/testGradientDescentOptimizer.cpp
index c72c6f716..6f03bc017 100644
--- a/tests/testGradientDescentOptimizer.cpp
+++ b/tests/testGradientDescentOptimizer.cpp
@@ -7,7 +7,7 @@
#include
#include
-#include <gtsam/nonlinear/GradientDescentOptimizer.h>
+#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
#include
#include
#include
@@ -55,7 +55,6 @@ boost::tuple<NonlinearFactorGraph, Values> generateProblem() {
return boost::tie(graph, initialEstimate);
}
-
/* ************************************************************************* */
TEST(optimize, GradientDescentOptimizer) {
@@ -94,8 +93,7 @@ TEST(optimize, ConjugateGradientOptimizer) {
param.maxIterations = 500; /* requires a larger number of iterations to converge */
param.verbosity = NonlinearOptimizerParams::SILENT;
-
- ConjugateGradientOptimizer optimizer(graph, initialEstimate, param, true);
+ NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param);
Values result = optimizer.optimize();
// cout << "cg final error = " << graph.error(result) << endl;
@@ -103,32 +101,6 @@ TEST(optimize, ConjugateGradientOptimizer) {
DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);
}
-/* ************************************************************************* */
-TEST(optimize, GradientDescentOptimizer2) {
-
- NonlinearFactorGraph graph;
- Values initialEstimate;
-
- boost::tie(graph, initialEstimate) = generateProblem();
-// cout << "initial error = " << graph.error(initialEstimate) << endl ;
-
- // Single Step Optimization using Levenberg-Marquardt
- NonlinearOptimizerParams param;
- param.maxIterations = 500; /* requires a larger number of iterations to converge */
- param.verbosity = NonlinearOptimizerParams::SILENT;
-
-
- ConjugateGradientOptimizer optimizer(graph, initialEstimate, param, false);
- Values result = optimizer.optimize();
-// cout << "gd2 solver final error = " << graph.error(result) << endl;
-
- /* the optimality of the solution is not comparable to the */
- DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);
-}
-
-
-
-
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
/* ************************************************************************* */
diff --git a/wrap/ReturnValue.cpp b/wrap/ReturnValue.cpp
index 37e6b15ff..4f5a030cc 100644
--- a/wrap/ReturnValue.cpp
+++ b/wrap/ReturnValue.cpp
@@ -19,8 +19,8 @@ using namespace wrap;
string ReturnValue::return_type(bool add_ptr, pairing p) const {
if (p==pair && isPair) {
string str = "pair< " +
- maybe_shared_ptr(add_ptr && isPtr1, qualifiedType1("::"), type1) + ", " +
- maybe_shared_ptr(add_ptr && isPtr2, qualifiedType2("::"), type2) + " >";
+ maybe_shared_ptr(add_ptr || isPtr1, qualifiedType1("::"), type1) + ", " +
+ maybe_shared_ptr(add_ptr || isPtr2, qualifiedType2("::"), type2) + " >";
return str;
} else
return maybe_shared_ptr(add_ptr && isPtr1, (p==arg2)? qualifiedType2("::") : qualifiedType1("::"), (p==arg2)? type2 : type1);
@@ -52,6 +52,9 @@ void ReturnValue::wrap_result(const string& result, FileWriter& file, const TypeAttributesTable& typeAttributes) const {
string cppType2 = qualifiedType2("::"), matlabType2 = qualifiedType2(".");
if (isPair) {
+ // For a pair, store the returned pair so we do not evaluate the function twice
+ file.oss << " " << return_type(false, pair) << " pairResult = " << result << ";\n";
+
// first return value in pair
if (category1 == ReturnValue::CLASS) { // if we are going to make one
string objCopy, ptrType;
@@ -59,21 +62,21 @@ void ReturnValue::wrap_result(const string& result, FileWriter& file, const Type
const bool isVirtual = typeAttributes.at(cppType1).isVirtual;
if(isVirtual) {
if(isPtr1)
- objCopy = result + ".first";
+ objCopy = "pairResult.first";
else
- objCopy = result + ".first.clone()";
+ objCopy = "pairResult.first.clone()";
} else {
if(isPtr1)
- objCopy = result + ".first";
+ objCopy = "pairResult.first";
else
- objCopy = ptrType + "(new " + cppType1 + "(" + result + ".first))";
+ objCopy = ptrType + "(new " + cppType1 + "(pairResult.first))";
}
file.oss << " out[0] = wrap_shared_ptr(" << objCopy << ",\"" << matlabType1 << "\", " << (isVirtual ? "true" : "false") << ");\n";
} else if(isPtr1) {
- file.oss << " Shared" << type1 <<"* ret = new Shared" << type1 << "(" << result << ".first);" << endl;
+ file.oss << " Shared" << type1 <<"* ret = new Shared" << type1 << "(pairResult.first);" << endl;
file.oss << " out[0] = wrap_shared_ptr(ret,\"" << matlabType1 << "\", false);\n";
} else // if basis type
- file.oss << " out[0] = wrap< " << return_type(true,arg1) << " >(" << result << ".first);\n";
+ file.oss << " out[0] = wrap< " << return_type(true,arg1) << " >(pairResult.first);\n";
// second return value in pair
if (category2 == ReturnValue::CLASS) { // if we are going to make one
@@ -82,21 +85,21 @@ void ReturnValue::wrap_result(const string& result, FileWriter& file, const Type
const bool isVirtual = typeAttributes.at(cppType2).isVirtual;
if(isVirtual) {
if(isPtr2)
- objCopy = result + ".second";
+ objCopy = "pairResult.second";
else
- objCopy = result + ".second.clone()";
+ objCopy = "pairResult.second.clone()";
} else {
if(isPtr2)
- objCopy = result + ".second";
+ objCopy = "pairResult.second";
else
- objCopy = ptrType + "(new " + cppType2 + "(" + result + ".second))";
+ objCopy = ptrType + "(new " + cppType2 + "(pairResult.second))";
}
- file.oss << " out[0] = wrap_shared_ptr(" << objCopy << ",\"" << matlabType2 << "\", " << (isVirtual ? "true" : "false") << ");\n";
+ file.oss << " out[1] = wrap_shared_ptr(" << objCopy << ",\"" << matlabType2 << "\", " << (isVirtual ? "true" : "false") << ");\n";
} else if(isPtr2) {
- file.oss << " Shared" << type2 <<"* ret = new Shared" << type2 << "(" << result << ".second);" << endl;
+ file.oss << " Shared" << type2 <<"* ret = new Shared" << type2 << "(pairResult.second);" << endl;
file.oss << " out[1] = wrap_shared_ptr(ret,\"" << matlabType2 << "\");\n";
} else
- file.oss << " out[1] = wrap< " << return_type(true,arg2) << " >(" << result << ".second);\n";
+ file.oss << " out[1] = wrap< " << return_type(true,arg2) << " >(pairResult.second);\n";
}
else if (category1 == ReturnValue::CLASS){
string objCopy, ptrType;
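Two bugs are fixed in wrap_result: the generated code previously pasted the `result` expression (usually the wrapped function call) once per pair element, evaluating the function twice, and it wrote the second pair element to out[0] instead of out[1]. A minimal illustration of the double-evaluation hazard (expensive() is an illustrative stand-in for generated wrapper code):

    #include <cstdio>
    #include <utility>

    static int calls = 0;

    std::pair<int, int> expensive() {   // stand-in for the wrapped function
      ++calls;
      return std::make_pair(1, 2);
    }

    int main() {
      // Old codegen shape: the call expression pasted twice -> two evaluations.
      int a = expensive().first;
      int b = expensive().second;
      std::printf("pasted twice: calls = %d (a=%d b=%d)\n", calls, a, b);  // 2

      // New codegen shape: evaluate once into pairResult, then index it.
      calls = 0;
      std::pair<int, int> pairResult = expensive();
      a = pairResult.first;
      b = pairResult.second;
      std::printf("stored once:  calls = %d (a=%d b=%d)\n", calls, a, b);  // 1
      return 0;
    }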