diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i
index da5e8b29c..7d2911374 100644
--- a/gtsam/nonlinear/nonlinear.i
+++ b/gtsam/nonlinear/nonlinear.i
@@ -403,6 +403,8 @@ virtual class NonlinearOptimizerParams {
   bool isSequential() const;
   bool isCholmod() const;
   bool isIterative() const;
+
+  gtsam::NonlinearOptimizerParams::IterationHook iterationHook;
 };
 
 bool checkConvergence(double relativeErrorTreshold,
diff --git a/python/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py
index 2158e10bb..37afd9e1c 100644
--- a/python/gtsam/tests/test_NonlinearOptimizer.py
+++ b/python/gtsam/tests/test_NonlinearOptimizer.py
@@ -83,6 +83,26 @@ class TestScenario(GtsamTestCase):
         actual = GncLMOptimizer(self.fg, self.initial_values, gncParams).optimize()
         self.assertAlmostEqual(0, self.fg.error(actual))
 
+    def test_iteration_hook(self):
+        # set up iteration hook to track some testable values
+        iteration_count = 0
+        final_error = 0
+        final_values = None
+        def iteration_hook(iter, error_before, error_after):
+            nonlocal iteration_count, final_error, final_values
+            iteration_count = iter
+            final_error = error_after
+            final_values = optimizer.values()
+        # optimize
+        params = LevenbergMarquardtParams.CeresDefaults()
+        params.setOrdering(self.ordering)
+        params.iterationHook = iteration_hook
+        optimizer = LevenbergMarquardtOptimizer(self.fg, self.initial_values, params)
+        actual = optimizer.optimize()
+        self.assertAlmostEqual(0, self.fg.error(actual))
+        self.gtsamAssertEquals(final_values, actual)
+        self.assertEqual(self.fg.error(actual), final_error)
+        self.assertEqual(optimizer.iterations(), iteration_count)
 
 if __name__ == "__main__":
     unittest.main()