Merge pull request #1127 from borglab/feature/python/iterationHook
commit 06bc78175c
@@ -484,6 +484,9 @@ virtual class NonlinearOptimizerParams {
   bool isSequential() const;
   bool isCholmod() const;
   bool isIterative() const;
+
+  // This only applies to python since matlab does not have lambda machinery.
+  gtsam::NonlinearOptimizerParams::IterationHook iterationHook;
 };

 bool checkConvergence(double relativeErrorTreshold,
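The hunk above exposes a new `iterationHook` member on the wrapped `NonlinearOptimizerParams`. For reference, a minimal sketch of attaching the hook from Python; the toy graph, key, and initial guess here are illustrative, but the three-argument hook signature `(iteration, error_before, error_after)` is the one exercised by the tests below:

```python
import gtsam
from gtsam import Point2, PriorFactorPoint2

# Toy problem: a single Point2 prior at the origin, initialized away from it.
graph = gtsam.NonlinearFactorGraph()
graph.add(PriorFactorPoint2(1, Point2(0, 0), gtsam.noiseModel.Unit.Create(2)))
init = gtsam.Values()
init.insert(1, Point2(3, 3))

params = gtsam.LevenbergMarquardtParams()
# Called after every iteration with (iteration, error_before, error_after).
params.iterationHook = lambda iteration, error_before, error_after: print(
    "iter {}: {:.6f} -> {:.6f}".format(iteration, error_before, error_after))
result = gtsam.LevenbergMarquardtOptimizer(graph, init, params).optimize()
```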
@@ -15,12 +15,10 @@ from __future__ import print_function
 import unittest

 import gtsam
-from gtsam import (DoglegOptimizer, DoglegParams,
-                   DummyPreconditionerParameters, GaussNewtonOptimizer,
-                   GaussNewtonParams, GncLMParams, GncLMOptimizer,
-                   LevenbergMarquardtOptimizer, LevenbergMarquardtParams,
-                   NonlinearFactorGraph, Ordering,
-                   PCGSolverParameters, Point2, PriorFactorPoint2, Values)
+from gtsam import (DoglegOptimizer, DoglegParams, DummyPreconditionerParameters,
+                   GaussNewtonOptimizer, GaussNewtonParams, GncLMParams, GncLMOptimizer,
+                   LevenbergMarquardtOptimizer, LevenbergMarquardtParams, NonlinearFactorGraph,
+                   Ordering, PCGSolverParameters, Point2, PriorFactorPoint2, Values)
 from gtsam.utils.test_case import GtsamTestCase

 KEY1 = 1
@@ -28,63 +26,83 @@ KEY2 = 2


 class TestScenario(GtsamTestCase):
-    def test_optimize(self):
-        """Do trivial test with three optimizer variants."""
-        fg = NonlinearFactorGraph()
+    """Do trivial test with three optimizer variants."""
+
+    def setUp(self):
+        """Set up the optimization problem and ordering"""
+        # create graph
+        self.fg = NonlinearFactorGraph()
         model = gtsam.noiseModel.Unit.Create(2)
-        fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))
+        self.fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))

         # test error at minimum
         xstar = Point2(0, 0)
-        optimal_values = Values()
-        optimal_values.insert(KEY1, xstar)
-        self.assertEqual(0.0, fg.error(optimal_values), 0.0)
+        self.optimal_values = Values()
+        self.optimal_values.insert(KEY1, xstar)
+        self.assertEqual(0.0, self.fg.error(self.optimal_values), 0.0)

         # test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
         x0 = Point2(3, 3)
-        initial_values = Values()
-        initial_values.insert(KEY1, x0)
-        self.assertEqual(9.0, fg.error(initial_values), 1e-3)
+        self.initial_values = Values()
+        self.initial_values.insert(KEY1, x0)
+        self.assertEqual(9.0, self.fg.error(self.initial_values), 1e-3)

         # optimize parameters
-        ordering = Ordering()
-        ordering.push_back(KEY1)
+        self.ordering = Ordering()
+        self.ordering.push_back(KEY1)

-        # Gauss-Newton
+    def test_gauss_newton(self):
         gnParams = GaussNewtonParams()
-        gnParams.setOrdering(ordering)
-        actual1 = GaussNewtonOptimizer(fg, initial_values, gnParams).optimize()
-        self.assertAlmostEqual(0, fg.error(actual1))
+        gnParams.setOrdering(self.ordering)
+        actual = GaussNewtonOptimizer(self.fg, self.initial_values, gnParams).optimize()
+        self.assertAlmostEqual(0, self.fg.error(actual))

-        # Levenberg-Marquardt
+    def test_levenberg_marquardt(self):
         lmParams = LevenbergMarquardtParams.CeresDefaults()
-        lmParams.setOrdering(ordering)
-        actual2 = LevenbergMarquardtOptimizer(
-            fg, initial_values, lmParams).optimize()
-        self.assertAlmostEqual(0, fg.error(actual2))
+        lmParams.setOrdering(self.ordering)
+        actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
+        self.assertAlmostEqual(0, self.fg.error(actual))

-        # Levenberg-Marquardt
+    def test_levenberg_marquardt_pcg(self):
         lmParams = LevenbergMarquardtParams.CeresDefaults()
         lmParams.setLinearSolverType("ITERATIVE")
         cgParams = PCGSolverParameters()
         cgParams.setPreconditionerParams(DummyPreconditionerParameters())
         lmParams.setIterativeParams(cgParams)
-        actual2 = LevenbergMarquardtOptimizer(
-            fg, initial_values, lmParams).optimize()
-        self.assertAlmostEqual(0, fg.error(actual2))
+        actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
+        self.assertAlmostEqual(0, self.fg.error(actual))

-        # Dogleg
+    def test_dogleg(self):
         dlParams = DoglegParams()
-        dlParams.setOrdering(ordering)
-        actual3 = DoglegOptimizer(fg, initial_values, dlParams).optimize()
-        self.assertAlmostEqual(0, fg.error(actual3))
-
-        # Graduated Non-Convexity (GNC)
-        gncParams = GncLMParams()
-        actual4 = GncLMOptimizer(fg, initial_values, gncParams).optimize()
-        self.assertAlmostEqual(0, fg.error(actual4))
-
+        dlParams.setOrdering(self.ordering)
+        actual = DoglegOptimizer(self.fg, self.initial_values, dlParams).optimize()
+        self.assertAlmostEqual(0, self.fg.error(actual))
+
+    def test_graduated_non_convexity(self):
+        gncParams = GncLMParams()
+        actual = GncLMOptimizer(self.fg, self.initial_values, gncParams).optimize()
+        self.assertAlmostEqual(0, self.fg.error(actual))
+
+    def test_iteration_hook(self):
+        # set up iteration hook to track some testable values
+        iteration_count = 0
+        final_error = 0
+        final_values = None
+        def iteration_hook(iter, error_before, error_after):
+            nonlocal iteration_count, final_error, final_values
+            iteration_count = iter
+            final_error = error_after
+            final_values = optimizer.values()
+        # optimize
+        params = LevenbergMarquardtParams.CeresDefaults()
+        params.setOrdering(self.ordering)
+        params.iterationHook = iteration_hook
+        optimizer = LevenbergMarquardtOptimizer(self.fg, self.initial_values, params)
+        actual = optimizer.optimize()
+        self.assertAlmostEqual(0, self.fg.error(actual))
+        self.gtsamAssertEquals(final_values, actual)
+        self.assertEqual(self.fg.error(actual), final_error)
+        self.assertEqual(optimizer.iterations(), iteration_count)

 if __name__ == "__main__":
     unittest.main()
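One subtlety in `test_iteration_hook` above: `iteration_hook` references `optimizer` before that variable is assigned. This is safe because Python resolves the name when the hook is called (inside `optimize()`, after the assignment), not when the function is defined. A standalone illustration of the same late-binding behavior:

```python
captured = []

def hook():
    # `opt` is looked up at call time, so it need not exist yet here.
    captured.append(opt)

opt = "optimizer"  # assigned after hook is defined
hook()
assert captured == ["optimizer"]
```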
@@ -18,7 +18,7 @@ import numpy as np
 from gtsam import Rot3
 from gtsam.utils.test_case import GtsamTestCase

-from gtsam.utils.logging_optimizer import gtsam_optimize
+from gtsam.utils.logging_optimizer import gtsam_optimize, optimize_using

 KEY = 0
 MODEL = gtsam.noiseModel.Unit.Create(3)
@@ -34,19 +34,20 @@ class TestOptimizeComet(GtsamTestCase):
         rotations = {R, R.inverse()}  # mean is the identity
         self.expected = Rot3()

-        graph = gtsam.NonlinearFactorGraph()
-        for R in rotations:
-            graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
-        initial = gtsam.Values()
-        initial.insert(KEY, R)
-        self.params = gtsam.GaussNewtonParams()
-        self.optimizer = gtsam.GaussNewtonOptimizer(
-            graph, initial, self.params)
+        def check(actual):
+            # Check that optimizing yields the identity
+            self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
+            # Check that logging output prints out 3 lines (exact intermediate values differ by OS)
+            self.assertEqual(self.capturedOutput.getvalue().count('\n'), 3)
+            # reset stdout catcher
+            self.capturedOutput.truncate(0)
+        self.check = check

-        self.lmparams = gtsam.LevenbergMarquardtParams()
-        self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer(
-            graph, initial, self.lmparams
-        )
+        self.graph = gtsam.NonlinearFactorGraph()
+        for R in rotations:
+            self.graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
+        self.initial = gtsam.Values()
+        self.initial.insert(KEY, R)

         # setup output capture
         self.capturedOutput = StringIO()
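In the hunk above, setUp now builds a `check` closure and stores it as `self.check`, so each test can validate its result and the captured log output with one call. A minimal standalone version of the pattern:

```python
class Fixture:
    def setup(self):
        self.expected = 42

        def check(actual):
            # `self` is captured from setup's enclosing scope.
            assert actual == self.expected
        self.check = check

f = Fixture()
f.setup()
f.check(42)  # invokes the stored closure
```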
@@ -63,22 +64,29 @@ class TestOptimizeComet(GtsamTestCase):
         def hook(_, error):
             print(error)

-        # Only thing we require from optimizer is an iterate method
-        gtsam_optimize(self.optimizer, self.params, hook)
-
-        # Check that optimizing yields the identity.
-        actual = self.optimizer.values()
-        self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
+        # Wrapper function sets the hook and calls optimizer.optimize() for us.
+        params = gtsam.GaussNewtonParams()
+        actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, self.graph, self.initial)
+        self.check(actual)
+        actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, self.graph, self.initial, params)
+        self.check(actual)
+        actual = gtsam_optimize(gtsam.GaussNewtonOptimizer(self.graph, self.initial, params),
+                                params, hook)
+        self.check(actual)

     def test_lm_simple_printing(self):
         """Make sure we are properly terminating LM"""
         def hook(_, error):
             print(error)

-        gtsam_optimize(self.lmoptimizer, self.lmparams, hook)
-
-        actual = self.lmoptimizer.values()
-        self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
+        params = gtsam.LevenbergMarquardtParams()
+        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, self.graph, self.initial)
+        self.check(actual)
+        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, self.graph, self.initial,
+                                params)
+        self.check(actual)
+        actual = gtsam_optimize(gtsam.LevenbergMarquardtOptimizer(self.graph, self.initial, params),
+                                params, hook)

     @unittest.skip("Not a test we want run every time, as needs comet.ml account")
     def test_comet(self):
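The rewritten tests above exercise three equivalent ways of running an optimizer with a logging hook. Schematically, with a self-contained toy problem standing in for the Rot3 fixture (the graph, key, and values here are illustrative):

```python
import gtsam
from gtsam.utils.logging_optimizer import gtsam_optimize, optimize_using

def hook(optimizer, error):
    print(error)

# Toy problem standing in for the Rot3 fixture built in setUp.
graph = gtsam.NonlinearFactorGraph()
graph.add(gtsam.PriorFactorPoint2(0, gtsam.Point2(0, 0), gtsam.noiseModel.Unit.Create(2)))
initial = gtsam.Values()
initial.insert(0, gtsam.Point2(1, 1))
params = gtsam.GaussNewtonParams()

# 1. Default params, looked up in OPTIMIZER_PARAMS_MAP:
actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, graph, initial)
# 2. Explicit params; the hook is attached via params.iterationHook:
actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, graph, initial, params)
# 3. Legacy path: construct the optimizer yourself and drive it with gtsam_optimize:
actual = gtsam_optimize(gtsam.GaussNewtonOptimizer(graph, initial, params), params, hook)
```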
@@ -6,6 +6,53 @@ Author: Jing Wu and Frank Dellaert

 from gtsam import NonlinearOptimizer, NonlinearOptimizerParams
 import gtsam
+from typing import Any, Callable
+
+OPTIMIZER_PARAMS_MAP = {
+    gtsam.GaussNewtonOptimizer: gtsam.GaussNewtonParams,
+    gtsam.LevenbergMarquardtOptimizer: gtsam.LevenbergMarquardtParams,
+    gtsam.DoglegOptimizer: gtsam.DoglegParams,
+    gtsam.GncGaussNewtonOptimizer: gtsam.GaussNewtonParams,
+    gtsam.GncLMOptimizer: gtsam.LevenbergMarquardtParams
+}
+
+
+def optimize_using(OptimizerClass, hook, *args) -> gtsam.Values:
+    """ Wraps the constructor and "optimize()" call for an Optimizer together and adds an iteration
+        hook.
+    Example usage:
+    ```python
+    def hook(optimizer, error):
+        print("iteration {:}, error = {:}".format(optimizer.iterations(), error))
+    solution = optimize_using(gtsam.GaussNewtonOptimizer, hook, graph, init, params)
+    ```
+    Iteration hook's args are (optimizer, error) and return type should be None
+
+    Args:
+        OptimizerClass (T): A NonlinearOptimizer class (e.g. GaussNewtonOptimizer,
+            LevenbergMarquardtOptimizer)
+        hook ([T, double] -> None): Function to callback after each iteration. Args are (optimizer,
+            error) and return should be None.
+        *args: Arguments that would be passed into the OptimizerClass constructor, usually:
+            graph, init, [params]
+    Returns:
+        (gtsam.Values): A Values object representing the optimization solution.
+    """
+    # Add the iteration hook to the NonlinearOptimizerParams
+    for arg in args:
+        if isinstance(arg, gtsam.NonlinearOptimizerParams):
+            arg.iterationHook = lambda iteration, error_before, error_after: hook(
+                optimizer, error_after)
+            break
+    else:
+        params = OPTIMIZER_PARAMS_MAP[OptimizerClass]()
+        params.iterationHook = lambda iteration, error_before, error_after: hook(
+            optimizer, error_after)
+        args = (*args, params)
+    # Construct Optimizer and optimize
+    optimizer = OptimizerClass(*args)
+    hook(optimizer, optimizer.error())  # Call hook once with init values to match behavior below
+    return optimizer.optimize()


 def optimize(optimizer, check_convergence, hook):
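A note on the params handling in `optimize_using` above: Python's `for ... else` runs the `else` block only when the loop completes without hitting `break`, i.e. when no `NonlinearOptimizerParams` instance was found in `*args`; only then are default params constructed via `OPTIMIZER_PARAMS_MAP` and appended. A standalone illustration of the idiom:

```python
args = ("graph", "init")  # no params object passed
params = None
for arg in args:
    if isinstance(arg, dict):  # dict stands in for gtsam.NonlinearOptimizerParams
        params = arg
        break
else:
    # Reached only because the loop finished without `break`.
    params = {"default": True}
assert params == {"default": True}
```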
@@ -21,7 +68,8 @@ def optimize(optimizer, check_convergence, hook):
     current_error = optimizer.error()
     hook(optimizer, current_error)

-    # Iterative loop
+    # Iterative loop. Cannot use `params.iterationHook` because we don't have access to params
+    # (backwards compatibility issue).
     while True:
         # Do next iteration
         optimizer.iterate()
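For context, the rest of this loop lies outside the hunk; a hedged sketch of how such a manual iterate-until-converged loop typically proceeds (not the verbatim body):

```python
# Sketch only: iterate, log via the hook, and stop when convergence is reported.
while True:
    optimizer.iterate()
    new_error = optimizer.error()
    hook(optimizer, new_error)
    if check_convergence(optimizer, current_error, new_error):
        break
    current_error = new_error
```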
@@ -36,6 +84,7 @@ def gtsam_optimize(optimizer,
                    params,
                    hook):
     """ Given an optimizer and params, iterate until convergence.
+    Recommend using optimize_using instead.
     After each iteration, hook(optimizer) is called.
     After the function, use values and errors to get the result.
     Arguments: