Fixed bug with shared solvers in recursive LM nonlinear optimizer
parent 7404f78bc1
commit 06b08c6f85
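The bug: inside the trial loop, the old code built the solver as a stack-local `S solver(*damped);` (note its `// not solver_ !!` comment), so the solver constructed from the damped system was discarded at the end of each iteration and the returned optimizer kept a stale `solver_`. The fix copies the shared handle, resets it to a solver over the damped graph, and threads it into the returned `NonlinearOptimizer`. A minimal sketch of that pointer-lifetime fix, using illustrative stand-in types rather than the real GTSAM classes:

```cpp
#include <boost/shared_ptr.hpp>

struct DampedSystem { double lambda; };

struct Solver {                      // stand-in for the templated solver type S
	explicit Solver(const DampedSystem& sys) : lambda(sys.lambda) {}
	double lambda;                     // remembers which damping it was built from
};
typedef boost::shared_ptr<Solver> shared_solver;

// Returns the solver that should seed the next optimizer state.
shared_solver iterateOnce(shared_solver solver_, const DampedSystem& damped) {
	// Before the fix: Solver solver(damped); -- a local that dies at the end
	// of the iteration, so the caller only ever sees the stale solver_.
	// After the fix: reset a shared handle so the solver built from the
	// damped system survives the loop and can be handed to the next optimizer.
	shared_solver solver = solver_;
	solver.reset(new Solver(damped));
	return solver;
}
```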
@@ -292,60 +292,61 @@ namespace gtsam {
 		double next_error = error_;
 
 		shared_values next_values = values_;
+		shared_solver solver = solver_;
 
 		while(true) {
-		  if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl;
+			if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl;
 
-		  // add prior-factors
-		  typename L::shared_ptr damped(new L(linear));
-		  {
-		    double sigma = 1.0 / sqrt(lambda);
-		    damped->reserve(damped->size() + dimensions_->size());
-		    // for each of the variables, add a prior
-		    for(Index j=0; j<dimensions_->size(); ++j) {
-		      size_t dim = (*dimensions_)[j];
-		      Matrix A = eye(dim);
-		      Vector b = zero(dim);
-		      SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma);
-		      GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
-		      damped->push_back(prior);
-		    }
-		  }
-		  if (verbosity >= Parameters::DAMPED) damped->print("damped");
+			// add prior-factors
+			typename L::shared_ptr damped(new L(linear));
+			{
+				double sigma = 1.0 / sqrt(lambda);
+				damped->reserve(damped->size() + dimensions_->size());
+				// for each of the variables, add a prior
+				for(Index j=0; j<dimensions_->size(); ++j) {
+					size_t dim = (*dimensions_)[j];
+					Matrix A = eye(dim);
+					Vector b = zero(dim);
+					SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma);
+					GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
+					damped->push_back(prior);
+				}
+			}
+			if (verbosity >= Parameters::DAMPED) damped->print("damped");
 
-		  // solve
-		  S solver(*damped); // not solver_ !!
+			// solve
+			solver.reset(new S(*damped));
 
-		  VectorValues delta = *solver.optimize();
-		  if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
+			VectorValues delta = *solver->optimize();
+			if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
 
-		  // update values
-		  shared_values newValues(new C(values_->expmap(delta, *ordering_))); // TODO: updateValues
+			// update values
+			shared_values newValues(new C(values_->expmap(delta, *ordering_))); // TODO: updateValues
 
-		  // create new optimization state with more adventurous lambda
-		  double error = graph_->error(*newValues);
+			// create new optimization state with more adventurous lambda
+			double error = graph_->error(*newValues);
 
-		  if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl;
+			if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl;
 
-		  if( error <= error_ ) {
-		  	next_values = newValues;
-		  	next_error = error;
-			  lambda /= factor;
-		  	break;
-		  }
-		  else {
-		  	// Either we're not cautious, or the same lambda was worse than the current error.
-		  	// The more adventurous lambda was worse too, so make lambda more conservative
-		  	// and keep the same values.
-		  	if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) {
-		  		break;
-		  	} else {
-		  		lambda *= factor;
-		  	}
-		  }
+			if( error <= error_ ) {
+				next_values = newValues;
+				next_error = error;
+				lambda /= factor;
+				break;
+			}	else {
+				// Either we're not cautious, or the same lambda was worse than the current error.
+				// The more adventurous lambda was worse too, so make lambda more conservative
+				// and keep the same values.
+				if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) {
+					break;
+				} else {
+					lambda *= factor;
+				}
+			}
 		} // end while
 
-		return newValuesErrorLambda_(next_values, next_error, lambda);
+		return NonlinearOptimizer(graph_, next_values, next_error, ordering_, solver,
+				parameters_->newLambda_(lambda), dimensions_, iterations_);
 	}
 
 	/* ************************************************************************* */
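A note on the damping scheme in the loop above: each variable gets a prior factor with unit Jacobian, zero right-hand side, and isotropic sigma = 1/sqrt(lambda). Whitening that prior contributes (1/sigma^2) I = lambda I to the normal equations, which is exactly classic Levenberg-Marquardt damping. The self-contained check below verifies that identity and restates the lambda schedule the loop implements; `nextLambda` is an illustrative helper, not GTSAM API:

```cpp
#include <cassert>
#include <cmath>

// A unit-Jacobian prior whitened by sigma = 1/sqrt(lambda) adds
// (1/sigma)^2 = lambda to each Hessian diagonal entry, i.e. lambda * I.
void checkDampingIdentity(double lambda) {
	double sigma = 1.0 / std::sqrt(lambda);
	double hessianContribution = 1.0 / (sigma * sigma);
	assert(std::fabs(hessianContribution - lambda) < 1e-9 * lambda);
}

// Illustrative restatement of the schedule in the loop: shrink lambda after
// a successful step, grow it after a failed one, and in BOUNDED mode stop
// growing once lambda reaches 1e5 (the loop breaks there instead).
double nextLambda(double lambda, double factor, bool improved, bool bounded) {
	if (improved) return lambda / factor;          // be more adventurous next time
	if (bounded && lambda >= 1.0e5) return lambda; // caller gives up here
	return lambda * factor;                        // damp harder and retry
}

int main() {
	checkDampingIdentity(10.0);
	assert(nextLambda(10.0, 10.0, true, false) == 1.0);
	assert(nextLambda(10.0, 10.0, false, false) == 100.0);
	return 0;
}
```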
@@ -187,13 +187,13 @@ TEST( NonlinearOptimizer, optimize_LM_recursive )
 	Point2 xstar(0,0);
 	example::Values cstar;
 	cstar.insert(simulated2D::PoseKey(1), xstar);
-	DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
+	EXPECT_DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
 
 	// test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
 	Point2 x0(3,3);
 	boost::shared_ptr<example::Values> c0(new example::Values);
 	c0->insert(simulated2D::PoseKey(1), x0);
-	DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
+	EXPECT_DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
 
 	// optimize parameters
 	shared_ptr<Ordering> ord(new Ordering());
@@ -207,7 +207,13 @@ TEST( NonlinearOptimizer, optimize_LM_recursive )
 
 	// Levenberg-Marquardt
 	Optimizer actual2 = optimizer.levenbergMarquardtRecursive();
-	DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
+	EXPECT_DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
+
+	// calculate the marginal
+	Matrix actualCovariance; Vector mean;
+	boost::tie(mean, actualCovariance) = actual2.marginalCovariance("x1");
+	Matrix expectedCovariance = Matrix_(2,2, 8.60817108, 0.0, 0.0, 0.01);
+	EXPECT(assert_equal(expectedCovariance, actualCovariance, tol));
 }
 
 /* ************************************************************************* */
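Two changes in the test are worth noting: the fatal DOUBLES_EQUAL checks become EXPECT_DOUBLES_EQUAL, which in CppUnitLite-style harnesses records a failure and keeps the test running, and the new marginal-covariance query exercises the shared solver after the final iteration, which is presumably what a stale solver would have broken before this fix. A sketch of the non-fatal expectation pattern, assumed behavior rather than the actual CppUnitLite macro:

```cpp
#include <cmath>
#include <cstdio>

static int failures = 0;

// Records a failure and continues, so one bad tolerance does not hide the
// checks that follow it (e.g. the marginal-covariance assertion above).
#define SKETCH_EXPECT_DOUBLES_EQUAL(expected, actual, tol)          \
	do {                                                              \
		if (std::fabs((expected) - (actual)) > (tol)) {                 \
			std::printf("failure: expected %g, got %g\n",                 \
			            (double)(expected), (double)(actual));            \
			++failures; /* record and continue instead of returning */    \
		}                                                               \
	} while (0)

int main() {
	SKETCH_EXPECT_DOUBLES_EQUAL(199.0, 199.0005, 1e-3); // passes
	SKETCH_EXPECT_DOUBLES_EQUAL(0.0, 0.5, 1e-3);        // fails, but we go on
	return failures == 0 ? 0 : 1;
}
```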