Merge pull request #943 from borglab/fix/deprecations
commit 384be1f872
@@ -220,8 +220,8 @@ TEST(Vector, axpy )
   Vector x = Vector3(10., 20., 30.);
   Vector y0 = Vector3(2.0, 5.0, 6.0);
   Vector y1 = y0, y2 = y0;
-  axpy(0.1,x,y1);
-  axpy(0.1,x,y2.head(3));
+  y1 += 0.1 * x;
+  y2.head(3) += 0.1 * x;
   Vector expected = Vector3(3.0, 7.0, 9.0);
   EXPECT(assert_equal(expected,y1));
   EXPECT(assert_equal(expected,Vector(y2)));
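
The pattern throughout this PR: GTSAM's deprecated axpy(alpha, x, y) helper is replaced by the equivalent Eigen expression y += alpha * x, which also accepts writable block expressions such as y2.head(3). A minimal standalone sketch with plain Eigen (no GTSAM types; values mirror the test above):

    #include <iostream>
    #include <Eigen/Dense>

    int main() {
      Eigen::Vector3d x(10., 20., 30.);
      Eigen::Vector3d y(2.0, 5.0, 6.0);
      y += 0.1 * x;                        // was: axpy(0.1, x, y);
      std::cout << y.transpose() << "\n";  // prints: 3 7 9

      Eigen::VectorXd z = Eigen::VectorXd::Zero(5);
      z.head(3) += 0.1 * x;                // blocks are writable lvalues too
      std::cout << z.transpose() << "\n";  // prints: 1 2 3 0 0
      return 0;
    }
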
@@ -111,10 +111,10 @@ double dot(const Errors& a, const Errors& b) {

 /* ************************************************************************* */
-template<>
-void axpy<Errors,Errors>(double alpha, const Errors& x, Errors& y) {
+void axpy(double alpha, const Errors& x, Errors& y) {
   Errors::const_iterator it = x.begin();
   for(Vector& yi: y)
-    axpy(alpha,*(it++),yi);
+    yi += alpha * (*(it++));
 }

 /* ************************************************************************* */
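
Errors is GTSAM's list of per-factor error Vectors, so the surviving overload walks x and y in lockstep and applies the Eigen update block by block. A standalone sketch of the same pattern over a plain std::list (axpy_list is an illustrative name, not GTSAM API):

    #include <list>
    #include <Eigen/Dense>

    void axpy_list(double alpha, const std::list<Eigen::VectorXd>& x,
                   std::list<Eigen::VectorXd>& y) {
      auto it = x.begin();        // walk x in lockstep with y
      for (Eigen::VectorXd& yi : y)
        yi += alpha * (*(it++));  // per-block y += alpha * x
    }
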
@@ -66,7 +66,7 @@ namespace gtsam {
    * BLAS level 2 style
    */
-  template <>
-  GTSAM_EXPORT void axpy<Errors,Errors>(double alpha, const Errors& x, Errors& y);
+  GTSAM_EXPORT void axpy(double alpha, const Errors& x, Errors& y);

   /** print with optional string */
   GTSAM_EXPORT void print(const Errors& a, const std::string& s = "Error");
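
The header mirrors the change in the .cpp: the explicit specialization axpy<Errors,Errors> becomes an ordinary overload. Plain overloads take part in normal overload resolution, while an explicit specialization is only reached through the primary template; a minimal illustration (hypothetical function f, unrelated to GTSAM):

    #include <iostream>

    template <class T> void f(T) { std::cout << "template\n"; }
    void f(int)                  { std::cout << "overload\n"; }

    int main() {
      f(42);  // exact match: the non-template overload wins
      return 0;
    }
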
@@ -173,7 +173,7 @@ void SubgraphPreconditioner::transposeMultiplyAdd
   Errors::const_iterator it = e.begin();
   for(auto& key_value: y) {
     const Vector& ei = *it;
-    axpy(alpha, ei, key_value.second);
+    key_value.second += alpha * ei;
     ++it;
   }
   transposeMultiplyAdd2(alpha, it, e.end(), y);
@@ -191,7 +191,7 @@ void SubgraphPreconditioner::transposeMultiplyAdd2 (double alpha,

   VectorValues x = VectorValues::Zero(y); // x = 0
   Ab2_->transposeMultiplyAdd(1.0,e2,x);   // x += A2'*e2
-  axpy(alpha, Rc1_->backSubstituteTranspose(x), y); // y += alpha*inv(R1')*x
+  y += alpha * Rc1_->backSubstituteTranspose(x);    // y += alpha*inv(R1')*x
 }

 /* ************************************************************************* */
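
Both SubgraphPreconditioner hunks are the same rewrite: a blockwise y += alpha * e update in part 1, and y += alpha * inv(R1') * x in part 2 after forming x = A2' * e2. A minimal dense sketch of part 2 with plain Eigen (hypothetical signature; the real method operates on VectorValues and a Bayes net):

    #include <Eigen/Dense>

    void transposeMultiplyAdd2(double alpha, const Eigen::MatrixXd& A2,
                               const Eigen::MatrixXd& R1,  // upper-triangular factor
                               const Eigen::VectorXd& e2, Eigen::VectorXd& y) {
      Eigen::VectorXd x = A2.transpose() * e2;  // x = A2'*e2
      Eigen::VectorXd t =
          R1.transpose().triangularView<Eigen::Lower>().solve(x);  // t = inv(R1')*x
      y += alpha * t;  // y += alpha*inv(R1')*x, replacing the axpy call
    }
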
@@ -72,7 +72,7 @@ namespace gtsam {
     double takeOptimalStep(V& x) {
       // TODO: can we use gamma instead of dot(d,g) ????? Answer not trivial
       double alpha = -dot(d, g) / dot(Ad, Ad); // calculate optimal step-size
-      axpy(alpha, d, x); // // do step in new search direction, x += alpha*d
+      x += alpha * d; // do step in new search direction, x += alpha*d
       return alpha;
     }
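
In the conjugate-gradient step, alpha = -d'g / (Ad)'(Ad) minimizes the quadratic error along the search direction d, and the deprecated axpy reduces to x += alpha * d. A standalone sketch with plain Eigen vectors (names mirror the snippet, but the signature is illustrative):

    #include <Eigen/Dense>

    double takeOptimalStep(const Eigen::VectorXd& d, const Eigen::VectorXd& g,
                           const Eigen::VectorXd& Ad, Eigen::VectorXd& x) {
      double alpha = -d.dot(g) / Ad.dot(Ad);  // optimal step size along d
      x += alpha * d;                         // x += alpha*d
      return alpha;
    }
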
@@ -106,7 +106,7 @@ namespace gtsam {
       double beta = new_gamma / gamma;
       // d = g + d*beta;
       d *= beta;
-      axpy(1.0, g, d);
+      d += 1.0 * g;
     }

     gamma = new_gamma;
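
The two in-place statements compute the conjugate-direction update d = g + beta * d without allocating a temporary for g + d*beta; the diff keeps the literal 1.0 * g form of the old axpy(1.0, g, d). An illustrative helper (not GTSAM API):

    #include <Eigen/Dense>

    void updateDirection(double beta, const Eigen::VectorXd& g, Eigen::VectorXd& d) {
      d *= beta;  // d = beta * d
      d += g;     // d = g + beta * d
    }
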
@@ -32,7 +32,7 @@ TEST( Errors, arithmetic )
   e += Vector2(1.0,2.0), Vector3(3.0,4.0,5.0);
   DOUBLES_EQUAL(1+4+9+16+25,dot(e,e),1e-9);

-  axpy(2.0,e,e);
+  axpy(2.0, e, e);
   Errors expected;
   expected += Vector2(3.0,6.0), Vector3(9.0,12.0,15.0);
   CHECK(assert_equal(expected,e));
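
This call is only reformatted, not removed: the non-templated Errors overload declared above survives the deprecation. Note that x and y alias here, so the call computes e = e + 2*e = 3*e block by block, which is exactly the expected result. A self-contained sketch of the aliased update, with a plain std::list standing in for Errors:

    #include <list>
    #include <Eigen/Dense>

    int main() {
      std::list<Eigen::VectorXd> e;
      e.push_back(Eigen::Vector2d(1.0, 2.0));
      e.push_back(Eigen::Vector3d(3.0, 4.0, 5.0));
      auto it = e.begin();
      for (Eigen::VectorXd& yi : e)
        yi += 2.0 * (*(it++));  // aliased axpy: each block becomes 3x itself
      // e is now {(3,6), (9,12,15)}, matching `expected` in the test
      return 0;
    }
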
@@ -78,7 +78,8 @@ VectorValues DoglegOptimizerImpl::ComputeBlend(double delta, const VectorValues&

   // Compute blended point
   if(verbose) cout << "In blend region with fraction " << tau << " of Newton's method point" << endl;
-  VectorValues blend = (1. - tau) * x_u; axpy(tau, x_n, blend);
+  VectorValues blend = (1. - tau) * x_u;
+  blend += tau * x_n;
   return blend;
 }
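
Splitting the one-liner lets the axpy become a plain +=: the dogleg blend is the convex combination (1 - tau) * x_u + tau * x_n of the steepest-descent point x_u and the Newton point x_n. A dense sketch with plain Eigen vectors (VectorValues is GTSAM's structured counterpart):

    #include <Eigen/Dense>

    Eigen::VectorXd computeBlend(double tau, const Eigen::VectorXd& x_u,
                                 const Eigen::VectorXd& x_n) {
      Eigen::VectorXd blend = (1. - tau) * x_u;  // steepest-descent part
      blend += tau * x_n;                        // add the Newton part
      return blend;
    }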