Merge remote-tracking branch 'origin/develop' into fix/zeroRowInQR
commit 82f6ca64a5
@@ -91,8 +91,42 @@ namespace gtsam {
   double Z = K_->baseline() * K_->fx() / (measured[0] - measured[1]);
   double X = Z * (measured[0] - K_->px()) / K_->fx();
   double Y = Z * (measured[2] - K_->py()) / K_->fy();
-  Point3 world_point = leftCamPose_.transform_from(Point3(X, Y, Z));
-  return world_point;
+  Point3 point = leftCamPose_.transform_from(Point3(X, Y, Z));
+  return point;
 }
 
+/* ************************************************************************* */
+Point3 StereoCamera::backproject2(const StereoPoint2& z, OptionalJacobian<3, 6> H1,
+                                  OptionalJacobian<3, 3> H2) const {
+  const Cal3_S2Stereo& K = *K_;
+  const double fx = K.fx(), fy = K.fy(), cx = K.px(), cy = K.py(), b = K.baseline();
+
+  double uL = z.uL(), uR = z.uR(), v = z.v();
+  double disparity = uL - uR;
+
+  double local_z = b * fx / disparity;
+  const Point3 local(local_z * (uL - cx) / fx, local_z * (v - cy) / fy, local_z);
+
+  if (H1 || H2) {
+    double z_partial_uR = local_z / disparity;
+    double x_partial_uR = local.x() / disparity;
+    double y_partial_uR = local.y() / disparity;
+    Matrix3 D_local_z;
+    D_local_z << -x_partial_uR + local.z() / fx, x_partial_uR, 0,
+        -y_partial_uR, y_partial_uR, local.z() / fy,
+        -z_partial_uR, z_partial_uR, 0;
+
+    Matrix3 D_point_local;
+    const Point3 point = leftCamPose_.transform_from(local, H1, D_point_local);
+
+    if (H2) {
+      *H2 = D_point_local * D_local_z;
+    }
+
+    return point;
+  }
+
+  return leftCamPose_.transform_from(local);
+}
+
 }
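For reference, the 3x3 matrix D_local_z assembled in backproject2 above is the Jacobian of the backprojected point in the left-camera frame with respect to the stereo measurement (u_L, u_R, v). With disparity d = u_L - u_R, the backprojection and its derivative are (a worked restatement of the code above, not additional commit content):

\[
Z = \frac{b\,f_x}{d}, \qquad X = \frac{Z\,(u_L - c_x)}{f_x}, \qquad Y = \frac{Z\,(v - c_y)}{f_y},
\]
\[
\frac{\partial (X, Y, Z)}{\partial (u_L, u_R, v)} =
\begin{pmatrix}
-\dfrac{X}{d} + \dfrac{Z}{f_x} & \dfrac{X}{d} & 0 \\[4pt]
-\dfrac{Y}{d} & \dfrac{Y}{d} & \dfrac{Z}{f_y} \\[4pt]
-\dfrac{Z}{d} & \dfrac{Z}{d} & 0
\end{pmatrix}.
\]

H2 then follows by the chain rule as D_point_local * D_local_z, where D_point_local is the derivative of leftCamPose_.transform_from with respect to the local point; H1 is the pose derivative returned by transform_from itself.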
@@ -137,6 +137,14 @@ public:
   /// back-project a measurement
   Point3 backproject(const StereoPoint2& z) const;
 
+  /** Back-project the 2D point and compute optional derivatives
+   * @param H1 derivative with respect to pose
+   * @param H2 derivative with respect to point
+   */
+  Point3 backproject2(const StereoPoint2& z,
+                      OptionalJacobian<3, 6> H1 = boost::none,
+                      OptionalJacobian<3, 3> H2 = boost::none) const;
+
   /// @}
   /// @name Deprecated
   /// @{
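A minimal usage sketch of the new overload (not part of this commit; the include path is the usual one and the calibration and measurement values are arbitrary). It mirrors what the new unit tests below exercise:

#include <gtsam/geometry/StereoCamera.h>

using namespace gtsam;

int main() {
  Cal3_S2Stereo::shared_ptr K(new Cal3_S2Stereo(1500, 1500, 0, 320, 240, 0.5));
  StereoCamera camera(Pose3(), K);

  // A measurement with positive disparity (uL > uR).
  StereoPoint2 z(323.0, 318.0, 245.0);

  // Dynamic matrices are resized to 3x6 and 3x3 by OptionalJacobian.
  Matrix Dpose, Dmeasurement;
  Point3 p = camera.backproject2(z, Dpose, Dmeasurement);
  (void)p;
  return 0;
}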
@@ -99,13 +99,13 @@ TEST( StereoCamera, Dproject)
   Matrix actual1; stereoCam.project2(landmark, actual1, boost::none);
   CHECK(assert_equal(expected1,actual1,1e-7));
 
-  Matrix expected2 = numericalDerivative32(project3,camPose, landmark, *K);
+  Matrix expected2 = numericalDerivative32(project3, camPose, landmark, *K);
   Matrix actual2; stereoCam.project2(landmark, boost::none, actual2);
   CHECK(assert_equal(expected2,actual2,1e-7));
 }
 
 /* ************************************************************************* */
-TEST( StereoCamera, backproject)
+TEST( StereoCamera, backproject_case1)
 {
   Cal3_S2Stereo::shared_ptr K2(new Cal3_S2Stereo(1500, 1500, 0, 320, 240, 0.5));
   StereoCamera stereoCam2(Pose3(), K2);
@@ -117,7 +117,7 @@ TEST( StereoCamera, backproject)
 }
 
 /* ************************************************************************* */
-TEST( StereoCamera, backproject2)
+TEST( StereoCamera, backproject_case2)
 {
   Rot3 R(0.589511291, -0.804859792, 0.0683931805,
          -0.804435942, -0.592650676, -0.0405925523,
@@ -132,6 +132,53 @@ TEST( StereoCamera, backproject2)
   CHECK(assert_equal(z, actual, 1e-3));
 }
 
+static Point3 backproject3(const Pose3& pose, const StereoPoint2& point, const Cal3_S2Stereo& K) {
+  return StereoCamera(pose, boost::make_shared<Cal3_S2Stereo>(K)).backproject(point);
+}
+
+/* ************************************************************************* */
+TEST( StereoCamera, backproject2_case1)
+{
+  Cal3_S2Stereo::shared_ptr K2(new Cal3_S2Stereo(1500, 1500, 0, 320, 240, 0.5));
+  StereoCamera stereoCam2(Pose3(), K2);
+
+  Point3 expected_point(1.2, 2.3, 4.5);
+  StereoPoint2 stereo_point = stereoCam2.project(expected_point);
+
+  Matrix actual_jacobian_1, actual_jacobian_2;
+  Point3 actual_point = stereoCam2.backproject2(stereo_point, actual_jacobian_1, actual_jacobian_2);
+  CHECK(assert_equal(expected_point, actual_point, 1e-8));
+
+  Matrix expected_jacobian_to_pose = numericalDerivative31(backproject3, Pose3(), stereo_point, *K2);
+  CHECK(assert_equal(expected_jacobian_to_pose, actual_jacobian_1, 1e-6));
+
+  Matrix expected_jacobian_to_point = numericalDerivative32(backproject3, Pose3(), stereo_point, *K2);
+  CHECK(assert_equal(expected_jacobian_to_point, actual_jacobian_2, 1e-6));
+}
+
+TEST( StereoCamera, backproject2_case2)
+{
+  Rot3 R(0.589511291, -0.804859792, 0.0683931805,
+         -0.804435942, -0.592650676, -0.0405925523,
+         0.0732045588, -0.0310882277, -0.996832359);
+  Point3 t(53.5239823, 23.7866016, -4.42379876);
+  Cal3_S2Stereo::shared_ptr K(new Cal3_S2Stereo(1733.75, 1733.75, 0, 689.645, 508.835, 0.0699612));
+  StereoCamera camera(Pose3(R,t), K);
+  StereoPoint2 z(184.812, 129.068, 714.768);
+
+  Matrix actual_jacobian_1, actual_jacobian_2;
+  Point3 l = camera.backproject2(z, actual_jacobian_1, actual_jacobian_2);
+
+  StereoPoint2 actual = camera.project(l);
+  CHECK(assert_equal(z, actual, 1e-3));
+
+  Matrix expected_jacobian_to_pose = numericalDerivative31(backproject3, Pose3(R,t), z, *K);
+  CHECK(assert_equal(expected_jacobian_to_pose, actual_jacobian_1, 1e-6));
+
+  Matrix expected_jacobian_to_point = numericalDerivative32(backproject3, Pose3(R,t), z, *K);
+  CHECK(assert_equal(expected_jacobian_to_point, actual_jacobian_2, 1e-6));
+}
+
 /* ************************************************************************* */
 int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
 /* ************************************************************************* */
@@ -568,6 +568,51 @@ void JacobianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
   transposeMultiplyAdd(alpha, Ax, y);
 }
 
+/* ************************************************************************* */
+/** Raw memory access version of multiplyHessianAdd y += alpha * A'*A*x
+ * Note: this is not assuming a fixed dimension for the variables,
+ * but requires the vector accumulatedDims to tell the dimension of
+ * each variable: e.g.: x0 has dim 3, x2 has dim 6, x3 has dim 2,
+ * then accumulatedDims is [0 3 9 11 13]
+ * NOTE: size of accumulatedDims is size of keys + 1!!
+ * TODO Frank asks: why is this here if not regular ????
+ */
+void JacobianFactor::multiplyHessianAdd(double alpha, const double* x, double* y,
+    const std::vector<size_t>& accumulatedDims) const {
+
+  /// Use Eigen magic to access raw memory
+  typedef Eigen::Map<Vector> VectorMap;
+  typedef Eigen::Map<const Vector> ConstVectorMap;
+
+  if (empty())
+    return;
+  Vector Ax = zero(Ab_.rows());
+
+  /// Just iterate over all A matrices and multiply in correct config part (looping over keys)
+  /// E.g.: Jacobian A = [A0 A1 A2] multiplies x = [x0 x1 x2]'
+  /// Hence: Ax = A0 x0 + A1 x1 + A2 x2 (hence we loop over the keys and accumulate)
+  for (size_t pos = 0; pos < size(); ++pos) {
+    size_t offset = accumulatedDims[keys_[pos]];
+    size_t dim = accumulatedDims[keys_[pos] + 1] - offset;
+    Ax += Ab_(pos) * ConstVectorMap(x + offset, dim);
+  }
+  /// Deal with noise properly, need to Double* whiten as we are dividing by variance
+  if (model_) {
+    model_->whitenInPlace(Ax);
+    model_->whitenInPlace(Ax);
+  }
+
+  /// multiply with alpha
+  Ax *= alpha;
+
+  /// Again iterate over all A matrices and insert Ai^T into y
+  for (size_t pos = 0; pos < size(); ++pos) {
+    size_t offset = accumulatedDims[keys_[pos]];
+    size_t dim = accumulatedDims[keys_[pos] + 1] - offset;
+    VectorMap(y + offset, dim) += Ab_(pos).transpose() * Ax;
+  }
+}
+
 /* ************************************************************************* */
 VectorValues JacobianFactor::gradientAtZero() const {
   VectorValues g;
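The accumulatedDims vector documented above is simply the prefix sum of the per-variable dimensions with a leading zero, so accumulatedDims[key] gives each variable's offset inside the raw x and y buffers. The two consecutive whitenInPlace calls scale Ax by the squared whitening factor, i.e. they divide by the variance as the comment notes, so the subsequent A'* multiplication yields alpha * A'*Sigma^-1*A*x (exact for diagonal noise models). A sketch of how a caller might build the vector (not part of this commit; the dimensions {3, 6, 2, 2} are chosen to reproduce the [0 3 9 11 13] example in the comment):

#include <cstddef>
#include <numeric>
#include <vector>

int main() {
  std::vector<std::size_t> dims = {3, 6, 2, 2};
  std::vector<std::size_t> accumulatedDims(dims.size() + 1, 0);
  std::partial_sum(dims.begin(), dims.end(), accumulatedDims.begin() + 1);
  // accumulatedDims == {0, 3, 9, 11, 13}: its size is number of keys + 1, and the
  // raw x/y buffers must have length accumulatedDims.back(). A call would then be:
  //   factor.multiplyHessianAdd(alpha, x.data(), y.data(), accumulatedDims);
  return 0;
}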
@@ -290,6 +290,17 @@ namespace gtsam {
     /** y += alpha * A'*A*x */
     void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;
 
+    /**
+     * Raw memory access version of multiplyHessianAdd y += alpha * A'*A*x
+     * Requires the vector accumulatedDims to tell the dimension of
+     * each variable: e.g.: x0 has dim 3, x2 has dim 6, x3 has dim 2,
+     * then accumulatedDims is [0 3 9 11 13]
+     * NOTE: size of accumulatedDims is size of keys + 1!!
+     * TODO(frank): we should probably kill this if no longer needed
+     */
+    void multiplyHessianAdd(double alpha, const double* x, double* y,
+        const std::vector<size_t>& accumulatedDims) const;
+
     /// A'*b for Jacobian
     VectorValues gradientAtZero() const;
 
@@ -10,9 +10,10 @@
  * -------------------------------------------------------------------------- */
 
 /**
- * @file HessianFactor.cpp
+ * @file Scatter.cpp
  * @author Richard Roberts
- * @date Dec 8, 2010
+ * @author Frank Dellaert
+ * @date June 2015
  */
 
 #include <gtsam/linear/GaussianFactorGraph.h>
@@ -14,7 +14,7 @@
  * @brief Maps global variable indices to slot indices
  * @author Richard Roberts
  * @author Frank Dellaert
- * @date Dec 8, 2010
+ * @date June 2015
  */
 
 #pragma once
@@ -46,7 +46,7 @@ public:
 
 public:
 
-  double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-4)
+  double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5)
   double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0)
   double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5)
   double lambdaLowerBound; ///< The minimum lambda used in LM (default: 0)
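A short sketch of how these damping knobs are typically set from user code (not part of this commit; the include path is assumed and the values simply echo the documented defaults):

#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>  // assumed include location

int main() {
  gtsam::LevenbergMarquardtParams params;
  params.lambdaInitial = 1e-5;    // the updated documented default
  params.lambdaFactor = 10.0;     // lambda is multiplied/divided by this between iterations
  params.lambdaUpperBound = 1e5;  // give up once lambda exceeds this
  params.lambdaLowerBound = 0.0;  // smallest lambda allowed
  return 0;
}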
@@ -68,6 +68,8 @@ public:
       JacobianFactor(keys, augmentedMatrix, sigmas) {
   }
 
+  using JacobianFactor::multiplyHessianAdd;
+
   /** y += alpha * A'*A*x */
   virtual void multiplyHessianAdd(double alpha, const VectorValues& x,
       VectorValues& y) const {
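The added using-declaration matters because the multiplyHessianAdd(VectorValues...) overload declared in the derived class would otherwise hide every base-class overload of the same name, including the raw-memory one. A self-contained illustration with hypothetical names (not GTSAM code):

#include <iostream>

struct Base {
  void f(int) { std::cout << "Base::f(int)\n"; }
  void f(const double*) { std::cout << "Base::f(const double*)\n"; }
};

struct Derived : Base {
  using Base::f;  // re-exposes Base::f(const double*), which the overload below would otherwise hide
  void f(int) { std::cout << "Derived::f(int)\n"; }
};

int main() {
  Derived d;
  double buf[2] = {0.0, 0.0};
  d.f(1);    // calls Derived::f(int)
  d.f(buf);  // compiles only because of the using-declaration
  return 0;
}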
@@ -182,50 +184,6 @@ public:
     return model_ ? model_->whiten(Ax) : Ax;
   }
 
-  /** Raw memory access version of multiplyHessianAdd y += alpha * A'*A*x
-   * Note: this is not assuming a fixed dimension for the variables,
-   * but requires the vector accumulatedDims to tell the dimension of
-   * each variable: e.g.: x0 has dim 3, x2 has dim 6, x3 has dim 2,
-   * then accumulatedDims is [0 3 9 11 13]
-   * NOTE: size of accumulatedDims is size of keys + 1!!
-   * TODO Frank asks: why is this here if not regular ????
-   */
-  void multiplyHessianAdd(double alpha, const double* x, double* y,
-      const std::vector<size_t>& accumulatedDims) const {
-
-    /// Use Eigen magic to access raw memory
-    typedef Eigen::Map<Vector> VectorMap;
-    typedef Eigen::Map<const Vector> ConstVectorMap;
-
-    if (empty())
-      return;
-    Vector Ax = zero(Ab_.rows());
-
-    /// Just iterate over all A matrices and multiply in correct config part (looping over keys)
-    /// E.g.: Jacobian A = [A0 A1 A2] multiplies x = [x0 x1 x2]'
-    /// Hence: Ax = A0 x0 + A1 x1 + A2 x2 (hence we loop over the keys and accumulate)
-    for (size_t pos = 0; pos < size(); ++pos) {
-      size_t offset = accumulatedDims[keys_[pos]];
-      size_t dim = accumulatedDims[keys_[pos] + 1] - offset;
-      Ax += Ab_(pos) * ConstVectorMap(x + offset, dim);
-    }
-    /// Deal with noise properly, need to Double* whiten as we are dividing by variance
-    if (model_) {
-      model_->whitenInPlace(Ax);
-      model_->whitenInPlace(Ax);
-    }
-
-    /// multiply with alpha
-    Ax *= alpha;
-
-    /// Again iterate over all A matrices and insert Ai^T into y
-    for (size_t pos = 0; pos < size(); ++pos) {
-      size_t offset = accumulatedDims[keys_[pos]];
-      size_t dim = accumulatedDims[keys_[pos] + 1] - offset;
-      VectorMap(y + offset, dim) += Ab_(pos).transpose() * Ax;
-    }
-  }
-
 };
 // end class RegularJacobianFactor