Update annotations

master
liuyancheng 2021-09-22 19:42:25 +08:00
parent a55d24a467
commit 6117fcf244
4 changed files with 1003 additions and 703 deletions

.gitignore vendored

@ -47,3 +47,4 @@ cmake-build-debug/
*.pyc
*.osa
.vscode/settings.json

src/CameraModels/KannalaBrandt8.cpp

@ -20,9 +20,22 @@
#include <boost/serialization/export.hpp>
namespace ORB_SLAM3
{
/**
 * @brief Project a 3D point in camera coordinates into pixel coordinates with the Kannala-Brandt (KB8) fisheye model
 * xc = Xc/Zc, yc = Yc/Zc
 * r^2 = xc^2 + yc^2
 * θ = arctan(r)
 * θd = k0*θ + k1*θ^3 + k2*θ^5 + k3*θ^7 + k4*θ^9
 * xd = θd/r * xc   yd = θd/r * yc
 * u = fx*xd + cx   v = fy*yd + cy
 * @param p3D 3D point in camera coordinates
 * @return pixel coordinates
 */
cv::Point2f KannalaBrandt8::project(const cv::Point3f &p3D)
{
const float x2_plus_y2 = p3D.x * p3D.x + p3D.y * p3D.y;
const float theta = atan2f(sqrtf(x2_plus_y2), p3D.z);
const float psi = atan2f(p3D.y, p3D.x);
@ -32,25 +45,41 @@ namespace ORB_SLAM3 {
const float theta5 = theta3 * theta2;
const float theta7 = theta5 * theta2;
const float theta9 = theta7 * theta2;
const float r = theta + mvParameters[4] * theta3 + mvParameters[5] * theta5 + mvParameters[6] * theta7 + mvParameters[7] * theta9;
return cv::Point2f(mvParameters[0] * r * cos(psi) + mvParameters[2],
mvParameters[1] * r * sin(psi) + mvParameters[3]);
}
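// Minimal usage sketch (hypothetical intrinsics; assumes the std::vector<float> constructor taking {fx, fy, cx, cy, k1, k2, k3, k4}):
//   ORB_SLAM3::KannalaBrandt8 cam({190.f, 190.f, 320.f, 240.f, 0.003f, 0.001f, -0.002f, 0.0005f});
//   cv::Point2f uv = cam.project(cv::Point3f(0.1f, -0.2f, 1.5f)); // pixel coordinates of the 3D point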
/**
 * @brief Project a 3D point stored as cv::Matx31f
 * @param m3D 3D point
 * @return pixel coordinates
 */
cv::Point2f KannalaBrandt8::project(const cv::Matx31f &m3D)
{
return this->project(cv::Point3f(m3D(0), m3D(1), m3D(2)));
}
/**
 * @brief Project a 3D point stored as a cv::Mat
 * @param m3D 3D point
 * @return pixel coordinates
 */
cv::Point2f KannalaBrandt8::project(const cv::Mat &m3D)
{
const float *p3D = m3D.ptr<float>();
return this->project(cv::Point3f(p3D[0], p3D[1], p3D[2]));
}
/**
 * @brief Project a 3D point (Eigen, double precision overload)
 * @param v3D 3D point
 * @return pixel coordinates
 */
Eigen::Vector2d KannalaBrandt8::project(const Eigen::Vector3d &v3D)
{
const double x2_plus_y2 = v3D[0] * v3D[0] + v3D[1] * v3D[1];
const double theta = atan2f(sqrtf(x2_plus_y2), v3D[2]);
const double psi = atan2f(v3D[1], v3D[0]);
@ -60,53 +89,94 @@ namespace ORB_SLAM3 {
const double theta5 = theta3 * theta2;
const double theta7 = theta5 * theta2;
const double theta9 = theta7 * theta2;
const double r = theta + mvParameters[4] * theta3 + mvParameters[5] * theta5 + mvParameters[6] * theta7 + mvParameters[7] * theta9;
Eigen::Vector2d res;
res[0] = mvParameters[0] * r * cos(psi) + mvParameters[2]; // cos(psi) = xc / r
res[1] = mvParameters[1] * r * sin(psi) + mvParameters[3]; // sin(psi) = yc / r
return res;
}
/**
 * @brief Project a 3D point and return the result as a 2x1 cv::Mat
 * @param p3D 3D point
 * @return pixel coordinates as cv::Mat
 */
cv::Mat KannalaBrandt8::projectMat(const cv::Point3f &p3D)
{
cv::Point2f point = this->project(p3D);
cv::Mat ret = (cv::Mat_<float>(2, 1) << point.x, point.y);
return ret.clone();
}
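/**
 * @brief Measurement uncertainty; not modeled for this camera model, always returns 1
 */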
float KannalaBrandt8::uncertainty2(const Eigen::Matrix<double, 2, 1> &p2D)
{
return 1.f;
}
/**
 * @brief Unproject a pixel to a ray, returned as cv::Mat; used in KannalaBrandt8::TriangulateMatches
 * @param p2D pixel coordinates
 * @return ray on the z = 1 plane
 */
cv::Mat KannalaBrandt8::unprojectMat(const cv::Point2f &p2D)
{
cv::Point3f ray = this->unproject(p2D);
cv::Mat ret = (cv::Mat_<float>(3, 1) << ray.x, ray.y, ray.z);
return ret.clone();
}
/**
 * @brief Unproject a pixel to a ray, returned as cv::Matx31f
 * @param p2D pixel coordinates
 * @return ray on the z = 1 plane
 */
cv::Matx31f KannalaBrandt8::unprojectMat_(const cv::Point2f &p2D)
{
cv::Point3f ray = this->unproject(p2D);
cv::Matx31f r{ray.x, ray.y, ray.z};
return r;
}
/**
 * @brief Unproject a pixel back to a ray on the z = 1 plane (inverse of the KB8 projection)
 * Known: the pixel coordinates u, v
 * xd = (u - cx) / fx    yd = (v - cy) / fy
 * Wanted: xc = xd * tan(θ) / θd    yc = yd * tan(θ) / θd
 * where θd is obtained as follows:
 * xd^2 + yd^2 = θd^2/r^2 * (xc^2 + yc^2)
 * θd = sqrt(xd^2 + yd^2) / sqrt(xc^2 + yc^2) * r
 * with r = sqrt(xc^2 + yc^2), so
 * θd = sqrt(xd^2 + yd^2)
 * The only remaining unknown is tan(θ), i.e. θ, which satisfies
 * θd = θ + k1*θ^3 + k2*θ^5 + k3*θ^7 + k4*θ^9
 * Solving for θ directly is awkward, so θ is found iteratively, starting from θ = θd.
 * Let f(θ) = θ + k1*θ^3 + k2*θ^5 + k3*θ^7 + k4*θ^9 and
 * e(θ) = f(θ) - θd; the goal is a θ with e(θ) = 0
 * e(θ+δθ) = e(θ) + e'(θ) * δθ = 0
 * e'(θ) = 1 + 3*k1*θ^2 + 5*k2*θ^4 + 7*k3*θ^6 + 9*k4*θ^8
 * δθ = -e(θ)/e'(θ), iterated until θ converges
 * Finally xc = xd * tan(θ) / θd    yc = yd * tan(θ) / θd
 * @param p2D pixel coordinates
 * @return ray on the z = 1 plane
 */
cv::Point3f KannalaBrandt8::unproject(const cv::Point2f &p2D)
{
//Use Newton's method to solve for theta with good precision (err ~ e-6)
cv::Point2f pw((p2D.x - mvParameters[2]) / mvParameters[0], (p2D.y - mvParameters[3]) / mvParameters[1]); // xd, yd
float scale = 1.f;
float theta_d = sqrtf(pw.x * pw.x + pw.y * pw.y); // θd = sqrt(xd^2 + yd^2)
theta_d = fminf(fmaxf(-CV_PI / 2.f, theta_d), CV_PI / 2.f); // the field of view must not exceed 180 degrees
if (theta_d > 1e-8)
{
//Compensate distortion iteratively
float theta = theta_d;
// start iterating
for (int j = 0; j < 10; j++)
{
float theta2 = theta * theta, theta4 = theta2 * theta2, theta6 = theta4 * theta2, theta8 = theta4 * theta4;
float k0_theta2 = mvParameters[4] * theta2, k1_theta4 = mvParameters[5] * theta4;
float k2_theta6 = mvParameters[6] * theta6, k3_theta8 = mvParameters[7] * theta8;
float theta_fix = (theta * (1 + k0_theta2 + k1_theta4 + k2_theta6 + k3_theta8) - theta_d) /
@ -116,13 +186,25 @@ namespace ORB_SLAM3 {
break;
}
//scale = theta - theta_d;
// compute tan(θ) / θd
scale = std::tan(theta) / theta_d;
}
return cv::Point3f(pw.x * scale, pw.y * scale, 1.f);
}
/**
 * @brief Jacobian of the projection with respect to the 3D point
 * u = fx * θd * x / sqrt(x^2 + y^2) + cx
 * v = fy * θd * y / sqrt(x^2 + y^2) + cy
 * Differentiate both expressions with respect to x, y and z, where
 * θd = θ + k1*θ^3 + k2*θ^5 + k3*θ^7 + k4*θ^9
 * θ = arctan(sqrt(x^2 + y^2), z)
 * @param p3D 3D point
 * @return 2x3 Jacobian
 */
cv::Mat KannalaBrandt8::projectJac(const cv::Point3f &p3D)
{
float x2 = p3D.x * p3D.x, y2 = p3D.y * p3D.y, z2 = p3D.z * p3D.z;
float r2 = x2 + y2;
float r = sqrt(r2);
@ -140,23 +222,34 @@ namespace ORB_SLAM3 {
9 * mvParameters[7] * theta8;
cv::Mat Jac(2, 3, CV_32F);
Jac.at<float>(0, 0) = mvParameters[0] * (fd * p3D.z * x2 / (r2 * (r2 + z2)) + f * y2 / r3); // ∂u/∂x
Jac.at<float>(1, 0) =
mvParameters[1] * (fd * p3D.z * p3D.y * p3D.x / (r2 * (r2 + z2)) - f * p3D.y * p3D.x / r3); // ∂v/∂x
Jac.at<float>(0, 1) =
mvParameters[0] * (fd * p3D.z * p3D.y * p3D.x / (r2 * (r2 + z2)) - f * p3D.y * p3D.x / r3); // ∂u/∂y
Jac.at<float>(1, 1) = mvParameters[1] * (fd * p3D.z * y2 / (r2 * (r2 + z2)) + f * x2 / r3); // ∂v/∂y
Jac.at<float>(0, 2) = -mvParameters[0] * fd * p3D.x / (r2 + z2); // ∂u/∂z
Jac.at<float>(1, 2) = -mvParameters[1] * fd * p3D.y / (r2 + z2); // ∂v/∂z
std::cout << "CV JAC: " << Jac << std::endl;
return Jac.clone();
}
/**
 * @brief Jacobian of the projection with respect to the 3D point (Eigen version)
 * u = fx * θd * x / sqrt(x^2 + y^2) + cx
 * v = fy * θd * y / sqrt(x^2 + y^2) + cy
 * Differentiate both expressions with respect to x, y and z, where
 * θd = θ + k1*θ^3 + k2*θ^5 + k3*θ^7 + k4*θ^9
 * θ = arctan(sqrt(x^2 + y^2), z)
 * @param v3D 3D point
 * @return 2x3 Jacobian
 */
Eigen::Matrix<double, 2, 3> KannalaBrandt8::projectJac(const Eigen::Vector3d &v3D)
{
double x2 = v3D[0] * v3D[0], y2 = v3D[1] * v3D[1], z2 = v3D[2] * v3D[2];
double r2 = x2 + y2;
double r = sqrt(r2);
@ -186,15 +279,27 @@ namespace ORB_SLAM3 {
JacGood(1, 2) = -mvParameters[1] * fd * v3D[1] / (r2 + z2);
return JacGood;
}
cv::Mat KannalaBrandt8::unprojectJac(const cv::Point2f &p2D)
{
return cv::Mat();
}
/** Triangulate to recover 3D points, undistorting the keypoints first; used in monocular initialization
 * @param vKeys1 keypoints in frame 1
 * @param vKeys2 keypoints in frame 2
 * @param vMatches12 matches; vMatches12[i] is the index in vKeys2 of the point matched to vKeys1[i]
 * @param R21 recovered rotation from frame 1 to frame 2
 * @param t21 recovered translation from frame 1 to frame 2
 * @param vP3D triangulated 3D points
 * @param vbTriangulated flags marking which matches were successfully triangulated
 */
bool KannalaBrandt8::ReconstructWithTwoViews(const std::vector<cv::KeyPoint> &vKeys1, const std::vector<cv::KeyPoint> &vKeys2, const std::vector<int> &vMatches12,
cv::Mat &R21, cv::Mat &t21, std::vector<cv::Point3f> &vP3D, std::vector<bool> &vbTriangulated)
{
if (!tvr)
{
cv::Mat K = this->toK();
tvr = new TwoViewReconstruction(K);
}
@ -203,82 +308,98 @@ namespace ORB_SLAM3 {
std::vector<cv::KeyPoint> vKeysUn1 = vKeys1, vKeysUn2 = vKeys2;
std::vector<cv::Point2f> vPts1(vKeys1.size()), vPts2(vKeys2.size());
for (size_t i = 0; i < vKeys1.size(); i++)
vPts1[i] = vKeys1[i].pt;
for (size_t i = 0; i < vKeys2.size(); i++)
vPts2[i] = vKeys2[i].pt;
cv::Mat D = (cv::Mat_<float>(4, 1) << mvParameters[4], mvParameters[5], mvParameters[6], mvParameters[7]);
cv::Mat R = cv::Mat::eye(3, 3, CV_32F);
cv::Mat K = this->toK();
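// Undistort the fisheye keypoints and reproject them with K, so the pinhole-based TwoViewReconstruction can be reused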
cv::fisheye::undistortPoints(vPts1, vPts1, K, D, R, K);
cv::fisheye::undistortPoints(vPts2, vPts2, K, D, R, K);
for (size_t i = 0; i < vKeys1.size(); i++)
vKeysUn1[i].pt = vPts1[i];
for (size_t i = 0; i < vKeys2.size(); i++)
vKeysUn2[i].pt = vPts2[i];
return tvr->Reconstruct(vKeysUn1, vKeysUn2, vMatches12, R21, t21, vP3D, vbTriangulated);
}
/**
 * @brief Return the calibration matrix
 * @return K
 */
cv::Mat KannalaBrandt8::toK()
{
cv::Mat K = (cv::Mat_<float>(3, 3) << mvParameters[0], 0.f, mvParameters[2], 0.f, mvParameters[1], mvParameters[3], 0.f, 0.f, 1.f);
return K;
}
cv::Matx33f KannalaBrandt8::toK_()
{
cv::Matx33f K{mvParameters[0], 0.f, mvParameters[2], 0.f, mvParameters[1], mvParameters[3], 0.f, 0.f, 1.f};
return K;
}
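/**
 * @brief For the fisheye model the epipolar check is done by triangulating the match and requiring a (small positive) depth
 */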
bool KannalaBrandt8::epipolarConstrain(GeometricCamera *pCamera2, const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Mat &R12, const cv::Mat &t12, const float sigmaLevel, const float unc)
{
cv::Mat p3D;
return this->TriangulateMatches(pCamera2, kp1, kp2, R12, t12, sigmaLevel, unc, p3D) > 0.0001f;
}
bool KannalaBrandt8::epipolarConstrain_(GeometricCamera *pCamera2, const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Matx33f &R12, const cv::Matx31f &t12, const float sigmaLevel, const float unc)
{
cv::Matx31f p3D;
return this->TriangulateMatches_(pCamera2, kp1, kp2, R12, t12, sigmaLevel, unc, p3D) > 0.0001f;
}
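/**
 * @brief Unproject the two keypoints, check parallax, triangulate, and validate depth and reprojection error; the result is returned in world coordinates
 */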
bool KannalaBrandt8::matchAndtriangulate(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, GeometricCamera *pOther,
cv::Mat &Tcw1, cv::Mat &Tcw2,
const float sigmaLevel1, const float sigmaLevel2,
cv::Mat &x3Dtriangulated)
{
cv::Mat Rcw1 = Tcw1.colRange(0, 3).rowRange(0, 3);
cv::Mat Rwc1 = Rcw1.t();
cv::Mat tcw1 = Tcw1.rowRange(0, 3).col(3);
cv::Mat Rcw2 = Tcw2.colRange(0, 3).rowRange(0, 3);
cv::Mat Rwc2 = Rcw2.t();
cv::Mat tcw2 = Tcw2.rowRange(0, 3).col(3);
cv::Point3f ray1c = this->unproject(kp1.pt);
cv::Point3f ray2c = pOther->unproject(kp2.pt);
cv::Mat r1(3, 1, CV_32F);
r1.at<float>(0) = ray1c.x;
r1.at<float>(1) = ray1c.y;
r1.at<float>(2) = ray1c.z;
cv::Mat r2(3, 1, CV_32F);
r2.at<float>(0) = ray2c.x;
r2.at<float>(1) = ray2c.y;
r2.at<float>(2) = ray2c.z;
//Check parallax between rays
cv::Mat ray1 = Rwc1 * r1;
cv::Mat ray2 = Rwc2 * r2;
const float cosParallaxRays = ray1.dot(ray2) / (cv::norm(ray1) * cv::norm(ray2));
//If the parallax between the rays is too small (cosine above 0.9998, i.e. less than about 1.15 degrees), reject this match
if (cosParallaxRays > 0.9998)
{
return false;
}
//Parallax is good, so we try to triangulate
cv::Point2f p11, p22;
p11.x = ray1c.x;
p11.y = ray1c.y;
@ -288,19 +409,20 @@ namespace ORB_SLAM3 {
cv::Mat x3D;
Triangulate(p11, p22, Tcw1, Tcw2, x3D);
cv::Mat x3Dt = x3D.t();
//Check triangulation in front of cameras
float z1 = Rcw1.row(2).dot(x3Dt) + tcw1.at<float>(2);
if (z1 <= 0)
{ //Point is not in front of the first camera
return false;
}
float z2 = Rcw2.row(2).dot(x3Dt) + tcw2.at<float>(2);
if (z2 <= 0)
{ //Point is not in front of the second camera
return false;
}
@ -312,7 +434,8 @@ namespace ORB_SLAM3 {
float errX1 = uv1.x - kp1.pt.x;
float errY1 = uv1.y - kp1.pt.y;
if ((errX1 * errX1 + errY1 * errY1) > 5.991 * sigmaLevel1)
{ //Reprojection error is high
return false;
}
@ -324,7 +447,8 @@ namespace ORB_SLAM3 {
float errX2 = uv2.x - kp2.pt.x;
float errY2 = uv2.y - kp2.pt.y;
if ((errX2 * errX2 + errY2 * errY2) > 5.991 * sigmaLevel2)
{ //Reprojection error is high
return false;
}
@ -333,25 +457,43 @@ namespace ORB_SLAM3 {
x3Dtriangulated = x3D.clone();
return true;
}
/**
 * @brief Triangulate a matched keypoint pair into a 3D point expressed in camera-1 coordinates
 * @param pCamera2 the second camera
 * @param kp1 keypoint in camera 1
 * @param kp2 keypoint in camera 2
 * @param R12 rotation from camera 2 to camera 1
 * @param t12 translation from camera 2 to camera 1
 * @param sigmaLevel squared scale factor of kp1's pyramid level
 * @param unc squared scale factor of kp2's pyramid level
 * @param p3D the triangulated 3D point
 * @return the depth of the point in camera 1, or -1 on failure
 */
float KannalaBrandt8::TriangulateMatches(GeometricCamera *pCamera2, const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Mat &R12, const cv::Mat &t12, const float sigmaLevel, const float unc, cv::Mat &p3D)
{
// 1. Get the normalized-plane coordinates of the two keypoints
cv::Mat r1 = this->unprojectMat(kp1.pt);
cv::Mat r2 = pCamera2->unprojectMat(kp2.pt);
//Check parallax
// 2. Check the angle between the rays
// This looks a bit like an epipolar constraint, but it is not: r2 is rotated by R12 into the same frame as r1,
// and the angle between the two rays is then checked against roughly 1.14 degrees. Frankly, this condition feels rather strict.
cv::Mat r21 = R12 * r2;
const float cosParallaxRays = r1.dot(r21) / (cv::norm(r1) * cv::norm(r21));
if (cosParallaxRays > 0.9998)
{
return -1;
}
//Parallax is good, so we try to triangulate
cv::Point2f p11, p22;
const float *pr1 = r1.ptr<float>();
const float *pr2 = r2.ptr<float>();
p11.x = pr1[0];
p11.y = pr1[1];
@ -360,34 +502,41 @@ namespace ORB_SLAM3 {
p22.y = pr2[1];
cv::Mat x3D;
// 3. Set up the camera poses used for triangulation (camera 1 is the reference)
cv::Mat Tcw1 = (cv::Mat_<float>(3, 4) << 1.f, 0.f, 0.f, 0.f,
0.f, 1.f, 0.f, 0.f,
0.f, 0.f, 1.f, 0.f);
cv::Mat Tcw2;
cv::Mat R21 = R12.t();
cv::Mat t21 = -R21 * t12;
cv::hconcat(R21, t21, Tcw2);
// 4. Triangulate
Triangulate(p11, p22, Tcw1, Tcw2, x3D);
cv::Mat x3Dt = x3D.t();
// Check that the depths are positive
float z1 = x3D.at<float>(2);
if (z1 <= 0)
{
return -1;
}
float z2 = R21.row(2).dot(x3Dt) + t21.at<float>(2);
if (z2 <= 0)
{
return -1;
}
//Check reprojection error
// 5. Reproject the point and compute the reprojection error
cv::Point2f uv1 = this->project(x3D);
float errX1 = uv1.x - kp1.pt.x;
float errY1 = uv1.y - kp1.pt.y;
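// 5.991 is the 95% chi-square threshold for 2 degrees of freedom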
if ((errX1 * errX1 + errY1 * errY1) > 5.991 * sigmaLevel)
{ //Reprojection error is high
return -1;
}
@ -397,30 +546,33 @@ namespace ORB_SLAM3 {
float errX2 = uv2.x - kp2.pt.x;
float errY2 = uv2.y - kp2.pt.y;
if ((errX2 * errX2 + errY2 * errY2) > 5.991 * unc)
{ //Reprojection error is high
return -1;
}
p3D = x3D.clone();
return z1;
}
float KannalaBrandt8::TriangulateMatches_(GeometricCamera *pCamera2, const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Matx33f &R12, const cv::Matx31f &t12, const float sigmaLevel, const float unc, cv::Matx31f &p3D)
{
cv::Matx31f r1 = this->unprojectMat_(kp1.pt);
cv::Matx31f r2 = pCamera2->unprojectMat_(kp2.pt);
//Check parallax
cv::Matx31f r21 = R12 * r2;
const float cosParallaxRays = r1.dot(r21) / (cv::norm(r1) * cv::norm(r21));
if (cosParallaxRays > 0.9998)
{
return -1;
}
//Parallax is good, so we try to triangulate
cv::Point2f p11, p22;
p11.x = r1(0);
p11.y = r1(1);
@ -429,28 +581,30 @@ namespace ORB_SLAM3 {
p22.y = r2(1);
cv::Matx31f x3D;
cv::Matx44f Tcw1{1.f, 0.f, 0.f, 0.f,
0.f, 1.f, 0.f, 0.f,
0.f, 0.f, 1.f, 0.f};
cv::Matx33f R21 = R12.t();
cv::Matx31f t21 = -R21 * t12;
cv::Matx44f Tcw2{R21(0, 0), R21(0, 1), R21(0, 2), t21(0),
R21(1, 0), R21(1, 1), R21(1, 2), t21(1),
R21(2, 0), R21(2, 1), R21(2, 2), t21(2),
0.f, 0.f, 0.f, 1.f};
Triangulate_(p11, p22, Tcw1, Tcw2, x3D);
cv::Matx13f x3Dt = x3D.t();
float z1 = x3D(2);
if (z1 <= 0)
{
return -1;
}
float z2 = R21.row(2).dot(x3Dt) + t21(2);
if (z2 <= 0)
{
return -1;
}
@ -460,7 +614,8 @@ namespace ORB_SLAM3 {
float errX1 = uv1.x - kp1.pt.x;
float errY1 = uv1.y - kp1.pt.y;
if ((errX1 * errX1 + errY1 * errY1) > 5.991 * sigmaLevel)
{ //Reprojection error is high
return -1;
}
@ -470,52 +625,68 @@ namespace ORB_SLAM3 {
float errX2 = uv2.x - kp2.pt.x;
float errY2 = uv2.y - kp2.pt.y;
if ((errX2 * errX2 + errY2 * errY2) > 5.991 * unc)
{ //Reprojection error is high
return -1;
}
p3D = x3D;
return z1;
}
std::ostream &operator<<(std::ostream &os, const KannalaBrandt8 &kb)
{
os << kb.mvParameters[0] << " " << kb.mvParameters[1] << " " << kb.mvParameters[2] << " " << kb.mvParameters[3] << " "
<< kb.mvParameters[4] << " " << kb.mvParameters[5] << " " << kb.mvParameters[6] << " " << kb.mvParameters[7];
return os;
}
std::istream &operator>>(std::istream &is, KannalaBrandt8 &kb)
{
float nextParam;
for (size_t i = 0; i < 8; i++)
{
assert(is.good()); //Make sure the input stream is good
is >> nextParam;
kb.mvParameters[i] = nextParam;
}
return is;
}
/**
 * @brief Triangulate a 3D point from two views via SVD (DLT)
 * @param p1 point observed by camera 1, in normalized (z = 1) coordinates
 * @param p2 point observed by camera 2, in normalized (z = 1) coordinates
 * @param Tcw1 3×4 pose of camera 1
 * @param Tcw2 3×4 pose of camera 2 (T21 when called from TriangulateMatches)
 * @param x3D the triangulated 3D point
 */
void KannalaBrandt8::Triangulate(const cv::Point2f &p1, const cv::Point2f &p2, const cv::Mat &Tcw1, const cv::Mat &Tcw2, cv::Mat &x3D)
{
cv::Mat A(4, 4, CV_32F);
// Triangulation is used in several places in the code: TwoViewReconstruction::Triangulate, LocalMapping::CreateNewMapPoints, KannalaBrandt8::Triangulate and Initializer::Triangulate
// See the Triangulate function in Initializer.cpp: the A matrix is built in a similar way; the difference is that there the skew-symmetric matrix is built from pixel coordinates, while here it is built from normalized coordinates
// Pc = Tcw * Pw. Multiplying both sides on the left by the skew-symmetric matrix of Pc gives [Pc]x * Tcw * Pw = 0, which builds A.
// A scale factor is involved (the points live on the normalized plane), but since the right-hand side is 0 it cancels out and does not affect the result.
//  0 -1  y     Tcw.row(0)     -Tcw.row(1) + y*Tcw.row(2)
//  1  0 -x  *  Tcw.row(1)  =   Tcw.row(0) - x*Tcw.row(2)
// -y  x  0     Tcw.row(2)     x*Tcw.row(1) - y*Tcw.row(0)
// The three rows above are linearly dependent, so only the first two are used; with two points this gives the 4-row matrix A below.
// The SVD solution is a 4-vector [X, Y, Z, W]; dividing by the last component yields the homogeneous form [X, Y, Z, 1].
A.row(0) = p1.x * Tcw1.row(2) - Tcw1.row(0);
A.row(1) = p1.y * Tcw1.row(2) - Tcw1.row(1);
A.row(2) = p2.x * Tcw2.row(2) - Tcw2.row(0);
A.row(3) = p2.y * Tcw2.row(2) - Tcw2.row(1);
cv::Mat u, w, vt;
cv::SVD::compute(A, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV);
x3D = vt.row(3).t();
x3D = x3D.rowRange(0, 3) / x3D.at<float>(3);
}
void KannalaBrandt8::Triangulate_(const cv::Point2f &p1, const cv::Point2f &p2, const cv::Matx44f &Tcw1, const cv::Matx44f &Tcw2, cv::Matx31f &x3D)
{
cv::Matx14f A0, A1, A2, A3;
A0 = p1.x * Tcw1.row(2) - Tcw1.row(0);
A1 = p1.y * Tcw1.row(2) - Tcw1.row(1);
A2 = p2.x * Tcw2.row(2) - Tcw2.row(0);
@ -529,9 +700,9 @@ namespace ORB_SLAM3 {
cv::Matx44f u, vt;
cv::Matx41f w;
cv::SVD::compute(A, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV);
cv::Matx41f x3D_h = vt.row(3).t();
// x3D = x3D_h.get_minor<3,1>(0,0) / x3D_h(3);
x3D = cv::Matx31f(x3D_h.get_minor<3, 1>(0, 0)(0) / x3D_h(3), x3D_h.get_minor<3, 1>(0, 0)(1) / x3D_h(3), x3D_h.get_minor<3, 1>(0, 0)(2) / x3D_h(3));
}
}

src/CameraModels/Pinhole.cpp

@ -20,60 +20,118 @@
#include <boost/serialization/export.hpp>
namespace ORB_SLAM3
{
long unsigned int GeometricCamera::nNextId = 0;
/**
 * @brief Project a 3D point (camera coordinates) into pixel coordinates
 * @param p3D 3D point
 * @return pixel coordinates
 */
cv::Point2f Pinhole::project(const cv::Point3f &p3D)
{
return cv::Point2f(mvParameters[0] * p3D.x / p3D.z + mvParameters[2],
mvParameters[1] * p3D.y / p3D.z + mvParameters[3]);
}
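// Minimal usage sketch (hypothetical intrinsics; assumes the std::vector<float> constructor taking {fx, fy, cx, cy}):
//   ORB_SLAM3::Pinhole cam({458.f, 457.f, 367.f, 248.f});
//   cv::Point2f uv = cam.project(cv::Point3f(0.1f, -0.2f, 1.5f)); // pixel coordinates of the 3D point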
/**
 * @brief Project a 3D point stored as cv::Matx31f
 * @param m3D 3D point
 * @return pixel coordinates
 */
cv::Point2f Pinhole::project(const cv::Matx31f &m3D)
{
return this->project(cv::Point3f(m3D(0), m3D(1), m3D(2)));
}
/**
 * @brief Project a 3D point stored as a cv::Mat
 * @param m3D 3D point
 * @return pixel coordinates
 */
cv::Point2f Pinhole::project(const cv::Mat &m3D)
{
const float *p3D = m3D.ptr<float>();
return this->project(cv::Point3f(p3D[0], p3D[1], p3D[2]));
}
/**
 * @brief Project a 3D point (Eigen, double precision overload)
 * @param v3D 3D point
 * @return pixel coordinates
 */
Eigen::Vector2d Pinhole::project(const Eigen::Vector3d &v3D)
{
Eigen::Vector2d res;
res[0] = mvParameters[0] * v3D[0] / v3D[2] + mvParameters[2];
res[1] = mvParameters[1] * v3D[1] / v3D[2] + mvParameters[3];
return res;
}
/**
 * @brief Project a 3D point and return the result as a 2x1 cv::Mat
 * @param p3D 3D point
 * @return pixel coordinates as cv::Mat
 */
cv::Mat Pinhole::projectMat(const cv::Point3f &p3D)
{
cv::Point2f point = this->project(p3D);
return (cv::Mat_<float>(2, 1) << point.x, point.y);
}
/**
 * @brief Measurement uncertainty; not modeled for this camera model, always returns 1
 */
float Pinhole::uncertainty2(const Eigen::Matrix<double, 2, 1> &p2D)
{
return 1.0;
}
/**
 * @brief Unproject a pixel to a point on the z = 1 plane
 * @param p2D pixel coordinates
 * @return 3D point on the normalized plane
 */
cv::Point3f Pinhole::unproject(const cv::Point2f &p2D)
{
return cv::Point3f((p2D.x - mvParameters[2]) / mvParameters[0], (p2D.y - mvParameters[3]) / mvParameters[1],
1.f);
}
/**
 * @brief Unproject a pixel to a point on the z = 1 plane, returned as cv::Mat
 * @param p2D pixel coordinates
 * @return 3D point as cv::Mat
 */
cv::Mat Pinhole::unprojectMat(const cv::Point2f &p2D)
{
cv::Point3f ray = this->unproject(p2D);
return (cv::Mat_<float>(3, 1) << ray.x, ray.y, ray.z);
}
/**
 * @brief Unproject a pixel to a point on the z = 1 plane, returned as cv::Matx31f
 * @param p2D pixel coordinates
 * @return 3D point as cv::Matx31f
 */
cv::Matx31f Pinhole::unprojectMat_(const cv::Point2f &p2D)
{
cv::Point3f ray = this->unproject(p2D);
cv::Matx31f r{ray.x, ray.y, ray.z};
return r;
}
/**
 * @brief Jacobian of the projection with respect to the 3D point
 * @param p3D 3D point
 * @return 2x3 Jacobian
 */
cv::Mat Pinhole::projectJac(const cv::Point3f &p3D)
{
cv::Mat Jac(2, 3, CV_32F);
Jac.at<float>(0, 0) = mvParameters[0] / p3D.z;
Jac.at<float>(0, 1) = 0.f;
@ -83,9 +141,15 @@ namespace ORB_SLAM3 {
Jac.at<float>(1, 2) = -mvParameters[1] * p3D.y / (p3D.z * p3D.z);
return Jac;
}
/**
 * @brief Jacobian of the projection with respect to the 3D point (Eigen version)
 * @param v3D 3D point
 * @return 2x3 Jacobian
 */
Eigen::Matrix<double, 2, 3> Pinhole::projectJac(const Eigen::Vector3d &v3D)
{
Eigen::Matrix<double, 2, 3> Jac;
Jac(0, 0) = mvParameters[0] / v3D[2];
Jac(0, 1) = 0.f;
@ -95,9 +159,15 @@ namespace ORB_SLAM3 {
Jac(1, 2) = -mvParameters[1] * v3D[1] / (v3D[2] * v3D[2]);
return Jac;
}
/**
 * @brief Jacobian of the unprojection with respect to the pixel coordinates
 * @param p2D pixel coordinates
 * @return 3x2 Jacobian
 */
cv::Mat Pinhole::unprojectJac(const cv::Point2f &p2D)
{
cv::Mat Jac(3, 2, CV_32F);
Jac.at<float>(0, 0) = 1 / mvParameters[0];
Jac.at<float>(0, 1) = 0.f;
@ -107,108 +177,161 @@ namespace ORB_SLAM3 {
Jac.at<float>(2, 1) = 0.f;
return Jac;
}
/** Triangulate to recover 3D points
 * @param vKeys1 keypoints in frame 1
 * @param vKeys2 keypoints in frame 2
 * @param vMatches12 matches; vMatches12[i] is the index in vKeys2 of the point matched to vKeys1[i]
 * @param R21 recovered rotation from frame 1 to frame 2
 * @param t21 recovered translation from frame 1 to frame 2
 * @param vP3D triangulated 3D points
 * @param vbTriangulated flags marking which matches were successfully triangulated
 */
bool Pinhole::ReconstructWithTwoViews(const std::vector<cv::KeyPoint> &vKeys1, const std::vector<cv::KeyPoint> &vKeys2, const std::vector<int> &vMatches12,
cv::Mat &R21, cv::Mat &t21, std::vector<cv::Point3f> &vP3D, std::vector<bool> &vbTriangulated)
{
if (!tvr)
{
cv::Mat K = this->toK();
tvr = new TwoViewReconstruction(K);
}
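// Unlike the fisheye model, no undistortion is done here; the keypoints are expected to be undistorted already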
return tvr->Reconstruct(vKeys1, vKeys2, vMatches12, R21, t21, vP3D, vbTriangulated);
}
/**
 * @brief Return the calibration matrix
 * @return K
 */
cv::Mat Pinhole::toK()
{
cv::Mat K = (cv::Mat_<float>(3, 3) << mvParameters[0], 0.f, mvParameters[2], 0.f, mvParameters[1], mvParameters[3], 0.f, 0.f, 1.f);
return K;
}
/**
 * @brief Return the calibration matrix as cv::Matx33f
 * @return K
 */
cv::Matx33f Pinhole::toK_()
{
cv::Matx33f K{mvParameters[0], 0.f, mvParameters[2], 0.f, mvParameters[1], mvParameters[3], 0.f, 0.f, 1.f};
return K;
}
/**
 * @brief Epipolar constraint check: distance of kp2 to the epipolar line induced by kp1
 * @param pCamera2 the second camera
 * @param kp1 keypoint in camera 1
 * @param kp2 keypoint in camera 2
 * @param R12 rotation from camera 2 to camera 1
 * @param t12 translation from camera 2 to camera 1
 * @param sigmaLevel squared scale factor of kp1's pyramid level
 * @param unc squared scale factor of kp2's pyramid level
 * @return true if the squared distance is below the threshold
 */
bool Pinhole::epipolarConstrain(GeometricCamera *pCamera2, const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Mat &R12, const cv::Mat &t12, const float sigmaLevel, const float unc)
{
//Compute Fundamental Matrix
cv::Mat t12x = SkewSymmetricMatrix(t12);
cv::Mat K1 = this->toK();
cv::Mat K2 = pCamera2->toK();
cv::Mat F12 = K1.t().inv() * t12x * R12 * K2.inv();
// Epipolar line in second image l = x1'F12 = [a b c]
const float a = kp1.pt.x * F12.at<float>(0, 0) + kp1.pt.y * F12.at<float>(1, 0) + F12.at<float>(2, 0);
const float b = kp1.pt.x * F12.at<float>(0, 1) + kp1.pt.y * F12.at<float>(1, 1) + F12.at<float>(2, 1);
const float c = kp1.pt.x * F12.at<float>(0, 2) + kp1.pt.y * F12.at<float>(1, 2) + F12.at<float>(2, 2);
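// Squared distance from kp2 to the line l = [a, b, c]: (a*u2 + b*v2 + c)^2 / (a^2 + b^2)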
const float num = a * kp2.pt.x + b * kp2.pt.y + c;
const float den = a * a + b * b;
if (den == 0)
return false;
const float dsqr = num * num / den;
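// 3.84 is the 95% chi-square threshold for 1 degree of freedom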
return dsqr < 3.84 * unc;
}
/**
 * @brief Epipolar constraint check (cv::Matx version): distance of kp2 to the epipolar line induced by kp1
 * @param pCamera2 the second camera
 * @param kp1 keypoint in camera 1
 * @param kp2 keypoint in camera 2
 * @param R12 rotation from camera 2 to camera 1
 * @param t12 translation from camera 2 to camera 1
 * @param sigmaLevel squared scale factor of kp1's pyramid level
 * @param unc squared scale factor of kp2's pyramid level
 * @return true if the squared distance is below the threshold
 */
bool Pinhole::epipolarConstrain_(GeometricCamera *pCamera2, const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Matx33f &R12, const cv::Matx31f &t12, const float sigmaLevel, const float unc)
{
//Compute Fundamental Matrix
auto t12x = SkewSymmetricMatrix_(t12);
auto K1 = this->toK_();
auto K2 = pCamera2->toK_();
cv::Matx33f F12 = K1.t().inv() * t12x * R12 * K2.inv();
// Epipolar line in second image l = x1'F12 = [a b c]
const float a = kp1.pt.x * F12(0, 0) + kp1.pt.y * F12(1, 0) + F12(2, 0);
const float b = kp1.pt.x * F12(0, 1) + kp1.pt.y * F12(1, 1) + F12(2, 1);
const float c = kp1.pt.x * F12(0, 2) + kp1.pt.y * F12(1, 2) + F12(2, 2);
const float num = a * kp2.pt.x + b * kp2.pt.y + c;
const float den = a * a + b * b;
if (den == 0)
return false;
const float dsqr = num * num / den;
return dsqr < 3.84 * unc;
}
std::ostream &operator<<(std::ostream &os, const Pinhole &ph)
{
os << ph.mvParameters[0] << " " << ph.mvParameters[1] << " " << ph.mvParameters[2] << " " << ph.mvParameters[3];
return os;
}
std::istream &operator>>(std::istream &is, Pinhole &ph)
{
float nextParam;
for (size_t i = 0; i < 4; i++)
{
assert(is.good()); //Make sure the input stream is good
is >> nextParam;
ph.mvParameters[i] = nextParam;
}
return is;
}
/**
 * @brief Skew-symmetric matrix [v]x of a 3-vector
 */
cv::Mat Pinhole::SkewSymmetricMatrix(const cv::Mat &v)
{
return (cv::Mat_<float>(3, 3) << 0, -v.at<float>(2), v.at<float>(1),
v.at<float>(2), 0, -v.at<float>(0),
-v.at<float>(1), v.at<float>(0), 0);
}
/**
 * @brief Skew-symmetric matrix [v]x of a 3-vector (cv::Matx version)
 */
cv::Matx33f Pinhole::SkewSymmetricMatrix_(const cv::Matx31f &v)
{
cv::Matx33f skew{0.f, -v(2), v(1),
v(2), 0.f, -v(0),
-v(1), v(0), 0.f};
return skew;
}
}


@ -922,8 +922,13 @@ Eigen::Matrix3d Skew(const Eigen::Vector3d &w)
return W;
}
// BUG: this should be changed to svd.matrixU() * svd.matrixV().transpose()
Eigen::Matrix3d NormalizeRotation(const Eigen::Matrix3d &R)
{
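// Project the matrix onto SO(3): with R = U * Σ * V^T, the closest rotation matrix is U * V^T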
// A few things worth noting here:
// 1. For a matrix with equal row and column counts, Eigen::ComputeThinU | Eigen::ComputeThinV gives the same result as Eigen::ComputeFullU | Eigen::ComputeFullV
// 2. For a matrix with different row and column counts, e.g. 3x4 or 4x3, there are only 3 singular vectors; with Thin the computed U and V have only 3 columns, with Full they have 4
// 3. Thin discards part of the data but speeds up the computation; for large matrices, Thin can be used to solve equations faster
Eigen::JacobiSVD<Eigen::Matrix3d> svd(R,Eigen::ComputeFullU | Eigen::ComputeFullV);
return svd.matrixU()*svd.matrixV();
}