Update annotations

master
laobai 2022-05-21 23:56:11 +08:00
parent 2fd8754e44
commit 0b0f2d49ed
6 changed files with 146 additions and 108 deletions

View File

@ -294,6 +294,7 @@ public:
void Update(const double *pu)
{
// Forcing the optimizer to update unobservable quantities would cause divergence
Rwg = Rwg * ExpSO3(pu[0], pu[1], 0.0);
}
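Only the first two components of the increment are used above: rotation about the gravity axis (yaw) is unobservable for a visual-inertial system, so that degree of freedom is pinned to zero. A minimal standalone sketch of the same update, with Sophus standing in for the repo's ExpSO3 helper (everything here is an illustration, not the repo's code):

#include <Eigen/Dense>
#include <sophus/so3.hpp>

// Hedged sketch: update only roll/pitch of the gravity rotation Rwg.
void UpdateRwg(Eigen::Matrix3d &Rwg, const double *pu)
{
    const Eigen::Vector3d dw(pu[0], pu[1], 0.0);   // yaw increment forced to zero
    Rwg = Rwg * Sophus::SO3d::exp(dw).matrix();    // right-multiplicative update
}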
@ -436,6 +437,7 @@ public:
}
// Turns the 2x2 pixel information matrix into a 9x9 information matrix over rotation, translation and the 3D point coordinates
// It can be read as how much uncertainty the pixel measurement induces on the rotation, translation and 3D point
Eigen::Matrix<double, 9, 9> GetHessian()
{
linearizeOplus();
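The comment above describes the Gauss-Newton pullback H = J^T * Omega * J: the 2x3 point Jacobian and the 2x6 pose Jacobian are stacked into a 2x9 matrix and sandwiched around the 2x2 pixel information matrix. A hedged standalone sketch of that computation (all names are local to the sketch, not the edge's members):

#include <Eigen/Dense>

Eigen::Matrix<double, 9, 9> HessianFromPixelInfo(
    const Eigen::Matrix<double, 2, 3> &Jpoint,  // d(error)/d(3D point)
    const Eigen::Matrix<double, 2, 6> &Jpose,   // d(error)/d(pose)
    const Eigen::Matrix2d &Omega)               // 2x2 pixel information matrix
{
    Eigen::Matrix<double, 2, 9> J;
    J << Jpoint, Jpose;                          // stack into a 2x9 Jacobian
    return J.transpose() * Omega * J;            // 9x9 Gauss-Newton Hessian block
}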

View File

@ -1,20 +1,20 @@
/**
* This file is part of ORB-SLAM3
*
* Copyright (C) 2017-2021 Carlos Campos, Richard Elvira, Juan J. Gómez Rodríguez, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
* Copyright (C) 2014-2016 Raúl Mur-Artal, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
*
* ORB-SLAM3 is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM3 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with ORB-SLAM3.
* If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef ORB_SLAM3_OPTIMIZABLETYPES_H
#define ORB_SLAM3_OPTIMIZABLETYPES_H
@ -26,194 +26,214 @@
#include <Eigen/Geometry>
#include <include/CameraModels/GeometricCamera.h>
namespace ORB_SLAM3 {
class EdgeSE3ProjectXYZOnlyPose: public g2o::BaseUnaryEdge<2, Eigen::Vector2d, g2o::VertexSE3Expmap>{
namespace ORB_SLAM3
{
// Unary edge for left-camera pose-only optimization: reprojection error of a left-image point with respect to the left-camera pose
class EdgeSE3ProjectXYZOnlyPose : public g2o::BaseUnaryEdge<2, Eigen::Vector2d, g2o::VertexSE3Expmap>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
EdgeSE3ProjectXYZOnlyPose(){}
EdgeSE3ProjectXYZOnlyPose() {}
bool read(std::istream& is);
bool read(std::istream &is);
bool write(std::ostream& os) const;
bool write(std::ostream &os) const;
void computeError() {
const g2o::VertexSE3Expmap* v1 = static_cast<const g2o::VertexSE3Expmap*>(_vertices[0]);
void computeError()
{
const g2o::VertexSE3Expmap *v1 = static_cast<const g2o::VertexSE3Expmap *>(_vertices[0]);
Eigen::Vector2d obs(_measurement);
_error = obs-pCamera->project(v1->estimate().map(Xw));
_error = obs - pCamera->project(v1->estimate().map(Xw));
}
bool isDepthPositive() {
const g2o::VertexSE3Expmap* v1 = static_cast<const g2o::VertexSE3Expmap*>(_vertices[0]);
return (v1->estimate().map(Xw))(2)>0.0;
bool isDepthPositive()
{
const g2o::VertexSE3Expmap *v1 = static_cast<const g2o::VertexSE3Expmap *>(_vertices[0]);
return (v1->estimate().map(Xw))(2) > 0.0;
}
virtual void linearizeOplus();
Eigen::Vector3d Xw;
GeometricCamera* pCamera;
GeometricCamera *pCamera;
};
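A hedged usage sketch of how such a pose-only edge is typically wired into a g2o optimizer; the include paths and all helper names below are assumptions for illustration, not the repo's exact code:

#include <Eigen/Dense>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/robust_kernel_impl.h>
#include <g2o/types/sba/types_six_dof_expmap.h>   // g2o::VertexSE3Expmap (path assumed)
#include "OptimizableTypes.h"                      // the edge defined above

void AddPoseOnlyObservation(g2o::SparseOptimizer &optimizer,
                            g2o::VertexSE3Expmap *vPose,
                            const Eigen::Vector2d &obs,
                            const Eigen::Vector3d &Xw,
                            ORB_SLAM3::GeometricCamera *pCam,
                            double invSigma2)
{
    auto *e = new ORB_SLAM3::EdgeSE3ProjectXYZOnlyPose();
    e->setVertex(0, vPose);                                    // the only optimized vertex
    e->setMeasurement(obs);                                    // observed pixel
    e->setInformation(Eigen::Matrix2d::Identity() * invSigma2);
    e->Xw = Xw;                                                // 3D point is held fixed
    e->pCamera = pCam;                                         // camera model used by project()
    e->setRobustKernel(new g2o::RobustKernelHuber());          // down-weight outliers
    optimizer.addEdge(e);
}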
class EdgeSE3ProjectXYZOnlyPoseToBody: public g2o::BaseUnaryEdge<2, Eigen::Vector2d, g2o::VertexSE3Expmap>{
// Unary edge for pose-only optimization on the right camera of a two-camera rig: right-image reprojection error with respect to the left-camera pose
class EdgeSE3ProjectXYZOnlyPoseToBody : public g2o::BaseUnaryEdge<2, Eigen::Vector2d, g2o::VertexSE3Expmap>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
EdgeSE3ProjectXYZOnlyPoseToBody(){}
EdgeSE3ProjectXYZOnlyPoseToBody() {}
bool read(std::istream& is);
bool read(std::istream &is);
bool write(std::ostream& os) const;
bool write(std::ostream &os) const;
void computeError() {
const g2o::VertexSE3Expmap* v1 = static_cast<const g2o::VertexSE3Expmap*>(_vertices[0]);
void computeError()
{
const g2o::VertexSE3Expmap *v1 = static_cast<const g2o::VertexSE3Expmap *>(_vertices[0]);
Eigen::Vector2d obs(_measurement);
_error = obs-pCamera->project((mTrl * v1->estimate()).map(Xw));
_error = obs - pCamera->project((mTrl * v1->estimate()).map(Xw));
}
bool isDepthPositive() {
const g2o::VertexSE3Expmap* v1 = static_cast<const g2o::VertexSE3Expmap*>(_vertices[0]);
return ((mTrl * v1->estimate()).map(Xw))(2)>0.0;
bool isDepthPositive()
{
const g2o::VertexSE3Expmap *v1 = static_cast<const g2o::VertexSE3Expmap *>(_vertices[0]);
return ((mTrl * v1->estimate()).map(Xw))(2) > 0.0;
}
virtual void linearizeOplus();
Eigen::Vector3d Xw;
GeometricCamera* pCamera;
GeometricCamera *pCamera;
g2o::SE3Quat mTrl;
};
class EdgeSE3ProjectXYZ: public g2o::BaseBinaryEdge<2, Eigen::Vector2d, g2o::VertexSBAPointXYZ, g2o::VertexSE3Expmap>{
// Binary edge for the left camera: reprojection error of a left-image point with respect to the left-camera pose and the 3D point
class EdgeSE3ProjectXYZ : public g2o::BaseBinaryEdge<2, Eigen::Vector2d, g2o::VertexSBAPointXYZ, g2o::VertexSE3Expmap>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
EdgeSE3ProjectXYZ();
bool read(std::istream& is);
bool read(std::istream &is);
bool write(std::ostream& os) const;
bool write(std::ostream &os) const;
void computeError() {
const g2o::VertexSE3Expmap* v1 = static_cast<const g2o::VertexSE3Expmap*>(_vertices[1]);
const g2o::VertexSBAPointXYZ* v2 = static_cast<const g2o::VertexSBAPointXYZ*>(_vertices[0]);
void computeError()
{
const g2o::VertexSE3Expmap *v1 = static_cast<const g2o::VertexSE3Expmap *>(_vertices[1]);
const g2o::VertexSBAPointXYZ *v2 = static_cast<const g2o::VertexSBAPointXYZ *>(_vertices[0]);
Eigen::Vector2d obs(_measurement);
_error = obs-pCamera->project(v1->estimate().map(v2->estimate()));
_error = obs - pCamera->project(v1->estimate().map(v2->estimate()));
}
bool isDepthPositive() {
const g2o::VertexSE3Expmap* v1 = static_cast<const g2o::VertexSE3Expmap*>(_vertices[1]);
const g2o::VertexSBAPointXYZ* v2 = static_cast<const g2o::VertexSBAPointXYZ*>(_vertices[0]);
return ((v1->estimate().map(v2->estimate()))(2)>0.0);
bool isDepthPositive()
{
const g2o::VertexSE3Expmap *v1 = static_cast<const g2o::VertexSE3Expmap *>(_vertices[1]);
const g2o::VertexSBAPointXYZ *v2 = static_cast<const g2o::VertexSBAPointXYZ *>(_vertices[0]);
return ((v1->estimate().map(v2->estimate()))(2) > 0.0);
}
virtual void linearizeOplus();
GeometricCamera* pCamera;
GeometricCamera *pCamera;
};
class EdgeSE3ProjectXYZToBody: public g2o::BaseBinaryEdge<2, Eigen::Vector2d, g2o::VertexSBAPointXYZ, g2o::VertexSE3Expmap>{
// Binary edge for the right camera of a two-camera rig: right-image reprojection error with respect to the left-camera pose and the 3D point
class EdgeSE3ProjectXYZToBody : public g2o::BaseBinaryEdge<2, Eigen::Vector2d, g2o::VertexSBAPointXYZ, g2o::VertexSE3Expmap>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
EdgeSE3ProjectXYZToBody();
bool read(std::istream& is);
bool read(std::istream &is);
bool write(std::ostream& os) const;
bool write(std::ostream &os) const;
void computeError() {
const g2o::VertexSE3Expmap* v1 = static_cast<const g2o::VertexSE3Expmap*>(_vertices[1]);
const g2o::VertexSBAPointXYZ* v2 = static_cast<const g2o::VertexSBAPointXYZ*>(_vertices[0]);
void computeError()
{
const g2o::VertexSE3Expmap *v1 = static_cast<const g2o::VertexSE3Expmap *>(_vertices[1]);
const g2o::VertexSBAPointXYZ *v2 = static_cast<const g2o::VertexSBAPointXYZ *>(_vertices[0]);
Eigen::Vector2d obs(_measurement);
_error = obs-pCamera->project((mTrl * v1->estimate()).map(v2->estimate()));
_error = obs - pCamera->project((mTrl * v1->estimate()).map(v2->estimate()));
}
bool isDepthPositive() {
const g2o::VertexSE3Expmap* v1 = static_cast<const g2o::VertexSE3Expmap*>(_vertices[1]);
const g2o::VertexSBAPointXYZ* v2 = static_cast<const g2o::VertexSBAPointXYZ*>(_vertices[0]);
return ((mTrl * v1->estimate()).map(v2->estimate()))(2)>0.0;
bool isDepthPositive()
{
const g2o::VertexSE3Expmap *v1 = static_cast<const g2o::VertexSE3Expmap *>(_vertices[1]);
const g2o::VertexSBAPointXYZ *v2 = static_cast<const g2o::VertexSBAPointXYZ *>(_vertices[0]);
return ((mTrl * v1->estimate()).map(v2->estimate()))(2) > 0.0;
}
virtual void linearizeOplus();
GeometricCamera* pCamera;
GeometricCamera *pCamera;
g2o::SE3Quat mTrl;
};
// Sim3 vertex
class VertexSim3Expmap : public g2o::BaseVertex<7, g2o::Sim3>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
VertexSim3Expmap();
virtual bool read(std::istream& is);
virtual bool write(std::ostream& os) const;
virtual bool read(std::istream &is);
virtual bool write(std::ostream &os) const;
virtual void setToOriginImpl() {
// Origin value (identity Sim3)
virtual void setToOriginImpl()
{
_estimate = g2o::Sim3();
}
virtual void oplusImpl(const double* update_)
// Update
virtual void oplusImpl(const double *update_)
{
Eigen::Map<g2o::Vector7d> update(const_cast<double*>(update_));
Eigen::Map<g2o::Vector7d> update(const_cast<double *>(update_));
if (_fix_scale)
update[6] = 0;
g2o::Sim3 s(update);
setEstimate(s*estimate());
setEstimate(s * estimate());
}
GeometricCamera* pCamera1, *pCamera2;
GeometricCamera *pCamera1, *pCamera2;
bool _fix_scale;
};
class EdgeSim3ProjectXYZ : public g2o::BaseBinaryEdge<2, Eigen::Vector2d, g2o::VertexSBAPointXYZ, ORB_SLAM3::VertexSim3Expmap>
// Sim3 projection edge (projects through camera 1)
class EdgeSim3ProjectXYZ : public g2o::BaseBinaryEdge<2, Eigen::Vector2d, g2o::VertexSBAPointXYZ, ORB_SLAM3::VertexSim3Expmap>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
EdgeSim3ProjectXYZ();
virtual bool read(std::istream& is);
virtual bool write(std::ostream& os) const;
virtual bool read(std::istream &is);
virtual bool write(std::ostream &os) const;
void computeError()
{
const ORB_SLAM3::VertexSim3Expmap* v1 = static_cast<const ORB_SLAM3::VertexSim3Expmap*>(_vertices[1]);
const g2o::VertexSBAPointXYZ* v2 = static_cast<const g2o::VertexSBAPointXYZ*>(_vertices[0]);
const ORB_SLAM3::VertexSim3Expmap *v1 = static_cast<const ORB_SLAM3::VertexSim3Expmap *>(_vertices[1]);
const g2o::VertexSBAPointXYZ *v2 = static_cast<const g2o::VertexSBAPointXYZ *>(_vertices[0]);
Eigen::Vector2d obs(_measurement);
_error = obs-v1->pCamera1->project(v1->estimate().map(v2->estimate()));
_error = obs - v1->pCamera1->project(v1->estimate().map(v2->estimate()));
}
// linearizeOplus is not overridden, so the Jacobian is left to g2o's default differentiation (g2o also offers automatic differentiation)
// virtual void linearizeOplus();
};
class EdgeInverseSim3ProjectXYZ : public g2o::BaseBinaryEdge<2, Eigen::Vector2d, g2o::VertexSBAPointXYZ, VertexSim3Expmap>
// Sim3 inverse-projection edge (projects through the inverse Sim3 into camera 2)
class EdgeInverseSim3ProjectXYZ : public g2o::BaseBinaryEdge<2, Eigen::Vector2d, g2o::VertexSBAPointXYZ, VertexSim3Expmap>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
EdgeInverseSim3ProjectXYZ();
virtual bool read(std::istream& is);
virtual bool write(std::ostream& os) const;
virtual bool read(std::istream &is);
virtual bool write(std::ostream &os) const;
void computeError()
{
const ORB_SLAM3::VertexSim3Expmap* v1 = static_cast<const ORB_SLAM3::VertexSim3Expmap*>(_vertices[1]);
const g2o::VertexSBAPointXYZ* v2 = static_cast<const g2o::VertexSBAPointXYZ*>(_vertices[0]);
const ORB_SLAM3::VertexSim3Expmap *v1 = static_cast<const ORB_SLAM3::VertexSim3Expmap *>(_vertices[1]);
const g2o::VertexSBAPointXYZ *v2 = static_cast<const g2o::VertexSBAPointXYZ *>(_vertices[0]);
Eigen::Vector2d obs(_measurement);
_error = obs-v1->pCamera2->project((v1->estimate().inverse().map(v2->estimate())));
_error = obs - v1->pCamera2->project((v1->estimate().inverse().map(v2->estimate())));
}
// linearizeOplus is not overridden, so the Jacobian is left to g2o's default differentiation (g2o also offers automatic differentiation)
// virtual void linearizeOplus();
};
}
#endif //ORB_SLAM3_OPTIMIZABLETYPES_H
#endif // ORB_SLAM3_OPTIMIZABLETYPES_H

View File

@ -225,6 +225,7 @@ void ImuCamPose::Update(const double *pu)
ut << pu[3], pu[4], pu[5];
// Update body pose
// What gets updated here is the IMU (body) pose
twb += Rwb * ut;
Rwb = Rwb * ExpSO3(ur);
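The increment lives in the body frame: the translation step is rotated by Rwb before being added, and the rotation step is applied on the right. A minimal standalone sketch of the same convention, with Sophus standing in for the repo's ExpSO3 (an illustration, not the repo's code):

#include <Eigen/Dense>
#include <sophus/so3.hpp>

// Hedged sketch of the right-multiplicative IMU pose update.
void UpdateImuPose(Eigen::Matrix3d &Rwb, Eigen::Vector3d &twb, const double *pu)
{
    const Eigen::Vector3d ur(pu[0], pu[1], pu[2]);   // rotation increment (body frame)
    const Eigen::Vector3d ut(pu[3], pu[4], pu[5]);   // translation increment (body frame)
    twb += Rwb * ut;                                  // express the step in world coordinates
    Rwb  = Rwb * Sophus::SO3d::exp(ur).matrix();      // right-multiplicative rotation update
}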
@ -398,6 +399,11 @@ void EdgeMono::linearizeOplus()
const Eigen::Matrix3d &Rcb = VPose->estimate().Rcb[cam_idx];
const Eigen::Matrix<double,2,3> proj_jac = VPose->estimate().pCamera[cam_idx]->projectJac(Xc);
// error = measurement - prediction
// the leading -1 is the derivative of the error with respect to the prediction
// proj_jac: Jacobian of the pixel with respect to the 3D point in the camera frame
// Rcw: Jacobian of the camera-frame 3D point with respect to the world-frame 3D point
_jacobianOplusXi = -proj_jac * Rcw;
Eigen::Matrix<double,3,6> SE3deriv;
@ -409,7 +415,16 @@ void EdgeMono::linearizeOplus()
-z , 0.0, x, 0.0, 1.0, 0.0,
y , -x, 0.0, 0.0, 0.0, 1.0;
_jacobianOplusXj = proj_jac * Rcb * SE3deriv; // TODO optimize this product
// proj_jac: Jacobian of the pixel with respect to the 3D point in the camera frame
// Rcb: Jacobian of the camera-frame 3D point with respect to the IMU-frame 3D point
// SE3deriv: Jacobian of the IMU-frame 3D point with respect to the IMU rotation and translation
// Rwb.t() * (Pw - twb) = Pb
// The Jacobians of Pb with respect to Rwb and twb are derived with right perturbations: Rwb -> Rwb * EXP(φ), twb -> twb + Rwb * δt
// Substituting the rotation perturbation: (Rwb * EXP(φ)).t() * (Pw - twb) - Rwb.t() * (Pw - twb)
// Expanding to first order, the Jacobian of the IMU-frame point with respect to the IMU rotation is Pb^ (the skew-symmetric matrix of Pb)
// Likewise, the translation perturbation gives the Jacobian with respect to the IMU translation as -I
// The sign looks flipped, but it cancels against the sign carried in proj_jac, so the expression is correct
_jacobianOplusXj = proj_jac * Rcb * SE3deriv; // sign differs because of the update convention
}
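A hedged finite-difference check of the derivation in the comments above (everything below is local to the sketch): with Pb(φ, δt) = (Rwb·EXP(φ))^T · (Pw − (twb + Rwb·δt)), the analytic Jacobians at φ = δt = 0 should come out as skew(Pb) and −I.

#include <Eigen/Dense>
#include <sophus/so3.hpp>
#include <iostream>

int main()
{
    const Eigen::Matrix3d Rwb = Sophus::SO3d::exp(Eigen::Vector3d(0.1, -0.2, 0.3)).matrix();
    const Eigen::Vector3d twb(0.5, 1.0, -0.7), Pw(2.0, -1.0, 3.0);
    const Eigen::Vector3d Pb = Rwb.transpose() * (Pw - twb);

    const double eps = 1e-6;
    Eigen::Matrix3d Jphi_num, Jt_num;
    for (int i = 0; i < 3; ++i)
    {
        Eigen::Vector3d d = Eigen::Vector3d::Zero();
        d(i) = eps;
        // right-perturb the rotation: Rwb -> Rwb * EXP(d)
        Jphi_num.col(i) = ((Rwb * Sophus::SO3d::exp(d).matrix()).transpose() * (Pw - twb) - Pb) / eps;
        // perturb the translation in the body frame: twb -> twb + Rwb * d
        Jt_num.col(i) = (Rwb.transpose() * (Pw - (twb + Rwb * d)) - Pb) / eps;
    }
    std::cout << "dPb/dphi (should match skew(Pb)):\n" << Jphi_num << "\n";
    std::cout << "dPb/dt   (should match -I):\n" << Jt_num << "\n";
}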
/**

View File

@ -236,7 +236,7 @@ void LocalMapping::Run()
if(!mpCurrentKeyFrame->GetMap()->isImuInitialized() && mbInertial)
{
// The IMU-initialized flag is set via SetImuInitialized inside InitializeIMU
// First stage of IMU initialization
if (mbMonocular)
InitializeIMU(1e2, 1e10, true);
else
@ -259,7 +259,7 @@ void LocalMapping::Run()
// Step 9 If the time accumulated since the first stage of IMU initialization succeeded is still below the threshold (50 s here), run VIBA
if ((mTinit<50.0f) && mbInertial)
{
// Step 9.1 Decide from the conditions below whether to run VIBA1 (second stage of IMU initialization)
// Conditions: 1. the map of the current keyframe has completed IMU initialization, and 2. tracking is in a normal (OK) state
if(mpCurrentKeyFrame->GetMap()->isImuInitialized() && mpTracker->mState==Tracking::OK) // Enter here everytime local-mapping is called
{
@ -278,7 +278,7 @@ void LocalMapping::Run()
cout << "end VIBA 1" << endl;
}
}
// Step 9.2 Decide from the conditions below whether to run VIBA2 (third stage of IMU initialization)
// The map of the current keyframe has not yet completed VIBA 2
else if(!mpCurrentKeyFrame->GetMap()->GetIniertialBA2()){
if (mTinit>15.0f){
@ -1605,20 +1605,20 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA)
}
// dirG = sV1 - sVn + n*Rwg*g*t
// Normalize; this is approximately the direction of gravity expressed in the world frame
dirG = dirG/dirG.norm();
// The reference (nominal) gravity direction
Eigen::Vector3f gI(0.0f, 0.0f, -1.0f);
// Cross product of the gravity direction in the world frame and the gravity direction in the gravity-aligned frame
Eigen::Vector3f v = gI.cross(dirG);
// Norm of the cross product
const float nv = v.norm();
// Rotation angle between the two directions
const float cosg = gI.dot(dirG);
const float ang = acos(cosg);
// v/nv is the unit axis perpendicular to both vectors and ang is the rotation angle; together they form the axis-angle vector vzg
Eigen::Vector3f vzg = v*ang/nv;
// Initial value of the rotation from the gravity-aligned frame to the world frame
Rwg = Sophus::SO3f::exp(vzg).matrix();
mRwg = Rwg.cast<double>();
mTinit = mpCurrentKeyFrame->mTimeStamp-mFirstTs;
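As a design note, the cross-product / axis-angle construction above is equivalent to Eigen's two-vector rotation. A hedged standalone sketch of the same initial Rwg (gravity-aligned frame to world), assuming dirG is already normalized:

#include <Eigen/Dense>

// Hedged sketch: rotation taking the reference gravity direction gI onto the
// estimated gravity direction dirG.
Eigen::Matrix3f InitialRwg(const Eigen::Vector3f &dirG)
{
    const Eigen::Vector3f gI(0.0f, 0.0f, -1.0f);
    return Eigen::Quaternionf::FromTwoVectors(gI, dirG).toRotationMatrix();
}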
@ -1691,7 +1691,7 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA)
if (bFIBA)
{
// 5. Following the IMU-only optimization above, the scale and the gravity direction have been updated, so run a global visual-inertial optimization together with the map; this time map points etc. are included
// ! Since version 1.0 the results are no longer assigned back directly; all optimized values are stored in member variables instead
if (priorA!=0.f)
Optimizer::FullInertialBA(mpAtlas->GetCurrentMap(), 100, false, mpCurrentKeyFrame->mnId, NULL, true, priorG, priorA);
else
@ -1708,7 +1708,8 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA)
unsigned long GBAid = mpCurrentKeyFrame->mnId;
// Process keyframes in the queue
// 6. Process the newly arrived keyframes that did not take part in the optimization. Because bInitializing is true here, new keyframes only show up during the 2nd and 3rd initialization
// These keyframes also need to be updated
while(CheckNewKeyFrames())
{
ProcessNewKeyFrame();
@ -1718,7 +1719,7 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA)
// Correct keyframes starting at map first keyframe
// 7. Update poses and 3D points
// Get the initial keyframes of the map; the first keyframe has certainly been corrected
list<KeyFrame*> lpKFtoCheck(mpAtlas->GetCurrentMap()->mvpKeyFrameOrigins.begin(),mpAtlas->GetCurrentMap()->mvpKeyFrameOrigins.end());
// There is only one initial keyframe; follow the parent-child links from it to reach every connected keyframe

View File

@ -189,9 +189,9 @@ void EdgeSE3ProjectXYZ::linearizeOplus()
_jacobianOplusXi = projectJac * T.rotation().toRotationMatrix();
Eigen::Matrix<double, 3, 6> SE3deriv;
SE3deriv << 0.f, z, -y, 1.f, 0.f, 0.f,
-z, 0.f, x, 0.f, 1.f, 0.f,
y, -x, 0.f, 0.f, 0.f, 1.f;
_jacobianOplusXj = projectJac * SE3deriv;
}
@ -252,9 +252,9 @@ void EdgeSE3ProjectXYZToBody::linearizeOplus()
double z = X_l[2];
Eigen::Matrix<double, 3, 6> SE3deriv;
SE3deriv << 0.f, z, -y, 1.f, 0.f, 0.f,
-z, 0.f, x, 0.f, 1.f, 0.f,
y, -x, 0.f, 0.f, 0.f, 1.f;
/*
Note that ρlw != tlw, even though the left-camera point is written as Pl = Rlw*Pw + tlw
Pl = EXP(ξlw)*Pw,  Pr = Rrl * EXP(ξlw) * Pw + trl

View File

@ -4777,7 +4777,7 @@ void Tracking::UpdateFrameIMU(const float s, const IMU::Bias &b, KeyFrame* pCurr
// mlRelativeFramePoses stores Tcr
// The three containers iterated below correspond element by element
// mlRelativeFramePoses is used when exporting the trajectory; before initialization the stored data has no metric scale, so the scale has to be updated here
for(auto lit=mlRelativeFramePoses.begin(),lend=mlRelativeFramePoses.end();lit!=lend;lit++, lRit++, lbL++)
{
if(*lbL)
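A hedged sketch of the idea behind this loop (not the repo's exact body, which is truncated here): each stored relative pose Tcr was estimated without metric scale, so its translation is rescaled by s before the trajectory is written out, while the rotation is left untouched.

#include <sophus/se3.hpp>
#include <list>

// Hedged sketch: rescale the translation part of each stored relative pose.
void RescaleRelativePoses(std::list<Sophus::SE3f> &relPoses, float s)
{
    for (Sophus::SE3f &Tcr : relPoses)
        Tcr.translation() *= s;   // rotation is scale-free; only translation carries scale
}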