From d10c6785584a22c45f8c358b0684ee3e5694eba1 Mon Sep 17 00:00:00 2001
From: electech6
Date: Fri, 3 Sep 2021 14:24:01 +0800
Subject: [PATCH] update

---
 .../mono_inertial_tum_vi.cc      |    9 +-
 Examples/euroc_eval_examples.sh  |   36 +
 Examples/euroc_examples.sh       |  182 +++
 Examples/tum_vi_eval_examples.sh |   11 +
 Examples/tum_vi_examples.sh      |  240 ++++
 include/Atlas.h                  |    4 +-
 include/Config.h                 |    6 +
 include/G2oTypes.h               |   10 +-
 include/ImuTypes.h               |    2 +-
 include/System.h                 |    2 +
 include/Tracking.h               |    4 +-
 src/Atlas.cc                     |   16 +-
 src/Frame.cc                     |    8 +-
 src/G2oTypes.cc                  |  130 ++-
 src/ImuTypes.cc                  |  203 +++-
 src/KeyFrameDatabase.cc          |   80 +-
 src/LocalMapping.cc              |  193 +++-
 src/LoopClosing.cc               |  586 ++++++++--
 src/MLPnPsolver.cpp              | 1021 ++++++++++++-----
 src/MapPoint.cc                  |   84 +-
 src/ORBextractor.cc              |    2 +-
 src/OptimizableTypes.cpp         |   31 +-
 src/Optimizer.cc                 |  700 ++++++++-
 src/System.cc                    |    9 +-
 src/Tracking.cc                  |  635 ++++++++--
 src/TwoViewReconstruction.cc     |  987 ++++++++++------
 26 files changed, 4184 insertions(+), 1007 deletions(-)
 create mode 100644 Examples/euroc_eval_examples.sh
 create mode 100644 Examples/euroc_examples.sh
 create mode 100644 Examples/tum_vi_eval_examples.sh
 create mode 100644 Examples/tum_vi_examples.sh
 create mode 100644 include/Config.h

diff --git a/Examples/Monocular-Inertial/mono_inertial_tum_vi.cc b/Examples/Monocular-Inertial/mono_inertial_tum_vi.cc
index 30a313a..17f90d7 100644
--- a/Examples/Monocular-Inertial/mono_inertial_tum_vi.cc
+++ b/Examples/Monocular-Inertial/mono_inertial_tum_vi.cc
@@ -64,9 +64,9 @@ int main(int argc, char **argv)
     vector< vector<double> > vTimestampsCam;    // image timestamps
     vector< vector<cv::Point3f> > vAcc, vGyro;  // accelerometer and gyroscope measurements
     vector< vector<double> > vTimestampsImu;    // IMU timestamps
-    vector<int> nImages;
+    vector<int> nImages;  // number of images in each sequence
     vector<int> nImu;
-    vector<int> first_imu(num_seq,0);
+    vector<int> first_imu(num_seq,0);  // index of the IMU timestamp closest to the first image timestamp

     vstrImageFilenames.resize(num_seq);
     vTimestampsCam.resize(num_seq);
@@ -168,13 +168,16 @@ int main(int argc, char **argv)
         {
             // cout << "t_cam " << tframe << endl;

             // Step 6: store the IMU measurements between the previous image frame and the current one in vImuMeas
-            // Note: the first image frame has no corresponding IMU data //? does such a frame even exist? Counting starts from the IMU sample closest to the first image, which may be invalid
+            // Note: the first image frame has no corresponding IMU data
+            // seq: dataset sequence index; first_imu[seq]: index of the IMU timestamp in this sequence closest to the current image; ni: image index
             while(vTimestampsImu[seq][first_imu[seq]]<=vTimestampsCam[seq][ni])
             {
+                // store the accelerometer data vAcc, the gyroscope data vGyro and the timestamps vTimestampsImu
                 vImuMeas.push_back(ORB_SLAM3::IMU::Point(vAcc[seq][first_imu[seq]].x,vAcc[seq][first_imu[seq]].y,vAcc[seq][first_imu[seq]].z, vGyro[seq][first_imu[seq]].x,vGyro[seq][first_imu[seq]].y,vGyro[seq][first_imu[seq]].z, vTimestampsImu[seq][first_imu[seq]]));
                 // cout << "t_imu = " << fixed << vImuMeas.back().t << endl;
+                // advance the IMU index
                 first_imu[seq]++;
             }
         }
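Editor's note on the hunk above: it is a straightforward merge of two time-ordered streams. For every image, all IMU samples with timestamps at or before the image timestamp are batched and passed to the tracker together with that frame, and first_imu[seq] persists across frames so no sample is scanned twice. A minimal self-contained sketch of the same pattern (hypothetical types and names, not part of this patch; like the real loop, it assumes the timestamps are sorted):

#include <cstddef>
#include <vector>

struct ImuSample { double t; /* gyro and accelerometer readings omitted */ };

// Collect all IMU samples in (previous frame, current frame] for one image.
// 'cursor' persists across calls, playing the role of first_imu[seq] above.
std::vector<ImuSample> GatherImuForFrame(const std::vector<ImuSample> &imu,
                                         double tFrame, std::size_t &cursor)
{
    std::vector<ImuSample> batch;
    while (cursor < imu.size() && imu[cursor].t <= tFrame)
        batch.push_back(imu[cursor++]);
    return batch;
}

Because the cursor only moves forward, an entire sequence is processed in O(#images + #IMU samples).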
diff --git a/Examples/euroc_eval_examples.sh b/Examples/euroc_eval_examples.sh
new file mode 100644
index 0000000..67b9a4c
--- /dev/null
+++ b/Examples/euroc_eval_examples.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+pathDatasetEuroc='/Datasets/EuRoC' #Example, it is necessary to change it to the dataset path
+
+# Single Session Example (Pure visual)
+echo "Launching MH01 with Stereo sensor"
+./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Stereo/EuRoC_TimeStamps/MH01.txt dataset-MH01_stereo
+echo "------------------------------------"
+echo "Evaluation of MH01 trajectory with Stereo sensor"
+python ../evaluation/evaluate_ate_scale.py ../evaluation/Ground_truth/EuRoC_left_cam/MH01_GT.txt f_dataset-MH01_stereo.txt --plot MH01_stereo.pdf
+
+
+# MultiSession Example (Pure visual)
+echo "Launching Machine Hall with Stereo sensor"
+./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Stereo/EuRoC_TimeStamps/MH01.txt "$pathDatasetEuroc"/MH02 ./Stereo/EuRoC_TimeStamps/MH02.txt "$pathDatasetEuroc"/MH03 ./Stereo/EuRoC_TimeStamps/MH03.txt "$pathDatasetEuroc"/MH04 ./Stereo/EuRoC_TimeStamps/MH04.txt "$pathDatasetEuroc"/MH05 ./Stereo/EuRoC_TimeStamps/MH05.txt dataset-MH01_to_MH05_stereo
+echo "------------------------------------"
+echo "Evaluation of Machine Hall trajectory with Stereo sensor"
+python ../evaluation/evaluate_ate_scale.py ../evaluation/Ground_truth/EuRoC_left_cam/MH_GT.txt f_dataset-MH01_to_MH05_stereo.txt --plot MH01_to_MH05_stereo.pdf
+
+
+# Single Session Example (Visual-Inertial)
+echo "Launching V102 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V102 ./Monocular-Inertial/EuRoC_TimeStamps/V102.txt dataset-V102_monoi
+echo "------------------------------------"
+echo "Evaluation of V102 trajectory with Monocular-Inertial sensor"
+python ../evaluation/evaluate_ate_scale.py "$pathDatasetEuroc"/V102/mav0/state_groundtruth_estimate0/data.csv f_dataset-V102_monoi.txt --plot V102_monoi.pdf
+
+
+# MultiSession Monocular Examples
+
+echo "Launching Vicon Room 2 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V201 ./Monocular-Inertial/EuRoC_TimeStamps/V201.txt "$pathDatasetEuroc"/V202 ./Monocular-Inertial/EuRoC_TimeStamps/V202.txt "$pathDatasetEuroc"/V203 ./Monocular-Inertial/EuRoC_TimeStamps/V203.txt dataset-V201_to_V203_monoi
+echo "------------------------------------"
+echo "Evaluation of Vicon Room 2 trajectory with Monocular-Inertial sensor"
+python ../evaluation/evaluate_ate_scale.py ../evaluation/Ground_truth/EuRoC_imu/V2_GT.txt f_dataset-V201_to_V203_monoi.txt --plot V201_to_V203_monoi.pdf

diff --git a/Examples/euroc_examples.sh b/Examples/euroc_examples.sh
new file mode 100644
index 0000000..2e27cfb
--- /dev/null
+++ b/Examples/euroc_examples.sh
@@ -0,0 +1,182 @@
+#!/bin/bash
+pathDatasetEuroc='/Datasets/EuRoC' #Example, it is necessary to change it to the dataset path
+
+#------------------------------------
+# Monocular Examples
+echo "Launching MH01 with Monocular sensor"
+./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Monocular/EuRoC_TimeStamps/MH01.txt dataset-MH01_mono
+
+echo "Launching MH02 with Monocular sensor"
+./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH02 ./Monocular/EuRoC_TimeStamps/MH02.txt dataset-MH02_mono
+
+echo "Launching MH03 with Monocular sensor"
+./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH03 ./Monocular/EuRoC_TimeStamps/MH03.txt dataset-MH03_mono
+
+echo "Launching MH04 with Monocular sensor"
+./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH04 ./Monocular/EuRoC_TimeStamps/MH04.txt dataset-MH04_mono
+
+echo "Launching MH05 with Monocular sensor"
+./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH05 ./Monocular/EuRoC_TimeStamps/MH05.txt dataset-MH05_mono
+
+echo "Launching V101 with Monocular sensor"
+./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/V101 ./Monocular/EuRoC_TimeStamps/V101.txt dataset-V101_mono
+
+echo "Launching V102 with Monocular sensor"
+./Monocular/mono_euroc 
../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/V102 ./Monocular/EuRoC_TimeStamps/V102.txt dataset-V102_mono + +echo "Launching V103 with Monocular sensor" +./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/V103 ./Monocular/EuRoC_TimeStamps/V103.txt dataset-V103_mono + +echo "Launching V201 with Monocular sensor" +./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/V201 ./Monocular/EuRoC_TimeStamps/V201.txt dataset-V201_mono + +echo "Launching V202 with Monocular sensor" +./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/V202 ./Monocular/EuRoC_TimeStamps/V202.txt dataset-V202_mono + +echo "Launching V203 with Monocular sensor" +./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/V203 ./Monocular/EuRoC_TimeStamps/V203.txt dataset-V203_mono + +# MultiSession Monocular Examples +echo "Launching Machine Hall with Monocular sensor" +./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Monocular/EuRoC_TimeStamps/MH01.txt "$pathDatasetEuroc"/MH02 ./Monocular/EuRoC_TimeStamps/MH02.txt "$pathDatasetEuroc"/MH03 ./Monocular/EuRoC_TimeStamps/MH03.txt "$pathDatasetEuroc"/MH04 ./Monocular/EuRoC_TimeStamps/MH04.txt "$pathDatasetEuroc"/MH05 ./Monocular/EuRoC_TimeStamps/MH05.txt dataset-MH01_to_MH05_mono + +echo "Launching Vicon Room 1 with Monocular sensor" +./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/V101 ./Monocular/EuRoC_TimeStamps/V101.txt "$pathDatasetEuroc"/V102 ./Monocular/EuRoC_TimeStamps/V102.txt "$pathDatasetEuroc"/V103 ./Monocular/EuRoC_TimeStamps/V103.txt dataset-V101_to_V103_mono + +echo "Launching Vicon Room 2 with Monocular sensor" +./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/V201 ./Monocular/EuRoC_TimeStamps/V201.txt "$pathDatasetEuroc"/V202 ./Monocular/EuRoC_TimeStamps/V202.txt "$pathDatasetEuroc"/V203 ./Monocular/EuRoC_TimeStamps/V203.txt dataset-V201_to_V203_mono + +#------------------------------------ +# Stereo Examples +echo "Launching MH01 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Stereo/EuRoC_TimeStamps/MH01.txt dataset-MH01_stereo + +echo "Launching MH02 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/MH02 ./Stereo/EuRoC_TimeStamps/MH02.txt dataset-MH02_stereo + +echo "Launching MH03 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/MH03 ./Stereo/EuRoC_TimeStamps/MH03.txt dataset-MH03_stereo + +echo "Launching MH04 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/MH04 ./Stereo/EuRoC_TimeStamps/MH04.txt dataset-MH04_stereo + +echo "Launching MH05 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/MH05 ./Stereo/EuRoC_TimeStamps/MH05.txt dataset-MH05_stereo + +echo "Launching V101 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/V101 ./Stereo/EuRoC_TimeStamps/V101.txt dataset-V101_stereo + +echo "Launching V102 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/V102 ./Stereo/EuRoC_TimeStamps/V102.txt dataset-V102_stereo + +echo "Launching V103 with Stereo 
sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/V103 ./Stereo/EuRoC_TimeStamps/V103.txt dataset-V103_stereo + +echo "Launching V201 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/V201 ./Stereo/EuRoC_TimeStamps/V201.txt dataset-V201_stereo + +echo "Launching V202 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/V202 ./Stereo/EuRoC_TimeStamps/V202.txt dataset-V202_stereo + +echo "Launching V203 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/V203 ./Stereo/EuRoC_TimeStamps/V203.txt dataset-V203_stereo + +# MultiSession Stereo Examples +echo "Launching Machine Hall with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Stereo/EuRoC_TimeStamps/MH01.txt "$pathDatasetEuroc"/MH02 ./Stereo/EuRoC_TimeStamps/MH02.txt "$pathDatasetEuroc"/MH03 ./Stereo/EuRoC_TimeStamps/MH03.txt "$pathDatasetEuroc"/MH04 ./Stereo/EuRoC_TimeStamps/MH04.txt "$pathDatasetEuroc"/MH05 ./Stereo/EuRoC_TimeStamps/MH05.txt dataset-MH01_to_MH05_stereo + +echo "Launching Vicon Room 1 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/V101 ./Stereo/EuRoC_TimeStamps/V101.txt "$pathDatasetEuroc"/V102 ./Stereo/EuRoC_TimeStamps/V102.txt "$pathDatasetEuroc"/V103 ./Stereo/EuRoC_TimeStamps/V103.txt dataset-V101_to_V103_stereo + +echo "Launching Vicon Room 2 with Stereo sensor" +./Stereo/stereo_euroc ../Vocabulary/ORBvoc.txt ./Stereo/EuRoC.yaml "$pathDatasetEuroc"/V201 ./Stereo/EuRoC_TimeStamps/V201.txt "$pathDatasetEuroc"/V202 ./Stereo/EuRoC_TimeStamps/V202.txt "$pathDatasetEuroc"/V203 ./Stereo/EuRoC_TimeStamps/V203.txt dataset-V201_to_V203_stereo + +#------------------------------------ +# Monocular-Inertial Examples +echo "Launching MH01 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Monocular-Inertial/EuRoC_TimeStamps/MH01.txt dataset-MH01_monoi + +echo "Launching MH02 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH02 ./Monocular-Inertial/EuRoC_TimeStamps/MH02.txt dataset-MH02_monoi + +echo "Launching MH03 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH03 ./Monocular-Inertial/EuRoC_TimeStamps/MH03.txt dataset-MH03_monoi + +echo "Launching MH04 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH04 ./Monocular-Inertial/EuRoC_TimeStamps/MH04.txt dataset-MH04_monoi + +echo "Launching MH05 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH05 ./Monocular-Inertial/EuRoC_TimeStamps/MH05.txt dataset-MH05_monoi + +echo "Launching V101 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V101 ./Monocular-Inertial/EuRoC_TimeStamps/V101.txt dataset-V101_monoi + +echo "Launching V102 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt 
./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V102 ./Monocular-Inertial/EuRoC_TimeStamps/V102.txt dataset-V102_monoi + +echo "Launching V103 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V103 ./Monocular-Inertial/EuRoC_TimeStamps/V103.txt dataset-V103_monoi + +echo "Launching V201 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V201 ./Monocular-Inertial/EuRoC_TimeStamps/V201.txt dataset-V201_monoi + +echo "Launching V202 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V202 ./Monocular-Inertial/EuRoC_TimeStamps/V202.txt dataset-V202_monoi + +echo "Launching V203 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V203 ./Monocular-Inertial/EuRoC_TimeStamps/V203.txt dataset-V203_monoi + +# MultiSession Monocular Examples +echo "Launching Machine Hall with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Monocular-Inertial/EuRoC_TimeStamps/MH01.txt "$pathDatasetEuroc"/MH02 ./Monocular-Inertial/EuRoC_TimeStamps/MH02.txt "$pathDatasetEuroc"/MH03 ./Monocular-Inertial/EuRoC_TimeStamps/MH03.txt "$pathDatasetEuroc"/MH04 ./Monocular-Inertial/EuRoC_TimeStamps/MH04.txt "$pathDatasetEuroc"/MH05 ./Monocular-Inertial/EuRoC_TimeStamps/MH05.txt dataset-MH01_to_MH05_monoi + +echo "Launching Vicon Room 1 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V101 ./Monocular-Inertial/EuRoC_TimeStamps/V101.txt "$pathDatasetEuroc"/V102 ./Monocular-Inertial/EuRoC_TimeStamps/V102.txt "$pathDatasetEuroc"/V103 ./Monocular-Inertial/EuRoC_TimeStamps/V103.txt dataset-V101_to_V103_monoi + +echo "Launching Vicon Room 2 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_euroc ../Vocabulary/ORBvoc.txt ./Monocular-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V201 ./Monocular-Inertial/EuRoC_TimeStamps/V201.txt "$pathDatasetEuroc"/V202 ./Monocular-Inertial/EuRoC_TimeStamps/V202.txt "$pathDatasetEuroc"/V203 ./Monocular-Inertial/EuRoC_TimeStamps/V203.txt dataset-V201_to_V203_monoi + +#------------------------------------ +# Stereo-Inertial Examples +echo "Launching MH01 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Stereo-Inertial/EuRoC_TimeStamps/MH01.txt dataset-MH01_stereoi + +echo "Launching MH02 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH02 ./Stereo-Inertial/EuRoC_TimeStamps/MH02.txt dataset-MH02_stereoi + +echo "Launching MH03 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH03 ./Stereo-Inertial/EuRoC_TimeStamps/MH03.txt dataset-MH03_stereoi + +echo "Launching MH04 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH04 ./Stereo-Inertial/EuRoC_TimeStamps/MH04.txt dataset-MH04_stereoi + +echo "Launching MH05 with 
Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH05 ./Stereo-Inertial/EuRoC_TimeStamps/MH05.txt dataset-MH05_stereoi + +echo "Launching V101 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V101 ./Stereo-Inertial/EuRoC_TimeStamps/V101.txt dataset-V101_stereoi + +echo "Launching V102 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V102 ./Stereo-Inertial/EuRoC_TimeStamps/V102.txt dataset-V102_stereoi + +echo "Launching V103 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V103 ./Stereo-Inertial/EuRoC_TimeStamps/V103.txt dataset-V103_stereoi + +echo "Launching V201 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V201 ./Stereo-Inertial/EuRoC_TimeStamps/V201.txt dataset-V201_stereoi + +echo "Launching V202 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V202 ./Stereo-Inertial/EuRoC_TimeStamps/V202.txt dataset-V202_stereoi + +echo "Launching V203 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V203 ./Stereo-Inertial/EuRoC_TimeStamps/V203.txt dataset-V203_stereoi + +# MultiSession Stereo-Inertial Examples +echo "Launching Machine Hall with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Stereo-Inertial/EuRoC_TimeStamps/MH01.txt "$pathDatasetEuroc"/MH02 ./Stereo-Inertial/EuRoC_TimeStamps/MH02.txt "$pathDatasetEuroc"/MH03 ./Stereo-Inertial/EuRoC_TimeStamps/MH03.txt "$pathDatasetEuroc"/MH04 ./Stereo-Inertial/EuRoC_TimeStamps/MH04.txt "$pathDatasetEuroc"/MH05 ./Stereo-Inertial/EuRoC_TimeStamps/MH05.txt dataset-MH01_to_MH05_stereoi + +echo "Launching Vicon Room 1 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V101 ./Stereo-Inertial/EuRoC_TimeStamps/V101.txt "$pathDatasetEuroc"/V102 ./Stereo-Inertial/EuRoC_TimeStamps/V102.txt "$pathDatasetEuroc"/V103 ./Stereo-Inertial/EuRoC_TimeStamps/V103.txt dataset-V101_to_V103_stereoi + +echo "Launching Vicon Room 2 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_euroc ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/EuRoC.yaml "$pathDatasetEuroc"/V201 ./Stereo-Inertial/EuRoC_TimeStamps/V201.txt "$pathDatasetEuroc"/V202 ./Stereo-Inertial/EuRoC_TimeStamps/V202.txt "$pathDatasetEuroc"/V203 ./Stereo-Inertial/EuRoC_TimeStamps/V203.txt dataset-V201_to_V203_stereoi diff --git a/Examples/tum_vi_eval_examples.sh b/Examples/tum_vi_eval_examples.sh new file mode 100644 index 0000000..6c27c8f --- /dev/null +++ b/Examples/tum_vi_eval_examples.sh @@ -0,0 +1,11 @@ +#!/bin/bash +pathDatasetTUM_VI='/Datasets/TUM_VI' #Example, it is necesary to change it by the dataset path + + +# Single Session Example + +echo "Launching Magistrale 1 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt ./Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale1_512_16/mav0/cam0/data 
"$pathDatasetTUM_VI"/dataset-magistrale1_512_16/mav0/cam1/data ./Stereo-Inertial/TUM_TimeStamps/dataset-magistrale1_512.txt ./Stereo-Inertial/TUM_IMU/dataset-magistrale1_512.txt dataset-magistrale1_512_stereoi +echo "------------------------------------" +echo "Evaluation of Magistrale 1 trajectory with Stereo-Inertial sensor" +python ../evaluation/evaluate_ate_scale.py "$pathDatasetTUM_VI"/magistrale1_512_16/mav0/mocap0/data.csv f_dataset-magistrale1_512_stereoi.txt --plot magistrale1_512_stereoi.pdf diff --git a/Examples/tum_vi_examples.sh b/Examples/tum_vi_examples.sh new file mode 100644 index 0000000..90c2d29 --- /dev/null +++ b/Examples/tum_vi_examples.sh @@ -0,0 +1,240 @@ +#!/bin/bash +pathDatasetTUM_VI='/Datasets/TUM_VI' #Example, it is necesary to change it by the dataset path + +#------------------------------------ +# Monocular Examples +echo "Launching Room 1 with Monocular sensor" +./Monocular/mono_tum_vi ../Vocabulary/ORBvoc.txt Monocular/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam0/data Monocular/TUM_TimeStamps/dataset-room1_512.txt dataset-room1_512_mono + +echo "Launching Room 2 with Monocular sensor" +./Monocular/mono_tum_vi ../Vocabulary/ORBvoc.txt Monocular/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room2_512_16/mav0/cam0/data Monocular/TUM_TimeStamps/dataset-room2_512.txt dataset-room2_512_mono + +echo "Launching Room 3 with Monocular sensor" +./Monocular/mono_tum_vi ../Vocabulary/ORBvoc.txt Monocular/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room3_512_16/mav0/cam0/data Monocular/TUM_TimeStamps/dataset-room3_512.txt dataset-room3_512_mono + +echo "Launching Room 4 with Monocular sensor" +./Monocular/mono_tum_vi ../Vocabulary/ORBvoc.txt Monocular/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room4_512_16/mav0/cam0/data Monocular/TUM_TimeStamps/dataset-room4_512.txt dataset-room4_512_mono + +echo "Launching Room 5 with Monocular sensor" +./Monocular/mono_tum_vi ../Vocabulary/ORBvoc.txt Monocular/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room5_512_16/mav0/cam0/data Monocular/TUM_TimeStamps/dataset-room5_512.txt dataset-room5_512_mono + +echo "Launching Room 6 with Monocular sensor" +./Monocular/mono_tum_vi ../Vocabulary/ORBvoc.txt Monocular/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room6_512_16/mav0/cam0/data Monocular/TUM_TimeStamps/dataset-room6_512.txt dataset-room6_512_mono + + +#------------------------------------ +# Stereo Examples +echo "Launching Room 1 with Stereo sensor" +./Stereo/stereo_tum_vi ../Vocabulary/ORBvoc.txt Stereo/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam1/data Stereo/TUM_TimeStamps/dataset-room1_512.txt dataset-room1_512_stereo + +echo "Launching Room 2 with Stereo sensor" +./Stereo/stereo_tum_vi ../Vocabulary/ORBvoc.txt Stereo/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room2_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room2_512_16/mav0/cam1/data Stereo/TUM_TimeStamps/dataset-room2_512.txt dataset-room2_512_stereo + +echo "Launching Room 3 with Stereo sensor" +./Stereo/stereo_tum_vi ../Vocabulary/ORBvoc.txt Stereo/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room3_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room3_512_16/mav0/cam1/data Stereo/TUM_TimeStamps/dataset-room3_512.txt dataset-room3_512_stereo + +echo "Launching Room 4 with Stereo sensor" +./Stereo/stereo_tum_vi ../Vocabulary/ORBvoc.txt Stereo/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room4_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room4_512_16/mav0/cam1/data 
+
+echo "Launching Room 5 with Stereo sensor"
+./Stereo/stereo_tum_vi ../Vocabulary/ORBvoc.txt Stereo/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room5_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room5_512_16/mav0/cam1/data Stereo/TUM_TimeStamps/dataset-room5_512.txt dataset-room5_512_stereo
+
+echo "Launching Room 6 with Stereo sensor"
+./Stereo/stereo_tum_vi ../Vocabulary/ORBvoc.txt Stereo/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room6_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room6_512_16/mav0/cam1/data Stereo/TUM_TimeStamps/dataset-room6_512.txt dataset-room6_512_stereo
+
+
+#------------------------------------
+# Monocular-Inertial Examples
+echo "Launching Corridor 1 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor1_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-corridor1_512.txt Monocular-Inertial/TUM_IMU/dataset-corridor1_512.txt dataset-corridor1_512_monoi
+
+echo "Launching Corridor 2 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor2_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-corridor2_512.txt Monocular-Inertial/TUM_IMU/dataset-corridor2_512.txt dataset-corridor2_512_monoi
+
+echo "Launching Corridor 3 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor3_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-corridor3_512.txt Monocular-Inertial/TUM_IMU/dataset-corridor3_512.txt dataset-corridor3_512_monoi
+
+echo "Launching Corridor 4 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor4_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-corridor4_512.txt Monocular-Inertial/TUM_IMU/dataset-corridor4_512.txt dataset-corridor4_512_monoi
+
+echo "Launching Corridor 5 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor5_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-corridor5_512.txt Monocular-Inertial/TUM_IMU/dataset-corridor5_512.txt dataset-corridor5_512_monoi
+
+
+echo "Launching Magistrale 1 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale1_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-magistrale1_512.txt Monocular-Inertial/TUM_IMU/dataset-magistrale1_512.txt dataset-magistrale1_512_monoi
+
+echo "Launching Magistrale 2 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale2_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-magistrale2_512.txt Monocular-Inertial/TUM_IMU/dataset-magistrale2_512.txt dataset-magistrale2_512_monoi
+
+echo "Launching Magistrale 3 with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale3_512_16/mav0/cam0/data 
Monocular-Inertial/TUM_TimeStamps/dataset-magistrale3_512.txt Monocular-Inertial/TUM_IMU/dataset-magistrale3_512.txt dataset-magistrale3_512_monoi + +echo "Launching Magistrale 4 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale4_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-magistrale4_512.txt Monocular-Inertial/TUM_IMU/dataset-magistrale4_512.txt dataset-magistrale4_512_monoi + +echo "Launching Magistrale 5 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale5_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-magistrale5_512.txt Monocular-Inertial/TUM_IMU/dataset-magistrale5_512.txt dataset-magistrale5_512_monoi + +echo "Launching Magistrale 6 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale6_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-magistrale6_512.txt Monocular-Inertial/TUM_IMU/dataset-magistrale6_512.txt dataset-magistrale6_512_monoi + + +echo "Launching Outdoor 1 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512_far.yaml "$pathDatasetTUM_VI"/dataset-outdoors1_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-outdoors1_512.txt Monocular-Inertial/TUM_IMU/dataset-outdoors1_512.txt dataset-outdoors1_512_monoi + +echo "Launching Outdoor 2 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512_far.yaml "$pathDatasetTUM_VI"/dataset-outdoors2_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-outdoors2_512.txt Monocular-Inertial/TUM_IMU/dataset-outdoors2_512.txt dataset-outdoors2_512_monoi + +echo "Launching Outdoor 3 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512_far.yaml "$pathDatasetTUM_VI"/dataset-outdoors3_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-outdoors3_512.txt Monocular-Inertial/TUM_IMU/dataset-outdoors3_512.txt dataset-outdoors3_512_monoi + +echo "Launching Outdoor 4 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512_far.yaml "$pathDatasetTUM_VI"/dataset-outdoors4_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-outdoors4_512.txt Monocular-Inertial/TUM_IMU/dataset-outdoors4_512.txt dataset-outdoors4_512_monoi + +echo "Launching Outdoor 5 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512_far.yaml "$pathDatasetTUM_VI"/dataset-outdoors5_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-outdoors5_512.txt Monocular-Inertial/TUM_IMU/dataset-outdoors5_512.txt dataset-outdoors5_512_monoi + +echo "Launching Outdoor 6 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512_far.yaml "$pathDatasetTUM_VI"/dataset-outdoors6_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-outdoors6_512.txt Monocular-Inertial/TUM_IMU/dataset-outdoors6_512.txt dataset-outdoors6_512_monoi + +echo "Launching Outdoor 7 with Monocular-Inertial sensor" 
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512_far.yaml "$pathDatasetTUM_VI"/dataset-outdoors7_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-outdoors7_512.txt Monocular-Inertial/TUM_IMU/dataset-outdoors7_512.txt dataset-outdoors7_512_monoi + +echo "Launching Outdoor 8 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512_far.yaml "$pathDatasetTUM_VI"/dataset-outdoors8_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-outdoors8_512.txt Monocular-Inertial/TUM_IMU/dataset-outdoors8_512.txt dataset-outdoors8_512_monoi + + +echo "Launching Room 1 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-room1_512.txt Monocular-Inertial/TUM_IMU/dataset-room1_512.txt dataset-room1_512_monoi + +echo "Launching Room 2 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room2_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-room2_512.txt Monocular-Inertial/TUM_IMU/dataset-room2_512.txt dataset-room2_512_monoi + +echo "Launching Room 3 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room3_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-room3_512.txt Monocular-Inertial/TUM_IMU/dataset-room3_512.txt dataset-room3_512_monoi + +echo "Launching Room 4 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room4_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-room4_512.txt Monocular-Inertial/TUM_IMU/dataset-room4_512.txt dataset-room4_512_monoi + +echo "Launching Room 5 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room5_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-room5_512.txt Monocular-Inertial/TUM_IMU/dataset-room5_512.txt dataset-room5_512_monoi + +echo "Launching Room 6 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room6_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-room6_512.txt Monocular-Inertial/TUM_IMU/dataset-room6_512.txt dataset-room6_512_monoi + + +echo "Launching Slides 1 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-slides1_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-slides1_512.txt Monocular-Inertial/TUM_IMU/dataset-slides1_512.txt dataset-slides1_512_monoi + +echo "Launching Slides 2 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-slides2_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-slides2_512.txt Monocular-Inertial/TUM_IMU/dataset-slides2_512.txt dataset-slides2_512_monoi + +echo "Launching Slides 3 with Monocular-Inertial sensor" +./Monocular-Inertial/mono_inertial_tum_vi 
../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-slides3_512_16/mav0/cam0/data Monocular-Inertial/TUM_TimeStamps/dataset-slides3_512.txt Monocular-Inertial/TUM_IMU/dataset-slides3_512.txt dataset-slides3_512_monoi
+
+
+# MultiSession Monocular Examples
+echo "Launching Room 1, Magistrale 1, Magistrale 5 and Slides 1 in the same session with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-room1_512.txt Stereo-Inertial/TUM_IMU/dataset-room1_512.txt "$pathDatasetTUM_VI"/dataset-magistrale1_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale1_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale1_512.txt "$pathDatasetTUM_VI"/dataset-magistrale5_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale5_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale5_512.txt "$pathDatasetTUM_VI"/dataset-slides1_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-slides1_512.txt Stereo-Inertial/TUM_IMU/dataset-slides1_512.txt dataset-room1_mag1_mag5_slides1_monoi
+
+echo "Launching all Rooms (1-6) in the same session with Monocular-Inertial sensor"
+./Monocular-Inertial/mono_inertial_tum_vi ../Vocabulary/ORBvoc.txt Monocular-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-room1_512.txt Stereo-Inertial/TUM_IMU/dataset-room1_512.txt "$pathDatasetTUM_VI"/dataset-room2_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-room2_512.txt Stereo-Inertial/TUM_IMU/dataset-room2_512.txt "$pathDatasetTUM_VI"/dataset-room3_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-room3_512.txt Stereo-Inertial/TUM_IMU/dataset-room3_512.txt "$pathDatasetTUM_VI"/dataset-room4_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-room4_512.txt Stereo-Inertial/TUM_IMU/dataset-room4_512.txt "$pathDatasetTUM_VI"/dataset-room5_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-room5_512.txt Stereo-Inertial/TUM_IMU/dataset-room5_512.txt "$pathDatasetTUM_VI"/dataset-room6_512_16/mav0/cam0/data Stereo-Inertial/TUM_TimeStamps/dataset-room6_512.txt Stereo-Inertial/TUM_IMU/dataset-room6_512.txt dataset-rooms123456_monoi
+
+#------------------------------------
+# Stereo-Inertial Examples
+echo "Launching Corridor 1 with Stereo-Inertial sensor"
+./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-corridor1_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-corridor1_512.txt Stereo-Inertial/TUM_IMU/dataset-corridor1_512.txt dataset-corridor1_512_stereoi
+
+echo "Launching Corridor 2 with Stereo-Inertial sensor"
+./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor2_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-corridor2_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-corridor2_512.txt Stereo-Inertial/TUM_IMU/dataset-corridor2_512.txt dataset-corridor2_512_stereoi
+
+echo "Launching Corridor 3 with Stereo-Inertial sensor"
+./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor3_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-corridor3_512_16/mav0/cam1/data 
Stereo-Inertial/TUM_TimeStamps/dataset-corridor3_512.txt Stereo-Inertial/TUM_IMU/dataset-corridor3_512.txt dataset-corridor3_512_stereoi + +echo "Launching Corridor 4 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor4_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-corridor4_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-corridor4_512.txt Stereo-Inertial/TUM_IMU/dataset-corridor4_512.txt dataset-corridor4_512_stereoi + +echo "Launching Corridor 5 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-corridor5_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-corridor5_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-corridor5_512.txt Stereo-Inertial/TUM_IMU/dataset-corridor5_512.txt dataset-corridor5_512_stereoi + + +echo "Launching Magistrale 1 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-magistrale1_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale1_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale1_512.txt dataset-magistrale1_512_stereoi + +echo "Launching Magistrale 2 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale2_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-magistrale2_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale2_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale2_512.txt dataset-magistrale2_512_stereoi + +echo "Launching Magistrale 3 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale3_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-magistrale3_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale3_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale3_512.txt dataset-magistrale3_512_stereoi + +echo "Launching Magistrale 4 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale4_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-magistrale4_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale4_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale4_512.txt dataset-magistrale4_512_stereoi + +echo "Launching Magistrale 5 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale5_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-magistrale5_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale5_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale5_512.txt dataset-magistrale5_512_stereoi + +echo "Launching Magistrale 6 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-magistrale6_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-magistrale6_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale6_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale6_512.txt dataset-magistrale6_512_stereoi + + +echo "Launching Outdoor 1 
with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512_outdoors.yaml "$pathDatasetTUM_VI"/dataset-outdoors1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-outdoors1_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-outdoors1_512.txt Stereo-Inertial/TUM_IMU/dataset-outdoors1_512.txt outdoors1_512_stereoi + +echo "Launching Outdoor 2 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512_outdoors.yaml "$pathDatasetTUM_VI"/dataset-outdoors2_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-outdoors2_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-outdoors2_512.txt Stereo-Inertial/TUM_IMU/dataset-outdoors2_512.txt outdoors2_512_stereoi + +echo "Launching Outdoor 3 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512_outdoors.yaml "$pathDatasetTUM_VI"/dataset-outdoors3_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-outdoors3_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-outdoors3_512.txt Stereo-Inertial/TUM_IMU/dataset-outdoors3_512.txt outdoors3_512 + +echo "Launching Outdoor 4 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512_outdoors.yaml "$pathDatasetTUM_VI"/dataset-outdoors4_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-outdoors4_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-outdoors4_512.txt Stereo-Inertial/TUM_IMU/dataset-outdoors4_512.txt outdoors4_512 + +echo "Launching Outdoor 5 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512_outdoors.yaml "$pathDatasetTUM_VI"/dataset-outdoors5_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-outdoors5_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-outdoors5_512.txt Stereo-Inertial/TUM_IMU/dataset-outdoors5_512.txt outdoors5_512_stereoi + +echo "Launching Outdoor 6 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512_outdoors.yaml "$pathDatasetTUM_VI"/dataset-outdoors6_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-outdoors6_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-outdoors6_512.txt Stereo-Inertial/TUM_IMU/dataset-outdoors6_512.txt outdoors6_512_stereoi + +echo "Launching Outdoor 7 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512_outdoors.yaml "$pathDatasetTUM_VI"/dataset-outdoors7_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-outdoors7_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-outdoors7_512.txt Stereo-Inertial/TUM_IMU/dataset-outdoors7_512.txt outdoors7_512_stereoi + +echo "Launching Outdoor 8 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512_outdoors.yaml "$pathDatasetTUM_VI"/dataset-outdoors8_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-outdoors8_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-outdoors8_512.txt Stereo-Inertial/TUM_IMU/dataset-outdoors8_512.txt outdoors8_512_stereoi + + +echo "Launching Room 1 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam1/data 
Stereo-Inertial/TUM_TimeStamps/dataset-room1_512.txt Stereo-Inertial/TUM_IMU/dataset-room1_512.txt dataset-room1_512_stereoi + +echo "Launching Room 2 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room2_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room2_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room2_512.txt Stereo-Inertial/TUM_IMU/dataset-room2_512.txt dataset-room2_512_stereoi + +echo "Launching Room 3 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room3_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room3_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room3_512.txt Stereo-Inertial/TUM_IMU/dataset-room3_512.txt dataset-room3_512_stereoi + +echo "Launching Room 4 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room4_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room4_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room4_512.txt Stereo-Inertial/TUM_IMU/dataset-room4_512.txt dataset-room4_512_stereoi + +echo "Launching Room 5 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room5_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room5_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room5_512.txt Stereo-Inertial/TUM_IMU/dataset-room5_512.txt dataset-room5_512_stereoi + +echo "Launching Room 6 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room6_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room6_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room6_512.txt Stereo-Inertial/TUM_IMU/dataset-room6_512.txt dataset-room6_512_stereoi + + +echo "Launching Slides 1 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-slides1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-slides1_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-slides1_512.txt Stereo-Inertial/TUM_IMU/dataset-slides1_512.txt dataset-slides1_512_stereoi + +echo "Launching Slides 2 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-slides2_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-slides2_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-slides2_512.txt Stereo-Inertial/TUM_IMU/dataset-slides2_512.txt dataset-slides2_512_stereoi + +echo "Launching Slides 3 with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-slides3_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-slides3_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-slides3_512.txt Stereo-Inertial/TUM_IMU/dataset-slides3_512.txt dataset-slides3_512_stereoi + + +# MultiSession Stereo-Inertial Examples +echo "Launching Room 1, Magistrale 1, Magistrale 5 and Slides 1 in the same session with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml 
"$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room1_512.txt Stereo-Inertial/TUM_IMU/dataset-room1_512.txt "$pathDatasetTUM_VI"/dataset-magistrale1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-magistrale1_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale1_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale1_512.txt "$pathDatasetTUM_VI"/dataset-magistrale5_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-magistrale5_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-magistrale5_512.txt Stereo-Inertial/TUM_IMU/dataset-magistrale5_512.txt "$pathDatasetTUM_VI"/dataset-slides1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-slides1_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-slides1_512.txt Stereo-Inertial/TUM_IMU/dataset-slides1_512.txt dataset-room1_mag1_mag5_slides1_stereoi + +echo "Launching all Rooms (1-6) in the same session with Stereo-Inertial sensor" +./Stereo-Inertial/stereo_inertial_tum_vi ../Vocabulary/ORBvoc.txt Stereo-Inertial/TUM_512.yaml "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room1_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room1_512.txt Stereo-Inertial/TUM_IMU/dataset-room1_512.txt "$pathDatasetTUM_VI"/dataset-room2_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room2_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room2_512.txt Stereo-Inertial/TUM_IMU/dataset-room2_512.txt "$pathDatasetTUM_VI"/dataset-room3_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room3_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room3_512.txt Stereo-Inertial/TUM_IMU/dataset-room3_512.txt "$pathDatasetTUM_VI"/dataset-room4_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room4_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room4_512.txt Stereo-Inertial/TUM_IMU/dataset-room4_512.txt "$pathDatasetTUM_VI"/dataset-room5_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room5_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room5_512.txt Stereo-Inertial/TUM_IMU/dataset-room5_512.txt "$pathDatasetTUM_VI"/dataset-room6_512_16/mav0/cam0/data "$pathDatasetTUM_VI"/dataset-room6_512_16/mav0/cam1/data Stereo-Inertial/TUM_TimeStamps/dataset-room6_512.txt Stereo-Inertial/TUM_IMU/dataset-room6_512.txt dataset-rooms123456_stereoi diff --git a/include/Atlas.h b/include/Atlas.h index f89925e..59b948c 100644 --- a/include/Atlas.h +++ b/include/Atlas.h @@ -50,7 +50,7 @@ public: Atlas(); Atlas(int initKFid); // When its initialization the first map is created ~Atlas(); - + // 创建新地图 void CreateNewMap(); void ChangeMap(Map* pMap); @@ -107,7 +107,7 @@ public: protected: - std::set mspMaps; + std::set mspMaps; //存放所有的地图 std::set mspBadMaps; Map* mpCurrentMap; diff --git a/include/Config.h b/include/Config.h new file mode 100644 index 0000000..ab01feb --- /dev/null +++ b/include/Config.h @@ -0,0 +1,6 @@ +#ifndef CONFIG_H +#define CONFIG_H + +//#define REGISTER_TIMES + +#endif // CONFIG_H diff --git a/include/G2oTypes.h b/include/G2oTypes.h index 3f7113f..f132b11 100644 --- a/include/G2oTypes.h +++ b/include/G2oTypes.h @@ -336,6 +336,13 @@ public: } }; +/** + * @brief 视觉重投影的边 + * 这里或许会有疑问,OptimizableTypes.h 里面不是定义了视觉重投影的边么? 
diff --git a/include/G2oTypes.h b/include/G2oTypes.h
index 3f7113f..f132b11 100644
--- a/include/G2oTypes.h
+++ b/include/G2oTypes.h
@@ -336,6 +336,13 @@ public:
     }
 };

+/**
+ * @brief Visual reprojection edge
+ * One might wonder: doesn't OptimizableTypes.h already define visual reprojection edges?
+ * It does, and those edges are still used, but only in the pure-visual case without an IMU, where the predefined vertex types are sufficient.
+ * Once the IMU is added, the optimization contains inertial residual edges and reprojection edges at the same time, and both may connect to the same pose vertex, so a new vertex type that carries the IMU pose is needed.
+ * The reprojection edge therefore has to be redefined as well, and this is the edge used during IMU optimization.
+ */
 class EdgeMono : public g2o::BaseBinaryEdge<2,Eigen::Vector2d,g2o::VertexSBAPointXYZ,VertexPose>
 {
 public:
@@ -489,6 +496,7 @@ public:
     const int cam_idx;
 };

+// TO COMMENT
 class EdgeInertial : public g2o::BaseMultiEdge<9,Vector9d>
 {
 public:
@@ -858,6 +866,6 @@ public:
     Eigen::Vector3d dtij;
 };

-} //namespace ORB_SLAM2
+} //namespace ORB_SLAM3

 #endif // G2OTYPES_H

diff --git a/include/ImuTypes.h b/include/ImuTypes.h
index d3da0a8..d2a767b 100644
--- a/include/ImuTypes.h
+++ b/include/ImuTypes.h
@@ -280,6 +280,6 @@
 cv::Mat NormalizeRotation(const cv::Mat &R);

 }

-} //namespace ORB_SLAM2
+} //namespace ORB_SLAM3

 #endif // IMUTYPES_H

diff --git a/include/System.h b/include/System.h
index 4395710..cfc2792 100644
--- a/include/System.h
+++ b/include/System.h
@@ -43,9 +43,11 @@
 namespace ORB_SLAM3
 {

+// Printing of intermediate (debug) information
 class Verbose
 {
 public:
+    // verbosity level
     enum eLevel
     {
         VERBOSITY_QUIET=0,

diff --git a/include/Tracking.h b/include/Tracking.h
index f88704e..d932274 100644
--- a/include/Tracking.h
+++ b/include/Tracking.h
@@ -109,9 +109,9 @@ public:
         NO_IMAGES_YET=0,    // no image yet
         NOT_INITIALIZED=1,  // images received but initialization not finished
         OK=2,               // normal tracking
-        RECENTLY_LOST=3,    // IMU mode: the current map has more than 10 KFs and tracking has been lost for less than 5 s; this state does not exist in pure-visual mode
+        RECENTLY_LOST=3,    // IMU mode: the current map has more than 10 KFs and tracking has been lost for less than 5 s. Pure-visual mode: this state does not exist
         LOST=4,             // IMU mode: the current frame has been lost for more than 5 s. Pure-visual mode: relocalization failed
-        OK_KLT=5
+        OK_KLT=5            // unused
     };

     eTrackingState mState;

diff --git a/src/Atlas.cc b/src/Atlas.cc
index a003ae0..444daac 100644
--- a/src/Atlas.cc
+++ b/src/Atlas.cc
@@ -55,15 +55,23 @@ Atlas::~Atlas()
     }
 }

+/**
+ * @brief Create a new map. If the currently active map is valid, store it as a non-active map first and then create the new one; otherwise the new map can be created directly.
+ *
+ */
void Atlas::CreateNewMap()
{
+    // lock the atlas
    unique_lock<mutex> lock(mMutexAtlas);
    cout << "Creation of new map with id: " << Map::nNextId << endl;
+
+    // if the currently active map is valid, store it as a non-active map before leaving it
    if(mpCurrentMap){
        cout << "Exits current map " << endl;
+        // mnLastInitKFidMap is the id of the first keyframe of the map about to be created: the maximum keyframe id of the previous map plus 1
        if(!mspMaps.empty() && mnLastInitKFidMap < mpCurrentMap->GetMaxKFid())
            mnLastInitKFidMap = mpCurrentMap->GetMaxKFid()+1; //The init KF is the next of current maximum
-
+        // store the current map, which simply amounts to setting mIsInUse to false
        mpCurrentMap->SetStoredMap();
        cout << "Saved map with ID: " << mpCurrentMap->GetId() << endl;

@@ -72,9 +80,9 @@ void Atlas::CreateNewMap()
    cout << "Creation of new map with last KF id: " << mnLastInitKFidMap << endl;

-    mpCurrentMap = new Map(mnLastInitKFidMap);
-    mpCurrentMap->SetCurrentMap();
-    mspMaps.insert(mpCurrentMap);
+    mpCurrentMap = new Map(mnLastInitKFidMap);  // create the new map
+    mpCurrentMap->SetCurrentMap();              // set it as the active map
+    mspMaps.insert(mpCurrentMap);               // insert it into the atlas
}

void Atlas::ChangeMap(Map* pMap)
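Editor's note: one invariant in CreateNewMap() above is worth spelling out. Keyframe ids must stay unique across every map in the atlas, which is why mnLastInitKFidMap jumps past the largest keyframe id of the map being retired before the new map is created. A condensed sketch of just that bookkeeping (simplified hypothetical types, not the real classes, which also handle the mutex, bad maps and map storage):

#include <set>

struct MapSketch {
    unsigned long initKFid, maxKFid;
    explicit MapSketch(unsigned long id) : initKFid(id), maxKFid(id) {}
};

struct AtlasSketch {
    std::set<MapSketch*> maps;     // all maps, active and stored
    MapSketch* current = nullptr;  // the single active map
    unsigned long lastInitKFid = 0;

    void CreateNewMap() {
        if (current && lastInitKFid < current->maxKFid)
            lastInitKFid = current->maxKFid + 1;  // keep KF ids globally unique
        current = new MapSketch(lastInitKFid);    // becomes the active map
        maps.insert(current);
    }
};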
diff --git a/src/Frame.cc b/src/Frame.cc
index 6a462b0..93a0002 100644
--- a/src/Frame.cc
+++ b/src/Frame.cc
@@ -364,8 +364,8 @@ Frame::Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extra
     mTimeORB_Ext = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(time_EndExtORB - time_StartExtORB).count();
 #endif

-    // count of the feature points obtained
-
+
+    // number of extracted feature points
     N = mvKeys.size();
     // if no feature points could be extracted, return right away
     if(mvKeys.empty())
@@ -1369,9 +1369,11 @@ Frame::Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeSt
     mTimeORB_Ext = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(time_EndExtORB - time_StartExtORB).count();
 #endif

-
+    // number of feature points extracted in the left image
     Nleft = mvKeys.size();
+    // number of feature points extracted in the right image
     Nright = mvKeysRight.size();
+    // total number of feature points
     N = Nleft + Nright;

     if(N == 0)

diff --git a/src/G2oTypes.cc b/src/G2oTypes.cc
index e416106..4a3d497 100644
--- a/src/G2oTypes.cc
+++ b/src/G2oTypes.cc
@@ -487,35 +487,46 @@ VertexAccBias::VertexAccBias(Frame *pF)
     setEstimate(ba);
 }

-
-
+/**
+ * @brief Inertial edge for IMU optimization of the local map (initialization is already finished at this point, so gravity direction and scale no longer need to be optimized)
+ * @param pInt the preintegration object
+ */
 EdgeInertial::EdgeInertial(IMU::Preintegrated *pInt):JRg(Converter::toMatrix3d(pInt->JRg)), JVg(Converter::toMatrix3d(pInt->JVg)), JPg(Converter::toMatrix3d(pInt->JPg)), JVa(Converter::toMatrix3d(pInt->JVa)), JPa(Converter::toMatrix3d(pInt->JPa)), mpInt(pInt), dt(pInt->dT)
 {
+    // Preparation: pull the values out of the preintegration object; they summarize the many IMU samples preintegrated between the two frames
     // This edge links 6 vertices
+    // an edge with 6 vertices
     resize(6);
+    // 1. define gravity
     g << 0, 0, -IMU::GRAVITY_VALUE;

-    cv::Mat cvInfo = pInt->C.rowRange(0,9).colRange(0,9).inv(cv::DECOMP_SVD);
+    // 2. invert the top-left 9x9 block of the covariance matrix, which is the covariance of the preintegration measurement noise
+    cv::Mat cvInfo = pInt->C.rowRange(0, 9).colRange(0, 9).inv(cv::DECOMP_SVD);
+    // convert it to an Eigen Matrix9d
     Matrix9d Info;
-    for(int r=0;r<9;r++)
-        for(int c=0;c<9;c++)
-            Info(r,c)=cvInfo.at<float>(r,c);
-    Info = (Info+Info.transpose())/2;
-    Eigen::SelfAdjointEigenSolver<Eigen::Matrix<double,9,9> > es(Info);
-    Eigen::Matrix<double,9,1> eigs = es.eigenvalues();
-    for(int i=0;i<9;i++)
-        if(eigs[i]<1e-12)
-            eigs[i]=0;
-    Info = es.eigenvectors()*eigs.asDiagonal()*es.eigenvectors().transpose();
+    for (int r = 0; r < 9; r++)
+        for (int c = 0; c < 9; c++)
+            Info(r, c) = cvInfo.at<float>(r, c);
+    // 3. force it to be symmetric
+    Info = (Info + Info.transpose()) / 2;
+    // 4. clamp very small eigenvalues to zero and rebuild the information matrix (this projects it onto the nearest positive semi-definite matrix, which the optimizer requires)
+    Eigen::SelfAdjointEigenSolver<Eigen::Matrix<double, 9, 9>> es(Info);
+    Eigen::Matrix<double, 9, 1> eigs = es.eigenvalues(); // the eigenvalues
+    for (int i = 0; i < 9; i++)
+        if (eigs[i] < 1e-12)
+            eigs[i] = 0;
+    // asDiagonal builds a diagonal matrix
+    Info = es.eigenvectors() * eigs.asDiagonal() * es.eigenvectors().transpose();
     setInformation(Info);
 }
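Editor's note: steps 3 and 4 of the constructor above are a projection of the (numerically inverted) covariance onto the nearest symmetric positive semi-definite matrix. The SVD-based inverse of a noisy covariance block can come out slightly asymmetric with marginally negative eigenvalues, and an indefinite information matrix would make the optimizer's quadratic error terms ill-defined. A self-contained Eigen sketch of the same operation (hypothetical helper name, not part of the patch):

#include <Eigen/Dense>

// Symmetrize, then clamp tiny/negative eigenvalues to zero and rebuild,
// exactly as the EdgeInertial/EdgeInertialGS constructors do for Info.
Eigen::MatrixXd NearestPSD(const Eigen::MatrixXd &A, double eps = 1e-12)
{
    Eigen::MatrixXd S = 0.5 * (A + A.transpose());  // force symmetry
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(S);
    Eigen::ArrayXd d = es.eigenvalues().array();
    d = (d < eps).select(0.0, d);                   // drop negative/tiny modes
    return es.eigenvectors() * d.matrix().asDiagonal() * es.eigenvectors().transpose();
}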
VP1->estimate().twb - VV1->estimate()*dt - 0.5*g*dt*dt)); // OK // translation + // (6,3)起点的3*3块表示位置残差对pose1的位置求导 _jacobianOplus[0].block<3,3>(6,3) = -Eigen::Matrix3d::Identity(); // OK // Jacobians wrt Velocity 1 + // _jacobianOplus[1] 9*3矩阵 总体来说就是三个残差分别对pose1的速度求导 _jacobianOplus[1].setZero(); _jacobianOplus[1].block<3,3>(3,0) = -Rbw1; // OK _jacobianOplus[1].block<3,3>(6,0) = -Rbw1*dt; // OK // Jacobians wrt Gyro 1 + // _jacobianOplus[2] 9*3矩阵 总体来说就是三个残差分别对陀螺仪偏置的速度求导 _jacobianOplus[2].setZero(); _jacobianOplus[2].block<3,3>(0,0) = -invJr*eR.transpose()*RightJacobianSO3(JRg*dbg)*JRg; // OK _jacobianOplus[2].block<3,3>(3,0) = -JVg; // OK _jacobianOplus[2].block<3,3>(6,0) = -JPg; // OK // Jacobians wrt Accelerometer 1 + // _jacobianOplus[3] 9*3矩阵 总体来说就是三个残差分别对加速度计偏置的速度求导 _jacobianOplus[3].setZero(); _jacobianOplus[3].block<3,3>(3,0) = -JVa; // OK _jacobianOplus[3].block<3,3>(6,0) = -JPa; // OK // Jacobians wrt Pose 2 + // _jacobianOplus[3] 9*6矩阵 总体来说就是三个残差分别对pose2的旋转与平移(p)求导 _jacobianOplus[4].setZero(); // rotation _jacobianOplus[4].block<3,3>(0,0) = invJr; // OK @@ -592,33 +614,43 @@ void EdgeInertial::linearizeOplus() _jacobianOplus[4].block<3,3>(6,3) = Rbw1*Rwb2; // OK // Jacobians wrt Velocity 2 + // _jacobianOplus[3] 9*3矩阵 总体来说就是三个残差分别对pose2的速度求导 _jacobianOplus[5].setZero(); _jacobianOplus[5].block<3,3>(3,0) = Rbw1; // OK } +// localmapping中imu初始化所用的边,除了正常的几个优化变量外还优化了重力方向与尺度 EdgeInertialGS::EdgeInertialGS(IMU::Preintegrated *pInt):JRg(Converter::toMatrix3d(pInt->JRg)), JVg(Converter::toMatrix3d(pInt->JVg)), JPg(Converter::toMatrix3d(pInt->JPg)), JVa(Converter::toMatrix3d(pInt->JVa)), JPa(Converter::toMatrix3d(pInt->JPa)), mpInt(pInt), dt(pInt->dT) { + // 准备工作,把预积分类里面的值先取出来,包含信息的是两帧之间n多个imu信息预积分来的 // This edge links 8 vertices + // 8元边 resize(8); + // 1. 定义重力 gI << 0, 0, -IMU::GRAVITY_VALUE; - cv::Mat cvInfo = pInt->C.rowRange(0,9).colRange(0,9).inv(cv::DECOMP_SVD); + // 2. 读取协方差矩阵的前9*9部分的逆矩阵,该部分表示的是预积分测量噪声的协方差矩阵 + cv::Mat cvInfo = pInt->C.rowRange(0, 9).colRange(0, 9).inv(cv::DECOMP_SVD); + // 转成eigen Matrix9d Matrix9d Info; for(int r=0;r<9;r++) for(int c=0;c<9;c++) Info(r,c)=cvInfo.at(r,c); + // 3. 强制让其成为对角矩阵 Info = (Info+Info.transpose())/2; + // 4. 
让特征值很小的时候置为0,再重新计算信息矩阵(暂不知这么操作的目的是什么,先搞清楚操作流程吧) Eigen::SelfAdjointEigenSolver > es(Info); - Eigen::Matrix eigs = es.eigenvalues(); + Eigen::Matrix eigs = es.eigenvalues(); // 矩阵特征值 for(int i=0;i<9;i++) if(eigs[i]<1e-12) eigs[i]=0; + // asDiagonal 生成对角矩阵 Info = es.eigenvectors()*eigs.asDiagonal()*es.eigenvectors().transpose(); setInformation(Info); } - +// 计算误差 void EdgeInertialGS::computeError() { @@ -638,6 +670,8 @@ void EdgeInertialGS::computeError() const Eigen::Vector3d dV = Converter::toVector3d(mpInt->GetDeltaVelocity(b)); const Eigen::Vector3d dP = Converter::toVector3d(mpInt->GetDeltaPosition(b)); + // 计算残差。广义上讲都是真实值 = 残差 + imu,旋转为imu*残差=真实值 + // dR.transpose() 为imu预积分的值,VP1->estimate().Rwb.transpose() * VP2->estimate().Rwb 为相机的Rwc在乘上相机与imu的标定外参矩阵 const Eigen::Vector3d er = LogSO3(dR.transpose()*VP1->estimate().Rwb.transpose()*VP2->estimate().Rwb); const Eigen::Vector3d ev = VP1->estimate().Rwb.transpose()*(s*(VV2->estimate() - VV1->estimate()) - g*dt) - dV; const Eigen::Vector3d ep = VP1->estimate().Rwb.transpose()*(s*(VP2->estimate().twb - VP1->estimate().twb - VV1->estimate()*dt) - g*dt*dt/2) - dP; @@ -645,6 +679,7 @@ void EdgeInertialGS::computeError() _error << er, ev, ep; } +// 计算雅克比矩阵 void EdgeInertialGS::linearizeOplus() { const VertexPose* VP1 = static_cast(_vertices[0]); @@ -655,53 +690,66 @@ void EdgeInertialGS::linearizeOplus() const VertexVelocity* VV2 = static_cast(_vertices[5]); const VertexGDir* VGDir = static_cast(_vertices[6]); const VertexScale* VS = static_cast(_vertices[7]); + // 1. 获取偏置的该变量,因为要对这个东西求导 const IMU::Bias b(VA->estimate()[0],VA->estimate()[1],VA->estimate()[2],VG->estimate()[0],VG->estimate()[1],VG->estimate()[2]); const IMU::Bias db = mpInt->GetDeltaBias(b); + // 陀螺仪的偏置改变量 Eigen::Vector3d dbg; dbg << db.bwx, db.bwy, db.bwz; - const Eigen::Matrix3d Rwb1 = VP1->estimate().Rwb; - const Eigen::Matrix3d Rbw1 = Rwb1.transpose(); - const Eigen::Matrix3d Rwb2 = VP2->estimate().Rwb; - const Eigen::Matrix3d Rwg = VGDir->estimate().Rwg; - Eigen::MatrixXd Gm = Eigen::MatrixXd::Zero(3,2); - Gm(0,1) = -IMU::GRAVITY_VALUE; - Gm(1,0) = IMU::GRAVITY_VALUE; + const Eigen::Matrix3d Rwb1 = VP1->estimate().Rwb; // Ri + const Eigen::Matrix3d Rbw1 = Rwb1.transpose(); // Ri.t() + const Eigen::Matrix3d Rwb2 = VP2->estimate().Rwb; // Rj + const Eigen::Matrix3d Rwg = VGDir->estimate().Rwg; // Rwg + Eigen::MatrixXd Gm = Eigen::MatrixXd::Zero(3, 2); + Gm(0, 1) = -IMU::GRAVITY_VALUE; + Gm(1, 0) = IMU::GRAVITY_VALUE; const double s = VS->estimate(); const Eigen::MatrixXd dGdTheta = Rwg*Gm; + // 预积分得来的dR const Eigen::Matrix3d dR = Converter::toMatrix3d(mpInt->GetDeltaRotation(b)); - const Eigen::Matrix3d eR = dR.transpose()*Rbw1*Rwb2; - const Eigen::Vector3d er = LogSO3(eR); - const Eigen::Matrix3d invJr = InverseRightJacobianSO3(er); + const Eigen::Matrix3d eR = dR.transpose() * Rbw1 * Rwb2; // r△Rij + const Eigen::Vector3d er = LogSO3(eR); // r△φij + const Eigen::Matrix3d invJr = InverseRightJacobianSO3(er); // Jr^-1(log(△Rij)) + // 就很神奇,_jacobianOplus个数等于边的个数,里面的大小等于观测值维度(也就是残差)× 每个节点待优化值的维度 // Jacobians wrt Pose 1 + // _jacobianOplus[0] 9*6矩阵 总体来说就是三个残差分别对pose1的旋转与平移(p)求导 _jacobianOplus[0].setZero(); // rotation + // (0,0)起点的3*3块表示旋转残差对pose1的旋转求导 _jacobianOplus[0].block<3,3>(0,0) = -invJr*Rwb2.transpose()*Rwb1; + // (3,0)起点的3*3块表示速度残差对pose1的旋转求导 _jacobianOplus[0].block<3,3>(3,0) = Skew(Rbw1*(s*(VV2->estimate() - VV1->estimate()) - g*dt)); + // (6,0)起点的3*3块表示位置残差对pose1的旋转求导 _jacobianOplus[0].block<3,3>(6,0) = Skew(Rbw1*(s*(VP2->estimate().twb - VP1->estimate().twb - 
VV1->estimate()*dt) - 0.5*g*dt*dt)); // translation + // (6,3)起点的3*3块表示位置残差对pose1的位置求导 _jacobianOplus[0].block<3,3>(6,3) = -s*Eigen::Matrix3d::Identity(); // Jacobians wrt Velocity 1 + // _jacobianOplus[1] 9*3矩阵 总体来说就是三个残差分别对pose1的速度求导 _jacobianOplus[1].setZero(); _jacobianOplus[1].block<3,3>(3,0) = -s*Rbw1; _jacobianOplus[1].block<3,3>(6,0) = -s*Rbw1*dt; // Jacobians wrt Gyro bias + // _jacobianOplus[2] 9*3矩阵 总体来说就是三个残差分别对陀螺仪偏置的速度求导 _jacobianOplus[2].setZero(); _jacobianOplus[2].block<3,3>(0,0) = -invJr*eR.transpose()*RightJacobianSO3(JRg*dbg)*JRg; _jacobianOplus[2].block<3,3>(3,0) = -JVg; _jacobianOplus[2].block<3,3>(6,0) = -JPg; // Jacobians wrt Accelerometer bias + // _jacobianOplus[3] 9*3矩阵 总体来说就是三个残差分别对加速度计偏置的速度求导 _jacobianOplus[3].setZero(); _jacobianOplus[3].block<3,3>(3,0) = -JVa; _jacobianOplus[3].block<3,3>(6,0) = -JPa; // Jacobians wrt Pose 2 + // _jacobianOplus[3] 9*6矩阵 总体来说就是三个残差分别对pose2的旋转与平移(p)求导 _jacobianOplus[4].setZero(); // rotation _jacobianOplus[4].block<3,3>(0,0) = invJr; @@ -709,15 +757,18 @@ void EdgeInertialGS::linearizeOplus() _jacobianOplus[4].block<3,3>(6,3) = s*Rbw1*Rwb2; // Jacobians wrt Velocity 2 + // _jacobianOplus[3] 9*3矩阵 总体来说就是三个残差分别对pose2的速度求导 _jacobianOplus[5].setZero(); _jacobianOplus[5].block<3,3>(3,0) = s*Rbw1; // Jacobians wrt Gravity direction + // _jacobianOplus[3] 9*2矩阵 总体来说就是三个残差分别对重力方向求导 _jacobianOplus[6].setZero(); _jacobianOplus[6].block<3,2>(3,0) = -Rbw1*dGdTheta*dt; _jacobianOplus[6].block<3,2>(6,0) = -0.5*Rbw1*dGdTheta*dt*dt; // Jacobians wrt scale factor + // _jacobianOplus[3] 9*1矩阵 总体来说就是三个残差分别对尺度求导 _jacobianOplus[7].setZero(); _jacobianOplus[7].block<3,1>(3,0) = Rbw1*(VV2->estimate()-VV1->estimate()); _jacobianOplus[7].block<3,1>(6,0) = Rbw1*(VP2->estimate().twb-VP1->estimate().twb-VV1->estimate()*dt); @@ -754,9 +805,13 @@ void EdgePriorPoseImu::linearizeOplus() { const VertexPose* VP = static_cast(_vertices[0]); const Eigen::Vector3d er = LogSO3(Rwb.transpose()*VP->estimate().Rwb); + // 就很神奇,_jacobianOplus个数等于边的个数,里面的大小等于观测值维度(也就是3旋转3平移3速度6偏置)× 每个节点待优化值的维度 + // 源码可读性太差了。。。里面会自动分配矩阵大小,计算改变量时按照对应位置来 _jacobianOplus[0].setZero(); - _jacobianOplus[0].block<3,3>(0,0) = InverseRightJacobianSO3(er); - _jacobianOplus[0].block<3,3>(3,3) = Rwb.transpose()*VP->estimate().Rwb; + // LOG(Rbw*R*EXP(φ)) = LOG(EXP(LOG(Rbw*R) + Jr(-1)*φ)) = LOG(Rbw*R) + Jr(-1)*φ + _jacobianOplus[0].block<3, 3>(0, 0) = InverseRightJacobianSO3(er); // Jr(-1) + // Rbw*(t + R*δt - twb) = Rbw*(t - twb) + Rbw*R*δt + _jacobianOplus[0].block<3, 3>(3, 3) = Rwb.transpose() * VP->estimate().Rwb; // Rbw*R _jacobianOplus[1].setZero(); _jacobianOplus[1].block<3,3>(6,0) = Eigen::Matrix3d::Identity(); _jacobianOplus[2].setZero(); @@ -859,6 +914,7 @@ Eigen::Matrix3d RightJacobianSO3(const double x, const double y, const double z) } } +// 反对称矩阵 Eigen::Matrix3d Skew(const Eigen::Vector3d &w) { Eigen::Matrix3d W; diff --git a/src/ImuTypes.cc b/src/ImuTypes.cc index d5a4457..c6bfc87 100644 --- a/src/ImuTypes.cc +++ b/src/ImuTypes.cc @@ -27,6 +27,11 @@ namespace IMU const float eps = 1e-4; +/** + * @brief 强制让R变成一个正交矩阵 + * @param R 待优化的旋转矩阵 + * @return 优化后的矩阵 + */ cv::Mat NormalizeRotation(const cv::Mat &R) { cv::Mat U,w,Vt; @@ -34,6 +39,11 @@ cv::Mat NormalizeRotation(const cv::Mat &R) return U*Vt; } +/** + * @brief 计算反对称矩阵 + * @param v 3维向量 + * @return 反对称矩阵 + */ cv::Mat Skew(const cv::Mat &v) { const float x = v.at(0); @@ -44,6 +54,11 @@ cv::Mat Skew(const cv::Mat &v) -y, x, 0); } +/** + * @brief 计算SO3 + * @param xyz 李代数 + * @return SO3 + */ cv::Mat ExpSO3(const float &x, const float &y, 
const float &z) { cv::Mat I = cv::Mat::eye(3,3,CV_32F); @@ -58,6 +73,11 @@ cv::Mat ExpSO3(const float &x, const float &y, const float &z) return (I + W*sin(d)/d + W*W*(1.0f-cos(d))/d2); } +/** + * @brief 计算SO3 + * @param xyz 李代数 + * @return SO3 + */ Eigen::Matrix ExpSO3(const double &x, const double &y, const double &z) { Eigen::Matrix I = Eigen::MatrixXd::Identity(3,3); @@ -80,11 +100,21 @@ Eigen::Matrix ExpSO3(const double &x, const double &y, const double return (I + W*sin(d)/d + W*W*(1.0-cos(d))/d2); } +/** + * @brief 计算SO3 + * @param v 李代数 + * @return SO3 + */ cv::Mat ExpSO3(const cv::Mat &v) { return ExpSO3(v.at(0),v.at(1),v.at(2)); } +/** + * @brief 计算so3 + * @param R SO3 + * @return so3 + */ cv::Mat LogSO3(const cv::Mat &R) { const float tr = R.at(0,0)+R.at(1,1)+R.at(2,2); @@ -102,6 +132,11 @@ cv::Mat LogSO3(const cv::Mat &R) return theta*w/s; } +/** + * @brief 计算右雅可比 + * @param xyz 李代数 + * @return Jr + */ cv::Mat RightJacobianSO3(const float &x, const float &y, const float &z) { cv::Mat I = cv::Mat::eye(3,3,CV_32F); @@ -120,11 +155,21 @@ cv::Mat RightJacobianSO3(const float &x, const float &y, const float &z) } } +/** + * @brief 计算右雅可比 + * @param v so3 + * @return Jr + */ cv::Mat RightJacobianSO3(const cv::Mat &v) { return RightJacobianSO3(v.at(0),v.at(1),v.at(2)); } +/** + * @brief 计算右雅可比的逆 + * @param xyz so3 + * @return Jr^-1 + */ cv::Mat InverseRightJacobianSO3(const float &x, const float &y, const float &z) { cv::Mat I = cv::Mat::eye(3,3,CV_32F); @@ -143,6 +188,11 @@ cv::Mat InverseRightJacobianSO3(const float &x, const float &y, const float &z) } } +/** + * @brief 计算右雅可比的逆 + * @param v so3 + * @return Jr^-1 + */ cv::Mat InverseRightJacobianSO3(const cv::Mat &v) { return InverseRightJacobianSO3(v.at(0),v.at(1),v.at(2)); @@ -190,6 +240,11 @@ IntegratedRotation::IntegratedRotation(const cv::Point3f &angVel, const Bias &im } } +/** + * @brief 预积分类构造函数,根据输入的偏置初始化预积分参数 + * @param b_ 偏置 + * @param calib imu标定参数的类 + */ Preintegrated::Preintegrated(const Bias &b_, const Calib &calib) { Nga = calib.Cov.clone(); @@ -206,6 +261,10 @@ Preintegrated::Preintegrated(Preintegrated* pImuPre): dT(pImuPre->dT), C(pImuPre } +/** + * @brief 复制上一帧的预积分 + * @param pImuPre 上一帧的预积分 + */ void Preintegrated::CopyFrom(Preintegrated* pImuPre) { std::cout << "Preintegrated: start clone" << std::endl; @@ -219,6 +278,7 @@ void Preintegrated::CopyFrom(Preintegrated* pImuPre) dR = pImuPre->dR.clone(); dV = pImuPre->dV.clone(); dP = pImuPre->dP.clone(); + // 旋转关于陀螺仪偏置变化的雅克比,以此类推 JRg = pImuPre->JRg.clone(); JVg = pImuPre->JVg.clone(); JVa = pImuPre->JVa.clone(); @@ -234,28 +294,34 @@ void Preintegrated::CopyFrom(Preintegrated* pImuPre) std::cout << "Preintegrated: end clone" << std::endl; } - +/** + * @brief 初始化预积分 + * @param b_ 偏置 + */ void Preintegrated::Initialize(const Bias &b_) { - dR = cv::Mat::eye(3,3,CV_32F); - dV = cv::Mat::zeros(3,1,CV_32F); - dP = cv::Mat::zeros(3,1,CV_32F); - JRg = cv::Mat::zeros(3,3,CV_32F); - JVg = cv::Mat::zeros(3,3,CV_32F); - JVa = cv::Mat::zeros(3,3,CV_32F); - JPg = cv::Mat::zeros(3,3,CV_32F); - JPa = cv::Mat::zeros(3,3,CV_32F); - C = cv::Mat::zeros(15,15,CV_32F); - Info=cv::Mat(); - db = cv::Mat::zeros(6,1,CV_32F); - b=b_; - bu=b_; - avgA = cv::Mat::zeros(3,1,CV_32F); - avgW = cv::Mat::zeros(3,1,CV_32F); - dT=0.0f; - mvMeasurements.clear(); + dR = cv::Mat::eye(3, 3, CV_32F); + dV = cv::Mat::zeros(3, 1, CV_32F); + dP = cv::Mat::zeros(3, 1, CV_32F); + JRg = cv::Mat::zeros(3, 3, CV_32F); + JVg = cv::Mat::zeros(3, 3, CV_32F); + JVa = cv::Mat::zeros(3, 3, CV_32F); + JPg = 
cv::Mat::zeros(3, 3, CV_32F); + JPa = cv::Mat::zeros(3, 3, CV_32F); + C = cv::Mat::zeros(15, 15, CV_32F); + Info = cv::Mat(); + db = cv::Mat::zeros(6, 1, CV_32F); + b = b_; + bu = b_; // 更新后的偏置 + avgA = cv::Mat::zeros(3, 1, CV_32F); // 平均加速度 + avgW = cv::Mat::zeros(3, 1, CV_32F); // 平均角速度 + dT = 0.0f; + mvMeasurements.clear(); // 存放imu数据及dt } +/** + * @brief 根据新的偏置重新积分mvMeasurements里的数据 Optimizer::InertialOptimization调用 + */ void Preintegrated::Reintegrate() { std::unique_lock lock(mMutex); @@ -299,8 +365,8 @@ void Preintegrated::IntegrateNewMeasurement(const cv::Point3f &acceleration, con // Update delta position dP and velocity dV (rely on no-updated delta rotation) // 根据没有更新的dR来更新dP与dV eq.(38) - dP = dP + dV*dt + 0.5f*dR*acc*dt*dt; - dV = dV + dR*acc*dt; + dP = dP + dV*dt + 0.5f*dR*acc*dt*dt; // 对应viorb论文的公式(2)的第三个,位移积分 + dV = dV + dR*acc*dt; // 对应viorb论文的公式(2)的第二个,速度积分 // Compute velocity and position parts of matrices A and B (rely on non-updated delta rotation) // 根据η_ij = A * η_i,j-1 + B_j-1 * η_j-1中的A矩阵和B矩阵对速度和位移进行更新 @@ -335,9 +401,9 @@ void Preintegrated::IntegrateNewMeasurement(const cv::Point3f &acceleration, con // 小量delta初始为0,更新后通常也为0,故省略了小量的更新 // Update covariance // Step 3.更新协方差,frost经典预积分论文的第63个公式,推导了噪声(ηa, ηg)对dR dV dP 的影响 - C.rowRange(0,9).colRange(0,9) = A*C.rowRange(0,9).colRange(0,9)*A.t() + B*Nga*B.t(); + C.rowRange(0,9).colRange(0,9) = A*C.rowRange(0,9).colRange(0,9)*A.t() + B*Nga*B.t(); // B矩阵为9*6矩阵 Nga 6*6对角矩阵,3个陀螺仪噪声的平方,3个加速度计噪声的平方 // 这一部分最开始是0矩阵,随着积分次数增加,每次都加上随机游走,偏置的信息矩阵 - C.rowRange(9,15).colRange(9,15) = C.rowRange(9,15).colRange(9,15) + NgaWalk; + C.rowRange(9,15).colRange(9,15) = C.rowRange(9,15).colRange(9,15) + NgaWalk; // NgaWalk 6*6 随机游走对角矩阵 // Update rotation jacobian wrt bias correction // 计算偏置的雅克比矩阵,r对bg的导数,∂ΔRij/∂bg = (ΔRjj-1) * ∂ΔRij-1/∂bg - Jr(j-1)*t @@ -350,6 +416,10 @@ void Preintegrated::IntegrateNewMeasurement(const cv::Point3f &acceleration, con dT += dt; } +/** + * @brief 融合两个预积分,发生在删除关键帧的时候,3帧变2帧,需要把两段预积分融合 + * @param pPrev 前面的预积分 + */ void Preintegrated::MergePrevious(Preintegrated* pPrev) { if (pPrev==this) @@ -376,6 +446,10 @@ void Preintegrated::MergePrevious(Preintegrated* pPrev) } +/** + * @brief 更新偏置 + * @param bu_ 偏置 + */ void Preintegrated::SetNewBias(const Bias &bu_) { std::unique_lock lock(mMutex); @@ -389,12 +463,22 @@ void Preintegrated::SetNewBias(const Bias &bu_) db.at(5) = bu_.baz-b.baz; } +/** + * @brief 获得当前偏置与输入偏置的改变量 + * @param b_ 偏置 + * @return 改变量 + */ IMU::Bias Preintegrated::GetDeltaBias(const Bias &b_) { std::unique_lock lock(mMutex); return IMU::Bias(b_.bax-b.bax,b_.bay-b.bay,b_.baz-b.baz,b_.bwx-b.bwx,b_.bwy-b.bwy,b_.bwz-b.bwz); } +/** + * @brief 根据新的偏置计算新的dR + * @param b_ 新的偏置 + * @return dR + */ cv::Mat Preintegrated::GetDeltaRotation(const Bias &b_) { std::unique_lock lock(mMutex); @@ -405,6 +489,11 @@ cv::Mat Preintegrated::GetDeltaRotation(const Bias &b_) return NormalizeRotation(dR*ExpSO3(JRg*dbg)); } +/** + * @brief 根据新的偏置计算新的dV + * @param b_ 新的偏置 + * @return dV + */ cv::Mat Preintegrated::GetDeltaVelocity(const Bias &b_) { std::unique_lock lock(mMutex); @@ -414,6 +503,11 @@ cv::Mat Preintegrated::GetDeltaVelocity(const Bias &b_) return dV + JVg*dbg + JVa*dba; } +/** + * @brief 根据新的偏置计算新的dP + * @param b_ 新的偏置 + * @return dP + */ cv::Mat Preintegrated::GetDeltaPosition(const Bias &b_) { std::unique_lock lock(mMutex); @@ -423,61 +517,101 @@ cv::Mat Preintegrated::GetDeltaPosition(const Bias &b_) return dP + JPg*dbg + JPa*dba; } +/** + * @brief 返回经过db(δba, δbg)更新后的dR,与上面是一个意思 + * @return dR + */ cv::Mat 
Preintegrated::GetUpdatedDeltaRotation() { std::unique_lock lock(mMutex); return NormalizeRotation(dR*ExpSO3(JRg*db.rowRange(0,3))); } +/** + * @brief 返回经过db(δba, δbg)更新后的dV,与上面是一个意思 + * @return dV + */ cv::Mat Preintegrated::GetUpdatedDeltaVelocity() { std::unique_lock lock(mMutex); return dV + JVg*db.rowRange(0,3) + JVa*db.rowRange(3,6); } +/** + * @brief 返回经过db(δba, δbg)更新后的dP,与上面是一个意思 + * @return dP + */ cv::Mat Preintegrated::GetUpdatedDeltaPosition() { std::unique_lock lock(mMutex); return dP + JPg*db.rowRange(0,3) + JPa*db.rowRange(3,6); } +/** + * @brief 获取dR + * @return dR + */ cv::Mat Preintegrated::GetOriginalDeltaRotation() { std::unique_lock lock(mMutex); return dR.clone(); } +/** + * @brief 获取dV + * @return dV + */ cv::Mat Preintegrated::GetOriginalDeltaVelocity() { std::unique_lock lock(mMutex); return dV.clone(); } +/** + * @brief 获取dP + * @return dP + */ cv::Mat Preintegrated::GetOriginalDeltaPosition() { std::unique_lock lock(mMutex); return dP.clone(); } +/** + * @brief 获取b,更新前的偏置 + * @return b + */ Bias Preintegrated::GetOriginalBias() { std::unique_lock lock(mMutex); return b; } +/** + * @brief 获取bu,更新后的偏置 + * @return bu + */ Bias Preintegrated::GetUpdatedBias() { std::unique_lock lock(mMutex); return bu; } +/** + * @brief 获取db,更新前后的偏置差 + * @return db + */ cv::Mat Preintegrated::GetDeltaBias() { std::unique_lock lock(mMutex); return db.clone(); } -Eigen::Matrix Preintegrated::GetInformationMatrix() +/** + * @brief 获取信息矩阵,没有用到,也就是C矩阵,其中9~15每个元素取倒数, + * @return 信息矩阵 + */ +Eigen::Matrix Preintegrated::GetInformationMatrix() { std::unique_lock lock(mMutex); if(Info.empty()) @@ -495,6 +629,10 @@ Eigen::Matrix Preintegrated::GetInformationMatrix() return EI; } +/** + * @brief 赋值新的偏置 + * @param b 偏置 + */ void Bias::CopyFrom(Bias &b) { bax = b.bax; @@ -529,21 +667,34 @@ std::ostream& operator<< (std::ostream &out, const Bias &b) return out; } +/** + * @brief 设置参数 + * @param Tbc_ 位姿变换 + * @param ng 噪声 + * @param na 噪声 + * @param ngw 随机游走 + * @param naw 随机游走 + */ void Calib::Set(const cv::Mat &Tbc_, const float &ng, const float &na, const float &ngw, const float &naw) { Tbc = Tbc_.clone(); Tcb = cv::Mat::eye(4,4,CV_32F); Tcb.rowRange(0,3).colRange(0,3) = Tbc.rowRange(0,3).colRange(0,3).t(); Tcb.rowRange(0,3).col(3) = -Tbc.rowRange(0,3).colRange(0,3).t()*Tbc.rowRange(0,3).col(3); + + // 噪声协方差 Cov = cv::Mat::eye(6,6,CV_32F); const float ng2 = ng*ng; const float na2 = na*na; + Cov.at(0,0) = ng2; Cov.at(1,1) = ng2; Cov.at(2,2) = ng2; Cov.at(3,3) = na2; Cov.at(4,4) = na2; Cov.at(5,5) = na2; + + // 随机游走协方差 CovWalk = cv::Mat::eye(6,6,CV_32F); const float ngw2 = ngw*ngw; const float naw2 = naw*naw; @@ -555,6 +706,10 @@ void Calib::Set(const cv::Mat &Tbc_, const float &ng, const float &na, const flo CovWalk.at(5,5) = naw2; } +/** + * @brief imu标定参数的构造函数 + * @param calib imu标定参数 + */ Calib::Calib(const Calib &calib) { Tbc = calib.Tbc.clone(); @@ -565,4 +720,4 @@ Calib::Calib(const Calib &calib) } //namespace IMU -} //namespace ORB_SLAM2 +} //namespace ORB_SLAM3 diff --git a/src/KeyFrameDatabase.cc b/src/KeyFrameDatabase.cc index babf5a8..3477488 100644 --- a/src/KeyFrameDatabase.cc +++ b/src/KeyFrameDatabase.cc @@ -649,22 +649,33 @@ bool compFirst(const pair & a, const pair & } +/** + * @brief 找到N个融合候选N个回环候选 + * + * @param[in] pKF 当前关键帧(我们要寻找这个关键帧的回环候选帧和融合候选帧) + * @param[out] vpLoopCand 记录找到的回环候选帧 + * @param[out] vpMergeCand 记录找到的融合候选帧 + * @param[in] nNumCandidates 期望的候选数目,即回环和候选分别应该有多少个 + */ void KeyFrameDatabase::DetectNBestCandidates(KeyFrame *pKF, vector &vpLoopCand, vector 
&vpMergeCand, int nNumCandidates)
{
+    // Step 1 统计与当前关键帧有相同单词的关键帧
    list<KeyFrame*> lKFsSharingWords;
    //set<KeyFrame*> spInsertedKFsSharing;
+    // 当前关键帧的共视关键帧(避免将当前关键帧的共视关键帧加入回环检测)
    set<KeyFrame*> spConnectedKF;

    // Search all keyframes that share a word with current frame
    {
        unique_lock<mutex> lock(mMutex);
-
+        // 拿到当前关键帧的共视关键帧
        spConnectedKF = pKF->GetConnectedKeyFrames();

+        // 遍历当前关键帧bow向量的每个单词
        for(DBoW2::BowVector::const_iterator vit=pKF->mBowVec.begin(), vend=pKF->mBowVec.end(); vit != vend; vit++)
-        {
+        {
+            // 拿到当前单词的逆向索引(所有含有当前单词的关键帧)
            list<KeyFrame*> &lKFs = mvInvertedFile[vit->first];
-
+            // 遍历每个有该单词的关键帧
            for(list<KeyFrame*>::iterator lit=lKFs.begin(), lend= lKFs.end(); lit!=lend; lit++)
            {
                KeyFrame* pKFi=*lit;
@@ -672,16 +683,21 @@ void KeyFrameDatabase::DetectNBestCandidates(KeyFrame *pKF, vector &v
                {
                    continue;
                }*/
+                // 如果此关键帧没有被当前关键帧访问过(防止重复添加)
                if(pKFi->mnPlaceRecognitionQuery!=pKF->mnId)
-                {
+                {
+                    // 初始化公共单词数为0
                    pKFi->mnPlaceRecognitionWords=0;
+                    // 如果该关键帧不是当前关键帧的共视关键帧
                    if(!spConnectedKF.count(pKFi))
                    {
-
+                        // 标记该关键帧被当前关键帧访问到
                        pKFi->mnPlaceRecognitionQuery=pKF->mnId;
+                        // 把该关键帧添加到有公共单词的关键帧列表中
                        lKFsSharingWords.push_back(pKFi);
                    }
                }
+                // 递增该关键帧与当前关键帧的公共单词数
                pKFi->mnPlaceRecognitionWords++;
                /*if(spInsertedKFsSharing.find(pKFi) == spInsertedKFsSharing.end())
                {
@@ -692,59 +708,77 @@ void KeyFrameDatabase::DetectNBestCandidates(KeyFrame *pKF, vector &v
        }
    }
+    // 如果没有有公共单词的关键帧,直接返回
    if(lKFsSharingWords.empty())
        return;

    // Only compare against those keyframes that share enough words
+    // Step 2 统计所有候选帧中与当前关键帧的公共单词数最多的单词数,并筛选
    int maxCommonWords=0;
    for(list<KeyFrame*>::iterator lit=lKFsSharingWords.begin(), lend= lKFsSharingWords.end(); lit!=lend; lit++)
    {
        if((*lit)->mnPlaceRecognitionWords>maxCommonWords)
            maxCommonWords=(*lit)->mnPlaceRecognitionWords;
    }
-
+    // 取最大公共单词数的0.8倍为阈值
    int minCommonWords = maxCommonWords*0.8f;
-
+    // 这里的pair是 <相似度,候选帧的指针> : 记录所有大于minCommonWords的候选帧与当前关键帧的相似度
    list<pair<float,KeyFrame*> > lScoreAndMatch;
-
+    // 只是个统计变量,后续并未使用
    int nscores=0;

    // Compute similarity score.
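+    // A worked example of the filtering below (numbers are illustrative, not
+    // from any dataset): if the best candidate shares maxCommonWords = 50
+    // words with pKF, then minCommonWords = 40 and only candidates sharing
+    // strictly more than 40 words are scored with mpVoc->score(). Each scored
+    // candidate then pools the scores of its top-10 covisible keyframes (those
+    // that also share words with pKF) into a group total, so one accidental
+    // high scorer cannot outrank a consistent cluster of moderate scorers.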
+    // 对所有大于minCommonWords的候选帧计算相似度
+
+
+    // 遍历所有有公共单词的候选帧
    for(list<KeyFrame*>::iterator lit=lKFsSharingWords.begin(), lend= lKFsSharingWords.end(); lit!=lend; lit++)
    {
        KeyFrame* pKFi = *lit;
-
+        // 如果当前帧的公共单词数大于minCommonWords
        if(pKFi->mnPlaceRecognitionWords>minCommonWords)
        {
            nscores++;
+            // 计算相似度
            float si = mpVoc->score(pKF->mBowVec,pKFi->mBowVec);
+            // 记录该候选帧与当前帧的相似度
            pKFi->mPlaceRecognitionScore=si;
+            // 记录到容器里, 每个元素是<相似度,候选帧的指针>
            lScoreAndMatch.push_back(make_pair(si,pKFi));
        }
    }
-
+    // 如果为空,直接返回,表示没有符合上述条件的关键帧
    if(lScoreAndMatch.empty())
        return;
-
+    // Step 3 : 用小组得分排序得到top3总分里最高分的关键帧,作为候选帧
+    // 统计以组为单位的累计相似度和组内相似度最高的关键帧, 每个pair为<小组总相似度,组内相似度最高的关键帧指针>
    list<pair<float,KeyFrame*> > lAccScoreAndMatch;
    float bestAccScore = 0;

    // Lets now accumulate score by covisibility
+    // 遍历所有被lScoreAndMatch记录的pair <相似度,候选关键帧>
    for(list<pair<float,KeyFrame*> >::iterator it=lScoreAndMatch.begin(), itend=lScoreAndMatch.end(); it!=itend; it++)
    {
+        // 候选关键帧
        KeyFrame* pKFi = it->second;
+        // 与候选关键帧共视关系最好的10个关键帧
        vector<KeyFrame*> vpNeighs = pKFi->GetBestCovisibilityKeyFrames(10);
-
+        // 初始化最大相似度为该候选关键帧自己的相似度
        float bestScore = it->first;
+        // 初始化小组累计得分为该候选关键帧自己的相似度
        float accScore = bestScore;
+        // 初始化组内相似度最高的帧为该候选关键帧本身
        KeyFrame* pBestKF = pKFi;
+        // 遍历与该候选关键帧共视关系最好的10帧
        for(vector<KeyFrame*>::iterator vit=vpNeighs.begin(), vend=vpNeighs.end(); vit!=vend; vit++)
        {
            KeyFrame* pKF2 = *vit;
+            // 如果该关键帧没有被当前关键帧访问过(这里标记的是有没有公共单词)则跳过
            if(pKF2->mnPlaceRecognitionQuery!=pKF->mnId)
                continue;
-
+            // 累加小组总分
            accScore+=pKF2->mPlaceRecognitionScore;
+            // 如果大于组内最高分,则重新记录
            if(pKF2->mPlaceRecognitionScore>bestScore)
            {
                pBestKF=pKF2;
@@ -752,39 +786,51 @@ void KeyFrameDatabase::DetectNBestCandidates(KeyFrame *pKF, vector &v
            }
        }

+        // 统计以组为单位的累计相似度和组内相似度最高的关键帧, 每个pair为<小组总相似度,组内相似度最高的关键帧指针>
        lAccScoreAndMatch.push_back(make_pair(accScore,pBestKF));
+        // 统计最高得分, 这个bestAccScore没有用到
        if(accScore>bestAccScore)
            bestAccScore=accScore;
    }

    //cout << "Amount of candidates: " << lAccScoreAndMatch.size() << endl;
-
+    // 按相似度从大到小排序
    lAccScoreAndMatch.sort(compFirst);
-
+    // 最后返回的变量, 记录回环的候选帧
    vpLoopCand.reserve(nNumCandidates);
+    // 最后返回的变量, 记录融合候选帧
    vpMergeCand.reserve(nNumCandidates);
+    // 避免重复添加
    set<KeyFrame*> spAlreadyAddedKF;
    //cout << "Candidates in score order " << endl;
    //for(list<pair<float,KeyFrame*> >::iterator it=lAccScoreAndMatch.begin(), itend=lAccScoreAndMatch.end(); it!=itend; it++)
    int i = 0;
    list<pair<float,KeyFrame*> >::iterator it=lAccScoreAndMatch.begin();
+    // 遍历lAccScoreAndMatch中所有的pair, 每个pair为<小组总相似度,组内相似度最高的关键帧指针>
    while(i < lAccScoreAndMatch.size() && (vpLoopCand.size() < nNumCandidates || vpMergeCand.size() < nNumCandidates))
    {
        //cout << "Accum score: " << it->first << endl;
+        // 拿到候选关键帧的指针
        KeyFrame* pKFi = it->second;
        if(pKFi->isBad())
            continue;

+        // 如果没有被重复添加
        if(!spAlreadyAddedKF.count(pKFi))
-        {
+        {
+            // 如果候选帧与当前关键帧在同一个地图里,且候选者数量还不足够
            if(pKF->GetMap() == pKFi->GetMap() && vpLoopCand.size() < nNumCandidates)
            {
+                // 添加到回环候选帧里
                vpLoopCand.push_back(pKFi);
            }
+            // 如果候选者与当前关键帧不在同一个地图里, 且候选者数量还不足够, 且候选者所在地图不是bad
            else if(pKF->GetMap() != pKFi->GetMap() && vpMergeCand.size() < nNumCandidates && !pKFi->GetMap()->IsBad())
-            {
+            {
+                // 添加到融合候选帧里
                vpMergeCand.push_back(pKFi);
            }
+            // 防止重复添加
            spAlreadyAddedKF.insert(pKFi);
        }
        i++;
diff --git a/src/LocalMapping.cc b/src/LocalMapping.cc
index 1548d01..1107d06 100644
--- a/src/LocalMapping.cc
+++ b/src/LocalMapping.cc
@@ -120,12 +120,14 @@ void LocalMapping::Run()
            // Step 4 当前关键帧与相邻关键帧通过三角化产生新的地图点,使得跟踪更稳
            CreateNewMapPoints();

-            mbAbortBA = false;
+            mbAbortBA = false; // 注意orbslam2中放在了函数SearchInNeighbors(用到了mbAbortBA)后面,应该放这里更合适

            // 已经处理完队列中的最后的一个关键帧
            if(!CheckNewKeyFrames())
            {
                // Find more matches in neighbor keyframes and fuse point
duplications // Step 5 检查并融合当前关键帧与相邻关键帧帧(两级相邻)中重复的地图点 + // 先完成相邻关键帧与当前关键帧的地图点的融合(在相邻关键帧中查找当前关键帧的地图点), + // 再完成当前关键帧与相邻关键帧的地图点的融合(在当前关键帧中查找当前相邻关键帧的地图点) SearchInNeighbors(); } @@ -144,19 +146,19 @@ void LocalMapping::Run() // 已经处理完队列中的最后的一个关键帧,并且闭环检测没有请求停止LocalMapping if(!CheckNewKeyFrames() && !stopRequested()) { - // 地图中关键帧数目大于2个 + // 当前地图中关键帧数目大于2个 if(mpAtlas->KeyFramesInMap()>2) { // Step 6.1 处于IMU模式并且当前关键帧所在的地图已经完成IMU初始化 if(mbInertial && mpCurrentKeyFrame->GetMap()->isImuInitialized()) { - // 计算上一帧到当前帧相机光心的距离 + 上上帧到上一帧相机光心的距离 + // 计算上一关键帧到当前关键帧相机光心的距离 + 上上关键帧到上一关键帧相机光心的距离 float dist = cv::norm(mpCurrentKeyFrame->mPrevKF->GetCameraCenter() - mpCurrentKeyFrame->GetCameraCenter()) + cv::norm(mpCurrentKeyFrame->mPrevKF->mPrevKF->GetCameraCenter() - mpCurrentKeyFrame->mPrevKF->GetCameraCenter()); // 如果距离大于5厘米,记录当前KF和上一KF时间戳的差,累加到mTinit if(dist>0.05) mTinit += mpCurrentKeyFrame->mTimeStamp - mpCurrentKeyFrame->mPrevKF->mTimeStamp; - // 当前关键帧所在的地图已经完成IMU BA2 + // 当前关键帧所在的地图尚未完成IMU BA2(IMU第三阶段初始化) if(!mpCurrentKeyFrame->GetMap()->GetIniertialBA2()) { // 如果累计时间差小于10s 并且 距离小于2厘米,认为运动幅度太小,不足以初始化IMU,将mbBadImu设置为true @@ -166,9 +168,10 @@ void LocalMapping::Run() unique_lock lock(mMutexReset); mbResetRequestedActiveMap = true; mpMapToReset = mpCurrentKeyFrame->GetMap(); - mbBadImu = true; + mbBadImu = true; //在跟踪线程里会重置当前活跃地图 } } + // 判断成功跟踪匹配的点数是否足够多 // 条件---------1.1、跟踪成功的内点数目大于75-----1.2、并且是单目--或--2.1、跟踪成功的内点数目大于100-----2.2、并且不是单目 bool bLarge = ((mpTracker->GetMatchesInliers()>75)&&mbMonocular)||((mpTracker->GetMatchesInliers()>100)&&!mbMonocular); Optimizer::LocalInertialBA(mpCurrentKeyFrame, &mbAbortBA, mpCurrentKeyFrame->GetMap(),num_FixedKF_BA,num_OptKF_BA,num_MPs_BA,num_edges_BA, bLarge, !mpCurrentKeyFrame->GetMap()->GetIniertialBA2()); @@ -206,9 +209,11 @@ void LocalMapping::Run() #endif // Initialize IMU here - // Step 7 当前关键帧所在地图的IMU初始化 + // Step 7 当前关键帧所在地图未完成IMU初始化(第一阶段) if(!mpCurrentKeyFrame->GetMap()->isImuInitialized() && mbInertial) { + // 在函数InitializeIMU里设置IMU成功初始化标志 SetImuInitialized + // IMU第一阶段初始化 if (mbMonocular) InitializeIMU(1e2, 1e10, true); else @@ -217,6 +222,7 @@ void LocalMapping::Run() // Check redundant local Keyframes + // 跟踪中关键帧插入条件比较松,交给LocalMapping线程的关键帧会比较密,这里再删除冗余 // Step 8 检测并剔除当前帧相邻的关键帧中冗余的关键帧 // 冗余的判定:该关键帧的90%的地图点可以被其它关键帧观测到 KeyFrameCulling(); @@ -227,16 +233,16 @@ void LocalMapping::Run() timeKFCulling_ms = std::chrono::duration_cast >(time_EndKFCulling - time_EndLBA).count(); vdKFCullingSync_ms.push_back(timeKFCulling_ms); #endif - // Step 9 如果累计时间差小于100s 并且 是IMU模式,进行VIBA + // Step 9 如果距离初始化成功累计时间差小于100s 并且 是IMU模式,进行VIBA if ((mTinit<100.0f) && mbInertial) { - // Step 9.1 根据条件判断是否进行VIBA1 + // Step 9.1 根据条件判断是否进行VIBA1(IMU第二阶段初始化) // 条件:1、当前关键帧所在的地图还未完成IMU初始化---并且--------2、正常跟踪状态---------- if(mpCurrentKeyFrame->GetMap()->isImuInitialized() && mpTracker->mState==Tracking::OK) // Enter here everytime local-mapping is called { // 当前关键帧所在的地图还未完成VIBA 1 if(!mpCurrentKeyFrame->GetMap()->GetIniertialBA1()){ - // 如果累计时间差大于5s,开始VIBA 1 + // 如果累计时间差大于5s,开始VIBA1(IMU第二阶段初始化) if (mTinit>5.0f) { cout << "start VIBA 1" << endl; @@ -250,10 +256,10 @@ void LocalMapping::Run() } } //else if (mbNotBA2){ - // Step 9.2 根据条件判断是否进行VIBA2 + // Step 9.2 根据条件判断是否进行VIBA2(IMU第三阶段初始化) // 当前关键帧所在的地图还未完成VIBA 2 else if(!mpCurrentKeyFrame->GetMap()->GetIniertialBA2()){ - // 如果累计时间差大于15s,开始VIBA 2 + // 如果累计时间差大于15s,开始VIBA2(IMU第三阶段初始化) if (mTinit>15.0f){ // 15.0f cout << "start VIBA 2" << endl; mpCurrentKeyFrame->GetMap()->SetIniertialBA2(); @@ -267,9 +273,9 @@ void LocalMapping::Run() } // scale 
refinement - // Step 9.3 尺度优化 + // Step 9.3 在关键帧小于100时,会在满足一定时间间隔后多次进行尺度、重力方向优化 if (((mpAtlas->KeyFramesInMap())<=100) && - ((mTinit>25.0f && mTinit<25.5f)|| + ((mTinit>25.0f && mTinit<25.5f)|| (mTinit>35.0f && mTinit<35.5f)|| (mTinit>45.0f && mTinit<45.5f)|| (mTinit>55.0f && mTinit<55.5f)|| @@ -409,6 +415,9 @@ void LocalMapping::ProcessNewKeyFrame() mpAtlas->AddKeyFrame(mpCurrentKeyFrame); } +/** + * @brief 处理新的关键帧,使队列为空,注意这里只是处理了关键帧,并没有生成MP + */ void LocalMapping::EmptyQueue() { while(CheckNewKeyFrames()) @@ -487,10 +496,12 @@ void LocalMapping::CreateNewMapPoints() // Step 1:在当前关键帧的共视关键帧中找到共视程度最高的nn帧相邻关键帧 vector vpNeighKFs = mpCurrentKeyFrame->GetBestCovisibilityKeyFrames(nn); + // imu模式下在附近添加更多的帧进来 if (mbInertial) { KeyFrame* pKF = mpCurrentKeyFrame; int count=0; + // 在总数不够且上一关键帧存在,且添加的帧没有超过总数时 while((vpNeighKFs.size()<=nn)&&(pKF->mPrevKF)&&(count++::iterator it = std::find(vpNeighKFs.begin(), vpNeighKFs.end(), pKF->mPrevKF); @@ -561,7 +572,7 @@ void LocalMapping::CreateNewMapPoints() const float medianDepthKF2 = pKF2->ComputeSceneMedianDepth(2); // baseline与景深的比例 const float ratioBaselineDepth = baseline/medianDepthKF2; - // 如果比例特别小,基线太短恢复3D点不准,那么跳过当前邻接的关键帧,不生成3D点 + // 如果特别远(比例特别小),基线太短恢复3D点不准,那么跳过当前邻接的关键帧,不生成3D点 if(ratioBaselineDepth<0.01) continue; } @@ -573,10 +584,11 @@ void LocalMapping::CreateNewMapPoints() // Search matches that fullfil epipolar constraint // Step 5:通过BoW对两关键帧的未匹配的特征点快速匹配,用极线约束抑制离群点,生成新的匹配点对 vector > vMatchedIndices; + // imu相关,非imu时为false bool bCoarse = mbInertial && ((!mpCurrentKeyFrame->GetMap()->GetIniertialBA2() && mpCurrentKeyFrame->GetMap()->GetIniertialBA1())|| mpTracker->mState==Tracking::RECENTLY_LOST); - + // 通过极线约束的方式找到匹配点(且该点还没有成为MP,注意非单目已经生成的MP这里直接跳过不做匹配,所以最后并不会覆盖掉特征点对应的MP) matcher.SearchForTriangulation_(mpCurrentKeyFrame,pKF2,F12,vMatchedIndices,false,bCoarse); auto Rcw2 = pKF2->GetRotation_(); @@ -611,8 +623,10 @@ void LocalMapping::CreateNewMapPoints() const cv::KeyPoint &kp1 = (mpCurrentKeyFrame -> NLeft == -1) ? mpCurrentKeyFrame->mvKeysUn[idx1] : (idx1 < mpCurrentKeyFrame -> NLeft) ? mpCurrentKeyFrame -> mvKeys[idx1] : mpCurrentKeyFrame -> mvKeysRight[idx1 - mpCurrentKeyFrame -> NLeft]; + // mvuRight中存放着极限校准后双目特征点在右目对应的像素横坐标,如果不是基线校准的双目或者没有找到匹配点,其值将为-1(或者rgbd) const float kp1_ur=mpCurrentKeyFrame->mvuRight[idx1]; bool bStereo1 = (!mpCurrentKeyFrame->mpCamera2 && kp1_ur>=0); + // 查看点idx1是否为右目的点 const bool bRight1 = (mpCurrentKeyFrame -> NLeft == -1 || idx1 < mpCurrentKeyFrame -> NLeft) ? false : true; // 当前匹配在邻接关键帧中的特征点 @@ -620,11 +634,13 @@ void LocalMapping::CreateNewMapPoints() : (idx2 < pKF2 -> NLeft) ? pKF2 -> mvKeys[idx2] : pKF2 -> mvKeysRight[idx2 - pKF2 -> NLeft]; // mvuRight中存放着双目的深度值,如果不是双目,其值将为-1 + // mvuRight中存放着极限校准后双目特征点在右目对应的像素横坐标,如果不是基线校准的双目或者没有找到匹配点,其值将为-1(或者rgbd) const float kp2_ur = pKF2->mvuRight[idx2]; bool bStereo2 = (!pKF2->mpCamera2 && kp2_ur>=0); + // 查看点idx2是否为右目的点 const bool bRight2 = (pKF2 -> NLeft == -1 || idx2 < pKF2 -> NLeft) ? 
false : true; - + // 当目前为左右目时,确定两个点所在相机之间的位姿关系 if(mpCurrentKeyFrame->mpCamera2 && pKF2->mpCamera2){ if(bRight1 && bRight2){ Rcw1 = mpCurrentKeyFrame->GetRightRotation_(); @@ -724,10 +740,20 @@ void LocalMapping::CreateNewMapPoints() // Step 6.4:三角化恢复3D点 cv::Matx31f x3D; bool bEstimated = false; + // cosParallaxRays>0 && (bStereo1 || bStereo2 || cosParallaxRays<0.9998)表明视线角正常 + // cosParallaxRays0 && (bStereo1 || bStereo2 || (cosParallaxRays<0.9998 && mbInertial) || (cosParallaxRays<0.9998 && !mbInertial))) { // Linear Triangulation Method + // 见Initializer.cpp的Triangulate函数,A矩阵构建的方式类似,不同的是乘的反对称矩阵那个是像素坐标构成的,而这个是归一化坐标构成的 + // Pc = Tcw*Pw, 此处Tcw行数默认为3,因为不会计算第4行所以没有去掉, + // 左右两面乘Pc的反对称矩阵 [Pc]x * Tcw *Pw = 0 构成了A矩阵,中间涉及一个尺度a,因为都是归一化平面,但右面是0所以直接可以约掉不影响最后的尺度 + // 0 -1 y Tcw.row(0) -Tcw.row(1) + y*Tcw.row(2) + // 1 0 -x * Tcw.row(1) = Tcw.row(0) - x*Tcw.row(2) + // -y x 0 Tcw.row(2) x*Tcw.row(1) - y*Tcw.row(0) + // 发现上述矩阵线性相关,所以取前两维,两个点构成了4行的矩阵,就是如下的操作,求出的是4维的结果[X,Y,Z,A],所以需要除以最后一维使之为1,就成了[X,Y,Z,1]这种齐次形式 cv::Matx14f A_r0 = xn1(0) * Tcw1.row(2) - Tcw1.row(0); cv::Matx14f A_r1 = xn1(1) * Tcw1.row(2) - Tcw1.row(1); cv::Matx14f A_r2 = xn2(0) * Tcw2.row(2) - Tcw2.row(0); @@ -1044,7 +1070,12 @@ void LocalMapping::SearchInNeighbors() mpCurrentKeyFrame->UpdateConnections(); } -// 根据两关键帧的姿态计算两个关键帧之间的基本矩阵 +/** + * @brief 根据两关键帧的姿态计算两个关键帧之间的基本矩阵 + * @param pKF1 关键帧1 + * @param pKF2 关键帧2 + * @return 基本矩阵 + */ cv::Mat LocalMapping::ComputeF12(KeyFrame *&pKF1, KeyFrame *&pKF2) { // 先构造两帧之间的R12,t12 @@ -1139,7 +1170,9 @@ void LocalMapping::Release() cout << "Local Mapping RELEASE" << endl; } -// 查看当前是否允许接受关键帧 +/** + * @brief 查看是否接收关键帧,也就是当前线程是否在处理数据,当然tracking线程也不会全看这个值,他会根据队列阻塞情况 + */ bool LocalMapping::AcceptKeyFrames() { unique_lock lock(mMutexAccept); @@ -1153,7 +1186,9 @@ void LocalMapping::SetAcceptKeyFrames(bool flag) mbAcceptKeyFrames=flag; } -// 设置 mbnotStop标志的状态 +/** + * @brief 如果不让它暂停,即使发出了暂停信号也不暂停 + */ bool LocalMapping::SetNotStop(bool flag) { unique_lock lock(mMutexStop); @@ -1166,7 +1201,9 @@ bool LocalMapping::SetNotStop(bool flag) return true; } -// 终止BA +/** + * @brief 放弃这次BA + */ void LocalMapping::InterruptBA() { mbAbortBA = true; @@ -1192,15 +1229,17 @@ void LocalMapping::KeyFrameCulling() // scaleLeveli:pKFi的金字塔尺度 // scaleLevel:pKF的金字塔尺度 const int Nd = 21; // MODIFICATION_STEREO_IMU 20 This should be the same than that one from LIBA - mpCurrentKeyFrame->UpdateBestCovisibles(); + mpCurrentKeyFrame->UpdateBestCovisibles(); // 更新共视关系 + // 1. 
根据Covisibility Graph提取当前帧的共视关键帧 vector vpLocalKeyFrames = mpCurrentKeyFrame->GetVectorCovisibleKeyFrames(); float redundant_th; + // 非IMU时 if(!mbInertial) redundant_th = 0.9; - else if (mbMonocular) + else if (mbMonocular) // imu 且单目时 redundant_th = 0.9; - else + else // 其他imu时 redundant_th = 0.5; const bool bInitImu = mpAtlas->isImuInitialized(); @@ -1212,6 +1251,7 @@ void LocalMapping::KeyFrameCulling() { int count = 0; KeyFrame* aux_KF = mpCurrentKeyFrame; + // 找到第前21个关键帧的关键帧id while(countmPrevKF) { aux_KF = aux_KF->mPrevKF; @@ -1306,18 +1346,22 @@ void LocalMapping::KeyFrameCulling() // Step 4:该关键帧90%以上的有效地图点被判断为冗余的,则删除该关键帧 if(nRedundantObservations>redundant_th*nMPs) { + // imu模式下需要更改前后关键帧的连续性,且预积分要叠加起来 if (mbInertial) { + // 关键帧少于Nd个,跳过不删 if (mpAtlas->KeyFramesInMap()<=Nd) continue; - + // 关键帧与当前关键帧id差一个,跳过不删 if(pKF->mnId>(mpCurrentKeyFrame->mnId-2)) continue; - + // 关键帧具有前后关键帧 if(pKF->mPrevKF && pKF->mNextKF) { const float t = pKF->mNextKF->mTimeStamp-pKF->mPrevKF->mTimeStamp; - + // 下面两个括号里的内容一模一样 + // imu初始化了,且距当前帧的ID超过21,且前后两个关键帧时间间隔小于3s + // 或者时间间隔小于0.5s if((bInitImu && (pKF->mnIdmNextKF->mpImuPreintegrated->MergePrevious(pKF->mpImuPreintegrated); @@ -1327,6 +1371,7 @@ void LocalMapping::KeyFrameCulling() pKF->mPrevKF = NULL; pKF->SetBadFlag(); } + // 没经过imu初始化的第三阶段,且关键帧与其前一个关键帧的距离小于0.02m,且前后两个关键帧时间间隔小于3s else if(!mpCurrentKeyFrame->GetMap()->GetIniertialBA2() && (cv::norm(pKF->GetImuPosition()-pKF->mPrevKF->GetImuPosition())<0.02) && (t<3)) { pKF->mNextKF->mpImuPreintegrated->MergePrevious(pKF->mpImuPreintegrated); @@ -1338,11 +1383,13 @@ void LocalMapping::KeyFrameCulling() } } } + // 非imu就没那么多事儿了,直接干掉 else { pKF->SetBadFlag(); } } + // 遍历共视关键帧个数超过一定,就不弄了 if((count > 20 && mbAbortBA) || count>100) // MODIFICATION originally 20 for mbabortBA check just 10 keyframes { break; @@ -1350,7 +1397,11 @@ void LocalMapping::KeyFrameCulling() } } -// 计算三维向量v的反对称矩阵 +/** + * @brief 返回反对称矩阵 + * @param v 三维向量 + * @return v的反对称矩阵 + */ cv::Mat LocalMapping::SkewSymmetricMatrix(const cv::Mat &v) { return (cv::Mat_(3,3) << 0, -v.at(2), v.at(1), @@ -1388,6 +1439,9 @@ void LocalMapping::RequestReset() cout << "LM: Map reset, Done!!!" << endl; } +/** + * @brief 接收重置当前地图的信号 + */ void LocalMapping::RequestResetActiveMap(Map* pMap) { { @@ -1465,14 +1519,18 @@ void LocalMapping::RequestFinish() mbFinishRequested = true; } -// 检查是否已经有外部线程请求终止当前线程 +/** + * @brief 查看完成信号,跳出while循环 + */ bool LocalMapping::CheckFinish() { unique_lock lock(mMutexFinish); return mbFinishRequested; } -// 设置当前线程已经真正地结束了 +/** + * @brief 设置当前线程已经真正地结束了 + */ void LocalMapping::SetFinish() { unique_lock lock(mMutexFinish); @@ -1481,20 +1539,30 @@ void LocalMapping::SetFinish() mbStopped = true; } -// 当前线程的run函数是否已经终止 +/** + * @brief 当前线程的run函数是否已经终止 + */ bool LocalMapping::isFinished() { unique_lock lock(mMutexFinish); return mbFinished; } +/** + * @brief imu初始化 + * @param priorG 陀螺仪偏置的信息矩阵系数,主动设置时一般bInit为true,也就是只优化最后一帧的偏置,这个数会作为计算信息矩阵时使用 + * @param priorA 加速度计偏置的信息矩阵系数 + * @param bFIBA 是否做BA优化 + */ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA) { + // 1. 
将所有关键帧放入列表及向量里,且查看是否满足初始化条件 if (mbResetRequested) return; float minTime; int nMinKF; + // 从时间及帧数上限制初始化,不满足下面的不进行初始化 if (mbMonocular) { minTime = 2.0; @@ -1506,11 +1574,12 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA) nMinKF = 10; } - + // 当前地图大于10帧才进行初始化 if(mpAtlas->KeyFramesInMap() lpKF; KeyFrame* pKF = mpCurrentKeyFrame; while(pKF->mPrevKF) @@ -1519,8 +1588,10 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA) pKF = pKF->mPrevKF; } lpKF.push_front(pKF); + // 以相同内容再构建一个vector vector vpKF(lpKF.begin(),lpKF.end()); + // TODO 跟上面重复? if(vpKF.size()GetMap()->isImuInitialized()) { cv::Mat cvRwg; @@ -1551,20 +1624,27 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA) continue; if (!(*itKF)->mPrevKF) continue; - + // Rwb(imu坐标转到初始化前世界坐标系下的坐标)*更新偏置后的速度,可以理解为在世界坐标系下的速度矢量 dirG -= (*itKF)->mPrevKF->GetImuRotation()*(*itKF)->mpImuPreintegrated->GetUpdatedDeltaVelocity(); + // 求取实际的速度,位移/时间 cv::Mat _vel = ((*itKF)->GetImuPosition() - (*itKF)->mPrevKF->GetImuPosition())/(*itKF)->mpImuPreintegrated->dT; (*itKF)->SetVelocity(_vel); (*itKF)->mPrevKF->SetVelocity(_vel); } - + // 归一化 dirG = dirG/cv::norm(dirG); + // 原本的重力方向 cv::Mat gI = (cv::Mat_(3,1) << 0.0f, 0.0f, -1.0f); + // 求速度方向与重力方向的角轴 cv::Mat v = gI.cross(dirG); + // 求角轴长度 const float nv = cv::norm(v); + // 求转角大小 const float cosg = gI.dot(dirG); const float ang = acos(cosg); - cv::Mat vzg = v*ang/nv; + // 先计算旋转向量,在除去角轴大小 + cv::Mat vzg = v * ang / nv; + // 获得重力方向到当前速度方向的旋转向量 cvRwg = IMU::ExpSO3(vzg); mRwg = Converter::toMatrix3d(cvRwg); mTinit = mpCurrentKeyFrame->mTimeStamp-mFirstTs; @@ -1578,9 +1658,11 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA) mScale=1.0; + // 暂时没发现在别的地方出现过 mInitTime = mpTracker->mLastFrame.mTimeStamp-vpKF.front()->mTimeStamp; std::chrono::steady_clock::time_point t0 = std::chrono::steady_clock::now(); + // 计算残差及偏置差,优化尺度重力方向及Ri Rj Vi Vj Pi Pj Optimizer::InertialOptimization(mpAtlas->GetCurrentMap(), mRwg, mScale, mbg, mba, mbMonocular, infoInertial, false, false, priorG, priorA); std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); @@ -1588,7 +1670,7 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA) cout << "bg after inertial-only optimization: " << mbg << endl; cout << "ba after inertial-only optimization: " << mba << endl;*/ - + // 尺度太小的话初始化认为失败 if (mScale<1e-1) { cout << "scale too small" << endl; @@ -1596,20 +1678,26 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA) return; } - + // 到此时为止,前面做的东西没有改变map // Before this line we are not changing the map unique_lock lock(mpAtlas->GetCurrentMap()->mMutexMapUpdate); std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); + // 尺度变化超过设定值,或者非单目时(无论带不带imu,但这个函数只在带imu时才执行,所以这个可以理解为双目imu) if ((fabs(mScale-1.f)>0.00001)||!mbMonocular) { + // 恢复重力方向与尺度信息 mpAtlas->GetCurrentMap()->ApplyScaledRotation(Converter::toCvMat(mRwg).t(),mScale,true); + // 更新普通帧的位姿,主要是当前帧与上一帧 mpTracker->UpdateFrameIMU(mScale,vpKF[0]->GetImuBias(),mpCurrentKeyFrame); } std::chrono::steady_clock::time_point t3 = std::chrono::steady_clock::now(); // Check if initialization OK + // 即使初始化成功后面还会执行这个函数重新初始化 + // 在之前没有初始化成功情况下(此时刚刚初始化成功)对每一帧都标记,后面的kf全部都在tracking里面标记为true + // 也就是初始化之前的那些关键帧即使有imu信息也不算 if (!mpAtlas->isImuInitialized()) for(int i=0;iGetGyroBias() << endl;*/ std::chrono::steady_clock::time_point t4 = std::chrono::steady_clock::now(); + // 代码里都为true if (bFIBA) { + // 
承接上一步纯imu优化,按照之前的结果更新了尺度信息及适应重力方向,所以要结合地图进行一次视觉加imu的全局优化,这次带了MP等信息 if (priorA!=0.f) Optimizer::FullInertialBA(mpAtlas->GetCurrentMap(), 100, false, 0, NULL, true, priorG, priorA); else @@ -1637,6 +1727,7 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA) if (!mpAtlas->isImuInitialized()) { cout << "IMU in Map " << mpAtlas->GetCurrentMap()->GetId() << " is initialized" << endl; + // ! 重要!标记初始化成功 mpAtlas->SetImuInitialized(); mpTracker->t0IMU = mpTracker->mCurrentFrame.mTimeStamp; mpCurrentKeyFrame->bImu = true; @@ -1671,6 +1762,9 @@ void LocalMapping::InitializeIMU(float priorG, float priorA, bool bFIBA) return; } +/** + * @brief 通过BA优化进行尺度更新,关键帧小于100,在这里的时间段内时多次进行尺度更新 + */ void LocalMapping::ScaleRefinement() { // Minimum number of keyframes to compute a solution @@ -1680,6 +1774,7 @@ void LocalMapping::ScaleRefinement() return; // Retrieve all keyframes in temporal order + // 1. 检索所有的关键帧(当前地图) list lpKF; KeyFrame* pKF = mpCurrentKeyFrame; while(pKF->mPrevKF) @@ -1689,7 +1784,7 @@ void LocalMapping::ScaleRefinement() } lpKF.push_front(pKF); vector vpKF(lpKF.begin(),lpKF.end()); - + // 加入新添加的帧 while(CheckNewKeyFrames()) { ProcessNewKeyFrame(); @@ -1698,11 +1793,12 @@ void LocalMapping::ScaleRefinement() } const int N = vpKF.size(); - + // 2. 更新旋转与尺度 mRwg = Eigen::Matrix3d::Identity(); mScale=1.0; std::chrono::steady_clock::time_point t0 = std::chrono::steady_clock::now(); + // 优化重力方向与尺度 Optimizer::InertialOptimization(mpAtlas->GetCurrentMap(), mRwg, mScale); std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); @@ -1714,8 +1810,10 @@ void LocalMapping::ScaleRefinement() } // Before this line we are not changing the map + // 3. 开始更新地图 unique_lock lock(mpAtlas->GetCurrentMap()->mMutexMapUpdate); std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now(); + // 3.1 如果尺度更新较多,或是在双目imu情况下更新地图 if ((fabs(mScale-1.f)>0.00001)||!mbMonocular) { mpAtlas->GetCurrentMap()->ApplyScaledRotation(Converter::toCvMat(mRwg).t(),mScale,true); @@ -1723,6 +1821,7 @@ void LocalMapping::ScaleRefinement() } std::chrono::steady_clock::time_point t3 = std::chrono::steady_clock::now(); + // 3.2 优化的这段时间新进来的kf全部清空不要 for(list::iterator lit = mlNewKeyFrames.begin(), lend=mlNewKeyFrames.end(); lit!=lend; lit++) { (*lit)->SetBadFlag(); @@ -1738,14 +1837,17 @@ void LocalMapping::ScaleRefinement() return; } - - +/** + * @brief 返回是否正在做IMU的初始化,在tracking里面使用,如果为true,暂不添加关键帧 + */ bool LocalMapping::IsInitializing() { return bInitializing; } - +/** + * @brief 获取当前关键帧的时间戳,System::GetTimeFromIMUInit()中调用 + */ double LocalMapping::GetCurrKFTime() { @@ -1757,9 +1859,12 @@ double LocalMapping::GetCurrKFTime() return 0.0; } -KeyFrame* LocalMapping::GetCurrKF() +/** + * @brief 获取当前关键帧 + */ +KeyFrame *LocalMapping::GetCurrKF() { return mpCurrentKeyFrame; } -} //namespace ORB_SLAM +} // namespace ORB_SLAM3 diff --git a/src/LoopClosing.cc b/src/LoopClosing.cc index 280e90a..d976d19 100644 --- a/src/LoopClosing.cc +++ b/src/LoopClosing.cc @@ -68,7 +68,7 @@ void LoopClosing::Run() // Step 1 查看闭环检测队列mlpLoopKeyFrameQueue中有没有关键帧进来 if(CheckNewKeyFrames()) { - if(mpLastCurrentKF) + if(mpLastCurrentKF) // 这部分后续未使用 { mpLastCurrentKF->mvpLoopCandKFs.clear(); mpLastCurrentKF->mvpMergeCandKFs.clear(); @@ -77,6 +77,7 @@ void LoopClosing::Run() timeDetectBoW = 0; std::chrono::steady_clock::time_point time_StartDetectBoW = std::chrono::steady_clock::now(); #endif + // Step 2 检测有没有共视的区域 bool bDetected = NewDetectCommonRegions(); #ifdef REGISTER_TIMES std::chrono::steady_clock::time_point 
time_EndDetectBoW = std::chrono::steady_clock::now(); @@ -93,28 +94,39 @@ void LoopClosing::Run() if(bDetected) { + // Step 3 如果检测到融合(当前关键帧与其他地图有关联), 则合并地图 if(mbMergeDetected) { + // 在imu模式下没有初始化就放弃融合 if ((mpTracker->mSensor==System::IMU_MONOCULAR ||mpTracker->mSensor==System::IMU_STEREO) && (!mpCurrentKF->GetMap()->isImuInitialized())) { cout << "IMU is not initilized, merge is aborted" << endl; } + // 如果imu模式下成功初始化,或者非imu模式 else { Verbose::PrintMess("*Merged detected", Verbose::VERBOSITY_QUIET); Verbose::PrintMess("Number of KFs in the current map: " + to_string(mpCurrentKF->GetMap()->KeyFramesInMap()), Verbose::VERBOSITY_DEBUG); + // 拿到融合帧在自己地图所在坐标系(w2)下的位姿 cv::Mat mTmw = mpMergeMatchedKF->GetPose(); g2o::Sim3 gSmw2(Converter::toMatrix3d(mTmw.rowRange(0, 3).colRange(0, 3)),Converter::toVector3d(mTmw.rowRange(0, 3).col(3)),1.0); + // 拿到当前帧在自己地图所在坐标系(w1)下的位姿 cv::Mat mTcw = mpCurrentKF->GetPose(); g2o::Sim3 gScw1(Converter::toMatrix3d(mTcw.rowRange(0, 3).colRange(0, 3)),Converter::toVector3d(mTcw.rowRange(0, 3).col(3)),1.0); + // 根据共同区域检测时的Sim3结果得到当前帧在w2下的位姿 + // l = c , w2 = 2 g2o::Sim3 gSw2c = mg2oMergeSlw.inverse(); + // 这个没有用到 : 融合帧在w1下的位姿 g2o::Sim3 gSw1m = mg2oMergeSlw; + // 记录焊接变换(Sim3) T_w2_w1 , 这个量实际是两个地图坐标系的关系 T_w2_w1 = T_w2_c * T_c_w1 mSold_new = (gSw2c * gScw1); + // 如果是imu模式 if(mpCurrentKF->GetMap()->IsInertial() && mpMergeMatchedKF->GetMap()->IsInertial()) { + // 如果尺度变换太大, 则放弃融合 if(mSold_new.scale()<0.90||mSold_new.scale()>1.1){ mpMergeLastCurrentKF->SetErase(); mpMergeMatchedKF->SetErase(); @@ -127,6 +139,7 @@ void LoopClosing::Run() continue; } // If inertial, force only yaw + // 如果是imu模式,强制将焊接变换的的 roll 和 pitch 设为0 if ((mpTracker->mSensor==System::IMU_MONOCULAR ||mpTracker->mSensor==System::IMU_STEREO) && mpCurrentKF->GetMap()->GetIniertialBA1()) // TODO, maybe with GetIniertialBA1 { @@ -137,16 +150,20 @@ void LoopClosing::Run() } } + // 这个变量没有用到 mg2oMergeSmw = gSmw2 * gSw2c * gScw1; + // 更新mg2oMergeScw mg2oMergeScw = mg2oMergeSlw; #ifdef REGISTER_TIMES std::chrono::steady_clock::time_point time_StartMerge = std::chrono::steady_clock::now(); #endif if (mpTracker->mSensor==System::IMU_MONOCULAR ||mpTracker->mSensor==System::IMU_STEREO) + // 如果是imu模式,则开启 Visual-Inertial Map Merging MergeLocal2(); else + // 如果是纯视觉模式,则开启 Visual-Welding Map Merging MergeLocal(); #ifdef REGISTER_TIMES std::chrono::steady_clock::time_point time_EndMerge = std::chrono::steady_clock::now(); @@ -154,12 +171,14 @@ void LoopClosing::Run() vTimeMergeTotal_ms.push_back(timeMerge); #endif } - + // 记录时间戳 vdPR_CurrentTime.push_back(mpCurrentKF->mTimeStamp); vdPR_MatchedTime.push_back(mpMergeMatchedKF->mTimeStamp); + // 标记Place recognition结果为地图融合 vnPR_TypeRecogn.push_back(1); // Reset all variables + // 重置所有融合相关变量 mpMergeLastCurrentKF->SetErase(); mpMergeMatchedKF->SetErase(); mnMergeNumCoincidences = 0; @@ -167,7 +186,7 @@ void LoopClosing::Run() mvpMergeMPs.clear(); mnMergeNumNotFound = 0; mbMergeDetected = false; - + // 重置所有回环相关变量, 说明对与当前帧同时有回环和融合的情况只进行融合 if(mbLoopDetected) { // Reset Loop variables @@ -182,33 +201,44 @@ void LoopClosing::Run() } + // Step 4 如果(没有检测到融合)检测到回环, 则回环矫正 if(mbLoopDetected) { + // 标记时间戳 vdPR_CurrentTime.push_back(mpCurrentKF->mTimeStamp); vdPR_MatchedTime.push_back(mpLoopMatchedKF->mTimeStamp); vnPR_TypeRecogn.push_back(0); Verbose::PrintMess("*Loop detected", Verbose::VERBOSITY_QUIET); - + // 更新 mg2oLoopScw mg2oLoopScw = mg2oLoopSlw; //*mvg2oSim3LoopTcw[nCurrentIndex]; + // 如果是带imu的模式 if(mpCurrentKF->GetMap()->IsInertial()) { + // 拿到当前关键帧相对于世界坐标系的位姿 cv::Mat Twc = 
mpCurrentKF->GetPoseInverse(); g2o::Sim3 g2oTwc(Converter::toMatrix3d(Twc.rowRange(0, 3).colRange(0, 3)),Converter::toVector3d(Twc.rowRange(0, 3).col(3)),1.0); + + // mg2oLoopScw是通过回环检测的Sim3计算出的回环矫正后的当前关键帧的初始位姿, Twc是当前关键帧回环矫正前的位姿. + // g2oSww_new 可以理解为correction g2o::Sim3 g2oSww_new = g2oTwc*mg2oLoopScw; - + + // 拿到 roll ,pitch ,yaw Eigen::Vector3d phi = LogSO3(g2oSww_new.rotation().toRotationMatrix()); //cout << "tw2w1: " << g2oSww_new.translation() << endl; //cout << "Rw2w1: " << g2oSww_new.rotation().toRotationMatrix() << endl; //cout << "Angle Rw2w1: " << 180*phi/3.14 << endl; //cout << "scale w2w1: " << g2oSww_new.scale() << endl; - + // 这里算是通过imu重力方向验证回环结果, 如果pitch或roll角度偏差稍微有一点大,则回环失败. 对phi容忍比较大(20度) if (fabs(phi(0))<0.008f && fabs(phi(1))<0.008f && fabs(phi(2))<0.349f) { + // 如果是imu模式 if(mpCurrentKF->GetMap()->IsInertial()) { // If inertial, force only yaw + + // 如果是imu模式,强制将焊接变换的的 roll 和 pitch 设为0 if ((mpTracker->mSensor==System::IMU_MONOCULAR ||mpTracker->mSensor==System::IMU_STEREO) && mpCurrentKF->GetMap()->GetIniertialBA2()) // TODO, maybe with GetIniertialBA1 { @@ -224,6 +254,7 @@ void LoopClosing::Run() #ifdef REGISTER_TIMES std::chrono::steady_clock::time_point time_StartLoop = std::chrono::steady_clock::now(); #endif + // 开启回环矫正 CorrectLoop(); #ifdef REGISTER_TIMES std::chrono::steady_clock::time_point time_EndLoop = std::chrono::steady_clock::now(); @@ -231,11 +262,13 @@ void LoopClosing::Run() vTimeLoopTotal_ms.push_back(timeLoop); #endif } + // 如果pitch或roll角度偏差稍微有一点大,或 yaw大与20度则回环失败. else { cout << "BAD LOOP!!!" << endl; } } + // 如果是纯视觉模式 else { mvpLoopMapPoints = mvpLoopMPs; @@ -252,6 +285,7 @@ void LoopClosing::Run() } // Reset all variables + // 重置所有的回环变量 mpLoopLastCurrentKF->SetErase(); mpLoopMatchedKF->SetErase(); mnLoopNumCoincidences = 0; @@ -308,11 +342,16 @@ bool LoopClosing::CheckNewKeyFrames() unique_lock lock(mMutexLoopQueue); return(!mlpLoopKeyFrameQueue.empty()); } - +/** + * @brief 检测有没有共同区域,包括检测回环和融合匹配,sim3计算,验证 + * 对应于ORB-SLAM2里的函数DetectLoop + * @return true + * @return false + */ bool LoopClosing::NewDetectCommonRegions() { { - // Step 1 从队列中取出一个关键帧,作为当前检测闭环关键帧 + // Step 1 从队列中取出一个关键帧,作为当前检测共同区域的关键帧 unique_lock lock(mMutexLoopQueue); // 从队列头开始取,也就是先取早进来的关键帧 mpCurrentKF = mlpLoopKeyFrameQueue.front(); @@ -322,24 +361,25 @@ bool LoopClosing::NewDetectCommonRegions() // 设置当前关键帧不要在优化的过程中被删除 mpCurrentKF->SetNotErase(); mpCurrentKF->mbCurrentPlaceRecognition = true; - + // 当前关键帧对应的地图 mpLastMap = mpCurrentKF->GetMap(); } - + // Step 2 在某些情况下不进行共同区域检测 + // 1.imu模式下还没经过第二阶段初始化则不考虑 if(mpLastMap->IsInertial() && !mpLastMap->GetIniertialBA1()) { mpKeyFrameDB->add(mpCurrentKF); mpCurrentKF->SetErase(); return false; } - + // 2.双目模式下且当前地图关键帧数少于5则不考虑 if(mpTracker->mSensor == System::STEREO && mpLastMap->GetAllKeyFrames().size() < 5) //12 { mpKeyFrameDB->add(mpCurrentKF); mpCurrentKF->SetErase(); return false; } - + // 3.当前地图关键帧少于12则不进行检测 if(mpLastMap->GetAllKeyFrames().size() < 12) { mpKeyFrameDB->add(mpCurrentKF); @@ -348,35 +388,46 @@ bool LoopClosing::NewDetectCommonRegions() } //Check the last candidates with geometric validation + // Step 3 基于前一帧的历史信息判断是否进行时序几何校验,注意这里是基于共视几何校验失败才会运行的代码,阅读代码的时候可以先看后面 + // Loop candidates bool bLoopDetectedInKF = false; bool bCheckSpatial = false; - + // Step 3.1 回环的时序几何校验: 这里的判断条件里mnLoopNumCoincidences刚开始为0, 所以可以先跳过看后面 + // 如果回环的共视几何验证成功帧数目大于0 if(mnLoopNumCoincidences > 0) { bCheckSpatial = true; // Find from the last KF candidates + // 通过上一关键帧的信息,计算新的当前帧的sim3 + // Tcl = Tcw*Twl cv::Mat mTcl = mpCurrentKF->GetPose() * 
mpLoopLastCurrentKF->GetPoseInverse(); g2o::Sim3 gScl(Converter::toMatrix3d(mTcl.rowRange(0, 3).colRange(0, 3)),Converter::toVector3d(mTcl.rowRange(0, 3).col(3)),1.0); g2o::Sim3 gScw = gScl * mg2oLoopSlw; int numProjMatches = 0; vector vpMatchedMPs; + + // 通过把候选帧局部窗口内的地图点向新进来的关键帧投影来验证回环检测结果,并优化Sim3位姿 bool bCommonRegion = DetectAndReffineSim3FromLastKF(mpCurrentKF, mpLoopMatchedKF, gScw, numProjMatches, mvpLoopMPs, vpMatchedMPs); + // 如果找到共同区域(时序验证成功一次) if(bCommonRegion) { - + //标记时序检验成功一次 bLoopDetectedInKF = true; - + // 累计正检验的成功次数 mnLoopNumCoincidences++; + // 不再参与新的回环检测 mpLoopLastCurrentKF->SetErase(); mpLoopLastCurrentKF = mpCurrentKF; - mg2oLoopSlw = gScw; + mg2oLoopSlw = gScw; // 记录当前优化的结果为{last T_cw}即为 T_lw + // 记录匹配到的点 mvpLoopMatchedMPs = vpMatchedMPs; - + // 如果验证数大于等于3则为成功回环 mbLoopDetected = mnLoopNumCoincidences >= 3; + // 记录失败的时序校验数为0 mnLoopNumNotFound = 0; - + //! 这里的条件反了,不过对功能没什么影响,只是打印信息 if(!mbLoopDetected) { //f_succes_pr << mpCurrentKF->mNameFile << " " << "8"<< endl; @@ -384,13 +435,16 @@ bool LoopClosing::NewDetectCommonRegions() cout << "PR: Loop detected with Reffine Sim3" << endl; } } + // 如果没找到共同区域(时序验证失败一次) else { + // 当前时序验证失败 bLoopDetectedInKF = false; /*f_succes_pr << mpCurrentKF->mNameFile << " " << "8"<< endl; f_succes_pr << "% Number of spatial consensous: " << std::to_string(mnLoopNumCoincidences) << endl;*/ - + // 递增失败的时序验证次数 mnLoopNumNotFound++; + //若果连续两帧时序验证失败则整个回环检测失败 if(mnLoopNumNotFound >= 2) { /*for(int i=0; iSetErase(); mpLoopMatchedKF->SetErase(); //mg2oLoopScw; @@ -421,36 +475,50 @@ bool LoopClosing::NewDetectCommonRegions() //Merge candidates bool bMergeDetectedInKF = false; + // Step 3.2 融合的时序几何校验: 这里的判断条件里mnMergeNumCoincidences刚开始为0, 所以可以先跳过看后面 + // 如果融合的共视几何验证成功帧数目大于0 if(mnMergeNumCoincidences > 0) { // Find from the last KF candidates - + // 通过上一关键帧的信息,计算新的当前帧的sim3 + // Tcl = Tcw*Twl cv::Mat mTcl = mpCurrentKF->GetPose() * mpMergeLastCurrentKF->GetPoseInverse(); g2o::Sim3 gScl(Converter::toMatrix3d(mTcl.rowRange(0, 3).colRange(0, 3)),Converter::toVector3d(mTcl.rowRange(0, 3).col(3)),1.0); g2o::Sim3 gScw = gScl * mg2oMergeSlw; int numProjMatches = 0; vector vpMatchedMPs; + // 通过把候选帧局部窗口内的地图点向新进来的关键帧投影来验证回环检测结果,并优化Sim3位姿 bool bCommonRegion = DetectAndReffineSim3FromLastKF(mpCurrentKF, mpMergeMatchedKF, gScw, numProjMatches, mvpMergeMPs, vpMatchedMPs); + // 如果找到共同区域(时序验证成功一次) if(bCommonRegion) { //cout << "BoW: Merge reffined Sim3 transformation suscelful" << endl; + //标记时序检验成功一次 bMergeDetectedInKF = true; + // 累计正检验的成功次数 mnMergeNumCoincidences++; + + // 不再参与新的回环检测 mpMergeLastCurrentKF->SetErase(); mpMergeLastCurrentKF = mpCurrentKF; mg2oMergeSlw = gScw; mvpMergeMatchedMPs = vpMatchedMPs; - + // 如果验证数大于等于3则为成功回环 mbMergeDetected = mnMergeNumCoincidences >= 3; } else + // 如果没找到共同区域(时序验证失败一次) { //cout << "BoW: Merge reffined Sim3 transformation failed" << endl; + // 没有找到融合 mbMergeDetected = false; + // 当前时序验证失败 bMergeDetectedInKF = false; + // 递增失败的时序验证次数 mnMergeNumNotFound++; + //若果连续两帧时序验证失败则整个融合检测失败 if(mnMergeNumNotFound >= 2) { /*cout << "+++++++Merge detected failed in KF" << endl; @@ -469,6 +537,7 @@ bool LoopClosing::NewDetectCommonRegions() mvvpMergeMapPoints.clear(); mvvpMergeMatchedMapPoints.clear();*/ + // 失败后标记重置一些信息 mpMergeLastCurrentKF->SetErase(); mpMergeMatchedKF->SetErase(); mnMergeNumCoincidences = 0; @@ -480,7 +549,7 @@ bool LoopClosing::NewDetectCommonRegions() } } - + // Step 3.3 若校验成功则把当前帧添加进数据库,且返回true表示找到共同区域 if(mbMergeDetected || mbLoopDetected) { //f_time_pr << "Geo" << " " << timeGeoKF_ms.count() << endl; @@ -490,6 +559,8 @@ 
bool LoopClosing::NewDetectCommonRegions() //------------- //TODO: This is only necessary if we use a minimun score for pick the best candidates + + // 这两句并没有使用,作者把orbslam2里面通过minScore作为阀值筛选候选帧的策略抛弃了 const vector vpConnectedKeyFrames = mpCurrentKF->GetVectorCovisibleKeyFrames(); const DBoW2::BowVector &CurrentBowVec = mpCurrentKF->mBowVec; /*float minScore = 1; @@ -508,6 +579,7 @@ bool LoopClosing::NewDetectCommonRegions() //------------- // Extract candidates from the bag of words + // Step 4 若当前关键帧没有被检测到回环或融合,则分别通过bow拿到当前帧最好的三个回环候选帧和融合候选帧 vector vpMergeBowCand, vpLoopBowCand; //cout << "LC: bMergeDetectedInKF: " << bMergeDetectedInKF << " bLoopDetectedInKF: " << bLoopDetectedInKF << endl; if(!bMergeDetectedInKF || !bLoopDetectedInKF) @@ -516,6 +588,7 @@ bool LoopClosing::NewDetectCommonRegions() std::chrono::steady_clock::time_point time_StartDetectBoW = std::chrono::steady_clock::now(); #endif // Search in BoW + // 分别找到3个最好的候选帧, 回环候选帧放在vpLoopBowCand中,融合候选帧放在vpMergeBowCand中 mpKeyFrameDB->DetectNBestCandidates(mpCurrentKF, vpLoopBowCand, vpMergeBowCand,3); #ifdef REGISTER_TIMES std::chrono::steady_clock::time_point time_EndDetectBoW = std::chrono::steady_clock::now(); @@ -530,7 +603,7 @@ bool LoopClosing::NewDetectCommonRegions() #else std::chrono::monotonic_clock::time_point timeStartGeoBoW = std::chrono::monotonic_clock::now(); #endif*/ - + // Step 4.1 若当前关键帧没有被检测到回环,并且候选帧数量不为0,则对回环候选帧进行论文中第8页的2-5步 if(!bLoopDetectedInKF && !vpLoopBowCand.empty()) { mbLoopDetected = DetectCommonRegionsFromBoW(vpLoopBowCand, mpLoopMatchedKF, mpLoopLastCurrentKF, mg2oLoopSlw, mnLoopNumCoincidences, mvpLoopMPs, mvpLoopMatchedMPs); @@ -538,31 +611,47 @@ bool LoopClosing::NewDetectCommonRegions() // Merge candidates //cout << "LC: Find BoW candidates" << endl; - + // Step 4.2 若当前关键帧没有被检测到融合,并且候选帧数量不为0,则对融合候帧进行论文中第8页的2-5步 if(!bMergeDetectedInKF && !vpMergeBowCand.empty()) { mbMergeDetected = DetectCommonRegionsFromBoW(vpMergeBowCand, mpMergeMatchedKF, mpMergeLastCurrentKF, mg2oMergeSlw, mnMergeNumCoincidences, mvpMergeMPs, mvpMergeMatchedMPs); } //cout << "LC: add to KFDB" << endl; + // Step 5 根据结果确定有没有检测到共同区域 + // 把当前帧添加到关键帧数据库中 mpKeyFrameDB->add(mpCurrentKF); - + // 如果检测到共同区域返回true if(mbMergeDetected || mbLoopDetected) { return true; } - + // 如果没检测到则把当前关键帧erase(不参与后续共同区域检测) //cout << "LC: erase KF" << endl; mpCurrentKF->SetErase(); + // 标记当前关键帧不是当前在进行共同区域检测的帧 mpCurrentKF->mbCurrentPlaceRecognition = false; return false; } - +/** + * @brief 对新进来的关键帧进行时序几何验证,同时继续优化之前估计的 Tcw + * + * @param[in] pCurrentKF 当前关键帧 + * @param[in] pMatchedKF 候选帧 + * @param[out] gScw 世界坐标系在验证帧下的Sim3 + * @param[out] nNumProjMatches 记录匹配点的数量 + * @param[out] vpMPs 候选帧窗口内所有的地图点 + * @param[out] vpMatchedMPs 候选帧窗口内所有被匹配到的点 + * @return true 时序几何验证成功 + * @return false 时序几何验证失败 + */ bool LoopClosing::DetectAndReffineSim3FromLastKF(KeyFrame* pCurrentKF, KeyFrame* pMatchedKF, g2o::Sim3 &gScw, int &nNumProjMatches, std::vector &vpMPs, std::vector &vpMatchedMPs) { + // 还没有匹配点,所以为空 set spAlreadyMatchedMPs; + // 把候选帧局部窗口内的地图点投向新进来的当前关键帧,看是否有足够的匹配点 nNumProjMatches = FindMatchesByProjection(pCurrentKF, pMatchedKF, gScw, spAlreadyMatchedMPs, vpMPs, vpMatchedMPs); @@ -577,21 +666,28 @@ bool LoopClosing::DetectAndReffineSim3FromLastKF(KeyFrame* pCurrentKF, KeyFrame* nProjMatchesRep = 80; }*/ + // 如果大于一定的数量 if(nNumProjMatches >= nProjMatches) { + // 为OptimizeSim3接口准备数据 cv::Mat mScw = Converter::toCvMat(gScw); cv::Mat mTwm = pMatchedKF->GetPoseInverse(); g2o::Sim3 gSwm(Converter::toMatrix3d(mTwm.rowRange(0, 3).colRange(0, 
3)),Converter::toVector3d(mTwm.rowRange(0, 3).col(3)),1.0);
 g2o::Sim3 gScm = gScw * gSwm;
 Eigen::Matrix mHessian7x7;
+
+ // In the monocular case the scale is not fixed
 bool bFixedScale = mbFixScale; // TODO CHECK; Solo para el monocular inertial
+ // In IMU mode, if the map is not mature yet, do not fix the scale
 if(mpTracker->mSensor==System::IMU_MONOCULAR && !pCurrentKF->GetMap()->GetIniertialBA2())
 bFixedScale=false;
+ // Keep refining the Sim3
 int numOptMatches = Optimizer::OptimizeSim3(mpCurrentKF, pMatchedKF, vpMatchedMPs, gScm, 10, bFixedScale, mHessian7x7, true);
+ // If the number of matches is large enough
 if(numOptMatches > nProjOptMatches)
 {
 g2o::Sim3 gScw_estimation(Converter::toMatrix3d(mScw.rowRange(0, 3).colRange(0, 3)),
@@ -599,27 +695,47 @@ bool LoopClosing::DetectAndReffineSim3FromLastKF(KeyFrame* pCurrentKF, KeyFrame*
 vector vpMatchedMP;
 vpMatchedMP.resize(mpCurrentKF->GetMapPointMatches().size(), static_cast(NULL));
-
+ // Search matches again with the refined Sim3
 nNumProjMatches = FindMatchesByProjection(pCurrentKF, pMatchedKF, gScw_estimation, spAlreadyMatchedMPs, vpMPs, vpMatchedMPs);
 //cout << "REFFINE-SIM3: Projection with optimize Sim3 from last KF with " << nNumProjMatches << " matches" << endl;
+ // If it exceeds the expected number, accept the result
 if(nNumProjMatches >= nProjMatchesRep)
 {
 gScw = gScw_estimation;
+ // Validation succeeded
 return true;
 }
 }
 }
+ // Validation failed
 return false;
}
-
+/**
+ * @brief Implements part of steps 2-5 on page 8 of the paper (the validation against subsequently arriving keyframes is not done inside this function)
+ * 1. Build the local window
+ * 2. RANSAC to get an initial value of T_am
+ * 3. Guided matching refinement
+ * 4. Validation with covisible keyframes of the map (covisibility geometric validation)
+ *
+ * @param[in] vpBowCand candidate keyframes proposed by the BoW database
+ * @param[out] pMatchedKF2 the keyframe that was finally matched
+ * @param[out] pLastCurrentKF records the current keyframe as the last keyframe (needed if temporal geometric validation has to continue later)
+ * @param[out] g2oScw Sim3 of the world frame in the current keyframe
+ * @param[out] nNumCoincidences number of successful validations
+ * @param[out] vpMPs all map points
+ * @param[out] vpMatchedMPs successfully matched map points
+ * @return true a valid common region was detected
+ * @return false no valid common region was detected
+ */
 bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand, KeyFrame* &pMatchedKF2, KeyFrame* &pLastCurrentKF, g2o::Sim3 &g2oScw, int &nNumCoincidences, std::vector &vpMPs, std::vector &vpMatchedMPs)
{
- int nBoWMatches = 20;
- int nBoWInliers = 15;
- int nSim3Inliers = 20;
- int nProjMatches = 50;
- int nProjOptMatches = 80;
+ // Thresholds used further below
+ int nBoWMatches = 20; // minimum number of BoW feature matches
+ int nBoWInliers = 15; // minimum number of RANSAC inliers
+ int nSim3Inliers = 20; // minimum number of Sim3 inliers
+ int nProjMatches = 50; // minimum number of matches obtained by projection
+ int nProjOptMatches = 80; // number of matches found with a smaller radius and a stricter distance
 /*if(mpTracker->mSensor==System::IMU_MONOCULAR ||mpTracker->mSensor==System::IMU_STEREO)
 {
 nBoWMatches = 20;
@@ -629,15 +745,21 @@ bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand,
 nProjOptMatches = 50;
 }*/
+ // Get the covisible keyframes of the current keyframe (common-region detection should stay away from the current keyframe's covisible keyframes)
 set spConnectedKeyFrames = mpCurrentKF->GetConnectedKeyFrames();
+ // Number of best covisible keyframes to take
 int nNumCovisibles = 5;
-
+ // Used for search by BoW
 ORBmatcher matcherBoW(0.9, true);
+ // Used for search by projection
 ORBmatcher matcher(0.75, true);
+ // Not used
 int nNumGuidedMatching = 0;
 // Varibles to select the best numbe
+ // Variables used to keep the best statistics; in the end we return the single best keyframe (the one with the most geometric-validation matches)
+
 KeyFrame* pBestMatchedKF;
 int nBestMatchesReproj = 0;
 int nBestNumCoindicendes = 0;
@@ -645,11 +767,15 @@ bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand,
 std::vector vpBestMapPoints;
 std::vector vpBestMatchedMapPoints;
+ // Number of BoW candidate keyframes
 int numCandidates = vpBowCand.size();
+ // These three variables only record information the author prints later; they can be ignored
 vector vnStage(numCandidates, 0);
 vector vnMatchesStage(numCandidates, 0);
 int index = 0;
+
+ // For each candidate keyframe
 for(KeyFrame* pKFi : vpBowCand)
 {
 //cout << endl << "-------------------------------" << endl;
@@ -658,22 +784,36 @@ bool
LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand, // Current KF against KF with covisibles version + // Step 1 获得候选关键帧的局部窗口 W_m + // 拿到候选关键帧的5个最优共视帧 std::vector vpCovKFi = pKFi->GetBestCovisibilityKeyFrames(nNumCovisibles); + // 再加上候选关键帧自己(这里操作比较迷,看起来只是为了把候选关键帧放到容器的第一顺位) vpCovKFi.push_back(vpCovKFi[0]); vpCovKFi[0] = pKFi; + // search by bow 返回的参数, 记录窗口Wm中每个关键帧有哪些点能在当前关键帧Ka中通过bow找到匹配点 std::vector > vvpMatchedMPs; vvpMatchedMPs.resize(vpCovKFi.size()); + + // 记录整个窗口中有那些点能在Ka中通过bow找到匹配点(这个set是辅助容器,避免重复添加地图点) std::set spMatchedMPi; int numBoWMatches = 0; - + // 记录窗口中能通过bow在当前关键帧ka中找到最多匹配点的关键帧 KeyFrame* pMostBoWMatchesKF = pKFi; + // 记录窗口中能通过bow在当前关键帧ka中找到最多匹配点的数量 int nMostBoWNumMatches = 0; + // 下面两个变量是为了sim3 solver准备的 + //记录窗口中的地图点能在当前关键帧中找到的匹配的点(数量的上限是当前关键帧地图点的数量) std::vector vpMatchedPoints = std::vector(mpCurrentKF->GetMapPointMatches().size(), static_cast(NULL)); + // 记录上面的地图点分别对应窗口中的关键帧(数量的上限是当前关键帧地图点的数量) std::vector vpKeyFrameMatchedMP = std::vector(mpCurrentKF->GetMapPointMatches().size(), static_cast(NULL)); + // 记录在W_km中有最多匹配点的帧的局部index, 这个后面没有用到 int nIndexMostBoWMatchesKF=0; + //! bug: 并没有重新赋值pMostBoWMatchesKF, 一直是初始值: 候选关键帧 + // 遍历窗口内Wm的每个关键帧 + // Step 1.1 通过Bow寻找候选帧窗口内的关键帧地图点与当前关键帧的匹配点 for(int j=0; jisBad()) @@ -687,10 +827,14 @@ bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand, nIndexMostBoWMatchesKF = j; } } - + // 标记是否因为窗口内有当前关键帧的共视关键帧 bool bAbortByNearKF = false; + // 遍历窗口内的每个关键帧 + // Step 1.2 把窗口内的匹配点转换为Sim3Solver接口定义的格式 for(int j=0; j &vpBowCand, } //cout << "Matches: " << num << endl; + // 遍历窗口内的某一个关键帧与当前关键帧由bow得到的匹配的地图点 + // 注意这里每个vvpMatchedMPs[j]的大小都是相等的且等于当前关键帧中的总地图点数量,详细请看searchByBow for(int k=0; k < vvpMatchedMPs[j].size(); ++k) { + // 地图点指针 MapPoint* pMPi_j = vvpMatchedMPs[j][k]; + // 如果指针为空活地图点被标记为bad,则跳过当前循环 if(!pMPi_j || pMPi_j->isBad()) continue; + // 窗口内不同关键帧与当前关键帧可能能看到相同的3D点, 利用辅助容器避免重复添加 if(spMatchedMPi.find(pMPi_j) == spMatchedMPi.end()) { + // 利用辅助容器记录添加过的点 spMatchedMPi.insert(pMPi_j); + // 统计窗口内有多少地图点能在当前关键中找到匹配 numBoWMatches++; - + //记录窗口中的地图点能在当前关键帧中找到的匹配的点 vpMatchedPoints[k]= pMPi_j; + // 记录上面的地图点分别对应窗口中的关键帧(数量的上限是当前关键帧地图点的数量) vpKeyFrameMatchedMP[k] = vpCovKFi[j]; } } } //cout <<"BoW: " << numBoWMatches << " independent putative matches" << endl; + // 当窗口内的帧不是当前关键帧的相邻帧且匹配点足够多时 + // Step 2 利用RANSAC寻找候选关键帧窗口与当前关键帧的相对位姿T_am的初始值(可能是Sim3) if(!bAbortByNearKF && numBoWMatches >= nBoWMatches) // TODO pick a good threshold { /*cout << "-------------------------------" << endl; @@ -725,10 +879,14 @@ bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand, // Geometric validation bool bFixedScale = mbFixScale; + //? 
如果是单目带imu的模式且地图没有成熟则不固定scale if(mpTracker->mSensor==System::IMU_MONOCULAR && !mpCurrentKF->GetMap()->GetIniertialBA2()) bFixedScale=false; + // 初始化sim3 solver + // Sim3Solver 的接口与orbslam2略有不同, 因为现在是1-N的对应关系 Sim3Solver solver = Sim3Solver(mpCurrentKF, pMostBoWMatchesKF, vpMatchedPoints, bFixedScale, vpKeyFrameMatchedMP); + //Sim3Solver Ransac 置信度0.99,至少20个inliers 最多300次迭 solver.SetRansacParameters(0.99, nBoWInliers, 300); // at least 15 inliers bool bNoMore = false; @@ -736,12 +894,14 @@ bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand, int nInliers; bool bConverge = false; cv::Mat mTcm; + // 迭代到收敛 while(!bConverge && !bNoMore) { mTcm = solver.iterate(20,bNoMore, vbInliers, nInliers, bConverge); } //cout << "Num inliers: " << nInliers << endl; + // Step 3 Guide matching refinement: 利用初始的Tam信息,进行双向重投影,并非线性优化得到更精确的Tam if(bConverge) { //cout <<"BoW: " << nInliers << " inliers in Sim3Solver" << endl; @@ -749,24 +909,36 @@ bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand, // Match by reprojection //int nNumCovisibles = 5; vpCovKFi.clear(); + // 拿到窗口内匹配最多的帧的最佳5个共视帧和它自己组成的窗口 vpCovKFi = pMostBoWMatchesKF->GetBestCovisibilityKeyFrames(nNumCovisibles); int nInitialCov = vpCovKFi.size(); vpCovKFi.push_back(pMostBoWMatchesKF); + // 这个后面没有用到 set spCheckKFs(vpCovKFi.begin(), vpCovKFi.end()); + // 辅助容器,避免重复添加地图点 set spMapPoints; + // 这两个容器是searchByProjection定义的容器 + // 记录窗口内地图点 vector vpMapPoints; + // 记录每个地图点对应的串口内的关键帧 vector vpKeyFrames; + // 遍历窗Wm内的所有关键帧 for(KeyFrame* pCovKFi : vpCovKFi) { + // 遍历窗口内每个关键帧的所有地图点 for(MapPoint* pCovMPij : pCovKFi->GetMapPointMatches()) { + // 如果指针为空或者改地图点被标记为bad if(!pCovMPij || pCovMPij->isBad()) continue; - + // 避免重复添加 if(spMapPoints.find(pCovMPij) == spMapPoints.end()) - { + { + + // 辅助容器用来记录点是否已经添加 spMapPoints.insert(pCovMPij); + // 把地图点和对应关键帧记录下来 vpMapPoints.push_back(pCovMPij); vpKeyFrames.push_back(pCovKFi); } @@ -774,87 +946,105 @@ bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand, } //cout << "Point cloud: " << vpMapPoints.size() << endl; - + // 拿到solver 估计的 Tam初始值, 为后续的非线性优化做准备, 在这里 c 表示当前关键帧, m 表示回环/融合候选帧 g2o::Sim3 gScm(Converter::toMatrix3d(solver.GetEstimatedRotation()),Converter::toVector3d(solver.GetEstimatedTranslation()),solver.GetEstimatedScale()); g2o::Sim3 gSmw(Converter::toMatrix3d(pMostBoWMatchesKF->GetRotation()),Converter::toVector3d(pMostBoWMatchesKF->GetTranslation()),1.0); + // 利用初始的Tam估计确定世界坐标系在当前相机中的位姿 g2o::Sim3 gScw = gScm*gSmw; // Similarity matrix of current from the world position + // 准备用来SearchByProjection的位姿信息 cv::Mat mScw = Converter::toCvMat(gScw); - + // 记录最后searchByProjection的结果 vector vpMatchedMP; vpMatchedMP.resize(mpCurrentKF->GetMapPointMatches().size(), static_cast(NULL)); vector vpMatchedKF; vpMatchedKF.resize(mpCurrentKF->GetMapPointMatches().size(), static_cast(NULL)); + // Step 3.1 重新利用之前计算的mScw信息, 通过投影寻找更多的匹配点 int numProjMatches = matcher.SearchByProjection(mpCurrentKF, mScw, vpMapPoints, vpKeyFrames, vpMatchedMP, vpMatchedKF, 8, 1.5); + // 如果拿到了足够多的匹配点 if(numProjMatches >= nProjMatches) { // Optimize Sim3 transformation with every matches Eigen::Matrix mHessian7x7; bool bFixedScale = mbFixScale; + // 在imu模式下没有成熟的地图就不固定scale if(mpTracker->mSensor==System::IMU_MONOCULAR && !mpCurrentKF->GetMap()->GetIniertialBA2()) bFixedScale=false; - + // Step 3.2 利用搜索到的更多的匹配点优化双向投影误差得到的更好的 gScm (Tam) int numOptMatches = Optimizer::OptimizeSim3(mpCurrentKF, pKFi, vpMatchedMP, gScm, 10, mbFixScale, mHessian7x7, true); //cout <<"BoW: " << numOptMatches << " inliers in the Sim3 
optimization" << endl; //cout << "Inliers in Sim3 optimization: " << numOptMatches << endl; + // Step 3.3 如果内点足够多,用更小的半径搜索匹配点,并且再次进行优化(p.s.这里与论文不符,并没有再次优化) if(numOptMatches >= nSim3Inliers) { //cout <<"BoW: " << numOptMatches << " inliers in Sim3 optimization" << endl; + // 前面已经声明了这些变量了,无需再次声明 g2o::Sim3 gSmw(Converter::toMatrix3d(pMostBoWMatchesKF->GetRotation()),Converter::toVector3d(pMostBoWMatchesKF->GetTranslation()),1.0); g2o::Sim3 gScw = gScm*gSmw; // Similarity matrix of current from the world position cv::Mat mScw = Converter::toCvMat(gScw); vector vpMatchedMP; vpMatchedMP.resize(mpCurrentKF->GetMapPointMatches().size(), static_cast(NULL)); + // Step 3.4 重新利用之前计算的mScw信息, 通过更小的半径和更严格的距离的投影寻找匹配点 + // 5 : 半径的增益系数(对比之前下降了)---> 更小的半径, 1.0 , hamming distance 的阀值增益系数---> 允许更小的距离 int numProjOptMatches = matcher.SearchByProjection(mpCurrentKF, mScw, vpMapPoints, vpMatchedMP, 5, 1.0); //cout <<"BoW: " << numProjOptMatches << " matches after of the Sim3 optimization" << endl; - + // ? 论文中说好的再优化一次呢?只做了搜索并没有再次进行OptimizeSim3 + // 当新的投影得到的内点数量大于nProjOptMatches时 if(numProjOptMatches >= nProjOptMatches) { + // Step 4. 用当前关键帧的相邻关键来验证前面得到的Tam(共视几何校验) + // 统计验证成功的关键帧数量 int nNumKFs = 0; //vpMatchedMPs = vpMatchedMP; //vpMPs = vpMapPoints; // Check the Sim3 transformation with the current KeyFrame covisibles + // Step 4.1 拿到用来验证的关键帧组(后称为验证组): 当前关键帧的共视关键帧 vector vpCurrentCovKFs = mpCurrentKF->GetBestCovisibilityKeyFrames(nNumCovisibles); //cout << "---" << endl; //cout << "BoW: Geometrical validation" << endl; int j = 0; + // 遍历验证组当有三个关键帧验证成功或遍历所有的关键帧后结束循环 while(nNumKFs < 3 && jGetPose() * mpCurrentKF->GetPoseInverse(); g2o::Sim3 gSjc(Converter::toMatrix3d(mTjc.rowRange(0, 3).colRange(0, 3)),Converter::toVector3d(mTjc.rowRange(0, 3).col(3)),1.0); g2o::Sim3 gSjw = gSjc * gScw; int numProjMatches_j = 0; vector vpMatchedMPs_j; + // Step 4.2 几何校验函数, 这个函数里面其实是个searchByProjection : 通过之前计算的位姿转换地图点并通过投影搜索匹配点, 若大于一定数目的任务成功验证一次 bool bValid = DetectCommonRegionsFromLastKF(pKFj,pMostBoWMatchesKF, gSjw,numProjMatches_j, vpMapPoints, vpMatchedMPs_j); if(bValid) { + // 统计valid的帧的数量 nNumKFs++; } j++; } - + // 这里又是没有用的代码,只是记录一点信息,可能是为了方便打印检查 if(nNumKFs < 3) { vnStage[index] = 8; vnMatchesStage[index] = nNumKFs; } - + // 记录第二次searchByProjection得到最多匹配点的关键帧的各种信息,最后作为回环帧/融合帧 if(nBestMatchesReproj < numProjOptMatches) { - nBestMatchesReproj = numProjOptMatches; - nBestNumCoindicendes = nNumKFs; - pBestMatchedKF = pMostBoWMatchesKF; - g2oBestScw = gScw; - vpBestMapPoints = vpMapPoints; - vpBestMatchedMapPoints = vpMatchedMP; + nBestMatchesReproj = numProjOptMatches; // 投影匹配的数量 + nBestNumCoindicendes = nNumKFs; // 成功验证的帧数 + pBestMatchedKF = pMostBoWMatchesKF; // 记录候选帧窗口内与当前关键帧相似度最高的帧 + g2oBestScw = gScw; // 记录最优的位姿(这个位姿是由Tam推到出来的 : Taw = Tam * Tmw,这里a表示c) + vpBestMapPoints = vpMapPoints; // 记录所有的地图点 + vpBestMatchedMapPoints = vpMatchedMP; // 记录所有的地图点中被成功匹配的点 } @@ -867,7 +1057,7 @@ bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand, } index++; } - + // 如果成功找到了共同区域帧把记录的最优值存到输出变量中 if(nBestMatchesReproj > 0) { pLastCurrentKF = mpCurrentKF; @@ -877,11 +1067,12 @@ bool LoopClosing::DetectCommonRegionsFromBoW(std::vector &vpBowCand, g2oScw = g2oBestScw; vpMPs = vpBestMapPoints; vpMatchedMPs = vpBestMatchedMapPoints; - + //如果有三个成功验证则return ture return nNumCoincidences >= 3; } else { + // 这里是一些无用的变量, 左值只用来打印检查 int maxStage = -1; int maxMatched; for(int i=0; i &vpBowCand, // f_succes_pr << mpCurrentKF->mNameFile << " " << std::to_string(maxStage) << endl; // f_succes_pr << "% NumCand: " << std::to_string(numCandidates) 
<< "; matches: " << std::to_string(maxMatched) << endl; } + + // 如果少于3个当前关键帧的共视关键帧验证了这个候选帧,那么返回失败,注意,这里的失败并不代表最终的验证失败,后续会开启时序校验 return false; } +/** + * @brief 用来验证候选帧的函数, 这个函数的名字取的不好, 函数的本意是想利用候选帧的共视关键帧来验证候选帧,不如改叫做:DetectCommonRegionsFromCoVKF + * + * @param[in] pCurrentKF 当前关键帧 + * @param[in] pMatchedKF 候选帧 + * @param[in] gScw 世界坐标系在验证帧下的Sim3 + * @param[out] nNumProjMatches 最后匹配的数目 + * @param[out] vpMPs 候选帧的窗口内所有的地图点 + * @param[out] vpMatchedMPs 候选帧的窗口内被匹配到的地图点 + * @return true 验证成功 + * @return false 验证失败 + */ bool LoopClosing::DetectCommonRegionsFromLastKF(KeyFrame* pCurrentKF, KeyFrame* pMatchedKF, g2o::Sim3 &gScw, int &nNumProjMatches, std::vector &vpMPs, std::vector &vpMatchedMPs) -{ +{ + + // 所有的匹配点 set spAlreadyMatchedMPs(vpMatchedMPs.begin(), vpMatchedMPs.end()); + // 通过Sim3变换后投影寻找匹配点 nNumProjMatches = FindMatchesByProjection(pCurrentKF, pMatchedKF, gScw, spAlreadyMatchedMPs, vpMPs, vpMatchedMPs); //cout << "Projection from last KF with " << nNumProjMatches << " matches" << endl; int nProjMatches = 30; + // 如果匹配点数目大于一定的阀值,则认为验证成功,返回ture if(nNumProjMatches >= nProjMatches) { /*cout << "PR_From_LastKF: KF " << pCurrentKF->mnId << " has " << nNumProjMatches << " with KF " << pMatchedKF->mnId << endl; @@ -954,23 +1163,44 @@ bool LoopClosing::DetectCommonRegionsFromLastKF(KeyFrame* pCurrentKF, KeyFrame* return false; } +/** + * @brief 包装与searchByProjection之上, 只不过是把窗口内的所有地图点往当前关键帧上投影, 寻找匹配点 + * + * @param[in] pCurrentKF 当前关键帧 + * @param[in] pMatchedKFw 候选帧 + * @param[in] g2oScw 世界坐标系在验证帧坐标系下的位姿 + * @param[in] spMatchedMPinOrigin 没有用上? + * @param[in] vpMapPoints 候选帧及其共视关键帧组成的窗口里所有的地图点 + * @param[in] vpMatchedMapPoints 候选帧及其共视关键帧组成的窗口里所有被匹配上的地图点 + * @return int 匹配点的数量 + */ int LoopClosing::FindMatchesByProjection(KeyFrame* pCurrentKF, KeyFrame* pMatchedKFw, g2o::Sim3 &g2oScw, set &spMatchedMPinOrigin, vector &vpMapPoints, vector &vpMatchedMapPoints) { int nNumCovisibles = 5; + //拿出候选帧的五个最好的共视关键帧 vector vpCovKFm = pMatchedKFw->GetBestCovisibilityKeyFrames(nNumCovisibles); int nInitialCov = vpCovKFm.size(); + // 把自己也加进去, 组成一个局部窗口 vpCovKFm.push_back(pMatchedKFw); + + // 辅助容器,防止重复添加 set spCheckKFs(vpCovKFm.begin(), vpCovKFm.end()); + // 拿出当前关键帧的共视关键帧 set spCurrentCovisbles = pCurrentKF->GetConnectedKeyFrames(); + // 扩充窗口 + // 对于之前里的每个关键帧 for(int i=0; i vpKFs = vpCovKFm[i]->GetBestCovisibilityKeyFrames(nNumCovisibles); int nInserted = 0; int j = 0; + // 这个while循环统计的遍历后面都没有用到, 没有任何作用 while(j < vpKFs.size() && nInserted < nNumCovisibles) { + // 如果没有被重复添加且不是当前关键帧的共视关键帧 if(spCheckKFs.find(vpKFs[j]) == spCheckKFs.end() && spCurrentCovisbles.find(vpKFs[j]) == spCurrentCovisbles.end()) { spCheckKFs.insert(vpKFs[j]); @@ -978,18 +1208,27 @@ int LoopClosing::FindMatchesByProjection(KeyFrame* pCurrentKF, KeyFrame* pMatche } ++j; } + + // 把每个帧的共视关键帧都加到窗口内 vpCovKFm.insert(vpCovKFm.end(), vpKFs.begin(), vpKFs.end()); } + // 辅助容器, 防止地图点被重复添加 set spMapPoints; + + // 清空这两个容器,重新进行searchByProjection vpMapPoints.clear(); vpMatchedMapPoints.clear(); + // 对于窗口内每个关键帧 for(KeyFrame* pKFi : vpCovKFm) { + // 对于每个关键帧的地图点 for(MapPoint* pMPij : pKFi->GetMapPointMatches()) { + // 如果指针不是空,且点不是bad if(!pMPij || pMPij->isBad()) continue; + // 如果没有被重复添加 if(spMapPoints.find(pMPij) == spMapPoints.end()) { spMapPoints.insert(pMPij); @@ -1003,7 +1242,9 @@ int LoopClosing::FindMatchesByProjection(KeyFrame* pCurrentKF, KeyFrame* pMatche ORBmatcher matcher(0.9, true); + // 初始化被匹配到的地图点容器, 匹配上限是当前帧的地图点数量 vpMatchedMapPoints.resize(pCurrentKF->GetMapPointMatches().size(), static_cast(NULL)); + // 把窗口中的点向当前关键帧投影, 搜索匹配点, 注意验证的时候用的搜索半径是最小的 
int num_matches = matcher.SearchByProjection(pCurrentKF, mScw, vpMapPoints, vpMatchedMapPoints, 3, 1.5); return num_matches; @@ -1286,6 +1527,7 @@ void LoopClosing::CorrectLoop() mpCurrentKF->AddLoopEdge(mpLoopMatchedKF); // Launch a new thread to perform Global Bundle Adjustment (Only if few keyframes, if not it would take too much time) + // 闭环地图没有imu初始化或者 仅有一个地图且内部关键帧<200时才执行全局BA,否则太慢 if(!pLoopMap->isImuInitialized() || (pLoopMap->KeyFramesInMap()<200 && mpAtlas->CountMaps()==1)) { // Step 8:新建一个线程用于全局BA优化 @@ -1303,21 +1545,31 @@ void LoopClosing::CorrectLoop() mLastLoopKFid = mpCurrentKF->mnId; //TODO old varible, it is not use in the new algorithm } - +/** + * @brief 在检测到成功验证的融合帧后进行地图融合主要任务: + * 1. 焊缝区域局部BA + * 2. Essential Graph BA + * 融合两张地图为一张地图 + */ void LoopClosing::MergeLocal() { Verbose::PrintMess("MERGE: Merge Visual detected!!!!", Verbose::VERBOSITY_NORMAL); - + //mpTracker->SetStepByStep(true); + // 窗口内共视关键帧的数量 int numTemporalKFs = 15; //TODO (set by parameter): Temporal KFs in the local window if the map is inertial. //Relationship to rebuild the essential graph, it is used two times, first in the local window and later in the rest of the map + // 建立本质图必须的量 KeyFrame* pNewChild; KeyFrame* pNewParent; - + + // 当前关键帧的窗口 vector vpLocalCurrentWindowKFs; + // 候选关键帧的窗口 vector vpMergeConnectedKFs; // Flag that is true only when we stopped a running BA, in this case we need relaunch at the end of the merge + // 记录是否把全局BA停下 bool bRelaunchBA = false; Verbose::PrintMess("MERGE: Check Full Bundle Adjustment", Verbose::VERBOSITY_DEBUG); @@ -1338,8 +1590,10 @@ void LoopClosing::MergeLocal() } Verbose::PrintMess("MERGE: Request Stop Local Mapping", Verbose::VERBOSITY_DEBUG); + // 请求局部建图线程停止 mpLocalMapper->RequestStop(); // Wait until Local Mapping has effectively stopped + // 等待局部建图工作停止 while(!mpLocalMapper->isStopped()) { usleep(1000); @@ -1350,16 +1604,26 @@ void LoopClosing::MergeLocal() // Merge map will become in the new active map with the local window of KFs and MPs from the current map. // Later, the elements of the current map will be transform to the new active map reference, in order to keep real time tracking + // 当前关键帧的共视关键帧和他们观测到的地图点会先被融合, 融合后的图会变成新的当前激活地图. + // 之后, 所有当前地图的其他部分会被转换到当前地图中, 这样是为了保证tracking的实时性. + + // 当前关键帧地图的指针 Map* pCurrentMap = mpCurrentKF->GetMap(); + // 融合关键帧地图的指针 Map* pMergeMap = mpMergeMatchedKF->GetMap(); // Ensure current keyframe is updated + // 先保证当前关键帧的链接关系是最新的 mpCurrentKF->UpdateConnections(); - + // Step 1 构建当前关键帧和融合关键帧的局部窗口(关键帧+地图点) //Get the current KF and its neighbors(visual->covisibles; inertial->temporal+covisibles) + // 当前关键帧的局部窗口(仅是辅助容器,防止重复添加) set spLocalWindowKFs; //Get MPs in the welding area from the current map + // 当前关键帧局部串口能观测到的所有地图点(仅是辅助容器,防止重复添加) set spLocalWindowMPs; + // 这段代码只在visual状态下才会被使用,所以只会执行else + // Step 1.1 构造当前关键帧局部共视帧窗口 if(pCurrentMap->IsInertial() && pMergeMap->IsInertial()) //TODO Check the correct initialization { KeyFrame* pKFi = mpCurrentKF; @@ -1375,7 +1639,7 @@ void LoopClosing::MergeLocal() } pKFi = mpCurrentKF->mNextKF; - while(pKFi) + while(pKFi) //! 
这里会死循环,不过无所谓,这个外面的if永远不会执行 { spLocalWindowKFs.insert(pKFi); @@ -1385,22 +1649,32 @@ void LoopClosing::MergeLocal() } else { + // 把自己先加到窗口内 spLocalWindowKFs.insert(mpCurrentKF); } + //拿到当前关键帧的numTemporalKFs(15)个最佳共视关键帧 vector vpCovisibleKFs = mpCurrentKF->GetBestCovisibilityKeyFrames(numTemporalKFs); + // 把当前帧的共视帧都加到窗口里 spLocalWindowKFs.insert(vpCovisibleKFs.begin(), vpCovisibleKFs.end()); + // 限制while循环次数 const int nMaxTries = 3; int nNumTries = 0; + // 如果窗口里的关键帧数量不够就再拉一些窗口里的关键帧的共视关键帧(二级共视关键帧)进来 while(spLocalWindowKFs.size() < numTemporalKFs && nNumTries < nMaxTries) { vector vpNewCovKFs; vpNewCovKFs.empty(); + // 遍历创口内的每一个关键帧 for(KeyFrame* pKFi : spLocalWindowKFs) { + // 拿到一些二级共视关键帧 vector vpKFiCov = pKFi->GetBestCovisibilityKeyFrames(numTemporalKFs/2); + + // 对于每个二级共视关键帧 for(KeyFrame* pKFcov : vpKFiCov) { + // 如果指针不为空,且关键帧没有被标记为bad,且没有被添加过则加到窗口内 if(pKFcov && !pKFcov->isBad() && spLocalWindowKFs.find(pKFcov) == spLocalWindowKFs.end()) { vpNewCovKFs.push_back(pKFcov); @@ -1408,11 +1682,13 @@ void LoopClosing::MergeLocal() } } - + // 用set记录,防止重复添加 spLocalWindowKFs.insert(vpNewCovKFs.begin(), vpNewCovKFs.end()); + // 递增循环次数 nNumTries++; } + // Step 1.2 把当前关键帧窗口里关键帧观测到的地图点加进来 for(KeyFrame* pKFi : spLocalWindowKFs) { if(!pKFi || pKFi->isBad()) @@ -1422,7 +1698,10 @@ void LoopClosing::MergeLocal() spLocalWindowMPs.insert(spMPs.begin(), spMPs.end()); } + // Step 1.3 构造融合帧的共视帧窗口 + // 融合关键帧能的共视关键帧们 set spMergeConnectedKFs; + // 这段代码只在visual状态下才会被使用,所以只会执行else if(pCurrentMap->IsInertial() && pMergeMap->IsInertial()) //TODO Check the correct initialization { KeyFrame* pKFi = mpMergeMatchedKF; @@ -1435,26 +1714,35 @@ void LoopClosing::MergeLocal() } pKFi = mpMergeMatchedKF->mNextKF; - while(pKFi) + while(pKFi)//! 这里会死循环,不过无所谓,if永远不会执行 { spMergeConnectedKFs.insert(pKFi); } } else { + // 先把融合关键帧自己添加到窗口内 spMergeConnectedKFs.insert(mpMergeMatchedKF); } + // 拿到融合关键帧最好的numTemporalKFs(15)个最佳共视关键帧 vpCovisibleKFs = mpMergeMatchedKF->GetBestCovisibilityKeyFrames(numTemporalKFs); spMergeConnectedKFs.insert(vpCovisibleKFs.begin(), vpCovisibleKFs.end()); + // 记录循环次数 nNumTries = 0; + // 如果融合关键帧窗口里的关键帧不够就再拉一些窗口里的关键帧的共视帧进来(二级共视关键帧) while(spMergeConnectedKFs.size() < numTemporalKFs && nNumTries < nMaxTries) { vector vpNewCovKFs; + // 遍历融合关键帧内的所有的一级共视关键帧 for(KeyFrame* pKFi : spMergeConnectedKFs) { + // 拿到一些二级共视关键帧 vector vpKFiCov = pKFi->GetBestCovisibilityKeyFrames(numTemporalKFs/2); + // 对于每个二级共视关键帧 for(KeyFrame* pKFcov : vpKFiCov) { + + // 如果指针不为空,且关键帧没有被标记为bad,且没有被添加过则加到窗口内 if(pKFcov && !pKFcov->isBad() && spMergeConnectedKFs.find(pKFcov) == spMergeConnectedKFs.end()) { vpNewCovKFs.push_back(pKFcov); @@ -1462,46 +1750,62 @@ void LoopClosing::MergeLocal() } } - + // 用set记录,防止重复添加 spMergeConnectedKFs.insert(vpNewCovKFs.begin(), vpNewCovKFs.end()); + // 递增循环次数 nNumTries++; } + // 融合关键帧共视窗口内的所有地图点 set spMapPointMerge; + // Step 1.4 把融合关键帧窗口里关键帧观测到的地图点加进来 for(KeyFrame* pKFi : spMergeConnectedKFs) { set vpMPs = pKFi->GetMapPoints(); spMapPointMerge.insert(vpMPs.begin(),vpMPs.end()); } + //把融合关键帧窗口内的地图点加到待融合的向量中 vector vpCheckFuseMapPoint; vpCheckFuseMapPoint.reserve(spMapPointMerge.size()); std::copy(spMapPointMerge.begin(), spMapPointMerge.end(), std::back_inserter(vpCheckFuseMapPoint)); - - // + // Step 2 根据之前的Sim3初始值, 记录当前帧窗口内关键帧,地图点的矫正前的值,和矫正后的初始值 + + // Step 2.1 先计算当前关键帧矫正前的值,后矫正后的初始值 cv::Mat Twc = mpCurrentKF->GetPoseInverse(); cv::Mat Rwc = Twc.rowRange(0,3).colRange(0,3); cv::Mat twc = Twc.rowRange(0,3).col(3); + + // 记录没有融合矫正之前的Swc和Scw g2o::Sim3 
g2oNonCorrectedSwc(Converter::toMatrix3d(Rwc),Converter::toVector3d(twc),1.0); g2o::Sim3 g2oNonCorrectedScw = g2oNonCorrectedSwc.inverse(); + + // 拿到之前通过Sim3(见 NewDetectCommonRegion)计算得到的当前关键帧融合矫正之后的初始位姿 g2o::Sim3 g2oCorrectedScw = mg2oMergeScw; //TODO Check the transformation + // 记录当前关键帧窗口内所有关键帧融合矫正之前的位姿,和融合矫正之后的初始位姿 KeyFrameAndPose vCorrectedSim3, vNonCorrectedSim3; vCorrectedSim3[mpCurrentKF]=g2oCorrectedScw; vNonCorrectedSim3[mpCurrentKF]=g2oNonCorrectedScw; + // Step 2.2 通过当前关键帧融合矫正前后的位姿,把当前关键帧的共视窗口里面剩余的关键帧矫正前后的位姿都给填写上 + // 对于每个当前关键帧共视窗口里的关键帧 for(KeyFrame* pKFi : spLocalWindowKFs) { + // 跳过空的指针,或者坏的关键帧 if(!pKFi || pKFi->isBad()) { continue; } + // 确保这些共视关键帧在当前地图下 + // 保存第i个共视关键帧融合矫正后的初始位姿 g2o::Sim3 g2oCorrectedSiw; if(pKFi!=mpCurrentKF) { + // 先记录下融合矫正前的位姿 cv::Mat Tiw = pKFi->GetPose(); cv::Mat Riw = Tiw.rowRange(0,3).colRange(0,3); cv::Mat tiw = Tiw.rowRange(0,3).col(3); @@ -1509,6 +1813,7 @@ void LoopClosing::MergeLocal() //Pose without correction vNonCorrectedSim3[pKFi]=g2oSiw; + // 根据链式法则,利用当前关键帧和第i个共视关键帧的位姿关系,以及当前关键帧矫正后的初始位姿,推得第i个共视关键帧的矫正后的初始位姿 cv::Mat Tic = Tiw*Twc; cv::Mat Ric = Tic.rowRange(0,3).colRange(0,3); cv::Mat tic = Tic.rowRange(0,3).col(3); @@ -1520,20 +1825,24 @@ void LoopClosing::MergeLocal() { g2oCorrectedSiw = g2oCorrectedScw; } + // 这一句没有什么作用,下面又被覆盖了 pKFi->mTcwMerge = pKFi->GetPose(); // Update keyframe pose with corrected Sim3. First transform Sim3 to SE3 (scale translation) + // 根据上面计算得到的融合矫正后的初始位姿(Sim3),更新窗口内每个关键帧的mTcwMerge(矫正后的初始位姿) Eigen::Matrix3d eigR = g2oCorrectedSiw.rotation().toRotationMatrix(); Eigen::Vector3d eigt = g2oCorrectedSiw.translation(); double s = g2oCorrectedSiw.scale(); pKFi->mfScale = s; + // 修正尺度 eigt *=(1./s); //[R t/s;0 1] cv::Mat correctedTiw = Converter::toCvSE3(eigR,eigt); - + // 赋值得到的矫正后的初始位姿 pKFi->mTcwMerge = correctedTiw; + //! 
貌似又是没用的代码 if(pCurrentMap->isImuInitialized()) { Eigen::Matrix3d Rcor = eigR.transpose()*vNonCorrectedSim3[pKFi].rotation().toRotationMatrix(); @@ -1541,33 +1850,43 @@ void LoopClosing::MergeLocal() } } - + // Step 2.3 记录所有地图点矫正前的位置,和矫正后的初始值 + // 对于每个窗口内的地图点 for(MapPoint* pMPi : spLocalWindowMPs) { if(!pMPi || pMPi->isBad()) continue; - + + // 拿到参考关键帧 KeyFrame* pKFref = pMPi->GetReferenceKeyFrame(); + // 拿到计算好的矫正后参考关键帧的初始位姿 g2o::Sim3 g2oCorrectedSwi = vCorrectedSim3[pKFref].inverse(); + // 拿到矫正前的参考关键帧的位姿 g2o::Sim3 g2oNonCorrectedSiw = vNonCorrectedSim3[pKFref]; // Project with non-corrected pose and project back with corrected pose + // 先把3D点转换到参考关键帧矫正前的坐标系中 cv::Mat P3Dw = pMPi->GetWorldPos(); Eigen::Matrix eigP3Dw = Converter::toVector3d(P3Dw); + // 再转换到矫正后的初始坐标系中 Eigen::Matrix eigCorrectedP3Dw = g2oCorrectedSwi.map(g2oNonCorrectedSiw.map(eigP3Dw)); + // 计算旋转部分的变化 Eigen::Matrix3d eigR = g2oCorrectedSwi.rotation().toRotationMatrix(); Eigen::Matrix3d Rcor = eigR * g2oNonCorrectedSiw.rotation().toRotationMatrix(); - + // 矫正后3d点的初始位置 cv::Mat cvCorrectedP3Dw = Converter::toCvMat(eigCorrectedP3Dw); pMPi->mPosMerge = cvCorrectedP3Dw; + // 用旋转部分的变化更新计算3D点normal的新值 pMPi->mNormalVectorMerge = Converter::toCvMat(Rcor) * pMPi->GetNormal(); } - { + // Step 3 更新两个地图的信息(当前帧所在地图,融合帧所在地图) + { // 当前地图会被更新,老的地图中的重复地图点会被剔除 unique_lock currentLock(pCurrentMap->mMutexMapUpdate); // We update the current map with the Merge information unique_lock mergeLock(pMergeMap->mMutexMapUpdate); // We remove the Kfs and MPs in the merged area from the old map - + // 对于当前关键帧共视窗口内的每一个关键帧 + // Step 3.1 更新当前关键帧共视窗口内的关键帧信息 for(KeyFrame* pKFi : spLocalWindowKFs) { if(!pKFi || pKFi->isBad()) @@ -1575,76 +1894,105 @@ void LoopClosing::MergeLocal() //cout << "Bad KF in correction" << endl; continue; } - + // 记录融合矫正前的位姿 pKFi->mTcwBefMerge = pKFi->GetPose(); pKFi->mTwcBefMerge = pKFi->GetPoseInverse(); - pKFi->SetPose(pKFi->mTcwMerge); + // 把这个关键帧的位姿设置为融合矫正后的初始位姿 + pKFi->SetPose(pKFi->mTcwMerge); // Make sure connections are updated + // 把这个关键帧的地图设置为融合帧所在的地图 pKFi->UpdateMap(pMergeMap); + // 记录这个关键帧是被哪个当前关键帧融合的 pKFi->mnMergeCorrectedForKF = mpCurrentKF->mnId; + // 把这个关键帧所有权给到融合帧所在地图里 pMergeMap->AddKeyFrame(pKFi); + // 把这个关键帧从当前活跃地图中删掉 pCurrentMap->EraseKeyFrame(pKFi); - + // 又是没用的代码 if(pCurrentMap->isImuInitialized()) { pKFi->SetVelocity(pKFi->mVwbMerge); } } - + // Step 3.2 更新当前关键帧共视帧窗口所能观测到的地图点的信息 + //把所有地图点的所有权给到老图里面 + // 对于每个当前关键帧共视窗口内的所有地图点 for(MapPoint* pMPi : spLocalWindowMPs) { if(!pMPi || pMPi->isBad()) continue; - + // 把3D点的位置设置成融合矫正之后的位置 pMPi->SetWorldPos(pMPi->mPosMerge); + // 把3D点normal设置成融合矫正之后的法向量 pMPi->SetNormalVector(pMPi->mNormalVectorMerge); + // 把3D点所在的地图设置成融合帧所在的地图 pMPi->UpdateMap(pMergeMap); + // 把3D点添加进融合帧所在地图 pMergeMap->AddMapPoint(pMPi); + // 把3D点从当前活跃地图中删掉 pCurrentMap->EraseMapPoint(pMPi); } - + // Step 3.3 更新剩余信息,如Altas和融合帧所在地图的信息 + // 在Altas中把老图重新激活 mpAtlas->ChangeMap(pMergeMap); + // 当前地图的信息都到融合帧所在地图里去了,可以设置为bad mpAtlas->SetMapBad(pCurrentMap); + // 记录地图变化次数 pMergeMap->IncreaseChangeIndex(); } + // Step 4 融合两个地图(当前关键帧所在地图和融合帧所在地图)的本质图(其实修改的是spanning tree) //Rebuild the essential graph in the local window + // 重新构造essential graph的相关信息 + // 设置当前地图的第一个关键帧不再是第一次生成树了 pCurrentMap->GetOriginKF()->SetFirstConnection(false); + // 从当前关键帧开始反向遍历整个地图 pNewChild = mpCurrentKF->GetParent(); // Old parent, it will be the new child of this KF pNewParent = mpCurrentKF; // Old child, now it will be the parent of its own parent(we need eliminate this KF from children list in its old parent) + // 把当前帧的父亲设置为融合帧 
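// ------------------------------------------------------------------
// Aside (not part of the original patch): the parent-flipping loop below follows
// the same pattern as reversing a singly linked list in place. A minimal sketch,
// with a hypothetical Node type standing in for KeyFrame:
//
//   struct Node { Node* parent; };
//   void ReverseParentChain(Node* newParent /*starts at the current KF*/,
//                           Node* newChild  /*starts at its old parent*/)
//   {
//       while (newChild)
//       {
//           Node* oldParent = newChild->parent; // remember the next edge before overwriting it
//           newChild->parent = newParent;       // flip this parent edge
//           newParent = newChild;               // advance one step up the old tree
//           newChild = oldParent;
//       }
//   }
// ------------------------------------------------------------------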
mpCurrentKF->ChangeParent(mpMergeMatchedKF); + // 作者本来想先只在当前关键帧窗口内操作,但是把第二个条件注释了, 可能是为了少些代码, 但是地图太大的时候会影响实时性 while(pNewChild /*&& spLocalWindowKFs.find(pNewChild) != spLocalWindowKFs.end()*/) { + // new parent (old child) 不再是 new child (old parent) 的 child 了 pNewChild->EraseChild(pNewParent); // We remove the relation between the old parent and the new for avoid loop + // 记录原先老parent的parent, 用于后续遍历 KeyFrame * pOldParent = pNewChild->GetParent(); - + // 把 new parent 设置为 new child 的 parent (父子关系互换) pNewChild->ChangeParent(pNewParent); //cout << "The new parent of KF " << pNewChild->mnId << " was " << pNewChild->GetParent()->mnId << endl; + // 赋值指针, 用于遍历下一组父子 pNewParent = pNewChild; pNewChild = pOldParent; } //Update the connections between the local window + // 更新融合帧局部的连接关系 mpMergeMatchedKF->UpdateConnections(); //cout << "MERGE-VISUAL: Essential graph rebuilded" << endl; //std::copy(spMapPointCurrent.begin(), spMapPointCurrent.end(), std::back_inserter(vpCheckFuseMapPoint)); + // 重新拿到融合帧局部的共视帧窗窗口 vpMergeConnectedKFs = mpMergeMatchedKF->GetVectorCovisibleKeyFrames(); vpMergeConnectedKFs.push_back(mpMergeMatchedKF); + // !bug 这里前面已经做过了, 所以这里重复添加了两遍点 vpCheckFuseMapPoint.reserve(spMapPointMerge.size()); std::copy(spMapPointMerge.begin(), spMapPointMerge.end(), std::back_inserter(vpCheckFuseMapPoint)); // Project MapPoints observed in the neighborhood of the merge keyframe // into the current keyframe and neighbors using corrected poses. // Fuse duplications. + // Step 5 把融合关键帧的共视窗口里的地图点投到当前关键帧的共视窗口里,把重复的点融合掉(用老的位置,代替新的位置) SearchAndFuse(vCorrectedSim3, vpCheckFuseMapPoint); // Update connectivity + // Step 6 再次更新链接关系 + // 更新当前关键帧共视窗口内所有关键帧的连接 for(KeyFrame* pKFi : spLocalWindowKFs) { if(!pKFi || pKFi->isBad()) @@ -1652,6 +2000,7 @@ void LoopClosing::MergeLocal() pKFi->UpdateConnections(); } + // 更新融合关键帧共视窗口内所有关键帧的连接 for(KeyFrame* pKFi : spMergeConnectedKFs) { if(!pKFi || pKFi->isBad()) @@ -1662,27 +2011,34 @@ void LoopClosing::MergeLocal() bool bStop = false; Verbose::PrintMess("MERGE: Start local BA ", Verbose::VERBOSITY_DEBUG); + // 为Local BA的接口, 把set转为vector + // Step 7 焊缝BA(Welding BA) vpLocalCurrentWindowKFs.clear(); vpMergeConnectedKFs.clear(); std::copy(spLocalWindowKFs.begin(), spLocalWindowKFs.end(), std::back_inserter(vpLocalCurrentWindowKFs)); std::copy(spMergeConnectedKFs.begin(), spMergeConnectedKFs.end(), std::back_inserter(vpMergeConnectedKFs)); if (mpTracker->mSensor==System::IMU_MONOCULAR || mpTracker->mSensor==System::IMU_STEREO) + //! 没有用的代码 { Optimizer::MergeInertialBA(mpLocalMapper->GetCurrKF(),mpMergeMatchedKF,&bStop, mpCurrentKF->GetMap(),vCorrectedSim3); } else { + // 运行的 welding BA , 优化所有的当前关键帧共视窗口里的关键帧和地图点, 固定所有融合帧共视窗口里的帧 Optimizer::LocalBundleAdjustment(mpCurrentKF, vpLocalCurrentWindowKFs, vpMergeConnectedKFs,&bStop); } // Loop closed. Release Local Mapping. 
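// ------------------------------------------------------------------
// Aside (not part of the original patch): Step 8 below propagates the welding
// correction to the rest of the map with the same chain rule already used in
// Step 2.2. A minimal sketch with 4x4 SE3 poses stored in cv::Mat; the names
// Tiw_old, Twc_old and Tcw_new are hypothetical:
//
//   cv::Mat Tic = Tiw_old * Twc_old;  // relative pose of keyframe i w.r.t. the current KF,
//                                     // measured before the correction
//   cv::Mat Tiw_new = Tic * Tcw_new;  // re-anchor keyframe i on the corrected current pose
//
// Every remaining keyframe therefore keeps its relative placement while the
// whole map is dragged into the merged reference frame.
// ------------------------------------------------------------------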
+ // 在welding BA 之后我们就可以继续建图了 mpLocalMapper->Release(); Verbose::PrintMess("MERGE: Finish the LBA", Verbose::VERBOSITY_DEBUG); //// + // Step 8 把之前的局部信息传递到整个当前帧所在地图中 //Update the non critical area from the current map to the merged map + // 把前面优位姿优化的结果传递到地图中其他的关键帧 vector vpCurrentMapKFs = pCurrentMap->GetAllKeyFrames(); vector vpCurrentMapMPs = pCurrentMap->GetAllMapPoints(); @@ -1697,8 +2053,11 @@ void LoopClosing::MergeLocal() { if(mpTracker->mSensor == System::MONOCULAR) { + // 锁住当前地图 unique_lock currentLock(pCurrentMap->mMutexMapUpdate); // We update the current map with the Merge information + + // Step 8.1 拿到当前帧所在地图里的所有关键帧, 重复之前的过程, 不过这里是对于地图里剩余的所有的关键帧 for(KeyFrame* pKFi : vpCurrentMapKFs) { if(!pKFi || pKFi->isBad() || pKFi->GetMap() != pCurrentMap) @@ -1774,8 +2133,11 @@ void LoopClosing::MergeLocal() } // Optimize graph (and update the loop position for each element form the begining to the end) + // Step 8.2 本质图优化 if(mpTracker->mSensor != System::MONOCULAR) - { + { + //固定 : 所有融合帧共视窗口内的关键帧 + 所有当前关键帧共视窗口内的关键帧 + //优化: 当前关键帧所在地图里的所有关键帧(除了当前关键帧共视窗口内的关键帧) + 当前地图里的所有地图点 Optimizer::OptimizeEssentialGraph(mpCurrentKF, vpMergeConnectedKFs, vpLocalCurrentWindowKFs, vpCurrentMapKFs, vpCurrentMapMPs); } @@ -1785,6 +2147,7 @@ void LoopClosing::MergeLocal() unique_lock currentLock(pCurrentMap->mMutexMapUpdate); // We update the current map with the Merge information unique_lock mergeLock(pMergeMap->mMutexMapUpdate); // We remove the Kfs and MPs in the merged area from the old map + // 确保融合后信息被更新 for(KeyFrame* pKFi : vpCurrentMapKFs) { if(!pKFi || pKFi->isBad() || pKFi->GetMap() != pCurrentMap) @@ -1810,10 +2173,15 @@ void LoopClosing::MergeLocal() } } + + //Essential graph 优化后可以重新开始局部建图了 mpLocalMapper->Release(); Verbose::PrintMess("MERGE:Completed!!!!!", Verbose::VERBOSITY_DEBUG); + // 如果之前停掉了全局的BA,就开启全局BA + // 这里没有imu, 所以isImuInitialized一定是false, 所以第二个条件一定是true + // Step 9 全局BA if(bRelaunchBA && (!pCurrentMap->isImuInitialized() || (pCurrentMap->KeyFramesInMap()<200 && mpAtlas->CountMaps()==1))) { // Launch a new thread to perform Global Bundle Adjustment @@ -1821,40 +2189,50 @@ void LoopClosing::MergeLocal() mbRunningGBA = true; mbFinishedGBA = false; mbStopGBA = false; + // 重新执行全局BA mpThreadGBA = new thread(&LoopClosing::RunGlobalBundleAdjustment,this, pMergeMap, mpCurrentKF->mnId); } + // 添加融合边(这里不是参与优化的边,只是记录) mpMergeMatchedKF->AddMergeEdge(mpCurrentKF); mpCurrentKF->AddMergeEdge(mpMergeMatchedKF); pCurrentMap->IncreaseChangeIndex(); pMergeMap->IncreaseChangeIndex(); - + // altas移除不好的地图 mpAtlas->RemoveBadMaps(); } +/** + * @brief 带imu情况下的地图融合, 这个函数90%的内容都没有被用上,迷 + */ void LoopClosing::MergeLocal2() { cout << "Merge detected!!!!" << endl; - + // 没用上 int numTemporalKFs = 11; //TODO (set by parameter): Temporal KFs in the local window if the map is inertial. //Relationship to rebuild the essential graph, it is used two times, first in the local window and later in the rest of the map + // 用来重新构造Essential Graph KeyFrame* pNewChild; KeyFrame* pNewParent; - + + // 没用上 vector vpLocalCurrentWindowKFs; vector vpMergeConnectedKFs; + // 记录用初始Sim3 计算出来的当前关键帧局部共视帧窗口内的所有关键帧矫正前的值和矫正后的初始值 KeyFrameAndPose CorrectedSim3, NonCorrectedSim3; // NonCorrectedSim3[mpCurrentKF]=mg2oLoopScw; // Flag that is true only when we stopped a running BA, in this case we need relaunch at the end of the merge + // 记录要不要重新进行全局ba bool bRelaunchBA = false; cout << "Check Full Bundle Adjustment" << endl; // If a Global Bundle Adjustment is running, abort it + // ! 
bug: the GBA stopped here is never relaunched; bRelaunchBA is not used again afterwards
 if(isRunningGBA())
 {
 unique_lock lock(mMutexGBA);
@@ -1872,6 +2250,7 @@ void LoopClosing::MergeLocal2()
 cout << "Request Stop Local Mapping" << endl;
+ // Stop local mapping
 mpLocalMapper->RequestStop();
 // Wait until Local Mapping has effectively stopped
 while(!mpLocalMapper->isStopped())
 {
@@ -1880,41 +2259,50 @@ void LoopClosing::MergeLocal2()
 }
 cout << "Local Map stopped" << endl;
+ // Pointer to the current keyframe's map
 Map* pCurrentMap = mpCurrentKF->GetMap();
+ // Pointer to the merge keyframe's map
 Map* pMergeMap = mpMergeMatchedKF->GetMap();
-
+ // Step 1 Use the previously computed Sim3 to transform the whole current map into the merge keyframe's map
 {
+ // mSold_new is the correction computed earlier from the Sim3; it carries the poses of the current keyframe's map into the merge keyframe's map
 float s_on = mSold_new.scale();
 cv::Mat R_on = Converter::toCvMat(mSold_new.rotation().toRotationMatrix());
 cv::Mat t_on = Converter::toCvMat(mSold_new.translation());
-
+ // Lock the Atlas to update the map
 unique_lock lock(mpAtlas->GetCurrentMap()->mMutexMapUpdate);
 mpLocalMapper->EmptyQueue();
 std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
+ // Whether scale is involved
 bool bScaleVel=false;
 if(s_on!=1)
 bScaleVel=true;
+ // Apply the Sim3 to move the whole current map into the merge keyframe's map
 mpAtlas->GetCurrentMap()->ApplyScaledRotation(R_on,s_on,bScaleVel,t_on);
+ // Update the IMU state according to the scale
 mpTracker->UpdateFrameIMU(s_on,mpCurrentKF->GetImuBias(),mpTracker->GetLastKeyFrame());
 std::chrono::steady_clock::time_point t3 = std::chrono::steady_clock::now();
 }
-
+ // Step 2 If the map is not mature yet, run an InertialOptimization here
 const int numKFnew=pCurrentMap->KeyFramesInMap();
-
+ // In IMU mode, when the map is not mature
 if((mpTracker->mSensor==System::IMU_MONOCULAR || mpTracker->mSensor==System::IMU_STEREO)&& !pCurrentMap->GetIniertialBA2()){
 // Map is not completly initialized
 Eigen::Vector3d bg, ba;
 bg << 0., 0., 0.;
 ba << 0., 0., 0.;
+ // Optimize the gyro and accelerometer biases bg, ba of the current map
 Optimizer::InertialOptimization(pCurrentMap,bg,ba);
 IMU::Bias b (ba[0],ba[1],ba[2],bg[0],bg[1],bg[2]);
 unique_lock lock(mpAtlas->GetCurrentMap()->mMutexMapUpdate);
+ // Update with the optimized biases
 mpTracker->UpdateFrameIMU(1.0f,b,mpTracker->GetLastKeyFrame());
 // Set map initialized
+ // Mark the map as mature
 pCurrentMap->SetIniertialBA2();
 pCurrentMap->SetIniertialBA1();
 pCurrentMap->SetImuInitialized();
@@ -1927,11 +2315,11 @@ void LoopClosing::MergeLocal2()
 unique_lock currentLock(pCurrentMap->mMutexMapUpdate); // We update the current map with the Merge information
 unique_lock mergeLock(pMergeMap->mMutexMapUpdate); // We remove the Kfs and MPs in the merged area from the old map
-
+ // All keyframes and map points of the merge keyframe's map
 vector vpMergeMapKFs = pMergeMap->GetAllKeyFrames();
 vector vpMergeMapMPs = pMergeMap->GetAllMapPoints();
-
+ // For every keyframe of the merge keyframe's map
 for(KeyFrame* pKFi : vpMergeMapKFs)
 {
 if(!pKFi || pKFi->isBad() || pKFi->GetMap() != pMergeMap)
 {
 continue;
 }
 // Make sure connections are updated
+ // Move the keyframe to the current map and remove it from the merge map
 pKFi->UpdateMap(pCurrentMap);
 pCurrentMap->AddKeyFrame(pKFi);
 pMergeMap->EraseKeyFrame(pKFi);
 }
-
+ // For every map point of the merge keyframe's map
 for(MapPoint* pMPi : vpMergeMapMPs)
 {
 if(!pMPi || pMPi->isBad() || pMPi->GetMap() != pMergeMap)
 continue;
+ // Add the map point to the current map and remove it from the merge map
 pMPi->UpdateMap(pCurrentMap);
 pCurrentMap->AddMapPoint(pMPi);
 pMergeMap->EraseMapPoint(pMPi);
 }
 // Save non corrected poses (already merged maps)
+ // Save the pre-merge-correction pose of every keyframe
 vector vpKFs = pCurrentMap->GetAllKeyFrames();
 for(KeyFrame* pKFi : vpKFs)
 {
@@ -1984,12 +2375,14 @@
 vector vpCheckFuseMapPoint; // MapPoint vector from current map to allow to fuse duplicated points with the old map (merge)
 vector vpCurrentConnectedKFs;
+ // Take the merge keyframe's local window and make sure it ends up as 1+5: the merge keyframe itself plus its 5 best covisible keyframes
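// ------------------------------------------------------------------
// Aside (not part of the original patch): a compact equivalent of the
// window-building idiom used below (push the keyframe itself, append its
// covisibles, cap the size); kMaxWindow is a hypothetical stand-in:
//
//   std::vector<KeyFrame*> window;
//   window.push_back(pKF);                                 // 1: the keyframe itself
//   std::vector<KeyFrame*> cov = pKF->GetVectorCovisibleKeyFrames();
//   window.insert(window.end(), cov.begin(), cov.end());   // 2: its covisible keyframes
//   const size_t kMaxWindow = 6;                           // 1 + 5
//   if (window.size() > kMaxWindow)
//       window.erase(window.begin() + kMaxWindow, window.end());
// ------------------------------------------------------------------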
mvpMergeConnectedKFs.push_back(mpMergeMatchedKF); vector aux = mpMergeMatchedKF->GetVectorCovisibleKeyFrames(); mvpMergeConnectedKFs.insert(mvpMergeConnectedKFs.end(), aux.begin(), aux.end()); if (mvpMergeConnectedKFs.size()>6) mvpMergeConnectedKFs.erase(mvpMergeConnectedKFs.begin()+6,mvpMergeConnectedKFs.end()); + // 拿出当前关键帧的局部窗口, 确保最后是(1+5), 1: 融合帧自己 2: 5个共视关键帧 mpCurrentKF->UpdateConnections(); vpCurrentConnectedKFs.push_back(mpCurrentKF); aux = mpCurrentKF->GetVectorCovisibleKeyFrames(); @@ -1997,6 +2390,7 @@ void LoopClosing::MergeLocal2() if (vpCurrentConnectedKFs.size()>6) vpCurrentConnectedKFs.erase(vpCurrentConnectedKFs.begin()+6,vpCurrentConnectedKFs.end()); + // 所有融合帧局部窗口的地图点 set spMapPointMerge; for(KeyFrame* pKFi : mvpMergeConnectedKFs) { @@ -2009,8 +2403,10 @@ void LoopClosing::MergeLocal2() vpCheckFuseMapPoint.reserve(spMapPointMerge.size()); std::copy(spMapPointMerge.begin(), spMapPointMerge.end(), std::back_inserter(vpCheckFuseMapPoint)); + // 把融合帧所在局部窗口的地图点投向当前关键帧所在窗口,移除重复的点.(新值用旧值代替) SearchAndFuse(vpCurrentConnectedKFs, vpCheckFuseMapPoint); + // 更新当前关键帧共视窗口内所有关键帧的连接 for(KeyFrame* pKFi : vpCurrentConnectedKFs) { if(!pKFi || pKFi->isBad()) @@ -2018,6 +2414,7 @@ void LoopClosing::MergeLocal2() pKFi->UpdateConnections(); } + // 更新融合关键帧共视窗口内所有关键帧的连接 for(KeyFrame* pKFi : mvpMergeConnectedKFs) { if(!pKFi || pKFi->isBad()) @@ -2034,6 +2431,7 @@ void LoopClosing::MergeLocal2() // Perform BA bool bStopFlag=false; KeyFrame* pCurrKF = mpTracker->GetLastKeyFrame(); + // Step 3 在这里进行welding BA Optimizer::MergeInertialBA(pCurrKF, mpMergeMatchedKF, &bStopFlag, pCurrentMap,CorrectedSim3); // Release Local Mapping. @@ -2049,6 +2447,7 @@ void LoopClosing::SearchAndFuse(const KeyFrameAndPose &CorrectedPosesMap, vector int total_replaces = 0; + // 遍历每个关键帧 for(KeyFrameAndPose::const_iterator mit=CorrectedPosesMap.begin(), mend=CorrectedPosesMap.end(); mit!=mend;mit++) { int num_replaces = 0; @@ -2198,6 +2597,7 @@ void LoopClosing::RunGlobalBundleAdjustment(Map* pActiveMap, unsigned long nLoop if(!bImuInit) Optimizer::GlobalBundleAdjustemnt(pActiveMap,10,&mbStopGBA,nLoopKF,false); else + // 仅有一个地图且内部关键帧<200,并且IMU完成了第一阶段初始化后才会进行下面 Optimizer::FullInertialBA(pActiveMap,7,false,nLoopKF,&mbStopGBA); #ifdef REGISTER_TIMES @@ -2207,7 +2607,7 @@ void LoopClosing::RunGlobalBundleAdjustment(Map* pActiveMap, unsigned long nLoop vTimeFullGBA_ms.push_back(timeFullGBA); #endif - + // 记录GBA已经迭代次数,用来检查全局BA过程是否是因为意外结束的 int idx = mnFullBAIdx; // Optimizer::GlobalBundleAdjustemnt(mpMap,10,&mbStopGBA,nLoopKF,false); diff --git a/src/MLPnPsolver.cpp b/src/MLPnPsolver.cpp index 303390e..1a100b3 100644 --- a/src/MLPnPsolver.cpp +++ b/src/MLPnPsolver.cpp @@ -49,6 +49,8 @@ #include +// MLPnP算法,极大似然PnP算法,解决PnP问题,用在重定位中,不用运动的先验知识来估计相机位姿 +// 基于论文《MLPNP - A REAL-TIME MAXIMUM LIKELIHOOD SOLUTION TO THE PERSPECTIVE-N-POINT PROBLEM》 namespace ORB_SLAM3 { /** @@ -62,20 +64,22 @@ namespace ORB_SLAM3 { * @param[in] N 所有2D点的个数 * @param[in] mpCamera 相机模型,利用该变量对3D点进行投影 */ - MLPnPsolver::MLPnPsolver(const Frame &F, const vector &vpMapPointMatches): - mnInliersi(0), // 内点的个数 - mnIterations(0), // Ransac迭代次数 - mnBestInliers(0), // 最佳内点数 - N(0), // 所有2D点的个数 - mpCamera(F.mpCamera) // 相机模型,利用该变量对3D点进行投影 - { - mvpMapPointMatches = vpMapPointMatches; // 待匹配的特征点,是当前帧和候选关键帧用BoW进行快速匹配的结果 - mvBearingVecs.reserve(F.mvpMapPoints.size()); // ? 
初始化3D点的单位向量 - mvP2D.reserve(F.mvpMapPoints.size()); // 初始化3D点的投影点 - mvSigma2.reserve(F.mvpMapPoints.size()); // 初始化卡方检验中的sigma值 - mvP3Dw.reserve(F.mvpMapPoints.size()); // 初始化3D点坐标 - mvKeyPointIndices.reserve(F.mvpMapPoints.size()); // 初始化3D点的索引值 - mvAllIndices.reserve(F.mvpMapPoints.size()); // 初始化所有索引值 + MLPnPsolver::MLPnPsolver(const Frame &F, // 输入帧的数据 + const vector &vpMapPointMatches // 待匹配的特征点,是当前帧和候选关键帧用BoW进行快速匹配的结果 + ) : + mnInliersi(0), // 内点的个数 + mnIterations(0), // Ransac迭代次数 + mnBestInliers(0), // 最佳内点数 + N(0), // 所有2D点的个数 + mpCamera(F.mpCamera) // 相机模型,利用该变量对3D点进行投影 + { + mvpMapPointMatches = vpMapPointMatches; // 待匹配的特征点,是当前帧和候选关键帧用BoW进行快速匹配的结果 + mvBearingVecs.reserve(F.mvpMapPoints.size()); // 初始化3D点的单位向量 + mvP2D.reserve(F.mvpMapPoints.size()); // 初始化3D点的投影点 + mvSigma2.reserve(F.mvpMapPoints.size()); // 初始化卡方检验中的sigma值 + mvP3Dw.reserve(F.mvpMapPoints.size()); // 初始化3D点坐标 + mvKeyPointIndices.reserve(F.mvpMapPoints.size()); // 初始化3D点的索引值 + mvAllIndices.reserve(F.mvpMapPoints.size()); // 初始化所有索引值 // 一些必要的初始化操作 int idx = 0; @@ -120,6 +124,7 @@ namespace ORB_SLAM3 { } } + // 设置RANSAC参数 SetRansacParameters(); } @@ -141,8 +146,8 @@ namespace ORB_SLAM3 { // Step 1: 判断,如果2D点个数不足以启动RANSAC迭代过程的最小下限,则退出 if(Nproject(P3Dc); + // 计算残差 float distX = P2D.x-uv.x; float distY = P2D.y-uv.y; float error2 = distX*distX+distY*distY; + // 判定是不是内点 if(error2 vIndices; vIndices.reserve(mvbBestInliers.size()); @@ -379,10 +399,17 @@ namespace ORB_SLAM3 { } //Bearing vectors and 3D points used for this ransac iteration + // 因为是重定义,所以要另外定义局部变量,和iterate里面一样 + // 初始化单位向量和3D点,给当前重定义函数使用 bearingVectors_t bearingVecs; points_t p3DS; vector indexes; + // 注意这里是用所有内点vIndices.size()来进行相机位姿估计 + // 而iterate里面是用最小集mRansacMinSet(6个)来进行相机位姿估计 + // TODO:有什么区别呢?答:肯定有啦,mRansacMinSet只是粗略解, + // 这里之所以要Refine就是要用所有满足模型的内点来更加精确地近似表达模型 + // 这样求出来的解才会更加准确 for(size_t i=0; i rankTest(planarTest); //int r, c; //double minEigenVal = abs(eigen_solver.eigenvalues().real().minCoeff(&r, &c)); @@ -718,7 +748,7 @@ namespace ORB_SLAM3 { // |r13 r23 r33| tmp.transposeInPlace(); - + // 平移部分 t 只表示了正确的方向,没有尺度,需要求解 scale, 先求系数 double scale = 1.0 / std::sqrt(std::abs(tmp.col(1).norm() * tmp.col(2).norm())); // find best rotation matrix in frobenius sense // 利用Frobenious范数计算最佳的旋转矩阵,利用公式(19), R = U_R*V_R^T @@ -734,7 +764,7 @@ namespace ORB_SLAM3 { Rout1 *= -1.0; // rotate this matrix back using the eigen frame - // ? 
the computation was done in the planar case, so the estimated rotation matrix has to be transformed back; by equation (21), R = Rs*R
+ // where Rs is the rotation matrix formed by the eigenvectors
+ // note that eigenRot was already transposed with transposeInPlace(), which is why Rout1 was transposed before as well, i.e. tmp.transposeInPlace()
 Rout1 = eigenRot.transpose() * Rout1;
@@ -941,16 +971,25 @@ namespace ORB_SLAM3 {
 result.block<3, 1>(0, 3) = tout;//-result.block<3, 3>(0, 0) * tout;
 }
+ /**
+ * @brief Converts a rotation vector into a rotation matrix, i.e. the exponential map from the Lie algebra so(3) to the Lie group SO(3)
+ * @param[in] omega rotation vector
+ * @param[out] R rotation matrix
+ */
 Eigen::Matrix3d MLPnPsolver::rodrigues2rot(const Eigen::Vector3d &omega) {
+ // Initialize the rotation matrix
 rotation_t R = Eigen::Matrix3d::Identity();
+ // Build the skew-symmetric matrix of the rotation vector
 Eigen::Matrix3d skewW;
 skewW << 0.0, -omega(2), omega(1), omega(2), 0.0, -omega(0), -omega(1), omega(0), 0.0;
+ // The rotation angle is the norm of the rotation vector
 double omega_norm = omega.norm();
+ // Convert the rotation vector into a rotation matrix with the Rodrigues formula
 if (omega_norm > std::numeric_limits::epsilon())
 R = R + sin(omega_norm) / omega_norm * skewW + (1 - cos(omega_norm)) / (omega_norm * omega_norm) * (skewW * skewW);
 return R;
 }
+ /**
+ * @brief Converts a rotation matrix into a rotation vector, i.e. the logarithm map from the Lie group SO(3) to the Lie algebra so(3)
+ * @param[in] R rotation matrix
+ * @param[out] omega rotation vector
+ Problem: the logarithm map from the Lie group SO(3) to the Lie algebra so(3)
+ Given a rotation matrix, recover its logarithm map ln(R) in closed form
+ As noted in *14 Lectures on Visual SLAM*, a major motivation for using Lie algebras is optimization, where derivatives are essential
+ The product of two matrices in the Lie group SO(3) cannot be expressed as an addition in the Lie algebra so(3); the discrepancy between the two is described by the BCH formula, whose left-multiplication approximation gives the approximate Jacobian J
+ Once the Rodrigues parameters have been computed, nonlinear optimization can be applied, using derivatives expressed through the exponential map back to SO(3)
+ This is why the Rodrigues parameters are computed before calling the nonlinear MLPnP refinement
+ */
 Eigen::Vector3d MLPnPsolver::rot2rodrigues(const Eigen::Matrix3d &R) {
 rodrigues_t omega;
 omega << 0.0, 0.0, 0.0;
+ // R.trace() is the trace of the matrix, i.e. the sum of its eigenvalues
 double trace = R.trace() - 1.0;
+
+ // Logarithm map from the Lie group SO(3) to the Lie algebra so(3)
+ // The logarithm map ln(R) turns a rotation matrix into a Lie algebra element; since the Taylor expansion of the logarithm is unwieldy, this closed form is used instead
+ // wnorm is the recovered angle
 double wnorm = acos(trace / 2.0);
+
+ // If wnorm is larger than the smallest non-zero floating point value the machine can represent, the vector can be built; otherwise it stays 0
 if (wnorm > std::numeric_limits::epsilon())
 {
+ // |r11 r21 r31|
+ // R = |r12 r22 r32|
+ // |r13 r23 r33|
 omega[0] = (R(2, 1) - R(1, 2));
 omega[1] = (R(0, 2) - R(2, 0));
 omega[2] = (R(1, 0) - R(0, 1));
- double sc = wnorm / (2.0*sin(wnorm));
+ // theta |r23 - r32|
+ // ln(R) = ------------ * |r31 - r13|
+ // 2*sin(theta) |r12 - r21|
+ double sc = wnorm / (2.0 * sin(wnorm));
 omega *= sc;
 }
 return omega;
 }
@@ -1123,12 +1187,12 @@ namespace ORB_SLAM3 {
 * @param[in] t translation vector t
 * @param[out] jacs Jacobian matrix
 */
- void MLPnPsolver::mlpnpJacs(const point_t& pt, const Eigen::Vector3d& nullspace_r,
- const Eigen::Vector3d& nullspace_s, const rodrigues_t& w,
- const translation_t& t, Eigen::MatrixXd& jacs){
- double r1 = nullspace_r[0];
- double r2 = nullspace_r[1];
- double r3 = nullspace_r[2];
+ void MLPnPsolver::mlpnpJacs(const point_t &pt, const Eigen::Vector3d &nullspace_r,
+ const Eigen::Vector3d &nullspace_s, const rodrigues_t &w,
+ const translation_t &t, Eigen::MatrixXd &jacs) {
+ double r1 = nullspace_r[0];
+ double r2 = nullspace_r[1];
+ double r3 = nullspace_r[2];
 double s1 = nullspace_s[0];
 double s2 = nullspace_s[1];
 double s3 = nullspace_s[2];
 double t2 = t[1];
 double t3 = t[2];
- double t5 = w1*w1;
- double t6 = w2*w2;
- double t7 = w3*w3;
- double t8 = t5+t6+t7;
- double t9 = sqrt(t8);
- double t10 = sin(t9);
- double t11 = 1.0/sqrt(t8);
- double t12 = cos(t9);
- double t13 = t12-1.0;
- double t14 = 1.0/t8;
- double t16 = t10*t11*w3;
- double t17 = t13*t14*w1*w2;
- double t19 = t10*t11*w2;
- double t20 = t13*t14*w1*w3;
- double t24 = t6+t7;
- double t27 = t16+t17;
- double t28 = Y1*t27;
- double t29 = t19-t20;
- double t30 = Z1*t29;
- double t31 = t13*t14*t24;
- double t32 = t31+1.0;
- double t33 = X1*t32;
- double t15 = 
t1-t28+t30+t33; - double t21 = t10*t11*w1; - double t22 = t13*t14*w2*w3; - double t45 = t5+t7; - double t53 = t16-t17; - double t54 = X1*t53; - double t55 = t21+t22; - double t56 = Z1*t55; - double t57 = t13*t14*t45; - double t58 = t57+1.0; - double t59 = Y1*t58; - double t18 = t2+t54-t56+t59; - double t34 = t5+t6; - double t38 = t19+t20; - double t39 = X1*t38; - double t40 = t21-t22; - double t41 = Y1*t40; - double t42 = t13*t14*t34; - double t43 = t42+1.0; - double t44 = Z1*t43; - double t23 = t3-t39+t41+t44; - double t25 = 1.0/pow(t8,3.0/2.0); - double t26 = 1.0/(t8*t8); - double t35 = t12*t14*w1*w2; - double t36 = t5*t10*t25*w3; - double t37 = t5*t13*t26*w3*2.0; - double t46 = t10*t25*w1*w3; - double t47 = t5*t10*t25*w2; - double t48 = t5*t13*t26*w2*2.0; - double t49 = t10*t11; - double t50 = t5*t12*t14; - double t51 = t13*t26*w1*w2*w3*2.0; - double t52 = t10*t25*w1*w2*w3; - double t60 = t15*t15; - double t61 = t18*t18; - double t62 = t23*t23; - double t63 = t60+t61+t62; - double t64 = t5*t10*t25; - double t65 = 1.0/sqrt(t63); - double t66 = Y1*r2*t6; - double t67 = Z1*r3*t7; - double t68 = r1*t1*t5; - double t69 = r1*t1*t6; - double t70 = r1*t1*t7; - double t71 = r2*t2*t5; - double t72 = r2*t2*t6; - double t73 = r2*t2*t7; - double t74 = r3*t3*t5; - double t75 = r3*t3*t6; - double t76 = r3*t3*t7; - double t77 = X1*r1*t5; - double t78 = X1*r2*w1*w2; - double t79 = X1*r3*w1*w3; - double t80 = Y1*r1*w1*w2; - double t81 = Y1*r3*w2*w3; - double t82 = Z1*r1*w1*w3; - double t83 = Z1*r2*w2*w3; - double t84 = X1*r1*t6*t12; - double t85 = X1*r1*t7*t12; - double t86 = Y1*r2*t5*t12; - double t87 = Y1*r2*t7*t12; - double t88 = Z1*r3*t5*t12; - double t89 = Z1*r3*t6*t12; - double t90 = X1*r2*t9*t10*w3; - double t91 = Y1*r3*t9*t10*w1; - double t92 = Z1*r1*t9*t10*w2; - double t102 = X1*r3*t9*t10*w2; - double t103 = Y1*r1*t9*t10*w3; - double t104 = Z1*r2*t9*t10*w1; - double t105 = X1*r2*t12*w1*w2; - double t106 = X1*r3*t12*w1*w3; - double t107 = Y1*r1*t12*w1*w2; - double t108 = Y1*r3*t12*w2*w3; - double t109 = Z1*r1*t12*w1*w3; - double t110 = Z1*r2*t12*w2*w3; - double t93 = t66+t67+t68+t69+t70+t71+t72+t73+t74+t75+t76+t77+t78+t79+t80+t81+t82+t83+t84+t85+t86+t87+t88+t89+t90+t91+t92-t102-t103-t104-t105-t106-t107-t108-t109-t110; - double t94 = t10*t25*w1*w2; - double t95 = t6*t10*t25*w3; - double t96 = t6*t13*t26*w3*2.0; - double t97 = t12*t14*w2*w3; - double t98 = t6*t10*t25*w1; - double t99 = t6*t13*t26*w1*2.0; - double t100 = t6*t10*t25; - double t101 = 1.0/pow(t63,3.0/2.0); - double t111 = t6*t12*t14; - double t112 = t10*t25*w2*w3; - double t113 = t12*t14*w1*w3; - double t114 = t7*t10*t25*w2; - double t115 = t7*t13*t26*w2*2.0; - double t116 = t7*t10*t25*w1; - double t117 = t7*t13*t26*w1*2.0; - double t118 = t7*t12*t14; - double t119 = t13*t24*t26*w1*2.0; - double t120 = t10*t24*t25*w1; - double t121 = t119+t120; - double t122 = t13*t26*t34*w1*2.0; - double t123 = t10*t25*t34*w1; - double t131 = t13*t14*w1*2.0; - double t124 = t122+t123-t131; - double t139 = t13*t14*w3; - double t125 = -t35+t36+t37+t94-t139; - double t126 = X1*t125; - double t127 = t49+t50+t51+t52-t64; - double t128 = Y1*t127; - double t129 = t126+t128-Z1*t124; - double t130 = t23*t129*2.0; - double t132 = t13*t26*t45*w1*2.0; - double t133 = t10*t25*t45*w1; - double t138 = t13*t14*w2; - double t134 = -t46+t47+t48+t113-t138; - double t135 = X1*t134; - double t136 = -t49-t50+t51+t52+t64; - double t137 = Z1*t136; - double t140 = X1*s1*t5; - double t141 = Y1*s2*t6; - double t142 = Z1*s3*t7; - double t143 = s1*t1*t5; - double t144 = s1*t1*t6; 
- double t145 = s1*t1*t7; - double t146 = s2*t2*t5; - double t147 = s2*t2*t6; - double t148 = s2*t2*t7; - double t149 = s3*t3*t5; - double t150 = s3*t3*t6; - double t151 = s3*t3*t7; - double t152 = X1*s2*w1*w2; - double t153 = X1*s3*w1*w3; - double t154 = Y1*s1*w1*w2; - double t155 = Y1*s3*w2*w3; - double t156 = Z1*s1*w1*w3; - double t157 = Z1*s2*w2*w3; - double t158 = X1*s1*t6*t12; - double t159 = X1*s1*t7*t12; - double t160 = Y1*s2*t5*t12; - double t161 = Y1*s2*t7*t12; - double t162 = Z1*s3*t5*t12; - double t163 = Z1*s3*t6*t12; - double t164 = X1*s2*t9*t10*w3; - double t165 = Y1*s3*t9*t10*w1; - double t166 = Z1*s1*t9*t10*w2; - double t183 = X1*s3*t9*t10*w2; - double t184 = Y1*s1*t9*t10*w3; - double t185 = Z1*s2*t9*t10*w1; - double t186 = X1*s2*t12*w1*w2; - double t187 = X1*s3*t12*w1*w3; - double t188 = Y1*s1*t12*w1*w2; - double t189 = Y1*s3*t12*w2*w3; - double t190 = Z1*s1*t12*w1*w3; - double t191 = Z1*s2*t12*w2*w3; - double t167 = t140+t141+t142+t143+t144+t145+t146+t147+t148+t149+t150+t151+t152+t153+t154+t155+t156+t157+t158+t159+t160+t161+t162+t163+t164+t165+t166-t183-t184-t185-t186-t187-t188-t189-t190-t191; - double t168 = t13*t26*t45*w2*2.0; - double t169 = t10*t25*t45*w2; - double t170 = t168+t169; - double t171 = t13*t26*t34*w2*2.0; - double t172 = t10*t25*t34*w2; - double t176 = t13*t14*w2*2.0; - double t173 = t171+t172-t176; - double t174 = -t49+t51+t52+t100-t111; - double t175 = X1*t174; - double t177 = t13*t24*t26*w2*2.0; - double t178 = t10*t24*t25*w2; - double t192 = t13*t14*w1; - double t179 = -t97+t98+t99+t112-t192; - double t180 = Y1*t179; - double t181 = t49+t51+t52-t100+t111; - double t182 = Z1*t181; - double t193 = t13*t26*t34*w3*2.0; - double t194 = t10*t25*t34*w3; - double t195 = t193+t194; - double t196 = t13*t26*t45*w3*2.0; - double t197 = t10*t25*t45*w3; - double t200 = t13*t14*w3*2.0; - double t198 = t196+t197-t200; - double t199 = t7*t10*t25; - double t201 = t13*t24*t26*w3*2.0; - double t202 = t10*t24*t25*w3; - double t203 = -t49+t51+t52-t118+t199; - double t204 = Y1*t203; - double t205 = t1*2.0; - double t206 = Z1*t29*2.0; - double t207 = X1*t32*2.0; - double t208 = t205+t206+t207-Y1*t27*2.0; - double t209 = t2*2.0; - double t210 = X1*t53*2.0; - double t211 = Y1*t58*2.0; - double t212 = t209+t210+t211-Z1*t55*2.0; - double t213 = t3*2.0; - double t214 = Y1*t40*2.0; - double t215 = Z1*t43*2.0; - double t216 = t213+t214+t215-X1*t38*2.0; - jacs(0, 0) = t14*t65*(X1*r1*w1*2.0+X1*r2*w2+X1*r3*w3+Y1*r1*w2+Z1*r1*w3+r1*t1*w1*2.0+r2*t2*w1*2.0+r3*t3*w1*2.0+Y1*r3*t5*t12+Y1*r3*t9*t10-Z1*r2*t5*t12-Z1*r2*t9*t10-X1*r2*t12*w2-X1*r3*t12*w3-Y1*r1*t12*w2+Y1*r2*t12*w1*2.0-Z1*r1*t12*w3+Z1*r3*t12*w1*2.0+Y1*r3*t5*t10*t11-Z1*r2*t5*t10*t11+X1*r2*t12*w1*w3-X1*r3*t12*w1*w2-Y1*r1*t12*w1*w3+Z1*r1*t12*w1*w2-Y1*r1*t10*t11*w1*w3+Z1*r1*t10*t11*w1*w2-X1*r1*t6*t10*t11*w1-X1*r1*t7*t10*t11*w1+X1*r2*t5*t10*t11*w2+X1*r3*t5*t10*t11*w3+Y1*r1*t5*t10*t11*w2-Y1*r2*t5*t10*t11*w1-Y1*r2*t7*t10*t11*w1+Z1*r1*t5*t10*t11*w3-Z1*r3*t5*t10*t11*w1-Z1*r3*t6*t10*t11*w1+X1*r2*t10*t11*w1*w3-X1*r3*t10*t11*w1*w2+Y1*r3*t10*t11*w1*w2*w3+Z1*r2*t10*t11*w1*w2*w3)-t26*t65*t93*w1*2.0-t14*t93*t101*(t130+t15*(-X1*t121+Y1*(t46+t47+t48-t13*t14*w2-t12*t14*w1*w3)+Z1*(t35+t36+t37-t13*t14*w3-t10*t25*w1*w2))*2.0+t18*(t135+t137-Y1*(t132+t133-t13*t14*w1*2.0))*2.0)*(1.0/2.0); - jacs(0, 1) = 
t14*t65*(X1*r2*w1+Y1*r1*w1+Y1*r2*w2*2.0+Y1*r3*w3+Z1*r2*w3+r1*t1*w2*2.0+r2*t2*w2*2.0+r3*t3*w2*2.0-X1*r3*t6*t12-X1*r3*t9*t10+Z1*r1*t6*t12+Z1*r1*t9*t10+X1*r1*t12*w2*2.0-X1*r2*t12*w1-Y1*r1*t12*w1-Y1*r3*t12*w3-Z1*r2*t12*w3+Z1*r3*t12*w2*2.0-X1*r3*t6*t10*t11+Z1*r1*t6*t10*t11+X1*r2*t12*w2*w3-Y1*r1*t12*w2*w3+Y1*r3*t12*w1*w2-Z1*r2*t12*w1*w2-Y1*r1*t10*t11*w2*w3+Y1*r3*t10*t11*w1*w2-Z1*r2*t10*t11*w1*w2-X1*r1*t6*t10*t11*w2+X1*r2*t6*t10*t11*w1-X1*r1*t7*t10*t11*w2+Y1*r1*t6*t10*t11*w1-Y1*r2*t5*t10*t11*w2-Y1*r2*t7*t10*t11*w2+Y1*r3*t6*t10*t11*w3-Z1*r3*t5*t10*t11*w2+Z1*r2*t6*t10*t11*w3-Z1*r3*t6*t10*t11*w2+X1*r2*t10*t11*w2*w3+X1*r3*t10*t11*w1*w2*w3+Z1*r1*t10*t11*w1*w2*w3)-t26*t65*t93*w2*2.0-t14*t93*t101*(t18*(Z1*(-t35+t94+t95+t96-t13*t14*w3)-Y1*t170+X1*(t97+t98+t99-t13*t14*w1-t10*t25*w2*w3))*2.0+t15*(t180+t182-X1*(t177+t178-t13*t14*w2*2.0))*2.0+t23*(t175+Y1*(t35-t94+t95+t96-t13*t14*w3)-Z1*t173)*2.0)*(1.0/2.0); - jacs(0, 2) = t14*t65*(X1*r3*w1+Y1*r3*w2+Z1*r1*w1+Z1*r2*w2+Z1*r3*w3*2.0+r1*t1*w3*2.0+r2*t2*w3*2.0+r3*t3*w3*2.0+X1*r2*t7*t12+X1*r2*t9*t10-Y1*r1*t7*t12-Y1*r1*t9*t10+X1*r1*t12*w3*2.0-X1*r3*t12*w1+Y1*r2*t12*w3*2.0-Y1*r3*t12*w2-Z1*r1*t12*w1-Z1*r2*t12*w2+X1*r2*t7*t10*t11-Y1*r1*t7*t10*t11-X1*r3*t12*w2*w3+Y1*r3*t12*w1*w3+Z1*r1*t12*w2*w3-Z1*r2*t12*w1*w3+Y1*r3*t10*t11*w1*w3+Z1*r1*t10*t11*w2*w3-Z1*r2*t10*t11*w1*w3-X1*r1*t6*t10*t11*w3-X1*r1*t7*t10*t11*w3+X1*r3*t7*t10*t11*w1-Y1*r2*t5*t10*t11*w3-Y1*r2*t7*t10*t11*w3+Y1*r3*t7*t10*t11*w2+Z1*r1*t7*t10*t11*w1+Z1*r2*t7*t10*t11*w2-Z1*r3*t5*t10*t11*w3-Z1*r3*t6*t10*t11*w3-X1*r3*t10*t11*w2*w3+X1*r2*t10*t11*w1*w2*w3+Y1*r1*t10*t11*w1*w2*w3)-t26*t65*t93*w3*2.0-t14*t93*t101*(t18*(Z1*(t46-t113+t114+t115-t13*t14*w2)-Y1*t198+X1*(t49+t51+t52+t118-t7*t10*t25))*2.0+t23*(X1*(-t97+t112+t116+t117-t13*t14*w1)+Y1*(-t46+t113+t114+t115-t13*t14*w2)-Z1*t195)*2.0+t15*(t204+Z1*(t97-t112+t116+t117-t13*t14*w1)-X1*(t201+t202-t13*t14*w3*2.0))*2.0)*(1.0/2.0); - jacs(0, 3) = r1*t65-t14*t93*t101*t208*(1.0/2.0); - jacs(0, 4) = r2*t65-t14*t93*t101*t212*(1.0/2.0); - jacs(0, 5) = r3*t65-t14*t93*t101*t216*(1.0/2.0); - jacs(1, 0) = t14*t65*(X1*s1*w1*2.0+X1*s2*w2+X1*s3*w3+Y1*s1*w2+Z1*s1*w3+s1*t1*w1*2.0+s2*t2*w1*2.0+s3*t3*w1*2.0+Y1*s3*t5*t12+Y1*s3*t9*t10-Z1*s2*t5*t12-Z1*s2*t9*t10-X1*s2*t12*w2-X1*s3*t12*w3-Y1*s1*t12*w2+Y1*s2*t12*w1*2.0-Z1*s1*t12*w3+Z1*s3*t12*w1*2.0+Y1*s3*t5*t10*t11-Z1*s2*t5*t10*t11+X1*s2*t12*w1*w3-X1*s3*t12*w1*w2-Y1*s1*t12*w1*w3+Z1*s1*t12*w1*w2+X1*s2*t10*t11*w1*w3-X1*s3*t10*t11*w1*w2-Y1*s1*t10*t11*w1*w3+Z1*s1*t10*t11*w1*w2-X1*s1*t6*t10*t11*w1-X1*s1*t7*t10*t11*w1+X1*s2*t5*t10*t11*w2+X1*s3*t5*t10*t11*w3+Y1*s1*t5*t10*t11*w2-Y1*s2*t5*t10*t11*w1-Y1*s2*t7*t10*t11*w1+Z1*s1*t5*t10*t11*w3-Z1*s3*t5*t10*t11*w1-Z1*s3*t6*t10*t11*w1+Y1*s3*t10*t11*w1*w2*w3+Z1*s2*t10*t11*w1*w2*w3)-t14*t101*t167*(t130+t15*(Y1*(t46+t47+t48-t113-t138)+Z1*(t35+t36+t37-t94-t139)-X1*t121)*2.0+t18*(t135+t137-Y1*(-t131+t132+t133))*2.0)*(1.0/2.0)-t26*t65*t167*w1*2.0; - jacs(1, 1) = 
t14*t65*(X1*s2*w1+Y1*s1*w1+Y1*s2*w2*2.0+Y1*s3*w3+Z1*s2*w3+s1*t1*w2*2.0+s2*t2*w2*2.0+s3*t3*w2*2.0-X1*s3*t6*t12-X1*s3*t9*t10+Z1*s1*t6*t12+Z1*s1*t9*t10+X1*s1*t12*w2*2.0-X1*s2*t12*w1-Y1*s1*t12*w1-Y1*s3*t12*w3-Z1*s2*t12*w3+Z1*s3*t12*w2*2.0-X1*s3*t6*t10*t11+Z1*s1*t6*t10*t11+X1*s2*t12*w2*w3-Y1*s1*t12*w2*w3+Y1*s3*t12*w1*w2-Z1*s2*t12*w1*w2+X1*s2*t10*t11*w2*w3-Y1*s1*t10*t11*w2*w3+Y1*s3*t10*t11*w1*w2-Z1*s2*t10*t11*w1*w2-X1*s1*t6*t10*t11*w2+X1*s2*t6*t10*t11*w1-X1*s1*t7*t10*t11*w2+Y1*s1*t6*t10*t11*w1-Y1*s2*t5*t10*t11*w2-Y1*s2*t7*t10*t11*w2+Y1*s3*t6*t10*t11*w3-Z1*s3*t5*t10*t11*w2+Z1*s2*t6*t10*t11*w3-Z1*s3*t6*t10*t11*w2+X1*s3*t10*t11*w1*w2*w3+Z1*s1*t10*t11*w1*w2*w3)-t26*t65*t167*w2*2.0-t14*t101*t167*(t18*(X1*(t97+t98+t99-t112-t192)+Z1*(-t35+t94+t95+t96-t139)-Y1*t170)*2.0+t15*(t180+t182-X1*(-t176+t177+t178))*2.0+t23*(t175+Y1*(t35-t94+t95+t96-t139)-Z1*t173)*2.0)*(1.0/2.0); - jacs(1, 2) = t14*t65*(X1*s3*w1+Y1*s3*w2+Z1*s1*w1+Z1*s2*w2+Z1*s3*w3*2.0+s1*t1*w3*2.0+s2*t2*w3*2.0+s3*t3*w3*2.0+X1*s2*t7*t12+X1*s2*t9*t10-Y1*s1*t7*t12-Y1*s1*t9*t10+X1*s1*t12*w3*2.0-X1*s3*t12*w1+Y1*s2*t12*w3*2.0-Y1*s3*t12*w2-Z1*s1*t12*w1-Z1*s2*t12*w2+X1*s2*t7*t10*t11-Y1*s1*t7*t10*t11-X1*s3*t12*w2*w3+Y1*s3*t12*w1*w3+Z1*s1*t12*w2*w3-Z1*s2*t12*w1*w3-X1*s3*t10*t11*w2*w3+Y1*s3*t10*t11*w1*w3+Z1*s1*t10*t11*w2*w3-Z1*s2*t10*t11*w1*w3-X1*s1*t6*t10*t11*w3-X1*s1*t7*t10*t11*w3+X1*s3*t7*t10*t11*w1-Y1*s2*t5*t10*t11*w3-Y1*s2*t7*t10*t11*w3+Y1*s3*t7*t10*t11*w2+Z1*s1*t7*t10*t11*w1+Z1*s2*t7*t10*t11*w2-Z1*s3*t5*t10*t11*w3-Z1*s3*t6*t10*t11*w3+X1*s2*t10*t11*w1*w2*w3+Y1*s1*t10*t11*w1*w2*w3)-t26*t65*t167*w3*2.0-t14*t101*t167*(t18*(Z1*(t46-t113+t114+t115-t138)-Y1*t198+X1*(t49+t51+t52+t118-t199))*2.0+t23*(X1*(-t97+t112+t116+t117-t192)+Y1*(-t46+t113+t114+t115-t138)-Z1*t195)*2.0+t15*(t204+Z1*(t97-t112+t116+t117-t192)-X1*(-t200+t201+t202))*2.0)*(1.0/2.0); - jacs(1, 3) = s1*t65-t14*t101*t167*t208*(1.0/2.0); - jacs(1, 4) = s2*t65-t14*t101*t167*t212*(1.0/2.0); - jacs(1, 5) = s3*t65-t14*t101*t167*t216*(1.0/2.0); + double t5 = w1 * w1; + double t6 = w2 * w2; + double t7 = w3 * w3; + + // t8 = theta^2 = w1^2+w2^2+w3^2 + double t8 = t5 + t6 + t7; + // t9 = sqrt(t8) = sqrt(theta^2) = theta + double t9 = sqrt(t8); + // t10 = sin(theta) + double t10 = sin(t9); + // t11 = 1 / theta + double t11 = 1.0 / sqrt(t8); + // t12 = cos(theta) + double t12 = cos(t9); + // t13 = cos(theta) - 1 + double t13 = t12 - 1.0; + // t14 = 1 / theta^2 + double t14 = 1.0 / t8; + // t16 = sin(theta)/theta*w3 + // 令 A = sin(theta)/theta = t10*t11 + // t16 = A*w3 + double t16 = t10 * t11 * w3; + // t17 = (cos(theta) - 1)/theta^2 * w1 * w2 + // 令 B = (cos(theta) - 1)/theta^2 = t13*t14 + // t17 = B*w1*w2 + double t17 = t13 * t14 * w1 * w2; + // t19 = sin(theta)/theta*w2 + // = A*w2 + double t19 = t10 * t11 * w2; + // t20 = (cos(theta) - 1) / theta^2 * w1 * w3 + // = B*w1*w3 + double t20 = t13 * t14 * w1 * w3; + // t24 = w2^2+w3^2 + double t24 = t6 + t7; + // t27 = A*w3 + B*w1*w2 + // = -r12 + double t27 = t16 + t17; + // t28 = (A*w3 + B*w1*w2)*Y1 + // = -r12*Y1 + double t28 = Y1 * t27; + // t29 = A*w2 - B*w1*w3 + // = r13 + double t29 = t19 - t20; + // t30 = (A*w2 - B*w1*w3) * Z1 + // = r13 * Z1 + double t30 = Z1 * t29; + // t31 = (cos(theta) - 1) /theta^2 * (w2^2+w3^2) + // = B * (w2^2+w3^2) + double t31 = t13 * t14 * t24; + // t32 = B * (w2^2+w3^2) + 1 + // = r11 + double t32 = t31 + 1.0; + // t33 = (B * (w2^2+w3^2) + 1) * X1 + // = r11 * X1 + double t33 = X1 * t32; + // t15 = t1 - (A*w3 + B*w1*w2)*Y1 + (A*w2 - B*w1*w3) * Z1 + (B * (w2^2+w3^2) + 1) * X1 + // = t1 + r11*X1 + r12*Y1 + r13*Z1 + double t15 
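+            // For reference (our annotation, assuming the standard Rodrigues expansion
+            // R = I + A*[w]_x - B*[w]_x^2 with A = sin(theta)/theta and B = (cos(theta)-1)/theta^2),
+            // the rij entries referred to in the comments below are:
+            //   r11 = 1 + B*(w2^2+w3^2)   r12 = -(A*w3 + B*w1*w2)   r13 = A*w2 - B*w1*w3
+            //   r21 = A*w3 - B*w1*w2      r22 = 1 + B*(w1^2+w3^2)   r23 = -(A*w1 + B*w2*w3)
+            //   r31 = -(A*w2 + B*w1*w3)   r32 = A*w1 - B*w2*w3      r33 = 1 + B*(w1^2+w2^2)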
= t1 - t28 + t30 + t33;
+            // t21 = t10 * t11 * w1 = sin(theta) / theta * w1
+            //     = A*w1
+            double t21 = t10 * t11 * w1;
+            // t22 = t13 * t14 * w2 * w3 = (cos(theta) - 1) / theta^2 * w2 * w3
+            //     = B*w2*w3
+            double t22 = t13 * t14 * w2 * w3;
+            // t45 = t5 + t7
+            //     = w1^2 + w3^2
+            double t45 = t5 + t7;
+            // t53 = t16 - t17
+            //     = A*w3 - B*w1*w2 = r21
+            double t53 = t16 - t17;
+            // t54 = r21*X1
+            double t54 = X1 * t53;
+            // t55 = A*w1 + B*w2*w3
+            //     = -r23
+            double t55 = t21 + t22;
+            // t56 = -r23*Z1
+            double t56 = Z1 * t55;
+            // t57 = t13 * t14 * t45 = (cos(theta) - 1) / theta^2 * (w1^2 + w3^2)
+            //     = B*(w1^2+w3^2)
+            double t57 = t13 * t14 * t45;
+            // t58 = B*(w1^2+w3^2) + 1.0
+            //     = r22
+            double t58 = t57 + 1.0;
+            // t59 = r22*Y1
+            double t59 = Y1 * t58;
+            // t18 = t2 + t54 - t56 + t59
+            //     = t2 + r21*X1 + r22*Y1 + r23*Z1
+            double t18 = t2 + t54 - t56 + t59;
+            // t34 = t5 + t6
+            //     = w1^2+w2^2
+            double t34 = t5 + t6;
+            // t38 = t19 + t20 = A*w2+B*w1*w3
+            //     = -r31
+            double t38 = t19 + t20;
+            // t39 = -r31*X1
+            double t39 = X1 * t38;
+            // t40 = A*w1 - B*w2*w3
+            //     = r32
+            double t40 = t21 - t22;
+            // t41 = r32*Y1
+            double t41 = Y1 * t40;
+            // t42 = t13 * t14 * t34 = (cos(theta) - 1) / theta^2 * (w1^2+w2^2)
+            //     = B*(w1^2+w2^2)
+            double t42 = t13 * t14 * t34;
+            // t43 = B*(w1^2+w2^2) + 1
+            //     = r33
+            double t43 = t42 + 1.0;
+            // t44 = r33*Z1
+            double t44 = Z1 * t43;
+            // t23 = t3 - t39 + t41 + t44 = t3 + r31*X1 + r32*Y1 + r33*Z1
+            double t23 = t3 - t39 + t41 + t44;
+            // t25 = 1.0 / pow(theta^2, 3.0 / 2.0)
+            //     = 1 / theta^3
+            double t25 = 1.0 / pow(t8, 3.0 / 2.0);
+            // t26 = 1.0 / (t8 * t8)
+            //     = 1 / theta^4
+            double t26 = 1.0 / (t8 * t8);
+            // t35 = t12 * t14 * w1 * w2 = cos(theta) / theta^2 * w1 * w2
+            // let E = cos(theta) / theta^2 = t12*t14
+            // t35 = E*w1*w2
+            double t35 = t12 * t14 * w1 * w2;
+            // t36 = t5 * t10 * t25 * w3 = w1^2 * sin(theta) / theta^3 * w3
+            // let F = sin(theta) / theta^3 = t10*t25
+            // t36 = F*w1^2*w3
+            double t36 = t5 * t10 * t25 * w3;
+            // t37 = t5 * t13 * t26 * w3 * 2.0 = 2 * w1^2 * (cos(theta) - 1) / theta^4 * w3
+            // let G = (cos(theta) - 1) / theta^4 = t13*t26
+            // t37 = 2*G*w1^2*w3
+            double t37 = t5 * t13 * t26 * w3 * 2.0;
+            // t46 = t10 * t25 * w1 * w3 = sin(theta) / theta^3 * w1 * w3 = F*w1*w3
+            double t46 = t10 * t25 * w1 * w3;
+            // t47 = t5 * t10 * t25 * w2 = w1^2 * sin(theta) / theta^3 * w2 = F*w1^2*w2
+            double t47 = t5 * t10 * t25 * w2;
+            // t48 = t5 * t13 * t26 * w2 * 2.0 = 2 * w1^2 * (cos(theta) - 1) / theta^4 * w2 = 2*G*w1^2*w2
+            double t48 = t5 * t13 * t26 * w2 * 2.0;
+            // t49 = t10 * t11 = sin(theta) / theta = A
+            double t49 = t10 * t11;
+            // t50 = t5 * t12 * t14 = w1^2 * cos(theta) / theta^2 = E*w1^2
+            double t50 = t5 * t12 * t14;
+            // t51 = t13 * t26 * w1 * w2 * w3 * 2.0 = (cos(theta) - 1) / theta^4 * w1 * w2 * w3 * 2 = 2*G*w1*w2*w3
+            double t51 = t13 * t26 * w1 * w2 * w3 * 2.0;
+            // t52 = t10 * t25 * w1 * w2 * w3 = sin(theta) / theta^3 * w1 * w2 * w3 = F*w1*w2*w3
+            double t52 = t10 * t25 * w1 * w2 * w3;
+            // t60 = t15 * t15 = (t1 + r11*X1 + r12*Y1 + r13*Z1)^2
+            double t60 = t15 * t15;
+            // t61 = t18 * t18 = (t2 + r21*X1 + r22*Y1 + r23*Z1)^2
+            double t61 = t18 * t18;
+            // t62 = t23 * t23 = (t3 + r31*X1 + r32*Y1 + r33*Z1)^2
+            double t62 = t23 * t23;
+            // t63 = t60 + t61 + t62 = (t1 + r11*X1 + r12*Y1 + r13*Z1)^2 + (t2 + r21*X1 + r22*Y1 + r23*Z1)^2 + (t3 + r31*X1 + r32*Y1 + r33*Z1)^2
+            double t63 = t60 + t61 + t62;
+            // t64 = t5 * t10 * t25 = w1^2 * sin(theta) / theta^3 = F*w1^2
+            double t64 = t5 * t10 * t25;
+            // t65 = 1 / sqrt((t1 + r11*X1 + r12*Y1 + r13*Z1)^2
+ (t2 + r21*X1 + r22*Y1 + r23*Z1)^2 + (t3 + r31*X1 + r32*Y1 + r33*Z1)^2) + double t65 = 1.0 / sqrt(t63); + // t66 = Y1 * r2 * t6 = Y1 * r2 * w2^2 + double t66 = Y1 * r2 * t6; + // t67 = Z1 * r3 * t7 = Z1 * r3 * w3^2 + double t67 = Z1 * r3 * t7; + // t68 = r1 * t1 * t5 = r1 * t1 * w1^2 + double t68 = r1 * t1 * t5; + // t69 = r1 * t1 * t6 = r1 * t1 * w2^2 + double t69 = r1 * t1 * t6; + // t70 = r1 * t1 * t7 = r1 * t1 * w3^2 + double t70 = r1 * t1 * t7; + // t71 = r2 * t2 * t5 = r2 * t2 * w1^2 + double t71 = r2 * t2 * t5; + // t72 = r2 * t2 * t6 = r2 * t2 * w2^2 + double t72 = r2 * t2 * t6; + // t73 = r2 * t2 * t7 = r2 * t2 * w3^2 + double t73 = r2 * t2 * t7; + // t74 = r3 * t3 * t5 = r3 * t3 * w1^2 + double t74 = r3 * t3 * t5; + // t75 = r3 * t3 * t6 = r3 * t3 * w2^2 + double t75 = r3 * t3 * t6; + // t76 = r3 * t3 * t7 = r3 * t3 * w3^2 + double t76 = r3 * t3 * t7; + // t77 = X1 * r1 * t5 = X1 * r1 * w1^2 + double t77 = X1 * r1 * t5; + double t78 = X1 * r2 * w1 * w2; + double t79 = X1 * r3 * w1 * w3; + double t80 = Y1 * r1 * w1 * w2; + double t81 = Y1 * r3 * w2 * w3; + double t82 = Z1 * r1 * w1 * w3; + double t83 = Z1 * r2 * w2 * w3; + // t84 = X1 * r1 * t6 * t12 = X1 * r1 * w2^2 * cos(theta) + double t84 = X1 * r1 * t6 * t12; + // t85 = X1 * r1 * t7 * t12 = X1 * r1 * w3^2 * cos(theta) + double t85 = X1 * r1 * t7 * t12; + // t86 = Y1 * r2 * t5 * t12 = Y1 * r2 * w1^2 * cos(theta) + double t86 = Y1 * r2 * t5 * t12; + // t87 = Y1 * r2 * t7 * t12 = Y1 * r2 * w3^2 * cos(theta) + double t87 = Y1 * r2 * t7 * t12; + // t88 = Z1 * r3 * t5 * t12 = Z1 * r3 * w1^2 * cos(theta) + double t88 = Z1 * r3 * t5 * t12; + // t89 = Z1 * r3 * t6 * t12 = Z1 * r3 * w2^2 * cos(theta) + double t89 = Z1 * r3 * t6 * t12; + // t90 = X1 * r2 * t9 * t10 * w3 = X1 * r2 * theta * sin(theta) * w3 + double t90 = X1 * r2 * t9 * t10 * w3; + // t91 = Y1 * r3 * t9 * t10 * w1 = Y1 * r3 * theta * sin(theta) * w1 + double t91 = Y1 * r3 * t9 * t10 * w1; + // t92 = Z1 * r1 * t9 * t10 * w2 = Z1 * r1 * theta * sin(theta) * w2 + double t92 = Z1 * r1 * t9 * t10 * w2; + // t102 = X1 * r3 * t9 * t10 * w2 = X1 * r3 * theta * sin(theta) * w2 + double t102 = X1 * r3 * t9 * t10 * w2; + // t103 = Y1 * r1 * t9 * t10 * w3 = Y1 * r1 * theta * sin(theta) * w3 + double t103 = Y1 * r1 * t9 * t10 * w3; + // t104 = Z1 * r2 * t9 * t10 * w1 = Z1 * r2 * theta * sin(theta) * w1 + double t104 = Z1 * r2 * t9 * t10 * w1; + // t105 = X1 * r2 * t12 * w1 * w2 = X1 * r2 * cos(theta) * w1 * w2 + double t105 = X1 * r2 * t12 * w1 * w2; + // t106 = X1 * r3 * t12 * w1 * w3 = X1 * r3 * cos(theta) * w1 * w3 + double t106 = X1 * r3 * t12 * w1 * w3; + // t107 = Y1 * r1 * t12 * w1 * w2 = Y1 * r1 * cos(theta) * w1 * w2 + double t107 = Y1 * r1 * t12 * w1 * w2; + // t108 = Y1 * r3 * t12 * w2 * w3 = Y1 * r3 * cos(theta) * w2 * w3 + double t108 = Y1 * r3 * t12 * w2 * w3; + // t109 = Z1 * r1 * t12 * w1 * w3 = Z1 * r1 * cos(theta) * w1 * w3 + double t109 = Z1 * r1 * t12 * w1 * w3; + // t110 = Z1 * r2 * t12 * w2 * w3 = Z1 * r2 * cos(theta) * w2 * w3 + double t110 = Z1 * r2 * t12 * w2 * w3; + // t93 = t66 + t67 + t68 + t69 + // + t70 + t71 + t72 + t73 + t74 + // + t75 + t76 + t77 + t78 + t79 + // + t80 + t81 + t82 + t83 + t84 + // + t85 + t86 + t87 + t88 + t89 + // + t90 + t91 + t92 - t102 - t103 + // - t104 - t105 - t106 - t107 + // - t108 - t109 - t110 + // + // = (Y1 * r2 * w2^2) + (Z1 * r3 * w3^2) + (r1 * t1 * w1^2) + (r1 * t1 * w2^2) + // + (r1 * t1 * w3^2) + (r2 * t2 * w1^2) + (r2 * t2 * w2^2) + (r2 * t2 * w3^2) + (r3 * t3 * w1^2) + // + (r3 * t3 * w2^2) + (r3 * t3 * w3^2) + (X1 
* r1 * w1^2) + (X1 * r2 * w1 * w2) + (X1 * r3 * w1 * w3)
+            // + (Y1 * r1 * w1 * w2) + (Y1 * r3 * w2 * w3) + (Z1 * r1 * w1 * w3) + (Z1 * r2 * w2 * w3) + (X1 * r1 * w2^2 * cos(theta))
+            // + (X1 * r1 * w3^2 * cos(theta)) + (Y1 * r2 * w1^2 * cos(theta)) + (Y1 * r2 * w3^2 * cos(theta)) + (Z1 * r3 * w1^2 * cos(theta)) + (Z1 * r3 * w2^2 * cos(theta))
+            // + (X1 * r2 * theta * sin(theta) * w3) + (Y1 * r3 * theta * sin(theta) * w1) + (Z1 * r1 * theta * sin(theta) * w2) - (X1 * r3 * theta * sin(theta) * w2) - (Y1 * r1 * theta * sin(theta) * w3)
+            // - (Z1 * r2 * theta * sin(theta) * w1) - (X1 * r2 * cos(theta) * w1 * w2) - (X1 * r3 * cos(theta) * w1 * w3) - (Y1 * r1 * cos(theta) * w1 * w2)
+            // - (Y1 * r3 * cos(theta) * w2 * w3) - (Z1 * r1 * cos(theta) * w1 * w3) - (Z1 * r2 * cos(theta) * w2 * w3)
+            double t93 =
+                t66 + t67 + t68 + t69 + t70 + t71 + t72 + t73 + t74 + t75 + t76 + t77 + t78 + t79 + t80 + t81 + t82 +
+                t83 + t84 + t85 + t86 + t87 + t88 + t89 + t90 + t91 + t92 - t102 - t103 - t104 - t105 - t106 - t107 -
+                t108 - t109 - t110;
+            // t94 = sin(theta)/theta^3* w1 * w2 = F*w1*w2
+            double t94 = t10 * t25 * w1 * w2;
+            // t95 = w2^2*sin(theta)/theta^3*w3 = F*w2^2*w3
+            double t95 = t6 * t10 * t25 * w3;
+            // t96 = 2 * w2^2 * (cos(theta) - 1) / theta^4 * w3 = 2*G*w2^2*w3
+            double t96 = t6 * t13 * t26 * w3 * 2.0;
+            // t97 = cos(theta) / theta^2 * w2 * w3 = E*w2*w3
+            double t97 = t12 * t14 * w2 * w3;
+            // t98 = w2^2 * sin(theta) / theta^3 * w1 = F*w2^2*w1
+            double t98 = t6 * t10 * t25 * w1;
+            // t99 = 2 * w2^2 * (cos(theta) - 1) / theta^4 * w1 = 2*G*w2^2*w1
+            double t99 = t6 * t13 * t26 * w1 * 2.0;
+            // t100 = w2^2 * sin(theta) / theta^3 = F*w2^2
+            double t100 = t6 * t10 * t25;
+            // t101 = 1.0 / pow(t63, 3.0 / 2.0)
+            //      = 1 / ((t1 + r11*X1 + r12*Y1 + r13*Z1)^2 + (t2 + r21*X1 + r22*Y1 + r23*Z1)^2 + (t3 + r31*X1 + r32*Y1 + r33*Z1)^2)^(3/2), i.e. 1/norm^3
+            double t101 = 1.0 / pow(t63, 3.0 / 2.0);
+            // t111 = w2^2 * cos(theta) / theta^2 = E*w2^2
+            double t111 = t6 * t12 * t14;
+            // t112 = sin(theta) / theta^3 *w2 * w3 = F*w2*w3
+            double t112 = t10 * t25 * w2 * w3;
+            // t113 = cos(theta) / theta^2 * w1 * w3 = E*w1*w3
+            double t113 = t12 * t14 * w1 * w3;
+            // t114 = w3^2 * sin(theta) / theta^3 * w2 = F*w3^2*w2
+            double t114 = t7 * t10 * t25 * w2;
+            // t115 = t7 * t13 * t26 * w2 * 2.0 = 2*w3^2*(cos(theta) - 1) / theta^4*w2 = 2*G*w3^2*w2
+            double t115 = t7 * t13 * t26 * w2 * 2.0;
+            // t116 = w3^2 * sin(theta) / theta^3 * w1 = F*w3^2*w1
+            double t116 = t7 * t10 * t25 * w1;
+            // t117 = 2 * w3^2 * (cos(theta) - 1) / theta^4 * w1 = 2*G*w3^2*w1
+            double t117 = t7 * t13 * t26 * w1 * 2.0;
+            // t118 = E*w3^2
+            double t118 = t7 * t12 * t14;
+            // t119 = 2*w1*G*(w2^2+w3^2)
+            double t119 = t13 * t24 * t26 * w1 * 2.0;
+            // t120 = F*w1*(w2^2+w3^2)
+            double t120 = t10 * t24 * t25 * w1;
+            // t121 = (2*G+F)*w1*(w2^2+w3^2)
+            double t121 = t119 + t120;
+            // t122 = 2*G*w1*(w1^2+w2^2)
+            double t122 = t13 * t26 * t34 * w1 * 2.0;
+            // t123 = F*w1*(w1^2+w2^2)
+            double t123 = t10 * t25 * t34 * w1;
+            // t131 = 2*(cos(theta) - 1) / theta^2*w1 = 2*B*w1
+            double t131 = t13 * t14 * w1 * 2.0;
+            // t124 = t122 + t123 - t131 = 2*G*w1*(w1^2+w2^2)+F*w1*(w1^2+w2^2)-2*B*w1
+            //      = (2*G+F)*w1*(w1^2+w2^2)-2*B*w1
+            double t124 = t122 + t123 - t131;
+            // t139 = t13 * t14 * w3 = B*w3
+            double t139 = t13 * t14 * w3;
+            // t125 = -t35 + t36 + t37 + t94 - t139 = -E*w1*w2+F*w1^2*w3+2*G*w1^2*w3+F*w1*w2-B*w3
+            double t125 = -t35 + t36 + t37 + t94 - t139;
+            // t126 = (-E*w1*w2+F*w1^2*w3+2*G*w1^2*w3+F*w1*w2-B*w3)*X1
+            double t126 = X1 * t125;
+            // t127 = t49 + t50 + t51 + t52 - t64 = A + E*w1^2 + (2*G+F)*w1*w2*w3 - F*w1^2
+            double t127 = t49 + t50 + t51 + t52 - t64;
+            // t128 = (A + E*w1^2 + (2*G+F)*w1*w2*w3 - F*w1^2)*Y1
+            double t128 = Y1 * t127;
+            // t129 = (-E*w1*w2+F*w1^2*w3+2*G*w1^2*w3+F*w1*w2-B*w3)*X1 + (A + E*w1^2 + (2*G+F)*w1*w2*w3 - F*w1^2)*Y1 - ((2*G+F)*w1*(w1^2+w2^2)-2*B*w1)*Z1
+            double t129 = t126 + t128 - Z1 * t124;
+            // t130 = 2 * (t3 + r31*X1 + r32*Y1 + r33*Z1)
+            //          * ((-E*w1*w2+F*w1^2*w3+2*G*w1^2*w3+F*w1*w2-B*w3)*X1 + (A + E*w1^2 + (2*G+F)*w1*w2*w3 - F*w1^2)*Y1 - ((2*G+F)*w1*(w1^2+w2^2)-2*B*w1)*Z1)
+            double t130 = t23 * t129 * 2.0;
+            // t132 = t13 * t26 * t45 * w1 * 2.0 = 2*w1*G*(w1^2+w3^2)
+            double t132 = t13 * t26 * t45 * w1 * 2.0;
+            // t133 = t10 * t25 * t45 * w1 = F*w1*(w1^2+w3^2)
+            double t133 = t10 * t25 * t45 * w1;
+            // t138 = t13 * t14 * w2 = B*w2
+            double t138 = t13 * t14 * w2;
+            // t134 = -t46 + t47 + t48 + t113 - t138 = -F*w1*w3+F*w1^2*w2+2*G*w1^2*w2+E*w1*w3-B*w2
+            double t134 = -t46 + t47 + t48 + t113 - t138;
+            // t135 = (-F*w1*w3+F*w1^2*w2+2*G*w1^2*w2+E*w1*w3-B*w2)*X1
+            double t135 = X1 * t134;
+            // t136 = -t49 - t50 + t51 + t52 + t64 = -A-E*w1^2+2*G*w1*w2*w3+F*w1*w2*w3+F*w1^2
+            double t136 = -t49 - t50 + t51 + t52 + t64;
+            // t137 = (-A-E*w1^2+2*G*w1*w2*w3+F*w1*w2*w3+F*w1^2)*Z1
+            double t137 = Z1 * t136;
+
+            double t140 = X1 * s1 * t5;
+            double t141 = Y1 * s2 * t6;
+            double t142 = Z1 * s3 * t7;
+            double t143 = s1 * t1 * t5;
+            double t144 = s1 * t1 * t6;
+            double t145 = s1 * t1 * t7;
+            double t146 = s2 * t2 * t5;
+            double t147 = s2 * t2 * t6;
+            double t148 = s2 * t2 * t7;
+            double t149 = s3 * t3 * t5;
+            double t150 = s3 * t3 * t6;
+            double t151 = s3 * t3 * t7;
+            double t152 = X1 * s2 * w1 * w2;
+            double t153 = X1 * s3 * w1 * w3;
+            double t154 = Y1 * s1 * w1 * w2;
+            double t155 = Y1 * s3 * w2 * w3;
+            double t156 = Z1 * s1 * w1 * w3;
+            double t157 = Z1 * s2 * w2 * w3;
+            // t12 = cos(theta)
+            double t158 = X1 * s1 * t6 * t12;
+            double t159 = X1 * s1 * t7 * t12;
+            double t160 = Y1 * s2 * t5 * t12;
+            double t161 = Y1 * s2 * t7 * t12;
+            double t162 = Z1 * s3 * t5 * t12;
+            double t163 = Z1 * s3 * t6 * t12;
+            // t9 = theta, t10 = sin(theta)
+            double t164 = X1 * s2 * t9 * t10 * w3;
+            double t165 = Y1 * s3 * t9 * t10 * w1;
+            double t166 = Z1 * s1 * t9 * t10 * w2;
+            double t183 = X1 * s3 * t9 * t10 * w2;
+            double t184 = Y1 * s1 * t9 * t10 * w3;
+            double t185 = Z1 * s2 * t9 * t10 * w1;
+            // t12 = cos(theta)
+            double t186 = X1 * s2 * t12 * w1 * w2;
+            double t187 = X1 * s3 * t12 * w1 * w3;
+            double t188 = Y1 * s1 * t12 * w1 * w2;
+            double t189 = Y1 * s3 * t12 * w2 * w3;
+            double t190 = Z1 * s1 * t12 * w1 * w3;
+            double t191 = Z1 * s2 * t12 * w2 * w3;
+            // t167 = t140 + t141 + t142 + t143 + t144
+            //      + t145 + t146 + t147 + t148 + t149
+            //      + t150 + t151 + t152 + t153 + t154
+            //      + t155 + t156 + t157 + t158 + t159
+            //      + t160 + t161 + t162 + t163 + t164
+            //      + t165 + t166 - t183 - t184 - t185
+            //      - t186 - t187 - t188
+            //      - t189 - t190 - t191
+            //
+            // = (X1 * s1 * t5) + (Y1 * s2 * t6) + (Z1 * s3 * t7) + (s1 * t1 * t5) + (s1 * t1 * t6)
+            // + (s1 * t1 * t7) + (s2 * t2 * t5) + (s2 * t2 * t6) + (s2 * t2 * t7) + (s3 * t3 * t5)
+            // + (s3 * t3 * t6) + (s3 * t3 * t7) + (X1 * s2 * w1 * w2) + (X1 * s3 * w1 * w3) + (Y1 * s1 * w1 * w2)
+            // + (Y1 * s3 * w2 * w3) + (Z1 * s1 * w1 * w3) + (Z1 * s2 * w2 * w3) + (X1 * s1 * t6 * t12) + (X1 * s1 * t7 * t12)
+            // + (Y1 * s2 * t5 * t12) + (Y1 * s2 * t7 * t12) + (Z1 * s3 * t5 * t12) + (Z1 * s3 * t6 * t12) + (X1 * s2 * t9 * t10 * w3)
+            // + (Y1 * s3 * t9 * t10 * w1) + (Z1 * s1 * t9 * t10 * w2) - (X1 * s3 * t9 * t10 * w2) - (Y1 * s1 * t9 * t10 * w3) - (Z1 * s2 * t9 * t10 * w1)
+            // - (X1 * s2 * t12 * w1 * w2) - (X1 * s3 * t12 * w1 * w3) - (Y1 * s1 * t12 * w1 * w2)
+            // - (Y1 * s3 * t12 * w2 * w3) - (Z1 * s1 * t12 * w1 * w3) - (Z1 * s2 * t12 * w2 * w3)
+            double t167 =
+                t140 + t141 + t142 + t143 + t144 + t145 + t146 + t147 + t148 + t149 + t150 + t151 + t152 + t153 + t154 +
+                t155 + t156 + t157 + t158 + t159 + t160 + t161 + t162 + t163 + t164 + t165 + t166 - t183 - t184 - t185 -
+                t186 - t187 - t188 - t189 - t190 - t191;
+            // t168 = t13 * t26 * t45 * w2 * 2.0 = 2*G*w2*(w1^2+w3^2)
+            double t168 = t13 * t26 * t45 * w2 * 2.0;
+            // t169 = t10 * t25 * t45 * w2 = F*w2*(w1^2+w3^2)
+            double t169 = t10 * t25 * t45 * w2;
+            // t170 = t168 + t169 = (2*G+F)*w2*(w1^2+w3^2)
+            double t170 = t168 + t169;
+            // t171 = t13 * t26 * t34 * w2 * 2.0 = 2*G*w2*(w1^2+w2^2)
+            double t171 = t13 * t26 * t34 * w2 * 2.0;
+            // t172 = t10 * t25 * t34 * w2 = F*w2*(w1^2+w2^2)
+            double t172 = t10 * t25 * t34 * w2;
+            // t176 = t13 * t14 * w2 * 2.0 = 2*B*w2
+            double t176 = t13 * t14 * w2 * 2.0;
+            // t173 = t171 + t172 - t176 = (2*G+F)*w2*(w1^2+w2^2) - 2*B*w2
+            double t173 = t171 + t172 - t176;
+            // t174 = -t49 + t51 + t52 + t100 - t111 = -A+2*G*w1*w2*w3+F*w1*w2*w3+F*w2^2-E*w2^2
+            double t174 = -t49 + t51 + t52 + t100 - t111;
+            // t175 = X1 * t174 = (-A+2*G*w1*w2*w3+F*w1*w2*w3+F*w2^2-E*w2^2)*X1
+            double t175 = X1 * t174;
+            // t177 = t13 * t24 * t26 * w2 * 2.0 = 2*w2*G*(w2^2+w3^2)
+            double t177 = t13 * t24 * t26 * w2 * 2.0;
+            // t178 = t10 * t24 * t25 * w2 = F*w2*(w2^2+w3^2)
+            double t178 = t10 * t24 * t25 * w2;
+            // t192 = t13 * t14 * w1 = B*w1
+            double t192 = t13 * t14 * w1;
+            // t179 = -t97 + t98 + t99 + t112 - t192 = -E*w2*w3+F*w2^2*w1+2*G*w2^2*w1+F*w2*w3-B*w1
+            double t179 = -t97 + t98 + t99 + t112 - t192;
+            // t180 = (-E*w2*w3+F*w2^2*w1+2*G*w2^2*w1+F*w2*w3-B*w1)*Y1
+            double t180 = Y1 * t179;
+            // t181 = A+2*G*w1*w2*w3+F*w1*w2*w3-F*w2^2+E*w2^2
+            double t181 = t49 + t51 + t52 - t100 + t111;
+            // t182 = (A+2*G*w1*w2*w3+F*w1*w2*w3-F*w2^2+E*w2^2)*Z1
+            double t182 = Z1 * t181;
+            // t193 = 2*G*w3*(w1^2+w2^2)
+            double t193 = t13 * t26 * t34 * w3 * 2.0;
+            // t194 = F*w3*(w1^2+w2^2)
+            double t194 = t10 * t25 * t34 * w3;
+            // t195 = (2*G+F)*w3*(w1^2+w2^2)
+            double t195 = t193 + t194;
+            // t196 = 2*G*w3*(w1^2+w3^2)
+            double t196 = t13 * t26 * t45 * w3 * 2.0;
+            // t197 = F*w3*(w1^2+w3^2)
+            double t197 = t10 * t25 * t45 * w3;
+            // t200 = 2*B*w3
+            double t200 = t13 * t14 * w3 * 2.0;
+            // t198 = (2*G+F)*w3*(w1^2+w3^2) - 2*B*w3
+            double t198 = t196 + t197 - t200;
+            // t199 = F*w3^2
+            double t199 = t7 * t10 * t25;
+            // t201 = 2*w3*G*(w2^2+w3^2)
+            double t201 = t13 * t24 * t26 * w3 * 2.0;
+            // t202 = F*w3*(w2^2+w3^2)
+            double t202 = t10 * t24 * t25 * w3;
+            // t203 = -t49 + t51 + t52 - t118 + t199 = -A+2*G*w1*w2*w3+F*w1*w2*w3-E*w3^2+F*w3^2
+            double t203 = -t49 + t51 + t52 - t118 + t199;
+            // t204 = (-A+2*G*w1*w2*w3+F*w1*w2*w3-E*w3^2+F*w3^2)*Y1
+            double t204 = Y1 * t203;
+            // t205 = 2*t1
+            double t205 = t1 * 2.0;
+            // t206 = 2*r13*Z1
+            double t206 = Z1 * t29 * 2.0;
+            // t207 = 2*r11*X1
+            double t207 = X1 * t32 * 2.0;
+            // t208 = 2*t1 + 2*r11*X1 + 2*r12*Y1 + 2*r13*Z1
+            double t208 = t205 + t206 + t207 - Y1 * t27 * 2.0;
+            // t209 = 2*t2
+            double t209 = t2 * 2.0;
+            // t210 = 2*r21*X1
+            double t210 = X1 * t53 * 2.0;
+            // t211 = 2*r22*Y1
+            double t211 = Y1 * t58 * 2.0;
+            // t212 = 2*t2 + 2*r21*X1 + 2*r22*Y1 + 2*r23*Z1
+            double t212 =
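+            // Sketch (our annotation, assuming P = R*X1 + t is the point in camera coordinates):
+            // t208, t212 and t216 are just 2*P1, 2*P2 and 2*P3, and the translation columns follow
+            // the quotient rule d/dt_k ( r.P/||P|| ) = r_k/||P|| - (r.P)*P_k/||P||^3, which with
+            // t65 = 1/||P||, t93 = theta^2*(r.P) (t167 being the s-analogue), t101 = 1/||P||^3 and
+            // t14 = 1/theta^2 gives jacs(0, 3+k) = r_k*t65 - t14*t93*t101*(2*P_k)*(1/2).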
t209 + t210 + t211 - Z1 * t55 * 2.0; + + double t213 = t3 * 2.0; + double t214 = Y1 * t40 * 2.0; + double t215 = Z1 * t43 * 2.0; + double t216 = t213 + t214 + t215 - X1 * t38 * 2.0; + + // + jacs(0, 0) = t14 * t65 * (X1 * r1 * w1 * 2.0 + X1 * r2 * w2 + X1 * r3 * w3 + Y1 * r1 * w2 + Z1 * r1 * w3 + + r1 * t1 * w1 * 2.0 + r2 * t2 * w1 * 2.0 + r3 * t3 * w1 * 2.0 + Y1 * r3 * t5 * t12 + + Y1 * r3 * t9 * t10 - Z1 * r2 * t5 * t12 - Z1 * r2 * t9 * t10 - X1 * r2 * t12 * w2 - + X1 * r3 * t12 * w3 - Y1 * r1 * t12 * w2 + Y1 * r2 * t12 * w1 * 2.0 - + Z1 * r1 * t12 * w3 + Z1 * r3 * t12 * w1 * 2.0 + Y1 * r3 * t5 * t10 * t11 - + Z1 * r2 * t5 * t10 * t11 + X1 * r2 * t12 * w1 * w3 - X1 * r3 * t12 * w1 * w2 - + Y1 * r1 * t12 * w1 * w3 + Z1 * r1 * t12 * w1 * w2 - Y1 * r1 * t10 * t11 * w1 * w3 + + Z1 * r1 * t10 * t11 * w1 * w2 - X1 * r1 * t6 * t10 * t11 * w1 - + X1 * r1 * t7 * t10 * t11 * w1 + X1 * r2 * t5 * t10 * t11 * w2 + + X1 * r3 * t5 * t10 * t11 * w3 + Y1 * r1 * t5 * t10 * t11 * w2 - + Y1 * r2 * t5 * t10 * t11 * w1 - Y1 * r2 * t7 * t10 * t11 * w1 + + Z1 * r1 * t5 * t10 * t11 * w3 - Z1 * r3 * t5 * t10 * t11 * w1 - + Z1 * r3 * t6 * t10 * t11 * w1 + X1 * r2 * t10 * t11 * w1 * w3 - + X1 * r3 * t10 * t11 * w1 * w2 + Y1 * r3 * t10 * t11 * w1 * w2 * w3 + + Z1 * r2 * t10 * t11 * w1 * w2 * w3) + - t26 * t65 * t93 * w1 * 2.0 + - t14 * t93 * t101 *(t130 + t15 *(-X1 * t121 + Y1 * (t46 +t47 +t48 -t13 *t14 *w2 -t12 *t14 *w1 *w3) +Z1 *(t35 +t36 +t37 -t13 *t14 *w3 -t10 *t25 *w1 *w2)) *2.0 + + t18 *(t135+ t137 - Y1 *(t132 +t133 -t13 *t14 *w1 *2.0)) *2.0) *(1.0 / 2.0); + + + jacs(0, 1) = t14 * t65 * (X1 * r2 * w1 + Y1 * r1 * w1 + Y1 * r2 * w2 * 2.0 + Y1 * r3 * w3 + Z1 * r2 * w3 + + r1 * t1 * w2 * 2.0 + r2 * t2 * w2 * 2.0 + r3 * t3 * w2 * 2.0 - X1 * r3 * t6 * t12 - + X1 * r3 * t9 * t10 + Z1 * r1 * t6 * t12 + Z1 * r1 * t9 * t10 + + X1 * r1 * t12 * w2 * 2.0 - X1 * r2 * t12 * w1 - Y1 * r1 * t12 * w1 - + Y1 * r3 * t12 * w3 - Z1 * r2 * t12 * w3 + Z1 * r3 * t12 * w2 * 2.0 - + X1 * r3 * t6 * t10 * t11 + Z1 * r1 * t6 * t10 * t11 + X1 * r2 * t12 * w2 * w3 - + Y1 * r1 * t12 * w2 * w3 + Y1 * r3 * t12 * w1 * w2 - Z1 * r2 * t12 * w1 * w2 - + Y1 * r1 * t10 * t11 * w2 * w3 + Y1 * r3 * t10 * t11 * w1 * w2 - + Z1 * r2 * t10 * t11 * w1 * w2 - X1 * r1 * t6 * t10 * t11 * w2 + + X1 * r2 * t6 * t10 * t11 * w1 - X1 * r1 * t7 * t10 * t11 * w2 + + Y1 * r1 * t6 * t10 * t11 * w1 - Y1 * r2 * t5 * t10 * t11 * w2 - + Y1 * r2 * t7 * t10 * t11 * w2 + Y1 * r3 * t6 * t10 * t11 * w3 - + Z1 * r3 * t5 * t10 * t11 * w2 + Z1 * r2 * t6 * t10 * t11 * w3 - + Z1 * r3 * t6 * t10 * t11 * w2 + X1 * r2 * t10 * t11 * w2 * w3 + + X1 * r3 * t10 * t11 * w1 * w2 * w3 + Z1 * r1 * t10 * t11 * w1 * w2 * w3) - + t26 * t65 * t93 * w2 * 2.0 - t14 * t93 * t101 * (t18 * + (Z1 * (-t35 + t94 + t95 + t96 - t13 * t14 * w3) - + Y1 * t170 + X1 * + (t97 + t98 + t99 - t13 * t14 * w1 - + t10 * t25 * w2 * w3)) * 2.0 + t15 * + (t180 + + t182 - + X1 * + (t177 + + t178 - + t13 * + t14 * + w2 * + 2.0)) * + 2.0 + + t23 * (t175 + Y1 * (t35 - t94 + t95 + t96 - + t13 * t14 * w3) - Z1 * t173) * + 2.0) * (1.0 / 2.0); + jacs(0, 2) = t14 * t65 * (X1 * r3 * w1 + Y1 * r3 * w2 + Z1 * r1 * w1 + Z1 * r2 * w2 + Z1 * r3 * w3 * 2.0 + + r1 * t1 * w3 * 2.0 + r2 * t2 * w3 * 2.0 + r3 * t3 * w3 * 2.0 + X1 * r2 * t7 * t12 + + X1 * r2 * t9 * t10 - Y1 * r1 * t7 * t12 - Y1 * r1 * t9 * t10 + + X1 * r1 * t12 * w3 * 2.0 - X1 * r3 * t12 * w1 + Y1 * r2 * t12 * w3 * 2.0 - + Y1 * r3 * t12 * w2 - Z1 * r1 * t12 * w1 - Z1 * r2 * t12 * w2 + + X1 * r2 * t7 * t10 * t11 - Y1 * r1 * t7 * t10 * t11 - X1 * r3 * t12 * w2 * w3 + + Y1 * r3 
* t12 * w1 * w3 + Z1 * r1 * t12 * w2 * w3 - Z1 * r2 * t12 * w1 * w3 + + Y1 * r3 * t10 * t11 * w1 * w3 + Z1 * r1 * t10 * t11 * w2 * w3 - + Z1 * r2 * t10 * t11 * w1 * w3 - X1 * r1 * t6 * t10 * t11 * w3 - + X1 * r1 * t7 * t10 * t11 * w3 + X1 * r3 * t7 * t10 * t11 * w1 - + Y1 * r2 * t5 * t10 * t11 * w3 - Y1 * r2 * t7 * t10 * t11 * w3 + + Y1 * r3 * t7 * t10 * t11 * w2 + Z1 * r1 * t7 * t10 * t11 * w1 + + Z1 * r2 * t7 * t10 * t11 * w2 - Z1 * r3 * t5 * t10 * t11 * w3 - + Z1 * r3 * t6 * t10 * t11 * w3 - X1 * r3 * t10 * t11 * w2 * w3 + + X1 * r2 * t10 * t11 * w1 * w2 * w3 + Y1 * r1 * t10 * t11 * w1 * w2 * w3) - + t26 * t65 * t93 * w3 * 2.0 - t14 * t93 * t101 * (t18 * (Z1 * (t46 - t113 + t114 + t115 - + t13 * t14 * w2) - Y1 * t198 + X1 * + (t49 + + t51 + + t52 + + t118 - + t7 * + t10 * + t25)) * + 2.0 + t23 * (X1 * (-t97 + t112 + t116 + t117 - + t13 * t14 * w1) + Y1 * (-t46 + + t113 + + t114 + + t115 - + t13 * + t14 * + w2) - + Z1 * t195) * 2.0 + t15 * (t204 + Z1 * + (t97 - + t112 + + t116 + + t117 - + t13 * + t14 * + w1) - + X1 * + (t201 + + t202 - + t13 * + t14 * w3 * + 2.0)) * + 2.0) * + (1.0 / 2.0); + + // = 1/2 * r1 * t65 + // - t14 * t93 + // * t101 * t208 + // = 1/2 * r1 * 1 / sqrt((t1 + r11*X1 + r12*Y1 + r13*Z1)^2 + (t2 + r21*X1 + r22*Y1 + r23*Z1)^2 + (t3 + r31*X1 + r32*Y1 + r33*Z1)^2) + // - 1/theta^2 * ((Y1 * r2 * w2^2) + (Z1 * r3 * w3^2) + (r1 * t1 * w1^2) + (r1 * t1 * w2^2) + // + (r1 * t1 * w3^2) + (r2 * t2 * w1^2) + (r2 * t2 * w2^2) + (r2 * t2 * w3^2) + (r3 * t3 * w1^2) + // + (r3 * t3 * w2^2) + (r3 * t3 * w3^2) + (X1 * r1 * w1^2) + (X1 * r2 * w1 * w2) + (X1 * r3 * w1 * w3) + // + (Y1 * r1 * w1 * w2) + (Y1 * r3 * w2 * w3) + (Z1 * r1 * w1 * w3) + (Z1 * r2 * w2 * w3) + (X1 * r1 * w2^2 * cos(theta)) + // + (X1 * r1 * w3^2 * cos(theta)) + (Y1 * r2 * w1^2 * cos(theta)) + (Y1 * r2 * w3^2 * cos(theta)) + (Z1 * r3 * w1^2 * cos(theta)) + (Z1 * r3 * w2^2 * cos(theta)) + // + (X1 * r2 * theta * sin(theta) * w3) + (Y1 * r3 * theta * sin(theta) * w1) + (Z1 * r1 * theta * sin(theta) * w2) - (X1 * r3 * theta * sin(theta) * w2) - (Y1 * r1 * theta * sin(theta) * w3) + // - (Z1 * r2 * theta * sin(theta) * w1) - (X1 * r2 * cos(theta) * w1 * w2) - (X1 * r3 * cos(theta) * w1 * w3) - (Y1 * r1 * cos(theta) * w1 * w2) - () + // - (Y1 * r3 * cos(theta) * w2 * w3) - (Z1 * r1 * cos(theta) * w1 * w3) - (Z1 * r2 * cos(theta) * w2 * w3)) + // * (pow(((t1 + r11*X1 + r12*Y1 + r13*Z1)^2 + (t2 + r21*X1 + r22*Y1 + r23*Z1)^2 + (t3 + r31*X1 + r32*Y1 + r33*Z1)^2), 3/2)) + // * (2*t1 + 2*r11*X1 + 2*r12*Y1 + 2*r13*Z1) + jacs(0, 3) = r1 * t65 - t14 * t93 * t101 * t208 * (1.0 / 2.0); + jacs(0, 4) = r2 * t65 - t14 * t93 * t101 * t212 * (1.0 / 2.0); + jacs(0, 5) = r3 * t65 - t14 * t93 * t101 * t216 * (1.0 / 2.0); + jacs(1, 0) = t14 * t65 * (X1 * s1 * w1 * 2.0 + X1 * s2 * w2 + X1 * s3 * w3 + Y1 * s1 * w2 + Z1 * s1 * w3 + + s1 * t1 * w1 * 2.0 + s2 * t2 * w1 * 2.0 + s3 * t3 * w1 * 2.0 + Y1 * s3 * t5 * t12 + + Y1 * s3 * t9 * t10 - Z1 * s2 * t5 * t12 - Z1 * s2 * t9 * t10 - X1 * s2 * t12 * w2 - + X1 * s3 * t12 * w3 - Y1 * s1 * t12 * w2 + Y1 * s2 * t12 * w1 * 2.0 - + Z1 * s1 * t12 * w3 + Z1 * s3 * t12 * w1 * 2.0 + Y1 * s3 * t5 * t10 * t11 - + Z1 * s2 * t5 * t10 * t11 + X1 * s2 * t12 * w1 * w3 - X1 * s3 * t12 * w1 * w2 - + Y1 * s1 * t12 * w1 * w3 + Z1 * s1 * t12 * w1 * w2 + X1 * s2 * t10 * t11 * w1 * w3 - + X1 * s3 * t10 * t11 * w1 * w2 - Y1 * s1 * t10 * t11 * w1 * w3 + + Z1 * s1 * t10 * t11 * w1 * w2 - X1 * s1 * t6 * t10 * t11 * w1 - + X1 * s1 * t7 * t10 * t11 * w1 + X1 * s2 * t5 * t10 * t11 * w2 + + X1 * s3 * t5 * t10 * t11 * w3 + Y1 * 
s1 * t5 * t10 * t11 * w2 - + Y1 * s2 * t5 * t10 * t11 * w1 - Y1 * s2 * t7 * t10 * t11 * w1 + + Z1 * s1 * t5 * t10 * t11 * w3 - Z1 * s3 * t5 * t10 * t11 * w1 - + Z1 * s3 * t6 * t10 * t11 * w1 + Y1 * s3 * t10 * t11 * w1 * w2 * w3 + + Z1 * s2 * t10 * t11 * w1 * w2 * w3) - t14 * t101 * t167 * (t130 + t15 * (Y1 * + (t46 + t47 + + t48 - t113 - + t138) + Z1 * + (t35 + + t36 + + t37 - + t94 - + t139) - + X1 * t121) * + 2.0 + t18 * + (t135 + t137 - + Y1 * (-t131 + + t132 + + t133)) * + 2.0) * + (1.0 / 2.0) - t26 * t65 * t167 * w1 * 2.0; + jacs(1, 1) = t14 * t65 * (X1 * s2 * w1 + Y1 * s1 * w1 + Y1 * s2 * w2 * 2.0 + Y1 * s3 * w3 + Z1 * s2 * w3 + + s1 * t1 * w2 * 2.0 + s2 * t2 * w2 * 2.0 + s3 * t3 * w2 * 2.0 - X1 * s3 * t6 * t12 - + X1 * s3 * t9 * t10 + Z1 * s1 * t6 * t12 + Z1 * s1 * t9 * t10 + + X1 * s1 * t12 * w2 * 2.0 - X1 * s2 * t12 * w1 - Y1 * s1 * t12 * w1 - + Y1 * s3 * t12 * w3 - Z1 * s2 * t12 * w3 + Z1 * s3 * t12 * w2 * 2.0 - + X1 * s3 * t6 * t10 * t11 + Z1 * s1 * t6 * t10 * t11 + X1 * s2 * t12 * w2 * w3 - + Y1 * s1 * t12 * w2 * w3 + Y1 * s3 * t12 * w1 * w2 - Z1 * s2 * t12 * w1 * w2 + + X1 * s2 * t10 * t11 * w2 * w3 - Y1 * s1 * t10 * t11 * w2 * w3 + + Y1 * s3 * t10 * t11 * w1 * w2 - Z1 * s2 * t10 * t11 * w1 * w2 - + X1 * s1 * t6 * t10 * t11 * w2 + X1 * s2 * t6 * t10 * t11 * w1 - + X1 * s1 * t7 * t10 * t11 * w2 + Y1 * s1 * t6 * t10 * t11 * w1 - + Y1 * s2 * t5 * t10 * t11 * w2 - Y1 * s2 * t7 * t10 * t11 * w2 + + Y1 * s3 * t6 * t10 * t11 * w3 - Z1 * s3 * t5 * t10 * t11 * w2 + + Z1 * s2 * t6 * t10 * t11 * w3 - Z1 * s3 * t6 * t10 * t11 * w2 + + X1 * s3 * t10 * t11 * w1 * w2 * w3 + Z1 * s1 * t10 * t11 * w1 * w2 * w3) - + t26 * t65 * t167 * w2 * 2.0 - t14 * t101 * t167 * (t18 * (X1 * (t97 + t98 + t99 - t112 - t192) + + Z1 * (-t35 + t94 + t95 + t96 - t139) - + Y1 * t170) * 2.0 + t15 * (t180 + t182 - + X1 * + (-t176 + t177 + + t178)) * 2.0 + + t23 * + (t175 + Y1 * (t35 - t94 + t95 + t96 - t139) - + Z1 * t173) * 2.0) * (1.0 / 2.0); + jacs(1, 2) = t14 * t65 * (X1 * s3 * w1 + Y1 * s3 * w2 + Z1 * s1 * w1 + Z1 * s2 * w2 + Z1 * s3 * w3 * 2.0 + + s1 * t1 * w3 * 2.0 + s2 * t2 * w3 * 2.0 + s3 * t3 * w3 * 2.0 + X1 * s2 * t7 * t12 + + X1 * s2 * t9 * t10 - Y1 * s1 * t7 * t12 - Y1 * s1 * t9 * t10 + + X1 * s1 * t12 * w3 * 2.0 - X1 * s3 * t12 * w1 + Y1 * s2 * t12 * w3 * 2.0 - + Y1 * s3 * t12 * w2 - Z1 * s1 * t12 * w1 - Z1 * s2 * t12 * w2 + + X1 * s2 * t7 * t10 * t11 - Y1 * s1 * t7 * t10 * t11 - X1 * s3 * t12 * w2 * w3 + + Y1 * s3 * t12 * w1 * w3 + Z1 * s1 * t12 * w2 * w3 - Z1 * s2 * t12 * w1 * w3 - + X1 * s3 * t10 * t11 * w2 * w3 + Y1 * s3 * t10 * t11 * w1 * w3 + + Z1 * s1 * t10 * t11 * w2 * w3 - Z1 * s2 * t10 * t11 * w1 * w3 - + X1 * s1 * t6 * t10 * t11 * w3 - X1 * s1 * t7 * t10 * t11 * w3 + + X1 * s3 * t7 * t10 * t11 * w1 - Y1 * s2 * t5 * t10 * t11 * w3 - + Y1 * s2 * t7 * t10 * t11 * w3 + Y1 * s3 * t7 * t10 * t11 * w2 + + Z1 * s1 * t7 * t10 * t11 * w1 + Z1 * s2 * t7 * t10 * t11 * w2 - + Z1 * s3 * t5 * t10 * t11 * w3 - Z1 * s3 * t6 * t10 * t11 * w3 + + X1 * s2 * t10 * t11 * w1 * w2 * w3 + Y1 * s1 * t10 * t11 * w1 * w2 * w3) - + t26 * t65 * t167 * w3 * 2.0 - t14 * t101 * t167 * (t18 * (Z1 * (t46 - t113 + t114 + t115 - t138) - + Y1 * t198 + + X1 * (t49 + t51 + t52 + t118 - t199)) * + 2.0 + t23 * + (X1 * (-t97 + t112 + t116 + t117 - t192) + + Y1 * (-t46 + t113 + t114 + t115 - t138) - + Z1 * t195) * 2.0 + t15 * (t204 + Z1 * + (t97 - + t112 + + t116 + + t117 - + t192) - + X1 * + (-t200 + t201 + + t202)) * + 2.0) * (1.0 / 2.0); + jacs(1, 3) = s1 * t65 - t14 * t101 * t167 * t208 * (1.0 / 2.0); + jacs(1, 4) = s2 * t65 - t14 
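+            // As we read the MLPnP formulation, (r1,r2,r3) and (s1,s2,s3) are the two null-space
+            // (tangent-space) basis vectors of the observed bearing, so row 0 of jacs differentiates
+            // the r-component of the unit-length prediction and row 1 the s-component; columns 0-2
+            // are taken w.r.t. the rotation parameters w1..w3, columns 3-5 w.r.t. the translation t1..t3.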
* t101 * t167 * t212 * (1.0 / 2.0);
+            jacs(1, 5) = s3 * t65 - t14 * t101 * t167 * t216 * (1.0 / 2.0);
 }
-}//End namespace ORB_SLAM2
+}//End namespace ORB_SLAM3
diff --git a/src/MapPoint.cc b/src/MapPoint.cc
index 2535f32..2a736f4 100644
--- a/src/MapPoint.cc
+++ b/src/MapPoint.cc
@@ -27,7 +27,10 @@ namespace ORB_SLAM3
 long unsigned int MapPoint::nNextId=0;
 mutex MapPoint::mGlobalMutex;
-MapPoint::MapPoint():
+/**
+ * @brief Constructor
+ */
+MapPoint::MapPoint():
     mnFirstKFid(0), mnFirstFrame(0), nObs(0), mnTrackReferenceForFrame(0), mnLastFrameSeen(0),
     mnBALocalForKF(0), mnFuseCandidateForKF(0), mnLoopPointForKF(0), mnCorrectedByKF(0), mnCorrectedReference(0),
     mnBAGlobalForKF(0), mnVisible(1), mnFound(1), mbBad(false),
@@ -36,6 +38,9 @@ MapPoint::MapPoint():
     mpReplaced = static_cast(NULL);
 }
+/**
+ * @brief Constructor
+ */
 MapPoint::MapPoint(const cv::Mat &Pos, KeyFrame *pRefKF, Map* pMap):
     mnFirstKFid(pRefKF->mnId), mnFirstFrame(pRefKF->mnFrameId), nObs(0), mnTrackReferenceForFrame(0),
     mnLastFrameSeen(0), mnBALocalForKF(0), mnFuseCandidateForKF(0), mnLoopPointForKF(0), mnCorrectedByKF(0),
@@ -56,6 +61,9 @@ MapPoint::MapPoint(const cv::Mat &Pos, KeyFrame *pRefKF, Map* pMap):
     mnId=nNextId++;
 }
+/**
+ * @brief Constructor
+ */
 MapPoint::MapPoint(const double invDepth, cv::Point2f uv_init, KeyFrame* pRefKF, KeyFrame* pHostKF, Map* pMap):
     mnFirstKFid(pRefKF->mnId), mnFirstFrame(pRefKF->mnFrameId), nObs(0), mnTrackReferenceForFrame(0),
     mnLastFrameSeen(0), mnBALocalForKF(0), mnFuseCandidateForKF(0), mnLoopPointForKF(0), mnCorrectedByKF(0),
@@ -77,6 +85,9 @@ MapPoint::MapPoint(const double invDepth, cv::Point2f uv_init, KeyFrame* pRefKF,
     mnId=nNextId++;
 }
+/**
+ * @brief Constructor
+ */
 MapPoint::MapPoint(const cv::Mat &Pos, Map* pMap, Frame* pFrame, const int &idxF):
     mnFirstKFid(-1), mnFirstFrame(pFrame->mnId), nObs(0), mnTrackReferenceForFrame(0), mnLastFrameSeen(0),
     mnBALocalForKF(0), mnFuseCandidateForKF(0),mnLoopPointForKF(0), mnCorrectedByKF(0),
@@ -120,6 +131,10 @@ MapPoint::MapPoint(const cv::Mat &Pos, Map* pMap, Frame* pFrame, const int &idxF
     mnId=nNextId++;
 }
+/**
+ * @brief Set the world position of this MapPoint
+ * @param Pos the position
+ */
 void MapPoint::SetWorldPos(const cv::Mat &Pos)
 {
     unique_lock lock2(mGlobalMutex);
@@ -128,12 +143,18 @@ void MapPoint::SetWorldPos(const cv::Mat &Pos)
     mWorldPosx = cv::Matx31f(Pos.at(0), Pos.at(1), Pos.at(2));
 }
+/**
+ * @brief Return the world position of this MapPoint
+ */
 cv::Mat MapPoint::GetWorldPos()
 {
     unique_lock lock(mMutexPos);
     return mWorldPos.clone();
 }
+/**
+ * @brief Get the mean viewing direction
+ */
 cv::Mat MapPoint::GetNormal()
 {
     unique_lock lock(mMutexPos);
@@ -242,7 +263,10 @@ std::map> MapPoint::GetObservations()
     return mObservations;
 }
-// 被观测到的相机数目,单目+1,双目或RGB-D则+2
+/**
+ * @brief Return the number of observations; a stereo frame counts twice, left and right counted separately
+ * @return nObs
+ */
 int MapPoint::Observations()
 {
     unique_lock lock(mMutexFeatures);
@@ -280,6 +304,10 @@ void MapPoint::SetBadFlag()
     mpMap->EraseMapPoint(this);
 }
+/**
+ * @brief Check whether this point has been replaced; Replace() does not update ordinary frames, which would hurt tracking of the next frame, so the replacement is recorded here
+ * @return the new point that replaced this one
+ */
 MapPoint* MapPoint::GetReplaced()
 {
     unique_lock lock1(mMutexFeatures);
@@ -329,9 +357,10 @@ void MapPoint::Replace(MapPoint* pMP)
         tuple indexes = mit -> second;
         int leftIndex = get<0>(indexes), rightIndex = get<1>(indexes);
-
+        // 2.1 Check whether the new point is already observed in pKF
         if(!pMP->IsInKeyFrame(pKF))
         {
+            // If not, hand the feature-to-MapPoint match over to the new point
            if(leftIndex != -1){
                pKF->ReplaceMapPointMatch(leftIndex, pMP);
                pMP->AddObservation(pKF,leftIndex);
            }
            if(rightIndex != -1){
                pKF->ReplaceMapPointMatch(rightIndex, pMP);
                pMP->AddObservation(pKF,rightIndex);
            }
         }
+        // If the new MapPoint is already in a keyframe observing the old one, the two collide:
+        // the goal was to merge old and new into a single point, but that would let one point match two features in the same keyframe, which is never allowed, so the old observation is erased and the new point is left untouched
         else
         {
            if(leftIndex != -1){
@@
-361,6 +392,9 @@ void MapPoint::Replace(MapPoint* pMP)
     mpMap->EraseMapPoint(this);
 }
+/**
+ * @brief Check whether this point has been flagged as bad
+ */
 bool MapPoint::isBad()
 {
     unique_lock lock1(mMutexFeatures,std::defer_lock);
@@ -370,19 +404,37 @@ bool MapPoint::isBad()
     return mbBad;
 }
+/**
+ * @brief Increase Visible
+ *
+ * Visible means:
+ * 1. this MapPoint is within the field of view of certain frames, as decided by Frame::isInFrustum()
+ * 2. it is observed by those frames, but not necessarily matched to one of their feature points
+ *    e.g. a MapPoint M lying inside the view of some frame F
+ *    does not imply that M can be matched to a feature point of F
+ */
 void MapPoint::IncreaseVisible(int n)
 {
     unique_lock lock(mMutexFeatures);
     mnVisible+=n;
 }
+/**
+ * @brief Increase Found
+ *
+ * Increase by n (default 1) the number of frames in which this point could actually be found
+ * @see Tracking::TrackLocalMap()
+ */
 void MapPoint::IncreaseFound(int n)
 {
     unique_lock lock(mMutexFeatures);
     mnFound+=n;
 }
-// 计算被找到的比例
+/**
+ * @brief Return found/visible
+ * @return mnFound/mnVisible
+ */
 float MapPoint::GetFoundRatio()
 {
     unique_lock lock(mMutexFeatures);
@@ -478,18 +530,25 @@ void MapPoint::ComputeDistinctiveDescriptors()
     {
         unique_lock lock(mMutexFeatures);
+        // The best descriptor has the smallest median distance to all the others;
+        // roughly speaking, the median stands for this descriptor's average distance to the rest,
+        // so the best descriptor is the one whose average distance to the others is smallest
         mDescriptor = vDescriptors[BestIdx].clone();
     }
 }
-//获取当前地图点的描述子
+/**
+ * @brief Return the descriptor
+ */
 cv::Mat MapPoint::GetDescriptor()
 {
     unique_lock lock(mMutexFeatures);
     return mDescriptor.clone();
 }
-//获取当前地图点在某个关键帧的观测中,对应的特征点的ID
+/**
+ * @brief Return the index of the feature point corresponding to this point in the keyframe
+ */
 tuple MapPoint::GetIndexInKeyFrame(KeyFrame *pKF)
 {
     unique_lock lock(mMutexFeatures);
@@ -499,6 +558,9 @@ tuple MapPoint::GetIndexInKeyFrame(KeyFrame *pKF)
     return tuple(-1,-1);
 }
+/**
+ * @brief return (mObservations.count(pKF));
+ */
 bool MapPoint::IsInKeyFrame(KeyFrame *pKF)
 {
     unique_lock lock(mMutexFeatures);
@@ -589,6 +651,10 @@ void MapPoint::UpdateNormalAndDepth()
     }
 }
+/**
+ * @brief Set the mean viewing direction
+ * @param normal the viewing direction
+ */
 void MapPoint::SetNormalVector(cv::Mat& normal)
 {
     unique_lock lock3(mMutexPos);
@@ -596,12 +662,18 @@ void MapPoint::SetNormalVector(cv::Mat& normal)
     mNormalVectorx = cv::Matx31f(mNormalVector.at(0), mNormalVector.at(1), mNormalVector.at(2));
 }
+/**
+ * @brief Return the minimum scale-invariance distance
+ */
 float MapPoint::GetMinDistanceInvariance()
 {
     unique_lock lock(mMutexPos);
     return 0.8f*mfMinDistance;
 }
+/**
+ * @brief Return the maximum scale-invariance distance
+ */
 float MapPoint::GetMaxDistanceInvariance()
 {
     unique_lock lock(mMutexPos);
diff --git a/src/ORBextractor.cc b/src/ORBextractor.cc
index a071631..6ba2e17 100644
--- a/src/ORBextractor.cc
+++ b/src/ORBextractor.cc
@@ -1640,7 +1640,7 @@ static void computeDescriptors(const Mat& image, vector& keypoints, Ma
     // 特征点本身直接乘缩放倍数就可以了
     keypoint->pt *= scale;
 }
-
+    // ?TODO vLappingArea
 if(keypoint->pt.x >= vLappingArea[0] && keypoint->pt.x <= vLappingArea[1]){
     _keypoints.at(stereoIndex) = (*keypoint);
     desc.row(i).copyTo(descriptors.row(stereoIndex));
diff --git a/src/OptimizableTypes.cpp b/src/OptimizableTypes.cpp
index 2d222f1..4f8485a 100644
--- a/src/OptimizableTypes.cpp
+++ b/src/OptimizableTypes.cpp
@@ -45,7 +45,9 @@ namespace ORB_SLAM3 {
         return os.good();
     }
-
+/**
+ * @brief Jacobian of the 2D pixel coordinates w.r.t. the pose: _jacobianOplusXi
+ */
     void EdgeSE3ProjectXYZOnlyPose::linearizeOplus() {
         g2o::VertexSE3Expmap * vi = static_cast(_vertices[0]);
         Eigen::Vector3d xyz_trans = vi->estimate().map(Xw);
@@ -88,7 +90,11 @@ namespace ORB_SLAM3 {
         return os.good();
     }
+/**
+ * @brief Jacobian of the right camera's 2D pixel coordinates w.r.t. the left camera's pose: _jacobianOplusXi
+ */
     void EdgeSE3ProjectXYZOnlyPoseToBody::linearizeOplus() {
+        // Get the 3D point in the right camera's coordinate frame
        g2o::VertexSE3Expmap * vi = static_cast(_vertices[0]);
        g2o::SE3Quat T_lw(vi->estimate());
        Eigen::Vector3d X_l = T_lw.map(Xw);
@@ -103,6 +109,12 @@ namespace ORB_SLAM3 {
                    -z_w , 0.f, x_w, 0.f, 1.f, 0.f,
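+        // Annotation (assuming g2o's (rotation | translation) ordering of the se(3) increment):
+        // SE3deriv is d(exp(xi)*P)/dxi evaluated at P = (x_w, y_w, z_w), i.e. the 3x6 block
+        // [ -[P]_x | I ], spelled out entry by entry in this initializer.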
y_w , -x_w , 0.f, 0.f, 0.f, 1.f;
+        /*
+        Note that the derivative is taken w.r.t. the Lie-algebra increment: rho_lw != t_lw,
+        so Pl = Rlw*Pw + tlw cannot be used directly.
+        Pl = Exp(xi_lw)*Pw and Pr = Rrl * Exp(xi_lw) * Pw + trl;
+        taking the Jacobian of Pr w.r.t. xi_lw
+        amounts to Rrl * (Jacobian of Pl w.r.t. xi_lw)
+        */
        _jacobianOplusXi = -pCamera->projectJac(X_r) * mTrl.rotation().toRotationMatrix() * SE3deriv;
    }
@@ -135,7 +147,9 @@ namespace ORB_SLAM3 {
        return os.good();
    }
-
+/**
+ * @brief Jacobians of the 2D pixel coordinates w.r.t. the pose (_jacobianOplusXj) and w.r.t. the map point's world position (_jacobianOplusXi)
+ */
    void EdgeSE3ProjectXYZ::linearizeOplus() {
        g2o::VertexSE3Expmap * vj = static_cast(_vertices[1]);
        g2o::SE3Quat T(vj->estimate());
@@ -148,7 +162,7 @@ namespace ORB_SLAM3 {
        double z = xyz_trans[2];
        auto projectJac = -pCamera->projectJac(xyz_trans);
-
+        // Pc = Rcw*Pw + tcw: the effect of a change of Pw on Pc is just Rcw, which is then left-multiplied by the effect of Pc on the pixel
        _jacobianOplusXi = projectJac * T.rotation().toRotationMatrix();
        Eigen::Matrix SE3deriv;
@@ -188,7 +202,9 @@ namespace ORB_SLAM3 {
        return os.good();
    }
-
+/**
+ * @brief Jacobians of the right camera's 2D pixel coordinates w.r.t. the pose (_jacobianOplusXj) and w.r.t. the map point's world position (_jacobianOplusXi)
+ */
    void EdgeSE3ProjectXYZToBody::linearizeOplus() {
        g2o::VertexSE3Expmap * vj = static_cast(_vertices[1]);
        g2o::SE3Quat T_lw(vj->estimate());
@@ -208,7 +224,12 @@ namespace ORB_SLAM3 {
        SE3deriv << 0.f, z, -y, 1.f, 0.f, 0.f,
                    -z , 0.f, x, 0.f, 1.f, 0.f,
                    y , -x , 0.f, 0.f, 0.f, 1.f;
-
+        /*
+        Note that the derivative is taken w.r.t. the Lie-algebra increment: rho_lw != t_lw,
+        so Pl = Rlw*Pw + tlw cannot be used directly.
+        Pl = Exp(xi_lw)*Pw and Pr = Rrl * Exp(xi_lw) * Pw + trl;
+        taking the Jacobian of Pr w.r.t. xi_lw
+        amounts to Rrl * (Jacobian of Pl w.r.t. xi_lw)
+        */
        _jacobianOplusXj = -pCamera->projectJac(X_r) * mTrl.rotation().toRotationMatrix() * SE3deriv;
    }
diff --git a/src/Optimizer.cc b/src/Optimizer.cc
index 15b91ed..74fbfab 100644
--- a/src/Optimizer.cc
+++ b/src/Optimizer.cc
@@ -476,13 +476,31 @@ void Optimizer::BundleAdjustment(const vector &vpKFs, const vectorGetMaxKFid();
    const vector vpKFs = pMap->GetAllKeyFrames();
    const vector vpMPs = pMap->GetAllMapPoints();
    // Setup optimizer
+    // 1. The classic sequence of steps for setting up a g2o optimizer
    g2o::SparseOptimizer optimizer;
    g2o::BlockSolverX::LinearSolverType * linearSolver;
@@ -500,8 +518,9 @@
    int nNonFixed = 0;
+    // 2. Set up the keyframe and bias vertices
    // Set KeyFrame vertices
-    KeyFrame* pIncKF;
+    KeyFrame* pIncKF;  // last keyframe in vpKFs whose id satisfies the bound
    for(size_t i=0; imnBALocalForKF>=(maxKFid-1)) || (pKFi->mnBAFixedForKF>=(maxKFid-1));
            if(!bFixed)
                nNonFixed++;
-            VP->setFixed(bFixed);
+            VP->setFixed(bFixed);  // fixed; this is probably related to loop closing
        }
        optimizer.addVertex(VP);
-
+        // for the keyframes from IMU initialization onwards, add a velocity vertex
        if(pKFi->bImu)
        {
            VertexVelocity* VV = new VertexVelocity(pKFi);
            VV->setId(maxKFid+3*(pKFi->mnId)+1);
            VV->setFixed(bFixed);
            optimizer.addVertex(VV);
+            // when priorA==0.f, bInit is false, i.e. bias vertices are also added per keyframe
            if (!bInit)
            {
                VertexGyroBias* VG = new VertexGyroBias(pKFi);
@@ -539,7 +559,7 @@
            }
        }
    }
-
+    // when priorA!=0.f, bInit is true: add the bias vertices (for pIncKF only)
    if (bInit)
    {
        VertexGyroBias* VG = new VertexGyroBias(pIncKF);
@@ -551,18 +571,19 @@
        VA->setFixed(false);
        optimizer.addVertex(VA);
    }
-
+    // TODO not fully understood yet; revisit after reading the loop-closing code
    if(bFixLocal)
    {
        if(nNonFixed<3)
            return;
    }
+    // 3.
添加关于imu的边 // IMU links for(size_t i=0;imPrevKF) { Verbose::PrintMess("NOT INERTIAL LINK TO PREVIOUS FRAME!", Verbose::VERBOSITY_NORMAL); @@ -573,9 +594,12 @@ void Optimizer::FullInertialBA(Map *pMap, int its, const bool bFixLocal, const l { if(pKFi->isBad() || pKFi->mPrevKF->mnId>maxKFid) continue; + // 这两个都必须为初始化后的关键帧 if(pKFi->bImu && pKFi->mPrevKF->bImu) { + // 3.1 根据上一帧的偏置设定当前帧的新偏置 pKFi->mpImuPreintegrated->SetNewBias(pKFi->mPrevKF->GetImuBias()); + // 3.2 提取节点 g2o::HyperGraph::Vertex* VP1 = optimizer.vertex(pKFi->mPrevKF->mnId); g2o::HyperGraph::Vertex* VV1 = optimizer.vertex(maxKFid+3*(pKFi->mPrevKF->mnId)+1); @@ -583,6 +607,7 @@ void Optimizer::FullInertialBA(Map *pMap, int its, const bool bFixLocal, const l g2o::HyperGraph::Vertex* VA1; g2o::HyperGraph::Vertex* VG2; g2o::HyperGraph::Vertex* VA2; + // 根据不同输入配置相应的偏置节点 if (!bInit) { VG1 = optimizer.vertex(maxKFid+3*(pKFi->mPrevKF->mnId)+2); @@ -617,7 +642,7 @@ void Optimizer::FullInertialBA(Map *pMap, int its, const bool bFixLocal, const l continue; } } - + // 3.3 设置边 EdgeInertial* ei = new EdgeInertial(pKFi->mpImuPreintegrated); ei->setVertex(0,dynamic_cast(VP1)); ei->setVertex(1,dynamic_cast(VV1)); @@ -628,10 +653,11 @@ void Optimizer::FullInertialBA(Map *pMap, int its, const bool bFixLocal, const l g2o::RobustKernelHuber* rki = new g2o::RobustKernelHuber; ei->setRobustKernel(rki); + // 9个自由度的卡方检验(0.05) rki->setDelta(sqrt(16.92)); optimizer.addEdge(ei); - + // 加了每一个关键帧的偏置时,还要优化两帧之间偏置的误差 if (!bInit) { EdgeGyroRW* egr= new EdgeGyroRW(); @@ -665,7 +691,7 @@ void Optimizer::FullInertialBA(Map *pMap, int its, const bool bFixLocal, const l } } } - + // 只加入pIncKF帧的偏置,优化偏置到0 if (bInit) { g2o::HyperGraph::Vertex* VG = optimizer.vertex(4*maxKFid+2); @@ -691,7 +717,7 @@ void Optimizer::FullInertialBA(Map *pMap, int its, const bool bFixLocal, const l const unsigned long iniMPid = maxKFid*5; vector vbNotIncludedMP(vpMPs.size(),false); - + // 5. 添加关于mp的节点与边,这段比较好理解,很传统的视觉上的重投影误差 for(size_t i=0; i>::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; @@ -719,7 +746,7 @@ void Optimizer::FullInertialBA(Map *pMap, int its, const bool bFixLocal, const l { const int leftIndex = get<0>(mit->second); cv::KeyPoint kpUn; - + // 添加边 if(leftIndex != -1 && pKFi->mvuRight[get<0>(mit->second)]<0) // Monocular observation { kpUn = pKFi->mvKeysUn[leftIndex]; @@ -822,7 +849,7 @@ void Optimizer::FullInertialBA(Map *pMap, int its, const bool bFixLocal, const l optimizer.initializeOptimization(); optimizer.optimize(its); - + // 5. 取出优化结果,对应的值赋值 // Recover optimized data //Keyframes for(size_t i=0; iIncreaseChangeIndex(); } - + /** + * @brief 位姿优化,纯视觉时使用。优化目标:单帧的位姿 + * @param pFrame 待优化的帧 + */ int Optimizer::PoseOptimization(Frame *pFrame) { // 该优化函数主要用于Tracking线程中:运动跟踪、参考帧跟踪、地图跟踪、重定位 @@ -1801,19 +1831,47 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, vectorIncreaseChangeIndex(); } + /** + * @brief Local Bundle Adjustment LocalMapping::Run() 使用,纯视觉 + * + * 1. Vertex: + * - g2o::VertexSE3Expmap(),LocalKeyFrames,即当前关键帧的位姿、与当前关键帧相连的关键帧的位姿 + * - g2o::VertexSE3Expmap(),FixedCameras,即能观测到LocalMapPoints的关键帧(并且不属于LocalKeyFrames)的位姿,在优化中这些关键帧的位姿不变 + * - g2o::VertexSBAPointXYZ(),LocalMapPoints,即LocalKeyFrames能观测到的所有MapPoints的位置 + * 2. 
Edge: + * - g2o::EdgeSE3ProjectXYZ(),BaseBinaryEdge + * + Vertex:关键帧的Tcw,MapPoint的Pw + * + measurement:MapPoint在关键帧中的二维位置(u,v) + * + InfoMatrix: invSigma2(与特征点所在的尺度有关) + * - g2o::EdgeStereoSE3ProjectXYZ(),BaseBinaryEdge + * + Vertex:关键帧的Tcw,MapPoint的Pw + * + measurement:MapPoint在关键帧中的二维位置(ul,v,ur) + * + InfoMatrix: invSigma2(与特征点所在的尺度有关) + * + * @param pKF KeyFrame + * @param pbStopFlag 是否停止优化的标志 + * @param pMap 在优化后,更新状态时需要用到Map的互斥量mMutexMapUpdate + * + * 总结下与ORBSLAM2的不同 + * 前面操作基本一样,但优化时2代去掉了误差大的点又进行优化了,3代只是统计但没有去掉继续优化,而后都是将误差大的点干掉 + */ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap, int& num_fixedKF, int& num_OptKF, int& num_MPs, int& num_edges) { + // 该优化函数用于LocalMapping线程的局部BA优化 // Local KeyFrames: First Breath Search from Current Keyframe list lLocalKeyFrames; + // 步骤1:将当前关键帧加入lLocalKeyFrames lLocalKeyFrames.push_back(pKF); pKF->mnBALocalForKF = pKF->mnId; Map* pCurrentMap = pKF->GetMap(); + // 步骤2:找到关键帧连接的关键帧(一级相连),加入lLocalKeyFrames中 const vector vNeighKFs = pKF->GetVectorCovisibleKeyFrames(); for(int i=0, iend=vNeighKFs.size(); imnBALocalForKF = pKF->mnId; if(!pKFi->isBad() && pKFi->GetMap() == pCurrentMap) lLocalKeyFrames.push_back(pKFi); @@ -1821,6 +1879,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap // Local MapPoints seen in Local KeyFrames num_fixedKF = 0; + // 步骤3:遍历lLocalKeyFrames中关键帧,将它们观测的MapPoints加入到lLocalMapPoints list lLocalMapPoints; set sNumObsMP; for(list::iterator lit=lLocalKeyFrames.begin() , lend=lLocalKeyFrames.end(); lit!=lend; lit++) @@ -1849,6 +1908,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap num_MPs = lLocalMapPoints.size(); // Fixed Keyframes. Keyframes that see Local MapPoints but that are not Local Keyframes + // 步骤4:得到能被局部MapPoints观测到,但不属于局部关键帧的关键帧,这些关键帧在局部BA优化时不优化 list lFixedCameras; for(list::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) { @@ -1865,6 +1925,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap } } } + // 步骤4.1:相比ORBSLAM2多出了判断固定关键帧的个数,最起码要两个固定的,如果实在没有就把lLocalKeyFrames中最早的KF固定,还是不够再加上第二早的KF固定 num_fixedKF = lFixedCameras.size() + num_fixedKF; if(num_fixedKF < 2) { @@ -1911,6 +1972,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap } // Setup optimizer + // 步骤5:构造g2o优化器 g2o::SparseOptimizer optimizer; g2o::BlockSolver_6_3::LinearSolverType * linearSolver; @@ -1931,6 +1993,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap unsigned long maxKFid = 0; // Set Local KeyFrame vertices + // 步骤6:添加顶点:Pose of Local KeyFrame for(list::iterator lit=lLocalKeyFrames.begin(), lend=lLocalKeyFrames.end(); lit!=lend; lit++) { KeyFrame* pKFi = *lit; @@ -1945,6 +2008,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap num_OptKF = lLocalKeyFrames.size(); // Set Fixed KeyFrame vertices + // 步骤7:添加顶点:Pose of Fixed KeyFrame,注意这里调用了vSE3->setFixed(true)。 for(list::iterator lit=lFixedCameras.begin(), lend=lFixedCameras.end(); lit!=lend; lit++) { KeyFrame* pKFi = *lit; @@ -1958,32 +2022,42 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap } // Set MapPoint vertices + // 步骤7:添加3D顶点 + // 存放的方式(举例) + // 边id: 1 2 3 4 5 6 7 8 9 + // KFid: 1 2 3 4 1 2 3 2 3 + // MPid: 1 1 1 1 2 2 2 3 3 + // 所以这个个数大约是点数×帧数,实际肯定比这个要少 const int nExpectedSize = (lLocalKeyFrames.size()+lFixedCameras.size())*lLocalMapPoints.size(); + // 存放单目时的边 vector vpEdgesMono; 
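+    // Note: vpEdgesMono / vpEdgeKFMono / vpMapPointEdgeMono (and the body/stereo variants
+    // declared below) are parallel containers: entry i of each refers to the same observation,
+    // so an outlier edge found after the optimization maps straight back to its
+    // (KeyFrame, MapPoint) pair.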
vpEdgesMono.reserve(nExpectedSize); - + // 存放单目时的KF vector vpEdgesBody; vpEdgesBody.reserve(nExpectedSize); vector vpEdgeKFMono; vpEdgeKFMono.reserve(nExpectedSize); + // 存放单目时的MP + // 存放双目鱼眼时另一个相机的KF vector vpEdgeKFBody; vpEdgeKFBody.reserve(nExpectedSize); - + // 存放双目鱼眼时另一个相机的边 vector vpMapPointEdgeMono; vpMapPointEdgeMono.reserve(nExpectedSize); - + // 存放双目鱼眼时另一个相机的MP vector vpMapPointEdgeBody; vpMapPointEdgeBody.reserve(nExpectedSize); + // 存放双目时的边 vector vpEdgesStereo; vpEdgesStereo.reserve(nExpectedSize); - + // 存放双目时的KF vector vpEdgeKFStereo; vpEdgeKFStereo.reserve(nExpectedSize); - + // 存放双目时的MP vector vpMapPointEdgeStereo; vpMapPointEdgeStereo.reserve(nExpectedSize); @@ -1993,7 +2067,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap int nPoints = 0; int nKFs = lLocalKeyFrames.size()+lFixedCameras.size(), nEdges = 0; - + // 添加顶点:MapPoint for(list::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) { MapPoint* pMP = *lit; @@ -2001,6 +2075,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap vPoint->setEstimate(Converter::toVector3d(pMP->GetWorldPos())); int id = pMP->mnId+maxKFid+1; vPoint->setId(id); + // 这里的边缘化与滑动窗口不同,而是为了加速稀疏矩阵的计算BlockSolver_6_3默认了6维度的不边缘化,3自由度的三维点被边缘化,所以所有三维点都设置边缘化 vPoint->setMarginalized(true); optimizer.addVertex(vPoint); nPoints++; @@ -2008,6 +2083,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap const map> observations = pMP->GetObservations(); //Set edges + // 步骤8:对每一对关联的MapPoint和KeyFrame构建边 for(map>::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; @@ -2017,6 +2093,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap const int leftIndex = get<0>(mit->second); // Monocular observation + // 单目 if(leftIndex != -1 && pKFi->mvuRight[get<0>(mit->second)]<0) { const cv::KeyPoint &kpUn = pKFi->mvKeysUn[leftIndex]; @@ -2121,6 +2198,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap if(*pbStopFlag) return; + // 步骤9:开始优化 optimizer.initializeOptimization(); std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now(); @@ -2138,6 +2216,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame *pKF, bool* pbStopFlag, Map* pMap // Check inlier observations int nMonoBadObs = 0; + // 步骤10:检测outlier,并设置下次不优化,上面展示了怎么存储的,i是共享的,第i个边是由第i个MP与第i个KF组成的 for(size_t i=0, iend=vpEdgesMono.size(); i & return nIn; } - +/** + * @brief 局部地图+惯导BA LocalMapping IMU中使用,地图经过imu初始化时用这个函数代替LocalBundleAdjustment + * + * @param[in] pKF //关键帧 + * @param[in] pbStopFlag //是否停止的标志 + * @param[in] pMap //地图 + * @param[in] num_fixedKF // + * @param[in] num_OptKF + * @param[in] num_MPs + * @param[in] num_edges + * @param[in] bLarge + * @param[in] bRecInit + */ void Optimizer::LocalInertialBA(KeyFrame *pKF, bool *pbStopFlag, Map *pMap, int& num_fixedKF, int& num_OptKF, int& num_MPs, int& num_edges, bool bLarge, bool bRecInit) { + // Step 1. 
确定待优化的关键帧们 Map* pCurrentMap = pKF->GetMap(); - int maxOpt=10; - int opt_it=10; + int maxOpt=10; // 最大优化关键帧数目 + int opt_it=10; // 每次优化的迭代次数 if(bLarge) { - maxOpt=25; + maxOpt=25; opt_it=4; } + // 预计待优化的关键帧数,min函数是为了控制优化关键帧的数量 const int Nd = std::min((int)pCurrentMap->KeyFramesInMap()-2,maxOpt); const unsigned long maxKFid = pKF->mnId; @@ -4253,6 +4348,7 @@ void Optimizer::LocalInertialBA(KeyFrame *pKF, bool *pbStopFlag, Map *pMap, int& int N = vpOptimizableKFs.size(); // Optimizable points seen by temporal optimizable keyframes + // Step 2. 确定这些关键帧对应的地图点,存入lLocalMapPoints list lLocalMapPoints; for(int i=0; i lFixedKeyFrames; if(vpOptimizableKFs.back()->mPrevKF) { @@ -4286,6 +4383,8 @@ void Optimizer::LocalInertialBA(KeyFrame *pKF, bool *pbStopFlag, Map *pMap, int& } // Optimizable visual KFs + // 4. 做了一系列操作发现最后lpOptVisKFs为空。这段应该是调试遗留代码,如果实现的话其实就是把共视图中在前面没有加过的关键帧们加进来,但作者可能发现之前就把共视图的全部帧加进来了,也有可能发现优化的效果不好浪费时间 + // 获得与当前关键帧有共视关系的一些关键帧,大于15个点,排序为从小到大 const int maxCovKF = 0; for(int i=0, iend=vpNeighsKFs.size(); i::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) @@ -4342,6 +4442,7 @@ void Optimizer::LocalInertialBA(KeyFrame *pKF, bool *pbStopFlag, Map *pMap, int& bool bNonFixed = (lFixedKeyFrames.size() == 0); // Setup optimizer + // 6. 构造优化器,正式开始优化 g2o::SparseOptimizer optimizer; g2o::BlockSolverX::LinearSolverType * linearSolver; linearSolver = new g2o::LinearSolverEigen(); @@ -4363,6 +4464,7 @@ void Optimizer::LocalInertialBA(KeyFrame *pKF, bool *pbStopFlag, Map *pMap, int& // Set Local temporal KeyFrame vertices + // 7. 建立关于关键帧的节点,其中包括,位姿,速度,以及两个偏置 N=vpOptimizableKFs.size(); num_fixedKF = 0; num_OptKF = 0; @@ -4396,6 +4498,7 @@ void Optimizer::LocalInertialBA(KeyFrame *pKF, bool *pbStopFlag, Map *pMap, int& } // Set Local visual KeyFrame vertices + // 8. 建立关于共视关键帧的节点,但这里为空 for(list::iterator it=lpOptVisKFs.begin(), itEnd = lpOptVisKFs.end(); it!=itEnd; it++) { KeyFrame* pKFi = *it; @@ -4408,6 +4511,7 @@ void Optimizer::LocalInertialBA(KeyFrame *pKF, bool *pbStopFlag, Map *pMap, int& } // Set Fixed KeyFrame vertices + // 9. 建立关于固定关键帧的节点,其中包括,位姿,速度,以及两个偏置 for(list::iterator lit=lFixedKeyFrames.begin(), lend=lFixedKeyFrames.end(); lit!=lend; lit++) { KeyFrame* pKFi = *lit; @@ -4435,10 +4539,11 @@ void Optimizer::LocalInertialBA(KeyFrame *pKF, bool *pbStopFlag, Map *pMap, int& } // Create intertial constraints + // 暂时没看到有什么用 vector vei(N,(EdgeInertial*)NULL); vector vegr(N,(EdgeGyroRW*)NULL); vector vear(N,(EdgeAccRW*)NULL); - + // 10. 建立边,没有imu跳过 for(int i=0;i vpKFs, Eigen::Vector3d &b } } - +/** + * @brief 优化重力方向与尺度,LocalMapping::ScaleRefinement()中使用,优化目标有: + * 重力方向与尺度 + * @param pMap 地图 + * @param Rwg 重力方向到速度方向的转角 + * @param scale 尺度 + */ void Optimizer::InertialOptimization(Map *pMap, Eigen::Matrix3d &Rwg, double &scale) { int its = 10; @@ -5883,13 +6013,20 @@ void Optimizer::MergeBundleAdjustmentVisual(KeyFrame* pCurrentKF, vector vpAdjustKF, vector vpFixedKF, bool *pbStopFlag) { bool bShowImages = false; vector vpMPs; - +// 1. 构建g2o优化器 g2o::SparseOptimizer optimizer; g2o::BlockSolver_6_3::LinearSolverType * linearSolver; @@ -5906,11 +6043,12 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju optimizer.setForceStopFlag(pbStopFlag); long unsigned int maxKFid = 0; - set spKeyFrameBA; + set spKeyFrameBA; // 存放关键帧,包含固定的与不固定的 Map* pCurrentMap = pMainKF->GetMap(); // Set fixed KeyFrame vertices + // 2. 
构建固定关键帧的节点,并储存对应的MP for(KeyFrame* pKFi : vpFixedKF) { if(pKFi->isBad() || pKFi->GetMap() != pCurrentMap) @@ -5919,7 +6057,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju continue; } - pKFi->mnBALocalForMerge = pMainKF->mnId; + pKFi->mnBALocalForMerge = pMainKF->mnId; // 防止重复添加 g2o::VertexSE3Expmap * vSE3 = new g2o::VertexSE3Expmap(); vSE3->setEstimate(Converter::toSE3Quat(pKFi->GetPose())); @@ -5946,13 +6084,14 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju } // Set non fixed Keyframe vertices + // 3. 构建不固定关键帧的节点,并储存对应的MP set spAdjustKF(vpAdjustKF.begin(), vpAdjustKF.end()); for(KeyFrame* pKFi : vpAdjustKF) { if(pKFi->isBad() || pKFi->GetMap() != pCurrentMap) continue; - pKFi->mnBALocalForKF = pMainKF->mnId; + pKFi->mnBALocalForKF = pMainKF->mnId; // 防止重复添加 g2o::VertexSE3Expmap * vSE3 = new g2o::VertexSE3Expmap(); vSE3->setEstimate(Converter::toSE3Quat(pKFi->GetPose())); @@ -5980,6 +6119,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju spKeyFrameBA.insert(pKFi); } + // 准备存放边的vector const int nExpectedSize = (vpAdjustKF.size()+vpFixedKF.size())*vpMPs.size(); vector vpEdgesMono; @@ -6004,9 +6144,11 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju const float thHuber3D = sqrt(7.815); // Set MapPoint vertices - map mpObsKFs; - map mpObsFinalKFs; - map mpObsMPs; + map mpObsKFs; // 统计每个关键帧对应的MP点数,调试输出用 + map mpObsFinalKFs; // 统计每个MP对应的关键帧数,调试输出用 + map mpObsMPs; // 统计每个MP被观测的图片数,双目就是两个,调试输出用 + + // 4. 确定MP节点与边的连接 for(unsigned int i=0; i < vpMPs.size(); ++i) { MapPoint* pMPi = vpMPs[i]; @@ -6101,6 +6243,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju } } + // 这段没啥用,调试输出的,暂时不看 map mStatsObs; for(map::iterator it = mpObsMPs.begin(); it != mpObsMPs.end(); ++it) { @@ -6114,7 +6257,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju if(pbStopFlag) if(*pbStopFlag) return; - + // 5. 优化 optimizer.initializeOptimization(); optimizer.optimize(5); @@ -6123,7 +6266,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju if(pbStopFlag) if(*pbStopFlag) bDoMore = false; - + // 6. 剔除误差大的边 map mWrongObsKF; if(bDoMore) { @@ -6172,6 +6315,7 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju } + // 下面这段代码都是调试用的 vector > vToErase; vToErase.reserve(vpEdgesMono.size()+vpEdgesStereo.size()); set spErasedMPs; @@ -6290,22 +6434,23 @@ void Optimizer::LocalBundleAdjustment(KeyFrame* pMainKF,vector vpAdju } // Recover optimized data - + // 7. 
取出结果 //Keyframes for(KeyFrame* pKFi : vpAdjustKF) { if(pKFi->isBad()) continue; - + // 7.1 取出对应位姿,并计算t的变化量。 g2o::VertexSE3Expmap* vSE3 = static_cast(optimizer.vertex(pKFi->mnId)); g2o::SE3Quat SE3quat = vSE3->estimate(); cv::Mat Tiw = Converter::toCvMat(SE3quat); + // 统计调试用 int numMonoBadPoints = 0, numMonoOptPoints = 0; int numStereoBadPoints = 0, numStereoOptPoints = 0; - vector vpMonoMPsOpt, vpStereoMPsOpt; - vector vpMonoMPsBad, vpStereoMPsBad; - + vector vpMonoMPsOpt, vpStereoMPsOpt; // 存放mp内点 + vector vpMonoMPsBad, vpStereoMPsBad; // 存放mp外点 + // 7.2 卡方检验 for(size_t i=0, iend=vpEdgesMono.size(); i vpAdju } } - +/** + * @brief 这里面进行visual inertial ba + * @param[in] pCurrKF 当前关键帧 + * @param[in] pMergeKF 融合帧 + * @param[in] pbStopFlag 是否优化 + * @param[in] pMap 当前地图 + * @param[out] corrPoses 所有的Sim3 矫正 + */ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbStopFlag, Map *pMap, LoopClosing::KeyFrameAndPose &corrPoses) { const int Nd = 6; const unsigned long maxKFid = pCurrKF->mnId; - + // Step 1 准备所有被优化的关键帧, 完全固定的帧 + // 被优化的帧, 当前帧和融合匹配帧加起来一共12个 vector vpOptimizableKFs; vpOptimizableKFs.reserve(2*Nd); // For cov KFS, inertial parameters are not optimized + // 共视帧, 不会优化imu参数,但位姿会被优化 const int maxCovKF=15; vector vpOptimizableCovKFs; vpOptimizableCovKFs.reserve(2*maxCovKF); // Add sliding window for current KF + // 当前关键帧先加到容器中 vpOptimizableKFs.push_back(pCurrKF); + // 后面用这个变量避免重复 pCurrKF->mnBALocalForKF = pCurrKF->mnId; + // 添加5个与当前关键帧相连的时序上相连的关键帧进容器 for(int i=1; imPrevKF) { + // 用mPrevKF访问时序上的上一个关键帧 vpOptimizableKFs.push_back(vpOptimizableKFs.back()->mPrevKF); vpOptimizableKFs.back()->mnBALocalForKF = pCurrKF->mnId; } else break; } - + // 记录完全固定的帧 list lFixedKeyFrames; + // vpOptimizableCovKFs时序上与vpOptimizableKFs最老的一帧相连 if(vpOptimizableKFs.back()->mPrevKF) { vpOptimizableCovKFs.push_back(vpOptimizableKFs.back()->mPrevKF); @@ -6419,18 +6578,23 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } else { + // 如果没找到时序相连的帧,把被优化的窗口缩减一个给到固定的变量 vpOptimizableCovKFs.push_back(vpOptimizableKFs.back()); vpOptimizableKFs.pop_back(); } + // 没用到 KeyFrame* pKF0 = vpOptimizableCovKFs.back(); cv::Mat Twc0 = pKF0->GetPoseInverse(); // Add temporal neighbours to merge KF (previous and next KFs) + // 同样, 对于融合帧也把它和时序上的几个邻居加到可优化的容器里 + // 先把融合帧自己加到可优化的容器里 vpOptimizableKFs.push_back(pMergeKF); pMergeKF->mnBALocalForKF = pCurrKF->mnId; // Previous KFs + // 把融合帧时序上的邻居添加到可优化的容器里, 因为融合帧左右都有时序上的邻居,所以这里先取一半 Nd/2 for(int i=1; i<(Nd/2); i++) { if(vpOptimizableKFs.back()->mPrevKF) @@ -6443,11 +6607,13 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } // We fix just once the old map + // 记录与融合帧窗口时序上紧邻的帧为完全固定的帧 if(vpOptimizableKFs.back()->mPrevKF) { lFixedKeyFrames.push_back(vpOptimizableKFs.back()->mPrevKF); vpOptimizableKFs.back()->mPrevKF->mnBAFixedForKF=pCurrKF->mnId; } + // 如果没找到,则从窗口内拿出一帧给到完全固定的帧 else { vpOptimizableKFs.back()->mnBALocalForKF=0; @@ -6457,12 +6623,13 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } // Next KFs + // 添加时序上的另外一半, 比融合帧更新的帧 if(pMergeKF->mNextKF) { vpOptimizableKFs.push_back(pMergeKF->mNextKF); vpOptimizableKFs.back()->mnBALocalForKF = pCurrKF->mnId; } - + // 继续添加直到达到2Nd个可优化关键帧,或没有新的可以添加了 while(vpOptimizableKFs.size()<(2*Nd)) { if(vpOptimizableKFs.back()->mNextKF) @@ -6477,17 +6644,23 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS int N = vpOptimizableKFs.size(); // Optimizable points seen by optimizable keyframes + // Step 2 
把所有被优化的点添加进来(所有被可优化关键帧看到的点) + // 添加用来优化的地图点 list lLocalMapPoints; + // 记录每个地图点没观测多少次 map mLocalObs; + // 遍历所有可优化的关键帧 for(int i=0; i vpMPs = vpOptimizableKFs[i]->GetMapPointMatches(); + // 遍历每个关键帧所有的地图点 for(vector::iterator vit=vpMPs.begin(), vend=vpMPs.end(); vit!=vend; vit++) { // Using mnBALocalForKF we avoid redundance here, one MP can not be added several times to lLocalMapPoints MapPoint* pMP = *vit; if(pMP) if(!pMP->isBad()) + // 用这个变量记录是否重复添加 if(pMP->mnBALocalForKF!=pCurrKF->mnId) { mLocalObs[pMP]=1; @@ -6499,11 +6672,15 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } } + // Step 3 把所有可优化关键帧的共视帧加进来 + // 固定所有观察到地图点,但没有被加到优化变量中的关键帧 int i = 0; const int min_obs = 10; vector vNeighCurr = pCurrKF->GetCovisiblesByWeight(min_obs); + // 遍历所有的pair<地图点指针,观测次数> for(vector::iterator lit=vNeighCurr.begin(), lend=vNeighCurr.end(); lit!=lend; lit++) { + // 拿到所有的观测 if(i>=maxCovKF) break; KeyFrame* pKFi = *lit; @@ -6527,6 +6704,7 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS break; KeyFrame* pKFi = *lit; + // 如果还没有被添加到共视帧容器里 if(pKFi->mnBALocalForKF!=pCurrKF->mnId && pKFi->mnBAFixedForKF!=pCurrKF->mnId) // If optimizable or already included... { pKFi->mnBALocalForKF=pCurrKF->mnId; @@ -6538,6 +6716,7 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } } + // Step 4 设置所有关键帧的顶点 g2o::SparseOptimizer optimizer; g2o::BlockSolverX::LinearSolverType * linearSolver; linearSolver = new g2o::LinearSolverEigen(); @@ -6553,6 +6732,7 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS // Set Local KeyFrame vertices N=vpOptimizableKFs.size(); + // Step 4.1 设置所有的可优化关键帧顶点 for(int i=0; isetId(pKFi->mnId); VP->setFixed(false); + // 位姿 optimizer.addVertex(VP); if(pKFi->bImu) @@ -6608,6 +6789,7 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } // Set Fixed KeyFrame vertices + // Step 4.3 设置所有完全固定的关键帧顶点 for(list::iterator lit=lFixedKeyFrames.begin(), lend=lFixedKeyFrames.end(); lit!=lend; lit++) { KeyFrame* pKFi = *lit; @@ -6633,10 +6815,16 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } } + // Step 5 设置所有的inertial的边 // Create intertial constraints + // 预积分边 vector vei(N,(EdgeInertial*)NULL); + // 角速度bias边 vector vegr(N,(EdgeGyroRW*)NULL); + // 加速地bias边 vector vear(N,(EdgeAccRW*)NULL); + + // Step 5 遍历所有可优化的关键帧 for(int i=0;ibImu && pKFi->mPrevKF->bImu && pKFi->mpImuPreintegrated) { pKFi->mpImuPreintegrated->SetNewBias(pKFi->mPrevKF->GetImuBias()); + // 位姿 g2o::HyperGraph::Vertex* VP1 = optimizer.vertex(pKFi->mPrevKF->mnId); + // 速度 g2o::HyperGraph::Vertex* VV1 = optimizer.vertex(maxKFid+3*(pKFi->mPrevKF->mnId)+1); + // 角速度bias g2o::HyperGraph::Vertex* VG1 = optimizer.vertex(maxKFid+3*(pKFi->mPrevKF->mnId)+2); + // 加速度bias g2o::HyperGraph::Vertex* VA1 = optimizer.vertex(maxKFid+3*(pKFi->mPrevKF->mnId)+3); + // 同上 g2o::HyperGraph::Vertex* VP2 = optimizer.vertex(pKFi->mnId); g2o::HyperGraph::Vertex* VV2 = optimizer.vertex(maxKFid+3*(pKFi->mnId)+1); g2o::HyperGraph::Vertex* VG2 = optimizer.vertex(maxKFid+3*(pKFi->mnId)+2); @@ -6666,7 +6859,7 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } vei[i] = new EdgeInertial(pKFi->mpImuPreintegrated); - + // 设置顶点 2*Pose + 2*Velocity + 1 角速度bias + 1 加速度bias vei[i]->setVertex(0,dynamic_cast(VP1)); vei[i]->setVertex(1,dynamic_cast(VV1)); vei[i]->setVertex(2,dynamic_cast(VG1)); @@ -6675,14 +6868,20 @@ void 
Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS vei[i]->setVertex(5,dynamic_cast(VV2)); // TODO Uncomment + // 设置优化参数 g2o::RobustKernelHuber* rki = new g2o::RobustKernelHuber; vei[i]->setRobustKernel(rki); rki->setDelta(sqrt(16.92)); + // 添加边 optimizer.addEdge(vei[i]); + + // 角速度bias的边 vegr[i] = new EdgeGyroRW(); + // 设置顶点两个角速度bias vegr[i]->setVertex(0,VG1); vegr[i]->setVertex(1,VG2); + // 设置information matrix cv::Mat cvInfoG = pKFi->mpImuPreintegrated->C.rowRange(9,12).colRange(9,12).inv(cv::DECOMP_SVD); Eigen::Matrix3d InfoG; @@ -6691,10 +6890,11 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS InfoG(r,c)=cvInfoG.at(r,c); vegr[i]->setInformation(InfoG); optimizer.addEdge(vegr[i]); - + // 设置加速度的边 vear[i] = new EdgeAccRW(); vear[i]->setVertex(0,VA1); vear[i]->setVertex(1,VA2); + // 设置information matrix cv::Mat cvInfoA = pKFi->mpImuPreintegrated->C.rowRange(12,15).colRange(12,15).inv(cv::DECOMP_SVD); Eigen::Matrix3d InfoA; for(int r=0;r<3;r++) @@ -6711,8 +6911,11 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS // Set MapPoint vertices + // Step 6 设置所有地图点的顶点 + // 设置地图点顶点 const int nExpectedSize = (N+Ncov+lFixedKeyFrames.size())*lLocalMapPoints.size(); + // 单目和双目分别设置 // Mono vector vpEdgesMono; vpEdgesMono.reserve(nExpectedSize); @@ -6740,6 +6943,8 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS const unsigned long iniMPid = maxKFid*5; + // 遍历所有被可优化关键帧观测到的地图点 + // Step 7 添加重投影的边 for(list::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) { MapPoint* pMP = *lit; @@ -6752,11 +6957,13 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS unsigned long id = pMP->mnId+iniMPid+1; vPoint->setId(id); vPoint->setMarginalized(true); + // 添加顶点 optimizer.addVertex(vPoint); const map> observations = pMP->GetObservations(); // Create visual constraints + // 添加重投影边 for(map>::const_iterator mit=observations.begin(), mend=observations.end(); mit!=mend; mit++) { KeyFrame* pKFi = mit->first; @@ -6778,28 +6985,36 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS if(!pKFi->isBad()) { + // 3D点的观测 const cv::KeyPoint &kpUn = pKFi->mvKeysUn[get<0>(mit->second)]; - + // 如果是单目观测 if(pKFi->mvuRight[get<0>(mit->second)]<0) // Monocular observation { + // 投影 Eigen::Matrix obs; obs << kpUn.pt.x, kpUn.pt.y; EdgeMono* e = new EdgeMono(); + // 设置边的顶点 + // 3D点 e->setVertex(0, dynamic_cast(optimizer.vertex(id))); + // 关键帧位姿 e->setVertex(1, dynamic_cast(optimizer.vertex(pKFi->mnId))); e->setMeasurement(obs); const float &invSigma2 = pKFi->mvInvLevelSigma2[kpUn.octave]; + // 设置信息矩阵 e->setInformation(Eigen::Matrix2d::Identity()*invSigma2); g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; e->setRobustKernel(rk); rk->setDelta(thHuberMono); + // 添加边 optimizer.addEdge(e); vpEdgesMono.push_back(e); vpEdgeKFMono.push_back(pKFi); vpMapPointEdgeMono.push_back(pMP); } + //双目 else // stereo observation { const float kp_ur = pKFi->mvuRight[get<0>(mit->second)]; @@ -6826,10 +7041,11 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } } } - + // 如果要停止就直接返回 if(pbStopFlag) if(*pbStopFlag) return; + // Step 8 开始优化 optimizer.initializeOptimization(); optimizer.optimize(3); if(pbStopFlag) @@ -6838,11 +7054,15 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS optimizer.setForceStopFlag(pbStopFlag); + // 根据优化结果挑选要删除的外点 vector > vToErase;
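// Note on the outlier test below: in g2o, e->chi2() is the Mahalanobis norm of the
// edge residual, error^T * Omega * error. A minimal self-contained sketch of the same
// gate for a monocular edge (obs/projected/invSigma2 stand in for the values already
// computed when the edge was built):
/*
    Eigen::Vector2d err = obs - projected;                          // reprojection residual
    Eigen::Matrix2d info = Eigen::Matrix2d::Identity() * invSigma2; // information matrix Omega
    double chi2 = err.dot(info * err);                              // == e->chi2()
    bool bOutlier = chi2 > 5.991;                                   // 95% quantile, chi-square 2 DoF
*/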
vToErase.reserve(vpEdgesMono.size()+vpEdgesStereo.size()); // Check inlier observations // Mono + + // Step 9 处理外点 + // 根据卡方检验来记录要删除的外点 for(size_t i=0, iend=vpEdgesMono.size(); i lock(pMap->mMutexMapUpdate); if(!vToErase.empty()) { @@ -6890,15 +7111,19 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS // Recover optimized data + // Step 10 根据优化的结果,修改每个被优化的变量 //Keyframes + // 对于每个可优化关键帧 for(int i=0; i(optimizer.vertex(pKFi->mnId)); cv::Mat Tcw = Converter::toCvSE3(VP->estimate().Rcw[0], VP->estimate().tcw[0]); pKFi->SetPose(Tcw); + // 在corrPoses记录每个关键帧融合后的位姿 cv::Mat Tiw=pKFi->GetPose(); cv::Mat Riw = Tiw.rowRange(0,3).colRange(0,3); cv::Mat tiw = Tiw.rowRange(0,3).col(3); @@ -6907,31 +7132,39 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS if(pKFi->bImu) - { + { + // 速度 VertexVelocity* VV = static_cast(optimizer.vertex(maxKFid+3*(pKFi->mnId)+1)); + // 修改速度 pKFi->SetVelocity(Converter::toCvMat(VV->estimate())); + // 角速度bias VertexGyroBias* VG = static_cast(optimizer.vertex(maxKFid+3*(pKFi->mnId)+2)); + // 加速度bias VertexAccBias* VA = static_cast(optimizer.vertex(maxKFid+3*(pKFi->mnId)+3)); Vector6d b; + // 修改bias b << VG->estimate(), VA->estimate(); pKFi->SetNewBias(IMU::Bias(b[3],b[4],b[5],b[0],b[1],b[2])); } } - + // 遍历所有的共视帧 for(int i=0; i(optimizer.vertex(pKFi->mnId)); cv::Mat Tcw = Converter::toCvSE3(VP->estimate().Rcw[0], VP->estimate().tcw[0]); + //修改位姿 pKFi->SetPose(Tcw); + // 记录融合后的位姿 cv::Mat Tiw=pKFi->GetPose(); cv::Mat Riw = Tiw.rowRange(0,3).colRange(0,3); cv::Mat tiw = Tiw.rowRange(0,3).col(3); g2o::Sim3 g2oSiw(Converter::toMatrix3d(Riw),Converter::toVector3d(tiw),1.0); corrPoses[pKFi] = g2oSiw; + // 共视帧的imu顶点并没有被加到任何的边里面,这里可以忽略 if(pKFi->bImu) { VertexVelocity* VV = static_cast(optimizer.vertex(maxKFid+3*(pKFi->mnId)+1)); @@ -6945,8 +7178,10 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS } //Points + // 对于所有的地图点 for(list::iterator lit=lLocalMapPoints.begin(), lend=lLocalMapPoints.end(); lit!=lend; lit++) { + // 更新位置和normal等信息 MapPoint* pMP = *lit; g2o::VertexSBAPointXYZ* vPoint = static_cast(optimizer.vertex(pMP->mnId+iniMPid+1)); pMP->SetWorldPos(Converter::toCvMat(vPoint->estimate())); @@ -6956,44 +7191,70 @@ void Optimizer::MergeInertialBA(KeyFrame* pCurrKF, KeyFrame* pMergeKF, bool *pbS pMap->IncreaseChangeIndex(); } +/** + * @brief 使用上一关键帧+当前帧的视觉信息和IMU信息,优化当前帧位姿 + * + * 可分为以下几个步骤: + * // Step 1:创建g2o优化器,初始化顶点和边 + * // Step 2:启动多轮优化,剔除外点 + * // Step 3:更新当前帧位姿、速度、IMU偏置 + * // Step 4:记录当前帧的优化状态,包括参数信息和对应的海森矩阵 + * + * @param[in] pFrame 当前帧,也是待优化的帧 + * @param[in] bRecInit 调用这个函数的位置并没有传这个参数,因此它的值默认为false + * @return int 返回优化后的内点数 + */ int Optimizer::PoseInertialOptimizationLastKeyFrame(Frame *pFrame, bool bRecInit) { + // Step 1:创建g2o优化器,初始化顶点和边 + //构建一个稀疏求解器 g2o::SparseOptimizer optimizer; g2o::BlockSolverX::LinearSolverType * linearSolver; + // 使用dense的求解器,(常见非dense求解器有cholmod线性求解器和Schur补线性求解器) linearSolver = new g2o::LinearSolverDense(); g2o::BlockSolverX * solver_ptr = new g2o::BlockSolverX(linearSolver); - + //使用高斯牛顿求解器 g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(solver_ptr); optimizer.setVerbose(false); optimizer.setAlgorithm(solver); + //当前帧单(左)目地图点数目 int nInitialMonoCorrespondences=0; int nInitialStereoCorrespondences=0; + //上面两项的和,即参与优化的地图点总数目 int nInitialCorrespondences=0; // Set Frame vertex + //当前帧的位姿,旋转+平移,6-dim VertexPose* VP = new VertexPose(pFrame); VP->setId(0); VP->setFixed(false); optimizer.addVertex(VP); +
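// Taken together, this vertex and the three added below span the 15-dim state of the
// current frame; the fixed vertices added later anchor the optimization to the last
// keyframe. Id layout as read off the setId() calls in this function:
/*
    current frame : P -> id 0, V -> id 1, BG -> id 2, BA -> id 3   // setFixed(false), optimized
    last keyframe : P -> id 4, V -> id 5, BG -> id 6, BA -> id 7   // setFixed(true), held constant
*/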
//当前帧的速度,3-dim VertexVelocity* VV = new VertexVelocity(pFrame); VV->setId(1); VV->setFixed(false); optimizer.addVertex(VV); + //当前帧的陀螺仪偏置,3-dim VertexGyroBias* VG = new VertexGyroBias(pFrame); VG->setId(2); VG->setFixed(false); optimizer.addVertex(VG); + //当前帧的加速度偏置,3-dim VertexAccBias* VA = new VertexAccBias(pFrame); VA->setId(3); VA->setFixed(false); optimizer.addVertex(VA); + //setFixed(false)这个设置使以上四个顶点(15个参数)在优化时更新 // Set MapPoint vertices + //当前帧的特征点总数 const int N = pFrame->N; + //当前帧左目的特征点总数 const int Nleft = pFrame->Nleft; + //当前帧是否存在右目(即是否为双目) const bool bRight = (Nleft!=-1); vector vpEdgesMono; @@ -7005,11 +7266,14 @@ int Optimizer::PoseInertialOptimizationLastKeyFrame(Frame *pFrame, bool bRecInit vnIndexEdgeMono.reserve(N); vnIndexEdgeStereo.reserve(N); + // 自由度为2的卡方分布,显著性水平为0.05,对应的临界阈值5.991 const float thHuberMono = sqrt(5.991); + // 自由度为3的卡方分布,显著性水平为0.05,对应的临界阈值7.815 const float thHuberStereo = sqrt(7.815); { + // 锁定地图点。由于需要使用地图点来构造顶点和边,因此不希望在构造的过程中部分地图点被改写造成不一致甚至是段错误 unique_lock lock(MapPoint::mGlobalMutex); for(int i=0; imvuRight[i]<0) || i < Nleft) { + //如果是双目情况下的左目 if(i < Nleft) // pair left-right + //使用未畸变校正的特征点 kpUn = pFrame->mvKeys[i]; + //如果是单目 else + //使用畸变校正过的特征点 kpUn = pFrame->mvKeysUn[i]; + //单目地图点计数增加 nInitialMonoCorrespondences++; + //当前地图点默认设置为不是外点 pFrame->mvbOutlier[i] = false; Eigen::Matrix obs; obs << kpUn.pt.x, kpUn.pt.y; + //第一种边(视觉重投影约束):地图点投影到该帧图像的坐标与特征点坐标偏差尽可能小 EdgeMonoOnlyPose* e = new EdgeMonoOnlyPose(pMP->GetWorldPos(),0); + //将位姿作为第一个顶点 e->setVertex(0,VP); + + //设置观测值,即去畸变后的像素坐标 e->setMeasurement(obs); // Add here uncerteinty + // 获取不确定度,这里调用uncertainty2返回固定值1.0 + // ?这里的1.0是作为缺省值的意思吗?是否可以根据对视觉信息的信任度动态修改这个值,比如标定的误差? const float unc2 = pFrame->mpCamera->uncertainty2(obs); + //invSigma2 = (Inverse(协方差矩阵))^2,表明该约束在各个维度上的可信度 + // 图像金字塔层数越高,可信度越差 const float invSigma2 = pFrame->mvInvLevelSigma2[kpUn.octave]/unc2; + //设置该约束的信息矩阵 e->setInformation(Eigen::Matrix2d::Identity()*invSigma2); g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + // 设置鲁棒核函数,避免其误差的平方项出现数值过大的增长 注:后续在优化2次后会用e->setRobustKernel(0)禁掉鲁棒核函数 e->setRobustKernel(rk); + + //重投影误差的自由度为2,设置对应的卡方阈值 rk->setDelta(thHuberMono); + //将第一种边加入优化器 optimizer.addEdge(e); + //将第一种边加入vpEdgesMono vpEdgesMono.push_back(e); + //将对应的特征点索引加入vnIndexEdgeMono vnIndexEdgeMono.push_back(i); } // Stereo observation @@ -7118,83 +7404,128 @@ int Optimizer::PoseInertialOptimizationLastKeyFrame(Frame *pFrame, bool bRecInit } } } + //统计参与优化的地图点总数目 nInitialCorrespondences = nInitialMonoCorrespondences + nInitialStereoCorrespondences; + //pKF为上一关键帧 KeyFrame* pKF = pFrame->mpLastKeyFrame; + + //上一关键帧的位姿,旋转+平移,6-dim VertexPose* VPk = new VertexPose(pKF); VPk->setId(4); VPk->setFixed(true); optimizer.addVertex(VPk); + //上一关键帧的速度,3-dim VertexVelocity* VVk = new VertexVelocity(pKF); VVk->setId(5); VVk->setFixed(true); optimizer.addVertex(VVk); + //上一关键帧的陀螺仪偏置,3-dim VertexGyroBias* VGk = new VertexGyroBias(pKF); VGk->setId(6); VGk->setFixed(true); optimizer.addVertex(VGk); + //上一关键帧的加速度偏置,3-dim VertexAccBias* VAk = new VertexAccBias(pKF); VAk->setId(7); VAk->setFixed(true); optimizer.addVertex(VAk); + //setFixed(true)这个设置使以上四个顶点(15个参数)的值在优化时保持固定 + //既然被选为关键帧,就不能太善变 + //第二种边(IMU预积分约束):两帧之间位姿的变化量与IMU预积分的值偏差尽可能小 EdgeInertial* ei = new EdgeInertial(pFrame->mpImuPreintegrated); + //将上一关键帧四个顶点(P、V、BG、BA)和当前帧两个顶点(P、V)加入第二种边 ei->setVertex(0, VPk); ei->setVertex(1, VVk); ei->setVertex(2, VGk); ei->setVertex(3, VAk); ei->setVertex(4, VP); ei->setVertex(5, VV); + //把第二种边加入优化器 optimizer.addEdge(ei); + 
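// The EdgeInertial added above links the two poses/velocities and the keyframe biases
// through the preintegrated measurement. A hedged sketch of the standard 9-dim
// preintegration residual it evaluates (dR/dV/dP are the bias-corrected preintegration
// deltas, g gravity, dt the integration time; cf. the Forster et al. preintegration):
/*
    Eigen::Vector3d er = LogSO3(dR.transpose() * R1.transpose() * R2);           // rotation error
    Eigen::Vector3d ev = R1.transpose() * (v2 - v1 - g*dt) - dV;                 // velocity error
    Eigen::Vector3d ep = R1.transpose() * (p2 - p1 - v1*dt - 0.5*g*dt*dt) - dP;  // position error
*/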
//第三种边(陀螺仪随机游走约束):陀螺仪的随机游走值在相近帧间不会相差太多 residual=VG-VGk + //用大白话来讲就是用固定的VGK拽住VG,防止VG在优化中放飞自我 EdgeGyroRW* egr = new EdgeGyroRW(); + + //将上一关键帧的BG加入第三种边 egr->setVertex(0,VGk); + //将当前帧的BG加入第三种边 egr->setVertex(1,VG); + //C值在预积分阶段更新,range(9,12)对应陀螺仪偏置的协方差,最终cvInfoG值为inv(∑(GyroRW^2/freq)) cv::Mat cvInfoG = pFrame->mpImuPreintegrated->C.rowRange(9,12).colRange(9,12).inv(cv::DECOMP_SVD); Eigen::Matrix3d InfoG; for(int r=0;r<3;r++) for(int c=0;c<3;c++) InfoG(r,c)=cvInfoG.at(r,c); + + //设置信息矩阵 egr->setInformation(InfoG); + //把第三种边加入优化器 optimizer.addEdge(egr); + //第四种边(加速度随机游走约束):加速度的随机游走值在相近帧间不会相差太多 residual=VA-VAk + //用大白话来讲就是用固定的VAK拽住VA,防止VA在优化中放飞自我 EdgeAccRW* ear = new EdgeAccRW(); + //将上一关键帧的BA加入第四种边 ear->setVertex(0,VAk); + //将当前帧的BA加入第四种边 ear->setVertex(1,VA); + //C值在预积分阶段更新,range(12,15)对应加速度偏置的协方差,最终cvInfoG值为inv(∑(AccRW^2/freq)) cv::Mat cvInfoA = pFrame->mpImuPreintegrated->C.rowRange(12,15).colRange(12,15).inv(cv::DECOMP_SVD); Eigen::Matrix3d InfoA; for(int r=0;r<3;r++) for(int c=0;c<3;c++) InfoA(r,c)=cvInfoA.at(r,c); + //设置信息矩阵 ear->setInformation(InfoA); + //把第四种边加入优化器 optimizer.addEdge(ear); + // Step 2:启动多轮优化,剔除外点 + // We perform 4 optimizations, after each optimization we classify observation as inlier/outlier // At the next optimization, outliers are not included, but at the end they can be classified as inliers again. + //卡方检验值呈递减趋势,目的是让检验越来越苛刻 float chi2Mono[4]={12,7.5,5.991,5.991}; float chi2Stereo[4]={15.6,9.8,7.815,7.815}; - + //4次优化的迭代次数都为10 int its[4]={10,10,10,10}; + //坏点数 int nBad=0; + //单目坏点数 int nBadMono = 0; + //双目坏点数 int nBadStereo = 0; + //单目内点数 int nInliersMono = 0; + //双目内点数 int nInliersStereo = 0; + //内点数 int nInliers=0; bool bOut = false; + + //进行4次优化 for(size_t it=0; it<4; it++) { + // 初始化优化器,这里的参数0代表只对level为0的边进行优化(不传参数默认也是0) optimizer.initializeOptimization(0); + //每次优化迭代十次 optimizer.optimize(its[it]); + //每次优化都重新统计各类点的数目 nBad=0; nBadMono = 0; nBadStereo = 0; nInliers=0; nInliersMono=0; nInliersStereo=0; + + //使用1.5倍的chi2Mono作为“近点”的卡方检验值,意味着地图点越近,检验越宽松 + //地图点如何定义为“近点”在下面的代码中有解释 float chi2close = 1.5*chi2Mono[it]; // For monocular observations @@ -7204,27 +7535,45 @@ int Optimizer::PoseInertialOptimizationLastKeyFrame(Frame *pFrame, bool bRecInit const size_t idx = vnIndexEdgeMono[i]; + // 如果这条误差边是来自于outlier if(pFrame->mvbOutlier[idx]) { + //计算这条边上次优化后的误差 e->computeError(); } + // 就是error*\Omega*error,表示了这条边考虑置信度以后的误差大小 const float chi2 = e->chi2(); + + //当地图点在当前帧的深度值小于10时,该地图点属于close(近点) + //mTrackDepth是在Frame.cc的isInFrustum函数中计算出来的 bool bClose = pFrame->mvpMapPoints[idx]->mTrackDepth<10.f; + //判断某地图点为外点的条件有以下三种: + //1.该地图点不是近点并且误差大于卡方检验值chi2Mono[it] + //2.该地图点是近点并且误差大于卡方检验值chi2close + //3.深度不为正 + //每次优化后,用更小的卡方检验值,原因是随着优化的进行,对划分为内点的信任程度越来越低 if((chi2>chi2Mono[it]&&!bClose)||(bClose && chi2>chi2close)||!e->isDepthPositive()) { + //将该点设置为外点 pFrame->mvbOutlier[idx]=true; + //外点不参与下一轮优化 e->setLevel(1); + //单目坏点数+1 nBadMono++; } else { + //将该点设置为内点(暂时) pFrame->mvbOutlier[idx]=false; + //内点继续参与下一轮优化 e->setLevel(0); + //单目内点数+1 nInliersMono++; } + //从第三次优化开始就不设置鲁棒核函数了,原因是经过两轮优化已经趋向准确值,不会有太大误差 if (it==2) e->setRobustKernel(0); } @@ -7260,7 +7609,9 @@ int Optimizer::PoseInertialOptimizationLastKeyFrame(Frame *pFrame, bool bRecInit e->setRobustKernel(0); } + //内点总数=单目内点数+双目内点数 nInliers = nInliersMono + nInliersStereo; + //坏点数=单目坏点数+双目坏点数 nBad = nBadMono + nBadStereo; if(optimizer.edges().size()<10) @@ -7272,18 +7623,24 @@ int Optimizer::PoseInertialOptimizationLastKeyFrame(Frame *pFrame, bool bRecInit } // If not too much tracks, recover not too bad points + 
//若4次优化后内点数小于30,尝试恢复一部分不那么糟糕的坏点 if ((nInliers<30) && !bRecInit) { + //重新从0开始统计坏点数 nBad=0; + //单目可容忍的卡方检验最大值(如果误差比这还大就不要挣扎了...) const float chi2MonoOut = 18.f; const float chi2StereoOut = 24.f; EdgeMonoOnlyPose* e1; EdgeStereoOnlyPose* e2; + //遍历所有单目特征点 for(size_t i=0, iend=vnIndexEdgeMono.size(); icomputeError(); + //判断误差值是否超过单目可容忍的卡方检验最大值,是的话就把这个点保下来 if (e1->chi2()mvbOutlier[idx]=false; else @@ -7301,20 +7658,59 @@ int Optimizer::PoseInertialOptimizationLastKeyFrame(Frame *pFrame, bool bRecInit } } + // Step 3:更新当前帧位姿、速度、IMU偏置 + // Recover optimized pose, velocity and biases + //给当前帧设置优化后的旋转、位移、速度,用来更新位姿 pFrame->SetImuPoseVelocity(Converter::toCvMat(VP->estimate().Rwb),Converter::toCvMat(VP->estimate().twb),Converter::toCvMat(VV->estimate())); Vector6d b; b << VG->estimate(), VA->estimate(); + //给当前帧设置优化后的bg,ba pFrame->mImuBias = IMU::Bias(b[3],b[4],b[5],b[0],b[1],b[2]); + // Step 4:记录当前帧的优化状态,包括参数信息和对应的海森矩阵 // Recover Hessian, marginalize keyFframe states and generate new prior for frame Eigen::Matrix H; H.setZero(); + //H(x)=J(x).t()*info*J(x) + + //J(x)取的是EdgeInertial中的_jacobianOplus[4]和_jacobianOplus[5],即EdgeInertial::computeError计算出来的er,ev,ep对当前帧Pose和Velocity的偏导 + //因此ei->GetHessian2的结果为: + //H(∂er/∂r) H(∂er/∂t) H(∂er/∂v) + //H(∂ev/∂r) H(∂ev/∂t) H(∂ev/∂v) + //H(∂ep/∂r) H(∂ep/∂t) H(∂ep/∂v) + //每项H都是3x3,故GetHessian2的结果是9x9 H.block<9,9>(0,0)+= ei->GetHessian2(); + + //J(x)取的是EdgeGyroRW中的_jacobianOplusXj,即EdgeGyroRW::computeError计算出来的_error(ebg)对当前帧bg的偏导 + //因此egr->GetHessian2的结果为: + //H(∂ebg/∂bg) ,3x3 H.block<3,3>(9,9) += egr->GetHessian2(); + + //J(x)取的是EdgeAccRW中的_jacobianOplusXj,即EdgeAccRW::computeError计算出来的_error(ebg)对当前帧ba的偏导 + //因此ear->GetHessian2的结果为: + //H(∂eba/∂ba) ,3x3 H.block<3,3>(12,12) += ear->GetHessian2(); + + //经过前面几步,Hessian Matrix长这个样子(注:省略了->GetHessian2()) + //ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + //ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + //ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + //ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + //ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + //ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + //ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + //ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + //ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + // 0 0 0 0 0 0 0 0 0 egr egr egr 0 0 0 + // 0 0 0 0 0 0 0 0 0 egr egr egr 0 0 0 + // 0 0 0 0 0 0 0 0 0 egr egr egr 0 0 0 + // 0 0 0 0 0 0 0 0 0 0 0 0 ear ear ear + // 0 0 0 0 0 0 0 0 0 0 0 0 ear ear ear + // 0 0 0 0 0 0 0 0 0 0 0 0 ear ear ear + int tot_in = 0, tot_out = 0; for(size_t i=0, iend=vpEdgesMono.size(); impcpi = new ConstraintPoseImu(VP->estimate().Rwb,VP->estimate().twb,VV->estimate(),VG->estimate(),VA->estimate(),H); + //设eie = ei->GetHessian2()+∑(e->GetHessian()) + //则最终Hessian Matrix长这样 + //eie eie eie eie eie eie ei ei ei 0 0 0 0 0 0 + //eie eie eie eie eie eie ei ei ei 0 0 0 0 0 0 + //eie eie eie eie eie eie ei ei ei 0 0 0 0 0 0 + //eie eie eie eie eie eie ei ei ei 0 0 0 0 0 0 + //eie eie eie eie eie eie ei ei ei 0 0 0 0 0 0 + //eie eie eie eie eie eie ei ei ei 0 0 0 0 0 0 + // ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + // ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + // ei ei ei ei ei ei ei ei ei 0 0 0 0 0 0 + // 0 0 0 0 0 0 0 0 0 egr egr egr 0 0 0 + // 0 0 0 0 0 0 0 0 0 egr egr egr 0 0 0 + // 0 0 0 0 0 0 0 0 0 egr egr egr 0 0 0 + // 0 0 0 0 0 0 0 0 0 0 0 0 ear ear ear + // 0 0 0 0 0 0 0 0 0 0 0 0 ear ear ear + // 0 0 0 0 0 0 0 0 0 0 0 0 ear ear ear + //构造一个ConstraintPoseImu对象,为下一帧提供先验约束 + //构造对象所使用的参数是当前帧P、V、BG、BA的估计值和H矩阵 + pFrame->mpcpi = new 
ConstraintPoseImu(VP->estimate().Rwb,VP->estimate().twb,VV->estimate(),VG->estimate(),VA->estimate(),H); + //在PoseInertialOptimizationLastFrame函数中,会将ConstraintPoseImu信息作为“上一帧先验约束”生成一条优化边 + + //返回值:内点数 = 总地图点数目 - 坏点(外点)数目 return nInitialCorrespondences-nBad; } +/** + * @brief 使用上一帧+当前帧的视觉信息和IMU信息,优化当前帧位姿 + * + * 可分为以下几个步骤: + * // Step 1:创建g2o优化器,初始化顶点和边 + * // Step 2:启动多轮优化,剔除外点 + * // Step 3:更新当前帧位姿、速度、IMU偏置 + * // Step 4:记录当前帧的优化状态,包括参数信息和边缘化后的海森矩阵 + * + * @param[in] pFrame 当前帧,也是待优化的帧 + * @param[in] bRecInit 调用这个函数的位置并没有传这个参数,因此它的值默认为false + * @return int 返回优化后的内点数 + */ int Optimizer::PoseInertialOptimizationLastFrame(Frame *pFrame, bool bRecInit) { + // Step 1:创建g2o优化器,初始化顶点和边 + //构建一个稀疏求解器 g2o::SparseOptimizer optimizer; g2o::BlockSolverX::LinearSolverType * linearSolver; + // 使用dense的求解器,(常见非dense求解器有cholmod线性求解器和shur补线性求解器) linearSolver = new g2o::LinearSolverDense(); g2o::BlockSolverX * solver_ptr = new g2o::BlockSolverX(linearSolver); + //使用高斯牛顿求解器 g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(solver_ptr); optimizer.setAlgorithm(solver); optimizer.setVerbose(false); + //当前帧单(左)目地图点数目 int nInitialMonoCorrespondences=0; int nInitialStereoCorrespondences=0; int nInitialCorrespondences=0; // Set Current Frame vertex + //当前帧的位姿,旋转+平移,6-dim VertexPose* VP = new VertexPose(pFrame); VP->setId(0); - VP->setFixed(false); + VP->setFixed(false); //需要优化,所以不固定 optimizer.addVertex(VP); + //当前帧的速度,3-dim VertexVelocity* VV = new VertexVelocity(pFrame); VV->setId(1); VV->setFixed(false); optimizer.addVertex(VV); + //当前帧的陀螺仪偏置,3-dim VertexGyroBias* VG = new VertexGyroBias(pFrame); VG->setId(2); VG->setFixed(false); optimizer.addVertex(VG); + //当前帧的加速度偏置,3-dim VertexAccBias* VA = new VertexAccBias(pFrame); VA->setId(3); VA->setFixed(false); optimizer.addVertex(VA); // Set MapPoint vertices + //当前帧的特征点总数 N = Nleft + Nright + //对于单目 N!=0, Nleft=-1,Nright=-1 + //对于双目 N!=0, Nleft!=-1,Nright!=-1 const int N = pFrame->N; + //当前帧左目的特征点总数 const int Nleft = pFrame->Nleft; + //当前帧是否存在右目(即是否为双目),存在为true + //?感觉可以更直接点啊 bRight = (Nright!=-1) const bool bRight = (Nleft!=-1); vector vpEdgesMono; @@ -7400,10 +7846,13 @@ int Optimizer::PoseInertialOptimizationLastFrame(Frame *pFrame, bool bRecInit) vnIndexEdgeMono.reserve(N); vnIndexEdgeStereo.reserve(N); + // 自由度为2的卡方分布,显著性水平为0.05,对应的临界阈值5.991 const float thHuberMono = sqrt(5.991); + // 自由度为3的卡方分布,显著性水平为0.05,对应的临界阈值7.815 const float thHuberStereo = sqrt(7.815); { + // 锁定地图点。由于需要使用地图点来构造顶点和边,因此不希望在构造的过程中部分地图点被改写造成不一致甚至是段错误 unique_lock lock(MapPoint::mGlobalMutex); for(int i=0; imvuRight[i]<0) || i < Nleft) { + //如果是双目情况下的左目 if(i < Nleft) // pair left-right + //使用未畸变校正的特征点 kpUn = pFrame->mvKeys[i]; + //如果是单目 else + //使用畸变校正过的特征点 kpUn = pFrame->mvKeysUn[i]; + //单目地图点计数增加 nInitialMonoCorrespondences++; + //当前地图点默认设置为不是外点 pFrame->mvbOutlier[i] = false; - + // 观测的特征点 Eigen::Matrix obs; obs << kpUn.pt.x, kpUn.pt.y; + //第一种边(视觉重投影约束):地图点投影到该帧图像的坐标与特征点坐标偏差尽可能小 EdgeMonoOnlyPose* e = new EdgeMonoOnlyPose(pMP->GetWorldPos(),0); + //将位姿作为第一个顶点 e->setVertex(0,VP); + + //设置观测值,即去畸变后的像素坐标 e->setMeasurement(obs); // Add here uncerteinty + // 获取不确定度,这里调用uncertainty2返回固定值1.0 + // ?这里返回1.0是作为缺省值,是否可以根据对视觉信息的信任度动态修改这个值,比如标定的误差? 
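// One hedged way to act on the question above: replace the constant 1.0 returned by
// uncertainty2() with a calibration-dependent variance. calibSigma2 below is a
// hypothetical value (e.g. the squared reprojection RMS from camera calibration),
// not an existing member of the camera model:
/*
    const float unc2 = calibSigma2;                                       // instead of the fixed 1.0
    const float invSigma2 = pFrame->mvInvLevelSigma2[kpUn.octave] / unc2; // larger unc2 -> lower weight
*/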
const float unc2 = pFrame->mpCamera->uncertainty2(obs); + //invSigma2 = (Inverse(协方差矩阵))^2,表明该约束在各个维度上的可信度 + // 图像金字塔层数越高,可信度越差 const float invSigma2 = pFrame->mvInvLevelSigma2[kpUn.octave]/unc2; + //设置该约束的信息矩阵 e->setInformation(Eigen::Matrix2d::Identity()*invSigma2); g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber; + // 设置鲁棒核函数,避免其误差的平方项出现数值过大的增长 注:后续在优化2次后会用e->setRobustKernel(0)禁掉鲁棒核函数 e->setRobustKernel(rk); + //重投影误差的自由度为2,设置对应的卡方阈值 rk->setDelta(thHuberMono); + //将第一种边加入优化器 optimizer.addEdge(e); + //将第一种边加入vpEdgesMono vpEdgesMono.push_back(e); + //将对应的特征点索引加入vnIndexEdgeMono vnIndexEdgeMono.push_back(i); } // Stereo observation - else if(!bRight) + else if(!bRight) //? 为什么是双目 { nInitialStereoCorrespondences++; pFrame->mvbOutlier[i] = false; @@ -7511,66 +7981,94 @@ int Optimizer::PoseInertialOptimizationLastFrame(Frame *pFrame, bool bRecInit) } } } - + //统计参与优化的地图点总数目 nInitialCorrespondences = nInitialMonoCorrespondences + nInitialStereoCorrespondences; // Set Previous Frame Vertex + //pFp为上一帧 Frame* pFp = pFrame->mpPrevFrame; + //上一帧的位姿,旋转+平移,6-dim VertexPose* VPk = new VertexPose(pFp); VPk->setId(4); VPk->setFixed(false); optimizer.addVertex(VPk); + //上一帧的速度,3-dim VertexVelocity* VVk = new VertexVelocity(pFp); VVk->setId(5); VVk->setFixed(false); optimizer.addVertex(VVk); + //上一帧的陀螺仪偏置,3-dim VertexGyroBias* VGk = new VertexGyroBias(pFp); VGk->setId(6); VGk->setFixed(false); optimizer.addVertex(VGk); + //上一帧的加速度偏置,3-dim VertexAccBias* VAk = new VertexAccBias(pFp); VAk->setId(7); VAk->setFixed(false); optimizer.addVertex(VAk); + //setFixed(false)这个设置使以上四个顶点(15个参数)的值随优化而变,这样做会给上一帧再提供一些优化空间 + //但理论上不应该优化过多,否则会有不良影响,故后面的代码会用第五种边来约束上一帧的变化量 + //第二种边(IMU预积分约束):两帧之间位姿的变化量与IMU预积分的值偏差尽可能小 EdgeInertial* ei = new EdgeInertial(pFrame->mpImuPreintegratedFrame); + //将上一帧四个顶点(P、V、BG、BA)和当前帧两个顶点(P、V)加入第二种边 ei->setVertex(0, VPk); ei->setVertex(1, VVk); ei->setVertex(2, VGk); ei->setVertex(3, VAk); ei->setVertex(4, VP); ei->setVertex(5, VV); + //把第二种边加入优化器 optimizer.addEdge(ei); + //第三种边(陀螺仪随机游走约束):陀螺仪的随机游走值在相邻帧间不会相差太多 residual=VG-VGk + //用大白话来讲就是用固定的VGK拽住VG,防止VG在优化中放飞自我 EdgeGyroRW* egr = new EdgeGyroRW(); + //将上一帧的BG加入第三种边 egr->setVertex(0,VGk); + //将当前帧的BG加入第三种边 egr->setVertex(1,VG); + //C值在预积分阶段更新,range(9,12)对应陀螺仪偏置的协方差,最终cvInfoG值为inv(∑(GyroRW^2/freq)) cv::Mat cvInfoG = pFrame->mpImuPreintegratedFrame->C.rowRange(9,12).colRange(9,12).inv(cv::DECOMP_SVD); Eigen::Matrix3d InfoG; for(int r=0;r<3;r++) for(int c=0;c<3;c++) InfoG(r,c)=cvInfoG.at(r,c); + + //设置信息矩阵 egr->setInformation(InfoG); + //把第三种边加入优化器 optimizer.addEdge(egr); + //第四种边(加速度随机游走约束):加速度的随机游走值在相近帧间不会相差太多 residual=VA-VAk + //用大白话来讲就是用固定的VAK拽住VA,防止VA在优化中放飞自我 EdgeAccRW* ear = new EdgeAccRW(); + //将上一帧的BA加入第四种边 ear->setVertex(0,VAk); + //将当前帧的BA加入第四种边 ear->setVertex(1,VA); + //C值在预积分阶段更新,range(12,15)对应加速度偏置的协方差,最终cvInfoG值为inv(∑(AccRW^2/freq)) cv::Mat cvInfoA = pFrame->mpImuPreintegratedFrame->C.rowRange(12,15).colRange(12,15).inv(cv::DECOMP_SVD); Eigen::Matrix3d InfoA; for(int r=0;r<3;r++) for(int c=0;c<3;c++) InfoA(r,c)=cvInfoA.at(r,c); + //设置信息矩阵 ear->setInformation(InfoA); + //把第四种边加入优化器 optimizer.addEdge(ear); + // ?既然有判空的操作,可以区分一下有先验信息(五条边)和无先验信息(四条边)的情况 if (!pFp->mpcpi) Verbose::PrintMess("pFp->mpcpi does not exist!!!\nPrevious Frame " + to_string(pFp->mnId), Verbose::VERBOSITY_NORMAL); + //第五种边(先验约束):上一帧信息随优化的改变量要尽可能小 EdgePriorPoseImu* ep = new EdgePriorPoseImu(pFp->mpcpi); + //将上一帧的四个顶点(P、V、BG、BA)加入第五种边 ep->setVertex(0,VPk); ep->setVertex(1,VVk); ep->setVertex(2,VGk); @@ -7578,32 +8076,53 @@ int 
Optimizer::PoseInertialOptimizationLastFrame(Frame *pFrame, bool bRecInit) g2o::RobustKernelHuber* rkp = new g2o::RobustKernelHuber; ep->setRobustKernel(rkp); rkp->setDelta(5); + //把第五种边加入优化器 optimizer.addEdge(ep); + // Step 2:启动多轮优化,剔除外点 + // We perform 4 optimizations, after each optimization we classify observation as inlier/outlier // At the next optimization, outliers are not included, but at the end they can be classified as inliers again. - + //与PoseInertialOptimizationLastKeyFrame函数对比,区别在于:在优化过程中保持卡方阈值不变 + // 以下参数的解释 + // 自由度为2的卡方分布,显著性水平为0.05,对应的临界阈值5.991 + // 自由度为3的卡方分布,显著性水平为0.05,对应的临界阈值7.815 + // 自由度为3的卡方分布,显著性水平为0.02,对应的临界阈值9.8 + // 自由度为3的卡方分布,显著性水平为0.001,对应的临界阈值15.6 + // 计算方法:https://stattrek.com/online-calculator/chi-square.aspx const float chi2Mono[4]={5.991,5.991,5.991,5.991}; const float chi2Stereo[4]={15.6f,9.8f,7.815f,7.815f}; + //4次优化的迭代次数都为10 const int its[4]={10,10,10,10}; + //坏点数 int nBad=0; + //单目坏点数 int nBadMono = 0; + //双目坏点数 int nBadStereo = 0; + //单目内点数 int nInliersMono = 0; + //双目内点数 int nInliersStereo = 0; + //内点数 int nInliers=0; for(size_t it=0; it<4; it++) { + // 初始化优化器,这里的参数0代表只对level为0的边进行优化(不传参数默认也是0) optimizer.initializeOptimization(0); + //每次优化迭代十次 optimizer.optimize(its[it]); + //每次优化都重新统计各类点的数目 nBad=0; nBadMono = 0; nBadStereo = 0; nInliers=0; nInliersMono=0; nInliersStereo=0; + //使用1.5倍的chi2Mono作为“近点”的卡方检验值,意味着地图点越近,检验越宽松 + //地图点如何定义为“近点”在下面的代码中有解释 float chi2close = 1.5*chi2Mono[it]; for(size_t i=0, iend=vpEdgesMono.size(); imvpMapPoints[idx]->mTrackDepth<10.f; + // 如果这条误差边是来自于outlier if(pFrame->mvbOutlier[idx]) { + //计算本次优化后的误差 e->computeError(); } + // 就是error*\Omega*error,表示了这条边考虑置信度以后的误差大小 const float chi2 = e->chi2(); + //判断某地图点为外点的条件有以下三种: + //1.该地图点不是近点并且误差大于卡方检验值chi2Mono[it] + //2.该地图点是近点并且误差大于卡方检验值chi2close + //3.深度不为正 + //每次优化后,用更小的卡方检验值,原因是随着优化的进行,对划分为内点的信任程度越来越低 if((chi2>chi2Mono[it]&&!bClose)||(bClose && chi2>chi2close)||!e->isDepthPositive()) { + //将该点设置为外点 pFrame->mvbOutlier[idx]=true; + //外点不参与下一轮优化 e->setLevel(1); + //单目坏点数+1 nBadMono++; } else { + //将该点设置为内点(暂时) pFrame->mvbOutlier[idx]=false; + //内点继续参与下一轮优化 e->setLevel(0); + //单目内点数+1 nInliersMono++; } + //从第三次优化开始就不设置鲁棒核函数了,原因是经过两轮优化已经趋向准确值,不会有太大误差 if (it==2) e->setRobustKernel(0); @@ -7668,7 +8205,9 @@ int Optimizer::PoseInertialOptimizationLastFrame(Frame *pFrame, bool bRecInit) e->setRobustKernel(0); } + //内点总数=单目内点数+双目内点数 nInliers = nInliersMono + nInliersStereo; + //坏点数=单目坏点数+双目坏点数 nBad = nBadMono + nBadStereo; if(optimizer.edges().size()<10) @@ -7678,17 +8217,21 @@ int Optimizer::PoseInertialOptimizationLastFrame(Frame *pFrame, bool bRecInit) } } - + //若4次优化后内点数小于30,尝试恢复一部分不那么糟糕的坏点 if ((nInliers<30) && !bRecInit) { + //重新从0开始统计坏点数 nBad=0; + //单目可容忍的卡方检验最大值(如果误差比这还大就不要挣扎了...) 
const float chi2MonoOut = 18.f; const float chi2StereoOut = 24.f; EdgeMonoOnlyPose* e1; EdgeStereoOnlyPose* e2; + //遍历所有单目特征点 for(size_t i=0, iend=vnIndexEdgeMono.size(); icomputeError(); if (e1->chi2()SetImuPoseVelocity(Converter::toCvMat(VP->estimate().Rwb),Converter::toCvMat(VP->estimate().twb),Converter::toCvMat(VV->estimate())); Vector6d b; b << VG->estimate(), VA->estimate(); + //给当前帧设置优化后的bg,ba pFrame->mImuBias = IMU::Bias(b[3],b[4],b[5],b[0],b[1],b[2]); + // Step 4:记录当前帧的优化状态,包括参数信息和边缘化后的海森矩阵 // Recover Hessian, marginalize previous frame states and generate new prior for frame + //包含本次优化所有信息矩阵的和,它代表本次优化对确定性的影响 Eigen::Matrix H; H.setZero(); + //第1步,加上EdgeInertial(IMU预积分约束)的海森矩阵 H.block<24,24>(0,0)+= ei->GetHessian(); + //第2步,加上EdgeGyroRW(陀螺仪随机游走约束)的信息矩阵: + //| 0~8 | 9~11 | 12~23 | 24~26 |27~29 + //9~11是上一帧的bg(3-dim),24~26是当前帧的bg(3-dim) Eigen::Matrix Hgr = egr->GetHessian(); H.block<3,3>(9,9) += Hgr.block<3,3>(0,0); H.block<3,3>(9,24) += Hgr.block<3,3>(0,3); H.block<3,3>(24,9) += Hgr.block<3,3>(3,0); H.block<3,3>(24,24) += Hgr.block<3,3>(3,3); + //第3步,加上EdgeAccRW(加速度随机游走约束)的信息矩阵: + //| 0~11 | 12~14 | 15~26 | 27~29 |30 + //12~14是上一帧的ba(3-dim),27~29是当前帧的ba(3-dim) Eigen::Matrix Har = ear->GetHessian(); H.block<3,3>(12,12) += Har.block<3,3>(0,0); H.block<3,3>(12,27) += Har.block<3,3>(0,3); H.block<3,3>(27,12) += Har.block<3,3>(3,0); H.block<3,3>(27,27) += Har.block<3,3>(3,3); + //第4步,加上EdgePriorPoseImu(先验约束)的信息矩阵: + //| 0~14 | 15~29 + //0~14是上一帧的P(6-dim)、V(3-dim)、BG(3-dim)、BA(3-dim) H.block<15,15>(0,0) += ep->GetHessian(); int tot_in = 0, tot_out = 0; @@ -7747,6 +8306,8 @@ int Optimizer::PoseInertialOptimizationLastFrame(Frame *pFrame, bool bRecInit) if(!pFrame->mvbOutlier[idx]) { + // 0~14 | 15~20 | 21~29 + //15~20是当前帧的P(6-dim) H.block<6,6>(15,15) += e->GetHessian(); tot_in++; } @@ -7762,6 +8323,7 @@ int Optimizer::PoseInertialOptimizationLastFrame(Frame *pFrame, bool bRecInit) if(!pFrame->mvbOutlier[idx]) { + // 0~14 | 15~20 | 21~29 H.block<6,6>(15,15) += e->GetHessian(); tot_in++; } @@ -7769,12 +8331,32 @@ int Optimizer::PoseInertialOptimizationLastFrame(Frame *pFrame, bool bRecInit) tot_out++; } + // H = |B E.t| ------> |0 0| + // |E A| |0 A-E*B.inv*E.t| H = Marginalize(H,0,14); + /* + Marginalize里的函数在此处的调用等效于: + Eigen::JacobiSVD svd(H.block(0, 0, 15, 15), Eigen::ComputeThinU | Eigen::ComputeThinV); + Eigen::JacobiSVD::SingularValuesType singularValues_inv = svd.singularValues(); + for (int i = 0; i < 15; ++i) + { + if (singularValues_inv(i) > 1e-6) + singularValues_inv(i) = 1.0 / singularValues_inv(i); + else + singularValues_inv(i) = 0; + } + Eigen::MatrixXd invHb = svd.matrixV() * singularValues_inv.asDiagonal() * svd.matrixU().transpose(); + H.block(15, 15, 15, 15) = H.block(15, 15, 15, 15) - H.block(15, 0, 15, 15) * invHb * H.block(0, 15, 15, 15); + */ + //构造一个ConstraintPoseImu对象,为下一帧提供先验约束 + //构造对象所使用的参数是当前帧P、V、BG、BA的估计值和边缘化后的H矩阵 pFrame->mpcpi = new ConstraintPoseImu(VP->estimate().Rwb,VP->estimate().twb,VV->estimate(),VG->estimate(),VA->estimate(),H.block<15,15>(15,15)); + //下一帧使用的EdgePriorPoseImu参数来自于此 delete pFp->mpcpi; pFp->mpcpi = NULL; + //返回值:内点数 = 总地图点数目 - 坏点(外点)数目 return nInitialCorrespondences-nBad; } diff --git a/src/System.cc b/src/System.cc index ec7e170..19b8247 100644 --- a/src/System.cc +++ b/src/System.cc @@ -114,7 +114,7 @@ System::System(const string &strVocFile, //词袋文件所在路 mpAtlas = new Atlas(0); if (mSensor==IMU_STEREO || mSensor==IMU_MONOCULAR) -
如果是有imu的传感器类型,将mbIsInertial设置为imu属性,以后的跟踪和预积分将和这个标志有关 + // 如果是有imu的传感器类型,设置mbIsInertial = true;以后的跟踪和预积分将和这个标志有关 mpAtlas->SetInertialSensor(); // Step 6 依次创建跟踪、局部建图、闭环、显示线程 @@ -175,6 +175,7 @@ System::System(const string &strVocFile, //词袋文件所在路 mpLoopCloser->SetLocalMapper(mpLocalMapper); // Fix verbosity + // 打印输出中间的信息,设置为安静模式 Verbose::SetTh(Verbose::VERBOSITY_QUIET); } @@ -354,7 +355,7 @@ cv::Mat System::TrackMonocular(const cv::Mat &im, const double ×tamp, const mbReset = false; mbResetActiveMap = false; } - //如果检测到重置活动地图,讲重置地图设置 + //如果检测到重置活动地图的标志为true,将重置地图 else if(mbResetActiveMap) { cout << "SYSTEM-> Reseting active map in monocular case" << endl; @@ -362,14 +363,14 @@ cv::Mat System::TrackMonocular(const cv::Mat &im, const double ×tamp, const mbResetActiveMap = false; } } - // 如果是单目VIO模式,把IMU数据存储到mlQueueImuData + // 如果是单目VIO模式,把IMU数据存储到队列mlQueueImuData if (mSensor == System::IMU_MONOCULAR) for(size_t i_imu = 0; i_imu < vImuMeas.size(); i_imu++) mpTracker->GrabImuData(vImuMeas[i_imu]); // 计算相机位姿 cv::Mat Tcw = mpTracker->GrabImageMonocular(im,timestamp,filename); - + // 更新跟踪状态和参数 unique_lock lock2(mMutexState); mTrackingState = mpTracker->mState; mTrackedMapPoints = mpTracker->mCurrentFrame.mvpMapPoints; diff --git a/src/Tracking.cc b/src/Tracking.cc index 56cddf5..276ba51 100644 --- a/src/Tracking.cc +++ b/src/Tracking.cc @@ -40,6 +40,15 @@ using namespace std; +// 程序中变量名的第一个字母如果为"m"则表示为类中的成员变量,member +// 第一个、第二个字母: +// "p"表示指针数据类型 +// "n"表示int类型 +// "b"表示bool类型 +// "s"表示set类型 +// "v"表示vector数据类型 +// 'l'表示list数据类型 +// "KF"表示KeyFrame数据类型 namespace ORB_SLAM3 { @@ -52,6 +61,7 @@ Tracking::Tracking(System *pSys, ORBVocabulary* pVoc, FrameDrawer *pFrameDrawer, mnInitialFrameId(0), mbCreatedMap(false), mnFirstFrameId(0), mpCamera2(nullptr) { // Load camera parameters from settings file + // Step 1 从配置文件中加载相机参数 cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ); bool b_parse_cam = ParseCamParamFile(fSettings); @@ -1032,7 +1042,7 @@ bool Tracking::ParseORBParamFile(cv::FileStorage &fSettings) bool b_miss_params = false; int nFeatures, nLevels, fIniThFAST, fMinThFAST; float fScaleFactor; - + // 每一帧提取的特征点数 cv::FileNode node = fSettings["ORBextractor.nFeatures"]; if(!node.empty() && node.isInt()) { @@ -1043,7 +1053,7 @@ bool Tracking::ParseORBParamFile(cv::FileStorage &fSettings) std::cerr << "*ORBextractor.nFeatures parameter doesn't exist or is not an integer*" << std::endl; b_miss_params = true; } - + // 图像建立金字塔时的变化尺度 1.2 node = fSettings["ORBextractor.scaleFactor"]; if(!node.empty() && node.isReal()) { @@ -1054,7 +1064,7 @@ bool Tracking::ParseORBParamFile(cv::FileStorage &fSettings) std::cerr << "*ORBextractor.scaleFactor parameter doesn't exist or is not a real number*" << std::endl; b_miss_params = true; } - + // 尺度金字塔的层数 8 node = fSettings["ORBextractor.nLevels"]; if(!node.empty() && node.isInt()) { @@ -1065,7 +1075,7 @@ bool Tracking::ParseORBParamFile(cv::FileStorage &fSettings) std::cerr << "*ORBextractor.nLevels parameter doesn't exist or is not an integer*" << std::endl; b_miss_params = true; } - + // 提取fast特征点的默认阈值 20 node = fSettings["ORBextractor.iniThFAST"]; if(!node.empty() && node.isInt()) { @@ -1076,7 +1086,7 @@ bool Tracking::ParseORBParamFile(cv::FileStorage &fSettings) std::cerr << "*ORBextractor.iniThFAST parameter doesn't exist or is not an integer*" << std::endl; b_miss_params = true; } - + // 如果默认阈值提取不出足够fast特征点,则使用最小阈值 8 node = fSettings["ORBextractor.minThFAST"]; if(!node.empty() && node.isInt()) { @@ -1098,6 +1108,7 @@ bool 
Tracking::ParseORBParamFile(cv::FileStorage &fSettings) if(mSensor==System::STEREO || mSensor==System::IMU_STEREO) mpORBextractorRight = new ORBextractor(nFeatures,fScaleFactor,nLevels,fIniThFAST,fMinThFAST); + // 在单目初始化的时候,会用mpIniORBextractor来作为特征点提取器,初始化使用5倍的特征点数目 if(mSensor==System::MONOCULAR || mSensor==System::IMU_MONOCULAR) mpIniORBextractor = new ORBextractor(5*nFeatures,fScaleFactor,nLevels,fIniThFAST,fMinThFAST); @@ -1214,16 +1225,19 @@ bool Tracking::ParseIMUParamFile(cv::FileStorage &fSettings) return true; } +//设置局部建图器 void Tracking::SetLocalMapper(LocalMapping *pLocalMapper) { mpLocalMapper=pLocalMapper; } +//设置回环检测器 void Tracking::SetLoopClosing(LoopClosing *pLoopClosing) { mpLoopClosing=pLoopClosing; } +//设置可视化查看器 void Tracking::SetViewer(Viewer *pViewer) { mpViewer=pViewer; @@ -1235,13 +1249,17 @@ void Tracking::SetStepByStep(bool bSet) } - +// 输入左右目图像,可以为RGB、BGR、RGBA、GRAY +// 1、将图像转为mImGray和imGrayRight并初始化mCurrentFrame +// 2、进行tracking过程 +// 输出世界坐标系到该帧相机坐标系的变换矩阵 cv::Mat Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat &imRectRight, const double ×tamp, string filename) { mImGray = imRectLeft; cv::Mat imGrayRight = imRectRight; mImRight = imRectRight; + // step 1 :将RGB或RGBA图像转为灰度图像 if(mImGray.channels()==3) { if(mbRGB) @@ -1255,6 +1273,7 @@ cv::Mat Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat &imRe cvtColor(imGrayRight,imGrayRight,cv::COLOR_BGR2GRAY); } } + // 这里考虑得十分周全,甚至连四通道的图像都考虑到了 else if(mImGray.channels()==4) { if(mbRGB) @@ -1270,7 +1289,18 @@ cv::Mat Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat &imRe } if (mSensor == System::STEREO && !mpCamera2) - mCurrentFrame = Frame(mImGray,imGrayRight,timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera); + mCurrentFrame = Frame( + mImGray, //左目图像 + imGrayRight, //右目图像 + timestamp, //时间戳 + mpORBextractorLeft, //左目特征提取器 + mpORBextractorRight, //右目特征提取器 + mpORBVocabulary, //字典 + mK, //内参矩阵 + mDistCoef, //去畸变参数 + mbf, //基线长度 + mThDepth, //远点,近点的区分阈值 + mpCamera); //相机模型 else if(mSensor == System::STEREO && mpCamera2) mCurrentFrame = Frame(mImGray,imGrayRight,timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera,mpCamera2,mTlr); else if(mSensor == System::IMU_STEREO && !mpCamera2) @@ -1285,18 +1315,23 @@ cv::Mat Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat &imRe vdORBExtract_ms.push_back(mCurrentFrame.mTimeORB_Ext); vdStereoMatch_ms.push_back(mCurrentFrame.mTimeStereoMatch); #endif - + // Step 3 :跟踪 Track(); + //返回位姿 return mCurrentFrame.mTcw.clone(); } - +// 输入左目RGB或RGBA图像和深度图 +// 1、将图像转为mImGray和imDepth并初始化mCurrentFrame +// 2、进行tracking过程 +// 输出世界坐标系到该帧相机坐标系的变换矩阵 cv::Mat Tracking::GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, const double ×tamp, string filename) { mImGray = imRGB; cv::Mat imDepth = imD; + // step 1:将RGB或RGBA图像转为灰度图像 if(mImGray.channels()==3) { if(mbRGB) @@ -1312,9 +1347,11 @@ cv::Mat Tracking::GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, const d cvtColor(mImGray,mImGray,cv::COLOR_BGRA2GRAY); } + // Step 2 :将深度相机的disparity转为Depth , 也就是转换成为真正尺度下的深度 + if((fabs(mDepthMapFactor-1.0f)>1e-5) || imDepth.type()!=CV_32F) imDepth.convertTo(imDepth,CV_32F,mDepthMapFactor); - + // Step 3:构造Frame mCurrentFrame = Frame(mImGray,imDepth,timestamp,mpORBextractorLeft,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera); mCurrentFrame.mNameFile = filename; @@ -1324,25 +1361,31 @@ cv::Mat Tracking::GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat 
&imD, const d vdORBExtract_ms.push_back(mCurrentFrame.mTimeORB_Ext); #endif + // Step 4:跟踪 Track(); + //返回当前帧的位姿 return mCurrentFrame.mTcw.clone(); } /** - * @brief 输入单目图像,输出世界坐标系到该帧相机坐标系的变换矩阵 + * @brief + * 输入左目RGB或RGBA图像,输出世界坐标系到该帧相机坐标系的变换矩阵 * - * @param[in] im 单目图像 - * @param[in] timestamp 时间戳 - * @param[in] filename 文件名(调试用) - * @return cv::Mat 世界坐标系到相机坐标系的变换矩阵 + * @param[in] im 单目图像 + * @param[in] timestamp 时间戳 + * @return cv::Mat + * + * Step 1 :将彩色图像转为灰度图像 + * Step 2 :构造Frame + * Step 3 :跟踪 */ cv::Mat Tracking::GrabImageMonocular(const cv::Mat &im, const double ×tamp, string filename) { mImGray = im; // Step 1 :将彩色图像转为灰度图像 - //若图片是3、4通道的,还需要转化成单通道灰度图 + //若图片是3、4通道的彩色图,还需要转化成单通道灰度图 if(mImGray.channels()==3) { if(mbRGB) @@ -1358,7 +1401,7 @@ cv::Mat Tracking::GrabImageMonocular(const cv::Mat &im, const double ×tamp, cvtColor(mImGray,mImGray,cv::COLOR_BGRA2GRAY); } - // Step 2 :构造Frame + // Step 2 :构造Frame类 if (mSensor == System::MONOCULAR) { if(mState==NOT_INITIALIZED || mState==NO_IMAGES_YET ||(lastID - initID) < mMaxFrames) @@ -1377,7 +1420,7 @@ cv::Mat Tracking::GrabImageMonocular(const cv::Mat &im, const double ×tamp, mCurrentFrame = Frame(mImGray,timestamp,mpORBextractorLeft,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth,&mLastFrame,*mpImuCalib); } - // t0存储未初始化时的第1帧时间戳 + // t0存储未初始化时的第1帧图像时间戳 if (mState==NO_IMAGES_YET) t0=timestamp; @@ -1397,13 +1440,19 @@ cv::Mat Tracking::GrabImageMonocular(const cv::Mat &im, const double ×tamp, return mCurrentFrame.mTcw.clone(); } -//将imu数据存放在mlQueueImuData的list链表里 + +/** + * @brief 将imu数据存放在mlQueueImuData的list链表里 + * + * @param[in] imuMeasurement + */ void Tracking::GrabImuData(const IMU::Point &imuMeasurement) { unique_lock lock(mMutexImuQueue); mlQueueImuData.push_back(imuMeasurement); } +// ?TODO void Tracking::PreintegrateIMU() { // Step 1.拿到两两帧之间待处理的预积分数据,组成一个集合 @@ -1412,7 +1461,7 @@ void Tracking::PreintegrateIMU() if(!mCurrentFrame.mpPrevFrame) { Verbose::PrintMess("non prev frame ", Verbose::VERBOSITY_NORMAL); - // ? 当前帧是否进行预积分标志 + // 当前帧是否进行预积分标志 mCurrentFrame.setIntegrated(); return; } @@ -1553,6 +1602,7 @@ void Tracking::PreintegrateIMU() Verbose::PrintMess("Preintegration is finished!! ", Verbose::VERBOSITY_DEBUG); } +// ?TODO /** * @brief 跟踪不成功的时候,用初始化好的imu数据做跟踪处理,通过IMU预测状态 * 两个地方用到: @@ -1750,7 +1800,7 @@ void Tracking::Track() mbStep = false; } - // Step 1 如局部建图里认为IMU有问题,重置地图 + // Step 1 如局部建图里认为IMU有问题,重置当前活跃地图 if(mpLocalMapper->mbBadImu) { cout << "TRACK: Reset map because local mapper set the bad imu flag " << endl; @@ -1771,6 +1821,7 @@ void Tracking::Track() cerr << "ERROR: Frame with a timestamp older than previous frame detected!" << endl; unique_lock lock(mMutexImuQueue); mlQueueImuData.clear(); + // 创建新地图 CreateMapInAtlas(); return; } @@ -1785,7 +1836,7 @@ void Tracking::Track() if(mpAtlas->isImuInitialized()) { cout << "Timestamp jump detected. State set to LOST. Reseting IMU integration..." << endl; - // ? BA2标志代表什么?,BA优化成功? + // ?TODO BA2标志代表什么?,BA优化成功? if(!pCurrentMap->GetIniertialBA2()) { // 如果当前子图中imu没有经过BA2,重置active地图 @@ -1799,7 +1850,7 @@ void Tracking::Track() } else { - // 如果当前子图中imu没有初始化,重置active地图 + // 如果当前子图中imu还没有初始化,重置active地图 cout << "Timestamp jump detected, before IMU initialization. Reseting..." << endl; mpSystem->ResetActiveMap(); } @@ -1820,7 +1871,7 @@ void Tracking::Track() } mLastProcessedState=mState; - // Step 4 IMU模式下对IMU数据进行预积分 -> // ? 
没有创建地图的情况下 + // Step 4 IMU模式下对IMU数据进行预积分:没有创建地图的情况下 if ((mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO) && !mbCreatedMap) { #ifdef REGISTER_TIMES @@ -1939,12 +1990,14 @@ void Tracking::Track() bOK = TrackReferenceKeyFrame(); } - // Step 6.3 如果经过跟踪参考关键帧、恒速模型跟踪都失败的话,并满足一定条件就要标记为LOST了 + // 新增了一个状态RECENTLY_LOST,主要是结合IMU看看能不能拽回来 + // Step 6.3 如果经过跟踪参考关键帧、恒速模型跟踪都失败的话,并满足一定条件就要标记为RECENTLY_LOST或LOST if (!bOK) { - // 条件1:如果当前帧距离上次重定位帧不久(时间在1s内) + // 条件1:如果当前帧距离上次重定位成功不到1s // mnFramesToResetIMU 表示经过多少帧后可以重置IMU,一般设置为和帧率相同,对应的时间是1s // 条件2:单目+IMU 或者 双目+IMU模式 + // 同时满足条件1,2,标记为LOST if ( mCurrentFrame.mnId<=(mnLastRelocFrameId+mnFramesToResetIMU) && (mSensor==System::IMU_MONOCULAR || mSensor==System::IMU_STEREO)) { @@ -1952,10 +2005,12 @@ void Tracking::Track() } else if(pCurrentMap->KeyFramesInMap()>10) { - // 当前地图中关键帧数目大于10 并且 当前帧距离上次重定位帧超过1s(隐藏条件) - // 状态标记为RECENTLY_LOST + // 条件1:当前地图中关键帧数目较多(大于10) + // 条件2(隐藏条件):当前帧距离上次重定位帧超过1s(说明还比较争气,值的救)或者非IMU模式 + // 同时满足条件1,2,则将状态标记为RECENTLY_LOST,后面会结合IMU预测的位姿看看能不能拽回来 cout << "KF in map: " << pCurrentMap->KeyFramesInMap() << endl; mState = RECENTLY_LOST; + // 记录丢失时间 mTimeStampLost = mCurrentFrame.mTimeStamp; //mCurrentFrame.SetPose(mLastFrame.mTcw); } @@ -1967,24 +2022,24 @@ void Tracking::Track() } else //跟踪不正常按照下面处理 { - if (mState == RECENTLY_LOST) { - // 如果是RECENTLY_LOST状态(仅存在IMU模式下,纯视觉模式无此状态) + // 如果是RECENTLY_LOST状态 Verbose::PrintMess("Lost for a short time", Verbose::VERBOSITY_NORMAL); // bOK先置为true bOK = true; - if((mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO)) // IMU模式 + if((mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO)) { + // IMU模式下可以用IMU来预测位姿,看能否拽回来 // Step 6.4 如果当前地图中IMU已经成功初始化,就用IMU数据预测位姿 if(pCurrentMap->isImuInitialized()) PredictStateIMU(); else bOK = false; - // 如果当前帧距离跟丢帧已经超过了5s(time_recently_lost默认为5) - // 将RECENTLY_LOST状态改为LOST + // 如果IMU模式下当前帧距离跟丢帧超过5s还没有找回(time_recently_lost默认为5s) + // 放弃了,将RECENTLY_LOST状态改为LOST if (mCurrentFrame.mTimeStamp-mTimeStampLost>time_recently_lost) { mState = LOST; @@ -2031,7 +2086,7 @@ void Tracking::Track() } } - else // 纯定位模式,时间关系暂时不看 TOSEE + else // 纯定位模式 { // Localization Mode: Local Mapping is deactivated (TODO Not available in inertial mode) // 只进行跟踪tracking,局部地图不工作 @@ -2193,7 +2248,7 @@ void Tracking::Track() if(bOK) // 此时还OK才说明跟踪成功了 mState = OK; - else if (mState == OK) // 由上图可知只有当正常跟踪成功,但局部地图跟踪失败时执行 + else if (mState == OK) // 由上图可知只有当第一阶段跟踪成功,但第二阶段局部地图跟踪失败时执行 { // 带有IMU时状态变为最近丢失,否则直接丢失 if (mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO) @@ -2224,13 +2279,13 @@ void Tracking::Track() ((mSensor == System::IMU_MONOCULAR) || (mSensor == System::IMU_STEREO)) && // IMU模式 pCurrentMap->isImuInitialized()) // IMU已经成功初始化 { - // TODO check this situation + // ?TODO check this situation Verbose::PrintMess("Saving pointer to frame. imu needs reset...", Verbose::VERBOSITY_NORMAL); Frame* pF = new Frame(mCurrentFrame); pF->mpPrevFrame = new Frame(mLastFrame); // Load preintegration - // 构造预积分处理器 + // ?构造预积分处理器 pF->mpImuPreintegratedFrame = new IMU::Preintegrated(mCurrentFrame.mpImuPreintegratedFrame); } @@ -2238,7 +2293,7 @@ void Tracking::Track() { if(bOK) // 跟踪成功 { - // 当前帧距离上次重定位帧刚好等于1s,重置IMU(还未实现 TODO) + // 当前帧距离上次重定位帧刚好等于1s,重置(还未实现 TODO) if(mCurrentFrame.mnId==(mnLastRelocFrameId+mnFramesToResetIMU)) { cout << "RESETING FRAME!!!" 
<< endl; @@ -2327,9 +2382,6 @@ void Tracking::Track() // 判断是否需要插入关键帧 bool bNeedKF = NeedNewKeyFrame(); - - - // Check if we need to insert a new keyframe // Step 9.4 根据条件来判断是否插入关键帧 // 需要同时满足下面条件1和2 @@ -2363,7 +2415,7 @@ void Tracking::Track() } // Reset if the camera get lost soon after initialization - // Step 10 如果跟踪失败,复位 + // Step 10 如果第二阶段跟踪失败,跟踪状态为LOST if(mState==LOST) { // 如果地图中关键帧小于5,重置当前地图,退出当前跟踪 @@ -2402,6 +2454,7 @@ void Tracking::Track() if(mState==OK || mState==RECENTLY_LOST) { // Store frame pose information to retrieve the complete camera trajectory afterwards. + // Step 11:记录位姿信息,用于最后保存所有的轨迹 if(!mCurrentFrame.mTcw.empty()) { // 计算相对姿态Tcr = Tcw * Twr, Twr = Trw^-1 @@ -2423,10 +2476,14 @@ void Tracking::Track() } } - - +/* + * @brief 双目和rgbd的地图初始化,比单目简单很多 + * + * 由于具有深度信息,直接生成MapPoints + */ void Tracking::StereoInitialization() { + // 初始化要求当前帧的特征点超过500 if(mCurrentFrame.N>500) { if (mSensor == System::IMU_STEREO) @@ -2458,23 +2515,33 @@ void Tracking::StereoInitialization() mCurrentFrame.SetImuPoseVelocity(Rwb0, twb0, cv::Mat::zeros(3,1,CV_32F)); } else - mCurrentFrame.SetPose(cv::Mat::eye(4,4,CV_32F)); + mCurrentFrame.SetPose(cv::Mat::eye(4,4,CV_32F)); // 设定初始位姿为单位旋转,0平移 // Create KeyFrame + // 将当前帧构造为初始关键帧 KeyFrame* pKFini = new KeyFrame(mCurrentFrame,mpAtlas->GetCurrentMap(),mpKeyFrameDB); // Insert KeyFrame in the map + // 在地图中添加该初始关键帧 mpAtlas->AddKeyFrame(pKFini); // Create MapPoints and asscoiate to KeyFrame if(!mpCamera2){ + // 为每个特征点构造MapPoint for(int i=0; i0) { + // 通过反投影得到该特征点的世界坐标系下3D坐标 cv::Mat x3D = mCurrentFrame.UnprojectStereo(i); + // 将3D点构造为MapPoint MapPoint* pNewMP = new MapPoint(x3D,pKFini,mpAtlas->GetCurrentMap()); + // 为该MapPoint添加属性: + // a.观测到该MapPoint的关键帧 + // b.该MapPoint的描述子 + // c.该MapPoint的平均观测方向和深度范围 pNewMP->AddObservation(pKFini,i); pKFini->AddMapPoint(pNewMP,i); pNewMP->ComputeDistinctiveDescriptors(); @@ -2510,8 +2577,10 @@ void Tracking::StereoInitialization() Verbose::PrintMess("New Map created with " + to_string(mpAtlas->MapPointsInMap()) + " points", Verbose::VERBOSITY_QUIET); + // 在局部地图中添加该初始关键帧 mpLocalMapper->InsertKeyFrame(pKFini); + // 更新当前帧为上一帧 mLastFrame = Frame(mCurrentFrame); mnLastKeyFrameId=mCurrentFrame.mnId; mpLastKeyFrame = pKFini; @@ -2522,37 +2591,59 @@ void Tracking::StereoInitialization() mpReferenceKF = pKFini; mCurrentFrame.mpReferenceKF = pKFini; + // 把当前(最新的)局部MapPoints作为ReferenceMapPoints mpAtlas->SetReferenceMapPoints(mvpLocalMapPoints); mpAtlas->GetCurrentMap()->mvpKeyFrameOrigins.push_back(pKFini); mpMapDrawer->SetCurrentCameraPose(mCurrentFrame.mTcw); + //追踪成功 mState=OK; } } - +/* + * @brief 单目的地图初始化 + * + * 并行地计算基础矩阵和单应性矩阵,选取其中一个模型,恢复出最开始两帧之间的相对姿态以及点云 + * 得到初始两帧的匹配、相对运动、初始MapPoints + * + * Step 1:(未创建)得到用于初始化的第一帧,初始化需要两帧 + * Step 2:(已创建)如果当前帧特征点数大于100,则得到用于单目初始化的第二帧 + * Step 3:在mInitialFrame与mCurrentFrame中找匹配的特征点对 + * Step 4:如果初始化的两帧之间的匹配点太少,重新初始化 + * Step 5:通过H模型或F模型进行单目初始化,得到两帧间相对运动、初始MapPoints + * Step 6:删除那些无法进行三角化的匹配点 + * Step 7:将三角化得到的3D点包装成MapPoints + */ void Tracking::MonocularInitialization() { - + // Step 1 如果单目初始器还没有被创建,则创建。后面如果重新初始化时会清掉这个 if(!mpInitializer) { // Set Reference Frame + // 单目初始帧的特征点数必须大于100 if(mCurrentFrame.mvKeys.size()>100) { - + // 初始化需要两帧,分别是mInitialFrame,mCurrentFrame mInitialFrame = Frame(mCurrentFrame); + // 用当前帧更新上一帧 mLastFrame = Frame(mCurrentFrame); + // mvbPrevMatched 记录"上一帧"所有特征点 mvbPrevMatched.resize(mCurrentFrame.mvKeysUn.size()); for(size_t i=0; i1.0))) { delete mpInitializer; @@ -2580,10 +2672,22 @@ void Tracking::MonocularInitialization() } // Find correspondences - 
ORBmatcher matcher(0.9,true); - int nmatches = matcher.SearchForInitialization(mInitialFrame,mCurrentFrame,mvbPrevMatched,mvIniMatches,100); + // Step 3 在mInitialFrame与mCurrentFrame中找匹配的特征点对 + ORBmatcher matcher( + 0.9, //最佳的和次佳特征点评分的比值阈值,这里是比较宽松的,跟踪时一般是0.7 + true); //检查特征点的方向 + + // 对 mInitialFrame,mCurrentFrame 进行特征点匹配 + // mvbPrevMatched为参考帧的特征点坐标,初始化存储的是mInitialFrame中特征点坐标,匹配后存储的是匹配好的当前帧的特征点坐标 + // mvIniMatches 保存参考帧F1中特征点是否匹配上,index保存是F1对应特征点索引,值保存的是匹配好的F2特征点索引 + int nmatches = matcher.SearchForInitialization( + mInitialFrame,mCurrentFrame, //初始化时的参考帧和当前帧 + mvbPrevMatched, //在初始化参考帧中提取得到的特征点 + mvIniMatches, //保存匹配关系 + 100); //搜索窗口大小 // Check if there are enough correspondences + // Step 4 验证匹配结果,如果初始化的两帧之间的匹配点太少,重新初始化 if(nmatches<100) { delete mpInitializer; @@ -2595,9 +2699,15 @@ void Tracking::MonocularInitialization() cv::Mat Rcw; // Current Camera Rotation cv::Mat tcw; // Current Camera Translation vector vbTriangulated; // Triangulated Correspondences (mvIniMatches) - - if(mpCamera->ReconstructWithTwoViews(mInitialFrame.mvKeysUn,mCurrentFrame.mvKeysUn,mvIniMatches,Rcw,tcw,mvIniP3D,vbTriangulated)) + // Step 5 通过H模型或F模型进行单目初始化,得到两帧间相对运动、初始MapPoints + if(mpCamera->ReconstructWithTwoViews(mInitialFrame.mvKeysUn, //初始帧的特征点 + mCurrentFrame.mvKeysUn, //当前帧的特征点 + mvIniMatches, //当前帧和参考帧的特征点的匹配关系 + Rcw,tcw, //初始化得到的相机的位姿 + mvIniP3D, //进行三角化得到的空间点集合 + vbTriangulated)) //以及对应于mvIniMatches来讲,其中哪些点被三角化了 { + // Step 6 初始化成功后,删除那些无法进行三角化的匹配点 for(size_t i=0, iend=mvIniMatches.size(); i=0 && !vbTriangulated[i]) @@ -2608,56 +2718,83 @@ void Tracking::MonocularInitialization() } // Set Frame Poses + // Step 7 将初始化的第一帧作为世界坐标系,因此第一帧变换矩阵为单位矩阵 mInitialFrame.SetPose(cv::Mat::eye(4,4,CV_32F)); + // 由Rcw和tcw构造Tcw,并赋值给mTcw,mTcw为世界坐标系到相机坐标系的变换矩阵 cv::Mat Tcw = cv::Mat::eye(4,4,CV_32F); Rcw.copyTo(Tcw.rowRange(0,3).colRange(0,3)); tcw.copyTo(Tcw.rowRange(0,3).col(3)); mCurrentFrame.SetPose(Tcw); + // Step 8 创建初始化地图点MapPoints + // Initialize函数会得到mvIniP3D, + // mvIniP3D是cv::Point3f类型的一个容器,是个存放3D点的临时变量, + // CreateInitialMapMonocular将3D点包装成MapPoint类型存入KeyFrame和Map中 CreateInitialMapMonocular(); } } } - - +/** + * @brief 单目相机成功初始化后用三角化得到的点生成MapPoints + * + */ void Tracking::CreateInitialMapMonocular() { - // Create KeyFrames + // Create KeyFrames 认为单目初始化时候的参考帧和当前帧都是关键帧 KeyFrame* pKFini = new KeyFrame(mInitialFrame,mpAtlas->GetCurrentMap(),mpKeyFrameDB); KeyFrame* pKFcur = new KeyFrame(mCurrentFrame,mpAtlas->GetCurrentMap(),mpKeyFrameDB); if(mSensor == System::IMU_MONOCULAR) pKFini->mpImuPreintegrated = (IMU::Preintegrated*)(NULL); - + // Step 1 将初始关键帧,当前关键帧的描述子转为BoW pKFini->ComputeBoW(); pKFcur->ComputeBoW(); // Insert KFs in the map + // Step 2 将关键帧插入到地图 mpAtlas->AddKeyFrame(pKFini); mpAtlas->AddKeyFrame(pKFcur); + // Create MapPoints and asscoiate to keyframes + // Step 3 用初始化得到的3D点来生成地图点MapPoints + // mvIniMatches[i] 表示初始化两帧特征点匹配关系。 + // 具体解释:i表示帧1中关键点的索引值,vMatches12[i]的值为帧2的关键点索引值,没有匹配关系的话,vMatches12[i]值为 -1 for(size_t i=0; iGetCurrentMap()); + // Step 3.2 为该MapPoint添加属性: + // a.观测到该MapPoint的关键帧 + // b.该MapPoint的描述子 + // c.该MapPoint的平均观测方向和深度范围 + + // 表示该KeyFrame的2D特征点和对应的3D地图点 pKFini->AddMapPoint(pMP,i); pKFcur->AddMapPoint(pMP,mvIniMatches[i]); + // a.表示该MapPoint可以被哪个KeyFrame的哪个特征点观测到 pMP->AddObservation(pKFini,i); pMP->AddObservation(pKFcur,mvIniMatches[i]); + // b.从众多观测到该MapPoint的特征点中挑选最有代表性的描述子 pMP->ComputeDistinctiveDescriptors(); + // c.更新该MapPoint平均观测方向以及观测距离的范围 pMP->UpdateNormalAndDepth(); //Fill Current Frame structure + //mvIniMatches下标i表示在初始化参考帧中的特征点的序号 + //mvIniMatches[i]是初始化当前帧中的特征点的序号 
mCurrentFrame.mvpMapPoints[mvIniMatches[i]] = pMP; mCurrentFrame.mvbOutlier[mvIniMatches[i]] = false; @@ -2667,6 +2804,8 @@ void Tracking::CreateInitialMapMonocular() // Update Connections + // Step 3.3 更新关键帧间的连接关系 + // 在3D点和关键帧之间建立边,每个边有一个权重,边的权重是该关键帧与当前帧公共3D点的个数 pKFini->UpdateConnections(); pKFcur->UpdateConnections(); @@ -2675,10 +2814,12 @@ void Tracking::CreateInitialMapMonocular() // Bundle Adjustment Verbose::PrintMess("New Map created with " + to_string(mpAtlas->MapPointsInMap()) + " points", Verbose::VERBOSITY_QUIET); + // Step 4 全局BA优化,同时优化所有位姿和三维点 Optimizer::GlobalBundleAdjustemnt(mpAtlas->GetCurrentMap(),20); pKFcur->PrintPointDistribution(); - + // Step 5 取场景的中值深度,用于尺度归一化 + // 为什么是 pKFini 而不是 pKCur ? 答:都可以的,内部做了位姿变换了 float medianDepth = pKFini->ComputeSceneMedianDepth(2); float invMedianDepth; if(mSensor == System::IMU_MONOCULAR) @@ -2686,6 +2827,7 @@ void Tracking::CreateInitialMapMonocular() else invMedianDepth = 1.0f/medianDepth; + //两个条件,一个是平均深度要大于0,另外一个是在当前帧中被观测到的地图点的数目应该大于50 if(medianDepth<0 || pKFcur->TrackedMapPoints(1)<50) // TODO Check, originally 100 tracks { Verbose::PrintMess("Wrong initialization, reseting...", Verbose::VERBOSITY_NORMAL); @@ -2693,12 +2835,16 @@ void Tracking::CreateInitialMapMonocular() return; } + // Step 6 将两帧之间的变换归一化到平均深度1的尺度下 // Scale initial baseline cv::Mat Tc2w = pKFcur->GetPose(); + // x/z y/z 将z归一化到1 Tc2w.col(3).rowRange(0,3) = Tc2w.col(3).rowRange(0,3)*invMedianDepth; pKFcur->SetPose(Tc2w); // Scale points + // Step 7 把3D点的尺度也归一化到1 + // 为什么是pKFini? 是不是就算是使用 pKFcur 得到的结果也是相同的? 答:是的,因为是同样的三维点 vector vpAllMapPoints = pKFini->GetMapPointMatches(); for(size_t iMP=0; iMPmpImuPreintegrated->GetUpdatedBias(),pKFcur->mImuCalib); } - + // Step 8 将关键帧插入局部地图,更新归一化后的位姿、局部地图点 mpLocalMapper->InsertKeyFrame(pKFini); mpLocalMapper->InsertKeyFrame(pKFcur); mpLocalMapper->mFirstTs=pKFcur->mTimeStamp; @@ -2731,6 +2877,7 @@ void Tracking::CreateInitialMapMonocular() mvpLocalKeyFrames.push_back(pKFcur); mvpLocalKeyFrames.push_back(pKFini); + // 单目初始化之后,得到的初始地图中的所有点都是局部地图点 mvpLocalMapPoints=mpAtlas->GetAllMapPoints(); mpReferenceKF = pKFcur; mCurrentFrame.mpReferenceKF = pKFcur; @@ -2754,14 +2901,18 @@ void Tracking::CreateInitialMapMonocular() mpAtlas->GetCurrentMap()->mvpKeyFrameOrigins.push_back(pKFini); - mState=OK; + mState=OK;// 初始化成功,至此,初始化过程完成 initID = pKFcur->mnId; } - +/** + * @brief 在Atlas中创建新地图 + * + */ void Tracking::CreateMapInAtlas() { + // ?TODO mnLastInitFrameId = mCurrentFrame.mnId; mpAtlas->CreateNewMap(); if (mSensor==System::IMU_STEREO || mSensor == System::IMU_MONOCULAR) @@ -2802,28 +2953,47 @@ void Tracking::CreateMapInAtlas() mbCreatedMap = true; } - +/* + * @brief 检查上一帧中的地图点是否需要被替换 + * + * Local Mapping线程可能会将关键帧中某些地图点进行替换,由于tracking中需要用到上一帧地图点,所以这里检查并更新上一帧中被替换的地图点 + * @see LocalMapping::SearchInNeighbors() + */ void Tracking::CheckReplacedInLastFrame() { for(int i =0; iGetReplaced(); if(pRep) { + //然后替换一下 mLastFrame.mvpMapPoints[i] = pRep; } } } } - +/* + * @brief 用参考关键帧的地图点来对当前普通帧进行跟踪 + * + * Step 1:将当前普通帧的描述子转化为BoW向量 + * Step 2:通过词袋BoW加速当前帧与参考帧之间的特征点匹配 + * Step 3: 将上一帧的位姿态作为当前帧位姿的初始值 + * Step 4: 通过优化3D-2D的重投影误差来获得位姿 + * Step 5:剔除优化后的匹配点中的外点 + * @return 如果匹配数超10,返回true + * + */ bool Tracking::TrackReferenceKeyFrame() { // Compute Bag of Words vector + // Step 1:将当前帧的描述子转化为BoW向量 mCurrentFrame.ComputeBoW(); // We perform first an ORB matching with the reference keyframe @@ -2831,31 +3001,42 @@ bool Tracking::TrackReferenceKeyFrame() ORBmatcher matcher(0.7,true); vector vpMapPointMatches; - int nmatches = 
matcher.SearchByBoW(mpReferenceKF,mCurrentFrame,vpMapPointMatches); + // Step 2:通过词袋BoW加速当前帧与参考帧之间的特征点匹配 + int nmatches = matcher.SearchByBoW( + mpReferenceKF, //参考关键帧 + mCurrentFrame, //当前帧 + vpMapPointMatches); //存储匹配关系 + // 匹配数目小于15,认为跟踪失败 if(nmatches<15) { cout << "TRACK_REF_KF: Less than 15 matches!!\n"; return false; } + // Step 3:将上一帧的位姿态作为当前帧位姿的初始值 mCurrentFrame.mvpMapPoints = vpMapPointMatches; - mCurrentFrame.SetPose(mLastFrame.mTcw); + mCurrentFrame.SetPose(mLastFrame.mTcw); // 用上一次的Tcw设置初值,在PoseOptimization可以收敛快一些 //mCurrentFrame.PrintPointDistribution(); // cout << " TrackReferenceKeyFrame mLastFrame.mTcw: " << mLastFrame.mTcw << endl; + // Step 4:通过优化3D-2D的重投影误差来获得位姿 Optimizer::PoseOptimization(&mCurrentFrame); // Discard outliers + // Step 5:剔除优化后的匹配点中的外点 + //之所以在优化之后才剔除外点,是因为在优化的过程中就有了对这些外点的标记 int nmatchesMap = 0; for(int i =0; i(NULL); @@ -2871,6 +3052,7 @@ bool Tracking::TrackReferenceKeyFrame() nmatches--; } else if(mCurrentFrame.mvpMapPoints[i]->Observations()>0) + //匹配的内点计数++ nmatchesMap++; } } @@ -2879,21 +3061,37 @@ bool Tracking::TrackReferenceKeyFrame() if (mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO) return true; else - return nmatchesMap>=10; + return nmatchesMap>=10; // 跟踪成功的数目超过10才认为跟踪成功,否则跟踪失败 } +/** + * @brief 更新上一帧位姿,在上一帧中生成临时地图点 + * 单目情况:只计算了上一帧的世界坐标系位姿 + * 双目和rgbd情况:选取有有深度值的并且没有被选为地图点的点生成新的临时地图点,提高跟踪鲁棒性 + */ void Tracking::UpdateLastFrame() { // Update pose according to reference keyframe - KeyFrame* pRef = mLastFrame.mpReferenceKF; + // Step 1:利用参考关键帧更新上一帧在世界坐标系下的位姿 + // 上一普通帧的参考关键帧,注意这里用的是参考关键帧(位姿准)而不是上上一帧的普通帧 + KeyFrame* pRef = mLastFrame.mpReferenceKF; + // ref_keyframe 到 lastframe的位姿变换 cv::Mat Tlr = mlRelativeFramePoses.back(); - mLastFrame.SetPose(Tlr*pRef->GetPose()); + // 将上一帧的世界坐标系下的位姿计算出来 + // l:last, r:reference, w:world + // Tlw = Tlr*Trw + mLastFrame.SetPose(Tlr*pRef->GetPose()); + + // 如果上一帧为关键帧,或者单目/单目惯性,SLAM模式(?)的情况,则退出 if(mnLastKeyFrameId==mLastFrame.mnId || mSensor==System::MONOCULAR || mSensor==System::IMU_MONOCULAR || !mbOnlyTracking) return; + // Step 2:对于双目或rgbd相机,为上一帧生成新的临时地图点 + // 注意这些地图点只是用来跟踪,不加入到地图中,跟踪完后会删除 // Create "visual odometry" MapPoints // We sort points according to their measured depth by the stereo/RGB-D sensor + // Step 2.1:得到上一帧中具有有效深度值的特征点(不一定是地图点) vector > vDepthIdx; vDepthIdx.reserve(mLastFrame.N); for(int i=0; i0) { + // vDepthIdx第一个元素是某个点的深度,第二个元素是对应的特征点id vDepthIdx.push_back(make_pair(z,i)); } } + // 如果上一帧中没有有效深度的点,那么就直接退出 if(vDepthIdx.empty()) return; + // 按照深度从小到大排序 sort(vDepthIdx.begin(),vDepthIdx.end()); // We insert all close points (depthObservations()<1) { + // 地图点被创建后就没有被观测,认为不靠谱,也需要重新创建 bCreateNew = true; } if(bCreateNew) { + // Step 2.3:需要创建的点,包装为地图点。只是为了提高双目和RGBD的跟踪成功率,并没有添加复杂属性,因为后面会扔掉 + // 反投影到世界坐标系中 cv::Mat x3D = mLastFrame.UnprojectStereo(i); MapPoint* pNewMP = new MapPoint(x3D,mpAtlas->GetCurrentMap(),&mLastFrame,i); + // 加入上一帧的地图点中 mLastFrame.mvpMapPoints[i]=pNewMP; + // 标记为临时添加的MapPoint,之后在CreateNewKeyFrame之前会全部删除 mlpTemporalPoints.push_back(pNewMP); nPoints++; } else { + // 因为从近到远排序,记录其中不需要创建地图点的个数 nPoints++; } + // Step 2.4:如果地图点质量不好,停止创建地图点 + // 停止新增临时地图点必须同时满足以下条件: + // 1、当前的点的深度已经超过了设定的深度阈值(35倍基线) + // 2、nPoints已经超过100个点,说明距离比较远了,可能不准确,停掉退出 if(vDepthIdx[j].first>mThDepth && nPoints>100) { break; } } } - +/** + * @brief 根据恒定速度模型用上一帧地图点来对当前帧进行跟踪 + * Step 1:更新上一帧的位姿;对于双目或RGB-D相机,还会根据深度值生成临时地图点 + * Step 2:根据上一帧特征点对应地图点进行投影匹配 + * Step 3:优化当前帧位姿 + * Step 4:剔除地图点中外点 + * @return 如果匹配数大于10,认为跟踪成功,返回true + */ bool Tracking::TrackWithMotionModel() { + // 最小距离 < 0.9*次小距离 
匹配成功,检查旋转 ORBmatcher matcher(0.9,true); // Update last frame pose according to its reference keyframe // Create "visual odometry" points if in Localization Mode + // Step 1:更新上一帧的位姿;对于双目或RGB-D相机,还会根据深度值生成临时地图点 UpdateLastFrame(); - - + // Step 2:根据IMU或者恒速模型得到当前帧的初始位姿。 + if (mpAtlas->isImuInitialized() && (mCurrentFrame.mnId>mnLastRelocFrameId+mnFramesToResetIMU)) { + // IMU完成初始化 并且 距离重定位挺久不需要重置IMU,用IMU来估计位姿 // Predict ste with IMU if it is initialized and it doesnt need reset PredictStateIMU(); return true; } else { + // 根据之前估计的速度,用恒速模型得到当前帧的初始位姿。 mCurrentFrame.SetPose(mVelocity*mLastFrame.mTcw); } - + // 清空当前帧的地图点 fill(mCurrentFrame.mvpMapPoints.begin(),mCurrentFrame.mvpMapPoints.end(),static_cast(NULL)); // Project points seen in previous frame + // 设置特征匹配过程中的搜索半径 int th; if(mSensor==System::STEREO) @@ -2981,9 +3206,11 @@ bool Tracking::TrackWithMotionModel() else th=15; + // Step 3:用上一帧地图点进行投影匹配,如果匹配点不够,则扩大搜索半径再来一次 int nmatches = matcher.SearchByProjection(mCurrentFrame,mLastFrame,th,mSensor==System::MONOCULAR || mSensor==System::IMU_MONOCULAR); // If few matches, uses a wider window search + // 如果匹配点太少,则扩大搜索半径再来一次 if(nmatches<20) { Verbose::PrintMess("Not enough matches, wider window search!!", Verbose::VERBOSITY_NORMAL); @@ -2994,6 +3221,7 @@ bool Tracking::TrackWithMotionModel() } + // 如果还是不能够获得足够的匹配点,那么就认为跟踪失败 if(nmatches<20) { Verbose::PrintMess("Not enough matches!!", Verbose::VERBOSITY_NORMAL); @@ -3004,9 +3232,11 @@ bool Tracking::TrackWithMotionModel() } // Optimize frame pose with all matches + // Step 4:利用3D-2D投影关系,优化当前帧位姿 Optimizer::PoseOptimization(&mCurrentFrame); // Discard outliers + // Step 5:剔除地图点中外点 int nmatchesMap = 0; for(int i =0; i(NULL); @@ -3028,12 +3259,14 @@ bool Tracking::TrackWithMotionModel() nmatches--; } else if(mCurrentFrame.mvpMapPoints[i]->Observations()>0) + // 累加成功匹配到的地图点数目 nmatchesMap++; } } if(mbOnlyTracking) { + // 纯定位模式下:如果成功追踪的地图点非常少,那么这里的mbVO标志就会置位 mbVO = nmatchesMap<10; return nmatches>20; } @@ -3041,9 +3274,24 @@ bool Tracking::TrackWithMotionModel() if (mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO) return true; else - return nmatchesMap>=10; + return nmatchesMap>=10; // 匹配超过10个点就认为跟踪成功 } +/** + * @brief 用局部地图进行跟踪,进一步优化位姿 + * + * 1. 更新局部地图,包括局部关键帧和关键点 + * 2. 对局部MapPoints进行投影匹配 + * 3. 根据匹配对估计当前帧的姿态 + * 4. 
根据姿态剔除误匹配 + * @return true if success + * + * Step 1:更新局部关键帧mvpLocalKeyFrames和局部地图点mvpLocalMapPoints + * Step 2:在局部地图中查找与当前帧匹配的MapPoints, 其实也就是对局部地图点进行跟踪 + * Step 3:更新局部所有MapPoints后对位姿再次优化 + * Step 4:更新当前帧的MapPoints被观测程度,并统计跟踪局部地图的效果 + * Step 5:决定是否跟踪成功 + */ bool Tracking::TrackLocalMap() { @@ -3054,6 +3302,8 @@ bool Tracking::TrackLocalMap() #ifdef REGISTER_TIMES std::chrono::steady_clock::time_point time_StartLMUpdate = std::chrono::steady_clock::now(); #endif + // Update Local KeyFrames and Local Points + // Step 1:更新局部关键帧 mvpLocalKeyFrames 和局部地图点 mvpLocalMapPoints UpdateLocalMap(); #ifdef REGISTER_TIMES std::chrono::steady_clock::time_point time_StartSearchLP = std::chrono::steady_clock::now(); @@ -3061,7 +3311,7 @@ bool Tracking::TrackLocalMap() double timeUpdatedLM_ms = std::chrono::duration_cast >(time_StartSearchLP - time_StartLMUpdate).count(); vdUpdatedLM_ms.push_back(timeUpdatedLM_ms); #endif - + // Step 2:筛选局部地图中新增的在视野范围内的地图点,投影到当前帧搜索匹配,得到更多的匹配关系 SearchLocalPoints(); #ifdef REGISTER_TIMES std::chrono::steady_clock::time_point time_StartPoseOpt = std::chrono::steady_clock::now(); @@ -3071,6 +3321,7 @@ bool Tracking::TrackLocalMap() #endif // TOO check outliers before PO + // 查看内外点数目,调试用 int aux1 = 0, aux2=0; for(int i=0; iisImuInitialized()) Optimizer::PoseOptimization(&mCurrentFrame); else { + // 初始化,重定位,重新开启一个地图都会使mnLastRelocFrameId变化 if(mCurrentFrame.mnId<=mnLastRelocFrameId+mnFramesToResetIMU) { + // 如果距离上次重定位时间比较近,积累的IMU数据较少,优化时暂不使用IMU数据 Verbose::PrintMess("TLM: PoseOptimization ", Verbose::VERBOSITY_DEBUG); Optimizer::PoseOptimization(&mCurrentFrame); } - else + else // 如果积累的IMU数据量比较多,考虑使用IMU数据优化 { + // mbMapUpdated变化见Tracking::PredictStateIMU() + // 未更新地图 if(!mbMapUpdated) { Verbose::PrintMess("TLM: PoseInertialOptimizationLastFrame ", Verbose::VERBOSITY_DEBUG); @@ -3113,7 +3370,7 @@ bool Tracking::TrackLocalMap() vnKeyFramesLM.push_back(mvpLocalKeyFrames.size()); vnMapPointsLM.push_back(mvpLocalMapPoints.size()); - + // 查看内外点数目,调试用 aux1 = 0, aux2 = 0; for(int i=0; iIncreaseFound(); + //查看当前是否是在纯定位过程 if(!mbOnlyTracking) { + // 如果该地图点被相机观测数目nObs大于0,匹配内点计数+1 + // nObs: 被观测到的相机数目,单目+1,双目或RGB-D则+2 if(mCurrentFrame.mvpMapPoints[i]->Observations()>0) mnMatchesInliers++; } else + // 记录当前帧跟踪到的地图点数目,用于统计跟踪效果 mnMatchesInliers++; } + // 如果这个地图点是外点,并且当前相机输入还是双目的时候,就删除这个点 + // 原因分析:因为双目本身可以左右互匹配,删掉无所谓 else if(mSensor==System::STEREO) mCurrentFrame.mvpMapPoints[i] = static_cast(NULL); } @@ -3149,13 +3415,16 @@ bool Tracking::TrackLocalMap() // Decide if the tracking was succesful // More restrictive if there was a relocalization recently mpLocalMapper->mnMatchesInliers=mnMatchesInliers; + // Step 5:根据跟踪匹配数目及重定位情况决定是否跟踪成功 + // 如果最近刚刚发生了重定位,那么至少成功匹配50个点才认为是成功跟踪 if(mCurrentFrame.mnId10)&&(mState==RECENTLY_LOST)) return true; - + // IMU模式下至少成功跟踪15个才算成功 if (mSensor == System::IMU_MONOCULAR) { if(mnMatchesInliers<15) @@ -3176,13 +3445,25 @@ bool Tracking::TrackLocalMap() } else { + //以上情况都不满足,只要跟踪的地图点大于30个就认为成功了 if(mnMatchesInliers<30) return false; else return true; } } - +/** + * @brief 判断当前帧是否需要插入关键帧 + * + * Step 1:纯VO模式下不插入关键帧,如果局部地图被闭环检测使用,则不插入关键帧 + * Step 2:如果距离上一次重定位比较近,或者关键帧数目超出最大限制,不插入关键帧 + * Step 3:得到参考关键帧跟踪到的地图点数量 + * Step 4:查询局部地图管理器是否繁忙,也就是当前能否接受新的关键帧 + * Step 5:对于双目或RGBD摄像头,统计可以添加的有效地图点总数 和 跟踪到的地图点数量 + * Step 6:决策是否需要插入关键帧 + * @return true 需要 + * @return false 不需要 + */ bool Tracking::NeedNewKeyFrame() { if(((mSensor == System::IMU_MONOCULAR) || (mSensor == System::IMU_STEREO)) && !mpAtlas->GetCurrentMap()->isImuInitialized()) @@ -3194,11 +3475,12 @@ bool 
Tracking::NeedNewKeyFrame() else return false; } - + // Step 1:纯VO模式下不插入关键帧 if(mbOnlyTracking) return false; // If Local Mapping is freezed by a Loop Closure do not insert keyframes + // Step 2:如果局部地图线程被闭环检测使用,则不插入关键帧 if(mpLocalMapper->isStopped() || mpLocalMapper->stopRequested()) { return false; @@ -3207,32 +3489,46 @@ bool Tracking::NeedNewKeyFrame() // Return false if IMU is initialazing if (mpLocalMapper->IsInitializing()) return false; + + // 获取当前地图中的关键帧数目 const int nKFs = mpAtlas->KeyFramesInMap(); // Do not insert keyframes if not enough frames have passed from last relocalisation + // mCurrentFrame.mnId是当前帧的ID + // mnLastRelocFrameId是最近一次重定位帧的ID + // mMaxFrames等于图像输入的帧率 + // Step 3:如果距离上一次重定位比较近,并且关键帧数目超出最大限制,不插入关键帧 if(mCurrentFrame.mnIdmMaxFrames) { return false; } // Tracked MapPoints in the reference keyframe + // Step 4:得到参考关键帧跟踪到的地图点数量 + // UpdateLocalKeyFrames 函数中会将与当前关键帧共视程度最高的关键帧设定为当前帧的参考关键帧 + + // 地图点的最小观测次数 int nMinObs = 3; if(nKFs<=2) nMinObs=2; + // 参考关键帧地图点中观测的数目>= nMinObs的地图点数目 int nRefMatches = mpReferenceKF->TrackedMapPoints(nMinObs); // Local Mapping accept keyframes? + // Step 5:查询局部地图线程是否繁忙,当前能否接受新的关键帧 bool bLocalMappingIdle = mpLocalMapper->AcceptKeyFrames(); // Check how many "close" points are being tracked and how many could be potentially created. - int nNonTrackedClose = 0; - int nTrackedClose= 0; + // Step 6:对于双目或RGBD摄像头,统计成功跟踪的近点的数量,如果跟踪到的近点太少,没有跟踪到的近点较多,可以插入关键帧 + int nNonTrackedClose = 0; //双目或RGB-D中没有跟踪到的近点 + int nTrackedClose= 0; //双目或RGB-D中成功跟踪的近点(三维点) if(mSensor!=System::MONOCULAR && mSensor!=System::IMU_MONOCULAR) { int N = (mCurrentFrame.Nleft == -1) ? mCurrentFrame.N : mCurrentFrame.Nleft; for(int i =0; i0 && mCurrentFrame.mvDepth[i]70); + // Step 7:决策是否需要插入关键帧 // Thresholds + // Step 7.1:设定比例阈值,当前帧和参考关键帧跟踪到点的比例,比例越大,越倾向于增加关键帧 float thRefRatio = 0.75f; + + // 关键帧只有一帧,那么插入关键帧的阈值设置的低一点,插入频率较低 if(nKFs<2) thRefRatio = 0.4f; + //单目情况下插入关键帧的频率很高 if(mSensor==System::MONOCULAR) thRefRatio = 0.9f; @@ -3266,12 +3568,18 @@ bool Tracking::NeedNewKeyFrame() } // Condition 1a: More than "MaxFrames" have passed from last keyframe insertion + // Step 7.2:很长时间没有插入关键帧,可以插入 const bool c1a = mCurrentFrame.mnId>=mnLastKeyFrameId+mMaxFrames; // Condition 1b: More than "MinFrames" have passed and Local Mapping is idle + // Step 7.3:满足插入关键帧的最小间隔并且localMapper处于空闲状态,可以插入 const bool c1b = ((mCurrentFrame.mnId>=mnLastKeyFrameId+mMinFrames) && bLocalMappingIdle); //Condition 1c: tracking is weak - const bool c1c = mSensor!=System::MONOCULAR && mSensor!=System::IMU_MONOCULAR && mSensor!=System::IMU_STEREO && (mnMatchesInliers15); // Temporal condition for Inertial cases @@ -3300,8 +3608,10 @@ bool Tracking::NeedNewKeyFrame() { // If the mapping accepts keyframes, insert keyframe. // Otherwise send a signal to interrupt BA + // Step 7.6:local mapping空闲时可以直接插入,不空闲的时候要根据情况插入 if(bLocalMappingIdle) { + //可以插入关键帧 return true; } else @@ -3309,33 +3619,54 @@ bool Tracking::NeedNewKeyFrame() mpLocalMapper->InterruptBA(); if(mSensor!=System::MONOCULAR && mSensor!=System::IMU_MONOCULAR) { + // 队列里不能阻塞太多关键帧 + // tracking插入关键帧不是直接插入,而且先插入到mlNewKeyFrames中, + // 然后localmapper再逐个pop出来插入到mspKeyFrames if(mpLocalMapper->KeyframesInQueue()<3) + //队列中的关键帧数目不是很多,可以插入 return true; else + //队列中缓冲的关键帧数目太多,暂时不能插入 return false; } else + //对于单目情况,就直接无法插入关键帧了 + //? 为什么这里对单目情况的处理不一样? 
+ //回答:可能是单目关键帧相对比较密集 return false; } } else + //不满足上面的条件,自然不能插入关键帧 return false; } +/** + * @brief 创建新的关键帧 + * 对于非单目的情况,同时创建新的MapPoints + * + * Step 1:将当前帧构造成关键帧 + * Step 2:将当前关键帧设置为当前帧的参考关键帧 + * Step 3:对于双目或rgbd摄像头,为当前帧生成新的MapPoints + */ void Tracking::CreateNewKeyFrame() { + // 如果局部建图线程正在初始化或关闭了,就无法插入关键帧 if(mpLocalMapper->IsInitializing()) return; if(!mpLocalMapper->SetNotStop(true)) return; + // Step 1:将当前帧构造成关键帧 KeyFrame* pKF = new KeyFrame(mCurrentFrame,mpAtlas->GetCurrentMap(),mpKeyFrameDB); if(mpAtlas->isImuInitialized()) pKF->bImu = true; pKF->SetNewBias(mCurrentFrame.mImuBias); + // Step 2:将当前关键帧设置为当前帧的参考关键帧 + // 在UpdateLocalKeyFrames函数中会将与当前关键帧共视程度最高的关键帧设定为当前帧的参考关键帧 mpReferenceKF = pKF; mCurrentFrame.mpReferenceKF = pKF; @@ -3352,9 +3683,11 @@ void Tracking::CreateNewKeyFrame() { mpImuPreintegratedFromLastKF = new IMU::Preintegrated(pKF->GetImuBias(),pKF->mImuCalib); } - + // 这段代码和 Tracking::UpdateLastFrame 中的那一部分代码功能相同 + // Step 3:对于双目或rgbd摄像头,为当前帧生成新的地图点;单目无操作 if(mSensor!=System::MONOCULAR && mSensor != System::IMU_MONOCULAR) // TODO check if incluide imu_stereo { + // 根据Tcw计算mRcw、mtcw和mRwc、mOw mCurrentFrame.UpdatePoseMatrices(); // cout << "create new MPs" << endl; // We sort points by the measured depth by the stereo/RGBD sensor. @@ -3363,7 +3696,7 @@ void Tracking::CreateNewKeyFrame() int maxPoint = 100; if(mSensor == System::IMU_STEREO) maxPoint = 100; - + // Step 3.1:得到当前帧有深度值的特征点(不一定是地图点) vector > vDepthIdx; int N = (mCurrentFrame.Nleft != -1) ? mCurrentFrame.Nleft : mCurrentFrame.N; vDepthIdx.reserve(mCurrentFrame.N); @@ -3372,14 +3705,18 @@ void Tracking::CreateNewKeyFrame() float z = mCurrentFrame.mvDepth[i]; if(z>0) { + // 第一个元素是深度,第二个元素是对应的特征点的id vDepthIdx.push_back(make_pair(z,i)); } } if(!vDepthIdx.empty()) { + // Step 3.2:按照深度从小到大排序 sort(vDepthIdx.begin(),vDepthIdx.end()); + // Step 3.3:从中找出不是地图点的生成临时地图点 + // 处理的近点的个数 int nPoints = 0; for(size_t j=0; j(NULL); } + // 如果需要就新建地图点,这里的地图点不是临时的,是全局地图中新建地图点,用于跟踪 if(bCreateNew) { cv::Mat x3D; @@ -3408,6 +3747,7 @@ void Tracking::CreateNewKeyFrame() } MapPoint* pNewMP = new MapPoint(x3D,pKF,mpAtlas->GetCurrentMap()); + // 这些添加属性的操作是每次创建MapPoint后都要做的 pNewMP->AddObservation(pKF,i); //Check if it is a stereo observation in order to not @@ -3428,9 +3768,13 @@ void Tracking::CreateNewKeyFrame() } else { + // 因为从近到远排序,记录其中不需要创建地图点的个数 nPoints++; } + // Step 3.4:停止新建地图点必须同时满足以下条件: + // 1、当前的点的深度已经超过了设定的深度阈值(35倍基线) + // 2、nPoints已经超过100个点,说明距离比较远了,可能不准确,停掉退出 if(vDepthIdx[j].first>mThDepth && nPoints>maxPoint) { break; @@ -3442,19 +3786,28 @@ void Tracking::CreateNewKeyFrame() } } - + // Step 4:插入关键帧 + // 关键帧插入到列表 mlNewKeyFrames中,等待local mapping线程临幸 mpLocalMapper->InsertKeyFrame(pKF); + // 插入好了,允许局部建图停止 mpLocalMapper->SetNotStop(false); + // 当前帧成为新的关键帧,更新 mnLastKeyFrameId = mCurrentFrame.mnId; mpLastKeyFrame = pKF; //cout << "end creating new KF" << endl; } + +/** + * @brief 用局部地图点进行投影匹配,得到更多的匹配关系 + * 注意:局部地图点中已经是当前帧地图点的不需要再投影,只需要将此外的并且在视野范围内的点和当前帧进行投影匹配 + */ void Tracking::SearchLocalPoints() { // Do not search map points already matched + // Step 1:遍历当前帧的地图点,标记这些地图点不参与之后的投影搜索匹配 for(vector::iterator vit=mCurrentFrame.mvpMapPoints.begin(), vend=mCurrentFrame.mvpMapPoints.end(); vit!=vend; vit++) { MapPoint* pMP = *vit; @@ -3466,29 +3819,39 @@ void Tracking::SearchLocalPoints() } else { + // 更新能观测到该点的帧数加1(被当前帧观测了) pMP->IncreaseVisible(); + // 标记该点被当前帧观测到 pMP->mnLastFrameSeen = mCurrentFrame.mnId; + // 标记该点在后面搜索匹配时不被投影,因为已经有匹配了 pMP->mbTrackInView = false; pMP->mbTrackInViewR = false; } } } + // 准备进行投影匹配的点的数目 int nToMatch=0; // 
Project points in frame and check its visibility + // Step 2:判断所有局部地图点中除当前帧地图点外的点,是否在当前帧视野范围内 for(vector::iterator vit=mvpLocalMapPoints.begin(), vend=mvpLocalMapPoints.end(); vit!=vend; vit++) { MapPoint* pMP = *vit; + // 已经被当前帧观测到的地图点肯定在视野范围内,跳过 if(pMP->mnLastFrameSeen == mCurrentFrame.mnId) continue; + // 跳过坏点 if(pMP->isBad()) continue; // Project (this fills MapPoint variables for matching) + // 判断地图点是否在在当前帧视野内 if(mCurrentFrame.isInFrustum(pMP,0.5)) { + // 观测到该点的帧数加1 pMP->IncreaseVisible(); + // 只有在视野范围内的地图点才参与之后的投影匹配 nToMatch++; } if(pMP->mbTrackInView) @@ -3497,11 +3860,12 @@ void Tracking::SearchLocalPoints() } } + // Step 3:如果需要进行投影匹配的点的数目大于0,就进行投影匹配,增加更多的匹配关系 if(nToMatch>0) { ORBmatcher matcher(0.8); int th = 1; - if(mSensor==System::RGBD) + if(mSensor==System::RGBD) //RGBD相机输入的时候,搜索的阈值会变得稍微大一些 th=3; if(mpAtlas->isImuInitialized()) { @@ -3516,43 +3880,60 @@ void Tracking::SearchLocalPoints() } // If the camera has been relocalised recently, perform a coarser search + // 如果不久前进行过重定位,那么进行一个更加宽泛的搜索,阈值需要增大 if(mCurrentFrame.mnIdmbFarPoints, mpLocalMapper->mThFarPoints); } } +/** + * @brief 更新LocalMap + * + * 局部地图包括: + * 1、K1个关键帧、K2个临近关键帧和参考关键帧 + * 2、由这些关键帧观测到的MapPoints + */ void Tracking::UpdateLocalMap() { // This is for visualization + // 设置参考地图点用于绘图显示局部地图点(红色) mpAtlas->SetReferenceMapPoints(mvpLocalMapPoints); // Update + // 用共视图来更新局部关键帧和局部地图点 UpdateLocalKeyFrames(); UpdateLocalPoints(); } +/* + * @brief 更新局部关键点。先把局部地图清空,然后将局部关键帧的有效地图点添加到局部地图中 + */ void Tracking::UpdateLocalPoints() { + // Step 1:清空局部地图点 mvpLocalMapPoints.clear(); int count_pts = 0; - + // Step 2:遍历局部关键帧 mvpLocalKeyFrames for(vector::const_reverse_iterator itKF=mvpLocalKeyFrames.rbegin(), itEndKF=mvpLocalKeyFrames.rend(); itKF!=itEndKF; ++itKF) { KeyFrame* pKF = *itKF; const vector vpMPs = pKF->GetMapPointMatches(); + // step 2:将局部关键帧的地图点添加到mvpLocalMapPoints for(vector::const_iterator itMP=vpMPs.begin(), itEndMP=vpMPs.end(); itMP!=itEndMP; itMP++) { MapPoint* pMP = *itMP; if(!pMP) continue; + // 用该地图点的成员变量mnTrackReferenceForFrame 记录当前帧的id + // 表示它已经是当前帧的局部地图点了,可以防止重复添加局部地图点 if(pMP->mnTrackReferenceForFrame==mCurrentFrame.mnId) continue; if(!pMP->isBad()) @@ -3565,10 +3946,20 @@ void Tracking::UpdateLocalPoints() } } - +/** + * @brief 跟踪局部地图函数里,更新局部关键帧 + * 方法是遍历当前帧的地图点,将观测到这些地图点的关键帧和相邻的关键帧及其父子关键帧,作为mvpLocalKeyFrames + * Step 1:遍历当前帧的地图点,记录所有能观测到当前帧地图点的关键帧 + * Step 2:更新局部关键帧(mvpLocalKeyFrames),添加局部关键帧包括以下3种类型 + * 类型1:能观测到当前帧地图点的关键帧,也称一级共视关键帧 + * 类型2:一级共视关键帧的共视关键帧,称为二级共视关键帧 + * 类型3:一级共视关键帧的子关键帧、父关键帧 + * Step 3:更新当前帧的参考关键帧,与自己共视程度最高的关键帧作为参考关键帧 + */ void Tracking::UpdateLocalKeyFrames() { // Each map point vote for the keyframes in which it has been observed + // Step 1:遍历当前帧的地图点,记录所有能观测到当前帧地图点的关键帧 map keyframeCounter; if(!mpAtlas->isImuInitialized() || (mCurrentFrame.mnIdisBad()) { + // 得到观测到该地图点的关键帧和该地图点在关键帧中的索引 const map> observations = pMP->GetObservations(); + // 由于一个地图点可以被多个关键帧观测到,因此对于每一次观测,都对观测到这个地图点的关键帧进行累计投票 for(map>::const_iterator it=observations.begin(), itend=observations.end(); it!=itend; it++) + // 这里的操作非常精彩! 
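+                    // A minimal self-contained sketch of the same voting idiom (illustrative only,
+                    // not code from this file):
+                    //     std::map<KeyFrame*, int> counter;
+                    //     for (auto& obs : observations)   // obs.first is the observing KeyFrame
+                    //         counter[obs.first]++;        // operator[] value-initializes a missing count to 0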
+                // map[key] = value,当要插入的键存在时,会覆盖键对应的原来的值。如果键不存在,则添加一组键值对
+                // it->first 是地图点看到的关键帧,同一个关键帧看到的地图点会累加到该关键帧计数
+                // 所以最后keyframeCounter 第一个参数表示某个关键帧,第2个参数表示该关键帧看到了多少当前帧(mCurrentFrame)的地图点,也就是共视程度
                keyframeCounter[it->first]++;
        }
        else
        {
@@ -3615,56 +4012,72 @@
    }

+    // 存储具有最多观测次数(max)的关键帧
    int max=0;
    KeyFrame* pKFmax= static_cast<KeyFrame*>(NULL);

+    // Step 2:更新局部关键帧(mvpLocalKeyFrames),添加局部关键帧有3种类型
+    // 先清空局部关键帧
    mvpLocalKeyFrames.clear();
+    // 先申请3倍内存,不够后面再加
    mvpLocalKeyFrames.reserve(3*keyframeCounter.size());

    // All keyframes that observe a map point are included in the local map. Also check which keyframe shares most points
+    // Step 2.1 类型1:能观测到当前帧地图点的关键帧作为局部关键帧 (将邻居拉拢入伙)(一级共视关键帧)
    for(map<KeyFrame*,int>::const_iterator it=keyframeCounter.begin(), itEnd=keyframeCounter.end(); it!=itEnd; it++)
    {
        KeyFrame* pKF = it->first;

+        // 如果设定为要删除的,跳过
        if(pKF->isBad())
            continue;
-
+
+        // 寻找具有最大观测数目的关键帧
        if(it->second>max)
        {
            max=it->second;
            pKFmax=pKF;
        }

+        // 添加到局部关键帧的列表里
        mvpLocalKeyFrames.push_back(pKF);
+        // 用该关键帧的成员变量mnTrackReferenceForFrame 记录当前帧的id
+        // 表示它已经是当前帧的局部关键帧了,可以防止重复添加局部关键帧
        pKF->mnTrackReferenceForFrame = mCurrentFrame.mnId;
    }

    // Include also some not-already-included keyframes that are neighbors to already-included keyframes
+    // Step 2.2 遍历一级共视关键帧,寻找更多的局部关键帧
    for(vector<KeyFrame*>::const_iterator itKF=mvpLocalKeyFrames.begin(), itEndKF=mvpLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
    {
        // Limit the number of keyframes
+        // 处理的局部关键帧不超过80帧
        if(mvpLocalKeyFrames.size()>80)
            break;

        KeyFrame* pKF = *itKF;

+        // 类型2:一级共视关键帧的共视(前10个)关键帧,称为二级共视关键帧(将邻居的邻居拉拢入伙)
+        // 如果共视帧不足10帧,那么就返回所有具有共视关系的关键帧
        const vector<KeyFrame*> vNeighs = pKF->GetBestCovisibilityKeyFrames(10);
-
-
+        // vNeighs 是按照共视程度从大到小排列
        for(vector<KeyFrame*>::const_iterator itNeighKF=vNeighs.begin(), itEndNeighKF=vNeighs.end(); itNeighKF!=itEndNeighKF; itNeighKF++)
        {
            KeyFrame* pNeighKF = *itNeighKF;
            if(!pNeighKF->isBad())
            {
+                // mnTrackReferenceForFrame防止重复添加局部关键帧
                if(pNeighKF->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
                {
                    mvpLocalKeyFrames.push_back(pNeighKF);
                    pNeighKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
+                    //? 找到一个就直接跳出for循环?
                    break;
                }
            }
        }

+        // 类型3:将一级共视关键帧的子关键帧作为局部关键帧(将邻居的孩子们拉拢入伙)
        const set<KeyFrame*> spChilds = pKF->GetChilds();
        for(set<KeyFrame*>::const_iterator sit=spChilds.begin(), send=spChilds.end(); sit!=send; sit++)
        {
@@ -3675,18 +4088,22 @@
            {
                mvpLocalKeyFrames.push_back(pChildKF);
                pChildKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
+                //? 找到一个就直接跳出for循环?
                break;
            }
        }
    }

+        // 类型3:将一级共视关键帧的父关键帧(将邻居的父母们拉拢入伙)
        KeyFrame* pParent = pKF->GetParent();
        if(pParent)
        {
+            // mnTrackReferenceForFrame防止重复添加局部关键帧
            if(pParent->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
            {
                mvpLocalKeyFrames.push_back(pParent);
                pParent->mnTrackReferenceForFrame=mCurrentFrame.mnId;
+                //! 
感觉是个bug!如果找到父关键帧会直接跳出整个循环 break; } } @@ -3712,6 +4129,7 @@ void Tracking::UpdateLocalKeyFrames() } } + // Step 3:更新当前帧的参考关键帧,与自己共视程度最高的关键帧作为参考关键帧 if(pKFmax) { mpReferenceKF = pKFmax; @@ -3719,6 +4137,18 @@ void Tracking::UpdateLocalKeyFrames() } } +/** + * @details 重定位过程 + * @return true + * @return false + * + * Step 1:计算当前帧特征点的词袋向量 + * Step 2:找到与当前帧相似的候选关键帧 + * Step 3:通过BoW进行匹配 + * Step 4:通过EPnP算法估计姿态 + * Step 5:通过PoseOptimization对姿态进行优化求解 + * Step 6:如果内点较少,则通过投影的方式对之前未匹配的点进行匹配,再进行优化求解 + */ bool Tracking::Relocalization() { Verbose::PrintMess("Starting relocalization", Verbose::VERBOSITY_NORMAL); @@ -3768,6 +4198,7 @@ bool Tracking::Relocalization() { // 当前帧和候选关键帧用BoW进行快速匹配,匹配结果记录在vvpMapPointMatches,nmatches表示匹配的数目 int nmatches = matcher.SearchByBoW(pKF,mCurrentFrame,vvpMapPointMatches[i]); + // 如果和当前帧的匹配数小于15,那么只能放弃这个关键帧 if(nmatches<15) { vbDiscarded[i] = true; @@ -3803,8 +4234,10 @@ bool Tracking::Relocalization() // 为什么搞这么复杂?答:是担心误闭环 while(nCandidates>0 && !bMatch) { + //遍历当前所有的候选关键帧 for(int i=0; i=50) { // 根据投影匹配的结果,再次采用3D-2D pnp BA优化位姿 @@ -3920,6 +4354,7 @@ bool Tracking::Relocalization() // If the pose is supported by enough inliers stop ransacs and continue + // 如果对于当前的候选关键帧已经有足够的内点(50个)了,那么就认为重定位成功 if(nGood>=50) { bMatch = true; @@ -3927,15 +4362,18 @@ bool Tracking::Relocalization() break; } } - } + }//一直运行,知道已经没有足够的关键帧,或者是已经有成功匹配上的关键帧 } + // 折腾了这么久还是没有匹配上,重定位失败 if(!bMatch) { return false; } else { + // 如果匹配上了,说明当前帧重定位成功了(当前帧已经有了自己的位姿) + // 记录成功重定位帧的id,防止短时间多次重定位 mnLastRelocFrameId = mCurrentFrame.mnId; cout << "Relocalized!!" << endl; return true; @@ -3943,10 +4381,11 @@ bool Tracking::Relocalization() } -void Tracking::Reset(bool bLocMap) +//整个追踪线程执行复位操作 +void Tracking::Reset() { Verbose::PrintMess("System Reseting", Verbose::VERBOSITY_NORMAL); - + //基本上是挨个请求各个线程终止 if(mpViewer) { mpViewer->RequestStop(); @@ -3980,6 +4419,7 @@ void Tracking::Reset(bool bLocMap) mpAtlas->SetInertialSensor(); mnInitialFrameId = 0; + //然后复位各种变量 KeyFrame::nNextId = 0; Frame::nNextId = 0; mState = NO_IMAGES_YET; @@ -4131,6 +4571,7 @@ void Tracking::ChangeCalibration(const string &strSettingPath) mbf = fSettings["Camera.bf"]; + //做标记,表示在初始化帧的时候将会是第一个帧,要对它进行一些特殊的初始化操作 Frame::mbInitialComputations = true; } diff --git a/src/TwoViewReconstruction.cc b/src/TwoViewReconstruction.cc index af41678..240fde9 100644 --- a/src/TwoViewReconstruction.cc +++ b/src/TwoViewReconstruction.cc @@ -20,25 +20,35 @@ #include "Thirdparty/DBoW2/DUtils/Random.h" -#include - +#include using namespace std; namespace ORB_SLAM3 { -TwoViewReconstruction::TwoViewReconstruction(cv::Mat& K, float sigma, int iterations) +TwoViewReconstruction::TwoViewReconstruction(cv::Mat &K, float sigma, int iterations) { mK = K.clone(); mSigma = sigma; - mSigma2 = sigma*sigma; + mSigma2 = sigma * sigma; mMaxIterations = iterations; } -bool TwoViewReconstruction::Reconstruct(const std::vector& vKeys1, const std::vector& vKeys2, const vector &vMatches12, +/** + * @brief 单目初始化重要的环节,先获得rt在通过三角化恢复3d点坐标 + * @param vKeys1 第一帧的关键点 + * @param vKeys2 第二帧的关键点 + * @param vMatches12 匹配关系,长度与vKeys1一样,对应位置存放vKeys2中关键点的下标 + * @param R21 顾名思义 + * @param t21 顾名思义 + * @param vP3D 恢复出的三维点 + * @param vbTriangulated 是否三角化成功,用于统计匹配点数量 + */ +bool TwoViewReconstruction::Reconstruct(const std::vector &vKeys1, const std::vector &vKeys2, const vector &vMatches12, cv::Mat &R21, cv::Mat &t21, vector &vP3D, vector &vbTriangulated) { + // 1. 
准备工作,提取匹配关系及准备RANSAC mvKeys1.clear(); mvKeys2.clear(); @@ -47,18 +57,18 @@ bool TwoViewReconstruction::Reconstruct(const std::vector& vKeys1, // Fill structures with current keypoints and matches with reference frame // Reference Frame: 1, Current Frame: 2 - mvMatches12.clear(); + mvMatches12.clear(); // 存放匹配点的id mvMatches12.reserve(mvKeys2.size()); mvbMatched1.resize(mvKeys1.size()); - for(size_t i=0, iend=vMatches12.size();i=0) + if (vMatches12[i] >= 0) { - mvMatches12.push_back(make_pair(i,vMatches12[i])); - mvbMatched1[i]=true; + mvMatches12.push_back(make_pair(i, vMatches12[i])); + mvbMatched1[i] = true; } else - mvbMatched1[i]=false; + mvbMatched1[i] = false; } const int N = mvMatches12.size(); @@ -68,28 +78,31 @@ bool TwoViewReconstruction::Reconstruct(const std::vector& vKeys1, vAllIndices.reserve(N); vector vAvailableIndices; - for(int i=0; i >(mMaxIterations,vector(8,0)); + // 默认200次 + mvSets = vector>(mMaxIterations, vector(8, 0)); DUtils::Random::SeedRandOnce(0); - - for(int it=0; it& vKeys1, float SH, SF; cv::Mat H, F; - thread threadH(&TwoViewReconstruction::FindHomography,this,ref(vbMatchesInliersH), ref(SH), ref(H)); - thread threadF(&TwoViewReconstruction::FindFundamental,this,ref(vbMatchesInliersF), ref(SF), ref(F)); + // 3. 双线程分别计算 + // 加ref为了提供引用 + thread threadH(&TwoViewReconstruction::FindHomography, this, ref(vbMatchesInliersH), ref(SH), ref(H)); + thread threadF(&TwoViewReconstruction::FindFundamental, this, ref(vbMatchesInliersF), ref(SF), ref(F)); // Wait until both threads have finished threadH.join(); threadF.join(); // Compute ratio of scores - if(SH+SF == 0.f) return false; - float RH = SH/(SH+SF); + if (SH + SF == 0.f) + return false; + float RH = SH / (SH + SF); float minParallax = 1.0; // Try to reconstruct from homography or fundamental depending on the ratio (0.40-0.45) - if(RH>0.50) // if(RH>0.40) + // 4. 
看哪个分高用哪个,分别是通过H重建与通过F重建 + // ORBSLAM2这里的值是0.4, TOSEE + if (RH > 0.50) // if(RH>0.40) { //cout << "Initialization from Homography" << endl; - return ReconstructH(vbMatchesInliersH,H, mK,R21,t21,vP3D,vbTriangulated,minParallax,50); + return ReconstructH(vbMatchesInliersH, H, mK, R21, t21, vP3D, vbTriangulated, minParallax, 50); } else //if(pF_HF>0.6) { //cout << "Initialization from Fundamental" << endl; - return ReconstructF(vbMatchesInliersF,F,mK,R21,t21,vP3D,vbTriangulated,minParallax,50); + return ReconstructF(vbMatchesInliersF, F, mK, R21, t21, vP3D, vbTriangulated, minParallax, 50); } } +/** + * @brief 计算H矩阵,同时计算得分与内点 + * @param vbMatchesInliers 经过H矩阵验证,是否为内点,大小为mvMatches12 + * @param score 得分 + * @param H21 1到2的H矩阵 + */ void TwoViewReconstruction::FindHomography(vector &vbMatchesInliers, float &score, cv::Mat &H21) { // Number of putative matches + // 匹配成功的个数 const int N = mvMatches12.size(); // Normalize coordinates vector vPn1, vPn2; cv::Mat T1, T2; - Normalize(mvKeys1,vPn1, T1); - Normalize(mvKeys2,vPn2, T2); + // 像素坐标标准化,先去均值,再除均长 + Normalize(mvKeys1, vPn1, T1); + Normalize(mvKeys2, vPn2, T2); cv::Mat T2inv = T2.inv(); // Best Results variables score = 0.0; - vbMatchesInliers = vector(N,false); + vbMatchesInliers = vector(N, false); // Iteration variables vector vPn1i(8); vector vPn2i(8); cv::Mat H21i, H12i; - vector vbCurrentInliers(N,false); + vector vbCurrentInliers(N, false); float currentScore; // Perform all RANSAC iterations and save the solution with highest score - for(int it=0; itscore) + if (currentScore > score) { H21 = H21i.clone(); vbMatchesInliers = vbCurrentInliers; @@ -176,7 +204,12 @@ void TwoViewReconstruction::FindHomography(vector &vbMatchesInliers, float } } - +/** + * @brief 计算F矩阵,同时计算得分与内点 + * @param vbMatchesInliers 经过F矩阵验证,是否为内点,大小为mvMatches12 + * @param score 得分 + * @param F21 1到2的F矩阵 + */ void TwoViewReconstruction::FindFundamental(vector &vbMatchesInliers, float &score, cv::Mat &F21) { // Number of putative matches @@ -185,26 +218,26 @@ void TwoViewReconstruction::FindFundamental(vector &vbMatchesInliers, floa // Normalize coordinates vector vPn1, vPn2; cv::Mat T1, T2; - Normalize(mvKeys1,vPn1, T1); - Normalize(mvKeys2,vPn2, T2); + Normalize(mvKeys1, vPn1, T1); + Normalize(mvKeys2, vPn2, T2); cv::Mat T2t = T2.t(); // Best Results variables score = 0.0; - vbMatchesInliers = vector(N,false); + vbMatchesInliers = vector(N, false); // Iteration variables vector vPn1i(8); vector vPn2i(8); cv::Mat F21i; - vector vbCurrentInliers(N,false); + vector vbCurrentInliers(N, false); float currentScore; // Perform all RANSAC iterations and save the solution with highest score - for(int it=0; it &vbMatchesInliers, floa vPn2i[j] = vPn2[mvMatches12[idx].second]; } - cv::Mat Fn = ComputeF21(vPn1i,vPn2i); + cv::Mat Fn = ComputeF21(vPn1i, vPn2i); - F21i = T2t*Fn*T1; + // FN得到的是基于归一化坐标的F矩阵,必须乘上归一化过程矩阵才是最后的基于像素坐标的F + F21i = T2t * Fn * T1; currentScore = CheckFundamental(F21i, vbCurrentInliers, mSigma); - if(currentScore>score) + if (currentScore > score) { F21 = F21i.clone(); vbMatchesInliers = vbCurrentInliers; @@ -227,119 +261,152 @@ void TwoViewReconstruction::FindFundamental(vector &vbMatchesInliers, floa } } - +/** + * @brief 从特征点匹配求homography(normalized DLT) + * |x'| | h1 h2 h3 ||x| + * |y'| = a | h4 h5 h6 ||y| 简写: x' = a H x, a为一个尺度因子 + * |1 | | h7 h8 h9 ||1| + * 使用DLT(direct linear tranform)求解该模型 + * x' = a H x + * ---> (x') 叉乘 (H x) = 0 + * ---> Ah = 0 + * A = | 0 0 0 -x -y -1 xy' yy' y'| h = | h1 h2 h3 h4 h5 h6 h7 h8 h9 | + * |-x -y -1 0 0 0 xx' yx' x'| + * 
通过SVD求解Ah = 0,A'A最小特征值对应的特征向量即为解 + * @param vP1 归一化后的点, in reference frame + * @param vP2 归一化后的点, in current frame + * @return 单应矩阵 + * @see Multiple View Geometry in Computer Vision - Algorithm 4.2 p109 + */ cv::Mat TwoViewReconstruction::ComputeH21(const vector &vP1, const vector &vP2) { const int N = vP1.size(); - cv::Mat A(2*N,9,CV_32F); + cv::Mat A(2 * N, 9, CV_32F); - for(int i=0; i(2*i,0) = 0.0; - A.at(2*i,1) = 0.0; - A.at(2*i,2) = 0.0; - A.at(2*i,3) = -u1; - A.at(2*i,4) = -v1; - A.at(2*i,5) = -1; - A.at(2*i,6) = v2*u1; - A.at(2*i,7) = v2*v1; - A.at(2*i,8) = v2; - - A.at(2*i+1,0) = u1; - A.at(2*i+1,1) = v1; - A.at(2*i+1,2) = 1; - A.at(2*i+1,3) = 0.0; - A.at(2*i+1,4) = 0.0; - A.at(2*i+1,5) = 0.0; - A.at(2*i+1,6) = -u2*u1; - A.at(2*i+1,7) = -u2*v1; - A.at(2*i+1,8) = -u2; + A.at(2 * i, 0) = 0.0; + A.at(2 * i, 1) = 0.0; + A.at(2 * i, 2) = 0.0; + A.at(2 * i, 3) = -u1; + A.at(2 * i, 4) = -v1; + A.at(2 * i, 5) = -1; + A.at(2 * i, 6) = v2 * u1; + A.at(2 * i, 7) = v2 * v1; + A.at(2 * i, 8) = v2; + A.at(2 * i + 1, 0) = u1; + A.at(2 * i + 1, 1) = v1; + A.at(2 * i + 1, 2) = 1; + A.at(2 * i + 1, 3) = 0.0; + A.at(2 * i + 1, 4) = 0.0; + A.at(2 * i + 1, 5) = 0.0; + A.at(2 * i + 1, 6) = -u2 * u1; + A.at(2 * i + 1, 7) = -u2 * v1; + A.at(2 * i + 1, 8) = -u2; } - cv::Mat u,w,vt; + cv::Mat u, w, vt; - cv::SVDecomp(A,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV); + cv::SVDecomp(A, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV); return vt.row(8).reshape(0, 3); } -cv::Mat TwoViewReconstruction::ComputeF21(const vector &vP1,const vector &vP2) +/** + * @brief 从特征点匹配求fundamental matrix(normalized 8点法) + * x'Fx = 0 整理可得:Af = 0 + * A = | x'x x'y x' y'x y'y y' x y 1 |, f = | f1 f2 f3 f4 f5 f6 f7 f8 f9 | + * 通过SVD求解Af = 0,A'A最小特征值对应的特征向量即为解 + * @param vP1 归一化后的点, in reference frame + * @param vP2 归一化后的点, in current frame + * @return 基础矩阵 + * @see Multiple View Geometry in Computer Vision - Algorithm 11.1 p282 (中文版 p191) + */ +cv::Mat TwoViewReconstruction::ComputeF21(const vector &vP1, const vector &vP2) { const int N = vP1.size(); - cv::Mat A(N,9,CV_32F); + cv::Mat A(N, 9, CV_32F); - for(int i=0; i(i,0) = u2*u1; - A.at(i,1) = u2*v1; - A.at(i,2) = u2; - A.at(i,3) = v2*u1; - A.at(i,4) = v2*v1; - A.at(i,5) = v2; - A.at(i,6) = u1; - A.at(i,7) = v1; - A.at(i,8) = 1; + A.at(i, 0) = u2 * u1; + A.at(i, 1) = u2 * v1; + A.at(i, 2) = u2; + A.at(i, 3) = v2 * u1; + A.at(i, 4) = v2 * v1; + A.at(i, 5) = v2; + A.at(i, 6) = u1; + A.at(i, 7) = v1; + A.at(i, 8) = 1; } - cv::Mat u,w,vt; + cv::Mat u, w, vt; - cv::SVDecomp(A,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV); + cv::SVDecomp(A, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV); cv::Mat Fpre = vt.row(8).reshape(0, 3); - cv::SVDecomp(Fpre,w,u,vt,cv::SVD::MODIFY_A | cv::SVD::FULL_UV); + // 这里注意计算完要强制让第三个奇异值为0 + cv::SVDecomp(Fpre, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV); - w.at(2)=0; + w.at(2) = 0; - return u*cv::Mat::diag(w)*vt; + return u * cv::Mat::diag(w) * vt; } +/** + * @brief 检查结果 + * @param H21 顾名思义 + * @param H12 顾名思义 + * @param vbMatchesInliers 匹配是否合法,大小为mvMatches12 + * @param sigma 默认为1 + */ float TwoViewReconstruction::CheckHomography(const cv::Mat &H21, const cv::Mat &H12, vector &vbMatchesInliers, float sigma) -{ +{ const int N = mvMatches12.size(); - const float h11 = H21.at(0,0); - const float h12 = H21.at(0,1); - const float h13 = H21.at(0,2); - const float h21 = H21.at(1,0); - const float h22 = H21.at(1,1); - const float h23 = H21.at(1,2); - const float h31 = H21.at(2,0); - const float h32 = H21.at(2,1); - const float h33 = H21.at(2,2); + const float h11 
= H21.at(0, 0); + const float h12 = H21.at(0, 1); + const float h13 = H21.at(0, 2); + const float h21 = H21.at(1, 0); + const float h22 = H21.at(1, 1); + const float h23 = H21.at(1, 2); + const float h31 = H21.at(2, 0); + const float h32 = H21.at(2, 1); + const float h33 = H21.at(2, 2); - const float h11inv = H12.at(0,0); - const float h12inv = H12.at(0,1); - const float h13inv = H12.at(0,2); - const float h21inv = H12.at(1,0); - const float h22inv = H12.at(1,1); - const float h23inv = H12.at(1,2); - const float h31inv = H12.at(2,0); - const float h32inv = H12.at(2,1); - const float h33inv = H12.at(2,2); + const float h11inv = H12.at(0, 0); + const float h12inv = H12.at(0, 1); + const float h13inv = H12.at(0, 2); + const float h21inv = H12.at(1, 0); + const float h22inv = H12.at(1, 1); + const float h23inv = H12.at(1, 2); + const float h31inv = H12.at(2, 0); + const float h32inv = H12.at(2, 1); + const float h33inv = H12.at(2, 2); vbMatchesInliers.resize(N); float score = 0; - + // 基于卡方检验计算出的阈值 自由度为2的卡方分布,显著性水平为0.05,对应的临界阈值 const float th = 5.991; - const float invSigmaSquare = 1.0/(sigma*sigma); + const float invSigmaSquare = 1.0 / (sigma * sigma); - for(int i=0; ith) + if (chiSquare1 > th) bIn = false; else score += th - chiSquare1; @@ -370,52 +437,60 @@ float TwoViewReconstruction::CheckHomography(const cv::Mat &H21, const cv::Mat & // Reprojection error in second image // x1in2 = H21*x1 - const float w1in2inv = 1.0/(h31*u1+h32*v1+h33); - const float u1in2 = (h11*u1+h12*v1+h13)*w1in2inv; - const float v1in2 = (h21*u1+h22*v1+h23)*w1in2inv; + const float w1in2inv = 1.0 / (h31 * u1 + h32 * v1 + h33); + const float u1in2 = (h11 * u1 + h12 * v1 + h13) * w1in2inv; + const float v1in2 = (h21 * u1 + h22 * v1 + h23) * w1in2inv; - const float squareDist2 = (u2-u1in2)*(u2-u1in2)+(v2-v1in2)*(v2-v1in2); + const float squareDist2 = (u2 - u1in2) * (u2 - u1in2) + (v2 - v1in2) * (v2 - v1in2); - const float chiSquare2 = squareDist2*invSigmaSquare; + const float chiSquare2 = squareDist2 * invSigmaSquare; - if(chiSquare2>th) + if (chiSquare2 > th) bIn = false; else score += th - chiSquare2; - if(bIn) - vbMatchesInliers[i]=true; + if (bIn) + vbMatchesInliers[i] = true; else - vbMatchesInliers[i]=false; + vbMatchesInliers[i] = false; } return score; } +/** + * @brief 检查结果 + * @param F21 顾名思义 + * @param vbMatchesInliers 匹配是否合法,大小为mvMatches12 + * @param sigma 默认为1 + */ float TwoViewReconstruction::CheckFundamental(const cv::Mat &F21, vector &vbMatchesInliers, float sigma) { const int N = mvMatches12.size(); - const float f11 = F21.at(0,0); - const float f12 = F21.at(0,1); - const float f13 = F21.at(0,2); - const float f21 = F21.at(1,0); - const float f22 = F21.at(1,1); - const float f23 = F21.at(1,2); - const float f31 = F21.at(2,0); - const float f32 = F21.at(2,1); - const float f33 = F21.at(2,2); + const float f11 = F21.at(0, 0); + const float f12 = F21.at(0, 1); + const float f13 = F21.at(0, 2); + const float f21 = F21.at(1, 0); + const float f22 = F21.at(1, 1); + const float f23 = F21.at(1, 2); + const float f31 = F21.at(2, 0); + const float f32 = F21.at(2, 1); + const float f33 = F21.at(2, 2); vbMatchesInliers.resize(N); float score = 0; + // 基于卡方检验计算出的阈值 自由度为1的卡方分布,显著性水平为0.05,对应的临界阈值 const float th = 3.841; + // 基于卡方检验计算出的阈值 自由度为2的卡方分布,显著性水平为0.05,对应的临界阈值 const float thScore = 5.991; - const float invSigmaSquare = 1.0/(sigma*sigma); + const float invSigmaSquare = 1.0 / (sigma * sigma); - for(int i=0; i & // Reprojection error in second image // l2=F21x1=(a2,b2,c2) + // 计算 img1 上的点在 img2 上投影得到的极线 l2 = F21 
* p1 = (a2,b2,c2) + const float a2 = f11 * u1 + f12 * v1 + f13; + const float b2 = f21 * u1 + f22 * v1 + f23; + const float c2 = f31 * u1 + f32 * v1 + f33; - const float a2 = f11*u1+f12*v1+f13; - const float b2 = f21*u1+f22*v1+f23; - const float c2 = f31*u1+f32*v1+f33; + // 计算误差 e = (a * p2.x + b * p2.y + c) / sqrt(a * a + b * b) + const float num2 = a2 * u2 + b2 * v2 + c2; - const float num2 = a2*u2+b2*v2+c2; + const float squareDist1 = num2 * num2 / (a2 * a2 + b2 * b2); - const float squareDist1 = num2*num2/(a2*a2+b2*b2); - - const float chiSquare1 = squareDist1*invSigmaSquare; - - if(chiSquare1>th) + const float chiSquare1 = squareDist1 * invSigmaSquare; + // 自由度为1是因为这里的计算是点到线的距离,判定分数自由度为2的原因可能是为了与H矩阵持平 + if (chiSquare1 > th) bIn = false; else score += thScore - chiSquare1; // Reprojection error in second image // l1 =x2tF21=(a1,b1,c1) + // 与上面相同只不过反过来了 + const float a1 = f11 * u2 + f21 * v2 + f31; + const float b1 = f12 * u2 + f22 * v2 + f32; + const float c1 = f13 * u2 + f23 * v2 + f33; - const float a1 = f11*u2+f21*v2+f31; - const float b1 = f12*u2+f22*v2+f32; - const float c1 = f13*u2+f23*v2+f33; + const float num1 = a1 * u1 + b1 * v1 + c1; - const float num1 = a1*u1+b1*v1+c1; + const float squareDist2 = num1 * num1 / (a1 * a1 + b1 * b1); - const float squareDist2 = num1*num1/(a1*a1+b1*b1); + const float chiSquare2 = squareDist2 * invSigmaSquare; - const float chiSquare2 = squareDist2*invSigmaSquare; - - if(chiSquare2>th) + if (chiSquare2 > th) bIn = false; else score += thScore - chiSquare2; - if(bIn) - vbMatchesInliers[i]=true; + if (bIn) + vbMatchesInliers[i] = true; else - vbMatchesInliers[i]=false; + vbMatchesInliers[i] = false; } return score; } +/** + * @brief 通过基础矩阵重建 + * @param vbMatchesInliers 匹配是否合法,大小为mvMatches12 + * @param F21 顾名思义 + * @param K 相机内参 + * @param R21 旋转(要输出的) + * @param t21 平移(要输出的) + * @param vP3D 恢复的三维点(要输出的) + * @param vbTriangulated 大小与mvKeys1一致,表示哪个点被重建了 + * @param minParallax 1 + * @param minTriangulated 50 + */ bool TwoViewReconstruction::ReconstructF(vector &vbMatchesInliers, cv::Mat &F21, cv::Mat &K, - cv::Mat &R21, cv::Mat &t21, vector &vP3D, vector &vbTriangulated, float minParallax, int minTriangulated) + cv::Mat &R21, cv::Mat &t21, vector &vP3D, + vector &vbTriangulated, float minParallax, int minTriangulated) { - int N=0; - for(size_t i=0, iend = vbMatchesInliers.size() ; i vP3D1, vP3D2, vP3D3, vP3D4; - vector vbTriangulated1,vbTriangulated2,vbTriangulated3, vbTriangulated4; - float parallax1,parallax2, parallax3, parallax4; + vector vbTriangulated1, vbTriangulated2, vbTriangulated3, vbTriangulated4; + float parallax1, parallax2, parallax3, parallax4; - int nGood1 = CheckRT(R1,t1,mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K, vP3D1, 4.0*mSigma2, vbTriangulated1, parallax1); - int nGood2 = CheckRT(R2,t1,mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K, vP3D2, 4.0*mSigma2, vbTriangulated2, parallax2); - int nGood3 = CheckRT(R1,t2,mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K, vP3D3, 4.0*mSigma2, vbTriangulated3, parallax3); - int nGood4 = CheckRT(R2,t2,mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K, vP3D4, 4.0*mSigma2, vbTriangulated4, parallax4); - - int maxGood = max(nGood1,max(nGood2,max(nGood3,nGood4))); + int nGood1 = CheckRT(R1, t1, mvKeys1, mvKeys2, mvMatches12, vbMatchesInliers, K, vP3D1, 4.0 * mSigma2, vbTriangulated1, parallax1); + int nGood2 = CheckRT(R2, t1, mvKeys1, mvKeys2, mvMatches12, vbMatchesInliers, K, vP3D2, 4.0 * mSigma2, vbTriangulated2, parallax2); + int nGood3 = CheckRT(R1, t2, mvKeys1, mvKeys2, mvMatches12, 
vbMatchesInliers, K, vP3D3, 4.0 * mSigma2, vbTriangulated3, parallax3); + int nGood4 = CheckRT(R2, t2, mvKeys1, mvKeys2, mvMatches12, vbMatchesInliers, K, vP3D4, 4.0 * mSigma2, vbTriangulated4, parallax4); + // 统计重建出点的数量最大值 + int maxGood = max(nGood1, max(nGood2, max(nGood3, nGood4))); R21 = cv::Mat(); t21 = cv::Mat(); + // 起码要重建出超过百分之90的匹配点 + int nMinGood = max(static_cast(0.9 * N), minTriangulated); - int nMinGood = max(static_cast(0.9*N),minTriangulated); - + // 4. 看看有没有脱颖而出的结果,且最大值要高于要求的最低值,如果大家都差不多说明有问题,因为4个结果中只有一个会正常 + // 大家都很多的话反而不正常了。。。 int nsimilar = 0; - if(nGood1>0.7*maxGood) + if (nGood1 > 0.7 * maxGood) nsimilar++; - if(nGood2>0.7*maxGood) + if (nGood2 > 0.7 * maxGood) nsimilar++; - if(nGood3>0.7*maxGood) + if (nGood3 > 0.7 * maxGood) nsimilar++; - if(nGood4>0.7*maxGood) + if (nGood4 > 0.7 * maxGood) nsimilar++; // If there is not a clear winner or not enough triangulated points reject initialization - if(maxGood1) + if (maxGood < nMinGood || nsimilar > 1) { return false; } // If best reconstruction has enough parallax initialize - if(maxGood==nGood1) + // 5. 使用最好的结果输出 + if (maxGood == nGood1) { - if(parallax1>minParallax) + if (parallax1 > minParallax) { vP3D = vP3D1; vbTriangulated = vbTriangulated1; @@ -536,9 +632,10 @@ bool TwoViewReconstruction::ReconstructF(vector &vbMatchesInliers, cv::Mat t1.copyTo(t21); return true; } - }else if(maxGood==nGood2) + } + else if (maxGood == nGood2) { - if(parallax2>minParallax) + if (parallax2 > minParallax) { vP3D = vP3D2; vbTriangulated = vbTriangulated2; @@ -547,9 +644,10 @@ bool TwoViewReconstruction::ReconstructF(vector &vbMatchesInliers, cv::Mat t1.copyTo(t21); return true; } - }else if(maxGood==nGood3) + } + else if (maxGood == nGood3) { - if(parallax3>minParallax) + if (parallax3 > minParallax) { vP3D = vP3D3; vbTriangulated = vbTriangulated3; @@ -558,9 +656,10 @@ bool TwoViewReconstruction::ReconstructF(vector &vbMatchesInliers, cv::Mat t2.copyTo(t21); return true; } - }else if(maxGood==nGood4) + } + else if (maxGood == nGood4) { - if(parallax4>minParallax) + if (parallax4 > minParallax) { vP3D = vP3D4; vbTriangulated = vbTriangulated4; @@ -574,31 +673,106 @@ bool TwoViewReconstruction::ReconstructF(vector &vbMatchesInliers, cv::Mat return false; } +/** + * @brief 从H恢复R t + * H矩阵分解常见有两种方法:Faugeras SVD-based decomposition 和 Zhang SVD-based decomposition + * 参考文献:Motion and structure from motion in a piecewise plannar environment + * 这篇参考文献和下面的代码使用了Faugeras SVD-based decomposition算法 + * @see + * - Faugeras et al, Motion and structure from motion in a piecewise planar environment. International Journal of Pattern Recognition and Artificial Intelligence, 1988. 
+ * - Deeper understanding of the homography decomposition for vision-based control + * 设平面法向量 n = (a, b, c)^t 有一点(x, y, z)在平面上,则ax + by + cz = d 即 1/d * n^t * x = 1 其中d表示 + * x' = R*x + t 从下面开始x 与 x'都表示归一化坐标 + * λ2*x' = R*(λ1*x) + t = R*(λ1*x) + t * 1/d * n^t * (λ1*x) + * x' = λ*(R + t * 1/d * n^t) * x = H^ * x + * 对应图像坐标 u' = G * u G = KH^K.inv + * H^ ~= d*R + t * n^t = U∧V^t ∧ = U^t * H^ * V = d*U^t * R * V + (U^t * t) * (V^t * n)^t + * s = det(U) * det(V) s 有可能是 1 或 -1 ∧ = s^2 * d*U^t * R * V + (U^t * t) * (V^t * n)^t = (s*d) * s * U^t * R * V + (U^t * t) * (V^t * n)^t + * 令 R' = s * U^t * R * V t' = U^t * t n' = V^t * n d' = s * d + * ∧ = d' * R' + t' * n'^t 所以∧也可以认为是一个单应矩阵,其中加入s只为说明有符号相反的可能 + * 设∧ = | d1, 0, 0 | 取e1 = (1, 0, 0)^t e2 = (0, 1, 0)^t e3 = (0, 0, 1)^t + * | 0, d2, 0 | + * | 0, 0, d3 | + * n' = (a1, 0, 0)^t + (0, b1, 0)^t + (0, 0, c1)^t = a1*e1 + b1*e2 + c1*e3 + * + * ∧ = [d1*e1, d2*e2, d3*e3] = [d' * R' * e1, d' * R' * e2, d' * R' * e3] + [t' * a1, t' * b1, t' * c1] + * ==> d1*e1 = d' * R' * e1 + t' * a1 + * d2*e2 = d' * R' * e2 + t' * b1 + * d3*e3 = d' * R' * e3 + t' * c1 + * + * + * 上面式子每两个消去t可得 + * d'R'(b1e1 - a1e2) = d1b1e1 - d2a1e2 + * d'R'(c1e2 - b1e3) = d2c1e1 - d3b1e3 同时取二范数,因为旋转对二范数没影响,所以可以约去 + * d'R'(a1e3 - c1e1) = d3a1e3 - d1c1e1 + * + * (d'^2 - d2^2)*a1^2 + (d'^2 - d1^2)*b1^2 = 0 + * (d'^2 - d3^2)*b1^2 + (d'^2 - d2^2)*c1^2 = 0 令 d'^2 - d1^2 = x1 d'^2 - d2^2 = x2 d'^2 - d3^2 = x3 + * (d'^2 - d1^2)*c1^2 + (d'^2 - d3^2)*a1^2 = 0 + * + * + * | x2 x1 0 | | a1^2 | + * | 0 x3 x2 | * | b1^2 | = 0 ===> x1 * x2 * x3 = 0 有(d'^2 - d1^2) * (d'^2 - d2^2) * (d'^2 - d3^2) = 0 + * | x3 0 x1 | | c1^2 | + * 由于d1 >= d2 >= d3 所以d' = d2 or -d2 + * + * ----------------------------------------------------------------------------------------------------------------------------------------------------------------- + * 下面分情况讨论,当d' > 0 再根据a1^2 + b1^2 + c1^2 = 1 ??????哪来的根据,不晓得 + * 能够求得a1 = ε1 * sqrt((d1^2 - d2^2) / (d1^2 - d3^2)) + * b1 = 0 + * c1 = ε2 * sqrt((d2^2 - d3^2) / (d1^2 - d3^2)) 其中ε1 ε2 可以为正负1 + * 结果带入 d2*e2 = d' * R' * e2 + t' * b1 => R' * e2 = e2 + * | cosθ 0 -sinθ | + * ==> R' = | 0 1 0 | n' 与 R' 带入 d'R'(a1e3 - c1e1) = d3a1e3 - d1c1e1 + * | sinθ 0 cosθ | + * | cosθ 0 -sinθ | | -c1 | | -d1c1 | + * d' * | 0 1 0 | * | 0 | = | 0 | 能够解出 sinθ 与 cosθ + * | sinθ 0 cosθ | | a1 | | d3a1 | + * + * 到此为止得到了 R' 再根据 d1*e1 = d' * R' * e1 + t' * a1 + * d2*e2 = d' * R' * e2 + t' * b1 + * d3*e3 = d' * R' * e3 + t' * c1 + * + * 求得 t' = (d1 - d3) * (a1, 0, c1)^t + * @param vbMatchesInliers 匹配是否合法,大小为mvMatches12 + * @param H21 顾名思义 + * @param K 相机内参 + * @param R21 旋转(要输出的) + * @param t21 平移(要输出的) + * @param vP3D 恢复的三维点(要输出的) + * @param vbTriangulated 大小与vbMatchesInliers,表示哪个点被重建了 + * @param minParallax 1 + * @param minTriangulated 50 + */ bool TwoViewReconstruction::ReconstructH(vector &vbMatchesInliers, cv::Mat &H21, cv::Mat &K, - cv::Mat &R21, cv::Mat &t21, vector &vP3D, vector &vbTriangulated, float minParallax, int minTriangulated) + cv::Mat &R21, cv::Mat &t21, vector &vP3D, + vector &vbTriangulated, float minParallax, int minTriangulated) { - int N=0; - for(size_t i=0, iend = vbMatchesInliers.size() ; i(0); float d2 = w.at(1); float d3 = w.at(2); - - if(d1/d2<1.00001 || d2/d3<1.00001) + // SVD分解的正常情况是特征值降序排列 + if (d1 / d2 < 1.00001 || d2 / d3 < 1.00001) { return false; } @@ -608,90 +782,116 @@ bool TwoViewReconstruction::ReconstructH(vector &vbMatchesInliers, cv::Mat vt.reserve(8); vn.reserve(8); - //n'=[x1 0 x3] 4 posibilities e1=e3=1, e1=1 e3=-1, e1=-1 e3=1, e1=e3=-1 - float aux1 = 
sqrt((d1*d1-d2*d2)/(d1*d1-d3*d3));
-    float aux3 = sqrt((d2*d2-d3*d3)/(d1*d1-d3*d3));
-    float x1[] = {aux1,aux1,-aux1,-aux1};
-    float x3[] = {aux3,-aux3,aux3,-aux3};
+    // n'=[x1 0 x3] 4 possibilities e1=e3=1, e1=1 e3=-1, e1=-1 e3=1, e1=e3=-1
+    // step2:计算法向量
+    // 法向量n'= [x1 0 x3]
+    float aux1 = sqrt((d1 * d1 - d2 * d2) / (d1 * d1 - d3 * d3));
+    float aux3 = sqrt((d2 * d2 - d3 * d3) / (d1 * d1 - d3 * d3));
+    float x1[] = {aux1, aux1, -aux1, -aux1};
+    float x3[] = {aux3, -aux3, aux3, -aux3};

-    //case d'=d2
-    float aux_stheta = sqrt((d1*d1-d2*d2)*(d2*d2-d3*d3))/((d1+d3)*d2);
+    // case d'=d2
+    // step3:恢复旋转矩阵
+    // step3.1:计算 sin(theta)和cos(theta),case d'=d2
+    float aux_stheta = sqrt((d1 * d1 - d2 * d2) * (d2 * d2 - d3 * d3)) / ((d1 + d3) * d2);

-    float ctheta = (d2*d2+d1*d3)/((d1+d3)*d2);
+    float ctheta = (d2 * d2 + d1 * d3) / ((d1 + d3) * d2);
     float stheta[] = {aux_stheta, -aux_stheta, -aux_stheta, aux_stheta};

-    for(int i=0; i<4; i++)
-    {
-        cv::Mat Rp=cv::Mat::eye(3,3,CV_32F);
-        Rp.at<float>(0,0)=ctheta;
-        Rp.at<float>(0,2)=-stheta[i];
-        Rp.at<float>(2,0)=stheta[i];
-        Rp.at<float>(2,2)=ctheta;
+    // step3.2:计算四种旋转矩阵R,t
+    // 计算旋转矩阵 R',计算ppt中公式18
+    //      | ctheta      0  -aux_stheta |       | aux1 |
+    // Rp = |      0      1            0 |  tp = |    0 |
+    //      | aux_stheta  0       ctheta |       |-aux3 |
+    //
+    //      | ctheta      0   aux_stheta |       | aux1 |
+    // Rp = |      0      1            0 |  tp = |    0 |
+    //      |-aux_stheta  0       ctheta |       | aux3 |
+    //
+    //      | ctheta      0   aux_stheta |       |-aux1 |
+    // Rp = |      0      1            0 |  tp = |    0 |
+    //      |-aux_stheta  0       ctheta |       |-aux3 |
+    //
+    //      | ctheta      0  -aux_stheta |       |-aux1 |
+    // Rp = |      0      1            0 |  tp = |    0 |
+    //      | aux_stheta  0       ctheta |       | aux3 |
+    for (int i = 0; i < 4; i++)
+    {
+        cv::Mat Rp = cv::Mat::eye(3, 3, CV_32F);
+        Rp.at<float>(0, 0) = ctheta;
+        Rp.at<float>(0, 2) = -stheta[i];
+        Rp.at<float>(2, 0) = stheta[i];
+        Rp.at<float>(2, 2) = ctheta;

-        cv::Mat R = s*U*Rp*Vt;
+        cv::Mat R = s * U * Rp * Vt;
         vR.push_back(R);

-        cv::Mat tp(3,1,CV_32F);
-        tp.at<float>(0)=x1[i];
-        tp.at<float>(1)=0;
-        tp.at<float>(2)=-x3[i];
-        tp*=d1-d3;
+        cv::Mat tp(3, 1, CV_32F);
+        tp.at<float>(0) = x1[i];
+        tp.at<float>(1) = 0;
+        tp.at<float>(2) = -x3[i];
+        tp *= d1 - d3;

-        cv::Mat t = U*tp;
-        vt.push_back(t/cv::norm(t));
+        // 这里虽然对t有归一化,并没有决定单目整个SLAM过程的尺度
+        // 因为CreateInitialMapMonocular函数对3D点深度会缩放,然后反过来对 t 有改变
+        cv::Mat t = U * tp;
+        vt.push_back(t / cv::norm(t));

-        cv::Mat np(3,1,CV_32F);
-        np.at<float>(0)=x1[i];
-        np.at<float>(1)=0;
-        np.at<float>(2)=x3[i];
+        cv::Mat np(3, 1, CV_32F);
+        np.at<float>(0) = x1[i];
+        np.at<float>(1) = 0;
+        np.at<float>(2) = x3[i];

-        cv::Mat n = V*np;
-        if(n.at<float>(2)<0)
-            n=-n;
+        cv::Mat n = V * np;
+        if (n.at<float>(2) < 0)
+            n = -n;
         vn.push_back(n);
     }

     //case d'=-d2
-    float aux_sphi = sqrt((d1*d1-d2*d2)*(d2*d2-d3*d3))/((d1-d3)*d2);
+    // step3.3:计算 sin(theta)和cos(theta),case d'=-d2
+    float aux_sphi = sqrt((d1 * d1 - d2 * d2) * (d2 * d2 - d3 * d3)) / ((d1 - d3) * d2);

-    float cphi = (d1*d3-d2*d2)/((d1-d3)*d2);
+    float cphi = (d1 * d3 - d2 * d2) / ((d1 - d3) * d2);
     float sphi[] = {aux_sphi, -aux_sphi, -aux_sphi, aux_sphi};

-    for(int i=0; i<4; i++)
+    // step3.4:计算四种旋转矩阵R,t
+    // 计算旋转矩阵 R'
+    for (int i = 0; i < 4; i++)
     {
-        cv::Mat Rp=cv::Mat::eye(3,3,CV_32F);
-        Rp.at<float>(0,0)=cphi;
-        Rp.at<float>(0,2)=sphi[i];
-        Rp.at<float>(1,1)=-1;
-        Rp.at<float>(2,0)=sphi[i];
-        Rp.at<float>(2,2)=-cphi;
+        cv::Mat Rp = cv::Mat::eye(3, 3, CV_32F);
+        Rp.at<float>(0, 0) = cphi;
+        Rp.at<float>(0, 2) = sphi[i];
+        Rp.at<float>(1, 1) = -1;
+        Rp.at<float>(2, 0) = sphi[i];
+        Rp.at<float>(2, 2) = -cphi;

-        cv::Mat R = s*U*Rp*Vt;
+        cv::Mat R = s * U * Rp * Vt;
         vR.push_back(R);

-        cv::Mat tp(3,1,CV_32F);
-        tp.at<float>(0)=x1[i];
-        tp.at<float>(1)=0;
-        tp.at<float>(2)=x3[i];
-        tp*=d1+d3;
+        cv::Mat tp(3, 1, CV_32F);
+        tp.at<float>(0) = x1[i];
+        tp.at<float>(1) = 0;
+        tp.at<float>(2) = 
x3[i]; + tp *= d1 + d3; - cv::Mat t = U*tp; - vt.push_back(t/cv::norm(t)); + cv::Mat t = U * tp; + vt.push_back(t / cv::norm(t)); - cv::Mat np(3,1,CV_32F); - np.at(0)=x1[i]; - np.at(1)=0; - np.at(2)=x3[i]; + cv::Mat np(3, 1, CV_32F); + np.at(0) = x1[i]; + np.at(1) = 0; + np.at(2) = x3[i]; - cv::Mat n = V*np; - if(n.at(2)<0) - n=-n; + cv::Mat n = V * np; + if (n.at(2) < 0) + n = -n; vn.push_back(n); } - int bestGood = 0; - int secondBestGood = 0; + int secondBestGood = 0; int bestSolutionIdx = -1; float bestParallax = -1; vector bestP3D; @@ -699,14 +899,16 @@ bool TwoViewReconstruction::ReconstructH(vector &vbMatchesInliers, cv::Mat // Instead of applying the visibility constraints proposed in the Faugeras' paper (which could fail for points seen with low parallax) // We reconstruct all hypotheses and check in terms of triangulated points and parallax - for(size_t i=0; i<8; i++) + // step4:d'=d2和d'=-d2分别对应8组(R t),通过恢复3D点并判断是否在相机正前方的方法来确定最优解 + for (size_t i = 0; i < 8; i++) { float parallaxi; vector vP3Di; vector vbTriangulatedi; - int nGood = CheckRT(vR[i],vt[i],mvKeys1,mvKeys2,mvMatches12,vbMatchesInliers,K,vP3Di, 4.0*mSigma2, vbTriangulatedi, parallaxi); - - if(nGood>bestGood) + int nGood = CheckRT(vR[i], vt[i], mvKeys1, mvKeys2, mvMatches12, vbMatchesInliers, K, vP3Di, 4.0 * mSigma2, vbTriangulatedi, parallaxi); + + // 保留最优的和次优的 + if (nGood > bestGood) { secondBestGood = bestGood; bestGood = nGood; @@ -715,14 +917,14 @@ bool TwoViewReconstruction::ReconstructH(vector &vbMatchesInliers, cv::Mat bestP3D = vP3Di; bestTriangulated = vbTriangulatedi; } - else if(nGood>secondBestGood) + else if (nGood > secondBestGood) { secondBestGood = nGood; } } - - if(secondBestGood<0.75*bestGood && bestParallax>=minParallax && bestGood>minTriangulated && bestGood>0.9*N) + // step5:通过判断最优是否明显好于次优,从而判断该次Homography分解是否成功 + if (secondBestGood < 0.75 * bestGood && bestParallax >= minParallax && bestGood > minTriangulated && bestGood > 0.9 * N) { vR[bestSolutionIdx].copyTo(R21); vt[bestSolutionIdx].copyTo(t21); @@ -735,21 +937,39 @@ bool TwoViewReconstruction::ReconstructH(vector &vbMatchesInliers, cv::Mat return false; } +/** + * @brief 三角化恢复三维点 + * @param kp1 特征点1 + * @param kp2 特征点2 + * @param P1 特征点1的投影矩阵 [K*R | K*t] + * @param P2 特征点2的投影矩阵 + * @param x3D 恢复的三维点坐标 + */ void TwoViewReconstruction::Triangulate(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Mat &P1, const cv::Mat &P2, cv::Mat &x3D) { - cv::Mat A(4,4,CV_32F); + cv::Mat A(4, 4, CV_32F); + // x = a*P*X, 左右两面乘Pc的反对称矩阵 a*[x]^ * P *X = 0 构成了A矩阵,中间涉及一个尺度a,因为都是归一化平面,但右面是0所以直接可以约掉不影响最后的尺度 + // 0 -1 v P(0) -P.row(1) + v*P.row(2) + // 1 0 -u * P(1) = P.row(0) - u*P.row(2) + // -v u 0 P(2) u*P.row(1) - v*P.row(0) + // 发现上述矩阵线性相关,所以取前两维,两个点构成了4行的矩阵,就是如下的操作,求出的是4维的结果[X,Y,Z,A],所以需要除以最后一维使之为1,就成了[X,Y,Z,1]这种齐次形式 + A.row(0) = kp1.pt.x * P1.row(2) - P1.row(0); + A.row(1) = kp1.pt.y * P1.row(2) - P1.row(1); + A.row(2) = kp2.pt.x * P2.row(2) - P2.row(0); + A.row(3) = kp2.pt.y * P2.row(2) - P2.row(1); - A.row(0) = kp1.pt.x*P1.row(2)-P1.row(0); - A.row(1) = kp1.pt.y*P1.row(2)-P1.row(1); - A.row(2) = kp2.pt.x*P2.row(2)-P2.row(0); - A.row(3) = kp2.pt.y*P2.row(2)-P2.row(1); - - cv::Mat u,w,vt; - cv::SVD::compute(A,w,u,vt,cv::SVD::MODIFY_A| cv::SVD::FULL_UV); + cv::Mat u, w, vt; + cv::SVD::compute(A, w, u, vt, cv::SVD::MODIFY_A | cv::SVD::FULL_UV); x3D = vt.row(3).t(); - x3D = x3D.rowRange(0,3)/x3D.at(3); + x3D = x3D.rowRange(0, 3) / x3D.at(3); } +/** + * @brief 像素坐标标准化,计算点集的横纵均值,与均值偏差的均值。最后返回的是变化矩阵T 直接乘以像素坐标的齐次向量即可获得去中心去均值后的特征点坐标 + * @param vKeys 特征点 + * 
+/**
+ * @brief Normalize pixel coordinates: compute the mean x and y of the point set and the mean
+ *        absolute deviation from it. The returned transform T, applied to a homogeneous pixel
+ *        coordinate, yields the centered, scale-normalized point.
+ * @param vKeys             input keypoints
+ * @param vNormalizedPoints output normalized keypoint coordinates
+ * @param T                 normalization transform
+ */
 void TwoViewReconstruction::Normalize(const vector<cv::KeyPoint> &vKeys, vector<cv::Point2f> &vNormalizedPoints, cv::Mat &T)
 {
     float meanX = 0;
@@ -758,19 +978,19 @@ void TwoViewReconstruction::Normalize(const vector<cv::KeyPoint> &vKeys, vector<
     vNormalizedPoints.resize(N);

-    for(int i=0; i<N; i++)
-    {
-        meanX += vKeys[i].pt.x;
-        meanY += vKeys[i].pt.y;
-    }
-
-    meanX = meanX/N;
-    meanY = meanY/N;
-
-    float meanDevX = 0;
-    float meanDevY = 0;
-
-    for(int i=0; i<N; i++)
+    // 1. Compute the mean x and y of the point set (the new origin)
+    for (int i = 0; i < N; i++)
+    {
+        meanX += vKeys[i].pt.x;
+        meanY += vKeys[i].pt.y;
+    }
+
+    meanX = meanX / N;
+    meanY = meanY / N;
+
+    float meanDevX = 0;
+    float meanDevY = 0;
+
+    for (int i = 0; i < N; i++)
     {
         vNormalizedPoints[i].x = vKeys[i].pt.x - meanX;
         vNormalizedPoints[i].y = vKeys[i].pt.y - meanY;

         meanDevX += fabs(vNormalizedPoints[i].x);
         meanDevY += fabs(vNormalizedPoints[i].y);
     }
+    // 2. With the new origin fixed, compute the mean absolute distance to it
+    meanDevX = meanDevX / N;
+    meanDevY = meanDevY / N;
+    // 3. Scale so that the mean absolute deviation of each coordinate becomes 1
+    float sX = 1.0 / meanDevX;
+    float sY = 1.0 / meanDevY;

-    meanDevX = meanDevX/N;
-    meanDevY = meanDevY/N;
-
-    float sX = 1.0/meanDevX;
-    float sY = 1.0/meanDevY;
-
-    for(int i=0; i<N; i++)
+    for (int i = 0; i < N; i++)
     {
         vNormalizedPoints[i].x = vNormalizedPoints[i].x * sX;
         vNormalizedPoints[i].y = vNormalizedPoints[i].y * sY;
     }

-    T = cv::Mat::eye(3,3,CV_32F);
-    T.at<float>(0,0) = sX;
-    T.at<float>(1,1) = sY;
-    T.at<float>(0,2) = -meanX*sX;
-    T.at<float>(1,2) = -meanY*sY;
+    // 4. Build the normalization transform
+    T = cv::Mat::eye(3, 3, CV_32F);
+    T.at<float>(0, 0) = sX;
+    T.at<float>(1, 1) = sY;
+    T.at<float>(0, 2) = -meanX * sX;
+    T.at<float>(1, 2) = -meanY * sY;
 }
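In closed form, the transform built in step 4 acts on a pixel (u, v) as follows, where u-bar and v-bar stand for meanX and meanY, and s_X, s_Y for the inverse mean deviations:

\[
T = \begin{pmatrix} s_X & 0 & -\bar{u}\, s_X \\ 0 & s_Y & -\bar{v}\, s_Y \\ 0 & 0 & 1 \end{pmatrix},
\qquad
T \begin{pmatrix} u \\ v \\ 1 \end{pmatrix}
= \begin{pmatrix} s_X (u - \bar{u}) \\ s_Y (v - \bar{v}) \\ 1 \end{pmatrix}
\]

Conditioning the points this way is what keeps the DLT systems for H and F numerically well behaved.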
+/**
+ * @brief Cheirality check: selects the most consistent solution among the decomposition hypotheses
+ * @param R                rotation
+ * @param t                translation
+ * @param vKeys1           keypoints in the first image
+ * @param vKeys2           keypoints in the second image
+ * @param vMatches12       matches between the two images
+ * @param vbMatchesInliers flags marking which matches are inliers
+ * @param K                camera intrinsics
+ * @param vP3D             triangulated 3D points
+ * @param th2              squared reprojection-error threshold
+ * @param vbGood           same size as mvKeys1; marks which points were successfully reconstructed
+ * @param parallax         parallax angle (in degrees) of the accepted points
+ */
 int TwoViewReconstruction::CheckRT(const cv::Mat &R, const cv::Mat &t, const vector<cv::KeyPoint> &vKeys1, const vector<cv::KeyPoint> &vKeys2,
-                       const vector<Match> &vMatches12, vector<bool> &vbMatchesInliers,
-                       const cv::Mat &K, vector<cv::Point3f> &vP3D, float th2, vector<bool> &vbGood, float &parallax)
+                                   const vector<Match> &vMatches12, vector<bool> &vbMatchesInliers,
+                                   const cv::Mat &K, vector<cv::Point3f> &vP3D, float th2, vector<bool> &vbGood, float &parallax)
 {
     // Calibration parameters
-    const float fx = K.at<float>(0,0);
-    const float fy = K.at<float>(1,1);
-    const float cx = K.at<float>(0,2);
-    const float cy = K.at<float>(1,2);
+    const float fx = K.at<float>(0, 0);
+    const float fy = K.at<float>(1, 1);
+    const float cx = K.at<float>(0, 2);
+    const float cy = K.at<float>(1, 2);

-    vbGood = vector<bool>(vKeys1.size(),false);
+    vbGood = vector<bool>(vKeys1.size(), false);
     vP3D.resize(vKeys1.size());

     vector<float> vCosParallax;
     vCosParallax.reserve(vKeys1.size());

     // Camera 1 Projection Matrix K[I|0]
-    cv::Mat P1(3,4,CV_32F,cv::Scalar(0));
-    K.copyTo(P1.rowRange(0,3).colRange(0,3));
+    // Step 1: projection matrix of the first camera;
+    // its optical center defines the world frame
+    cv::Mat P1(3, 4, CV_32F, cv::Scalar(0));
+    K.copyTo(P1.rowRange(0, 3).colRange(0, 3));

-    cv::Mat O1 = cv::Mat::zeros(3,1,CV_32F);
+    cv::Mat O1 = cv::Mat::zeros(3, 1, CV_32F);

     // Camera 2 Projection Matrix K[R|t]
-    cv::Mat P2(3,4,CV_32F);
-    R.copyTo(P2.rowRange(0,3).colRange(0,3));
-    t.copyTo(P2.rowRange(0,3).col(3));
-    P2 = K*P2;
+    // Step 2: projection matrix of the second camera
+    cv::Mat P2(3, 4, CV_32F);
+    R.copyTo(P2.rowRange(0, 3).colRange(0, 3));
+    t.copyTo(P2.rowRange(0, 3).col(3));
+    P2 = K * P2;

-    cv::Mat O2 = -R.t()*t;
+    // Optical center of the second camera in world coordinates
+    cv::Mat O2 = -R.t() * t;

-    int nGood=0;
+    int nGood = 0;

-    for(size_t i=0, iend=vMatches12.size();i<iend;i++)
+    // Step 3: triangulate each inlier match and validate the recovered 3D point
+    for (size_t i = 0, iend = vMatches12.size(); i < iend; i++)
     {
         if(!vbMatchesInliers[i])
             continue;

         const cv::KeyPoint &kp1 = vKeys1[vMatches12[i].first];
         const cv::KeyPoint &kp2 = vKeys2[vMatches12[i].second];
         cv::Mat p3dC1;

         Triangulate(kp1,kp2,P1,P2,p3dC1);

-        if(!isfinite(p3dC1.at<float>(0)) || !isfinite(p3dC1.at<float>(1)) || !isfinite(p3dC1.at<float>(2)))
+        if (!isfinite(p3dC1.at<float>(0)) || !isfinite(p3dC1.at<float>(1)) || !isfinite(p3dC1.at<float>(2)))
         {
-            vbGood[vMatches12[i].first]=false;
+            vbGood[vMatches12[i].first] = false;
             continue;
         }

         // Check parallax
+        // Step 4: compute the cosine of the parallax angle
         cv::Mat normal1 = p3dC1 - O1;
         float dist1 = cv::norm(normal1);

         cv::Mat normal2 = p3dC1 - O2;
         float dist2 = cv::norm(normal2);

-        float cosParallax = normal1.dot(normal2)/(dist1*dist2);
+        float cosParallax = normal1.dot(normal2) / (dist1 * dist2);

+        // Step 5: check that the 3D point lies in front of both cameras
         // Check depth in front of first camera (only if enough parallax, as "infinite" points can easily go to negative depth)
-        if(p3dC1.at<float>(2)<=0 && cosParallax<0.99998)
+        // Step 5.1: negative depth means the point is behind the first camera; reject it
+        if (p3dC1.at<float>(2) <= 0 && cosParallax < 0.99998)
             continue;

         // Check depth in front of second camera (only if enough parallax, as "infinite" points can easily go to negative depth)
-        cv::Mat p3dC2 = R*p3dC1+t;
+        // Step 5.2: same test for a point behind the second camera
+        cv::Mat p3dC2 = R * p3dC1 + t;

-        if(p3dC2.at<float>(2)<=0 && cosParallax<0.99998)
+        if (p3dC2.at<float>(2) <= 0 && cosParallax < 0.99998)
             continue;

+        // Step 6: compute the reprojection errors
         // Check reprojection error in first image
+        // Reprojection error of the 3D point in the first image
         float im1x, im1y;
-        float invZ1 = 1.0/p3dC1.at<float>(2);
-        im1x = fx*p3dC1.at<float>(0)*invZ1+cx;
-        im1y = fy*p3dC1.at<float>(1)*invZ1+cy;
+        float invZ1 = 1.0 / p3dC1.at<float>(2);
+        im1x = fx * p3dC1.at<float>(0) * invZ1 + cx;
+        im1y = fy * p3dC1.at<float>(1) * invZ1 + cy;

-        float squareError1 = (im1x-kp1.pt.x)*(im1x-kp1.pt.x)+(im1y-kp1.pt.y)*(im1y-kp1.pt.y);
+        float squareError1 = (im1x - kp1.pt.x) * (im1x - kp1.pt.x) + (im1y - kp1.pt.y) * (im1y - kp1.pt.y);

-        if(squareError1>th2)
+        // Step 6.1: reject if the reprojection error is too large
+        // (the reprojection error is usually large when the parallax angle is small)
+        if (squareError1 > th2)
             continue;

         // Check reprojection error in second image
+        // Reprojection error of the 3D point in the second image
         float im2x, im2y;
-        float invZ2 = 1.0/p3dC2.at<float>(2);
-        im2x = fx*p3dC2.at<float>(0)*invZ2+cx;
-        im2y = fy*p3dC2.at<float>(1)*invZ2+cy;
+        float invZ2 = 1.0 / p3dC2.at<float>(2);
+        im2x = fx * p3dC2.at<float>(0) * invZ2 + cx;
+        im2y = fy * p3dC2.at<float>(1) * invZ2 + cy;

-        float squareError2 = (im2x-kp2.pt.x)*(im2x-kp2.pt.x)+(im2y-kp2.pt.y)*(im2y-kp2.pt.y);
+        float squareError2 = (im2x - kp2.pt.x) * (im2x - kp2.pt.x) + (im2y - kp2.pt.y) * (im2y - kp2.pt.y);

-        if(squareError2>th2)
+        // Step 6.2: reject if the reprojection error is too large
+        if (squareError2 > th2)
             continue;

+        // Step 7: count the points that pass all checks and record their parallax cosines
         vCosParallax.push_back(cosParallax);
-        vP3D[vMatches12[i].first] = cv::Point3f(p3dC1.at<float>(0),p3dC1.at<float>(1),p3dC1.at<float>(2));
+        vP3D[vMatches12[i].first] = cv::Point3f(p3dC1.at<float>(0), p3dC1.at<float>(1), p3dC1.at<float>(2));
         nGood++;

-        if(cosParallax<0.99998)
-            vbGood[vMatches12[i].first]=true;
+        if (cosParallax < 0.99998)
+            vbGood[vMatches12[i].first] = true;
     }

-    if(nGood>0)
+    // Step 8: pick a fairly large parallax angle among the accepted points
+    if (nGood > 0)
     {
-        sort(vCosParallax.begin(),vCosParallax.end());
+        // Sort the cosines in ascending order
+        sort(vCosParallax.begin(), vCosParallax.end());

-        size_t idx = min(50,int(vCosParallax.size()-1));
-        parallax = acos(vCosParallax[idx])*180/CV_PI;
+        // Trick: after sorting, the largest parallax angle is not used;
+        // a reasonably large one (index 50, or the last one if there are fewer points) is taken
+        size_t idx = min(50, int(vCosParallax.size() - 1));
+        parallax = acos(vCosParallax[idx]) * 180 / CV_PI;
     }
     else
-        parallax=0;
+        parallax = 0;

     return nGood;
 }
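A quick check on the hard-coded cosine threshold used in the depth and vbGood tests above:

\[
\cos\theta \geq 0.99998 \;\Longrightarrow\; \theta \leq \arccos(0.99998) \approx 0.0063\ \text{rad} \approx 0.36^\circ
\]

Points with less than roughly 0.36 degrees of parallax are treated as quasi-infinite: they are exempted from the depth-sign rejection (their triangulated depth is unreliable) and are never marked good in vbGood.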
+/**
+ * @brief Decompose an essential matrix
+ * A helpful write-up (in Chinese): https://blog.csdn.net/weixin_44580210/article/details/90344511
+ * Combining F with the camera intrinsics gives the essential matrix E; decomposing E yields
+ * 4 solutions: [R1,t], [R1,-t], [R2,t], [R2,-t].
+ *
+ * ## Properties of skew-symmetric matrices
+ * Multiple View Geometry states that a 3x3 matrix is an essential matrix if and only if two of
+ * its singular values are equal and the third is 0. Why?
+ * We know E = [t]x R = SR, where S is skew-symmetric. What properties does S have?
+ * Result 1: a real skew-symmetric matrix can be written S = U B U^T, where B is a block-diagonal
+ * matrix of the form diag(a1*Z, a2*Z, ..., am*Z, 0, 0, ..., 0) with Z = [0, 1; -1, 0].
+ * The eigenvalues of a skew-symmetric matrix are purely imaginary, and a skew-symmetric matrix
+ * of odd order is necessarily singular.
+ * By this result, S can be written (up to scale) as S = k*U*Z*U^T, with
+ *     | 0, 1, 0 |
+ * Z = |-1, 0, 0 |
+ *     | 0, 0, 0 |
+ * and Z = diag(1, 1, 0) * W up to sign, i.e. Z*W = diag(1, 1, 0), with
+ *     | 0,-1, 0 |
+ * W = | 1, 0, 0 |
+ *     | 0, 0, 1 |
+ * Hence, up to a sign factor, E = SR = U diag(1,1,0) (W U^T R), which proves that E has two
+ * equal singular values and one zero singular value.
+ *
+ * ## Recovering the camera matrices
+ * Assume the first camera matrix is P = [I | 0]. To compute the second camera matrix P', E must
+ * be factored into the product SR of a skew-symmetric matrix and a rotation matrix.
+ * Again by Result 1, up to a constant factor, S = U Z U^T. Suppose the rotation can be written
+ * as R = U X V^T (an ansatz, not the SVD of R), where U and V come from the SVD of E. Then
+ *     U diag(1,1,0) V^T = E = SR = (U Z U^T)(U X V^T) = U (Z X) V^T
+ * so Z X = diag(1,1,0) (up to sign), and therefore X = W or X = W^T.
+ * Result: if the SVD of E is U diag(1,1,0) V^T, then E = SR has two factorizations:
+ * S = U Z U^T with R = U W V^T or R = U W^T V^T.
+ *
+ * Furthermore, since St = 0 (the cross product of t with itself is zero) and ||t|| = 1 (the usual
+ * normalization of the baseline between the two camera matrices), t = U (0,0,1)^T = u3, i.e. the
+ * last column of U, so S never has to be formed explicitly. Because the sign of t is undetermined
+ * and R has two possibilities, the decomposition has four cases:
+ *     P' = [U W V^T | +u3]  or  [U W V^T | -u3]  or  [U W^T V^T | +u3]  or  [U W^T V^T | -u3]
+ * @param E  Essential Matrix
+ * @param R1 Rotation Matrix 1
+ * @param R2 Rotation Matrix 2
+ * @param t  Translation
+ * @see Multiple View Geometry in Computer Vision - Result 9.19 p259
+ */
 void TwoViewReconstruction::DecomposeE(const cv::Mat &E, cv::Mat &R1, cv::Mat &R2, cv::Mat &t)
 {
-    cv::Mat u,w,vt;
-    cv::SVD::compute(E,w,u,vt);
+    cv::Mat u, w, vt;
+    cv::SVD::compute(E, w, u, vt);

+    // t is normalized here, but this does not fix the scale of the whole monocular SLAM
+    // session: CreateInitialMapMonocular later rescales the 3D point depths, which changes t accordingly
     u.col(2).copyTo(t);
-    t=t/cv::norm(t);
+    t = t / cv::norm(t);

-    cv::Mat W(3,3,CV_32F,cv::Scalar(0));
-    W.at<float>(0,1)=-1;
-    W.at<float>(1,0)=1;
-    W.at<float>(2,2)=1;
+    cv::Mat W(3, 3, CV_32F, cv::Scalar(0));
+    W.at<float>(0, 1) = -1;
+    W.at<float>(1, 0) = 1;
+    W.at<float>(2, 2) = 1;

-    R1 = u*W*vt;
-    if(cv::determinant(R1)<0)
-        R1=-R1;
+    R1 = u * W * vt;
+    if (cv::determinant(R1) < 0) // a rotation matrix must have determinant +1
+        R1 = -R1;

-    R2 = u*W.t()*vt;
-    if(cv::determinant(R2)<0)
-        R2=-R2;
+    R2 = u * W.t() * vt;
+    if (cv::determinant(R2) < 0)
+        R2 = -R2;
 }

-} //namespace ORB_SLAM
+} // namespace ORB_SLAM3
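To make the four-hypothesis enumeration concrete, here is a minimal self-contained sketch of a caller (a hypothetical helper, not part of this patch; it assumes a 3x3 CV_32F essential matrix and mirrors the body of DecomposeE above):

#include <opencv2/core.hpp>
#include <utility>
#include <vector>

// Hypothetical helper: enumerate the four pose hypotheses [R1|t], [R1|-t],
// [R2|t], [R2|-t] produced by the decomposition; each pair would then be
// scored with CheckRT, analogously to ReconstructH's eight hypotheses.
static std::vector<std::pair<cv::Mat, cv::Mat>> EnumeratePoses(const cv::Mat &E)
{
    cv::Mat u, w, vt;
    cv::SVD::compute(E, w, u, vt);

    cv::Mat t;
    u.col(2).copyTo(t);          // t = u3, the last column of U
    t = t / cv::norm(t);         // unit baseline; the sign stays undetermined

    cv::Mat W = cv::Mat::zeros(3, 3, CV_32F);
    W.at<float>(0, 1) = -1;
    W.at<float>(1, 0) = 1;
    W.at<float>(2, 2) = 1;

    cv::Mat R1 = u * W * vt;
    if (cv::determinant(R1) < 0) // enforce det(R) = +1
        R1 = -R1;

    cv::Mat R2 = u * W.t() * vt;
    if (cv::determinant(R2) < 0)
        R2 = -R2;

    cv::Mat tneg = -t;
    return {{R1, t}, {R1, tneg}, {R2, t}, {R2, tneg}};
}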