From 3fd2fcccb741a3acb084c3573417c1e46be394e5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B0=8F=E7=99=BD=E9=80=86=E8=A2=AD?= <741299292@qq.com>
Date: Sun, 31 Oct 2021 16:41:01 +0800
Subject: [PATCH] Add comments
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 include/CameraModels/GeometricCamera.h | 15 ++++++++---
 include/CameraModels/KannalaBrandt8.h  | 14 ++++++++--
 include/CameraModels/Pinhole.h         |  9 ++++++-
 src/CameraModels/KannalaBrandt8.cpp    | 36 ++++++++++++++++++++-----
 src/CameraModels/Pinhole.cpp           | 37 +++++++++++++++++--------
 src/Frame.cc                           |  3 ++-
 src/ORBmatcher.cc                      |  2 +-
 src/TwoViewReconstruction.cc           |  7 ++---
 8 files changed, 92 insertions(+), 31 deletions(-)

diff --git a/include/CameraModels/GeometricCamera.h b/include/CameraModels/GeometricCamera.h
index 49fff32..6f27b36 100644
--- a/include/CameraModels/GeometricCamera.h
+++ b/include/CameraModels/GeometricCamera.h
@@ -41,35 +41,44 @@ namespace ORB_SLAM3 {
         GeometricCamera(const std::vector<float> &_vParameters) : mvParameters(_vParameters) {}
         ~GeometricCamera() {}

+        // Projection functions
         virtual cv::Point2f project(const cv::Point3f &p3D) = 0;
         virtual cv::Point2f project(const cv::Matx31f &m3D) = 0;
         virtual cv::Point2f project(const cv::Mat& m3D) = 0;
         virtual Eigen::Vector2d project(const Eigen::Vector3d & v3D) = 0;
         virtual cv::Mat projectMat(const cv::Point3f& p3D) = 0;

+        // Uncertainty of a pixel; every implementation simply returns 1.0f
         virtual float uncertainty2(const Eigen::Matrix<double,2,1> &p2D) = 0;

+        // Unprojection (back-projection)
         virtual cv::Point3f unproject(const cv::Point2f &p2D) = 0;
         virtual cv::Mat unprojectMat(const cv::Point2f &p2D) = 0;
         virtual cv::Matx31f unprojectMat_(const cv::Point2f &p2D) = 0;

+        // Projection Jacobian
         virtual cv::Mat projectJac(const cv::Point3f &p3D) = 0;
         virtual Eigen::Matrix<double, 2, 3> projectJac(const Eigen::Vector3d& v3D) = 0;

+        // Unprojection Jacobian (not used)
         virtual cv::Mat unprojectJac(const cv::Point2f &p2D) = 0;

+        // Recover 3D points by triangulation; mainly used during monocular initialization
         virtual bool ReconstructWithTwoViews(const std::vector<cv::KeyPoint>& vKeys1, const std::vector<cv::KeyPoint>& vKeys2, const std::vector<int> &vMatches12,
                                              cv::Mat &R21, cv::Mat &t21, std::vector<cv::Point3f> &vP3D, std::vector<bool> &vbTriangulated) = 0;
-
+        // Return the intrinsic matrix K
         virtual cv::Mat toK() = 0;
         virtual cv::Matx33f toK_() = 0;

+        // Epipolar constraint; used when triangulating MapPoints
         virtual bool epipolarConstrain(GeometricCamera* otherCamera, const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, const cv::Mat& R12, const cv::Mat& t12, const float sigmaLevel, const float unc) = 0;
         virtual bool epipolarConstrain_(GeometricCamera* otherCamera, const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, const cv::Matx33f& R12, const cv::Matx31f& t12, const float sigmaLevel, const float unc) = 0;

+        // Get and set the camera intrinsic parameters
         float getParameter(const int i){return mvParameters[i];}
         void setParameter(const float p, const size_t i){mvParameters[i] = p;}

+        // Number of stored parameters
         size_t size(){return mvParameters.size();}

         virtual bool matchAndtriangulate(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, GeometricCamera* pOther,
@@ -77,8 +86,8 @@ namespace ORB_SLAM3 {
                                          const float sigmaLevel1, const float sigmaLevel2,
                                          cv::Mat& x3Dtriangulated) = 0;

+        // Getters for the id and the type; not used anywhere in the code
         unsigned int GetId() { return mnId; }
-
         unsigned int GetType() { return mnType; }

         const unsigned int CAM_PINHOLE = 0;
@@ -87,7 +96,7 @@ namespace ORB_SLAM3 {
         static long unsigned int nNextId;

     protected:
-        std::vector<float> mvParameters;
+        std::vector<float> mvParameters;  // stores the parameters: the entries of K plus the distortion coefficients

         unsigned int mnId;
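For orientation only, and not part of the patch: a minimal sketch of how the interface commented above is meant to be used, assuming a Pinhole model built from {fx, fy, cx, cy} as stored in mvParameters. The header path, namespace and numeric intrinsics are placeholders following the usual ORB_SLAM3 layout.

    #include "CameraModels/Pinhole.h"
    #include <opencv2/core.hpp>
    #include <iostream>
    #include <vector>

    int main()
    {
        // hypothetical intrinsics: fx, fy, cx, cy
        ORB_SLAM3::Pinhole cam(std::vector<float>{458.654f, 457.296f, 367.215f, 248.375f});

        cv::Point3f Pc(0.5f, -0.2f, 2.0f);    // 3D point in the camera frame
        cv::Point2f uv  = cam.project(Pc);    // pixel coordinates on the undistorted image plane
        cv::Point3f ray = cam.unproject(uv);  // normalized coordinates, z = 1

        // ray should equal (Pc.x / Pc.z, Pc.y / Pc.z, 1)
        std::cout << uv << " " << ray << std::endl;
        return 0;
    }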
diff --git a/include/CameraModels/KannalaBrandt8.h b/include/CameraModels/KannalaBrandt8.h
index a5ef535..b5534dd 100644
--- a/include/CameraModels/KannalaBrandt8.h
+++ b/include/CameraModels/KannalaBrandt8.h
@@ -59,38 +59,47 @@ namespace ORB_SLAM3 {
         mnType = CAM_FISHEYE;
     }

+    // Projection functions
     cv::Point2f project(const cv::Point3f &p3D);
     cv::Point2f project(const cv::Matx31f &m3D);
     cv::Point2f project(const cv::Mat& m3D);
     Eigen::Vector2d project(const Eigen::Vector3d & v3D);
     cv::Mat projectMat(const cv::Point3f& p3D);

+    // Uncertainty of a pixel; always returns 1.0f
     float uncertainty2(const Eigen::Matrix<double,2,1> &p2D);

+    // Unprojection
     cv::Point3f unproject(const cv::Point2f &p2D);
     cv::Mat unprojectMat(const cv::Point2f &p2D);
     cv::Matx31f unprojectMat_(const cv::Point2f &p2D);

+    // Projection Jacobian
     cv::Mat projectJac(const cv::Point3f &p3D);
     Eigen::Matrix<double, 2, 3> projectJac(const Eigen::Vector3d& v3D);

+    // Unprojection Jacobian (not used)
     cv::Mat unprojectJac(const cv::Point2f &p2D);

+    // Recover 3D points by triangulation
     bool ReconstructWithTwoViews(const std::vector<cv::KeyPoint>& vKeys1, const std::vector<cv::KeyPoint>& vKeys2, const std::vector<int> &vMatches12,
                                  cv::Mat &R21, cv::Mat &t21, std::vector<cv::Point3f> &vP3D, std::vector<bool> &vbTriangulated);

+    // Return the intrinsic matrix K
     cv::Mat toK();
     cv::Matx33f toK_();

+    // Epipolar constraint
     bool epipolarConstrain(GeometricCamera* pCamera2, const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, const cv::Mat& R12, const cv::Mat& t12, const float sigmaLevel, const float unc);
     bool epipolarConstrain_(GeometricCamera* pCamera2, const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, const cv::Matx33f& R12, const cv::Matx31f& t12, const float sigmaLevel, const float unc);
-
+    // Triangulate a match
    float TriangulateMatches(GeometricCamera* pCamera2, const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, const cv::Mat& R12, const cv::Mat& t12, const float sigmaLevel, const float unc, cv::Mat& p3D);
    float TriangulateMatches_(GeometricCamera* pCamera2, const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, const cv::Matx33f& R12, const cv::Matx31f& t12, const float sigmaLevel, const float unc, cv::Matx31f& p3D);

    std::vector<int> mvLappingArea;

+    // Match and triangulate
    bool matchAndtriangulate(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, GeometricCamera* pOther,
                             cv::Mat& Tcw1, cv::Mat& Tcw2,
                             const float sigmaLevel1, const float sigmaLevel2,
@@ -99,13 +108,14 @@ namespace ORB_SLAM3 {
    friend std::ostream& operator<<(std::ostream& os, const KannalaBrandt8& kb);
    friend std::istream& operator>>(std::istream& is, KannalaBrandt8& kb);
 private:
-    const float precision;
+    const float precision;  // convergence threshold for the iterative undistortion

    //Parameters vector corresponds to
    //[fx, fy, cx, cy, k0, k1, k2, k3]

    TwoViewReconstruction* tvr;

+    // Triangulation
    void Triangulate(const cv::Point2f &p1, const cv::Point2f &p2, const cv::Mat &Tcw1, const cv::Mat &Tcw2,cv::Mat &x3D);
    void Triangulate_(const cv::Point2f &p1, const cv::Point2f &p2, const cv::Matx44f &Tcw1, const cv::Matx44f &Tcw2,cv::Matx31f &x3D);
 };
diff --git a/include/CameraModels/Pinhole.h b/include/CameraModels/Pinhole.h
index 01b703e..6ccbc36 100644
--- a/include/CameraModels/Pinhole.h
+++ b/include/CameraModels/Pinhole.h
@@ -58,30 +58,37 @@ namespace ORB_SLAM3 {
         if(tvr) delete tvr;
     }

+    // Projection functions
     cv::Point2f project(const cv::Point3f &p3D);
     cv::Point2f project(const cv::Matx31f &m3D);
     cv::Point2f project(const cv::Mat &m3D);
     Eigen::Vector2d project(const Eigen::Vector3d & v3D);
     cv::Mat projectMat(const cv::Point3f& p3D);

+    // Uncertainty of a pixel; always returns 1.0f
     float uncertainty2(const Eigen::Matrix<double,2,1> &p2D);

+    // Unprojection
     cv::Point3f unproject(const cv::Point2f &p2D);
     cv::Mat unprojectMat(const cv::Point2f &p2D);
     cv::Matx31f unprojectMat_(const cv::Point2f &p2D);
-
+    // Projection Jacobian
     cv::Mat projectJac(const cv::Point3f &p3D);
     Eigen::Matrix<double, 2, 3> projectJac(const Eigen::Vector3d& v3D);

+    // Unprojection Jacobian (not used)
     cv::Mat unprojectJac(const cv::Point2f &p2D);

+    // Recover 3D points by triangulation
     bool ReconstructWithTwoViews(const std::vector<cv::KeyPoint>& vKeys1, const std::vector<cv::KeyPoint>& vKeys2, const std::vector<int> &vMatches12,
                                  cv::Mat &R21, cv::Mat &t21, std::vector<cv::Point3f> &vP3D, std::vector<bool> &vbTriangulated);

+    // Return the intrinsic matrix K
     cv::Mat toK();
     cv::Matx33f toK_();

+    // Epipolar constraint
     bool epipolarConstrain(GeometricCamera* pCamera2, const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, const cv::Mat& R12, const cv::Mat& t12, const float sigmaLevel, const float unc);
     bool epipolarConstrain_(GeometricCamera* pCamera2, const cv::KeyPoint& kp1, const cv::KeyPoint& kp2, const cv::Matx33f& R12, const cv::Matx31f& t12, const float sigmaLevel, const float unc);
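The Pinhole briefs later in this patch describe the same mapping in words; as a restatement (not text from the patch), the projection to the undistorted image plane and the unprojection to normalized coordinates that the methods above implement are:

    % Pinhole projection and unprojection, restating u = fx*x + cx, v = fy*y + cy
    % from the Pinhole.cpp comments below
    \pi(X_c, Y_c, Z_c) =
      \begin{pmatrix} u \\ v \end{pmatrix} =
      \begin{pmatrix} f_x X_c / Z_c + c_x \\ f_y Y_c / Z_c + c_y \end{pmatrix},
    \qquad
    \pi^{-1}(u, v) =
      \begin{pmatrix} (u - c_x)/f_x \\ (v - c_y)/f_y \\ 1 \end{pmatrix}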
diff --git a/src/CameraModels/KannalaBrandt8.cpp b/src/CameraModels/KannalaBrandt8.cpp
index 662a21b..b454029 100644
--- a/src/CameraModels/KannalaBrandt8.cpp
+++ b/src/CameraModels/KannalaBrandt8.cpp
@@ -141,8 +141,18 @@ cv::Matx31f KannalaBrandt8::unprojectMat_(const cv::Point2f &p2D)

 /**
  * @brief Unprojection
+ * Projection process:
+ * xc = Xc/Zc,  yc = Yc/Zc
+ * r^2 = xc^2 + yc^2
+ * θ = arctan(r)
+ * θd = θ + k0*θ^3 + k1*θ^5 + k2*θ^7 + k3*θ^9
+ * xd = θd/r * xc    yd = θd/r * yc
+ * u = fx*xd + cx    v = fy*yd + cy
+ *
+ *
  * Given u and v, the pixel coordinates of the (still distorted) keypoint:
  * xd = (u - cx) / fx    yd = (v - cy) / fy
+ * we are looking for xc = xd * r / θd    yc = yd * r / θd
  * i.e. xc = xd * tan(θ) / θd    yc = yd * tan(θ) / θd    (since r = tan(θ))
  * where θd is obtained as follows:
  * xd^2 + yd^2 = θd^2/r^2 * (xc^2 + yc^2)
@@ -172,17 +182,22 @@ cv::Point3f KannalaBrandt8::unproject(const cv::Point2f &p2D)
     if (theta_d > 1e-8)
     {
         //Compensate distortion iteratively
-        float theta = theta_d;
+        float theta = theta_d;  // use θd as the initial guess for θ

         // start iterating
         for (int j = 0; j < 10; j++)
         {
-            float theta2 = theta * theta, theta4 = theta2 * theta2, theta6 = theta4 * theta2, theta8 = theta4 * theta4;
-            float k0_theta2 = mvParameters[4] * theta2, k1_theta4 = mvParameters[5] * theta4;
-            float k2_theta6 = mvParameters[6] * theta6, k3_theta8 = mvParameters[7] * theta8;
+            float theta2 = theta * theta,
+                  theta4 = theta2 * theta2,
+                  theta6 = theta4 * theta2,
+                  theta8 = theta4 * theta4;
+            float k0_theta2 = mvParameters[4] * theta2,
+                  k1_theta4 = mvParameters[5] * theta4;
+            float k2_theta6 = mvParameters[6] * theta6,
+                  k3_theta8 = mvParameters[7] * theta8;
             float theta_fix = (theta * (1 + k0_theta2 + k1_theta4 + k2_theta6 + k3_theta8) - theta_d) / (1 + 3 * k0_theta2 + 5 * k1_theta4 + 7 * k2_theta6 + 9 * k3_theta8);
             theta = theta - theta_fix;
-            if (fabsf(theta_fix) < precision)
+            if (fabsf(theta_fix) < precision)  // once the update becomes very small we are close to the final value
                 break;
         }
         //scale = theta - theta_d;
@@ -194,7 +209,7 @@ cv::Point3f KannalaBrandt8::unproject(const cv::Point2f &p2D)
 }

 /**
- * @brief Jacobian of the 3D point with respect to the 2D pixel coordinates
+ * @brief Jacobian of the 2D pixel coordinates with respect to the 3D point
  * u = fx * θd * x / sqrt(x^2 + y^2) + cx
  * v = fy * θd * y / sqrt(x^2 + y^2) + cy
  * differentiate these two expressions with respect to x, y and z
@@ -376,23 +391,27 @@ bool KannalaBrandt8::matchAndtriangulate(const cv::KeyPoint &kp1, const cv::KeyP
     cv::Point3f ray1c = this->unproject(kp1.pt);
     cv::Point3f ray2c = pOther->unproject(kp2.pt);

+    // normalized coordinates of point 1 in frame 1
     cv::Mat r1(3, 1, CV_32F);
     r1.at<float>(0) = ray1c.x;
     r1.at<float>(1) = ray1c.y;
     r1.at<float>(2) = ray1c.z;

+    // normalized coordinates of point 2 in frame 2
     cv::Mat r2(3, 1, CV_32F);
     r2.at<float>(0) = ray2c.x;
     r2.at<float>(1) = ray2c.y;
     r2.at<float>(2) = ray2c.z;

     //Check parallax between rays
+    // express the rays in the world frame
     cv::Mat ray1 = Rwc1 * r1;
     cv::Mat ray2 = Rwc2 * r2;

     const float cosParallaxRays = ray1.dot(ray2) / (cv::norm(ray1) * cv::norm(ray2));

     //If parallax is lower than 0.9998, reject this match
+    // reject when the angle between the rays is almost zero: the point is too far away and triangulating it would introduce a large error
     if (cosParallaxRays > 0.9998)
     {
         return false;
     }
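The loop above is a Newton iteration on f(θ) = θ·(1 + k0·θ² + k1·θ⁴ + k2·θ⁶ + k3·θ⁸) − θd. A standalone sketch of the same step, not part of the patch, with k[0..3] standing in for mvParameters[4..7] and a hypothetical default for the precision threshold (the real code uses the class member):

    #include <cmath>

    // Solve θ from θd = θ * (1 + k0*θ^2 + k1*θ^4 + k2*θ^6 + k3*θ^8) by Newton's method.
    float solveTheta(float theta_d, const float k[4], float precision = 1e-6f)
    {
        float theta = theta_d;  // θd is a reasonable initial guess because the distortion is small
        for (int j = 0; j < 10; j++)
        {
            float theta2 = theta * theta, theta4 = theta2 * theta2,
                  theta6 = theta4 * theta2, theta8 = theta4 * theta4;
            // f(θ)  = θ*(1 + k0*θ^2 + k1*θ^4 + k2*θ^6 + k3*θ^8) - θd
            // f'(θ) = 1 + 3*k0*θ^2 + 5*k1*θ^4 + 7*k2*θ^6 + 9*k3*θ^8
            float theta_fix =
                (theta * (1 + k[0]*theta2 + k[1]*theta4 + k[2]*theta6 + k[3]*theta8) - theta_d) /
                (1 + 3*k[0]*theta2 + 5*k[1]*theta4 + 7*k[2]*theta6 + 9*k[3]*theta8);
            theta -= theta_fix;                     // Newton step
            if (std::fabs(theta_fix) < precision)   // stop once the update is negligible
                break;
        }
        return theta;
    }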
@@ -409,10 +428,12 @@ bool KannalaBrandt8::matchAndtriangulate(const cv::KeyPoint &kp1, const cv::KeyP

     cv::Mat x3D;

+    // triangulate
     Triangulate(p11, p22, Tcw1, Tcw2, x3D);

     cv::Mat x3Dt = x3D.t();

+    // check that the point lies in front of both cameras
     //Check triangulation in front of cameras
     float z1 = Rcw1.row(2).dot(x3Dt) + tcw1.at<float>(2);
     if (z1 <= 0)
@@ -426,6 +447,7 @@ bool KannalaBrandt8::matchAndtriangulate(const cv::KeyP
         return false;
     }

+    // check the reprojection error
     //Check reprojection error in first keyframe
     // -Transform point into camera reference system
     cv::Mat x3D1 = Rcw1 * x3D + tcw1;
@@ -480,7 +502,7 @@ float KannalaBrandt8::TriangulateMatches(GeometricCamera *pCamera2, const cv::Ke
     //Check parallax
     // 2. check the angle between the rays
     // this looks a bit like the epipolar constraint, but it is not: rotate r2 through R12 into the frame aligned with r1,
-    // then compute the angle between them and check whether it exceeds 1.14°; honestly, this condition feels a bit strict
+    // then compute the angle between them and check whether it exceeds 1.14°
     cv::Mat r21 = R12 * r2;

     const float cosParallaxRays = r1.dot(r21) / (cv::norm(r1) * cv::norm(r21));
diff --git a/src/CameraModels/Pinhole.cpp b/src/CameraModels/Pinhole.cpp
index 33f24f7..2ba059d 100644
--- a/src/CameraModels/Pinhole.cpp
+++ b/src/CameraModels/Pinhole.cpp
@@ -26,7 +26,7 @@ namespace ORB_SLAM3
 long unsigned int GeometricCamera::nNextId = 0;

 /**
- * @brief Projection
+ * @brief Project a 3D point in the camera frame onto the undistorted image plane
  * @param p3D 3D point
  * @return pixel coordinates
  */
@@ -37,7 +37,7 @@ cv::Point2f Pinhole::project(const cv::Point3f &p3D)
 }

 /**
- * @brief Projection
+ * @brief Project a 3D point in the camera frame onto the undistorted image plane
  * @param m3D 3D point
  * @return pixel coordinates
  */
@@ -47,7 +47,7 @@ cv::Point2f Pinhole::project(const cv::Matx31f &m3D)
 }

 /**
- * @brief Projection
+ * @brief Project a 3D point in the camera frame onto the undistorted image plane
  * @param v3D 3D point
  * @return pixel coordinates
  */
@@ -59,7 +59,7 @@ cv::Point2f Pinhole::project(const cv::Mat &m3D)
 }

 /**
- * @brief Projection
+ * @brief Project a 3D point in the camera frame onto the undistorted image plane
  * @param p3D 3D point
  * @return pixel coordinates
  */
@@ -73,7 +73,7 @@ Eigen::Vector2d Pinhole::project(const Eigen::Vector3d &v3D)
 }

 /**
- * @brief Projection
+ * @brief Project a 3D point in the camera frame onto the undistorted image plane
  * @param p3D 3D point
  * @return pixel coordinates
  */
@@ -94,7 +94,7 @@ float Pinhole::uncertainty2(const Eigen::Matrix<double,2,1> &p2D)
 /**
  * @brief Unprojection
  * @param p2D keypoint
- * @return
+ * @return normalized coordinates
  */
 cv::Point3f Pinhole::unproject(const cv::Point2f &p2D)
 {
@@ -105,7 +105,7 @@ cv::Point3f Pinhole::unproject(const cv::Point2f &p2D)
 /**
  * @brief Unprojection
  * @param p2D keypoint
- * @return
+ * @return normalized coordinates
  */
 cv::Mat Pinhole::unprojectMat(const cv::Point2f &p2D)
 {
@@ -116,7 +116,7 @@ cv::Mat Pinhole::unprojectMat(const cv::Point2f &p2D)
 /**
  * @brief Unprojection
  * @param p2D keypoint
- * @return
+ * @return normalized coordinates
  */
 cv::Matx31f Pinhole::unprojectMat_(const cv::Point2f &p2D)
 {
@@ -126,7 +126,7 @@ cv::Matx31f Pinhole::unprojectMat_(const cv::Point2f &p2D)
 }

 /**
- * @brief Jacobian of the 3D point with respect to the 2D pixel coordinates
+ * @brief Jacobian of the 2D pixel coordinates with respect to the 3D point coordinates
  * @param p3D 3D point
  * @return
  */
@@ -144,7 +144,7 @@ cv::Mat Pinhole::projectJac(const cv::Point3f &p3D)
 }

 /**
- * @brief Jacobian of the 3D point with respect to the 2D pixel coordinates
+ * @brief Jacobian of the 2D pixel coordinates with respect to the 3D point coordinates
  * @param v3D 3D point
  * @return
  */
@@ -162,7 +162,13 @@ Eigen::Matrix<double, 2, 3> Pinhole::projectJac(const Eigen::Vector3d &v3D)
 }

 /**
- * @brief Jacobian of the 2D pixel coordinates with respect to the 3D point
+ * @brief Jacobian of the normalized coordinates with respect to the 2D pixel coordinates, i.e. the unprojection Jacobian
+ * u = fx*x + cx   =>   x = (u - cx)/fx
+ * v = fy*y + cy   =>   y = (v - cy)/fy
+ * differentiating (x, y, 1) with respect to (u, v) gives
+ *   1/fx   0
+ *   0      1/fy
+ *   0      0
  * @param p2D keypoint
  * @return
  */
@@ -179,7 +185,7 @@ cv::Mat Pinhole::unprojectJac(const cv::Point2f &p2D)
     return Jac;
 }

-/** Recover 3D points by triangulation
+/** Recover 3D points by triangulation; used during monocular initialization
  * @param vKeys1 keypoints of the first frame
  * @param vKeys2 keypoints of the second frame
  * @param vMatches12 matches; same length as vKeys1, each entry holds the index of the matched keypoint in vKeys2
@@ -231,7 +237,7 @@ cv::Matx33f Pinhole::toK_()
  * @param R12 rotation from camera 2 to camera 1
  * @param t12 translation from camera 2 to camera 1
  * @param sigmaLevel squared scale factor of keypoint 1
- * @param unc squared scale factor of keypoint 2
+ * @param unc squared scale factor of keypoint 2, i.e. 1.2^(2n)
  * @return whether the 3D point was successfully recovered
  */
 bool Pinhole::epipolarConstrain(GeometricCamera *pCamera2, const cv::KeyPoint &kp1, const cv::KeyPoint &kp2, const cv::Mat &R12, const cv::Mat &t12, const float sigmaLevel, const float unc)
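For the projectJac briefs above, expanding u = fx·X/Z + cx and v = fy·Y/Z + cy gives the 2×3 Jacobian of the pixel coordinates with respect to the 3D point (a worked-out restatement, not text from the patch):

    % Jacobian of (u, v) with respect to (X, Y, Z) for the pinhole model
    \frac{\partial(u, v)}{\partial(X, Y, Z)} =
    \begin{pmatrix}
      f_x / Z & 0       & -f_x X / Z^2 \\
      0       & f_y / Z & -f_y Y / Z^2
    \end{pmatrix}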
@@ -243,10 +249,15 @@ bool Pinhole::epipolarConstrain(GeometricCamera *pCamera2, const cv::KeyPoint &k
     cv::Mat F12 = K1.t().inv() * t12x * R12 * K2.inv();

     // Epipolar line in second image l = x1'F12 = [a b c]
+    //                       (u2)
+    // (u1, v1, 1) * F12 * (v2) = 0  -->  (a, b, c) * (u2, v2, 1)^t = 0  -->  a*u2 + b*v2 + c = 0
+    //                       ( 1)
     const float a = kp1.pt.x * F12.at<float>(0, 0) + kp1.pt.y * F12.at<float>(1, 0) + F12.at<float>(2, 0);
     const float b = kp1.pt.x * F12.at<float>(0, 1) + kp1.pt.y * F12.at<float>(1, 1) + F12.at<float>(2, 1);
     const float c = kp1.pt.x * F12.at<float>(0, 2) + kp1.pt.y * F12.at<float>(1, 2) + F12.at<float>(2, 2);

+    // point-to-line distance:
+    // d = |a*u2 + b*v2 + c| / sqrt(a^2 + b^2)
     const float num = a * kp2.pt.x + b * kp2.pt.y + c;

     const float den = a * a + b * b;
diff --git a/src/Frame.cc b/src/Frame.cc
index 93a0002..2d85bd3 100644
--- a/src/Frame.cc
+++ b/src/Frame.cc
@@ -946,7 +946,8 @@ void Frame::UndistortKeyPoints()

     // Undistort points
     // reshape(int cn, int rows=0): cn is the new number of channels; rows=0 means the number of rows stays unchanged
-    // to call the OpenCV undistortion function directly, the matrix first has to be reshaped to 2 channels (for the x, y coordinates)
+    // to call the OpenCV undistortion function directly, the matrix first has to be reshaped to 2 channels (for the x, y coordinates)
+    // when the last matrix passed to cv::undistortPoints (the new projection matrix P) is empty, the returned points are normalized coordinates
     mat=mat.reshape(2);
     cv::undistortPoints(mat,mat, static_cast<Pinhole*>(mpCamera)->toK(),mDistCoef,cv::Mat(),mK);
     // reshape back to a single channel, our usual representation
diff --git a/src/ORBmatcher.cc b/src/ORBmatcher.cc
index 86f2053..1a04d7c 100644
--- a/src/ORBmatcher.cc
+++ b/src/ORBmatcher.cc
@@ -1628,7 +1628,7 @@ int ORBmatcher::SearchForTriangulation(KeyFrame *pKF1, KeyFrame *pKF2, cv::Mat F
     return nmatches;
 }

-
+// this overload does not seem to be called anywhere
 int ORBmatcher::SearchForTriangulation(KeyFrame *pKF1, KeyFrame *pKF2, cv::Mat F12,
                                        vector<pair<size_t, size_t>> &vMatchedPairs, const bool bOnlyStereo, vector<cv::Point2f> &vMatchedPoints)
 {
diff --git a/src/TwoViewReconstruction.cc b/src/TwoViewReconstruction.cc
index 240fde9..6563e58 100644
--- a/src/TwoViewReconstruction.cc
+++ b/src/TwoViewReconstruction.cc
@@ -57,9 +57,10 @@ bool TwoViewReconstruction::Reconstruct(const std::vector<cv::KeyPoint> &vKeys1,

     // Fill structures with current keypoints and matches with reference frame
     // Reference Frame: 1, Current Frame: 2
+    // fill mvMatches12, which stores the indices of the matched points between vKeys1 and vKeys2
     mvMatches12.clear();             // stores the ids of the matched points
     mvMatches12.reserve(mvKeys2.size());
-    mvbMatched1.resize(mvKeys1.size());
+    mvbMatched1.resize(mvKeys1.size());  // same length as vKeys1; marks whether the point at each position in vKeys1 has a match
     for (size_t i = 0, iend = vMatches12.size(); i < iend; i++)
     {
         if (vMatches12[i] >= 0)
@@ -78,7 +79,7 @@ bool TwoViewReconstruction::Reconstruct(const std::vector<cv::KeyPoint> &vKeys1,
     vAllIndices.reserve(N);
     vector<size_t> vAvailableIndices;

-    // vAllIndices is used so that the same point cannot be picked twice
+    // vAllIndices is used so that the same point cannot be picked twice among the 8 points
     for (int i = 0; i < N; i++)
     {
         vAllIndices.push_back(i);
@@ -102,7 +103,7 @@ bool TwoViewReconstruction::Reconstruct(const std::vector<cv::KeyPoint> &vKeys1,

             mvSets[it][j] = idx;

-            // make sure the same point cannot be picked again
+            // make sure the same point cannot be picked again: this removes the index of the already chosen point from vAvailableIndices
             vAvailableIndices[randi] = vAvailableIndices.back();
             vAvailableIndices.pop_back();
         }
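The two vAvailableIndices comments above describe a sample-without-replacement trick: overwrite the chosen slot with the last element and pop the back, so an index can never be drawn twice. A minimal standalone sketch, not part of the patch; std::rand stands in for the project's own random helper:

    #include <cstdlib>
    #include <vector>

    // Draw up to 8 distinct indices from the pool, removing each chosen index in O(1).
    std::vector<size_t> drawEightDistinct(std::vector<size_t> vAvailableIndices)
    {
        std::vector<size_t> sample;
        for (int j = 0; j < 8 && !vAvailableIndices.empty(); j++)
        {
            int randi = std::rand() % vAvailableIndices.size();  // random position in the pool
            sample.push_back(vAvailableIndices[randi]);

            // remove the chosen index: swap with the back, then pop
            vAvailableIndices[randi] = vAvailableIndices.back();
            vAvailableIndices.pop_back();
        }
        return sample;
    }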