/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/**
 * @file SmartProjectionPoseFactorRollingShutter.h
 * @brief Smart projection factor on poses modeling rolling shutter effect with
 * given readout time
 * @author Luca Carlone
 */

#pragma once

// NOTE(review): the original include targets were lost in extraction; the
// headers below cover every name this file uses (SmartProjectionFactor,
// PinholePose, CameraSet/PointCov, std::find/make_pair/runtime_error) —
// confirm the exact list against the upstream file.
#include <gtsam/geometry/CameraSet.h>
#include <gtsam/geometry/PinholePose.h>
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/slam/SmartProjectionFactor.h>

#include <algorithm>
#include <stdexcept>
#include <utility>
#include <vector>

namespace gtsam {

/**
 * @addtogroup SLAM
 *
 * If you are using the factor, please cite:
 * L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert,
 * Eliminating conditionally independent sets in factor graphs:
 * a unifying perspective based on smart factors,
 * Int. Conf. on Robotics and Automation (ICRA), 2014.
 */

/**
 * This factor optimizes the pose of the body assuming a rolling shutter model
 * of the camera with given readout time. This factor requires that values
 * contain (for each pixel observation) consecutive camera poses from which the
 * pixel observation pose can be interpolated.
* @addtogroup SLAM */ template class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor< PinholePose > { protected: /// shared pointer to calibration object (one for each observation) std::vector > K_all_; /// The keys of the pose of the body (with respect to an external world frame): two consecutive poses for each observation std::vector> world_P_body_key_pairs_; /// interpolation factor (one for each observation) to interpolate between pair of consecutive poses std::vector gammas_; /// Pose of the camera in the body frame std::vector body_P_sensors_; ///< Pose of the camera in the body frame public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW /// shorthand for base class type typedef SmartProjectionFactor > Base; /// shorthand for this class typedef SmartProjectionPoseFactorRollingShutter This; /// shorthand for a smart pointer to a factor typedef boost::shared_ptr shared_ptr; static const int DimBlock = 12; ///< size of the variable stacking 2 poses from which the observation pose is interpolated static const int DimPose = 6; ///< Pose3 dimension static const int ZDim = 2; ///< Measurement dimension (Point2) typedef Eigen::Matrix MatrixZD; // F blocks (derivatives wrt camera) typedef std::vector > FBlocks; // vector of F blocks /** * Constructor * @param Isotropic measurement noise * @param params internal parameters of the smart factors */ SmartProjectionPoseFactorRollingShutter( const SharedNoiseModel& sharedNoiseModel, const SmartProjectionParams& params = SmartProjectionParams()) : Base(sharedNoiseModel, params) { } /** Virtual destructor */ ~SmartProjectionPoseFactorRollingShutter() override = default; /** * add a new measurement, with 2 pose keys, camera calibration, and observed pixel. 
* @param measured is the 2-dimensional location of the projection of a * single landmark in the a single view (the measurement), interpolated from the 2 poses * @param world_P_body_key1 is the key corresponding to the first body poses (time <= time pixel is acquired) * @param world_P_body_key2 is the key corresponding to the second body poses (time >= time pixel is acquired) * @param gamma in [0,1] is the interpolation factor, such that if gamma = 0 the interpolated pose is the same as world_P_body_key * @param K is the (fixed) camera intrinsic calibration */ void add(const Point2& measured, const Key& world_P_body_key1, const Key& world_P_body_key2, const double& gamma, const boost::shared_ptr& K, const Pose3 body_P_sensor = Pose3::identity()) { // store measurements in base class (note: we manyally add keys below to make sure they are unique this->measured_.push_back(measured); // but we also store the extrinsic calibration keys in the same order world_P_body_key_pairs_.push_back( std::make_pair(world_P_body_key1, world_P_body_key2)); // pose keys are assumed to be unique, so we avoid duplicates here if (std::find(this->keys_.begin(), this->keys_.end(), world_P_body_key1) == this->keys_.end()) this->keys_.push_back(world_P_body_key1); // add only unique keys if (std::find(this->keys_.begin(), this->keys_.end(), world_P_body_key2) == this->keys_.end()) this->keys_.push_back(world_P_body_key2); // add only unique keys // store interpolation factors gammas_.push_back(gamma); // store fixed calibration K_all_.push_back(K); // store extrinsics of the camera body_P_sensors_.push_back(body_P_sensor); } /** * Variant of the previous one in which we include a set of measurements * @param measurements vector of the 2m dimensional location of the projection * of a single landmark in the m views (the measurements) * @param world_P_body_key_pairs vector of (1 for each view) containing the pair of poses from which each view can be interpolated * @param Ks vector of intrinsic 
calibration objects */ void add(const Point2Vector& measurements, const std::vector>& world_P_body_key_pairs, const std::vector& gammas, const std::vector>& Ks, const std::vector body_P_sensors) { assert(world_P_body_key_pairs.size() == measurements.size()); assert(world_P_body_key_pairs.size() == gammas.size()); assert(world_P_body_key_pairs.size() == Ks.size()); for (size_t i = 0; i < measurements.size(); i++) { add(measurements[i], world_P_body_key_pairs[i].first, world_P_body_key_pairs[i].second, gammas[i], Ks[i], body_P_sensors[i]); } } /** * Variant of the previous one in which we include a set of measurements with * the same (intrinsic and extrinsic) calibration * @param measurements vector of the 2m dimensional location of the projection * of a single landmark in the m views (the measurements) * @param world_P_body_key_pairs vector of (1 for each view) containing the pair of poses from which each view can be interpolated * @param K the (known) camera calibration (same for all measurements) */ void add(const Point2Vector& measurements, const std::vector>& world_P_body_key_pairs, const std::vector& gammas, const boost::shared_ptr& K, const Pose3 body_P_sensor = Pose3::identity()) { assert(world_P_body_key_pairs.size() == measurements.size()); assert(world_P_body_key_pairs.size() == gammas.size()); for (size_t i = 0; i < measurements.size(); i++) { add(measurements[i], world_P_body_key_pairs[i].first, world_P_body_key_pairs[i].second, gammas[i], K, body_P_sensor); } } /// return the calibration object inline std::vector> calibration() const { return K_all_; } /// return (for each observation) the key of the pair of poses from which we interpolate const std::vector> world_P_body_key_pairs() const { return world_P_body_key_pairs_; } /// return the interpolation factors gammas const std::vector getGammas() const { return gammas_; } /// return the extrinsic camera calibration body_P_sensors const std::vector body_P_sensors() const { return body_P_sensors_; } /** * 
print * @param s optional string naming the factor * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "SmartProjectionPoseFactorRollingShutter: \n "; for (size_t i = 0; i < K_all_.size(); i++) { std::cout << "-- Measurement nr " << i << std::endl; std::cout << " pose1 key: " << keyFormatter(world_P_body_key_pairs_[i].first) << std::endl; std::cout << " pose2 key: " << keyFormatter(world_P_body_key_pairs_[i].second) << std::endl; std::cout << " gamma: " << gammas_[i] << std::endl; body_P_sensors_[i].print("extrinsic calibration:\n"); K_all_[i]->print("intrinsic calibration = "); } Base::print("", keyFormatter); } /// equals bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const SmartProjectionPoseFactorRollingShutter* e = dynamic_cast*>(&p); double keyPairsEqual = true; if(this->world_P_body_key_pairs_.size() == e->world_P_body_key_pairs().size()){ for(size_t k=0; k< this->world_P_body_key_pairs_.size(); k++){ const Key key1own = world_P_body_key_pairs_[k].first; const Key key1e = e->world_P_body_key_pairs()[k].first; const Key key2own = world_P_body_key_pairs_[k].second; const Key key2e = e->world_P_body_key_pairs()[k].second; if ( !(key1own == key1e) || !(key2own == key2e) ){ keyPairsEqual = false; break; } } }else{ keyPairsEqual = false; } double extrinsicCalibrationEqual = true; if(this->body_P_sensors_.size() == e->body_P_sensors().size()){ for(size_t i=0; i< this->body_P_sensors_.size(); i++){ if (!body_P_sensors_[i].equals(e->body_P_sensors()[i])){ extrinsicCalibrationEqual = false; break; } } }else{ extrinsicCalibrationEqual = false; } return e && Base::equals(p, tol) && K_all_ == e->calibration() && gammas_ == e->getGammas() && keyPairsEqual && extrinsicCalibrationEqual; } /** * Compute jacobian F, E and error vector at a given linearization point * @param values Values structure which 
must contain camera poses * corresponding to keys involved in this factor * @return Return arguments are the camera jacobians Fs (including the jacobian with * respect to both the body pose and extrinsic pose), the point Jacobian E, * and the error vector b. Note that the jacobians are computed for a given point. */ void computeJacobiansWithTriangulatedPoint(FBlocks& Fs, Matrix& E, Vector& b, const Values& values) const { if (!this->result_) { throw("computeJacobiansWithTriangulatedPoint"); } else { // valid result: compute jacobians size_t numViews = this->measured_.size(); E = Matrix::Zero(2 * numViews, 3); // a Point2 for each view (point jacobian) b = Vector::Zero(2 * numViews); // a Point2 for each view Eigen::Matrix dProject_dPoseCam; Eigen::Matrix dInterpPose_dPoseBody1, dInterpPose_dPoseBody2, dPoseCam_dInterpPose; Eigen::Matrix Ei; for (size_t i = 0; i < numViews; i++) { // for each camera/measurement Pose3 w_P_body1 = values.at(world_P_body_key_pairs_[i].first); Pose3 w_P_body2 = values.at(world_P_body_key_pairs_[i].second); double interpolationFactor = gammas_[i]; // get interpolated pose: Pose3 w_P_body = interpolate(w_P_body1, w_P_body2,interpolationFactor, dInterpPose_dPoseBody1, dInterpPose_dPoseBody2); Pose3 body_P_cam = body_P_sensors_[i]; Pose3 w_P_cam = w_P_body.compose(body_P_cam, dPoseCam_dInterpPose); PinholeCamera camera(w_P_cam, *K_all_[i]); // get jacobians and error vector for current measurement Point2 reprojectionError_i = Point2( camera.project(*this->result_, dProject_dPoseCam, Ei) - this->measured_.at(i)); Eigen::Matrix J; // 2 x 12 J.block(0, 0, ZDim, 6) = dProject_dPoseCam * dPoseCam_dInterpPose * dInterpPose_dPoseBody1; // (2x6) * (6x6) * (6x6) J.block(0, 6, ZDim, 6) = dProject_dPoseCam * dPoseCam_dInterpPose * dInterpPose_dPoseBody2; // (2x6) * (6x6) * (6x6) // fit into the output structures Fs.push_back(J); size_t row = 2 * i; b.segment(row) = -reprojectionError_i; E.block(row, 0) = Ei; } } } /// linearize and return a 
Hessianfactor that is an approximation of error(p) boost::shared_ptr > createHessianFactor( const Values& values, const double lambda = 0.0, bool diagonalDamping = false) const { // we may have multiple observation sharing the same keys (due to the rolling shutter interpolation), // hence the number of unique keys may be smaller than 2 * nrMeasurements size_t nrUniqueKeys = this->keys_.size(); // Create structures for Hessian Factors KeyVector js; std::vector < Matrix > Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2); std::vector < Vector > gs(nrUniqueKeys); if (this->measured_.size() != this->cameras(values).size()) throw std::runtime_error("SmartProjectionPoseFactorRollingShutter: " "measured_.size() inconsistent with input"); // triangulate 3D point at given linearization point this->triangulateSafe(this->cameras(values)); if (!this->result_) { // failed: return "empty/zero" Hessian for (Matrix& m : Gs) m = Matrix::Zero(DimPose, DimPose); for (Vector& v : gs) v = Vector::Zero(DimPose); return boost::make_shared < RegularHessianFactor > (this->keys_, Gs, gs, 0.0); } // compute Jacobian given triangulated 3D Point FBlocks Fs; Matrix F, E; Vector b; this->computeJacobiansWithTriangulatedPoint(Fs, E, b, values); // Whiten using noise model this->noiseModel_->WhitenSystem(E, b); for (size_t i = 0; i < Fs.size(); i++) Fs[i] = this->noiseModel_->Whiten(Fs[i]); Matrix3 P = Base::Cameras::PointCov(E, lambda, diagonalDamping); // build augmented Hessian (with last row/column being the information vector) // these are the keys that correspond to the blocks in augmentedHessian (output of SchurComplement) KeyVector nonuniqueKeys; for (size_t i = 0; i < world_P_body_key_pairs_.size(); i++) { nonuniqueKeys.push_back(world_P_body_key_pairs_.at(i).first); nonuniqueKeys.push_back(world_P_body_key_pairs_.at(i).second); } // but we need to get the augumented hessian wrt the unique keys in key_ SymmetricBlockMatrix augmentedHessianUniqueKeys = 
Base::Cameras::SchurComplementAndRearrangeBlocks_3_12_6( Fs, E, P, b, nonuniqueKeys, this->keys_); return boost::make_shared < RegularHessianFactor > (this->keys_, augmentedHessianUniqueKeys); } /** * error calculates the error of the factor. */ double error(const Values& values) const override { if (this->active(values)) { return this->totalReprojectionError(this->cameras(values)); } else { // else of active flag return 0.0; } } /** * Collect all cameras involved in this factor * @param values Values structure which must contain camera poses * corresponding to keys involved in this factor * @return Cameras */ typename Base::Cameras cameras(const Values& values) const override { size_t numViews = this->measured_.size(); assert(world_P_body_keys_.size() == K_all_.size()); assert(world_P_body_keys_.size() == body_P_cam_keys_.size()); typename Base::Cameras cameras; for (size_t i = 0; i < numViews; i++) { // for each measurement Pose3 w_P_body1 = values.at(world_P_body_key_pairs_[i].first); Pose3 w_P_body2 = values.at(world_P_body_key_pairs_[i].second); double interpolationFactor = gammas_[i]; Pose3 w_P_body = interpolate(w_P_body1, w_P_body2, interpolationFactor); Pose3 body_P_cam = body_P_sensors_[i]; Pose3 w_P_cam = w_P_body.compose(body_P_cam); std::cout << "id : " << i << std::endl; w_P_cam.print("w_P_cam\n"); cameras.emplace_back(w_P_cam, K_all_[i]); } return cameras; } /** * Linearize to Gaussian Factor (possibly adding a damping factor Lambda for LM) * @param values Values structure which must contain camera poses and extrinsic pose for this factor * @return a Gaussian factor */ boost::shared_ptr linearizeDamped( const Values& values, const double lambda = 0.0) const { // depending on flag set on construction we may linearize to different linear factors switch (this->params_.linearizationMode) { case HESSIAN: return this->createHessianFactor(values, lambda); default: throw std::runtime_error( "SmartProjectionPoseFactorRollingShutter: unknown linearization 
mode"); } } /// linearize boost::shared_ptr linearize(const Values& values) const override { return this->linearizeDamped(values); } private: /// Serialization function friend class boost::serialization::access; template void serialize(ARCHIVE& ar, const unsigned int /*version*/) { ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); ar & BOOST_SERIALIZATION_NVP(K_all_); } }; // end of class declaration /// traits template struct traits > : public Testable > { }; } // namespace gtsam