Merged develop into feature/windows-fixes
commit 903d29214b

@@ -1,4 +1,5 @@
/build*
.idea
*.pyc
*.DS_Store
/examples/Data/dubrovnik-3-7-pre-rewritten.txt
@@ -66,7 +66,7 @@ option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions deprecated in GTSAM 4" ON)
option(GTSAM_USE_VECTOR3_POINTS "Simply typdef Point3 to eigen::Vector3d" OFF)
option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typdef Point2 and Point3 to Eigen::Vector equivalents" OFF)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
@@ -91,8 +91,8 @@ if(GTSAM_BUILD_PYTHON AND GTSAM_ALLOW_DEPRECATED_SINCE_V4)
message(FATAL_ERROR "GTSAM_BUILD_PYTHON and GTSAM_ALLOW_DEPRECATED_SINCE_V4 are both enabled. The python module cannot be compiled with deprecated functions turned on. Turn one of the two options off.")
endif()

if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_USE_VECTOR3_POINTS)
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_USE_VECTOR3_POINTS are both enabled. For now, the MATLAB toolbox cannot deal with this yet. Please turn one of the two options off.")
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS)
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the MATLAB toolbox cannot deal with this yet. Please turn one of the two options off.")
endif()
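For reference, the options tested by these checks are toggled at configure time; a typical out-of-source invocation that satisfies the first check might look like the following (build-directory layout illustrative):

    cmake -DGTSAM_BUILD_PYTHON=ON -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF ..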

# Flags for choosing default packaging tools
@@ -491,7 +491,7 @@ print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency chec
print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 allowed ")
print_config_flag(${GTSAM_USE_VECTOR3_POINTS} "Point3 is typedef to Vector3 ")
print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
@@ -68,6 +68,12 @@ protected:
    testGroup##testName##Instance; \
void testGroup##testName##Test::run (TestResult& result_)

/**
 * Declare friend in a class to test its private methods
 */
#define FRIEND_TEST(testGroup, testName) \
  friend class testGroup##testName##Test;
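// Illustrative usage with hypothetical names (Widget, WidgetTests), showing how a
// class under test grants access to the test defined by TEST(WidgetTests, Hidden):
//
//   class Widget {
//     int hidden_;                       // private state the test needs to inspect
//     FRIEND_TEST(WidgetTests, Hidden);  // expands to: friend class WidgetTestsHiddenTest;
//   };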

/**
 * For debugging only: use TEST_UNSAFE to allow debuggers to have access to exceptions, as this
 * will not wrap execution with a try/catch block

@@ -18,8 +18,6 @@
//
///////////////////////////////////////////////////////////////////////////////

#ifndef TESTRESULT_H
#define TESTRESULT_H
@@ -61,18 +61,18 @@ mark_as_advanced(GTSAM_CMAKE_C_FLAGS_TIMING GTSAM_CMAKE_CXX_FLAGS_TIMING GTSAM_C

# Apply the gtsam specific build flags as normal variables. This makes it so that they only
# apply to the gtsam part of the build if gtsam is built as a subproject
set(CMAKE_C_FLAGS ${GTSAM_CMAKE_C_FLAGS})
set(CMAKE_CXX_FLAGS ${GTSAM_CMAKE_CXX_FLAGS})
set(CMAKE_C_FLAGS_DEBUG ${GTSAM_CMAKE_C_FLAGS_DEBUG})
set(CMAKE_CXX_FLAGS_DEBUG ${GTSAM_CMAKE_CXX_FLAGS_DEBUG})
set(CMAKE_C_FLAGS_RELWITHDEBINFO ${GTSAM_CMAKE_C_FLAGS_RELWITHDEBINFO})
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO ${GTSAM_CMAKE_CXX_FLAGS_RELWITHDEBINFO})
set(CMAKE_C_FLAGS_RELEASE ${GTSAM_CMAKE_C_FLAGS_RELEASE})
set(CMAKE_CXX_FLAGS_RELEASE ${GTSAM_CMAKE_CXX_FLAGS_RELEASE})
set(CMAKE_C_FLAGS_PROFILING ${GTSAM_CMAKE_C_FLAGS_PROFILING})
set(CMAKE_CXX_FLAGS_PROFILING ${GTSAM_CMAKE_CXX_FLAGS_PROFILING})
set(CMAKE_C_FLAGS_TIMING ${GTSAM_CMAKE_C_FLAGS_TIMING})
set(CMAKE_CXX_FLAGS_TIMING ${GTSAM_CMAKE_CXX_FLAGS_TIMING})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${GTSAM_CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GTSAM_CMAKE_CXX_FLAGS}")
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${GTSAM_CMAKE_C_FLAGS_DEBUG}")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${GTSAM_CMAKE_CXX_FLAGS_DEBUG}")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} ${GTSAM_CMAKE_C_FLAGS_RELWITHDEBINFO}")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${GTSAM_CMAKE_CXX_FLAGS_RELWITHDEBINFO}")
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${GTSAM_CMAKE_C_FLAGS_RELEASE}")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${GTSAM_CMAKE_CXX_FLAGS_RELEASE}")
set(CMAKE_C_FLAGS_PROFILING "${CMAKE_C_FLAGS_PROFILING} ${GTSAM_CMAKE_C_FLAGS_PROFILING}")
set(CMAKE_CXX_FLAGS_PROFILING "${CMAKE_CXX_FLAGS_PROFILING} ${GTSAM_CMAKE_CXX_FLAGS_PROFILING}")
set(CMAKE_C_FLAGS_TIMING "${CMAKE_C_FLAGS_TIMING} ${GTSAM_CMAKE_C_FLAGS_TIMING}")
set(CMAKE_CXX_FLAGS_TIMING "${CMAKE_CXX_FLAGS_TIMING} ${GTSAM_CMAKE_CXX_FLAGS_TIMING}")

set(CMAKE_SHARED_LINKER_FLAGS_TIMING ${GTSAM_CMAKE_SHARED_LINKER_FLAGS_TIMING})
set(CMAKE_MODULE_LINKER_FLAGS_TIMING ${GTSAM_CMAKE_MODULE_LINKER_FLAGS_TIMING})
@@ -48,8 +48,7 @@ public:
virtual Vector evaluateError(const Pose3& pose, boost::optional<Matrix&> H =
    boost::none) const {
  SimpleCamera camera(pose, *K_);
  Point2 reprojectionError(camera.project(P_, H, boost::none, boost::none) - p_);
  return reprojectionError.vector();
  return camera.project(P_, H, boost::none, boost::none) - p_;
}
};
@@ -0,0 +1,21 @@
NAME QP example
ROWS
N obj
G r1
L r2
COLUMNS
c1 r1 2.0 r2 -1.0
c1 obj 1.5
c2 r1 1.0 r2 2.0
c2 obj -2.0
RHS
rhs1 obj -4.0
rhs1 r1 2.0 r2 6.0
RANGES
BOUNDS
UP BOUNDS c1 20.0
QUADOBJ
c1 c1 8.0
c1 c2 2.0
c2 c2 10.0
ENDATA
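Read as a quadratic program in the two columns c1 and c2, the MPS data above appears to encode the following problem (one plausible reading; the RHS entry on the objective row only contributes a constant, whose sign convention varies between MPS readers and which does not affect the minimizer):

\[
\min_{x_1, x_2}\ \tfrac{1}{2}
\begin{pmatrix} x_1 & x_2 \end{pmatrix}
\begin{pmatrix} 8 & 2 \\ 2 & 10 \end{pmatrix}
\begin{pmatrix} x_1 \\ x_2 \end{pmatrix}
+ 1.5\,x_1 - 2\,x_2
\quad \text{s.t.} \quad
2x_1 + x_2 \ge 2,\;\; -x_1 + 2x_2 \le 6,\;\; 0 \le x_1 \le 20,\;\; x_2 \ge 0 .
\]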
(File diff suppressed because it is too large.)
@ -0,0 +1,269 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file imuFactorsExample
|
||||
* @brief Test example for using GTSAM ImuFactor and ImuCombinedFactor navigation code.
|
||||
* @author Garrett (ghemann@gmail.com), Luca Carlone
|
||||
*/
|
||||
|
||||
/**
|
||||
* Example of use of the imuFactors (imuFactor and combinedImuFactor) in conjunction with GPS
|
||||
* - you can test imuFactor (resp. combinedImuFactor) by commenting (resp. uncommenting)
|
||||
* the line #define USE_COMBINED (few lines below)
|
||||
* - we read IMU and GPS data from a CSV file, with the following format:
|
||||
* A row starting with "i" is the first initial position formatted with
|
||||
* N, E, D, qx, qY, qZ, qW, velN, velE, velD
|
||||
* A row starting with "0" is an imu measurement
|
||||
* linAccN, linAccE, linAccD, angVelN, angVelE, angVelD
|
||||
* A row starting with "1" is a gps correction formatted with
|
||||
* N, E, D, qX, qY, qZ, qW
|
||||
* Note that for GPS correction, we're only using the position not the rotation. The
|
||||
* rotation is provided in the file for ground truth comparison.
|
||||
*/
|
||||
|
||||
// GTSAM related includes.
|
||||
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||
#include <gtsam/navigation/GPSFactor.h>
|
||||
#include <gtsam/navigation/ImuFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
|
||||
// Uncomment line below to use the CombinedIMUFactor as opposed to the standard ImuFactor.
|
||||
// #define USE_COMBINED
|
||||
|
||||
using namespace gtsam;
|
||||
using namespace std;
|
||||
|
||||
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
|
||||
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
|
||||
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
|
||||
|
||||
const string output_filename = "imuFactorExampleResults.csv";
|
||||
|
||||
// This will either be PreintegratedImuMeasurements (for ImuFactor) or
|
||||
// PreintegratedCombinedMeasurements (for CombinedImuFactor).
|
||||
PreintegrationType *imu_preintegrated_;
|
||||
|
||||
int main(int argc, char* argv[])
|
||||
{
|
||||
string data_filename;
|
||||
if (argc < 2) {
|
||||
printf("using default CSV file\n");
|
||||
data_filename = findExampleDataFile("imuAndGPSdata.csv");
|
||||
} else {
|
||||
data_filename = argv[1];
|
||||
}
|
||||
|
||||
// Set up output file for plotting errors
|
||||
FILE* fp_out = fopen(output_filename.c_str(), "w+");
|
||||
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m),gt_qx,gt_qy,gt_qz,gt_qw\n");
|
||||
|
||||
// Begin parsing the CSV file. Input the first line for initialization.
|
||||
// From there, we'll iterate through the file and we'll preintegrate the IMU
|
||||
// or add in the GPS given the input.
|
||||
ifstream file(data_filename.c_str());
|
||||
string value;
|
||||
|
||||
// Format is (N,E,D,qX,qY,qZ,qW,velN,velE,velD)
|
||||
Eigen::Matrix<double,10,1> initial_state = Eigen::Matrix<double,10,1>::Zero();
|
||||
getline(file, value, ','); // i
|
||||
for (int i=0; i<9; i++) {
|
||||
getline(file, value, ',');
|
||||
initial_state(i) = atof(value.c_str());
|
||||
}
|
||||
getline(file, value, '\n');
|
||||
initial_state(9) = atof(value.c_str());
|
||||
cout << "initial state:\n" << initial_state.transpose() << "\n\n";
|
||||
|
||||
// Assemble initial quaternion through gtsam constructor ::quaternion(w,x,y,z);
|
||||
Rot3 prior_rotation = Rot3::Quaternion(initial_state(6), initial_state(3),
|
||||
initial_state(4), initial_state(5));
|
||||
Point3 prior_point(initial_state.head<3>());
|
||||
Pose3 prior_pose(prior_rotation, prior_point);
|
||||
Vector3 prior_velocity(initial_state.tail<3>());
|
||||
imuBias::ConstantBias prior_imu_bias; // assume zero initial bias
|
||||
|
||||
Values initial_values;
|
||||
int correction_count = 0;
|
||||
initial_values.insert(X(correction_count), prior_pose);
|
||||
initial_values.insert(V(correction_count), prior_velocity);
|
||||
initial_values.insert(B(correction_count), prior_imu_bias);
|
||||
|
||||
// Assemble prior noise model and add it the graph.
|
||||
noiseModel::Diagonal::shared_ptr pose_noise_model = noiseModel::Diagonal::Sigmas((Vector(6) << 0.01, 0.01, 0.01, 0.5, 0.5, 0.5).finished()); // rad,rad,rad,m, m, m
|
||||
noiseModel::Diagonal::shared_ptr velocity_noise_model = noiseModel::Isotropic::Sigma(3,0.1); // m/s
|
||||
noiseModel::Diagonal::shared_ptr bias_noise_model = noiseModel::Isotropic::Sigma(6,1e-3);
|
||||
|
||||
// Add all prior factors (pose, velocity, bias) to the graph.
|
||||
NonlinearFactorGraph *graph = new NonlinearFactorGraph();
|
||||
graph->add(PriorFactor<Pose3>(X(correction_count), prior_pose, pose_noise_model));
|
||||
graph->add(PriorFactor<Vector3>(V(correction_count), prior_velocity,velocity_noise_model));
|
||||
graph->add(PriorFactor<imuBias::ConstantBias>(B(correction_count), prior_imu_bias,bias_noise_model));
|
||||
|
||||
// We use the sensor specs to build the noise model for the IMU factor.
|
||||
double accel_noise_sigma = 0.0003924;
|
||||
double gyro_noise_sigma = 0.000205689024915;
|
||||
double accel_bias_rw_sigma = 0.004905;
|
||||
double gyro_bias_rw_sigma = 0.000001454441043;
|
||||
Matrix33 measured_acc_cov = Matrix33::Identity(3,3) * pow(accel_noise_sigma,2);
|
||||
Matrix33 measured_omega_cov = Matrix33::Identity(3,3) * pow(gyro_noise_sigma,2);
|
||||
Matrix33 integration_error_cov = Matrix33::Identity(3,3)*1e-8; // error committed in integrating position from velocities
|
||||
Matrix33 bias_acc_cov = Matrix33::Identity(3,3) * pow(accel_bias_rw_sigma,2);
|
||||
Matrix33 bias_omega_cov = Matrix33::Identity(3,3) * pow(gyro_bias_rw_sigma,2);
|
||||
Matrix66 bias_acc_omega_int = Matrix::Identity(6,6)*1e-5; // error in the bias used for preintegration
|
||||
|
||||
boost::shared_ptr<PreintegratedCombinedMeasurements::Params> p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);
|
||||
// PreintegrationBase params:
|
||||
p->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous
|
||||
p->integrationCovariance = integration_error_cov; // integration uncertainty continuous
|
||||
// should be using 2nd order integration
|
||||
// PreintegratedRotation params:
|
||||
p->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous
|
||||
// PreintegrationCombinedMeasurements params:
|
||||
p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
|
||||
p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
|
||||
p->biasAccOmegaInt = bias_acc_omega_int;
|
||||
|
||||
#ifdef USE_COMBINED
|
||||
imu_preintegrated_ = new PreintegratedCombinedMeasurements(p, prior_imu_bias);
|
||||
#else
|
||||
imu_preintegrated_ = new PreintegratedImuMeasurements(p, prior_imu_bias);
|
||||
#endif
|
||||
|
||||
// Store previous state for the imu integration and the latest predicted outcome.
|
||||
NavState prev_state(prior_pose, prior_velocity);
|
||||
NavState prop_state = prev_state;
|
||||
imuBias::ConstantBias prev_bias = prior_imu_bias;
|
||||
|
||||
// Keep track of the total error over the entire run for a simple performance metric.
|
||||
double current_position_error = 0.0, current_orientation_error = 0.0;
|
||||
|
||||
double output_time = 0.0;
|
||||
double dt = 0.005; // The real system has noise, but here, results are nearly
|
||||
// exactly the same, so keeping this for simplicity.
|
||||
|
||||
// All priors have been set up, now iterate through the data file.
|
||||
while (file.good()) {
|
||||
|
||||
// Parse out first value
|
||||
getline(file, value, ',');
|
||||
int type = atoi(value.c_str());
|
||||
|
||||
if (type == 0) { // IMU measurement
|
||||
Eigen::Matrix<double,6,1> imu = Eigen::Matrix<double,6,1>::Zero();
|
||||
for (int i=0; i<5; ++i) {
|
||||
getline(file, value, ',');
|
||||
imu(i) = atof(value.c_str());
|
||||
}
|
||||
getline(file, value, '\n');
|
||||
imu(5) = atof(value.c_str());
|
||||
|
||||
// Adding the IMU preintegration.
|
||||
imu_preintegrated_->integrateMeasurement(imu.head<3>(), imu.tail<3>(), dt);
|
||||
|
||||
} else if (type == 1) { // GPS measurement
|
||||
Eigen::Matrix<double,7,1> gps = Eigen::Matrix<double,7,1>::Zero();
|
||||
for (int i=0; i<6; ++i) {
|
||||
getline(file, value, ',');
|
||||
gps(i) = atof(value.c_str());
|
||||
}
|
||||
getline(file, value, '\n');
|
||||
gps(6) = atof(value.c_str());
|
||||
|
||||
correction_count++;
|
||||
|
||||
// Adding IMU factor and GPS factor and optimizing.
|
||||
#ifdef USE_COMBINED
|
||||
PreintegratedCombinedMeasurements *preint_imu_combined = dynamic_cast<PreintegratedCombinedMeasurements*>(imu_preintegrated_);
|
||||
CombinedImuFactor imu_factor(X(correction_count-1), V(correction_count-1),
|
||||
X(correction_count ), V(correction_count ),
|
||||
B(correction_count-1), B(correction_count ),
|
||||
*preint_imu_combined);
|
||||
graph->add(imu_factor);
|
||||
#else
|
||||
PreintegratedImuMeasurements *preint_imu = dynamic_cast<PreintegratedImuMeasurements*>(imu_preintegrated_);
|
||||
ImuFactor imu_factor(X(correction_count-1), V(correction_count-1),
|
||||
X(correction_count ), V(correction_count ),
|
||||
B(correction_count-1),
|
||||
*preint_imu);
|
||||
graph->add(imu_factor);
|
||||
imuBias::ConstantBias zero_bias(Vector3(0, 0, 0), Vector3(0, 0, 0));
|
||||
graph->add(BetweenFactor<imuBias::ConstantBias>(B(correction_count-1),
|
||||
B(correction_count ),
|
||||
zero_bias, bias_noise_model));
|
||||
#endif
|
||||
|
||||
noiseModel::Diagonal::shared_ptr correction_noise = noiseModel::Isotropic::Sigma(3,1.0);
|
||||
GPSFactor gps_factor(X(correction_count),
|
||||
Point3(gps(0), // N,
|
||||
gps(1), // E,
|
||||
gps(2)), // D,
|
||||
correction_noise);
|
||||
graph->add(gps_factor);
|
||||
|
||||
// Now optimize and compare results.
|
||||
prop_state = imu_preintegrated_->predict(prev_state, prev_bias);
|
||||
initial_values.insert(X(correction_count), prop_state.pose());
|
||||
initial_values.insert(V(correction_count), prop_state.v());
|
||||
initial_values.insert(B(correction_count), prev_bias);
|
||||
|
||||
LevenbergMarquardtOptimizer optimizer(*graph, initial_values);
|
||||
Values result = optimizer.optimize();
|
||||
|
||||
// Overwrite the beginning of the preintegration for the next step.
|
||||
prev_state = NavState(result.at<Pose3>(X(correction_count)),
|
||||
result.at<Vector3>(V(correction_count)));
|
||||
prev_bias = result.at<imuBias::ConstantBias>(B(correction_count));
|
||||
|
||||
// Reset the preintegration object.
|
||||
imu_preintegrated_->resetIntegrationAndSetBias(prev_bias);
|
||||
|
||||
// Print out the position and orientation error for comparison.
|
||||
Vector3 gtsam_position = prev_state.pose().translation();
|
||||
Vector3 position_error = gtsam_position - gps.head<3>();
|
||||
current_position_error = position_error.norm();
|
||||
|
||||
Quaternion gtsam_quat = prev_state.pose().rotation().toQuaternion();
|
||||
Quaternion gps_quat(gps(6), gps(3), gps(4), gps(5));
|
||||
Quaternion quat_error = gtsam_quat * gps_quat.inverse();
|
||||
quat_error.normalize();
|
||||
Vector3 euler_angle_error(quat_error.x()*2,
|
||||
quat_error.y()*2,
|
||||
quat_error.z()*2);
|
||||
current_orientation_error = euler_angle_error.norm();
|
||||
|
||||
// display statistics
|
||||
cout << "Position error:" << current_position_error << "\t " << "Angular error:" << current_orientation_error << "\n";
|
||||
|
||||
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
|
||||
output_time, gtsam_position(0), gtsam_position(1), gtsam_position(2),
|
||||
gtsam_quat.x(), gtsam_quat.y(), gtsam_quat.z(), gtsam_quat.w(),
|
||||
gps(0), gps(1), gps(2),
|
||||
gps_quat.x(), gps_quat.y(), gps_quat.z(), gps_quat.w());
|
||||
|
||||
output_time += 1.0;
|
||||
|
||||
} else {
|
||||
cerr << "ERROR parsing file\n";
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
fclose(fp_out);
|
||||
cout << "Complete, results written to " << output_filename << "\n\n";;
|
||||
return 0;
|
||||
}
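Under the row layout documented in the header comment of this example, an input CSV would begin with one initial-state row ("i") followed by interleaved IMU ("0") and GPS ("1") rows. The values below are illustrative only and are not taken from the bundled imuAndGPSdata.csv:

    i,0,0,0,0,0,0,1,0,0,0
    0,0.01,-0.02,9.81,0.001,0.002,-0.001
    1,0.05,0.01,-0.02,0,0,0,1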
@@ -67,7 +67,7 @@ int main(int argc, char** argv){
  initial_estimate.insert(5, Point3(0, -0.5, 5));

  //create Levenberg-Marquardt optimizer for resulting factor graph, optimize
  LevenbergMarquardtOptimizer optimizer = LevenbergMarquardtOptimizer(graph, initial_estimate);
  LevenbergMarquardtOptimizer optimizer(graph, initial_estimate);
  Values result = optimizer.optimize();

  result.print("Final result:\n");

@@ -105,7 +105,7 @@ int main(int argc, char** argv){
  //create Levenberg-Marquardt optimizer to optimize the factor graph
  LevenbergMarquardtParams params;
  params.orderingType = Ordering::METIS;
  LevenbergMarquardtOptimizer optimizer = LevenbergMarquardtOptimizer(graph, initial_estimate, params);
  LevenbergMarquardtOptimizer optimizer(graph, initial_estimate, params);
  Values result = optimizer.optimize();

  cout << "Final result sample:" << endl;

@@ -70,7 +70,7 @@ int main() {

  // Predict the new value with the EKF class
  Point2 x1_predict = ekf.predict(factor1);
  x1_predict.print("X1 Predict");
  traits<Point2>::Print(x1_predict, "X1 Predict");

@@ -91,7 +91,7 @@ int main() {

  // Update the Kalman Filter with the measurement
  Point2 x1_update = ekf.update(factor2);
  x1_update.print("X1 Update");
  traits<Point2>::Print(x1_update, "X1 Update");

@@ -101,13 +101,13 @@ int main() {
  difference = Point2(1,0);
  BetweenFactor<Point2> factor3(x1, x2, difference, Q);
  Point2 x2_predict = ekf.predict(factor1);
  x2_predict.print("X2 Predict");
  traits<Point2>::Print(x2_predict, "X2 Predict");

  // Update
  Point2 z2(2.0, 0.0);
  PriorFactor<Point2> factor4(x2, z2, R);
  Point2 x2_update = ekf.update(factor4);
  x2_update.print("X2 Update");
  traits<Point2>::Print(x2_update, "X2 Update");

@@ -117,13 +117,13 @@ int main() {
  difference = Point2(1,0);
  BetweenFactor<Point2> factor5(x2, x3, difference, Q);
  Point2 x3_predict = ekf.predict(factor5);
  x3_predict.print("X3 Predict");
  traits<Point2>::Print(x3_predict, "X3 Predict");

  // Update
  Point2 z3(3.0, 0.0);
  PriorFactor<Point2> factor6(x3, z3, R);
  Point2 x3_update = ekf.update(factor6);
  x3_update.print("X3 Update");
  traits<Point2>::Print(x3_update, "X3 Update");

  return 0;
}
gtsam.h
@ -266,23 +266,12 @@ class Point2 {
|
|||
|
||||
// Group
|
||||
static gtsam::Point2 identity();
|
||||
gtsam::Point2 inverse() const;
|
||||
gtsam::Point2 compose(const gtsam::Point2& p2) const;
|
||||
gtsam::Point2 between(const gtsam::Point2& p2) const;
|
||||
|
||||
// Manifold
|
||||
gtsam::Point2 retract(Vector v) const;
|
||||
Vector localCoordinates(const gtsam::Point2& p) const;
|
||||
|
||||
// Lie Group
|
||||
static gtsam::Point2 Expmap(Vector v);
|
||||
static Vector Logmap(const gtsam::Point2& p);
|
||||
|
||||
// Standard Interface
|
||||
double x() const;
|
||||
double y() const;
|
||||
Vector vector() const;
|
||||
double dist(const gtsam::Point2& p2) const;
|
||||
double distance(const gtsam::Point2& p2) const;
|
||||
double norm() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
|
@ -1368,7 +1357,7 @@ virtual class HessianFactor : gtsam::GaussianFactor {
|
|||
|
||||
//Standard Interface
|
||||
size_t rows() const;
|
||||
Matrix info() const;
|
||||
Matrix information() const;
|
||||
double constantTerm() const;
|
||||
Vector linearTerm() const;
|
||||
|
||||
|
@ -1941,10 +1930,10 @@ virtual class LinearContainerFactor : gtsam::NonlinearFactor {
|
|||
gtsam::JacobianFactor* toJacobian() const;
|
||||
gtsam::HessianFactor* toHessian() const;
|
||||
|
||||
static gtsam::NonlinearFactorGraph convertLinearGraph(const gtsam::GaussianFactorGraph& linear_graph,
|
||||
static gtsam::NonlinearFactorGraph ConvertLinearGraph(const gtsam::GaussianFactorGraph& linear_graph,
|
||||
const gtsam::Values& linearizationPoint);
|
||||
|
||||
static gtsam::NonlinearFactorGraph convertLinearGraph(const gtsam::GaussianFactorGraph& linear_graph);
|
||||
static gtsam::NonlinearFactorGraph ConvertLinearGraph(const gtsam::GaussianFactorGraph& linear_graph);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
|
|
@@ -13,13 +13,16 @@
 * @file DSFVector.h
 * @date Jun 25, 2010
 * @author Kai Ni
 * @brief A faster implementation for DSF, which uses vector rather than btree. As a result, the size of the forest is prefixed.
 * @brief A faster implementation for DSF, which uses vector rather than btree.
 */

#pragma once

#include <gtsam/dllexport.h>
#include <gtsam/global_includes.h>

#include <boost/shared_ptr.hpp>

#include <vector>
#include <set>
#include <map>

@@ -41,27 +44,26 @@ private:
  boost::shared_ptr<V> v_;///< Stores parent pointers, representative iff v[i]==i

public:
  /// constructor that allocate new memory, allows for keys 0...numNodes-1
  /// Constructor that allocates new memory, allows for keys 0...numNodes-1.
  DSFBase(const size_t numNodes);

  /// constructor that uses the existing memory
  /// Constructor that uses an existing, pre-allocated vector.
  DSFBase(const boost::shared_ptr<V>& v_in);

  /// find the label of the set in which {key} lives
  /// Find the label of the set in which {key} lives.
  size_t find(size_t key) const;

  /// Merge two sets
  /// Merge the sets containing i1 and i2. Does nothing if i1 and i2 are already in the same set.
  void merge(const size_t& i1, const size_t& i2);

  /// @deprecated old name
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
  inline size_t findSet(size_t key) const {return find(key);}

  /// @deprecated old name
  inline void makeUnionInPlace(const size_t& i1, const size_t& i2) {return merge(i1,i2);}
#endif
};

/**
 * DSFVector additionaly keeps a vector of keys to support more expensive operations
 * DSFVector additionally keeps a vector of keys to support more expensive operations
 * @addtogroup base
 */
class GTSAM_EXPORT DSFVector: public DSFBase {

@@ -70,27 +72,27 @@ private:
  std::vector<size_t> keys_; ///< stores keys to support more expensive operations

public:
  /// constructor that allocate new memory, uses sequential keys 0...numNodes-1
  /// Constructor that allocates new memory, uses sequential keys 0...numNodes-1.
  DSFVector(const size_t numNodes);

  /// constructor that allocates memory, uses given keys
  /// Constructor that allocates memory, uses given keys.
  DSFVector(const std::vector<size_t>& keys);

  /// constructor that uses the existing memory
  /// Constructor that uses existing vectors.
  DSFVector(const boost::shared_ptr<V>& v_in, const std::vector<size_t>& keys);

  // all operations below loop over all keys and hence are *at least* O(n)
  // All operations below loop over all keys and hence are *at least* O(n)

  /// find whether there is one and only one occurrence for the given {label}
  /// Find whether there is one and only one occurrence for the given {label}.
  bool isSingleton(const size_t& label) const;

  /// get the nodes in the tree with the given label
  /// Get the nodes in the tree with the given label
  std::set<size_t> set(const size_t& label) const;

  /// return all sets, i.e. a partition of all elements
  /// Return all sets, i.e. a partition of all elements.
  std::map<size_t, std::set<size_t> > sets() const;

  /// return all sets, i.e. a partition of all elements
  /// Return all sets, i.e. a partition of all elements.
  std::map<size_t, std::vector<size_t> > arrays() const;
};
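A minimal usage sketch of the interface declared above (values illustrative; assumes the gtsam namespace):

    gtsam::DSFVector dsf(5);      // five singleton sets {0} {1} {2} {3} {4}
    dsf.merge(0, 1);              // union the sets containing 0 and 1
    dsf.merge(1, 2);              // now {0,1,2} {3} {4}
    size_t label = dsf.find(2);   // same label as dsf.find(0)
    std::map<size_t, std::set<size_t> > partition = dsf.sets();  // three sets in total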
@ -51,32 +51,42 @@ SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
VerticalBlockMatrix SymmetricBlockMatrix::choleskyPartial(
|
||||
DenseIndex nFrontals) {
|
||||
// Do dense elimination
|
||||
if (blockStart() != 0)
|
||||
throw std::invalid_argument(
|
||||
"Can only do Cholesky when the SymmetricBlockMatrix is not a restricted view, i.e. when blockStart == 0.");
|
||||
if (!gtsam::choleskyPartial(matrix_, offset(nFrontals)))
|
||||
Matrix SymmetricBlockMatrix::block(DenseIndex I, DenseIndex J) const {
|
||||
if (I == J) {
|
||||
return diagonalBlock(I);
|
||||
} else if (I < J) {
|
||||
return aboveDiagonalBlock(I, J);
|
||||
} else {
|
||||
return aboveDiagonalBlock(J, I).transpose();
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void SymmetricBlockMatrix::choleskyPartial(DenseIndex nFrontals) {
|
||||
gttic(VerticalBlockMatrix_choleskyPartial);
|
||||
DenseIndex topleft = variableColOffsets_[blockStart_];
|
||||
if (!gtsam::choleskyPartial(matrix_, offset(nFrontals) - topleft, topleft))
|
||||
throw CholeskyFailed();
|
||||
}
|
||||
|
||||
// Split conditional
|
||||
/* ************************************************************************* */
|
||||
VerticalBlockMatrix SymmetricBlockMatrix::split(DenseIndex nFrontals) {
|
||||
gttic(VerticalBlockMatrix_split);
|
||||
|
||||
// Create one big conditionals with many frontal variables.
|
||||
gttic(Construct_conditional);
|
||||
const size_t varDim = offset(nFrontals);
|
||||
VerticalBlockMatrix Ab = VerticalBlockMatrix::LikeActiveViewOf(*this, varDim);
|
||||
Ab.full() = matrix_.topRows(varDim);
|
||||
Ab.full().triangularView<Eigen::StrictlyLower>().setZero();
|
||||
gttoc(Construct_conditional);
|
||||
// Construct a VerticalBlockMatrix that contains [R Sd]
|
||||
const size_t n1 = offset(nFrontals);
|
||||
VerticalBlockMatrix RSd = VerticalBlockMatrix::LikeActiveViewOf(*this, n1);
|
||||
|
||||
// Copy into it.
|
||||
RSd.full() = matrix_.topRows(n1);
|
||||
RSd.full().triangularView<Eigen::StrictlyLower>().setZero();
|
||||
|
||||
gttic(Remaining_factor);
|
||||
// Take lower-right block of Ab_ to get the remaining factor
|
||||
blockStart() = nFrontals;
|
||||
gttoc(Remaining_factor);
|
||||
|
||||
return Ab;
|
||||
return RSd;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
||||
} //\ namespace gtsam
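The elimination carried out by choleskyPartial and split above follows the standard partial Cholesky identity; as a sketch, writing the augmented Hessian in block form with \(M_{11} = A_1^\top A_1\), \(M_{12} = [\,A_1^\top A_2 \;\; A_1^\top b\,]\) and \(M_{22}\) the augmented Hessian on the separator:

\[
\begin{pmatrix} M_{11} & M_{12} \\ M_{12}^\top & M_{22} \end{pmatrix}
=
\begin{pmatrix} R^\top & 0 \\ S_d^\top & L^\top \end{pmatrix}
\begin{pmatrix} R & S_d \\ 0 & L \end{pmatrix},
\qquad
R^\top R = M_{11}, \quad R^\top S_d = M_{12}, \quad L^\top L = M_{22} - S_d^\top S_d ,
\]

so \([\,R \;\; S_d\,]\) can be read as the eliminated GaussianConditional, while the Schur complement \(L^\top L\) stays in place as the remaining symmetric factor.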
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -19,7 +19,6 @@
|
|||
|
||||
#include <gtsam/base/FastVector.h>
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/SymmetricBlockMatrixBlockExpr.h>
|
||||
#include <gtsam/base/types.h>
|
||||
#include <gtsam/dllexport.h>
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
|
@ -53,8 +52,8 @@ namespace gtsam {
|
|||
{
|
||||
public:
|
||||
typedef SymmetricBlockMatrix This;
|
||||
typedef SymmetricBlockMatrixBlockExpr<This> Block;
|
||||
typedef SymmetricBlockMatrixBlockExpr<const This> constBlock;
|
||||
typedef Eigen::Block<Matrix> Block;
|
||||
typedef Eigen::Block<const Matrix> constBlock;
|
||||
|
||||
protected:
|
||||
Matrix matrix_; ///< The full matrix
|
||||
|
@ -105,12 +104,12 @@ namespace gtsam {
|
|||
throw std::invalid_argument("Requested to create a SymmetricBlockMatrix with dimensions that do not sum to the total size of the provided matrix.");
|
||||
assertInvariants();
|
||||
}
|
||||
|
||||
|
||||
/// Copy the block structure, but do not copy the matrix data. If blockStart() has been
|
||||
/// modified, this copies the structure of the corresponding matrix view. In the destination
|
||||
/// SymmetricBlockMatrix, blockStart() will be 0.
|
||||
static SymmetricBlockMatrix LikeActiveViewOf(const SymmetricBlockMatrix& other);
|
||||
|
||||
|
||||
/// Copy the block structure, but do not copy the matrix data. If blockStart() has been
|
||||
/// modified, this copies the structure of the corresponding matrix view. In the destination
|
||||
/// SymmetricBlockMatrix, blockStart() will be 0.
|
||||
|
@ -123,71 +122,165 @@ namespace gtsam {
|
|||
DenseIndex cols() const { return rows(); }
|
||||
|
||||
/// Block count
|
||||
DenseIndex nBlocks() const { assertInvariants(); return variableColOffsets_.size() - 1 - blockStart_; }
|
||||
DenseIndex nBlocks() const { return nActualBlocks() - blockStart_; }
|
||||
|
||||
/// Access the block with vertical block index \c i_block and horizontal block index \c j_block.
|
||||
/// Note that the actual block accessed in the underlying matrix is relative to blockStart().
|
||||
Block operator()(DenseIndex i_block, DenseIndex j_block) {
|
||||
return Block(*this, i_block, j_block);
|
||||
/// Number of dimensions for variable on this diagonal block.
|
||||
DenseIndex getDim(DenseIndex block) const {
|
||||
return calcIndices(block, block, 1, 1)[2];
|
||||
}
|
||||
|
||||
/// Access the block with vertical block index \c i_block and horizontal block index \c j_block.
|
||||
/// Note that the actual block accessed in the underlying matrix is relative to blockStart().
|
||||
constBlock operator()(DenseIndex i_block, DenseIndex j_block) const {
|
||||
return constBlock(*this, i_block, j_block);
|
||||
/// @name Block getter methods.
|
||||
/// @{
|
||||
|
||||
/// Get a copy of a block (anywhere in the matrix).
|
||||
/// This method makes a copy - use the methods below if performance is critical.
|
||||
Matrix block(DenseIndex I, DenseIndex J) const;
|
||||
|
||||
/// Return the J'th diagonal block as a self adjoint view.
|
||||
Eigen::SelfAdjointView<Block, Eigen::Upper> diagonalBlock(DenseIndex J) {
|
||||
return block_(J, J).selfadjointView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
/// Access the range of blocks starting with vertical block index \c i_startBlock, ending with
|
||||
/// vertical block index \c i_endBlock, starting with horizontal block index \c j_startBlock,
|
||||
/// and ending with horizontal block index \c j_endBlock. End block indices are exclusive. Note
|
||||
/// that the actual blocks accessed in the underlying matrix are relative to blockStart().
|
||||
Block range(DenseIndex i_startBlock, DenseIndex i_endBlock, DenseIndex j_startBlock, DenseIndex j_endBlock) {
|
||||
assertInvariants();
|
||||
return Block(*this, i_startBlock, j_startBlock, i_endBlock - i_startBlock, j_endBlock - j_startBlock);
|
||||
/// Return the J'th diagonal block as a self adjoint view.
|
||||
Eigen::SelfAdjointView<constBlock, Eigen::Upper> diagonalBlock(DenseIndex J) const {
|
||||
return block_(J, J).selfadjointView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
/// Access the range of blocks starting with vertical block index \c i_startBlock, ending with
|
||||
/// vertical block index \c i_endBlock, starting with horizontal block index \c j_startBlock,
|
||||
/// and ending with horizontal block index \c j_endBlock. End block indices are exclusive. Note
|
||||
/// that the actual blocks accessed in the underlying matrix are relative to blockStart().
|
||||
constBlock range(DenseIndex i_startBlock, DenseIndex i_endBlock, DenseIndex j_startBlock, DenseIndex j_endBlock) const {
|
||||
assertInvariants();
|
||||
return constBlock(*this, i_startBlock, j_startBlock, i_endBlock - i_startBlock, j_endBlock - j_startBlock);
|
||||
/// Get the diagonal of the J'th diagonal block.
|
||||
Vector diagonal(DenseIndex J) const {
|
||||
return block_(J, J).diagonal();
|
||||
}
|
||||
|
||||
/** Return the full matrix, *not* including any portions excluded by firstBlock(). */
|
||||
Block full()
|
||||
{
|
||||
return Block(*this, 0, nBlocks(), 0);
|
||||
/// Get block above the diagonal (I, J).
|
||||
constBlock aboveDiagonalBlock(DenseIndex I, DenseIndex J) const {
|
||||
assert(I < J);
|
||||
return block_(I, J);
|
||||
}
|
||||
|
||||
/** Return the full matrix, *not* including any portions excluded by firstBlock(). */
|
||||
constBlock full() const
|
||||
{
|
||||
return constBlock(*this, 0, nBlocks(), 0);
|
||||
/// Return the square sub-matrix that contains blocks(i:j, i:j).
|
||||
Eigen::SelfAdjointView<constBlock, Eigen::Upper> selfadjointView(
|
||||
DenseIndex I, DenseIndex J) const {
|
||||
assert(J > I);
|
||||
return block_(I, I, J - I, J - I).selfadjointView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
/** Access to full matrix, including any portions excluded by firstBlock() to other operations. */
|
||||
Eigen::SelfAdjointView<const Matrix, Eigen::Upper> matrix() const
|
||||
{
|
||||
return matrix_;
|
||||
/// Return the square sub-matrix that contains blocks(i:j, i:j) as a triangular view.
|
||||
Eigen::TriangularView<constBlock, Eigen::Upper> triangularView(DenseIndex I,
|
||||
DenseIndex J) const {
|
||||
assert(J > I);
|
||||
return block_(I, I, J - I, J - I).triangularView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
/** Access to full matrix, including any portions excluded by firstBlock() to other operations. */
|
||||
Eigen::SelfAdjointView<Matrix, Eigen::Upper> matrix()
|
||||
{
|
||||
return matrix_;
|
||||
/// Get a range [i,j) from the matrix. Indices are in block units.
|
||||
constBlock aboveDiagonalRange(DenseIndex i_startBlock,
|
||||
DenseIndex i_endBlock,
|
||||
DenseIndex j_startBlock,
|
||||
DenseIndex j_endBlock) const {
|
||||
assert(i_startBlock < j_startBlock);
|
||||
assert(i_endBlock <= j_startBlock);
|
||||
return block_(i_startBlock, j_startBlock, i_endBlock - i_startBlock,
|
||||
j_endBlock - j_startBlock);
|
||||
}
|
||||
|
||||
/// Return the absolute offset in the underlying matrix of the start of the specified \c block.
|
||||
DenseIndex offset(DenseIndex block) const
|
||||
{
|
||||
assertInvariants();
|
||||
DenseIndex actualBlock = block + blockStart_;
|
||||
checkBlock(actualBlock);
|
||||
return variableColOffsets_[actualBlock];
|
||||
/// Get a range [i,j) from the matrix. Indices are in block units.
|
||||
Block aboveDiagonalRange(DenseIndex i_startBlock, DenseIndex i_endBlock,
|
||||
DenseIndex j_startBlock, DenseIndex j_endBlock) {
|
||||
assert(i_startBlock < j_startBlock);
|
||||
assert(i_endBlock <= j_startBlock);
|
||||
return block_(i_startBlock, j_startBlock, i_endBlock - i_startBlock,
|
||||
j_endBlock - j_startBlock);
|
||||
}
|
||||
|
||||
/// @}
|
||||
/// @name Block setter methods.
|
||||
/// @{
|
||||
|
||||
/// Set a diagonal block. Only the upper triangular portion of `xpr` is evaluated.
|
||||
template <typename XprType>
|
||||
void setDiagonalBlock(DenseIndex I, const XprType& xpr) {
|
||||
block_(I, I).triangularView<Eigen::Upper>() = xpr.template triangularView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
/// Set an off-diagonal block. Only the upper triangular portion of `xpr` is evaluated.
|
||||
template <typename XprType>
|
||||
void setOffDiagonalBlock(DenseIndex I, DenseIndex J, const XprType& xpr) {
|
||||
assert(I != J);
|
||||
if (I < J) {
|
||||
block_(I, J) = xpr;
|
||||
} else {
|
||||
block_(J, I) = xpr.transpose();
|
||||
}
|
||||
}
|
||||
|
||||
/// Increment the diagonal block by the values in `xpr`. Only reads the upper triangular part of `xpr`.
|
||||
template <typename XprType>
|
||||
void updateDiagonalBlock(DenseIndex I, const XprType& xpr) {
|
||||
// TODO(gareth): Eigen won't let us add triangular or self-adjoint views
|
||||
// here, so we do it manually.
|
||||
auto dest = block_(I, I);
|
||||
assert(dest.rows() == xpr.rows());
|
||||
assert(dest.cols() == xpr.cols());
|
||||
for (DenseIndex col = 0; col < dest.cols(); ++col) {
|
||||
for (DenseIndex row = 0; row <= col; ++row) {
|
||||
dest(row, col) += xpr(row, col);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Update an off diagonal block.
|
||||
/// NOTE(emmett): This assumes noalias().
|
||||
template <typename XprType>
|
||||
void updateOffDiagonalBlock(DenseIndex I, DenseIndex J, const XprType& xpr) {
|
||||
assert(I != J);
|
||||
if (I < J) {
|
||||
block_(I, J).noalias() += xpr;
|
||||
} else {
|
||||
block_(J, I).noalias() += xpr.transpose();
|
||||
}
|
||||
}
|
||||
|
||||
/// @}
|
||||
/// @name Accessing the full matrix.
|
||||
/// @{
|
||||
|
||||
/// Get self adjoint view.
|
||||
Eigen::SelfAdjointView<Block, Eigen::Upper> selfadjointView() {
|
||||
return full().selfadjointView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
/// Get self adjoint view.
|
||||
Eigen::SelfAdjointView<constBlock, Eigen::Upper> selfadjointView() const {
|
||||
return full().selfadjointView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
/// Set the entire active matrix. Only reads the upper triangular part of `xpr`.
|
||||
template <typename XprType>
|
||||
void setFullMatrix(const XprType& xpr) {
|
||||
full().triangularView<Eigen::Upper>() = xpr.template triangularView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
/// Set the entire active matrix zero.
|
||||
void setZero() {
|
||||
full().triangularView<Eigen::Upper>().setZero();
|
||||
}
|
||||
|
||||
/// Negate the entire active matrix.
|
||||
void negate() {
|
||||
full().triangularView<Eigen::Upper>() *= -1.0;
|
||||
}
|
||||
|
||||
/// Invert the entire active matrix in place.
|
||||
void invertInPlace() {
|
||||
const auto identity = Matrix::Identity(rows(), rows());
|
||||
full().triangularView<Eigen::Upper>() =
|
||||
selfadjointView()
|
||||
.llt()
|
||||
.solve(identity)
|
||||
.triangularView<Eigen::Upper>();
|
||||
}
|
||||
|
||||
/// @}
|
||||
|
||||
/// Retrieve or modify the first logical block, i.e. the block referenced by block index 0.
|
||||
/// Blocks before it will be inaccessible, except by accessing the underlying matrix using
|
||||
/// matrix().
|
||||
|
@ -197,11 +290,86 @@ namespace gtsam {
|
|||
/// it will be inaccessible, except by accessing the underlying matrix using matrix().
|
||||
DenseIndex blockStart() const { return blockStart_; }
|
||||
|
||||
/// Do partial Cholesky in-place and return the eliminated block matrix, leaving the remaining
|
||||
/// symmetric matrix in place.
|
||||
VerticalBlockMatrix choleskyPartial(DenseIndex nFrontals);
|
||||
/**
|
||||
* Given the augmented Hessian [A1'A1 A1'A2 A1'b
|
||||
* A2'A1 A2'A2 A2'b
|
||||
* b'A1 b'A2 b'b]
|
||||
* on x1 and x2, does partial Cholesky in-place to obtain [R Sd;0 L] such that
|
||||
* R'R = A1'A1
|
||||
* R'Sd = [A1'A2 A1'b]
|
||||
* L'L is the augmented Hessian on the the separator x2
|
||||
* R and Sd can be interpreted as a GaussianConditional |R*x1 + S*x2 - d]^2
|
||||
*/
|
||||
void choleskyPartial(DenseIndex nFrontals);
|
||||
|
||||
/**
|
||||
* After partial Cholesky, we can optionally split off R and Sd, to be interpreted as
|
||||
* a GaussianConditional |R*x1 + S*x2 - d]^2. We leave the symmetric lower block L in place,
|
||||
* and adjust block_start so now *this refers to it.
|
||||
*/
|
||||
VerticalBlockMatrix split(DenseIndex nFrontals);
|
||||
|
||||
protected:
|
||||
|
||||
/// Number of offsets in the full matrix.
|
||||
DenseIndex nOffsets() const {
|
||||
return variableColOffsets_.size();
|
||||
}
|
||||
|
||||
/// Number of actual blocks in the full matrix.
|
||||
DenseIndex nActualBlocks() const {
|
||||
return nOffsets() - 1;
|
||||
}
|
||||
|
||||
/// Get an offset for a block index (in the active view).
|
||||
DenseIndex offset(DenseIndex block) const {
|
||||
assert(block >= 0);
|
||||
const DenseIndex actual_index = block + blockStart();
|
||||
assert(actual_index < nOffsets());
|
||||
return variableColOffsets_[actual_index];
|
||||
}
|
||||
|
||||
/// Get an arbitrary block from the matrix. Indices are in block units.
|
||||
constBlock block_(DenseIndex iBlock, DenseIndex jBlock,
|
||||
DenseIndex blockRows = 1, DenseIndex blockCols = 1) const {
|
||||
const std::array<DenseIndex, 4> indices =
|
||||
calcIndices(iBlock, jBlock, blockRows, blockCols);
|
||||
return matrix_.block(indices[0], indices[1], indices[2], indices[3]);
|
||||
}
|
||||
|
||||
/// Get an arbitrary block from the matrix. Indices are in block units.
|
||||
Block block_(DenseIndex iBlock, DenseIndex jBlock, DenseIndex blockRows = 1,
|
||||
DenseIndex blockCols = 1) {
|
||||
const std::array<DenseIndex, 4> indices =
|
||||
calcIndices(iBlock, jBlock, blockRows, blockCols);
|
||||
return matrix_.block(indices[0], indices[1], indices[2], indices[3]);
|
||||
}
|
||||
|
||||
/// Get the full matrix as a block.
|
||||
constBlock full() const {
|
||||
return block_(0, 0, nBlocks(), nBlocks());
|
||||
}
|
||||
|
||||
/// Get the full matrix as a block.
|
||||
Block full() {
|
||||
return block_(0, 0, nBlocks(), nBlocks());
|
||||
}
|
||||
|
||||
/// Compute the indices into the underlying matrix for a given block.
|
||||
std::array<DenseIndex, 4> calcIndices(DenseIndex iBlock, DenseIndex jBlock,
|
||||
DenseIndex blockRows,
|
||||
DenseIndex blockCols) const {
|
||||
assert(blockRows >= 0);
|
||||
assert(blockCols >= 0);
|
||||
|
||||
// adjust indices to account for start and size of blocks
|
||||
const DenseIndex denseI = offset(iBlock);
|
||||
const DenseIndex denseJ = offset(jBlock);
|
||||
const DenseIndex denseRows = offset(iBlock + blockRows) - denseI;
|
||||
const DenseIndex denseCols = offset(jBlock + blockCols) - denseJ;
|
||||
return {{denseI, denseJ, denseRows, denseCols}};
|
||||
}
|
||||
|
||||
void assertInvariants() const
|
||||
{
|
||||
assert(matrix_.rows() == matrix_.cols());
|
||||
|
@ -209,21 +377,6 @@ namespace gtsam {
|
|||
assert(blockStart_ < (DenseIndex)variableColOffsets_.size());
|
||||
}
|
||||
|
||||
void checkBlock(DenseIndex block) const
|
||||
{
|
||||
static_cast<void>(block); //Disable unused varibale warnings.
|
||||
assert(matrix_.rows() == matrix_.cols());
|
||||
assert(matrix_.cols() == variableColOffsets_.back());
|
||||
assert(block >= 0);
|
||||
assert(block < (DenseIndex)variableColOffsets_.size()-1);
|
||||
assert(variableColOffsets_[block] < matrix_.cols() && variableColOffsets_[block+1] <= matrix_.cols());
|
||||
}
|
||||
|
||||
DenseIndex offsetUnchecked(DenseIndex block) const
|
||||
{
|
||||
return variableColOffsets_[block + blockStart_];
|
||||
}
|
||||
|
||||
template<typename ITERATOR>
|
||||
void fillOffsets(ITERATOR firstBlockDim, ITERATOR lastBlockDim, bool appendOneDimension)
|
||||
{
|
||||
|
|
|
@ -1,337 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SymmetricBlockMatrixBlockExpr.h
|
||||
* @brief Matrix expression for a block of a SymmetricBlockMatrix
|
||||
* @author Richard Roberts
|
||||
* @date Nov 20, 2013
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
|
||||
namespace gtsam { template<typename SymmetricBlockMatrixType> class SymmetricBlockMatrixBlockExpr; }
|
||||
namespace gtsam { class SymmetricBlockMatrix; }
|
||||
|
||||
// traits class for Eigen expressions
|
||||
namespace Eigen
|
||||
{
|
||||
namespace internal
|
||||
{
|
||||
template<typename SymmetricBlockMatrixType>
|
||||
struct traits<gtsam::SymmetricBlockMatrixBlockExpr<SymmetricBlockMatrixType> > :
|
||||
public traits<typename gtsam::const_selector<
|
||||
SymmetricBlockMatrixType, gtsam::SymmetricBlockMatrix, gtsam::Matrix, const gtsam::Matrix>::type>
|
||||
{
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
namespace gtsam
|
||||
{
|
||||
/// A matrix expression that references a single block of a SymmetricBlockMatrix. Depending on
|
||||
/// the position of the block, this expression will behave either as a regular matrix block, a
|
||||
/// transposed matrix block, or a symmetric matrix block. The only reason this class is templated
|
||||
/// on SymmetricBlockMatrixType is to allow for both const and non-const references.
|
||||
template<typename SymmetricBlockMatrixType>
|
||||
class SymmetricBlockMatrixBlockExpr : public Eigen::EigenBase<SymmetricBlockMatrixBlockExpr<SymmetricBlockMatrixType> >
|
||||
{
|
||||
protected:
|
||||
SymmetricBlockMatrixType& xpr_; ///< The referenced SymmetricBlockMatrix
|
||||
DenseIndex densei_; ///< The scalar indices of the referenced block
|
||||
DenseIndex densej_; ///< The scalar indices of the referenced block
|
||||
DenseIndex denseRows_; ///< The scalar size of the referenced block
|
||||
DenseIndex denseCols_; ///< The scalar size of the referenced block
|
||||
enum BlockType { Plain, SelfAdjoint, Transposed } blockType_; ///< The type of the referenced block, as determined by the block position
|
||||
typedef SymmetricBlockMatrixBlockExpr<SymmetricBlockMatrixType> This;
|
||||
|
||||
public:
|
||||
// Typedefs and constants used in Eigen
|
||||
typedef typename const_selector<SymmetricBlockMatrixType, SymmetricBlockMatrix,
|
||||
typename Eigen::internal::traits<This>::Scalar&, typename Eigen::internal::traits<This>::Scalar>::type ScalarRef;
|
||||
typedef typename Eigen::internal::traits<This>::Scalar Scalar;
|
||||
typedef typename Eigen::internal::traits<This>::Index Index;
|
||||
static const Index ColsAtCompileTime = Eigen::Dynamic;
|
||||
static const Index RowsAtCompileTime = Eigen::Dynamic;
|
||||
|
||||
typedef typename const_selector<SymmetricBlockMatrixType, SymmetricBlockMatrix, Matrix, const Matrix>::type
|
||||
DenseMatrixType;
|
||||
|
||||
typedef Eigen::Map<DenseMatrixType, 0, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> > OffDiagonal;
|
||||
typedef Eigen::SelfAdjointView<Eigen::Block<DenseMatrixType>, Eigen::Upper> SelfAdjointView;
|
||||
typedef Eigen::TriangularView<Eigen::Block<DenseMatrixType>, Eigen::Upper> TriangularView;
|
||||
|
||||
protected:
|
||||
mutable Eigen::Block<DenseMatrixType> myBlock_;
|
||||
template<typename OtherSymmetricBlockMatrixType> friend class SymmetricBlockMatrixBlockExpr;
|
||||
|
||||
public:
|
||||
/// Create a SymmetricBlockMatrixBlockExpr from the specified block of a SymmetricBlockMatrix.
|
||||
SymmetricBlockMatrixBlockExpr(SymmetricBlockMatrixType& blockMatrix, Index iBlock, Index jBlock) :
|
||||
xpr_(blockMatrix), myBlock_(blockMatrix.matrix_.block(0, 0, 0, 0))
|
||||
{
|
||||
initIndices(iBlock, jBlock);
|
||||
}
|
||||
|
||||
/// Create a SymmetricBlockMatrixBlockExpr from the specified range of blocks of a
|
||||
/// SymmetricBlockMatrix.
|
||||
SymmetricBlockMatrixBlockExpr(SymmetricBlockMatrixType& blockMatrix,
|
||||
Index firstRowBlock, Index firstColBlock, Index rowBlocks, Index colBlocks) :
|
||||
xpr_(blockMatrix), myBlock_(blockMatrix.matrix_.block(0, 0, 0, 0))
|
||||
{
|
||||
initIndices(firstRowBlock, firstColBlock, rowBlocks, colBlocks);
|
||||
}
|
||||
|
||||
/// Create a SymmetricBlockMatrixBlockExpr from the specified range of blocks of a
|
||||
/// SymmetricBlockMatrix.
|
||||
SymmetricBlockMatrixBlockExpr(SymmetricBlockMatrixType& blockMatrix, Index firstBlock, Index blocks, char /*dummy*/) :
|
||||
xpr_(blockMatrix), myBlock_(blockMatrix.matrix_.block(0, 0, 0, 0))
|
||||
{
|
||||
initIndices(firstBlock, firstBlock, blocks, blocks);
|
||||
}
|
||||
|
||||
inline Index rows() const { return blockType_ != Transposed ? denseRows_ : denseCols_; }
|
||||
inline Index cols() const { return blockType_ != Transposed ? denseCols_ : denseRows_; }
|
||||
|
||||
inline BlockType blockType() const { return blockType_; }
|
||||
|
||||
inline ScalarRef operator()(Index row, Index col) const
|
||||
{
|
||||
return coeffInternal<ScalarRef>(row, col);
|
||||
}
|
||||
|
||||
inline OffDiagonal knownOffDiagonal() const
|
||||
{
|
||||
typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> DynamicStride;
|
||||
|
||||
// We can return a Map if we are either on an off-diagonal block, or a block of size 0 or 1
|
||||
assert(blockType_ != SelfAdjoint || (denseRows_ <= 1 && denseCols_ <= 1));
|
||||
if(blockType_ == Transposed)
|
||||
{
|
||||
// Swap the inner and outer stride to produce a transposed Map
|
||||
Eigen::Block<DenseMatrixType> block = const_cast<This&>(*this).xpr_.matrix_.block(densei_, densej_, denseRows_, denseCols_);
|
||||
return Eigen::Map<DenseMatrixType, 0, DynamicStride>(block.data(), block.cols(), block.rows(),
|
||||
DynamicStride(block.innerStride(), block.outerStride()));
|
||||
}
|
||||
else
|
||||
{
|
||||
Eigen::Block<DenseMatrixType> block = const_cast<This&>(*this).xpr_.matrix_.block(densei_, densej_, denseRows_, denseCols_);
|
||||
return Eigen::Map<DenseMatrixType, 0, DynamicStride>(block.data(), block.rows(), block.cols(),
|
||||
DynamicStride(block.outerStride(), block.innerStride()));
|
||||
}
|
||||
}
|
||||
|
||||
inline SelfAdjointView selfadjointView() const
|
||||
{
|
||||
assert(blockType_ == SelfAdjoint);
|
||||
return myBlock_;
|
||||
}
|
||||
|
||||
inline TriangularView triangularView() const
|
||||
{
|
||||
assert(blockType_ == SelfAdjoint);
|
||||
return myBlock_;
|
||||
}
|
||||
|
||||
template<typename Dest> inline void evalTo(Dest& dst) const
|
||||
{
|
||||
// Just try to assign to the object using either a selfadjoint view or a block view
|
||||
if(blockType_ == SelfAdjoint)
|
||||
dst = selfadjointView();
|
||||
else if(blockType_ == Plain)
|
||||
dst = myBlock_;
|
||||
else
|
||||
dst = myBlock_.transpose();
|
||||
}
|
||||
|
||||
//template<typename MatrixType> inline void evalTo(const Eigen::SelfAdjointView<MatrixType, Eigen::Upper>& rhs) const
|
||||
//{
|
||||
// if(blockType_ == SelfAdjoint)
|
||||
// rhs.nestedExpression().triangularView<Eigen::Upper>() = triangularView();
|
||||
// else
|
||||
// throw std::invalid_argument("Cannot assign an off-diagonal block to a self-adjoint matrix");
|
||||
//}
|
||||
|
||||
//template<typename MatrixType> inline void evalTo(const Eigen::TriangularView<MatrixType, Eigen::Upper>& rhs) const
|
||||
//{
|
||||
// if(blockType_ == SelfAdjoint)
|
||||
// rhs.nestedExpression().triangularView<Eigen::Upper>() = triangularView();
|
||||
// else
|
||||
// throw std::invalid_argument("Cannot assign an off-diagonal block to a self-adjoint matrix");
|
||||
//}
|
||||
|
||||
template<typename RhsDerived>
|
||||
This& operator=(const Eigen::MatrixBase<RhsDerived>& rhs)
|
||||
{
|
||||
// Just try to assign to the object using either a selfadjoint view or a block view
|
||||
if(blockType_ == SelfAdjoint)
|
||||
triangularView() = rhs.derived().template triangularView<Eigen::Upper>();
|
||||
else if(blockType_ == Plain)
|
||||
myBlock_ = rhs.derived();
|
||||
else
|
||||
myBlock_.transpose() = rhs.derived();
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename MatrixType>
|
||||
This& operator=(const Eigen::SelfAdjointView<MatrixType, Eigen::Upper>& rhs)
|
||||
{
|
||||
if(blockType_ == SelfAdjoint)
|
||||
triangularView() = rhs.nestedExpression().template triangularView<Eigen::Upper>();
|
||||
else
|
||||
throw std::invalid_argument("Cannot assign a self-adjoint matrix to an off-diagonal block");
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename OtherSymmetricBlockMatrixType>
|
||||
This& operator=(const SymmetricBlockMatrixBlockExpr<OtherSymmetricBlockMatrixType>& other)
|
||||
{
|
||||
_doAssign(other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
This& operator=(const This& other)
|
||||
{
|
||||
// This version is required so GCC doesn't synthesize a default operator=.
|
||||
_doAssign(other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename OtherSymmetricBlockMatrixType>
|
||||
This& operator+=(const SymmetricBlockMatrixBlockExpr<OtherSymmetricBlockMatrixType>& other)
|
||||
{
|
||||
if(blockType_ == SelfAdjoint)
|
||||
{
|
||||
assert((BlockType)other.blockType() == SelfAdjoint);
|
||||
triangularView() += other.triangularView().nestedExpression();
|
||||
}
|
||||
else if(blockType_ == Plain)
|
||||
{
|
||||
assert((BlockType)other.blockType() == Plain || (BlockType)other.blockType() == Transposed);
|
||||
if((BlockType)other.blockType() == Transposed)
|
||||
myBlock_ += other.myBlock_.transpose();
|
||||
else
|
||||
myBlock_ += other.myBlock_;
|
||||
}
|
||||
else
|
||||
{
|
||||
assert((BlockType)other.blockType() == Plain || (BlockType)other.blockType() == Transposed);
|
||||
if((BlockType)other.blockType() == Transposed)
|
||||
myBlock_.transpose() += other.myBlock_.transpose();
|
||||
else
|
||||
myBlock_.transpose() += other.myBlock_;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
void initIndices(Index iBlock, Index jBlock, Index blockRows = 1, Index blockCols = 1)
|
||||
{
|
||||
if(iBlock == jBlock && blockRows == blockCols)
|
||||
{
|
||||
densei_ = xpr_.offset(iBlock);
|
||||
densej_ = densei_;
|
||||
if(blockRows > 0)
|
||||
xpr_.checkBlock(iBlock + blockRows - 1);
|
||||
denseRows_ = xpr_.offsetUnchecked(iBlock + blockRows) - densei_;
|
||||
if(blockCols > 0)
|
||||
xpr_.checkBlock(jBlock + blockCols - 1);
|
||||
denseCols_ = xpr_.offsetUnchecked(jBlock + blockCols) - densej_;
|
||||
blockType_ = SelfAdjoint;
|
||||
}
|
||||
else
|
||||
{
|
||||
if(jBlock > iBlock || (iBlock == jBlock && blockCols > blockRows))
|
||||
{
|
||||
densei_ = xpr_.offset(iBlock);
|
||||
densej_ = xpr_.offset(jBlock);
|
||||
if(blockRows > 0)
|
||||
xpr_.checkBlock(iBlock + blockRows - 1);
|
||||
denseRows_ = xpr_.offsetUnchecked(iBlock + blockRows) - densei_;
|
||||
if(blockCols > 0)
|
||||
xpr_.checkBlock(jBlock + blockCols - 1);
|
||||
denseCols_ = xpr_.offsetUnchecked(jBlock + blockCols) - densej_;
|
||||
blockType_ = Plain;
|
||||
}
|
||||
else
|
||||
{
|
||||
densei_ = xpr_.offset(jBlock);
|
||||
densej_ = xpr_.offset(iBlock);
|
||||
if(blockCols > 0)
|
||||
xpr_.checkBlock(jBlock + blockCols - 1);
|
||||
denseRows_ = xpr_.offsetUnchecked(jBlock + blockCols) - densei_;
|
||||
if(blockRows > 0)
|
||||
xpr_.checkBlock(iBlock + blockRows - 1);
|
||||
denseCols_ = xpr_.offsetUnchecked(iBlock + blockRows) - densej_;
|
||||
blockType_ = Transposed;
|
||||
}
|
||||
|
||||
// Validate that the block does not cross below the diagonal (the indices have already been
|
||||
// flipped above the diagonal for ranges starting below the diagonal).
|
||||
if(densei_ + denseRows_ > densej_ + 1)
|
||||
throw std::invalid_argument("Off-diagonal block ranges may not cross the diagonal");
|
||||
}
|
||||
|
||||
new (&myBlock_) Eigen::Block<DenseMatrixType>(xpr_.matrix_.block(densei_, densej_, denseRows_, denseCols_));
|
||||
}
|
||||
|
||||
template<typename ScalarType>
|
||||
inline ScalarType coeffInternal(Index row, Index col) const
|
||||
{
|
||||
// We leave index checking up to the Block class
|
||||
if(blockType_ == Plain)
|
||||
{
|
||||
return myBlock_(row, col);
|
||||
}
|
||||
else if(blockType_ == SelfAdjoint)
|
||||
{
|
||||
if(row <= col)
|
||||
return myBlock_(row, col);
|
||||
else
|
||||
return myBlock_.transpose()(row, col);
|
||||
}
|
||||
else
|
||||
{
|
||||
return myBlock_.transpose()(row, col);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename OtherSymmetricBlockMatrixType>
|
||||
void _doAssign(const SymmetricBlockMatrixBlockExpr<OtherSymmetricBlockMatrixType>& other)
|
||||
{
|
||||
if(blockType_ == SelfAdjoint)
|
||||
{
|
||||
assert((BlockType)other.blockType() == SelfAdjoint);
|
||||
triangularView() = other.triangularView().nestedExpression();
|
||||
}
|
||||
else if(blockType_ == Plain)
|
||||
{
|
||||
assert((BlockType)other.blockType() == Plain || (BlockType)other.blockType() == Transposed);
|
||||
if((BlockType)other.blockType() == Transposed)
|
||||
myBlock_ = other.myBlock_.transpose();
|
||||
else
|
||||
myBlock_ = other.myBlock_;
|
||||
}
|
||||
else
|
||||
{
|
||||
assert((BlockType)other.blockType() == Plain || (BlockType)other.blockType() == Transposed);
|
||||
if((BlockType)other.blockType() == Transposed)
|
||||
myBlock_.transpose() = other.myBlock_.transpose();
|
||||
else
|
||||
myBlock_.transpose() = other.myBlock_;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
}
|
|
@ -18,11 +18,12 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/config.h> // Configuration from CMake
|
||||
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <boost/serialization/assume_abstract.hpp>
|
||||
#include <memory>
|
||||
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
|
@ -37,7 +38,7 @@ namespace gtsam {
|
|||
* Values can operate generically on Value objects, retracting or computing
* local coordinates for many Value objects of different types.
*
* Inheriting from the DerivedValue class templated provides a generic implementation of
* Inheriting from the DerivedValue class template provides a generic implementation of
* the pure virtual functions retract_(), localCoordinates_(), and equals_(), eliminating
* the need to implement these functions in your class. Note that you must inherit from
* DerivedValue templated on the class you are defining. For example you cannot define
|
||||
|
|
|
@ -311,7 +311,7 @@ struct traits<Eigen::Matrix<double, M, N, Options, MaxRows, MaxCols> > :
|
|||
typedef Eigen::Matrix<double, dimension, dimension> Jacobian;
|
||||
typedef OptionalJacobian<dimension, dimension> ChartJacobian;
|
||||
|
||||
static TangentVector Local(Fixed origin, Fixed other,
|
||||
static TangentVector Local(const Fixed& origin, const Fixed& other,
|
||||
ChartJacobian H1 = boost::none, ChartJacobian H2 = boost::none) {
|
||||
if (H1) (*H1) = -Jacobian::Identity();
|
||||
if (H2) (*H2) = Jacobian::Identity();
|
||||
|
@ -320,7 +320,7 @@ struct traits<Eigen::Matrix<double, M, N, Options, MaxRows, MaxCols> > :
|
|||
return result;
|
||||
}
|
||||
|
||||
static Fixed Retract(Fixed origin, const TangentVector& v,
|
||||
static Fixed Retract(const Fixed& origin, const TangentVector& v,
|
||||
ChartJacobian H1 = boost::none, ChartJacobian H2 = boost::none) {
|
||||
if (H1) (*H1) = Jacobian::Identity();
|
||||
if (H2) (*H2) = Jacobian::Identity();
|
||||
|
|
|
@ -13,10 +13,10 @@
|
|||
* @file cholesky.cpp
|
||||
* @brief Efficient incomplete Cholesky on rank-deficient matrices, todo: constrained Cholesky
|
||||
* @author Richard Roberts
|
||||
* @author Frank Dellaert
|
||||
* @date Nov 5, 2010
|
||||
*/
|
||||
|
||||
#include <gtsam/base/debug.h>
|
||||
#include <gtsam/base/cholesky.h>
|
||||
#include <gtsam/base/timing.h>
|
||||
|
||||
|
@ -27,64 +27,53 @@ using namespace std;
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
static const double negativePivotThreshold = -1e-1;
|
||||
static const double zeroPivotThreshold = 1e-6;
|
||||
static const double underconstrainedPrior = 1e-5;
|
||||
static const int underconstrainedExponentDifference = 12;
|
||||
static const double negativePivotThreshold = -1e-1;
|
||||
static const double zeroPivotThreshold = 1e-6;
|
||||
static const double underconstrainedPrior = 1e-5;
|
||||
static const int underconstrainedExponentDifference = 12;
|
||||
|
||||
/* ************************************************************************* */
|
||||
static inline int choleskyStep(Matrix& ATA, size_t k, size_t order) {
|
||||
|
||||
const bool debug = ISDEBUG("choleskyCareful");
|
||||
|
||||
// Get pivot value
|
||||
double alpha = ATA(k,k);
|
||||
double alpha = ATA(k, k);
|
||||
|
||||
// Correct negative pivots from round-off error
|
||||
if(alpha < negativePivotThreshold) {
|
||||
if(debug) {
|
||||
cout << "pivot = " << alpha << endl;
|
||||
print(ATA, "Partially-factorized matrix: ");
|
||||
}
|
||||
if (alpha < negativePivotThreshold) {
|
||||
return -1;
|
||||
} else if(alpha < 0.0)
|
||||
} else if (alpha < 0.0)
|
||||
alpha = 0.0;
|
||||
|
||||
|
||||
const double beta = sqrt(alpha);
|
||||
|
||||
if(beta > zeroPivotThreshold) {
|
||||
if (beta > zeroPivotThreshold) {
|
||||
const double betainv = 1.0 / beta;
|
||||
|
||||
// Update k,k
|
||||
ATA(k,k) = beta;
|
||||
ATA(k, k) = beta;
|
||||
|
||||
if(k < (order-1)) {
|
||||
if (k < (order - 1)) {
|
||||
// Update A(k,k+1:end) <- A(k,k+1:end) / beta
|
||||
typedef Matrix::RowXpr::SegmentReturnType BlockRow;
|
||||
BlockRow V = ATA.row(k).segment(k+1, order-(k+1));
|
||||
BlockRow V = ATA.row(k).segment(k + 1, order - (k + 1));
|
||||
V *= betainv;
|
||||
|
||||
// Update A(k+1:end, k+1:end) <- A(k+1:end, k+1:end) - v*v' / alpha
|
||||
ATA.block(k+1, k+1, order-(k+1), order-(k+1)) -= V.transpose() * V;
|
||||
// ATA.bottomRightCorner(order-(k+1), order-(k+1)).selfadjointView<Eigen::Upper>()
|
||||
// .rankUpdate(V.adjoint(), -1);
|
||||
ATA.block(k + 1, k + 1, order - (k + 1), order - (k + 1)) -= V.transpose() * V;
|
||||
// ATA.bottomRightCorner(order-(k+1), order-(k+1)).selfadjointView<Eigen::Upper>()
|
||||
// .rankUpdate(V.adjoint(), -1);
|
||||
}
|
||||
return 1;
|
||||
} else {
|
||||
// For zero pivots, add the underconstrained variable prior
|
||||
ATA(k,k) = underconstrainedPrior;
|
||||
for(size_t j=k+1; j<order; ++j)
|
||||
ATA(k,j) = 0.0;
|
||||
if(debug) cout << "choleskyCareful: Skipping " << k << endl;
|
||||
ATA(k, k) = underconstrainedPrior;
|
||||
for (size_t j = k + 1; j < order; ++j)
|
||||
ATA(k, j) = 0.0;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
pair<size_t,bool> choleskyCareful(Matrix& ATA, int order) {
|
||||
|
||||
const bool debug = ISDEBUG("choleskyCareful");
|
||||
|
||||
pair<size_t, bool> choleskyCareful(Matrix& ATA, int order) {
|
||||
// Check that the matrix is square (we do not check for symmetry)
|
||||
assert(ATA.rows() == ATA.cols());
|
||||
|
||||
|
@ -92,7 +81,7 @@ pair<size_t,bool> choleskyCareful(Matrix& ATA, int order) {
|
|||
const size_t n = ATA.rows();
|
||||
|
||||
// Negative order means factor the entire matrix
|
||||
if(order < 0)
|
||||
if (order < 0)
|
||||
order = int(n);
|
||||
|
||||
assert(size_t(order) <= n);
|
||||
|
@ -102,13 +91,11 @@ pair<size_t,bool> choleskyCareful(Matrix& ATA, int order) {
|
|||
bool success = true;
|
||||
|
||||
// Factor row-by-row
|
||||
for(size_t k = 0; k < size_t(order); ++k) {
|
||||
for (size_t k = 0; k < size_t(order); ++k) {
|
||||
int stepResult = choleskyStep(ATA, k, size_t(order));
|
||||
if(stepResult == 1) {
|
||||
if(debug) cout << "choleskyCareful: Factored through " << k << endl;
|
||||
if(debug) print(ATA, "ATA: ");
|
||||
maxrank = k+1;
|
||||
} else if(stepResult == -1) {
|
||||
if (stepResult == 1) {
|
||||
maxrank = k + 1;
|
||||
} else if (stepResult == -1) {
|
||||
success = false;
|
||||
break;
|
||||
} /* else if(stepResult == 0) Found zero pivot */
|
||||
|
@ -118,72 +105,54 @@ pair<size_t,bool> choleskyCareful(Matrix& ATA, int order) {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
bool choleskyPartial(Matrix& ABC, size_t nFrontal) {
|
||||
|
||||
bool choleskyPartial(Matrix& ABC, size_t nFrontal, size_t topleft) {
|
||||
gttic(choleskyPartial);
|
||||
if (nFrontal == 0)
|
||||
return true;
|
||||
|
||||
const bool debug = ISDEBUG("choleskyPartial");
|
||||
assert(ABC.cols() == ABC.rows());
|
||||
const Eigen::DenseIndex n = ABC.rows() - topleft;
|
||||
assert(n >= 0 && nFrontal <= size_t(n));
|
||||
|
||||
assert(ABC.rows() == ABC.cols());
|
||||
assert(ABC.rows() >= 0 && nFrontal <= size_t(ABC.rows()));
|
||||
// Create views on blocks
|
||||
auto A = ABC.block(topleft, topleft, nFrontal, nFrontal);
|
||||
auto B = ABC.block(topleft, topleft + nFrontal, nFrontal, n - nFrontal);
|
||||
auto C = ABC.block(topleft + nFrontal, topleft + nFrontal, n - nFrontal, n - nFrontal);
|
||||
|
||||
const size_t n = ABC.rows();
|
||||
|
||||
// Compute Cholesky factorization of A, overwrites A.
|
||||
gttic(lld);
|
||||
Eigen::ComputationInfo lltResult;
|
||||
if(nFrontal > 0)
|
||||
{
|
||||
Eigen::LLT<Matrix, Eigen::Upper> llt = ABC.block(0, 0, nFrontal, nFrontal).selfadjointView<Eigen::Upper>().llt();
|
||||
ABC.block(0, 0, nFrontal, nFrontal).triangularView<Eigen::Upper>() = llt.matrixU();
|
||||
lltResult = llt.info();
|
||||
}
|
||||
else
|
||||
{
|
||||
lltResult = Eigen::Success;
|
||||
}
|
||||
gttoc(lld);
|
||||
|
||||
if(debug) cout << "R:\n" << Eigen::MatrixXd(ABC.topLeftCorner(nFrontal,nFrontal).triangularView<Eigen::Upper>()) << endl;
|
||||
// Compute Cholesky factorization A = R'*R, overwrites A.
|
||||
gttic(LLT);
|
||||
Eigen::LLT<Matrix, Eigen::Upper> llt(A);
|
||||
Eigen::ComputationInfo lltResult = llt.info();
|
||||
if (lltResult != Eigen::Success)
|
||||
return false;
|
||||
auto R = A.triangularView<Eigen::Upper>();
|
||||
R = llt.matrixU();
|
||||
gttoc(LLT);
|
||||
|
||||
// Compute S = inv(R') * B
|
||||
gttic(compute_S);
|
||||
if(n - nFrontal > 0) {
|
||||
ABC.topLeftCorner(nFrontal,nFrontal).triangularView<Eigen::Upper>().transpose().solveInPlace(
|
||||
ABC.topRightCorner(nFrontal, n-nFrontal));
|
||||
}
|
||||
if(debug) cout << "S:\n" << ABC.topRightCorner(nFrontal, n-nFrontal) << endl;
|
||||
if (nFrontal < n)
|
||||
R.transpose().solveInPlace(B);
|
||||
gttoc(compute_S);
|
||||
|
||||
// Compute L = C - S' * S
|
||||
gttic(compute_L);
|
||||
if(debug) cout << "C:\n" << Eigen::MatrixXd(ABC.bottomRightCorner(n-nFrontal,n-nFrontal).selfadjointView<Eigen::Upper>()) << endl;
|
||||
if(n - nFrontal > 0)
|
||||
ABC.bottomRightCorner(n-nFrontal,n-nFrontal).selfadjointView<Eigen::Upper>().rankUpdate(
|
||||
ABC.topRightCorner(nFrontal, n-nFrontal).transpose(), -1.0);
|
||||
if(debug) cout << "L:\n" << Eigen::MatrixXd(ABC.bottomRightCorner(n-nFrontal,n-nFrontal).selfadjointView<Eigen::Upper>()) << endl;
|
||||
if (nFrontal < n)
|
||||
C.selfadjointView<Eigen::Upper>().rankUpdate(B.transpose(), -1.0);
|
||||
gttoc(compute_L);
|
||||
|
||||
// Check last diagonal element - Eigen does not check it
|
||||
bool ok;
|
||||
if(lltResult == Eigen::Success) {
|
||||
if(nFrontal >= 2) {
|
||||
int exp2, exp1;
|
||||
(void)frexp(ABC(nFrontal-2, nFrontal-2), &exp2);
|
||||
(void)frexp(ABC(nFrontal-1, nFrontal-1), &exp1);
|
||||
ok = (exp2 - exp1 < underconstrainedExponentDifference);
|
||||
} else if(nFrontal == 1) {
|
||||
int exp1;
|
||||
(void)frexp(ABC(0,0), &exp1);
|
||||
ok = (exp1 > -underconstrainedExponentDifference);
|
||||
} else {
|
||||
ok = true;
|
||||
}
|
||||
if (nFrontal >= 2) {
|
||||
int exp2, exp1;
|
||||
(void)frexp(R(topleft + nFrontal - 2, topleft + nFrontal - 2), &exp2);
|
||||
(void)frexp(R(topleft + nFrontal - 1, topleft + nFrontal - 1), &exp1);
|
||||
return (exp2 - exp1 < underconstrainedExponentDifference);
|
||||
} else if (nFrontal == 1) {
|
||||
int exp1;
|
||||
(void)frexp(R(0, 0), &exp1);
|
||||
return (exp1 > -underconstrainedExponentDifference);
|
||||
} else {
|
||||
ok = false;
|
||||
return true;
|
||||
}
|
||||
|
||||
return ok;
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -55,10 +55,12 @@ GTSAM_EXPORT std::pair<size_t,bool> choleskyCareful(Matrix& ATA, int order = -1)
|
|||
* nFrontal determines the split between A, B, and C, with A being of size
* nFrontal x nFrontal.
*
* If topleft is non-zero, the factorization operates on the bottom-right corner of
* ABC, starting at row and column topleft.
*
* @return \c true if the decomposition is successful, \c false if \c A was
* not positive-definite.
*/
GTSAM_EXPORT bool choleskyPartial(Matrix& ABC, size_t nFrontal);
GTSAM_EXPORT bool choleskyPartial(Matrix& ABC, size_t nFrontal, size_t topleft=0);
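// --- Usage sketch (illustration only, not part of this change) --------------
// A hedged example of the new topleft parameter: factor one frontal variable of
// a small symmetric matrix in place, i.e. overwrite [A B; B' C] with [R S; 0 L]
// where A = R'R, S = inv(R')B and L = C - S'S. The numbers are made up.
inline void choleskyPartialExample() {
  gtsam::Matrix ABC(3, 3);
  ABC << 4.0, 2.0, 1.0,
         0.0, 3.0, 0.5,
         0.0, 0.0, 2.0;  // only the upper triangle is referenced
  // nFrontal = 1; topleft = 0 reproduces the behavior of the old overload.
  const bool ok = gtsam::choleskyPartial(ABC, 1, 0);
  (void)ok;  // true here, since the 1x1 frontal block is positive-definite
}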
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -320,8 +320,8 @@ typename internal::FixedSizeMatrix<Y,X1>::type numericalDerivative41(
|
|||
/**
|
||||
* Compute numerical derivative in argument 2 of 4-argument function
|
||||
* @param h 4-argument function yielding m-vector
|
||||
* @param x1 n-dimensional first argument value
|
||||
* @param x2 second argument value
|
||||
* @param x1 first argument value
|
||||
* @param x2 n-dimensional second argument value
|
||||
* @param x3 third argument value
|
||||
* @param x4 fourth argument value
|
||||
* @param delta increment for numerical derivative
|
||||
|
@ -333,11 +333,53 @@ typename internal::FixedSizeMatrix<Y,X2>::type numericalDerivative42(
|
|||
const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) {
|
||||
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
|
||||
"Template argument Y must be a manifold type.");
|
||||
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X1>::structure_category>::value),
|
||||
"Template argument X1 must be a manifold type.");
|
||||
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X2>::structure_category>::value),
|
||||
"Template argument X2 must be a manifold type.");
|
||||
return numericalDerivative11<Y, X2>(boost::bind(h, x1, _1, x3, x4), x2, delta);
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute numerical derivative in argument 3 of 4-argument function
|
||||
* @param h 4-argument function yielding m-vector
|
||||
* @param x1 first argument value
|
||||
* @param x2 second argument value
|
||||
* @param x3 n-dimensional third argument value
|
||||
* @param x4 fourth argument value
|
||||
* @param delta increment for numerical derivative
|
||||
* @return m*n Jacobian computed via central differencing
|
||||
*/
|
||||
template<class Y, class X1, class X2, class X3, class X4>
|
||||
typename internal::FixedSizeMatrix<Y,X3>::type numericalDerivative43(
|
||||
boost::function<Y(const X1&, const X2&, const X3&, const X4&)> h, const X1& x1,
|
||||
const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) {
|
||||
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
|
||||
"Template argument Y must be a manifold type.");
|
||||
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X3>::structure_category>::value),
|
||||
"Template argument X3 must be a manifold type.");
|
||||
return numericalDerivative11<Y, X3>(boost::bind(h, x1, x2, _1, x4), x3, delta);
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute numerical derivative in argument 4 of 4-argument function
|
||||
* @param h 4-argument function yielding m-vector
|
||||
* @param x1 first argument value
|
||||
* @param x2 second argument value
|
||||
* @param x3 third argument value
|
||||
* @param x4 n-dimensional fourth argument value
|
||||
* @param delta increment for numerical derivative
|
||||
* @return m*n Jacobian computed via central differencing
|
||||
*/
|
||||
template<class Y, class X1, class X2, class X3, class X4>
|
||||
typename internal::FixedSizeMatrix<Y,X4>::type numericalDerivative44(
|
||||
boost::function<Y(const X1&, const X2&, const X3&, const X4&)> h, const X1& x1,
|
||||
const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) {
|
||||
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
|
||||
"Template argument Y must be a manifold type.");
|
||||
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X4>::structure_category>::value),
|
||||
"Template argument X4 must be a manifold type.");
|
||||
return numericalDerivative11<Y, X4>(boost::bind(h, x1, x2, x3, _1), x4, delta);
|
||||
}
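// --- Usage sketch (illustration only, not part of this change) --------------
// A hedged example of the 4-argument numerical derivatives added above. The
// function exampleFn and its Vector3 arguments are invented for illustration;
// in a standalone file, include <gtsam/base/numericalDerivative.h> first.
namespace example {
inline gtsam::Vector1 exampleFn(const gtsam::Vector3& a, const gtsam::Vector3& b,
                                const gtsam::Vector3& c, const gtsam::Vector3& d) {
  gtsam::Vector1 y;
  y << a.dot(b) + c.dot(d);  // scalar-valued test function
  return y;
}

inline void run() {
  const gtsam::Vector3 a(1, 2, 3), b(4, 5, 6), c(7, 8, 9), d(1, 0, 1);
  // 1x3 Jacobians w.r.t. the third and fourth arguments, by central differencing.
  gtsam::Matrix H3 = gtsam::numericalDerivative43<gtsam::Vector1, gtsam::Vector3,
      gtsam::Vector3, gtsam::Vector3, gtsam::Vector3>(exampleFn, a, b, c, d);
  gtsam::Matrix H4 = gtsam::numericalDerivative44<gtsam::Vector1, gtsam::Vector3,
      gtsam::Vector3, gtsam::Vector3, gtsam::Vector3>(exampleFn, a, b, c, d);
  // Expect H3 ~= d' = [1 0 1] and H4 ~= c' = [7 8 9].
  (void)H3; (void)H4;
}
}  // namespace example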
|
||||
|
||||
/**
|
||||
* Compute numerical Hessian matrix. Requires a single-argument Lie->scalar
|
||||
* function. This is implemented simply as the derivative of the gradient.
|
||||
|
|
|
@ -23,10 +23,23 @@ using namespace gtsam;
|
|||
using namespace std;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(cholesky, choleskyPartial) {
|
||||
TEST(cholesky, choleskyPartial0) {
|
||||
|
||||
// choleskyPartial should only use the upper triangle, so this represents a
|
||||
// symmetric matrix.
|
||||
Matrix ABC(3,3);
|
||||
ABC << 4.0375, 3.4584, 3.5735,
|
||||
0., 4.7267, 3.8423,
|
||||
0., 0., 5.1600;
|
||||
|
||||
// Test passing 0 frontals to partialCholesky
|
||||
Matrix RSL(ABC);
|
||||
choleskyPartial(RSL, 0);
|
||||
EXPECT(assert_equal(ABC, RSL, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(cholesky, choleskyPartial) {
|
||||
Matrix ABC = (Matrix(7,7) <<
|
||||
4.0375, 3.4584, 3.5735, 2.4815, 2.1471, 2.7400, 2.2063,
|
||||
0., 4.7267, 3.8423, 2.3624, 2.8091, 2.9579, 2.5914,
|
||||
|
|
|
@ -40,58 +40,41 @@ TEST(SymmetricBlockMatrix, ReadBlocks)
|
|||
Matrix expected1 = (Matrix(2, 2) <<
|
||||
22, 23,
|
||||
23, 29).finished();
|
||||
Matrix actual1 = testBlockMatrix(1, 1);
|
||||
// Test only writing the upper triangle for efficiency
|
||||
Matrix actual1t = Z_2x2;
|
||||
actual1t.triangularView<Eigen::Upper>() = testBlockMatrix(1, 1).triangularView();
|
||||
Matrix actual1 = testBlockMatrix.diagonalBlock(1);
|
||||
EXPECT(assert_equal(expected1, actual1));
|
||||
EXPECT(assert_equal(Matrix(expected1.triangularView<Eigen::Upper>()), actual1t));
|
||||
|
||||
// Above the diagonal
|
||||
Matrix expected2 = (Matrix(3, 2) <<
|
||||
4, 5,
|
||||
10, 11,
|
||||
16, 17).finished();
|
||||
Matrix actual2 = testBlockMatrix(0, 1);
|
||||
Matrix actual2 = testBlockMatrix.aboveDiagonalBlock(0, 1);
|
||||
EXPECT(assert_equal(expected2, actual2));
|
||||
|
||||
// Below the diagonal
|
||||
Matrix expected3 = (Matrix(2, 3) <<
|
||||
4, 10, 16,
|
||||
5, 11, 17).finished();
|
||||
Matrix actual3 = testBlockMatrix(1, 0);
|
||||
EXPECT(assert_equal(expected3, actual3));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SymmetricBlockMatrix, WriteBlocks)
|
||||
{
|
||||
// On the diagonal
|
||||
Matrix expected1 = testBlockMatrix(1, 1);
|
||||
Matrix expected1 = testBlockMatrix.diagonalBlock(1);
|
||||
SymmetricBlockMatrix bm1 = SymmetricBlockMatrix::LikeActiveViewOf(testBlockMatrix);
|
||||
bm1(1, 1) = expected1.selfadjointView<Eigen::Upper>(); // Verified with debugger that this only writes the upper triangle
|
||||
Matrix actual1 = bm1(1, 1);
|
||||
|
||||
bm1.setDiagonalBlock(1, expected1);
|
||||
Matrix actual1 = bm1.diagonalBlock(1);
|
||||
EXPECT(assert_equal(expected1, actual1));
|
||||
|
||||
// On the diagonal
|
||||
Matrix expected1p = testBlockMatrix(1, 1);
|
||||
SymmetricBlockMatrix bm1p = SymmetricBlockMatrix::LikeActiveViewOf(testBlockMatrix);
|
||||
bm1p(1, 1) = expected1p; // Verified with debugger that this only writes the upper triangle
|
||||
Matrix actual1p = bm1p(1, 1);
|
||||
EXPECT(assert_equal(expected1p, actual1p));
|
||||
|
||||
// Above the diagonal
|
||||
Matrix expected2 = testBlockMatrix(0, 1);
|
||||
Matrix expected2 = testBlockMatrix.aboveDiagonalBlock(0, 1);
|
||||
SymmetricBlockMatrix bm2 = SymmetricBlockMatrix::LikeActiveViewOf(testBlockMatrix);
|
||||
bm2(0, 1) = expected2;
|
||||
Matrix actual2 = bm2(0, 1);
|
||||
bm2.setOffDiagonalBlock(0, 1, expected2);
|
||||
Matrix actual2 = bm2.aboveDiagonalBlock(0, 1);
|
||||
EXPECT(assert_equal(expected2, actual2));
|
||||
|
||||
// Below the diagonal
|
||||
Matrix expected3 = testBlockMatrix(1, 0);
|
||||
Matrix expected3 = testBlockMatrix.aboveDiagonalBlock(0, 1).transpose();
|
||||
SymmetricBlockMatrix bm3 = SymmetricBlockMatrix::LikeActiveViewOf(testBlockMatrix);
|
||||
bm3(1, 0) = expected3;
|
||||
Matrix actual3 = bm3(1, 0);
|
||||
bm3.setOffDiagonalBlock(1, 0, expected3);
|
||||
Matrix actual3 = bm3.aboveDiagonalBlock(0, 1).transpose();
|
||||
EXPECT(assert_equal(expected3, actual3));
|
||||
}
|
||||
|
||||
|
@ -103,30 +86,16 @@ TEST(SymmetricBlockMatrix, Ranges)
|
|||
22, 23, 24,
|
||||
23, 29, 30,
|
||||
24, 30, 36).finished();
|
||||
Matrix actual1 = testBlockMatrix.range(1, 3, 1, 3).selfadjointView();
|
||||
Matrix actual1a = testBlockMatrix.range(1, 3, 1, 3);
|
||||
Matrix actual1 = testBlockMatrix.selfadjointView(1, 3);
|
||||
EXPECT(assert_equal(expected1, actual1));
|
||||
EXPECT(assert_equal(expected1, actual1a));
|
||||
|
||||
// Above the diagonal
|
||||
Matrix expected2 = (Matrix(3, 1) <<
|
||||
24,
|
||||
30,
|
||||
36).finished();
|
||||
Matrix actual2 = testBlockMatrix.range(1, 3, 2, 3).knownOffDiagonal();
|
||||
Matrix actual2a = testBlockMatrix.range(1, 3, 2, 3);
|
||||
Matrix expected2 = (Matrix(3, 3) <<
|
||||
4, 5, 6,
|
||||
10, 11, 12,
|
||||
16, 17, 18).finished();
|
||||
Matrix actual2 = testBlockMatrix.aboveDiagonalRange(0, 1, 1, 3);
|
||||
EXPECT(assert_equal(expected2, actual2));
|
||||
EXPECT(assert_equal(expected2, actual2a));
|
||||
|
||||
// Below the diagonal
|
||||
Matrix expected3 = (Matrix(3, 3) <<
|
||||
4, 10, 16,
|
||||
5, 11, 17,
|
||||
6, 12, 18).finished();
|
||||
Matrix actual3 = testBlockMatrix.range(1, 3, 0, 1).knownOffDiagonal();
|
||||
Matrix actual3a = testBlockMatrix.range(1, 3, 0, 1);
|
||||
EXPECT(assert_equal(expected3, actual3));
|
||||
EXPECT(assert_equal(expected3, actual3a));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -152,34 +121,51 @@ TEST(SymmetricBlockMatrix, expressions)
|
|||
Matrix b = (Matrix(1, 2) << 5, 6).finished();
|
||||
|
||||
SymmetricBlockMatrix bm1(list_of(2)(3)(1));
|
||||
bm1.full().triangularView().setZero();
|
||||
bm1(1, 1).selfadjointView().rankUpdate(a.transpose());
|
||||
EXPECT(assert_equal(Matrix(expected1.full().selfadjointView()), bm1.full().selfadjointView()));
|
||||
bm1.setZero();
|
||||
bm1.diagonalBlock(1).rankUpdate(a.transpose());
|
||||
EXPECT(assert_equal(Matrix(expected1.selfadjointView()), bm1.selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm2(list_of(2)(3)(1));
|
||||
bm2.full().triangularView().setZero();
|
||||
bm2(0, 1).knownOffDiagonal() += b.transpose() * a;
|
||||
EXPECT(assert_equal(Matrix(expected2.full().selfadjointView()), bm2.full().selfadjointView()));
|
||||
bm2.setZero();
|
||||
bm2.updateOffDiagonalBlock(0, 1, b.transpose() * a);
|
||||
EXPECT(assert_equal(Matrix(expected2.selfadjointView()), bm2.selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm3(list_of(2)(3)(1));
|
||||
bm3.full().triangularView().setZero();
|
||||
bm3(1, 0).knownOffDiagonal() += a.transpose() * b;
|
||||
EXPECT(assert_equal(Matrix(expected2.full().selfadjointView()), bm3.full().selfadjointView()));
|
||||
bm3.setZero();
|
||||
bm3.updateOffDiagonalBlock(1, 0, a.transpose() * b);
|
||||
EXPECT(assert_equal(Matrix(expected2.selfadjointView()), bm3.selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm4(list_of(2)(3)(1));
|
||||
bm4.full().triangularView().setZero();
|
||||
bm4(1, 1) += expected1(1, 1);
|
||||
EXPECT(assert_equal(Matrix(expected1.full().selfadjointView()), bm4.full().selfadjointView()));
|
||||
bm4.setZero();
|
||||
bm4.updateDiagonalBlock(1, expected1.diagonalBlock(1));
|
||||
EXPECT(assert_equal(Matrix(expected1.selfadjointView()), bm4.selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm5(list_of(2)(3)(1));
|
||||
bm5.full().triangularView().setZero();
|
||||
bm5(0, 1) += expected2(0, 1);
|
||||
EXPECT(assert_equal(Matrix(expected2.full().selfadjointView()), bm5.full().selfadjointView()));
|
||||
bm5.setZero();
|
||||
bm5.updateOffDiagonalBlock(0, 1, expected2.aboveDiagonalBlock(0, 1));
|
||||
EXPECT(assert_equal(Matrix(expected2.selfadjointView()), bm5.selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm6(list_of(2)(3)(1));
|
||||
bm6.full().triangularView().setZero();
|
||||
bm6(1, 0) += expected2(1, 0);
|
||||
EXPECT(assert_equal(Matrix(expected2.full().selfadjointView()), bm6.full().selfadjointView()));
|
||||
bm6.setZero();
|
||||
bm6.updateOffDiagonalBlock(1, 0, expected2.aboveDiagonalBlock(0, 1).transpose());
|
||||
EXPECT(assert_equal(Matrix(expected2.selfadjointView()), bm6.selfadjointView()));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SymmetricBlockMatrix, inverseInPlace) {
|
||||
// generate an invertible matrix
|
||||
const Vector3 a(1.0, 0.2, 2.0), b(0.3, 0.8, -1.0), c(0.1, 0.2, 0.7);
|
||||
Matrix inputMatrix(3, 3);
|
||||
inputMatrix.setZero();
|
||||
inputMatrix += a * a.transpose();
|
||||
inputMatrix += b * b.transpose();
|
||||
inputMatrix += c * c.transpose();
|
||||
const Matrix expectedInverse = inputMatrix.inverse();
|
||||
|
||||
SymmetricBlockMatrix symmMatrix(list_of(2)(1), inputMatrix);
|
||||
// invert in place
|
||||
symmMatrix.invertInPlace();
|
||||
EXPECT(assert_equal(expectedInverse, symmMatrix.selfadjointView()));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -29,7 +29,6 @@
|
|||
#include <string>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
#include <boost/make_shared.hpp>
|
||||
#include <boost/bind.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
@ -158,7 +157,6 @@ void DepthFirstForestParallel(FOREST& forest, DATA& rootData,
|
|||
#ifdef GTSAM_USE_TBB
|
||||
// Typedefs
|
||||
typedef typename FOREST::Node Node;
|
||||
typedef boost::shared_ptr<Node> sharedNode;
|
||||
|
||||
tbb::task::spawn_root_and_wait(
|
||||
internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
|
||||
|
|
|
@ -44,8 +44,9 @@ namespace gtsam {
|
|||
boost::shared_ptr<DATA> myData;
|
||||
VISITOR_POST& visitorPost;
|
||||
|
||||
PostOrderTask(const boost::shared_ptr<NODE>& treeNode, const boost::shared_ptr<DATA>& myData, VISITOR_POST& visitorPost) :
|
||||
treeNode(treeNode), myData(myData), visitorPost(visitorPost) {}
|
||||
PostOrderTask(const boost::shared_ptr<NODE>& treeNode,
|
||||
const boost::shared_ptr<DATA>& myData, VISITOR_POST& visitorPost)
|
||||
: treeNode(treeNode), myData(myData), visitorPost(visitorPost) {}
|
||||
|
||||
tbb::task* execute()
|
||||
{
|
||||
|
@ -71,10 +72,15 @@ namespace gtsam {
|
|||
bool isPostOrderPhase;
|
||||
|
||||
PreOrderTask(const boost::shared_ptr<NODE>& treeNode, const boost::shared_ptr<DATA>& myData,
|
||||
VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold,
|
||||
bool makeNewTasks = true) :
|
||||
treeNode(treeNode), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost),
|
||||
problemSizeThreshold(problemSizeThreshold), makeNewTasks(makeNewTasks), isPostOrderPhase(false) {}
|
||||
VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold,
|
||||
bool makeNewTasks = true)
|
||||
: treeNode(treeNode),
|
||||
myData(myData),
|
||||
visitorPre(visitorPre),
|
||||
visitorPost(visitorPost),
|
||||
problemSizeThreshold(problemSizeThreshold),
|
||||
makeNewTasks(makeNewTasks),
|
||||
isPostOrderPhase(false) {}
|
||||
|
||||
tbb::task* execute()
|
||||
{
|
||||
|
@ -93,8 +99,6 @@ namespace gtsam {
|
|||
// Allocate post-order task as a continuation
|
||||
isPostOrderPhase = true;
|
||||
recycle_as_continuation();
|
||||
//PostOrderTask<NODE, DATA, VISITOR_POST>& postOrderTask =
|
||||
// *new(allocate_continuation()) PostOrderTask<NODE, DATA, VISITOR_POST>(treeNode, myData, visitorPost);
|
||||
|
||||
bool overThreshold = (treeNode->problemSize() >= problemSizeThreshold);
|
||||
|
||||
|
@ -105,21 +109,18 @@ namespace gtsam {
|
|||
// Process child in a subtask. Important: Run visitorPre before calling
|
||||
// allocate_child so that if visitorPre throws an exception, we will not have
|
||||
// allocated an extra child, which would cause a TBB error.
|
||||
boost::shared_ptr<DATA> childData = boost::allocate_shared<DATA>(tbb::scalable_allocator<DATA>(), visitorPre(child, *myData));
|
||||
//childTasks.push_back(*new(postOrderTask.allocate_child())
|
||||
// PreOrderTask(child, childData, visitorPre, visitorPost,
|
||||
// problemSizeThreshold, overThreshold));
|
||||
tbb::task* childTask = new(allocate_child())
|
||||
PreOrderTask(child, childData, visitorPre, visitorPost,
|
||||
problemSizeThreshold, overThreshold);
|
||||
if(firstChild)
|
||||
boost::shared_ptr<DATA> childData = boost::allocate_shared<DATA>(
|
||||
tbb::scalable_allocator<DATA>(), visitorPre(child, *myData));
|
||||
tbb::task* childTask =
|
||||
new (allocate_child()) PreOrderTask(child, childData, visitorPre, visitorPost,
|
||||
problemSizeThreshold, overThreshold);
|
||||
if (firstChild)
|
||||
childTasks.push_back(*childTask);
|
||||
else
|
||||
firstChild = childTask;
|
||||
}
|
||||
|
||||
// If we have child tasks, start subtasks and wait for them to complete
|
||||
//postOrderTask.set_ref_count((int) treeNode->children.size());
|
||||
set_ref_count((int)treeNode->children.size());
|
||||
spawn(childTasks);
|
||||
return firstChild;
|
||||
|
|
|
@ -64,7 +64,7 @@
|
|||
#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||
|
||||
// Publish flag about Eigen typedef
|
||||
#cmakedefine GTSAM_USE_VECTOR3_POINTS
|
||||
#cmakedefine GTSAM_TYPEDEF_POINTS_TO_VECTORS
|
||||
|
||||
// Support Metis-based nested dissection
|
||||
#cmakedefine GTSAM_SUPPORT_NESTED_DISSECTION
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
namespace gtsam {
|
||||
|
||||
// Instantiate base classes
|
||||
template class ClusterTree<DiscreteBayesTree, DiscreteFactorGraph>;
|
||||
template class EliminatableClusterTree<DiscreteBayesTree, DiscreteFactorGraph>;
|
||||
template class JunctionTree<DiscreteBayesTree, DiscreteFactorGraph>;
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -26,8 +26,8 @@ namespace gtsam {
|
|||
class DiscreteEliminationTree;
|
||||
|
||||
/**
|
||||
* A ClusterTree, i.e., a set of variable clusters with factors, arranged in a tree, with
|
||||
* the additional property that it represents the clique tree associated with a Bayes net.
|
||||
* An EliminatableClusterTree, i.e., a set of variable clusters with factors, arranged in a tree,
|
||||
* with the additional property that it represents the clique tree associated with a Bayes net.
|
||||
*
|
||||
* In GTSAM a junction tree is an intermediate data structure in multifrontal
|
||||
* variable elimination. Each node is a cluster of factors, along with a
|
||||
|
@ -39,7 +39,7 @@ namespace gtsam {
|
|||
* BayesTree stores conditionals, that are the product of eliminating the factors in the
|
||||
* corresponding JunctionTree cliques.
|
||||
*
|
||||
* The tree structure and elimination method are exactly analagous to the EliminationTree,
|
||||
* The tree structure and elimination method are exactly analogous to the EliminationTree,
|
||||
* except that in the JunctionTree, at each node multiple variables are eliminated at a time.
|
||||
*
|
||||
* \addtogroup Multifrontal
|
||||
|
@ -53,7 +53,7 @@ namespace gtsam {
|
|||
typedef boost::shared_ptr<This> shared_ptr; ///< Shared pointer to this class
|
||||
|
||||
/**
|
||||
* Build the elimination tree of a factor graph using pre-computed column structure.
|
||||
* Build the elimination tree of a factor graph using precomputed column structure.
|
||||
* @param factorGraph The factor graph for which to build the elimination tree
|
||||
* @param structure The set of factors involving each variable. If this is not
|
||||
* precomputed, you can call the Create(const FactorGraph<DERIVEDFACTOR>&)
|
||||
|
|
|
@ -106,7 +106,7 @@ Point2 Cal3Bundler::calibrate(const Point2& pi, const double tol) const {
|
|||
const int maxIterations = 10;
|
||||
int iteration;
|
||||
for (iteration = 0; iteration < maxIterations; ++iteration) {
|
||||
if (uncalibrate(pn).distance(pi) <= tol)
|
||||
if (distance2(uncalibrate(pn), pi) <= tol)
|
||||
break;
|
||||
const double x = pn.x(), y = pn.y(), xx = x * x, yy = y * y;
|
||||
const double rr = xx + yy;
|
||||
|
|
|
@ -144,7 +144,7 @@ Point2 Cal3DS2_Base::calibrate(const Point2& pi, const double tol) const {
|
|||
const int maxIterations = 10;
|
||||
int iteration;
|
||||
for (iteration = 0; iteration < maxIterations; ++iteration) {
|
||||
if (uncalibrate(pn).distance(pi) <= tol) break;
|
||||
if (distance2(uncalibrate(pn), pi) <= tol) break;
|
||||
const double x = pn.x(), y = pn.y(), xy = x * y, xx = x * x, yy = y * y;
|
||||
const double rr = xx + yy;
|
||||
const double g = (1 + k1_ * rr + k2_ * rr * rr);
|
||||
|
|
|
@ -51,6 +51,13 @@ Cal3_S2::Cal3_S2(const std::string &path) :
|
|||
infile.close();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
ostream& operator<<(ostream& os, const Cal3_S2& cal) {
|
||||
os << "{fx: " << cal.fx() << ", fy: " << cal.fy() << ", s:" << cal.skew() << ", px:" << cal.px()
|
||||
<< ", py:" << cal.py() << "}";
|
||||
return os;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void Cal3_S2::print(const std::string& s) const {
|
||||
gtsam::print((Matrix)matrix(), s);
|
||||
|
|
|
@ -75,6 +75,9 @@ public:
|
|||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
/// Output stream operator
|
||||
GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Cal3_S2& cal);
|
||||
|
||||
/// print with optional string
|
||||
void print(const std::string& s = "Cal3_S2") const;
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -19,6 +19,7 @@
|
|||
#pragma once
|
||||
|
||||
#include <gtsam/geometry/BearingRange.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/base/concepts.h>
|
||||
#include <gtsam/base/Manifold.h>
|
||||
|
@ -28,8 +29,6 @@
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
class Point2;
|
||||
|
||||
class GTSAM_EXPORT CheiralityException: public ThreadsafeException<
|
||||
CheiralityException> {
|
||||
public:
|
||||
|
@ -262,6 +261,14 @@ public:
|
|||
/// @name Named Constructors
|
||||
/// @{
|
||||
|
||||
// Create CalibratedCamera, with derivatives
|
||||
static CalibratedCamera Create(const Pose3& pose,
|
||||
OptionalJacobian<dimension, 6> H1 = boost::none) {
|
||||
if (H1)
|
||||
*H1 << I_6x6;
|
||||
return CalibratedCamera(pose);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a level camera at the given 2D pose and height
|
||||
* @param pose2 specifies the location and viewing direction
|
||||
|
|
|
@ -56,8 +56,7 @@ protected:
|
|||
// Project and fill error vector
|
||||
Vector b(ZDim * m);
|
||||
for (size_t i = 0, row = 0; i < m; i++, row += ZDim) {
|
||||
Z e = predicted[i] - measured[i];
|
||||
b.segment<ZDim>(row) = e.vector();
|
||||
b.segment<ZDim>(row) = traits<Z>::Local(measured[i], predicted[i]);
|
||||
}
|
||||
return b;
|
||||
}
|
||||
|
@ -107,7 +106,8 @@ public:
|
|||
|
||||
// Allocate result
|
||||
size_t m = this->size();
|
||||
std::vector<Z> z(m);
|
||||
std::vector<Z> z;
|
||||
z.reserve(m);
|
||||
|
||||
// Allocate derivatives
|
||||
if (E) E->resize(ZDim * m, N);
|
||||
|
@ -117,7 +117,7 @@ public:
|
|||
for (size_t i = 0; i < m; i++) {
|
||||
MatrixZD Fi;
|
||||
Eigen::Matrix<double, ZDim, N> Ei;
|
||||
z[i] = this->at(i).project2(point, Fs ? &Fi : 0, E ? &Ei : 0);
|
||||
z.emplace_back(this->at(i).project2(point, Fs ? &Fi : 0, E ? &Ei : 0));
|
||||
if (Fs) (*Fs)[i] = Fi;
|
||||
if (E) E->block<ZDim, N>(ZDim * i, 0) = Ei;
|
||||
}
|
||||
|
@ -157,28 +157,29 @@ public:
|
|||
for (size_t i = 0; i < m; i++) { // for each camera
|
||||
|
||||
const MatrixZD& Fi = Fs[i];
|
||||
const auto FiT = Fi.transpose();
|
||||
const Eigen::Matrix<double, ZDim, N> Ei_P = //
|
||||
E.block(ZDim * i, 0, ZDim, N) * P;
|
||||
|
||||
// D = (Dx2) * ZDim
|
||||
augmentedHessian(i, m) = Fi.transpose() * b.segment<ZDim>(ZDim * i) // F' * b
|
||||
- Fi.transpose() * (Ei_P * (E.transpose() * b)); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1)
|
||||
augmentedHessian.setOffDiagonalBlock(i, m, FiT * b.segment<ZDim>(ZDim * i) // F' * b
|
||||
- FiT * (Ei_P * (E.transpose() * b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1)
|
||||
|
||||
// (DxD) = (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) )
|
||||
augmentedHessian(i, i) = Fi.transpose()
|
||||
* (Fi - Ei_P * E.block(ZDim * i, 0, ZDim, N).transpose() * Fi);
|
||||
augmentedHessian.setDiagonalBlock(i, FiT
|
||||
* (Fi - Ei_P * E.block(ZDim * i, 0, ZDim, N).transpose() * Fi));
|
||||
|
||||
// upper triangular part of the hessian
|
||||
for (size_t j = i + 1; j < m; j++) { // for each camera
|
||||
const MatrixZD& Fj = Fs[j];
|
||||
|
||||
// (DxD) = (Dx2) * ( (2x2) * (2xD) )
|
||||
augmentedHessian(i, j) = -Fi.transpose()
|
||||
* (Ei_P * E.block(ZDim * j, 0, ZDim, N).transpose() * Fj);
|
||||
augmentedHessian.setOffDiagonalBlock(i, j, -FiT
|
||||
* (Ei_P * E.block(ZDim * j, 0, ZDim, N).transpose() * Fj));
|
||||
}
|
||||
} // end of for over cameras
|
||||
|
||||
augmentedHessian(m, m)(0, 0) += b.squaredNorm();
|
||||
augmentedHessian.diagonalBlock(m)(0, 0) += b.squaredNorm();
|
||||
return augmentedHessian;
|
||||
}
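// For reference (a sketch using the same symbols as the comments above): with
// whitened Jacobians F (w.r.t. the cameras) and E (w.r.t. the point), point
// covariance P, and error vector b, the blocks assembled above form the Schur
// complement system
//   G = F'F - F'E P E'F,   g = F'b - F'E P E'b,   constant term b'b,
// stored block-wise in the upper triangle of the SymmetricBlockMatrix.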
|
||||
|
||||
|
@ -252,8 +253,6 @@ public:
|
|||
// G = F' * F - F' * E * P * E' * F
|
||||
// g = F' * (b - E * P * E' * b)
|
||||
|
||||
Eigen::Matrix<double, D, D> matrixBlock;
|
||||
|
||||
// a single point is observed in m cameras
|
||||
size_t m = Fs.size(); // cameras observing current point
|
||||
size_t M = (augmentedHessian.rows() - 1) / D; // all cameras in the group
|
||||
|
@ -263,6 +262,7 @@ public:
|
|||
for (size_t i = 0; i < m; i++) { // for each camera in the current factor
|
||||
|
||||
const MatrixZD& Fi = Fs[i];
|
||||
const auto FiT = Fi.transpose();
|
||||
const Eigen::Matrix<double, 2, N> Ei_P = E.template block<ZDim, N>(
|
||||
ZDim * i, 0) * P;
|
||||
|
||||
|
@ -275,17 +275,15 @@ public:
|
|||
// information vector - store previous vector
|
||||
// vectorBlock = augmentedHessian(aug_i, aug_m).knownOffDiagonal();
|
||||
// add contribution of current factor
|
||||
augmentedHessian(aug_i, M) = augmentedHessian(aug_i, M).knownOffDiagonal()
|
||||
+ Fi.transpose() * b.segment<ZDim>(ZDim * i) // F' * b
|
||||
- Fi.transpose() * (Ei_P * (E.transpose() * b)); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1)
|
||||
augmentedHessian.updateOffDiagonalBlock(aug_i, M,
|
||||
FiT * b.segment<ZDim>(ZDim * i) // F' * b
|
||||
- FiT * (Ei_P * (E.transpose() * b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1)
|
||||
|
||||
// (DxD) = (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) )
|
||||
// main block diagonal - store previous block
|
||||
matrixBlock = augmentedHessian(aug_i, aug_i);
|
||||
// (DxD) += (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) )
|
||||
// add contribution of current factor
|
||||
augmentedHessian(aug_i, aug_i) = matrixBlock
|
||||
+ (Fi.transpose()
|
||||
* (Fi - Ei_P * E.template block<ZDim, N>(ZDim * i, 0).transpose() * Fi));
|
||||
// TODO(gareth): Eigen doesn't let us pass the expression. Call eval() for now...
|
||||
augmentedHessian.updateDiagonalBlock(aug_i,
|
||||
((FiT * (Fi - Ei_P * E.template block<ZDim, N>(ZDim * i, 0).transpose() * Fi))).eval());
|
||||
|
||||
// upper triangular part of the hessian
|
||||
for (size_t j = i + 1; j < m; j++) { // for each camera
|
||||
|
@ -297,14 +295,12 @@ public:
|
|||
// off diagonal block - store previous block
|
||||
// matrixBlock = augmentedHessian(aug_i, aug_j).knownOffDiagonal();
|
||||
// add contribution of current factor
|
||||
augmentedHessian(aug_i, aug_j) =
|
||||
augmentedHessian(aug_i, aug_j).knownOffDiagonal()
|
||||
- Fi.transpose()
|
||||
* (Ei_P * E.template block<ZDim, N>(ZDim * j, 0).transpose() * Fj);
|
||||
augmentedHessian.updateOffDiagonalBlock(aug_i, aug_j,
|
||||
-FiT * (Ei_P * E.template block<ZDim, N>(ZDim * j, 0).transpose() * Fj));
|
||||
}
|
||||
} // end of for over cameras
|
||||
|
||||
augmentedHessian(M, M)(0, 0) += b.squaredNorm();
|
||||
augmentedHessian.diagonalBlock(M)(0, 0) += b.squaredNorm();
|
||||
}
|
||||
|
||||
private:
|
||||
|
|
|
@ -312,6 +312,16 @@ public:
|
|||
return Base::equals(camera, tol) && K_->equals(e->calibration(), tol);
|
||||
}
|
||||
|
||||
/// stream operator
|
||||
friend std::ostream& operator<<(std::ostream &os, const PinholePose& camera) {
|
||||
os << "{R: " << camera.pose().rotation().rpy().transpose();
|
||||
os << ", t: " << camera.pose().translation().transpose();
|
||||
if (!camera.K_) os << ", K: none";
|
||||
else os << ", K: " << *camera.K_;
|
||||
os << "}";
|
||||
return os;
|
||||
}
|
||||
|
||||
/// print
|
||||
void print(const std::string& s = "PinholePose") const {
|
||||
Base::print(s);
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -23,21 +23,11 @@ using namespace std;
|
|||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
void Point2::print(const string& s) const {
|
||||
cout << s << *this << endl;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
bool Point2::equals(const Point2& q, double tol) const {
|
||||
return (fabs(x_ - q.x()) < tol && fabs(y_ - q.y()) < tol);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double Point2::norm(OptionalJacobian<1,2> H) const {
|
||||
double r = sqrt(x_ * x_ + y_ * y_);
|
||||
double norm2(const Point2& p, OptionalJacobian<1,2> H) {
|
||||
double r = std::sqrt(p.x() * p.x() + p.y() * p.y());
|
||||
if (H) {
|
||||
if (fabs(r) > 1e-10)
|
||||
*H << x_ / r, y_ / r;
|
||||
*H << p.x() / r, p.y() / r;
|
||||
else
|
||||
*H << 1, 1; // really infinity, why 1 ?
|
||||
}
|
||||
|
@ -45,34 +35,66 @@ double Point2::norm(OptionalJacobian<1,2> H) const {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double Point2::distance(const Point2& point, OptionalJacobian<1,2> H1,
|
||||
OptionalJacobian<1,2> H2) const {
|
||||
Point2 d = point - *this;
|
||||
double distance2(const Point2& p, const Point2& q, OptionalJacobian<1, 2> H1,
|
||||
OptionalJacobian<1, 2> H2) {
|
||||
Point2 d = q - p;
|
||||
if (H1 || H2) {
|
||||
Matrix12 H;
|
||||
double r = d.norm(H);
|
||||
double r = norm2(d, H);
|
||||
if (H1) *H1 = -H;
|
||||
if (H2) *H2 = H;
|
||||
return r;
|
||||
} else
|
||||
} else {
|
||||
return d.norm();
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Calculate f and h, respectively the parallel and perpendicular distance of
 * the intersections of two circles along and from the line connecting the centers.
 * Both are dimensionless fractions of the distance d between the circle centers.
 * If the circles do not intersect or they are identical, returns boost::none.
 * If there is only one solution (touching circles, as determined by tol), h will be exactly zero.
 * h is a good measure for how accurate the intersection will be, as when circles touch
 * or nearly touch, the intersection is ill-defined with noisy radius measurements.
 * @param R_d : R/d, ratio of radius of first circle to distance between centers
 * @param r_d : r/d, ratio of radius of second circle to distance between centers
 * @param tol: absolute tolerance below which we consider touching circles
 */
|
||||
#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS
|
||||
|
||||
/* ************************************************************************* */
|
||||
void Point2::print(const string& s) const {
|
||||
cout << s << *this << endl;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
bool Point2::equals(const Point2& q, double tol) const {
|
||||
return (fabs(x() - q.x()) < tol && fabs(y() - q.y()) < tol);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double Point2::norm(OptionalJacobian<1,2> H) const {
|
||||
return gtsam::norm2(*this, H);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double Point2::distance(const Point2& point, OptionalJacobian<1,2> H1,
|
||||
OptionalJacobian<1,2> H2) const {
|
||||
return gtsam::distance2(*this, point, H1, H2);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
ostream &operator<<(ostream &os, const Point2& p) {
|
||||
os << '(' << p.x() << ", " << p.y() << ')';
|
||||
return os;
|
||||
}
|
||||
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||
boost::optional<Point2> CircleCircleIntersection(double R_d, double r_d, double tol) {
|
||||
return circleCircleIntersection(R_d, r_d, tol);
|
||||
}
|
||||
std::list<Point2> CircleCircleIntersection(Point2 c1, Point2 c2, boost::optional<Point2> fh) {
|
||||
return circleCircleIntersection(c1, c2, fh);
|
||||
}
|
||||
std::list<Point2> CircleCircleIntersection(Point2 c1, double r1, Point2 c2, double r2, double tol) {
|
||||
return circleCircleIntersection(c1, r1, c2, r2, tol);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Math inspired by http://paulbourke.net/geometry/circlesphere/
|
||||
boost::optional<Point2> Point2::CircleCircleIntersection(double R_d, double r_d,
|
||||
boost::optional<Point2> circleCircleIntersection(double R_d, double r_d,
|
||||
double tol) {
|
||||
|
||||
double R2_d2 = R_d*R_d; // Yes, RD-D2 !
|
||||
|
@ -83,11 +105,11 @@ boost::optional<Point2> Point2::CircleCircleIntersection(double R_d, double r_d,
|
|||
// Hence, there are only solutions if >=0
|
||||
if (h2<-tol) return boost::none; // allow *slightly* negative
|
||||
else if (h2<tol) return Point2(f,0.0); // one solution
|
||||
else return Point2(f,sqrt(h2)); // two solutions
|
||||
else return Point2(f,std::sqrt(h2)); // two solutions
|
||||
}
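// --- Usage sketch (illustration only, not part of this change) --------------
// A hedged, worked example of the normalized helper above: two circles whose
// radii are each 0.6 of the distance between their centers.
void circleCircleIntersectionExample() {
  boost::optional<gtsam::Point2> fh = gtsam::circleCircleIntersection(0.6, 0.6);
  if (fh) {
    // f = 0.5 (halfway between the centers) and h = sqrt(0.36 - 0.25) ~= 0.33,
    // both expressed as fractions of the center distance d.
    const double f = fh->x(), h = fh->y();
    (void)f; (void)h;
  }
}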
|
||||
|
||||
/* ************************************************************************* */
|
||||
list<Point2> Point2::CircleCircleIntersection(Point2 c1, Point2 c2,
|
||||
list<Point2> circleCircleIntersection(Point2 c1, Point2 c2,
|
||||
boost::optional<Point2> fh) {
|
||||
|
||||
list<Point2> solutions;
|
||||
|
@ -116,27 +138,21 @@ list<Point2> Point2::CircleCircleIntersection(Point2 c1, Point2 c2,
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
list<Point2> Point2::CircleCircleIntersection(Point2 c1, double r1, Point2 c2,
|
||||
list<Point2> circleCircleIntersection(Point2 c1, double r1, Point2 c2,
|
||||
double r2, double tol) {
|
||||
|
||||
// distance between circle centers.
|
||||
double d = c1.dist(c2);
|
||||
double d = distance2(c1, c2);
|
||||
|
||||
// centers coincide, either no solution or infinite number of solutions.
|
||||
if (d<1e-9) return list<Point2>();
|
||||
|
||||
// Calculate f and h given normalized radii
|
||||
double _d = 1.0/d, R_d = r1*_d, r_d=r2*_d;
|
||||
boost::optional<Point2> fh = CircleCircleIntersection(R_d,r_d);
|
||||
boost::optional<Point2> fh = circleCircleIntersection(R_d,r_d);
|
||||
|
||||
// Call version that takes fh
|
||||
return CircleCircleIntersection(c1, c2, fh);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
ostream &operator<<(ostream &os, const Point2& p) {
|
||||
os << '(' << p.x() << ", " << p.y() << ')';
|
||||
return os;
|
||||
return circleCircleIntersection(c1, c2, fh);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -22,6 +22,14 @@
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS
|
||||
|
||||
/// As of GTSAM 4, in order to make GTSAM more lean,
|
||||
/// it is now possible to just typedef Point2 to Vector2
|
||||
typedef Vector2 Point2;
|
||||
|
||||
#else
|
||||
|
||||
/**
|
||||
* A 2D point
|
||||
* Complies with the Testable Concept
|
||||
|
@ -29,69 +37,30 @@ namespace gtsam {
|
|||
* @addtogroup geometry
|
||||
* \nosubgrouping
|
||||
*/
|
||||
class GTSAM_EXPORT Point2 {
|
||||
class GTSAM_EXPORT Point2 : public Vector2 {
|
||||
private:
|
||||
|
||||
double x_, y_;
|
||||
|
||||
public:
|
||||
enum { dimension = 2 };
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/// default constructor
|
||||
Point2(): x_(0), y_(0) {}
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||
// Deprecated default constructor initializes to zero, in contrast to new behavior below
|
||||
Point2() { setZero(); }
|
||||
#else
|
||||
Point2() {}
|
||||
#endif
|
||||
|
||||
/// construct from doubles
|
||||
Point2(double x, double y): x_(x), y_(y) {}
|
||||
using Vector2::Vector2;
|
||||
|
||||
/// @}
|
||||
/// @name Advanced Constructors
|
||||
/// @{
|
||||
|
||||
/// construct from 2D vector
|
||||
explicit Point2(const Vector2& v) {
|
||||
x_ = v(0);
|
||||
y_ = v(1);
|
||||
}
|
||||
|
||||
/*
|
||||
* @brief Circle-circle intersection, given normalized radii.
|
||||
* Calculate f and h, respectively the parallel and perpendicular distance of
|
||||
* the intersections of two circles along and from the line connecting the centers.
|
||||
* Both are dimensionless fractions of the distance d between the circle centers.
|
||||
* If the circles do not intersect or they are identical, returns boost::none.
|
||||
* If one solution (touching circles, as determined by tol), h will be exactly zero.
|
||||
* h is a good measure for how accurate the intersection will be, as when circles touch
|
||||
* or nearly touch, the intersection is ill-defined with noisy radius measurements.
|
||||
* @param R_d : R/d, ratio of radius of first circle to distance between centers
|
||||
* @param r_d : r/d, ratio of radius of second circle to distance between centers
|
||||
* @param tol: absolute tolerance below which we consider touching circles
|
||||
* @return optional Point2 with f and h, boost::none if no solution.
|
||||
*/
|
||||
static boost::optional<Point2> CircleCircleIntersection(double R_d, double r_d,
|
||||
double tol = 1e-9);
|
||||
|
||||
/*
|
||||
* @brief Circle-circle intersection, from the normalized radii solution.
|
||||
* @param c1 center of first circle
|
||||
* @param c2 center of second circle
|
||||
* @return list of solutions (0,1, or 2). Identical circles will return empty list, as well.
|
||||
*/
|
||||
static std::list<Point2> CircleCircleIntersection(Point2 c1, Point2 c2, boost::optional<Point2>);
|
||||
|
||||
/**
|
||||
* @brief Intersect 2 circles
|
||||
* @param c1 center of first circle
|
||||
* @param r1 radius of first circle
|
||||
* @param c2 center of second circle
|
||||
* @param r2 radius of second circle
|
||||
* @param tol: absolute tolerance below which we consider touching circles
|
||||
* @return list of solutions (0,1, or 2). Identical circles will return empty list, as well.
|
||||
*/
|
||||
static std::list<Point2> CircleCircleIntersection(Point2 c1, double r1,
|
||||
Point2 c2, double r2, double tol = 1e-9);
|
||||
|
||||
explicit Point2(const Vector2& v):Vector2(v) {}
|
||||
/// @}
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
@ -107,21 +76,7 @@ public:
|
|||
/// @{
|
||||
|
||||
/// identity
|
||||
inline static Point2 identity() {return Point2();}
|
||||
|
||||
/// inverse
|
||||
inline Point2 operator- () const {return Point2(-x_,-y_);}
|
||||
|
||||
/// add vector on right
|
||||
inline Point2 operator +(const Vector2& v) const {
|
||||
return Point2(x_ + v[0], y_ + v[1]);
|
||||
}
|
||||
|
||||
/// add
|
||||
inline Point2 operator + (const Point2& q) const {return Point2(x_+q.x_,y_+q.y_);}
|
||||
|
||||
/// subtract
|
||||
inline Point2 operator - (const Point2& q) const {return Point2(x_-q.x_,y_-q.y_);}
|
||||
inline static Point2 identity() {return Point2(0,0);}
|
||||
|
||||
/// @}
|
||||
/// @name Vector Space
|
||||
|
@ -137,51 +92,44 @@ public:
|
|||
double distance(const Point2& p2, OptionalJacobian<1,2> H1 = boost::none,
|
||||
OptionalJacobian<1,2> H2 = boost::none) const;
|
||||
|
||||
/** @deprecated The following function has been deprecated, use distance above */
|
||||
inline double dist(const Point2& p2) const {
|
||||
return (p2 - *this).norm();
|
||||
}
|
||||
|
||||
/// multiply with a scalar
|
||||
inline Point2 operator * (double s) const {return Point2(x_*s,y_*s);}
|
||||
|
||||
/// divide by a scalar
|
||||
inline Point2 operator / (double q) const {return Point2(x_/q,y_/q);}
|
||||
|
||||
/// @}
|
||||
/// @name Standard Interface
|
||||
/// @{
|
||||
|
||||
/// equality
|
||||
inline bool operator ==(const Point2& q) const {return x_==q.x_ && y_==q.y_;}
|
||||
inline bool operator ==(const Point2& q) const {return x()==q.x() && y()==q.y();}
|
||||
|
||||
/// get x
|
||||
double x() const {return x_;}
|
||||
inline double x() const {return (*this)[0];}
|
||||
|
||||
/// get y
|
||||
double y() const {return y_;}
|
||||
inline double y() const {return (*this)[1];}
|
||||
|
||||
/// return vectorized form (column-wise). TODO: why does this function exist?
|
||||
Vector2 vector() const { return Vector2(x_, y_); }
|
||||
/// return vectorized form (column-wise).
|
||||
const Vector2& vector() const { return *this; }
|
||||
|
||||
/// @}
|
||||
|
||||
/// @name Deprecated
|
||||
/// @{
|
||||
inline void operator += (const Point2& q) {x_+=q.x_;y_+=q.y_;}
|
||||
inline void operator *= (double s) {x_*=s;y_*=s;}
|
||||
Point2 inverse() const { return -(*this);}
|
||||
Point2 compose(const Point2& q) const { return (*this)+q;}
|
||||
Point2 between(const Point2& q) const { return q-(*this);}
|
||||
Vector2 localCoordinates(const Point2& q) const { return between(q).vector();}
|
||||
Point2 retract(const Vector2& v) const { return compose(Point2(v));}
|
||||
static Vector2 Logmap(const Point2& p) { return p.vector();}
|
||||
static Point2 Expmap(const Vector2& v) { return Point2(v);}
|
||||
/// @}
|
||||
|
||||
/// Streaming
|
||||
GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point2& p);
|
||||
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||
/// @name Deprecated
|
||||
/// @{
|
||||
Point2 inverse() const { return -(*this); }
|
||||
Point2 compose(const Point2& q) const { return (*this)+q;}
|
||||
Point2 between(const Point2& q) const { return q-(*this);}
|
||||
Vector2 localCoordinates(const Point2& q) const { return between(q);}
|
||||
Point2 retract(const Vector2& v) const { return compose(Point2(v));}
|
||||
static Vector2 Logmap(const Point2& p) { return p;}
|
||||
static Point2 Expmap(const Vector2& v) { return Point2(v);}
|
||||
inline double dist(const Point2& p2) const {return distance(p2);}
|
||||
static boost::optional<Point2> CircleCircleIntersection(double R_d, double r_d, double tol = 1e-9);
|
||||
static std::list<Point2> CircleCircleIntersection(Point2 c1, Point2 c2, boost::optional<Point2> fh);
|
||||
static std::list<Point2> CircleCircleIntersection(Point2 c1, double r1, Point2 c2, double r2, double tol = 1e-9);
|
||||
/// @}
|
||||
#endif
|
||||
|
||||
private:
|
||||
|
||||
/// @name Advanced Interface
|
||||
|
@ -192,13 +140,25 @@ private:
|
|||
template<class ARCHIVE>
|
||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/)
|
||||
{
|
||||
ar & BOOST_SERIALIZATION_NVP(x_);
|
||||
ar & BOOST_SERIALIZATION_NVP(y_);
|
||||
}
|
||||
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Vector2);}
|
||||
|
||||
/// @}
|
||||
/// @}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct traits<Point2> : public internal::VectorSpace<Point2> {
|
||||
};
|
||||
|
||||
#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS
|
||||
|
||||
/// Distance of the point from the origin, with Jacobian
double norm2(const Point2& p, OptionalJacobian<1, 2> H = boost::none);

/// distance between two points
double distance2(const Point2& p1, const Point2& q,
                 OptionalJacobian<1, 2> H1 = boost::none,
                 OptionalJacobian<1, 2> H2 = boost::none);
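These free functions replace the old Point2 member calls (distance/dist); both optionally return 1x2 Jacobians. A small usage sketch, with values borrowed from the updated tests further down in this change (illustrative, not part of the header):

#include <gtsam/geometry/Point2.h>

void point2DistanceExample() {
  using namespace gtsam;
  Point2 p1(4, 5), p2(1, 1);
  Matrix12 H1, H2;                       // 1x2 Jacobians w.r.t. p1 and p2
  double d = distance2(p1, p2, H1, H2);  // 5 for this pair (3-4-5 triangle)
  double n = norm2(p2 - p1);             // same value, via the norm of the difference
  (void)d; (void)n;
}
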
// Convenience typedef
typedef std::pair<Point2, Point2> Point2Pair;
std::ostream &operator<<(std::ostream &os, const gtsam::Point2Pair &p);
|
||||
|
@ -207,10 +167,45 @@ std::ostream &operator<<(std::ostream &os, const gtsam::Point2Pair &p);
|
|||
typedef std::vector<Point2> Point2Vector;

/// multiply with scalar
inline Point2 operator*(double s, const Point2& p) {return p*s;}
inline Point2 operator*(double s, const Point2& p) {
  return p * s;
}

template<>
struct traits<Point2> : public internal::VectorSpace<Point2> {};
/*
 * @brief Circle-circle intersection, given normalized radii.
 * Calculate f and h, respectively the parallel and perpendicular distance of
 * the intersections of two circles along and from the line connecting the centers.
 * Both are dimensionless fractions of the distance d between the circle centers.
 * If the circles do not intersect or they are identical, returns boost::none.
 * If one solution (touching circles, as determined by tol), h will be exactly zero.
 * h is a good measure for how accurate the intersection will be, as when circles touch
 * or nearly touch, the intersection is ill-defined with noisy radius measurements.
 * @param R_d : R/d, ratio of radius of first circle to distance between centers
 * @param r_d : r/d, ratio of radius of second circle to distance between centers
 * @param tol: absolute tolerance below which we consider touching circles
 * @return optional Point2 with f and h, boost::none if no solution.
 */
boost::optional<Point2> circleCircleIntersection(double R_d, double r_d, double tol = 1e-9);
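For orientation: with the two centers a distance d apart, equating the circle equations gives the along-axis fraction f = (1 + R_d^2 - r_d^2)/2 and the perpendicular fraction h = sqrt(R_d^2 - f^2). A minimal sketch of that computation (assumed helper name; not the library's implementation, which also handles the identical-circle case):

#include <cmath>
#include <boost/optional.hpp>
#include <gtsam/geometry/Point2.h>

// Sketch: returns (f, h) as fractions of the center distance d, or boost::none
// when the circles cannot intersect.
boost::optional<gtsam::Point2> circleCircleSketch(double R_d, double r_d, double tol = 1e-9) {
  const double f = 0.5 * (1.0 + R_d * R_d - r_d * r_d);  // along the line of centers
  const double h2 = R_d * R_d - f * f;                   // squared perpendicular offset
  if (h2 < -tol) return boost::none;                     // circles too far apart or nested
  const double h = (h2 > tol) ? std::sqrt(h2) : 0.0;     // touching circles: exactly one solution
  return gtsam::Point2(f, h);
}
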

/*
 * @brief Circle-circle intersection, from the normalized radii solution.
 * @param c1 center of first circle
 * @param c2 center of second circle
 * @return list of solutions (0,1, or 2). Identical circles will return empty list, as well.
 */
std::list<Point2> circleCircleIntersection(Point2 c1, Point2 c2, boost::optional<Point2> fh);

/**
 * @brief Intersect 2 circles
 * @param c1 center of first circle
 * @param r1 radius of first circle
 * @param c2 center of second circle
 * @param r2 radius of second circle
 * @param tol: absolute tolerance below which we consider touching circles
 * @return list of solutions (0,1, or 2). Identical circles will return empty list, as well.
 */
std::list<Point2> circleCircleIntersection(Point2 c1, double r1,
                                           Point2 c2, double r2, double tol = 1e-9);

} // \ namespace gtsam
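Typical use of the center/radius overload; the values mirror the updated unit tests further down in this change:

#include <gtsam/geometry/Point2.h>
#include <list>

int main() {
  using gtsam::Point2;
  // Two circles whose boundaries cross in two points.
  std::list<Point2> sols =
      gtsam::circleCircleIntersection(Point2(0, 0), 5.0, Point2(5, 0), 1.0);
  // Per the tests: two solutions, roughly (4.9, 0.994987) and (4.9, -0.994987).
  return sols.size() == 2 ? 0 : 1;
}
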
|
||||
|
||||
|
|
|
@ -21,7 +21,7 @@ using namespace std;
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
#ifndef GTSAM_USE_VECTOR3_POINTS
|
||||
#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS
|
||||
bool Point3::equals(const Point3 &q, double tol) const {
|
||||
return (fabs(x() - q.x()) < tol && fabs(y() - q.y()) < tol &&
|
||||
fabs(z() - q.z()) < tol);
|
||||
|
@ -34,11 +34,11 @@ void Point3::print(const string& s) const {
|
|||
/* ************************************************************************* */
|
||||
double Point3::distance(const Point3 &q, OptionalJacobian<1, 3> H1,
|
||||
OptionalJacobian<1, 3> H2) const {
|
||||
return gtsam::distance(*this,q,H1,H2);
|
||||
return gtsam::distance3(*this,q,H1,H2);
|
||||
}
|
||||
|
||||
double Point3::norm(OptionalJacobian<1,3> H) const {
|
||||
return gtsam::norm(*this, H);
|
||||
return gtsam::norm3(*this, H);
|
||||
}
|
||||
|
||||
Point3 Point3::normalized(OptionalJacobian<3,3> H) const {
|
||||
|
@ -57,7 +57,7 @@ double Point3::dot(const Point3 &q, OptionalJacobian<1, 3> H1,
|
|||
|
||||
/* ************************************************************************* */
|
||||
ostream &operator<<(ostream &os, const Point3& p) {
|
||||
os << '[' << p.x() << ", " << p.y() << ", " << p.z() << "]\';";
|
||||
os << '[' << p.x() << ", " << p.y() << ", " << p.z() << "]'";
|
||||
return os;
|
||||
}
|
||||
|
||||
|
@ -80,8 +80,8 @@ Point3 Point3::sub(const Point3 &q, OptionalJacobian<3,3> H1,
|
|||
|
||||
#endif
|
||||
/* ************************************************************************* */
|
||||
double distance(const Point3 &p1, const Point3 &q, OptionalJacobian<1, 3> H1,
|
||||
OptionalJacobian<1, 3> H2) {
|
||||
double distance3(const Point3 &p1, const Point3 &q, OptionalJacobian<1, 3> H1,
|
||||
OptionalJacobian<1, 3> H2) {
|
||||
double d = (q - p1).norm();
|
||||
if (H1) {
|
||||
*H1 << p1.x() - q.x(), p1.y() - q.y(), p1.z() - q.z();
|
||||
|
@ -94,7 +94,7 @@ double distance(const Point3 &p1, const Point3 &q, OptionalJacobian<1, 3> H1,
|
|||
return d;
|
||||
}
|
||||
|
||||
double norm(const Point3 &p, OptionalJacobian<1, 3> H) {
|
||||
double norm3(const Point3 &p, OptionalJacobian<1, 3> H) {
|
||||
double r = sqrt(p.x() * p.x() + p.y() * p.y() + p.z() * p.z());
|
||||
if (H) {
|
||||
if (fabs(r) > 1e-10)
|
||||
|
@ -106,7 +106,7 @@ double norm(const Point3 &p, OptionalJacobian<1, 3> H) {
|
|||
}
|
||||
|
||||
Point3 normalize(const Point3 &p, OptionalJacobian<3, 3> H) {
|
||||
Point3 normalized = p / norm(p);
|
||||
Point3 normalized = p / p.norm();
|
||||
if (H) {
|
||||
// 3*3 Derivative
|
||||
double x2 = p.x() * p.x(), y2 = p.y() * p.y(), z2 = p.z() * p.z();
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
|
||||
namespace gtsam {

#ifdef GTSAM_USE_VECTOR3_POINTS
#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS

/// As of GTSAM 4, in order to make GTSAM more lean,
/// it is now possible to just typedef Point3 to Vector3
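Concretely, the guarded section that follows boils down to this structure (a paraphrase for readability, not the verbatim header):

#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS
  typedef Vector3 Point3;   // Point3 is then a plain Eigen column vector
#else
  class GTSAM_EXPORT Point3 : public Vector3 {
    // full wrapper class with the constructors and members shown below
  };
#endif
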
|
||||
|
@ -51,8 +51,8 @@ class GTSAM_EXPORT Point3 : public Vector3 {
|
|||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||
// Deprecated default constructor initializes to zero, in contrast to new behavior below
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||
Point3() { setZero(); }
|
||||
#endif
|
||||
|
||||
|
@ -124,9 +124,9 @@ class GTSAM_EXPORT Point3 : public Vector3 {
|
|||
Point3 inverse() const { return -(*this);}
|
||||
Point3 compose(const Point3& q) const { return (*this)+q;}
|
||||
Point3 between(const Point3& q) const { return q-(*this);}
|
||||
Vector3 localCoordinates(const Point3& q) const { return between(q).vector();}
|
||||
Vector3 localCoordinates(const Point3& q) const { return between(q);}
|
||||
Point3 retract(const Vector3& v) const { return compose(Point3(v));}
|
||||
static Vector3 Logmap(const Point3& p) { return p.vector();}
|
||||
static Vector3 Logmap(const Point3& p) { return p;}
|
||||
static Point3 Expmap(const Vector3& v) { return Point3(v);}
|
||||
inline double dist(const Point3& q) const { return (q - *this).norm(); }
|
||||
Point3 normalize(OptionalJacobian<3, 3> H = boost::none) const { return normalized(H);}
|
||||
|
@ -153,19 +153,19 @@ struct traits<Point3> : public internal::VectorSpace<Point3> {};
|
|||
template<>
|
||||
struct traits<const Point3> : public internal::VectorSpace<Point3> {};
|
||||
|
||||
#endif
#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS

// Convenience typedef
typedef std::pair<Point3, Point3> Point3Pair;
std::ostream &operator<<(std::ostream &os, const gtsam::Point3Pair &p);

/// distance between two points
double distance(const Point3& p1, const Point3& q,
                OptionalJacobian<1, 3> H1 = boost::none,
                OptionalJacobian<1, 3> H2 = boost::none);
double distance3(const Point3& p1, const Point3& q,
                 OptionalJacobian<1, 3> H1 = boost::none,
                 OptionalJacobian<1, 3> H2 = boost::none);

/// Distance of the point from the origin, with Jacobian
double norm(const Point3& p, OptionalJacobian<1, 3> H = boost::none);
double norm3(const Point3& p, OptionalJacobian<1, 3> H = boost::none);

/// normalize, with optional Jacobian
Point3 normalize(const Point3& p, OptionalJacobian<3, 3> H = boost::none);
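The renamed free functions are called like this (a brief sketch; the values match the updated tests later in this change):

#include <gtsam/geometry/Point3.h>

void point3Example() {
  using namespace gtsam;
  Point3 p(3, 4, 5);
  Matrix13 H;                                     // 1x3 Jacobian of the norm
  double n = norm3(p, H);                         // sqrt(50)
  Point3 P(1., 12.8, -32.), Q(52.7, 4.9, -13.3);
  double d = distance3(P, Q);                     // ~55.5427 per the tests
  (void)n; (void)d;
}
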
|
||||
|
@ -180,10 +180,6 @@ double dot(const Point3& p, const Point3& q,
|
|||
OptionalJacobian<1, 3> H_p = boost::none,
|
||||
OptionalJacobian<1, 3> H_q = boost::none);
|
||||
|
||||
// Convenience typedef
|
||||
typedef std::pair<Point3, Point3> Point3Pair;
|
||||
std::ostream &operator<<(std::ostream &os, const gtsam::Point3Pair &p);
|
||||
|
||||
template <typename A1, typename A2>
|
||||
struct Range;
|
||||
|
||||
|
@ -193,7 +189,7 @@ struct Range<Point3, Point3> {
|
|||
double operator()(const Point3& p, const Point3& q,
|
||||
OptionalJacobian<1, 3> H1 = boost::none,
|
||||
OptionalJacobian<1, 3> H2 = boost::none) {
|
||||
return distance(p, q, H1, H2);
|
||||
return distance3(p, q, H1, H2);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -53,21 +53,21 @@ void Pose2::print(const string& s) const {
|
|||
|
||||
/* ************************************************************************* */
bool Pose2::equals(const Pose2& q, double tol) const {
  return t_.equals(q.t_, tol) && r_.equals(q.r_, tol);
  return equal_with_abs_tol(t_, q.t_, tol) && r_.equals(q.r_, tol);
}

/* ************************************************************************* */
Pose2 Pose2::Expmap(const Vector3& xi, OptionalJacobian<3, 3> H) {
  if (H) *H = Pose2::ExpmapDerivative(xi);
  assert(xi.size() == 3);
  Point2 v(xi(0),xi(1));
  double w = xi(2);
  if (H) *H = Pose2::ExpmapDerivative(xi);
  const Point2 v(xi(0),xi(1));
  const double w = xi(2);
  if (std::abs(w) < 1e-10)
    return Pose2(xi[0], xi[1], xi[2]);
  else {
    Rot2 R(Rot2::fromAngle(w));
    Point2 v_ortho = R_PI_2 * v; // points towards rot center
    Point2 t = (v_ortho - R.rotate(v_ortho)) / w;
    const Rot2 R(Rot2::fromAngle(w));
    const Point2 v_ortho = R_PI_2 * v; // points towards rot center
    const Point2 t = (v_ortho - R.rotate(v_ortho)) / w;
    return Pose2(R, t);
  }
}
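Expmap maps a tangent vector (dx, dy, dtheta) onto a pose: for near-zero rotation the translation is used directly, otherwise the closed-form SE(2) exponential follows the corresponding circular arc. A short usage sketch (illustrative only):

#include <gtsam/geometry/Pose2.h>

void expmapExample() {
  using namespace gtsam;
  Vector3 xi(1.0, 0.0, 0.3);          // drive 1m forward while turning 0.3 rad
  Pose2 T = Pose2::Expmap(xi);        // pose reached along the corresponding arc
  Vector3 back = Pose2::Logmap(T);    // Logmap recovers xi up to numerics
  (void)back;
}
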
|
||||
|
@ -249,7 +249,7 @@ double Pose2::range(const Point2& point,
|
|||
Point2 d = point - t_;
|
||||
if (!Hpose && !Hpoint) return d.norm();
|
||||
Matrix12 D_r_d;
|
||||
double r = d.norm(D_r_d);
|
||||
double r = norm2(d, D_r_d);
|
||||
if (Hpose) {
|
||||
Matrix23 D_d_pose;
|
||||
D_d_pose << -r_.c(), r_.s(), 0.0,
|
||||
|
@ -267,7 +267,7 @@ double Pose2::range(const Pose2& pose,
|
|||
Point2 d = pose.t() - t_;
|
||||
if (!Hpose && !Hother) return d.norm();
|
||||
Matrix12 D_r_d;
|
||||
double r = d.norm(D_r_d);
|
||||
double r = norm2(d, D_r_d);
|
||||
if (Hpose) {
|
||||
Matrix23 D_d_pose;
|
||||
D_d_pose <<
|
||||
|
@ -311,7 +311,7 @@ boost::optional<Pose2> align(const vector<Point2Pair>& pairs) {
|
|||
if (n<2) return boost::none; // we need at least two pairs
|
||||
|
||||
// calculate centroids
|
||||
Point2 cp,cq;
|
||||
Point2 cp(0,0), cq(0,0);
|
||||
for(const Point2Pair& pair: pairs) {
|
||||
cp += pair.first;
|
||||
cq += pair.second;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -52,7 +52,9 @@ public:
|
|||
/// @{
|
||||
|
||||
/** default constructor = origin */
Pose2() {} // default is origin
Pose2() :
    r_(traits<Rot2>::Identity()), t_(traits<Point2>::Identity()) {
}
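This explicit initialization (like the many Point2(0,0) and Point2 origin(0,0) changes elsewhere in this diff) guards against the vector-typedef build mode, where default construction no longer zeroes the coordinates. A minimal illustration, with the build-mode assumption spelled out in the comment:

#include <gtsam/geometry/Point2.h>

void zeroInitExample() {
  gtsam::Point2 a(0, 0);   // explicit zeros: well-defined under both build modes
  // gtsam::Point2 b;      // with GTSAM_TYPEDEF_POINTS_TO_VECTORS this would be
                           // an uninitialized Eigen vector, not the origin
}
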
|
||||
|
||||
/** copy constructor */
|
||||
Pose2(const Pose2& pose) : r_(pose.r_), t_(pose.t_) {}
|
||||
|
@ -86,7 +88,7 @@ public:
|
|||
/// @{
|
||||
|
||||
/** Construct from canonical coordinates \f$ [T_x,T_y,\theta] \f$ (Lie algebra) */
|
||||
Pose2(const Vector& v) {
|
||||
Pose2(const Vector& v) : Pose2() {
|
||||
*this = Expmap(v);
|
||||
}
|
||||
|
||||
|
|
|
@ -345,7 +345,7 @@ double Pose3::range(const Point3& point, OptionalJacobian<1, 6> H1,
|
|||
return local.norm();
|
||||
} else {
|
||||
Matrix13 D_r_local;
|
||||
const double r = norm(local, D_r_local);
|
||||
const double r = norm3(local, D_r_local);
|
||||
if (H1) *H1 = D_r_local * D_local_pose;
|
||||
if (H2) *H2 = D_r_local * D_local_point;
|
||||
return r;
|
||||
|
|
|
@ -118,6 +118,7 @@ namespace gtsam {
|
|||
* @param q The quaternion
|
||||
*/
|
||||
Rot3(const Quaternion& q);
|
||||
Rot3(double x, double y, double z, double w) : Rot3(Quaternion(x, y, z, w)) {}
|
||||
|
||||
/// Random, generates a random axis, then random angle \in [-pi,pi]
|
||||
static Rot3 Random(boost::mt19937 & rng);
|
||||
|
@ -156,12 +157,17 @@ namespace gtsam {
|
|||
|
||||
/**
 * Returns rotation nRb from body to nav frame.
 * For vehicle coordinate frame X forward, Y right, Z down:
 * Positive yaw is to right (as in aircraft heading).
 * Positive pitch is up (increasing aircraft altitude).
 * Positive roll is to right (increasing yaw in aircraft).
 * Tait-Bryan system from Spatial Reference Model (SRM) (x,y,z) = (roll,pitch,yaw)
 * as described in http://www.sedris.org/wg8home/Documents/WG80462.pdf.
 * Assumes vehicle coordinate frame X forward, Y right, Z down.
 *
 * For vehicle coordinate frame X forward, Y left, Z up:
 * Positive yaw is to left (as in aircraft heading).
 * Positive pitch is down (decreasing aircraft altitude).
 * Positive roll is to right (decreasing yaw in aircraft).
 */
static Rot3 Ypr(double y, double p, double r) { return RzRyRx(r,p,y);}
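Since Ypr just forwards to RzRyRx with the arguments reordered, the two calls below construct the same rotation (a small sanity-check sketch, angles arbitrary):

#include <gtsam/geometry/Rot3.h>

void yprExample() {
  using namespace gtsam;
  const double yaw = 0.1, pitch = -0.2, roll = 0.3;
  Rot3 a = Rot3::Ypr(yaw, pitch, roll);
  Rot3 b = Rot3::RzRyRx(roll, pitch, yaw);  // identical by the definition above
  bool same = a.equals(b, 1e-9);
  (void)same;
}
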
|
||||
|
||||
|
@ -179,7 +185,7 @@ namespace gtsam {
|
|||
*/
|
||||
static Rot3 AxisAngle(const Point3& axis, double angle) {
|
||||
#ifdef GTSAM_USE_QUATERNIONS
|
||||
return gtsam::Quaternion(Eigen::AngleAxis<double>(angle, axis.vector()));
|
||||
return gtsam::Quaternion(Eigen::AngleAxis<double>(angle, axis));
|
||||
#else
|
||||
return Rot3(SO3::AxisAngle(axis,angle));
|
||||
#endif
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -92,7 +92,7 @@ namespace gtsam {
|
|||
const Matrix3 R = matrix();
|
||||
if (H1) *H1 = R * skewSymmetric(-p.x(), -p.y(), -p.z());
|
||||
if (H2) *H2 = R;
|
||||
const Vector3 r = R * p.vector();
|
||||
const Vector3 r = R * p;
|
||||
return Point3(r.x(), r.y(), r.z());
|
||||
}
|
||||
|
||||
|
|
|
@ -78,7 +78,7 @@ public:
|
|||
|
||||
/// Construct from 2D point in plane at focal length f
/// Unit3(p,1) can be viewed as normalized homogeneous coordinates of 2D point
explicit Unit3(const Point2& p, double f = 1.0) : p_(p.x(), p.y(), f) {
explicit Unit3(const Point2& p, double f) : p_(p.x(), p.y(), f) {
  p_.normalize();
}
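With the default focal length gone, callers now pass f explicitly; for example (values are just an illustration):

#include <gtsam/geometry/Unit3.h>

void unit3Example() {
  gtsam::Unit3 bearing(gtsam::Point2(0.1, -0.2), 1.0);  // previously Unit3(p) with the implicit f = 1.0
}
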
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ TEST( Cal3Bundler, calibrate )
|
|||
Point2 pn(0.5, 0.5);
|
||||
Point2 pi = K.uncalibrate(pn);
|
||||
Point2 pn_hat = K.calibrate(pi);
|
||||
CHECK( pn.equals(pn_hat, 1e-5));
|
||||
CHECK( traits<Point2>::Equals(pn, pn_hat, 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -48,7 +48,7 @@ TEST( Cal3DS2, calibrate )
|
|||
Point2 pn(0.5, 0.5);
|
||||
Point2 pi = K.uncalibrate(pn);
|
||||
Point2 pn_hat = K.calibrate(pi);
|
||||
CHECK( pn.equals(pn_hat, 1e-5));
|
||||
CHECK( traits<Point2>::Equals(pn, pn_hat, 1e-5));
|
||||
}
|
||||
|
||||
Point2 uncalibrate_(const Cal3DS2& k, const Point2& pt) { return k.uncalibrate(pt); }
|
||||
|
|
|
@ -59,7 +59,7 @@ TEST( Cal3Unified, calibrate)
|
|||
{
|
||||
Point2 pi = K.uncalibrate(p);
|
||||
Point2 pn_hat = K.calibrate(pi);
|
||||
CHECK( p.equals(pn_hat, 1e-8));
|
||||
CHECK( traits<Point2>::Equals(p, pn_hat, 1e-8));
|
||||
}
|
||||
|
||||
Point2 uncalibrate_(const Cal3Unified& k, const Point2& pt) { return k.uncalibrate(pt); }
|
||||
|
|
|
@ -30,10 +30,10 @@ using namespace gtsam;
|
|||
GTSAM_CONCEPT_MANIFOLD_INST(CalibratedCamera)
|
||||
|
||||
// Camera situated at 0.5 meters high, looking down
|
||||
static const Pose3 pose1(Rot3(Vector3(1, -1, -1).asDiagonal()),
|
||||
static const Pose3 kDefaultPose(Rot3(Vector3(1, -1, -1).asDiagonal()),
|
||||
Point3(0, 0, 0.5));
|
||||
|
||||
static const CalibratedCamera camera(pose1);
|
||||
static const CalibratedCamera camera(kDefaultPose);
|
||||
|
||||
static const Point3 point1(-0.08,-0.08, 0.0);
|
||||
static const Point3 point2(-0.08, 0.08, 0.0);
|
||||
|
@ -43,7 +43,19 @@ static const Point3 point4( 0.08,-0.08, 0.0);
|
|||
/* ************************************************************************* */
|
||||
TEST( CalibratedCamera, constructor)
|
||||
{
|
||||
CHECK(assert_equal( camera.pose(), pose1));
|
||||
CHECK(assert_equal( camera.pose(), kDefaultPose));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(CalibratedCamera, Create) {
|
||||
Matrix actualH;
|
||||
EXPECT(assert_equal(camera, CalibratedCamera::Create(kDefaultPose, actualH)));
|
||||
|
||||
// Check derivative
|
||||
boost::function<CalibratedCamera(Pose3)> f = //
|
||||
boost::bind(CalibratedCamera::Create, _1, boost::none);
|
||||
Matrix numericalH = numericalDerivative11<CalibratedCamera, Pose3>(f, kDefaultPose);
|
||||
EXPECT(assert_equal(numericalH, actualH, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -131,8 +143,8 @@ TEST( CalibratedCamera, Dproject_point_pose)
|
|||
// Add a test with more arbitrary rotation
|
||||
TEST( CalibratedCamera, Dproject_point_pose2)
|
||||
{
|
||||
static const Pose3 pose1(Rot3::Ypr(0.1, -0.1, 0.4), Point3(0, 0, -10));
|
||||
static const CalibratedCamera camera(pose1);
|
||||
static const Pose3 kDefaultPose(Rot3::Ypr(0.1, -0.1, 0.4), Point3(0, 0, -10));
|
||||
static const CalibratedCamera camera(kDefaultPose);
|
||||
Matrix Dpose, Dpoint;
|
||||
camera.project(point1, Dpose, Dpoint);
|
||||
Matrix numerical_pose = numericalDerivative21(project2, camera, point1);
|
||||
|
@ -152,7 +164,7 @@ TEST( CalibratedCamera, Dproject_point_pose_infinity)
|
|||
Point2 result = camera.project2(pointAtInfinity, Dpose, Dpoint);
|
||||
Matrix numerical_pose = numericalDerivative21(projectAtInfinity, camera, pointAtInfinity);
|
||||
Matrix numerical_point = numericalDerivative22(projectAtInfinity, camera, pointAtInfinity);
|
||||
CHECK(assert_equal(Point2(), result));
|
||||
CHECK(assert_equal(Point2(0,0), result));
|
||||
CHECK(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
CHECK(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
}
|
||||
|
@ -161,8 +173,8 @@ TEST( CalibratedCamera, Dproject_point_pose_infinity)
|
|||
// Add a test with more arbitrary rotation
|
||||
TEST( CalibratedCamera, Dproject_point_pose2_infinity)
|
||||
{
|
||||
static const Pose3 pose1(Rot3::Ypr(0.1, -0.1, 0.4), Point3(0, 0, -10));
|
||||
static const CalibratedCamera camera(pose1);
|
||||
static const Pose3 pose(Rot3::Ypr(0.1, -0.1, 0.4), Point3(0, 0, -10));
|
||||
static const CalibratedCamera camera(pose);
|
||||
Matrix Dpose, Dpoint;
|
||||
camera.project2(pointAtInfinity, Dpose, Dpoint);
|
||||
Matrix numerical_pose = numericalDerivative21(projectAtInfinity, camera, pointAtInfinity);
|
||||
|
|
|
@ -44,7 +44,7 @@ TEST(CameraSet, Pinhole) {
|
|||
EXPECT(!set.equals(set2));
|
||||
|
||||
// Check measurements
|
||||
Point2 expected;
|
||||
Point2 expected(0,0);
|
||||
ZZ z = set.project2(p);
|
||||
EXPECT(assert_equal(expected, z[0]));
|
||||
EXPECT(assert_equal(expected, z[1]));
|
||||
|
@ -90,7 +90,7 @@ TEST(CameraSet, Pinhole) {
|
|||
Vector v = Ft * (b - E * P * Et * b);
|
||||
schur << Ft * F - Ft * E * P * Et * F, v, v.transpose(), 30;
|
||||
SymmetricBlockMatrix actualReduced = Set::SchurComplement(Fs, E, P, b);
|
||||
EXPECT(assert_equal(schur, actualReduced.matrix()));
|
||||
EXPECT(assert_equal(schur, actualReduced.selfadjointView()));
|
||||
|
||||
// Check Schur complement update, same order, should just double
|
||||
FastVector<Key> allKeys, keys;
|
||||
|
@ -99,7 +99,7 @@ TEST(CameraSet, Pinhole) {
|
|||
keys.push_back(1);
|
||||
keys.push_back(2);
|
||||
Set::UpdateSchurComplement(Fs, E, P, b, allKeys, keys, actualReduced);
|
||||
EXPECT(assert_equal((Matrix )(2.0 * schur), actualReduced.matrix()));
|
||||
EXPECT(assert_equal((Matrix )(2.0 * schur), actualReduced.selfadjointView()));
|
||||
|
||||
// Check Schur complement update, keys reversed
|
||||
FastVector<Key> keys2;
|
||||
|
@ -111,13 +111,13 @@ TEST(CameraSet, Pinhole) {
|
|||
Vector reverse_v = Ft * (reverse_b - E * P * Et * reverse_b);
|
||||
Matrix A(19, 19);
|
||||
A << Ft * F - Ft * E * P * Et * F, reverse_v, reverse_v.transpose(), 30;
|
||||
EXPECT(assert_equal((Matrix )(2.0 * schur + A), actualReduced.matrix()));
|
||||
EXPECT(assert_equal((Matrix )(2.0 * schur + A), actualReduced.selfadjointView()));
|
||||
|
||||
// reprojectionErrorAtInfinity
|
||||
Unit3 pointAtInfinity(0, 0, 1000);
|
||||
EXPECT(
|
||||
assert_equal(pointAtInfinity,
|
||||
camera.backprojectPointAtInfinity(Point2())));
|
||||
camera.backprojectPointAtInfinity(Point2(0,0))));
|
||||
actualV = set.reprojectionError(pointAtInfinity, measured, Fs, E);
|
||||
EXPECT(assert_equal(expectedV, actualV));
|
||||
LONGS_EQUAL(2, Fs.size());
|
||||
|
|
|
@ -153,12 +153,12 @@ TEST( PinholeCamera, backproject2)
|
|||
Rot3 rot(1., 0., 0., 0., 0., 1., 0., -1., 0.); // a camera1 looking down
|
||||
Camera camera(Pose3(rot, origin), K);
|
||||
|
||||
Point3 actual = camera.backproject(Point2(), 1.);
|
||||
Point3 actual = camera.backproject(Point2(0,0), 1.);
|
||||
Point3 expected(0., 1., 0.);
|
||||
pair<Point2, bool> x = camera.projectSafe(expected);
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
EXPECT(assert_equal(Point2(), x.first));
|
||||
EXPECT(assert_equal(Point2(0,0), x.first));
|
||||
EXPECT(x.second);
|
||||
}
|
||||
|
||||
|
@ -169,12 +169,12 @@ TEST( PinholeCamera, backprojectInfinity2)
|
|||
Rot3 rot(1., 0., 0., 0., 0., 1., 0., -1., 0.); // a camera1 looking down
|
||||
Camera camera(Pose3(rot, origin), K);
|
||||
|
||||
Unit3 actual = camera.backprojectPointAtInfinity(Point2());
|
||||
Unit3 actual = camera.backprojectPointAtInfinity(Point2(0,0));
|
||||
Unit3 expected(0., 1., 0.);
|
||||
Point2 x = camera.project(expected);
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
EXPECT(assert_equal(Point2(), x));
|
||||
EXPECT(assert_equal(Point2(0,0), x));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -184,12 +184,12 @@ TEST( PinholeCamera, backprojectInfinity3)
|
|||
Rot3 rot(1., 0., 0., 0., 1., 0., 0., 0., 1.); // identity
|
||||
Camera camera(Pose3(rot, origin), K);
|
||||
|
||||
Unit3 actual = camera.backprojectPointAtInfinity(Point2());
|
||||
Unit3 actual = camera.backprojectPointAtInfinity(Point2(0,0));
|
||||
Unit3 expected(0., 0., 1.);
|
||||
Point2 x = camera.project(expected);
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
EXPECT(assert_equal(Point2(), x));
|
||||
EXPECT(assert_equal(Point2(0,0), x));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -278,7 +278,7 @@ TEST( PinholeCamera, range0) {
|
|||
double result = camera.range(point1, D1, D2);
|
||||
Matrix Hexpected1 = numericalDerivative21(range0, camera, point1);
|
||||
Matrix Hexpected2 = numericalDerivative22(range0, camera, point1);
|
||||
EXPECT_DOUBLES_EQUAL(distance(point1, camera.pose().translation()), result,
|
||||
EXPECT_DOUBLES_EQUAL(distance3(point1, camera.pose().translation()), result,
|
||||
1e-9);
|
||||
EXPECT(assert_equal(Hexpected1, D1, 1e-7));
|
||||
EXPECT(assert_equal(Hexpected2, D2, 1e-7));
|
||||
|
|
|
@ -124,12 +124,12 @@ TEST( PinholePose, backproject2)
|
|||
Rot3 rot(1., 0., 0., 0., 0., 1., 0., -1., 0.); // a camera1 looking down
|
||||
Camera camera(Pose3(rot, origin), K);
|
||||
|
||||
Point3 actual = camera.backproject(Point2(), 1.);
|
||||
Point3 actual = camera.backproject(Point2(0,0), 1.);
|
||||
Point3 expected(0., 1., 0.);
|
||||
pair<Point2, bool> x = camera.projectSafe(expected);
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
EXPECT(assert_equal(Point2(), x.first));
|
||||
EXPECT(assert_equal(Point2(0,0), x.first));
|
||||
EXPECT(x.second);
|
||||
}
|
||||
|
||||
|
@ -212,7 +212,7 @@ TEST( PinholePose, range0) {
|
|||
double result = camera.range(point1, D1, D2);
|
||||
Matrix expectedDcamera = numericalDerivative21(range0, camera, point1);
|
||||
Matrix expectedDpoint = numericalDerivative22(range0, camera, point1);
|
||||
EXPECT_DOUBLES_EQUAL(distance(point1, camera.pose().translation()), result, 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(distance3(point1, camera.pose().translation()), result, 1e-9);
|
||||
EXPECT(assert_equal(expectedDcamera, D1, 1e-7));
|
||||
EXPECT(assert_equal(expectedDpoint, D2, 1e-7));
|
||||
}
|
||||
|
|
|
@ -84,7 +84,7 @@ TEST(PinholeSet, Pinhole) {
|
|||
EXPECT(!set.equals(set2));
|
||||
|
||||
// Check measurements
|
||||
Point2 expected;
|
||||
Point2 expected(0,0);
|
||||
ZZ z = set.project2(p);
|
||||
EXPECT(assert_equal(expected, z[0]));
|
||||
EXPECT(assert_equal(expected, z[1]));
|
||||
|
@ -131,7 +131,7 @@ TEST(PinholeSet, Pinhole) {
|
|||
}
|
||||
EXPECT(
|
||||
assert_equal(pointAtInfinity,
|
||||
camera.backprojectPointAtInfinity(Point2())));
|
||||
camera.backprojectPointAtInfinity(Point2(0,0))));
|
||||
{
|
||||
PinholeSet<Camera>::FBlocks Fs;
|
||||
Matrix E;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -27,6 +27,11 @@ using namespace gtsam;
|
|||
GTSAM_CONCEPT_TESTABLE_INST(Point2)
|
||||
GTSAM_CONCEPT_LIE_INST(Point2)
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Point2 , Constructor) {
|
||||
Point2 p;
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Double , Concept) {
|
||||
BOOST_CONCEPT_ASSERT((IsGroup<double>));
|
||||
|
@ -95,26 +100,26 @@ TEST( Point2, expmap) {
|
|||
|
||||
/* ************************************************************************* */
|
||||
TEST( Point2, arithmetic) {
|
||||
EXPECT(assert_equal( Point2(-5,-6), -Point2(5,6) ));
|
||||
EXPECT(assert_equal( Point2(5,6), Point2(4,5)+Point2(1,1)));
|
||||
EXPECT(assert_equal( Point2(3,4), Point2(4,5)-Point2(1,1) ));
|
||||
EXPECT(assert_equal( Point2(8,6), Point2(4,3)*2));
|
||||
EXPECT(assert_equal( Point2(4,6), 2*Point2(2,3)));
|
||||
EXPECT(assert_equal( Point2(2,3), Point2(4,6)/2));
|
||||
EXPECT(assert_equal<Point2>( Point2(-5,-6), -Point2(5,6) ));
|
||||
EXPECT(assert_equal<Point2>( Point2(5,6), Point2(4,5)+Point2(1,1)));
|
||||
EXPECT(assert_equal<Point2>( Point2(3,4), Point2(4,5)-Point2(1,1) ));
|
||||
EXPECT(assert_equal<Point2>( Point2(8,6), Point2(4,3)*2));
|
||||
EXPECT(assert_equal<Point2>( Point2(4,6), 2*Point2(2,3)));
|
||||
EXPECT(assert_equal<Point2>( Point2(2,3), Point2(4,6)/2));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( Point2, unit) {
|
||||
Point2 p0(10, 0), p1(0, -10), p2(10, 10);
|
||||
EXPECT(assert_equal(Point2(1, 0), p0.unit(), 1e-6));
|
||||
EXPECT(assert_equal(Point2(0,-1), p1.unit(), 1e-6));
|
||||
EXPECT(assert_equal(Point2(sqrt(2.0)/2.0, sqrt(2.0)/2.0), p2.unit(), 1e-6));
|
||||
EXPECT(assert_equal(Point2(1, 0), Point2(p0.normalized()), 1e-6));
|
||||
EXPECT(assert_equal(Point2(0,-1), Point2(p1.normalized()), 1e-6));
|
||||
EXPECT(assert_equal(Point2(sqrt(2.0)/2.0, sqrt(2.0)/2.0), Point2(p2.normalized()), 1e-6));
|
||||
}
|
||||
|
||||
namespace {
|
||||
/* ************************************************************************* */
|
||||
// some shared test values
|
||||
Point2 x1, x2(1, 1), x3(1, 1);
|
||||
Point2 x1(0,0), x2(1, 1), x3(1, 1);
|
||||
Point2 l1(1, 0), l2(1, 1), l3(2, 2), l4(1, 3);
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -126,19 +131,19 @@ TEST( Point2, norm ) {
|
|||
Point2 p0(cos(5.0), sin(5.0));
|
||||
DOUBLES_EQUAL(1, p0.norm(), 1e-6);
|
||||
Point2 p1(4, 5), p2(1, 1);
|
||||
DOUBLES_EQUAL( 5, p1.distance(p2), 1e-6);
|
||||
DOUBLES_EQUAL( 5, distance2(p1, p2), 1e-6);
|
||||
DOUBLES_EQUAL( 5, (p2-p1).norm(), 1e-6);
|
||||
|
||||
Matrix expectedH, actualH;
|
||||
double actual;
|
||||
|
||||
// exception, for (0,0) derivative is [Inf,Inf] but we return [1,1]
|
||||
actual = x1.norm(actualH);
|
||||
actual = norm2(x1, actualH);
|
||||
EXPECT_DOUBLES_EQUAL(0, actual, 1e-9);
|
||||
expectedH = (Matrix(1, 2) << 1.0, 1.0).finished();
|
||||
EXPECT(assert_equal(expectedH,actualH));
|
||||
|
||||
actual = x2.norm(actualH);
|
||||
actual = norm2(x2, actualH);
|
||||
EXPECT_DOUBLES_EQUAL(sqrt(2.0), actual, 1e-9);
|
||||
expectedH = numericalDerivative11(norm_proxy, x2);
|
||||
EXPECT(assert_equal(expectedH,actualH));
|
||||
|
@ -151,20 +156,20 @@ TEST( Point2, norm ) {
|
|||
/* ************************************************************************* */
|
||||
namespace {
|
||||
double distance_proxy(const Point2& location, const Point2& point) {
|
||||
return location.distance(point);
|
||||
return distance2(location, point);
|
||||
}
|
||||
}
|
||||
TEST( Point2, distance ) {
|
||||
Matrix expectedH1, actualH1, expectedH2, actualH2;
|
||||
|
||||
// establish distance is indeed zero
|
||||
EXPECT_DOUBLES_EQUAL(1, x1.distance(l1), 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(1, distance2(x1, l1), 1e-9);
|
||||
|
||||
// establish distance is indeed 45 degrees
|
||||
EXPECT_DOUBLES_EQUAL(sqrt(2.0), x1.distance(l2), 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(sqrt(2.0), distance2(x1, l2), 1e-9);
|
||||
|
||||
// Another pair
|
||||
double actual23 = x2.distance(l3, actualH1, actualH2);
|
||||
double actual23 = distance2(x2, l3, actualH1, actualH2);
|
||||
EXPECT_DOUBLES_EQUAL(sqrt(2.0), actual23, 1e-9);
|
||||
|
||||
// Check numerical derivatives
|
||||
|
@ -174,7 +179,7 @@ TEST( Point2, distance ) {
|
|||
EXPECT(assert_equal(expectedH2,actualH2));
|
||||
|
||||
// Another test
|
||||
double actual34 = x3.distance(l4, actualH1, actualH2);
|
||||
double actual34 = distance2(x3, l4, actualH1, actualH2);
|
||||
EXPECT_DOUBLES_EQUAL(2, actual34, 1e-9);
|
||||
|
||||
// Check numerical derivatives
|
||||
|
@ -190,42 +195,42 @@ TEST( Point2, circleCircleIntersection) {
|
|||
double offset = 0.994987;
|
||||
// Test intersections of circle moving from inside to outside
|
||||
|
||||
list<Point2> inside = Point2::CircleCircleIntersection(Point2(0,0),5,Point2(0,0),1);
|
||||
list<Point2> inside = circleCircleIntersection(Point2(0,0),5,Point2(0,0),1);
|
||||
EXPECT_LONGS_EQUAL(0,inside.size());
|
||||
|
||||
list<Point2> touching1 = Point2::CircleCircleIntersection(Point2(0,0),5,Point2(4,0),1);
|
||||
list<Point2> touching1 = circleCircleIntersection(Point2(0,0),5,Point2(4,0),1);
|
||||
EXPECT_LONGS_EQUAL(1,touching1.size());
|
||||
EXPECT(assert_equal(Point2(5,0), touching1.front()));
|
||||
|
||||
list<Point2> common = Point2::CircleCircleIntersection(Point2(0,0),5,Point2(5,0),1);
|
||||
list<Point2> common = circleCircleIntersection(Point2(0,0),5,Point2(5,0),1);
|
||||
EXPECT_LONGS_EQUAL(2,common.size());
|
||||
EXPECT(assert_equal(Point2(4.9, offset), common.front(), 1e-6));
|
||||
EXPECT(assert_equal(Point2(4.9, -offset), common.back(), 1e-6));
|
||||
|
||||
list<Point2> touching2 = Point2::CircleCircleIntersection(Point2(0,0),5,Point2(6,0),1);
|
||||
list<Point2> touching2 = circleCircleIntersection(Point2(0,0),5,Point2(6,0),1);
|
||||
EXPECT_LONGS_EQUAL(1,touching2.size());
|
||||
EXPECT(assert_equal(Point2(5,0), touching2.front()));
|
||||
|
||||
// test rotated case
|
||||
list<Point2> rotated = Point2::CircleCircleIntersection(Point2(0,0),5,Point2(0,5),1);
|
||||
list<Point2> rotated = circleCircleIntersection(Point2(0,0),5,Point2(0,5),1);
|
||||
EXPECT_LONGS_EQUAL(2,rotated.size());
|
||||
EXPECT(assert_equal(Point2(-offset, 4.9), rotated.front(), 1e-6));
|
||||
EXPECT(assert_equal(Point2( offset, 4.9), rotated.back(), 1e-6));
|
||||
|
||||
// test r1<r2
|
||||
list<Point2> smaller = Point2::CircleCircleIntersection(Point2(0,0),1,Point2(5,0),5);
|
||||
list<Point2> smaller = circleCircleIntersection(Point2(0,0),1,Point2(5,0),5);
|
||||
EXPECT_LONGS_EQUAL(2,smaller.size());
|
||||
EXPECT(assert_equal(Point2(0.1, offset), smaller.front(), 1e-6));
|
||||
EXPECT(assert_equal(Point2(0.1, -offset), smaller.back(), 1e-6));
|
||||
|
||||
// test offset case, r1>r2
|
||||
list<Point2> offset1 = Point2::CircleCircleIntersection(Point2(1,1),5,Point2(6,1),1);
|
||||
list<Point2> offset1 = circleCircleIntersection(Point2(1,1),5,Point2(6,1),1);
|
||||
EXPECT_LONGS_EQUAL(2,offset1.size());
|
||||
EXPECT(assert_equal(Point2(5.9, 1+offset), offset1.front(), 1e-6));
|
||||
EXPECT(assert_equal(Point2(5.9, 1-offset), offset1.back(), 1e-6));
|
||||
|
||||
// test offset case, r1<r2
|
||||
list<Point2> offset2 = Point2::CircleCircleIntersection(Point2(6,1),1,Point2(1,1),5);
|
||||
list<Point2> offset2 = circleCircleIntersection(Point2(6,1),1,Point2(1,1),5);
|
||||
EXPECT_LONGS_EQUAL(2,offset2.size());
|
||||
EXPECT(assert_equal(Point2(5.9, 1-offset), offset2.front(), 1e-6));
|
||||
EXPECT(assert_equal(Point2(5.9, 1+offset), offset2.back(), 1e-6));
|
||||
|
@ -233,12 +238,14 @@ TEST( Point2, circleCircleIntersection) {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS
|
||||
TEST( Point2, stream) {
|
||||
Point2 p(1, 2);
|
||||
std::ostringstream os;
|
||||
os << p;
|
||||
EXPECT(os.str() == "(1, 2)");
|
||||
}
|
||||
#endif
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main () {
|
||||
|
|
|
@ -26,6 +26,11 @@ GTSAM_CONCEPT_LIE_INST(Point3)
|
|||
|
||||
static Point3 P(0.2, 0.7, -2);
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Point3 , Constructor) {
|
||||
Point3 p;
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Point3 , Concept) {
|
||||
BOOST_CONCEPT_ASSERT((IsGroup<Point3>));
|
||||
|
@ -149,12 +154,12 @@ TEST( Point3, cross2) {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
#ifndef GTSAM_USE_VECTOR3_POINTS
|
||||
#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS
|
||||
TEST( Point3, stream) {
|
||||
Point3 p(1, 2, -3);
|
||||
std::ostringstream os;
|
||||
os << p;
|
||||
EXPECT(os.str() == "[1, 2, -3]';");
|
||||
EXPECT(os.str() == "[1, 2, -3]'");
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -178,20 +183,20 @@ TEST (Point3, norm) {
|
|||
Matrix actualH;
|
||||
Point3 point(3,4,5); // arbitrary point
|
||||
double expected = sqrt(50);
|
||||
EXPECT_DOUBLES_EQUAL(expected, norm(point, actualH), 1e-8);
|
||||
EXPECT_DOUBLES_EQUAL(expected, norm3(point, actualH), 1e-8);
|
||||
Matrix expectedH = numericalDerivative11<double, Point3>(norm_proxy, point);
|
||||
EXPECT(assert_equal(expectedH, actualH, 1e-8));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double testFunc(const Point3& P, const Point3& Q) {
|
||||
return distance(P,Q);
|
||||
return distance3(P, Q);
|
||||
}
|
||||
|
||||
TEST (Point3, distance) {
|
||||
Point3 P(1., 12.8, -32.), Q(52.7, 4.9, -13.3);
|
||||
Matrix H1, H2;
|
||||
double d = distance(P, Q, H1, H2);
|
||||
double d = distance3(P, Q, H1, H2);
|
||||
double expectedDistance = 55.542686;
|
||||
Matrix numH1 = numericalDerivative21(testFunc, P, Q);
|
||||
Matrix numH2 = numericalDerivative22(testFunc, P, Q);
|
||||
|
|
|
@ -43,7 +43,7 @@ TEST(Pose2 , Concept) {
|
|||
|
||||
/* ************************************************************************* */
|
||||
TEST(Pose2, constructors) {
|
||||
Point2 p;
|
||||
Point2 p(0,0);
|
||||
Pose2 pose(0,p);
|
||||
Pose2 origin;
|
||||
assert_equal(pose,origin);
|
||||
|
@ -371,7 +371,7 @@ TEST(Pose2, compose_c)
|
|||
/* ************************************************************************* */
|
||||
TEST(Pose2, inverse )
|
||||
{
|
||||
Point2 origin, t(1,2);
|
||||
Point2 origin(0,0), t(1,2);
|
||||
Pose2 gTl(M_PI/2.0, t); // robot at (1,2) looking towards y
|
||||
|
||||
Pose2 identity, lTg = gTl.inverse();
|
||||
|
@ -409,7 +409,7 @@ namespace {
|
|||
/* ************************************************************************* */
|
||||
TEST( Pose2, matrix )
|
||||
{
|
||||
Point2 origin, t(1,2);
|
||||
Point2 origin(0,0), t(1,2);
|
||||
Pose2 gTl(M_PI/2.0, t); // robot at (1,2) looking towards y
|
||||
Matrix gMl = matrix(gTl);
|
||||
EXPECT(assert_equal((Matrix(3,3) <<
|
||||
|
@ -743,7 +743,7 @@ namespace {
|
|||
/* ************************************************************************* */
|
||||
struct Triangle { size_t i_,j_,k_;};
|
||||
|
||||
boost::optional<Pose2> align(const vector<Point2>& ps, const vector<Point2>& qs,
|
||||
boost::optional<Pose2> align2(const vector<Point2>& ps, const vector<Point2>& qs,
|
||||
const pair<Triangle, Triangle>& trianglePair) {
|
||||
const Triangle& t1 = trianglePair.first, t2 = trianglePair.second;
|
||||
vector<Point2Pair> correspondences;
|
||||
|
@ -762,7 +762,7 @@ TEST(Pose2, align_4) {
|
|||
Triangle t1; t1.i_=0; t1.j_=1; t1.k_=2;
|
||||
Triangle t2; t2.i_=1; t2.j_=2; t2.k_=0;
|
||||
|
||||
boost::optional<Pose2> actual = align(ps, qs, make_pair(t1,t2));
|
||||
boost::optional<Pose2> actual = align2(ps, qs, make_pair(t1,t2));
|
||||
EXPECT(assert_equal(expected, *actual));
|
||||
}
|
||||
|
||||
|
|
|
@ -231,9 +231,9 @@ TEST(Rot3, retract_localCoordinates)
|
|||
/* ************************************************************************* */
|
||||
TEST(Rot3, expmap_logmap)
|
||||
{
|
||||
Vector3 d12 = Vector3::Constant(0.1);
|
||||
Vector d12 = Vector3::Constant(0.1);
|
||||
Rot3 R2 = R.expmap(d12);
|
||||
EXPECT(assert_equal(d12, (Vector) R.logmap(R2)));
|
||||
EXPECT(assert_equal(d12, R.logmap(R2)));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -104,12 +104,12 @@ TEST( SimpleCamera, backproject2)
|
|||
Rot3 rot(1., 0., 0., 0., 0., 1., 0., -1., 0.); // a camera looking down
|
||||
SimpleCamera camera(Pose3(rot, origin), K);
|
||||
|
||||
Point3 actual = camera.backproject(Point2(), 1.);
|
||||
Point3 actual = camera.backproject(Point2(0,0), 1.);
|
||||
Point3 expected(0., 1., 0.);
|
||||
pair<Point2, bool> x = camera.projectSafe(expected);
|
||||
|
||||
CHECK(assert_equal(expected, actual));
|
||||
CHECK(assert_equal(Point2(), x.first));
|
||||
CHECK(assert_equal(Point2(0,0), x.first));
|
||||
CHECK(x.second);
|
||||
}
|
||||
|
||||
|
|
|
@ -273,7 +273,7 @@ TEST( triangulation, onePose) {
|
|||
vector<Point2> measurements;
|
||||
|
||||
poses += Pose3();
|
||||
measurements += Point2();
|
||||
measurements += Point2(0,0);
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
|
@ -282,7 +282,7 @@ TEST( triangulation, onePose) {
|
|||
//******************************************************************************
|
||||
TEST( triangulation, StereotriangulateNonlinear ) {
|
||||
|
||||
Cal3_S2Stereo::shared_ptr stereoK(new Cal3_S2Stereo(1733.75, 1733.75, 0, 689.645, 508.835, 0.0699612));
|
||||
auto stereoK = boost::make_shared<Cal3_S2Stereo>(1733.75, 1733.75, 0, 689.645, 508.835, 0.0699612);
|
||||
|
||||
// two camera poses m1, m2
|
||||
Matrix4 m1, m2;
|
||||
|
|
|
@ -112,7 +112,8 @@ std::pair<NonlinearFactorGraph, Values> triangulationGraph(
|
|||
Values values;
|
||||
values.insert(landmarkKey, initialEstimate); // Initial landmark value
|
||||
NonlinearFactorGraph graph;
|
||||
static SharedNoiseModel unit(noiseModel::Unit::Create(CAMERA::Measurement::dimension));
|
||||
static SharedNoiseModel unit(noiseModel::Unit::Create(
|
||||
traits<typename CAMERA::Measurement>::dimension));
|
||||
for (size_t i = 0; i < measurements.size(); i++) {
|
||||
const CAMERA& camera_i = cameras[i];
|
||||
graph.push_back(TriangulationFactor<CAMERA> //
|
||||
|
@ -457,7 +458,7 @@ TriangulationResult triangulateSafe(const std::vector<CAMERA>& cameras,
|
|||
for(const CAMERA& camera: cameras) {
|
||||
const Pose3& pose = camera.pose();
|
||||
if (params.landmarkDistanceThreshold > 0
|
||||
&& distance(pose.translation(), point)
|
||||
&& distance3(pose.translation(), point)
|
||||
> params.landmarkDistanceThreshold)
|
||||
return TriangulationResult::Degenerate();
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
|
@ -471,7 +472,7 @@ TriangulationResult triangulateSafe(const std::vector<CAMERA>& cameras,
|
|||
if (params.dynamicOutlierRejectionThreshold > 0) {
|
||||
const Point2& zi = measured.at(i);
|
||||
Point2 reprojectionError(camera.project(point) - zi);
|
||||
totalReprojError += reprojectionError.vector().norm();
|
||||
totalReprojError += reprojectionError.norm();
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
|
|
@ -30,7 +30,7 @@ namespace gtsam {
|
|||
|
||||
// Forward declarations
|
||||
template<class FACTOR> class FactorGraph;
|
||||
template<class BAYESTREE, class GRAPH> class ClusterTree;
|
||||
template<class BAYESTREE, class GRAPH> class EliminatableClusterTree;
|
||||
|
||||
/* ************************************************************************* */
|
||||
/** clique statistics */
|
||||
|
@ -247,7 +247,7 @@ namespace gtsam {
|
|||
void fillNodesIndex(const sharedClique& subtree);
|
||||
|
||||
// Friend JunctionTree because it directly fills roots and nodes index.
|
||||
template<class BAYESRTEE, class GRAPH> friend class ClusterTree;
|
||||
template<class BAYESRTEE, class GRAPH> friend class EliminatableClusterTree;
|
||||
|
||||
private:
|
||||
/** Serialization function */
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* @file ClusterTree-inst.h
|
||||
* @file EliminatableClusterTree-inst.h
|
||||
* @date Oct 8, 2013
|
||||
* @author Kai Ni
|
||||
* @author Richard Roberts
|
||||
|
@ -7,16 +7,102 @@
|
|||
* @brief Collects factorgraph fragments defined on variable clusters, arranged in a tree
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/inference/ClusterTree.h>
|
||||
#include <gtsam/inference/BayesTree.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
#include <gtsam/base/timing.h>
|
||||
#include <gtsam/base/treeTraversal-inst.h>
|
||||
|
||||
#include <boost/bind.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class GRAPH>
|
||||
void ClusterTree<GRAPH>::Cluster::print(const std::string& s,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
std::cout << s << " (" << problemSize_ << ")";
|
||||
PrintKeyVector(orderedFrontalKeys);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class GRAPH>
|
||||
std::vector<size_t> ClusterTree<GRAPH>::Cluster::nrFrontalsOfChildren() const {
|
||||
std::vector<size_t> nrFrontals;
|
||||
nrFrontals.reserve(nrChildren());
|
||||
for (const sharedNode& child : children)
|
||||
nrFrontals.push_back(child->nrFrontals());
|
||||
return nrFrontals;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class GRAPH>
|
||||
void ClusterTree<GRAPH>::Cluster::merge(const boost::shared_ptr<Cluster>& cluster) {
|
||||
// Merge keys. For efficiency, we add keys in reverse order at end, calling reverse after..
|
||||
orderedFrontalKeys.insert(orderedFrontalKeys.end(), cluster->orderedFrontalKeys.rbegin(),
|
||||
cluster->orderedFrontalKeys.rend());
|
||||
factors.push_back(cluster->factors);
|
||||
children.insert(children.end(), cluster->children.begin(), cluster->children.end());
|
||||
// Increment problem size
|
||||
problemSize_ = std::max(problemSize_, cluster->problemSize_);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class GRAPH>
|
||||
void ClusterTree<GRAPH>::Cluster::mergeChildren(
|
||||
const std::vector<bool>& merge) {
|
||||
gttic(Cluster_mergeChildren);
|
||||
assert(merge.size() == this->children.size());
|
||||
|
||||
// Count how many keys, factors and children we'll end up with
|
||||
size_t nrKeys = orderedFrontalKeys.size();
|
||||
size_t nrFactors = factors.size();
|
||||
size_t nrNewChildren = 0;
|
||||
// Loop over children
|
||||
size_t i = 0;
|
||||
for(const sharedNode& child: this->children) {
|
||||
if (merge[i]) {
|
||||
nrKeys += child->orderedFrontalKeys.size();
|
||||
nrFactors += child->factors.size();
|
||||
nrNewChildren += child->nrChildren();
|
||||
} else {
|
||||
nrNewChildren += 1; // we keep the child
|
||||
}
|
||||
++i;
|
||||
}
|
||||
|
||||
// now reserve space, and really merge
|
||||
auto oldChildren = this->children;
|
||||
this->children.clear();
|
||||
this->children.reserve(nrNewChildren);
|
||||
orderedFrontalKeys.reserve(nrKeys);
|
||||
factors.reserve(nrFactors);
|
||||
i = 0;
|
||||
for (const sharedNode& child : oldChildren) {
|
||||
if (merge[i]) {
|
||||
this->merge(child);
|
||||
} else {
|
||||
this->addChild(child); // we keep the child
|
||||
}
|
||||
++i;
|
||||
}
|
||||
std::reverse(orderedFrontalKeys.begin(), orderedFrontalKeys.end());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class GRAPH>
|
||||
void ClusterTree<GRAPH>::print(const std::string& s, const KeyFormatter& keyFormatter) const {
|
||||
treeTraversal::PrintForest(*this, s, keyFormatter);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template <class GRAPH>
|
||||
ClusterTree<GRAPH>& ClusterTree<GRAPH>::operator=(const This& other) {
|
||||
// Start by duplicating the tree.
|
||||
roots_ = treeTraversal::CloneForest(other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Elimination traversal data - stores a pointer to the parent data and collects
|
||||
// the factors resulting from elimination of the children. Also sets up BayesTree
|
||||
|
@ -34,6 +120,7 @@ struct EliminationData {
|
|||
size_t myIndexInParent;
|
||||
FastVector<sharedFactor> childFactors;
|
||||
boost::shared_ptr<BTNode> bayesTreeNode;
|
||||
|
||||
EliminationData(EliminationData* _parentData, size_t nChildren) :
|
||||
parentData(_parentData), bayesTreeNode(boost::make_shared<BTNode>()) {
|
||||
if (parentData) {
|
||||
|
@ -55,7 +142,7 @@ struct EliminationData {
|
|||
const typename CLUSTERTREE::sharedNode& node,
|
||||
EliminationData& parentData) {
|
||||
assert(node);
|
||||
EliminationData myData(&parentData, node->children.size());
|
||||
EliminationData myData(&parentData, node->nrChildren());
|
||||
myData.bayesTreeNode->problemSize_ = node->problemSize();
|
||||
return myData;
|
||||
}
|
||||
|
@ -75,120 +162,51 @@ struct EliminationData {
|
|||
}
|
||||
|
||||
// Function that does the HEAVY lifting
|
||||
void operator()(const typename CLUSTERTREE::sharedNode& node,
|
||||
EliminationData& myData) {
|
||||
void operator()(const typename CLUSTERTREE::sharedNode& node, EliminationData& myData) {
|
||||
assert(node);
|
||||
|
||||
// Gather factors
|
||||
FactorGraphType gatheredFactors;
|
||||
gatheredFactors.reserve(node->factors.size() + node->children.size());
|
||||
gatheredFactors.reserve(node->factors.size() + node->nrChildren());
|
||||
gatheredFactors += node->factors;
|
||||
gatheredFactors += myData.childFactors;
|
||||
|
||||
// Check for Bayes tree orphan subtrees, and add them to our children
|
||||
for(const sharedFactor& f: node->factors) {
|
||||
if (const BayesTreeOrphanWrapper<BTNode>* asSubtree =
|
||||
dynamic_cast<const BayesTreeOrphanWrapper<BTNode>*>(f.get())) {
|
||||
// TODO(frank): should this really happen here?
|
||||
for (const sharedFactor& factor: node->factors) {
|
||||
auto asSubtree = dynamic_cast<const BayesTreeOrphanWrapper<BTNode>*>(factor.get());
|
||||
if (asSubtree) {
|
||||
myData.bayesTreeNode->children.push_back(asSubtree->clique);
|
||||
asSubtree->clique->parent_ = myData.bayesTreeNode;
|
||||
}
|
||||
}
|
||||
|
||||
// >>>>>>>>>>>>>> Do dense elimination step >>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
||||
std::pair<boost::shared_ptr<ConditionalType>,
|
||||
boost::shared_ptr<FactorType> > eliminationResult =
|
||||
eliminationFunction_(gatheredFactors, node->orderedFrontalKeys);
|
||||
auto eliminationResult = eliminationFunction_(gatheredFactors, node->orderedFrontalKeys);
|
||||
// <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
|
||||
|
||||
// Store conditional in BayesTree clique, and in the case of ISAM2Clique also store the remaining factor
|
||||
// Store conditional in BayesTree clique, and in the case of ISAM2Clique also store the
|
||||
// remaining factor
|
||||
myData.bayesTreeNode->setEliminationResult(eliminationResult);
|
||||
|
||||
// Fill nodes index - we do this here instead of calling insertRoot at the end to avoid
|
||||
// putting orphan subtrees in the index - they'll already be in the index of the ISAM2
|
||||
// object they're added to.
|
||||
for(const Key& j: myData.bayesTreeNode->conditional()->frontals())
|
||||
for (const Key& j: myData.bayesTreeNode->conditional()->frontals())
|
||||
nodesIndex_.insert(std::make_pair(j, myData.bayesTreeNode));
|
||||
|
||||
// Store remaining factor in parent's gathered factors
|
||||
if (!eliminationResult.second->empty())
|
||||
myData.parentData->childFactors[myData.myIndexInParent] =
|
||||
eliminationResult.second;
|
||||
myData.parentData->childFactors[myData.myIndexInParent] = eliminationResult.second;
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class BAYESTREE, class GRAPH>
|
||||
void ClusterTree<BAYESTREE, GRAPH>::Cluster::print(const std::string& s,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
std::cout << s << " (" << problemSize_ << ")";
|
||||
PrintKeyVector(orderedFrontalKeys);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class BAYESTREE, class GRAPH>
|
||||
void ClusterTree<BAYESTREE, GRAPH>::Cluster::mergeChildren(
|
||||
const std::vector<bool>& merge) {
|
||||
gttic(Cluster_mergeChildren);
|
||||
|
||||
// Count how many keys, factors and children we'll end up with
|
||||
size_t nrKeys = orderedFrontalKeys.size();
|
||||
size_t nrFactors = factors.size();
|
||||
size_t nrNewChildren = 0;
|
||||
// Loop over children
|
||||
size_t i = 0;
|
||||
for(const sharedNode& child: children) {
|
||||
if (merge[i]) {
|
||||
nrKeys += child->orderedFrontalKeys.size();
|
||||
nrFactors += child->factors.size();
|
||||
nrNewChildren += child->children.size();
|
||||
} else {
|
||||
nrNewChildren += 1; // we keep the child
|
||||
}
|
||||
++i;
|
||||
}
|
||||
|
||||
// now reserve space, and really merge
|
||||
orderedFrontalKeys.reserve(nrKeys);
|
||||
factors.reserve(nrFactors);
|
||||
typename Node::Children newChildren;
|
||||
newChildren.reserve(nrNewChildren);
|
||||
i = 0;
|
||||
for(const sharedNode& child: children) {
|
||||
if (merge[i]) {
|
||||
// Merge keys. For efficiency, we add keys in reverse order at end, calling reverse after..
|
||||
orderedFrontalKeys.insert(orderedFrontalKeys.end(),
|
||||
child->orderedFrontalKeys.rbegin(), child->orderedFrontalKeys.rend());
|
||||
// Merge keys, factors, and children.
|
||||
factors.insert(factors.end(), child->factors.begin(),
|
||||
child->factors.end());
|
||||
newChildren.insert(newChildren.end(), child->children.begin(),
|
||||
child->children.end());
|
||||
// Increment problem size
|
||||
problemSize_ = std::max(problemSize_, child->problemSize_);
|
||||
// Increment number of frontal variables
|
||||
} else {
|
||||
newChildren.push_back(child); // we keep the child
|
||||
}
|
||||
++i;
|
||||
}
|
||||
children = newChildren;
|
||||
std::reverse(orderedFrontalKeys.begin(), orderedFrontalKeys.end());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class BAYESTREE, class GRAPH>
|
||||
void ClusterTree<BAYESTREE, GRAPH>::print(const std::string& s,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
treeTraversal::PrintForest(*this, s, keyFormatter);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class BAYESTREE, class GRAPH>
|
||||
ClusterTree<BAYESTREE, GRAPH>& ClusterTree<BAYESTREE, GRAPH>::operator=(
|
||||
EliminatableClusterTree<BAYESTREE, GRAPH>& EliminatableClusterTree<BAYESTREE, GRAPH>::operator=(
|
||||
const This& other) {
|
||||
// Start by duplicating the tree.
|
||||
roots_ = treeTraversal::CloneForest(other);
|
||||
ClusterTree<GRAPH>::operator=(other);
|
||||
|
||||
// Assign the remaining factors - these are pointers to factors in the original factor graph and
|
||||
// we do not clone them.
|
||||
|
@ -198,41 +216,40 @@ ClusterTree<BAYESTREE, GRAPH>& ClusterTree<BAYESTREE, GRAPH>::operator=(
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class BAYESTREE, class GRAPH>
|
||||
std::pair<boost::shared_ptr<BAYESTREE>, boost::shared_ptr<GRAPH> > ClusterTree<
|
||||
BAYESTREE, GRAPH>::eliminate(const Eliminate& function) const {
|
||||
template <class BAYESTREE, class GRAPH>
|
||||
std::pair<boost::shared_ptr<BAYESTREE>, boost::shared_ptr<GRAPH> >
|
||||
EliminatableClusterTree<BAYESTREE, GRAPH>::eliminate(const Eliminate& function) const {
|
||||
gttic(ClusterTree_eliminate);
|
||||
// Do elimination (depth-first traversal). The rootsContainer stores a 'dummy' BayesTree node
|
||||
// that contains all of the roots as its children. rootsContainer also stores the remaining
|
||||
// uneliminated factors passed up from the roots.
|
||||
// un-eliminated factors passed up from the roots.
|
||||
boost::shared_ptr<BayesTreeType> result = boost::make_shared<BayesTreeType>();
|
||||
|
||||
typedef EliminationData<This> Data;
|
||||
Data rootsContainer(0, roots_.size());
|
||||
typename Data::EliminationPostOrderVisitor visitorPost(function,
|
||||
result->nodes_);
|
||||
Data rootsContainer(0, this->nrRoots());
|
||||
|
||||
typename Data::EliminationPostOrderVisitor visitorPost(function, result->nodes_);
|
||||
{
|
||||
TbbOpenMPMixedScope threadLimiter; // Limits OpenMP threads since we're mixing TBB and OpenMP
|
||||
treeTraversal::DepthFirstForestParallel(*this, rootsContainer,
|
||||
Data::EliminationPreOrderVisitor, visitorPost, 10);
|
||||
TbbOpenMPMixedScope threadLimiter; // Limits OpenMP threads since we're mixing TBB and OpenMP
|
||||
treeTraversal::DepthFirstForestParallel(*this, rootsContainer, Data::EliminationPreOrderVisitor,
|
||||
visitorPost, 10);
|
||||
}
|
||||
|
||||
// Create BayesTree from roots stored in the dummy BayesTree node.
|
||||
result->roots_.insert(result->roots_.end(),
|
||||
rootsContainer.bayesTreeNode->children.begin(),
|
||||
rootsContainer.bayesTreeNode->children.end());
|
||||
result->roots_.insert(result->roots_.end(), rootsContainer.bayesTreeNode->children.begin(),
|
||||
rootsContainer.bayesTreeNode->children.end());
|
||||
|
||||
// Add remaining factors that were not involved with eliminated variables
|
||||
boost::shared_ptr<FactorGraphType> remaining = boost::make_shared<
|
||||
FactorGraphType>();
|
||||
remaining->reserve(
|
||||
remainingFactors_.size() + rootsContainer.childFactors.size());
|
||||
boost::shared_ptr<FactorGraphType> remaining = boost::make_shared<FactorGraphType>();
|
||||
remaining->reserve(remainingFactors_.size() + rootsContainer.childFactors.size());
|
||||
remaining->push_back(remainingFactors_.begin(), remainingFactors_.end());
|
||||
for(const sharedFactor& factor: rootsContainer.childFactors) {
|
||||
for (const sharedFactor& factor : rootsContainer.childFactors) {
|
||||
if (factor)
|
||||
remaining->push_back(factor);
|
||||
}
|
||||
|
||||
// Return result
|
||||
return std::make_pair(result, remaining);
|
||||
}
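A hedged usage sketch (not part of this change) of the eliminate() call above: build a Gaussian junction tree and eliminate it into a Bayes tree plus the factors that were not pulled into elimination. EliminatePreferCholesky and the two-argument GaussianEliminationTree constructor are assumptions drawn from the rest of the library; any Eliminate-compatible functor would do.

    GaussianFactorGraph graph;                        // assume factors have been added
    Ordering ordering = Ordering::Colamd(graph);
    GaussianEliminationTree etree(graph, ordering);
    GaussianJunctionTree jtree(etree);
    auto result = jtree.eliminate(EliminatePreferCholesky);
    boost::shared_ptr<GaussianBayesTree> bayesTree = result.first;     // the conditionals
    boost::shared_ptr<GaussianFactorGraph> remaining = result.second;  // factors left uneliminated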
|
||||
|
||||
}
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* @file ClusterTree.h
|
||||
* @file EliminatableClusterTree.h
|
||||
* @date Oct 8, 2013
|
||||
* @author Kai Ni
|
||||
* @author Richard Roberts
|
||||
|
@ -21,57 +21,182 @@ namespace gtsam {
|
|||
* each factor \f$ f_i \f$ is associated with a single cluster and \f$ scope(f_i) \subset C_k \f$.
|
||||
* \nosubgrouping
|
||||
*/
|
||||
template<class BAYESTREE, class GRAPH>
|
||||
template <class GRAPH>
|
||||
class ClusterTree {
|
||||
public:
|
||||
typedef GRAPH FactorGraphType; ///< The factor graph type
|
||||
typedef typename GRAPH::FactorType FactorType; ///< The type of factors
|
||||
typedef ClusterTree<BAYESTREE, GRAPH> This; ///< This class
|
||||
typedef boost::shared_ptr<This> shared_ptr; ///< Shared pointer to this class
|
||||
typedef boost::shared_ptr<FactorType> sharedFactor; ///< Shared pointer to a factor
|
||||
typedef BAYESTREE BayesTreeType; ///< The BayesTree type produced by elimination
|
||||
typedef typename BayesTreeType::ConditionalType ConditionalType; ///< The type of conditionals
|
||||
typedef boost::shared_ptr<ConditionalType> sharedConditional; ///< Shared pointer to a conditional
|
||||
typedef typename FactorGraphType::Eliminate Eliminate; ///< Typedef for an eliminate subroutine
|
||||
public:
|
||||
typedef GRAPH FactorGraphType; ///< The factor graph type
|
||||
typedef ClusterTree<GRAPH> This; ///< This class
|
||||
typedef boost::shared_ptr<This> shared_ptr; ///< Shared pointer to this class
|
||||
|
||||
typedef typename GRAPH::FactorType FactorType; ///< The type of factors
|
||||
typedef boost::shared_ptr<FactorType> sharedFactor; ///< Shared pointer to a factor
|
||||
|
||||
/// A Cluster is just a collection of factors
|
||||
// TODO(frank): re-factor JunctionTree so we can make members private
|
||||
struct Cluster {
|
||||
typedef Ordering Keys;
|
||||
typedef FastVector<sharedFactor> Factors;
|
||||
typedef FastVector<boost::shared_ptr<Cluster> > Children;
|
||||
Children children; ///< sub-trees
|
||||
|
||||
Cluster() {
|
||||
}
|
||||
Cluster(Key key, const Factors& factors) :
|
||||
factors(factors) {
|
||||
orderedFrontalKeys.push_back(key);
|
||||
}
|
||||
typedef Ordering Keys;
|
||||
Keys orderedFrontalKeys; ///< Frontal keys of this node
|
||||
|
||||
FactorGraphType factors; ///< Factors associated with this node
|
||||
|
||||
Keys orderedFrontalKeys; ///< Frontal keys of this node
|
||||
Factors factors; ///< Factors associated with this node
|
||||
Children children; ///< sub-trees
|
||||
int problemSize_;
|
||||
|
||||
Cluster() : problemSize_(0) {}
|
||||
|
||||
virtual ~Cluster() {}
|
||||
|
||||
const Cluster& operator[](size_t i) const {
|
||||
return *(children[i]);
|
||||
}
|
||||
|
||||
/// Construct from factors associated with a single key
|
||||
template <class CONTAINER>
|
||||
Cluster(Key key, const CONTAINER& factorsToAdd)
|
||||
: problemSize_(0) {
|
||||
addFactors(key, factorsToAdd);
|
||||
}
|
||||
|
||||
/// Add factors associated with a single key
|
||||
template <class CONTAINER>
|
||||
void addFactors(Key key, const CONTAINER& factorsToAdd) {
|
||||
orderedFrontalKeys.push_back(key);
|
||||
factors.push_back(factorsToAdd);
|
||||
problemSize_ += factors.size();
|
||||
}
|
||||
|
||||
/// Add a child cluster
|
||||
void addChild(const boost::shared_ptr<Cluster>& cluster) {
|
||||
children.push_back(cluster);
|
||||
problemSize_ = std::max(problemSize_, cluster->problemSize_);
|
||||
}
|
||||
|
||||
size_t nrChildren() const {
|
||||
return children.size();
|
||||
}
|
||||
|
||||
size_t nrFactors() const {
|
||||
return factors.size();
|
||||
}
|
||||
|
||||
size_t nrFrontals() const {
|
||||
return orderedFrontalKeys.size();
|
||||
}
|
||||
|
||||
int problemSize() const {
|
||||
return problemSize_;
|
||||
}
|
||||
|
||||
/// print this node
|
||||
void print(const std::string& s = "", const KeyFormatter& keyFormatter =
|
||||
DefaultKeyFormatter) const;
|
||||
virtual void print(const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/// Return a vector with nrFrontal keys for each child
|
||||
std::vector<size_t> nrFrontalsOfChildren() const;
|
||||
|
||||
/// Merge in given cluster
|
||||
void merge(const boost::shared_ptr<Cluster>& cluster);
|
||||
|
||||
/// Merge all children for which bit is set into this node
|
||||
void mergeChildren(const std::vector<bool>& merge);
|
||||
};
|
||||
|
||||
typedef boost::shared_ptr<Cluster> sharedCluster; ///< Shared pointer to Cluster
|
||||
typedef Cluster Node; ///< Define Node=Cluster for compatibility with tree traversal functions
|
||||
typedef sharedCluster sharedNode; ///< Define Node=Cluster for compatibility with tree traversal functions
|
||||
typedef boost::shared_ptr<Cluster> sharedCluster; ///< Shared pointer to Cluster
|
||||
|
||||
// Define Node=Cluster for compatibility with tree traversal functions
|
||||
typedef Cluster Node;
|
||||
typedef sharedCluster sharedNode;
|
||||
|
||||
/** concept check */
|
||||
GTSAM_CONCEPT_TESTABLE_TYPE(FactorType);
|
||||
|
||||
protected:
|
||||
protected:
|
||||
FastVector<sharedNode> roots_;
|
||||
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/** Copy constructor - makes a deep copy of the tree structure, but only pointers to factors are
|
||||
* copied, factors are not cloned. */
|
||||
ClusterTree(const This& other) {
|
||||
*this = other;
|
||||
}
|
||||
|
||||
/// @}
|
||||
|
||||
public:
|
||||
|
||||
/// Default constructor
|
||||
ClusterTree() {}
|
||||
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
/** Print the cluster tree */
|
||||
void print(const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/// @}
|
||||
|
||||
/// @name Advanced Interface
|
||||
/// @{
|
||||
|
||||
void addRoot(const boost::shared_ptr<Cluster>& cluster) {
|
||||
roots_.push_back(cluster);
|
||||
}
|
||||
|
||||
void addChildrenAsRoots(const boost::shared_ptr<Cluster>& cluster) {
|
||||
for (auto child : cluster->children)
|
||||
this->addRoot(child);
|
||||
}
|
||||
|
||||
size_t nrRoots() const {
|
||||
return roots_.size();
|
||||
}
|
||||
|
||||
/** Return the set of roots (one for a tree, multiple for a forest) */
|
||||
const FastVector<sharedNode>& roots() const {
|
||||
return roots_;
|
||||
}
|
||||
|
||||
const Cluster& operator[](size_t i) const {
|
||||
return *(roots_[i]);
|
||||
}
|
||||
|
||||
/// @}
|
||||
|
||||
protected:
|
||||
/// @name Details
|
||||
|
||||
/// Assignment operator - makes a deep copy of the tree structure, but only pointers to factors
|
||||
/// are copied, factors are not cloned.
|
||||
This& operator=(const This& other);
|
||||
|
||||
/// @}
|
||||
};
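A hedged sketch of assembling a small ClusterTree by hand with the new non-eliminatable base class. SymbolicFactorGraph is used for concreteness; leafFactors0, leafFactors1 and rootFactors are placeholder containers of shared factors.

    typedef ClusterTree<SymbolicFactorGraph> SymbolicClusterTree;
    typedef SymbolicClusterTree::Cluster Cluster;

    // One cluster per frontal key, each built from the factors that mention it.
    auto leaf0 = boost::make_shared<Cluster>(0, leafFactors0);
    auto leaf1 = boost::make_shared<Cluster>(1, leafFactors1);
    auto root = boost::make_shared<Cluster>(2, rootFactors);
    root->addChild(leaf0);
    root->addChild(leaf1);

    SymbolicClusterTree tree;
    tree.addRoot(root);
    // tree.nrRoots() == 1 and tree[0].nrChildren() == 2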
|
||||
|
||||
/**
|
||||
* A cluster-tree that eliminates to a Bayes tree.
|
||||
*/
|
||||
template <class BAYESTREE, class GRAPH>
|
||||
class EliminatableClusterTree : public ClusterTree<GRAPH> {
|
||||
public:
|
||||
typedef BAYESTREE BayesTreeType; ///< The BayesTree type produced by elimination
|
||||
typedef GRAPH FactorGraphType; ///< The factor graph type
|
||||
typedef EliminatableClusterTree<BAYESTREE, GRAPH> This; ///< This class
|
||||
typedef boost::shared_ptr<This> shared_ptr; ///< Shared pointer to this class
|
||||
|
||||
typedef typename BAYESTREE::ConditionalType ConditionalType; ///< The type of conditionals
|
||||
typedef boost::shared_ptr<ConditionalType>
|
||||
sharedConditional; ///< Shared pointer to a conditional
|
||||
|
||||
typedef typename GRAPH::Eliminate Eliminate; ///< Typedef for an eliminate subroutine
|
||||
typedef typename GRAPH::FactorType FactorType; ///< The type of factors
|
||||
typedef boost::shared_ptr<FactorType> sharedFactor; ///< Shared pointer to a factor
|
||||
|
||||
protected:
|
||||
FastVector<sharedFactor> remainingFactors_;
|
||||
|
||||
/// @name Standard Constructors
|
||||
|
@ -79,19 +204,13 @@ protected:
|
|||
|
||||
/** Copy constructor - makes a deep copy of the tree structure, but only pointers to factors are
|
||||
* copied, factors are not cloned. */
|
||||
ClusterTree(const This& other) {*this = other;}
|
||||
|
||||
/// @}
|
||||
|
||||
public:
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
/** Print the cluster tree */
|
||||
void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
EliminatableClusterTree(const This& other) : ClusterTree<GRAPH>(other) {
|
||||
*this = other;
|
||||
}
|
||||
|
||||
/// @}
|
||||
|
||||
public:
|
||||
/// @name Standard Interface
|
||||
/// @{
|
||||
|
||||
|
@ -100,23 +219,22 @@ public:
|
|||
* in GaussianFactorGraph.h
|
||||
* @return The Bayes tree and factor graph resulting from elimination
|
||||
*/
|
||||
std::pair<boost::shared_ptr<BayesTreeType>, boost::shared_ptr<FactorGraphType> >
|
||||
eliminate(const Eliminate& function) const;
|
||||
std::pair<boost::shared_ptr<BayesTreeType>, boost::shared_ptr<FactorGraphType> > eliminate(
|
||||
const Eliminate& function) const;
|
||||
|
||||
/// @}
|
||||
|
||||
/// @name Advanced Interface
|
||||
/// @{
|
||||
|
||||
/** Return the set of roots (one for a tree, multiple for a forest) */
|
||||
const FastVector<sharedNode>& roots() const {return roots_;}
|
||||
|
||||
/** Return the remaining factors that are not pulled into elimination */
|
||||
const FastVector<sharedFactor>& remainingFactors() const {return remainingFactors_;}
|
||||
const FastVector<sharedFactor>& remainingFactors() const {
|
||||
return remainingFactors_;
|
||||
}
|
||||
|
||||
/// @}
|
||||
|
||||
protected:
|
||||
protected:
|
||||
/// @name Details
|
||||
|
||||
/// Assignment operator - makes a deep copy of the tree structure, but only pointers to factors
|
||||
|
@ -124,11 +242,10 @@ protected:
|
|||
This& operator=(const This& other);
|
||||
|
||||
/// Default constructor to be used in derived classes
|
||||
ClusterTree() {}
|
||||
EliminatableClusterTree() {}
|
||||
|
||||
/// @}
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#include <gtsam/inference/ClusterTree-inst.h>
|
||||
|
|
|
@ -354,3 +354,5 @@ namespace gtsam {
|
|||
}; // FactorGraph
|
||||
|
||||
} // namespace gtsam
|
||||
|
||||
#include <gtsam/inference/FactorGraph-inst.h>
|
|
@ -54,7 +54,7 @@ struct ConstructorTraversalData {
|
|||
// pointer in its parent.
|
||||
ConstructorTraversalData myData = ConstructorTraversalData(&parentData);
|
||||
myData.myJTNode = boost::make_shared<Node>(node->key, node->factors);
|
||||
parentData.myJTNode->children.push_back(myData.myJTNode);
|
||||
parentData.myJTNode->addChild(myData.myJTNode);
|
||||
return myData;
|
||||
}
|
||||
|
||||
|
@ -99,20 +99,20 @@ struct ConstructorTraversalData {
|
|||
// Merge our children if they are in our clique - if our conditional has
|
||||
// exactly one fewer parent than our child's conditional.
|
||||
const size_t myNrParents = myConditional->nrParents();
|
||||
const size_t nrChildren = node->children.size();
|
||||
const size_t nrChildren = node->nrChildren();
|
||||
assert(childConditionals.size() == nrChildren);
|
||||
|
||||
// decide which children to merge, as index into children
|
||||
std::vector<size_t> nrFrontals = node->nrFrontalsOfChildren();
|
||||
std::vector<bool> merge(nrChildren, false);
|
||||
size_t myNrFrontals = 1, i = 0;
|
||||
for(const sharedNode& child: node->children) {
|
||||
size_t myNrFrontals = 1;
|
||||
for (size_t i = 0; i < nrChildren; i++) {
|
||||
// Check if we should merge the i^th child
|
||||
if (myNrParents + myNrFrontals == childConditionals[i]->nrParents()) {
|
||||
// Increment number of frontal variables
|
||||
myNrFrontals += child->orderedFrontalKeys.size();
|
||||
myNrFrontals += nrFrontals[i];
|
||||
merge[i] = true;
|
||||
}
|
||||
++i;
|
||||
}
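For a concrete instance: if the parent's conditional has 2 parents and currently 1 frontal, a child whose conditional has exactly 3 parents satisfies 2 + 1 == 3 and is marked for merging; myNrFrontals then grows by that child's frontal count before the next child is tested, so a chain of compatible children can be absorbed in a single pass.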
|
||||
|
||||
// now really merge
|
||||
|
@ -145,10 +145,7 @@ JunctionTree<BAYESTREE, GRAPH>::JunctionTree(
|
|||
Data::ConstructorTraversalVisitorPostAlg2);
|
||||
|
||||
// Assign roots from the dummy node
|
||||
typedef typename JunctionTree<BAYESTREE, GRAPH>::Node Node;
|
||||
const typename Node::Children& children = rootData.myJTNode->children;
|
||||
Base::roots_.reserve(children.size());
|
||||
Base::roots_.insert(Base::roots_.begin(), children.begin(), children.end());
|
||||
this->addChildrenAsRoots(rootData.myJTNode);
|
||||
|
||||
// Transfer remaining factors from elimination tree
|
||||
Base::remainingFactors_ = eliminationTree.remainingFactors();
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -28,15 +28,13 @@ namespace gtsam {
|
|||
template<class BAYESNET, class GRAPH> class EliminationTree;
|
||||
|
||||
/**
|
||||
* A JunctionTree is a ClusterTree, i.e., a set of variable clusters with factors, arranged
|
||||
* in a tree, with the additional property that it represents the clique tree associated
|
||||
* with a Bayes net.
|
||||
* A JunctionTree is a cluster tree, a set of variable clusters with factors, arranged in a tree,
|
||||
* with the additional property that it represents the clique tree associated with a Bayes Net.
|
||||
*
|
||||
* In GTSAM a junction tree is an intermediate data structure in multifrontal
|
||||
* variable elimination. Each node is a cluster of factors, along with a
|
||||
* clique of variables that are eliminated all at once. In detail, every node k represents
|
||||
* a clique (maximal fully connected subset) of an associated chordal graph, such as a
|
||||
* chordal Bayes net resulting from elimination.
|
||||
* In GTSAM a junction tree is an intermediate data structure in multifrontal variable
|
||||
* elimination. Each node is a cluster of factors, along with a clique of variables that are
|
||||
* eliminated all at once. In detail, every node k represents a clique (maximal fully connected
|
||||
* subset) of an associated chordal graph, such as a chordal Bayes net resulting from elimination.
|
||||
*
|
||||
* The difference with the BayesTree is that a JunctionTree stores factors, whereas a
|
||||
* BayesTree stores conditionals, that are the product of eliminating the factors in the
|
||||
|
@ -49,13 +47,13 @@ namespace gtsam {
|
|||
* \nosubgrouping
|
||||
*/
|
||||
template<class BAYESTREE, class GRAPH>
|
||||
class JunctionTree : public ClusterTree<BAYESTREE, GRAPH> {
|
||||
class JunctionTree : public EliminatableClusterTree<BAYESTREE, GRAPH> {
|
||||
|
||||
public:
|
||||
|
||||
typedef JunctionTree<BAYESTREE, GRAPH> This; ///< This class
|
||||
typedef boost::shared_ptr<This> shared_ptr; ///< Shared pointer to this class
|
||||
typedef ClusterTree<BAYESTREE, GRAPH> Base; ///< Our base class
|
||||
typedef EliminatableClusterTree<BAYESTREE, GRAPH> Base; ///< Our base class
|
||||
|
||||
protected:
|
||||
|
||||
|
@ -65,7 +63,7 @@ namespace gtsam {
|
|||
/** Build the junction tree from an elimination tree. */
|
||||
template<class ETREE>
|
||||
static This FromEliminationTree(const ETREE& eliminationTree) { return This(eliminationTree); }
|
||||
|
||||
|
||||
/** Build the junction tree from an elimination tree. */
|
||||
template<class ETREE_BAYESNET, class ETREE_GRAPH>
|
||||
JunctionTree(const EliminationTree<ETREE_BAYESNET, ETREE_GRAPH>& eliminationTree);
|
||||
|
|
|
@ -72,7 +72,7 @@ Ordering Ordering::ColamdConstrained(const VariableIndex& variableIndex,
|
|||
for(size_t factorIndex: column) {
|
||||
A[count++] = (int) factorIndex; // copy sparse column
|
||||
}
|
||||
p[index + 1] = count; // column j (base 1) goes from A[j-1] to A[j]-1
|
||||
p[index + 1] = count; // column j (base 1) goes from A[j-1] to A[j]-1
|
||||
// Store key in array and increment index
|
||||
keys[index] = key_factors.first;
|
||||
++index;
|
||||
|
@ -123,6 +123,7 @@ Ordering Ordering::ColamdConstrainedLast(const VariableIndex& variableIndex,
|
|||
std::vector<int> cmember(n, 0);
|
||||
|
||||
// Build a mapping to look up sorted Key indices by Key
|
||||
// TODO(frank): think of a way to not build this
|
||||
FastMap<Key, size_t> keyIndices;
|
||||
size_t j = 0;
|
||||
for (auto key_factors: variableIndex)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -79,7 +79,10 @@ public:
|
|||
/// it is faster to use COLAMD(const VariableIndex&)
|
||||
template<class FACTOR>
|
||||
static Ordering Colamd(const FactorGraph<FACTOR>& graph) {
|
||||
return Colamd(VariableIndex(graph));
|
||||
if (graph.empty())
|
||||
return Ordering();
|
||||
else
|
||||
return Colamd(VariableIndex(graph));
|
||||
}
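For example (an illustrative sketch matching the guard above):

    SymbolicFactorGraph graph;
    Ordering o1 = Ordering::Colamd(graph);   // empty graph -> empty Ordering, COLAMD is never invoked
    graph.push_factor(0, 1);
    graph.push_factor(1, 2);
    Ordering o2 = Ordering::Colamd(graph);   // normal fill-reducing ordering over keys 0, 1, 2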
|
||||
|
||||
/// Compute a fill-reducing ordering using COLAMD from a VariableIndex.
|
||||
|
@ -96,8 +99,10 @@ public:
|
|||
template<class FACTOR>
|
||||
static Ordering ColamdConstrainedLast(const FactorGraph<FACTOR>& graph,
|
||||
const std::vector<Key>& constrainLast, bool forceOrder = false) {
|
||||
return ColamdConstrainedLast(VariableIndex(graph), constrainLast,
|
||||
forceOrder);
|
||||
if (graph.empty())
|
||||
return Ordering();
|
||||
else
|
||||
return ColamdConstrainedLast(VariableIndex(graph), constrainLast, forceOrder);
|
||||
}
|
||||
|
||||
/// Compute a fill-reducing ordering using constrained COLAMD from a VariableIndex. This
|
||||
|
@ -121,8 +126,10 @@ public:
|
|||
template<class FACTOR>
|
||||
static Ordering ColamdConstrainedFirst(const FactorGraph<FACTOR>& graph,
|
||||
const std::vector<Key>& constrainFirst, bool forceOrder = false) {
|
||||
return ColamdConstrainedFirst(VariableIndex(graph), constrainFirst,
|
||||
forceOrder);
|
||||
if (graph.empty())
|
||||
return Ordering();
|
||||
else
|
||||
return ColamdConstrainedFirst(VariableIndex(graph), constrainFirst, forceOrder);
|
||||
}
|
||||
|
||||
/// Compute a fill-reducing ordering using constrained COLAMD from a VariableIndex. This
|
||||
|
@ -148,7 +155,10 @@ public:
|
|||
template<class FACTOR>
|
||||
static Ordering ColamdConstrained(const FactorGraph<FACTOR>& graph,
|
||||
const FastMap<Key, int>& groups) {
|
||||
return ColamdConstrained(VariableIndex(graph), groups);
|
||||
if (graph.empty())
|
||||
return Ordering();
|
||||
else
|
||||
return ColamdConstrained(VariableIndex(graph), groups);
|
||||
}
|
||||
|
||||
/// Compute a fill-reducing ordering using constrained COLAMD from a VariableIndex. In this
|
||||
|
@ -190,6 +200,8 @@ public:
|
|||
template<class FACTOR>
|
||||
static Ordering Create(OrderingType orderingType,
|
||||
const FactorGraph<FACTOR>& graph) {
|
||||
if (graph.empty())
|
||||
return Ordering();
|
||||
|
||||
switch (orderingType) {
|
||||
case COLAMD:
|
||||
|
|
|
@ -33,8 +33,8 @@ namespace gtsam {
|
|||
*/
|
||||
class GTSAM_EXPORT Symbol {
|
||||
protected:
|
||||
const unsigned char c_;
|
||||
const std::uint64_t j_;
|
||||
unsigned char c_;
|
||||
std::uint64_t j_;
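Dropping const on these members makes Symbol copy-assignable; a minimal illustration (not part of the diff):

    Symbol a('x', 1), b('l', 2);
    a = b;   // compiles now that c_ and j_ are assignable; with const members
             // the implicitly-deleted assignment operator made this an error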
|
||||
|
||||
public:
|
||||
|
||||
|
|
|
@ -30,36 +30,64 @@ using namespace boost::assign;
|
|||
|
||||
namespace example {
|
||||
SymbolicFactorGraph symbolicChain() {
|
||||
SymbolicFactorGraph sfg;
|
||||
sfg.push_factor(0, 1);
|
||||
sfg.push_factor(1, 2);
|
||||
sfg.push_factor(2, 3);
|
||||
sfg.push_factor(3, 4);
|
||||
sfg.push_factor(4, 5);
|
||||
return sfg;
|
||||
SymbolicFactorGraph symbolicGraph;
|
||||
symbolicGraph.push_factor(0, 1);
|
||||
symbolicGraph.push_factor(1, 2);
|
||||
symbolicGraph.push_factor(2, 3);
|
||||
symbolicGraph.push_factor(3, 4);
|
||||
symbolicGraph.push_factor(4, 5);
|
||||
return symbolicGraph;
|
||||
}
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
TEST(Ordering, constrained_ordering) {
|
||||
|
||||
// create graph with wanted variable set = 2, 4
|
||||
SymbolicFactorGraph sfg = example::symbolicChain();
|
||||
SymbolicFactorGraph symbolicGraph = example::symbolicChain();
|
||||
|
||||
// unconstrained version
|
||||
Ordering actUnconstrained = Ordering::Colamd(sfg);
|
||||
Ordering expUnconstrained = Ordering(list_of(0)(1)(2)(3)(4)(5));
|
||||
EXPECT(assert_equal(expUnconstrained, actUnconstrained));
|
||||
{
|
||||
Ordering actual = Ordering::Colamd(symbolicGraph);
|
||||
Ordering expected = Ordering(list_of(0)(1)(2)(3)(4)(5));
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
// constrained version - push one set to the end
|
||||
Ordering actConstrained = Ordering::ColamdConstrainedLast(sfg, list_of(2)(4));
|
||||
Ordering expConstrained = Ordering(list_of(0)(1)(5)(3)(4)(2));
|
||||
EXPECT(assert_equal(expConstrained, actConstrained));
|
||||
{
|
||||
Ordering actual = Ordering::ColamdConstrainedLast(symbolicGraph, list_of(2)(4));
|
||||
Ordering expected = Ordering(list_of(0)(1)(5)(3)(4)(2));
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
// constrained version - push one set to the start
|
||||
Ordering actConstrained2 = Ordering::ColamdConstrainedFirst(sfg,
|
||||
list_of(2)(4));
|
||||
Ordering expConstrained2 = Ordering(list_of(2)(4)(0)(1)(3)(5));
|
||||
EXPECT(assert_equal(expConstrained2, actConstrained2));
|
||||
{
|
||||
Ordering actual = Ordering::ColamdConstrainedFirst(symbolicGraph, list_of(2)(4));
|
||||
Ordering expected = Ordering(list_of(2)(4)(0)(1)(3)(5));
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
// Make sure giving empty constraints does not break the code
|
||||
{
|
||||
Ordering actual = Ordering::ColamdConstrainedLast(symbolicGraph, {});
|
||||
Ordering expected = Ordering(list_of(0)(1)(2)(3)(4)(5));
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
{
|
||||
Ordering actual = Ordering::ColamdConstrainedFirst(symbolicGraph, {});
|
||||
Ordering expected = Ordering(list_of(0)(1)(2)(3)(4)(5));
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
// Make sure giving empty graph does not break the code
|
||||
SymbolicFactorGraph emptyGraph;
|
||||
Ordering empty;
|
||||
{
|
||||
Ordering actual = Ordering::ColamdConstrainedLast(emptyGraph, list_of(2)(4));
|
||||
EXPECT(assert_equal(empty, actual));
|
||||
}
|
||||
{
|
||||
Ordering actual = Ordering::ColamdConstrainedFirst(emptyGraph, list_of(2)(4));
|
||||
EXPECT(assert_equal(empty, actual));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -68,7 +96,7 @@ TEST(Ordering, grouped_constrained_ordering) {
|
|||
// create graph with constrained groups:
|
||||
// 1: 2, 4
|
||||
// 2: 5
|
||||
SymbolicFactorGraph sfg = example::symbolicChain();
|
||||
SymbolicFactorGraph symbolicGraph = example::symbolicChain();
|
||||
|
||||
// constrained version - push one set to the end
|
||||
FastMap<Key, int> constraints;
|
||||
|
@ -76,40 +104,40 @@ TEST(Ordering, grouped_constrained_ordering) {
|
|||
constraints[4] = 1;
|
||||
constraints[5] = 2;
|
||||
|
||||
Ordering actConstrained = Ordering::ColamdConstrained(sfg, constraints);
|
||||
Ordering expConstrained = list_of(0)(1)(3)(2)(4)(5);
|
||||
EXPECT(assert_equal(expConstrained, actConstrained));
|
||||
Ordering actual = Ordering::ColamdConstrained(symbolicGraph, constraints);
|
||||
Ordering expected = list_of(0)(1)(3)(2)(4)(5);
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Ordering, csr_format) {
|
||||
// Example in METIS manual
|
||||
SymbolicFactorGraph sfg;
|
||||
sfg.push_factor(0, 1);
|
||||
sfg.push_factor(1, 2);
|
||||
sfg.push_factor(2, 3);
|
||||
sfg.push_factor(3, 4);
|
||||
sfg.push_factor(5, 6);
|
||||
sfg.push_factor(6, 7);
|
||||
sfg.push_factor(7, 8);
|
||||
sfg.push_factor(8, 9);
|
||||
sfg.push_factor(10, 11);
|
||||
sfg.push_factor(11, 12);
|
||||
sfg.push_factor(12, 13);
|
||||
sfg.push_factor(13, 14);
|
||||
SymbolicFactorGraph symbolicGraph;
|
||||
symbolicGraph.push_factor(0, 1);
|
||||
symbolicGraph.push_factor(1, 2);
|
||||
symbolicGraph.push_factor(2, 3);
|
||||
symbolicGraph.push_factor(3, 4);
|
||||
symbolicGraph.push_factor(5, 6);
|
||||
symbolicGraph.push_factor(6, 7);
|
||||
symbolicGraph.push_factor(7, 8);
|
||||
symbolicGraph.push_factor(8, 9);
|
||||
symbolicGraph.push_factor(10, 11);
|
||||
symbolicGraph.push_factor(11, 12);
|
||||
symbolicGraph.push_factor(12, 13);
|
||||
symbolicGraph.push_factor(13, 14);
|
||||
|
||||
sfg.push_factor(0, 5);
|
||||
sfg.push_factor(5, 10);
|
||||
sfg.push_factor(1, 6);
|
||||
sfg.push_factor(6, 11);
|
||||
sfg.push_factor(2, 7);
|
||||
sfg.push_factor(7, 12);
|
||||
sfg.push_factor(3, 8);
|
||||
sfg.push_factor(8, 13);
|
||||
sfg.push_factor(4, 9);
|
||||
sfg.push_factor(9, 14);
|
||||
symbolicGraph.push_factor(0, 5);
|
||||
symbolicGraph.push_factor(5, 10);
|
||||
symbolicGraph.push_factor(1, 6);
|
||||
symbolicGraph.push_factor(6, 11);
|
||||
symbolicGraph.push_factor(2, 7);
|
||||
symbolicGraph.push_factor(7, 12);
|
||||
symbolicGraph.push_factor(3, 8);
|
||||
symbolicGraph.push_factor(8, 13);
|
||||
symbolicGraph.push_factor(4, 9);
|
||||
symbolicGraph.push_factor(9, 14);
|
||||
|
||||
MetisIndex mi(sfg);
|
||||
MetisIndex mi(symbolicGraph);
|
||||
|
||||
vector<int> xadjExpected, adjExpected;
|
||||
xadjExpected += 0, 2, 5, 8, 11, 13, 16, 20, 24, 28, 31, 33, 36, 39, 42, 44;
|
||||
|
@ -122,16 +150,16 @@ TEST(Ordering, csr_format) {
|
|||
|
||||
/* ************************************************************************* */
|
||||
TEST(Ordering, csr_format_2) {
|
||||
SymbolicFactorGraph sfg;
|
||||
SymbolicFactorGraph symbolicGraph;
|
||||
|
||||
sfg.push_factor(0);
|
||||
sfg.push_factor(0, 1);
|
||||
sfg.push_factor(1, 2);
|
||||
sfg.push_factor(2, 3);
|
||||
sfg.push_factor(3, 4);
|
||||
sfg.push_factor(4, 1);
|
||||
symbolicGraph.push_factor(0);
|
||||
symbolicGraph.push_factor(0, 1);
|
||||
symbolicGraph.push_factor(1, 2);
|
||||
symbolicGraph.push_factor(2, 3);
|
||||
symbolicGraph.push_factor(3, 4);
|
||||
symbolicGraph.push_factor(4, 1);
|
||||
|
||||
MetisIndex mi(sfg);
|
||||
MetisIndex mi(symbolicGraph);
|
||||
|
||||
vector<int> xadjExpected, adjExpected;
|
||||
xadjExpected += 0, 1, 4, 6, 8, 10;
|
||||
|
@ -144,16 +172,16 @@ TEST(Ordering, csr_format_2) {
|
|||
|
||||
/* ************************************************************************* */
|
||||
TEST(Ordering, csr_format_3) {
|
||||
SymbolicFactorGraph sfg;
|
||||
SymbolicFactorGraph symbolicGraph;
|
||||
|
||||
sfg.push_factor(100);
|
||||
sfg.push_factor(100, 101);
|
||||
sfg.push_factor(101, 102);
|
||||
sfg.push_factor(102, 103);
|
||||
sfg.push_factor(103, 104);
|
||||
sfg.push_factor(104, 101);
|
||||
symbolicGraph.push_factor(100);
|
||||
symbolicGraph.push_factor(100, 101);
|
||||
symbolicGraph.push_factor(101, 102);
|
||||
symbolicGraph.push_factor(102, 103);
|
||||
symbolicGraph.push_factor(103, 104);
|
||||
symbolicGraph.push_factor(104, 101);
|
||||
|
||||
MetisIndex mi(sfg);
|
||||
MetisIndex mi(symbolicGraph);
|
||||
|
||||
vector<int> xadjExpected, adjExpected;
|
||||
xadjExpected += 0, 1, 4, 6, 8, 10;
|
||||
|
@ -174,16 +202,16 @@ TEST(Ordering, csr_format_3) {
|
|||
/* ************************************************************************* */
|
||||
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
|
||||
TEST(Ordering, csr_format_4) {
|
||||
SymbolicFactorGraph sfg;
|
||||
SymbolicFactorGraph symbolicGraph;
|
||||
|
||||
sfg.push_factor(Symbol('x', 1));
|
||||
sfg.push_factor(Symbol('x', 1), Symbol('x', 2));
|
||||
sfg.push_factor(Symbol('x', 2), Symbol('x', 3));
|
||||
sfg.push_factor(Symbol('x', 3), Symbol('x', 4));
|
||||
sfg.push_factor(Symbol('x', 4), Symbol('x', 5));
|
||||
sfg.push_factor(Symbol('x', 5), Symbol('x', 6));
|
||||
symbolicGraph.push_factor(Symbol('x', 1));
|
||||
symbolicGraph.push_factor(Symbol('x', 1), Symbol('x', 2));
|
||||
symbolicGraph.push_factor(Symbol('x', 2), Symbol('x', 3));
|
||||
symbolicGraph.push_factor(Symbol('x', 3), Symbol('x', 4));
|
||||
symbolicGraph.push_factor(Symbol('x', 4), Symbol('x', 5));
|
||||
symbolicGraph.push_factor(Symbol('x', 5), Symbol('x', 6));
|
||||
|
||||
MetisIndex mi(sfg);
|
||||
MetisIndex mi(symbolicGraph);
|
||||
|
||||
vector<int> xadjExpected, adjExpected;
|
||||
xadjExpected += 0, 1, 3, 5, 7, 9, 10;
|
||||
|
@ -196,29 +224,29 @@ TEST(Ordering, csr_format_4) {
|
|||
EXPECT(adjExpected.size() == mi.adj().size());
|
||||
EXPECT(adjExpected == adjAcutal);
|
||||
|
||||
Ordering metOrder = Ordering::Metis(sfg);
|
||||
Ordering metOrder = Ordering::Metis(symbolicGraph);
|
||||
|
||||
// Test different symbol types
|
||||
sfg.push_factor(Symbol('l', 1));
|
||||
sfg.push_factor(Symbol('x', 1), Symbol('l', 1));
|
||||
sfg.push_factor(Symbol('x', 2), Symbol('l', 1));
|
||||
sfg.push_factor(Symbol('x', 3), Symbol('l', 1));
|
||||
sfg.push_factor(Symbol('x', 4), Symbol('l', 1));
|
||||
symbolicGraph.push_factor(Symbol('l', 1));
|
||||
symbolicGraph.push_factor(Symbol('x', 1), Symbol('l', 1));
|
||||
symbolicGraph.push_factor(Symbol('x', 2), Symbol('l', 1));
|
||||
symbolicGraph.push_factor(Symbol('x', 3), Symbol('l', 1));
|
||||
symbolicGraph.push_factor(Symbol('x', 4), Symbol('l', 1));
|
||||
|
||||
Ordering metOrder2 = Ordering::Metis(sfg);
|
||||
Ordering metOrder2 = Ordering::Metis(symbolicGraph);
|
||||
}
|
||||
#endif
|
||||
/* ************************************************************************* */
|
||||
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
|
||||
TEST(Ordering, metis) {
|
||||
|
||||
SymbolicFactorGraph sfg;
|
||||
SymbolicFactorGraph symbolicGraph;
|
||||
|
||||
sfg.push_factor(0);
|
||||
sfg.push_factor(0, 1);
|
||||
sfg.push_factor(1, 2);
|
||||
symbolicGraph.push_factor(0);
|
||||
symbolicGraph.push_factor(0, 1);
|
||||
symbolicGraph.push_factor(1, 2);
|
||||
|
||||
MetisIndex mi(sfg);
|
||||
MetisIndex mi(symbolicGraph);
|
||||
|
||||
vector<int> xadjExpected, adjExpected;
|
||||
xadjExpected += 0, 1, 3, 4;
|
||||
|
@ -228,7 +256,7 @@ TEST(Ordering, metis) {
|
|||
EXPECT(adjExpected.size() == mi.adj().size());
|
||||
EXPECT(adjExpected == mi.adj());
|
||||
|
||||
Ordering metis = Ordering::Metis(sfg);
|
||||
Ordering metis = Ordering::Metis(symbolicGraph);
|
||||
}
|
||||
#endif
|
||||
/* ************************************************************************* */
|
||||
|
@ -236,15 +264,15 @@ TEST(Ordering, metis) {
|
|||
TEST(Ordering, MetisLoop) {
|
||||
|
||||
// create linear graph
|
||||
SymbolicFactorGraph sfg = example::symbolicChain();
|
||||
SymbolicFactorGraph symbolicGraph = example::symbolicChain();
|
||||
|
||||
// add loop closure
|
||||
sfg.push_factor(0, 5);
|
||||
symbolicGraph.push_factor(0, 5);
|
||||
|
||||
// METIS
|
||||
#if !defined(__APPLE__)
|
||||
{
|
||||
Ordering actual = Ordering::Create(Ordering::METIS, sfg);
|
||||
Ordering actual = Ordering::Create(Ordering::METIS, symbolicGraph);
|
||||
// - P( 0 4 1)
|
||||
// | - P( 2 | 4 1)
|
||||
// | | - P( 3 | 4 2)
|
||||
|
@ -254,7 +282,7 @@ TEST(Ordering, MetisLoop) {
|
|||
}
|
||||
#else
|
||||
{
|
||||
Ordering actual = Ordering::Create(Ordering::METIS, sfg);
|
||||
Ordering actual = Ordering::Create(Ordering::METIS, symbolicGraph);
|
||||
// - P( 1 0 3)
|
||||
// | - P( 4 | 0 3)
|
||||
// | | - P( 5 | 0 4)
|
||||
|
@ -269,7 +297,7 @@ TEST(Ordering, MetisLoop) {
|
|||
TEST(Ordering, Create) {
|
||||
|
||||
// create chain graph
|
||||
SymbolicFactorGraph sfg = example::symbolicChain();
|
||||
SymbolicFactorGraph symbolicGraph = example::symbolicChain();
|
||||
|
||||
// COLAMD
|
||||
{
|
||||
|
@ -278,7 +306,7 @@ TEST(Ordering, Create) {
|
|||
//| | - P( 2 | 3)
|
||||
//| | | - P( 1 | 2)
|
||||
//| | | | - P( 0 | 1)
|
||||
Ordering actual = Ordering::Create(Ordering::COLAMD, sfg);
|
||||
Ordering actual = Ordering::Create(Ordering::COLAMD, symbolicGraph);
|
||||
Ordering expected = Ordering(list_of(0)(1)(2)(3)(4)(5));
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
@ -286,7 +314,7 @@ TEST(Ordering, Create) {
|
|||
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
|
||||
// METIS
|
||||
{
|
||||
Ordering actual = Ordering::Create(Ordering::METIS, sfg);
|
||||
Ordering actual = Ordering::Create(Ordering::METIS, symbolicGraph);
|
||||
//- P( 1 0 2)
|
||||
//| - P( 3 4 | 2)
|
||||
//| | - P( 5 | 4)
|
||||
|
@ -296,7 +324,7 @@ TEST(Ordering, Create) {
|
|||
#endif
|
||||
|
||||
// CUSTOM
|
||||
CHECK_EXCEPTION(Ordering::Create(Ordering::CUSTOM, sfg), runtime_error);
|
||||
CHECK_EXCEPTION(Ordering::Create(Ordering::CUSTOM, symbolicGraph), runtime_error);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -73,12 +73,12 @@ struct BinaryJacobianFactor: JacobianFactor {
|
|||
Eigen::Block<const Matrix, M, 1> b(Ab, 0, N1 + N2);
|
||||
|
||||
// We perform I += A'*A to the upper triangle
|
||||
(*info)(slot1, slot1).selfadjointView().rankUpdate(A1.transpose());
|
||||
(*info)(slot1, slot2).knownOffDiagonal() += A1.transpose() * A2;
|
||||
(*info)(slot1, slotB).knownOffDiagonal() += A1.transpose() * b;
|
||||
(*info)(slot2, slot2).selfadjointView().rankUpdate(A2.transpose());
|
||||
(*info)(slot2, slotB).knownOffDiagonal() += A2.transpose() * b;
|
||||
(*info)(slotB, slotB)(0, 0) += b.transpose() * b;
|
||||
info->diagonalBlock(slot1).rankUpdate(A1.transpose());
|
||||
info->updateOffDiagonalBlock(slot1, slot2, A1.transpose() * A2);
|
||||
info->updateOffDiagonalBlock(slot1, slotB, A1.transpose() * b);
|
||||
info->diagonalBlock(slot2).rankUpdate(A2.transpose());
|
||||
info->updateOffDiagonalBlock(slot2, slotB, A2.transpose() * b);
|
||||
info->updateDiagonalBlock(slotB, b.transpose() * b);
|
||||
}
|
||||
}
|
||||
};
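The old-style SymmetricBlockMatrix writes used above map onto the new interface roughly one-for-one; a reference sketch, not part of the diff, where info, A, M, and S are placeholders:

    (*info)(i, i).selfadjointView().rankUpdate(A);   // old
    info->diagonalBlock(i).rankUpdate(A);            // new

    (*info)(i, j).knownOffDiagonal() += M;           // old
    info->updateOffDiagonalBlock(i, j, M);           // new

    (*info)(j, j)(0, 0) += S(0, 0);                  // old (scalar block)
    info->updateDiagonalBlock(j, S);                 // new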
|
||||
|
|
|
@ -48,7 +48,7 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
GaussianFactorGraph::Keys GaussianFactorGraph::keys() const {
|
||||
KeySet keys;
|
||||
for(const sharedFactor& factor: *this)
|
||||
for (const sharedFactor& factor: *this)
|
||||
if (factor)
|
||||
keys.insert(factor->begin(), factor->end());
|
||||
return keys;
|
||||
|
@ -57,8 +57,8 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
std::map<Key, size_t> GaussianFactorGraph::getKeyDimMap() const {
|
||||
map<Key, size_t> spec;
|
||||
for ( const GaussianFactor::shared_ptr &gf: *this ) {
|
||||
for ( GaussianFactor::const_iterator it = gf->begin() ; it != gf->end() ; it++ ) {
|
||||
for (const GaussianFactor::shared_ptr& gf : *this) {
|
||||
for (GaussianFactor::const_iterator it = gf->begin(); it != gf->end(); it++) {
|
||||
map<Key,size_t>::iterator it2 = spec.find(*it);
|
||||
if ( it2 == spec.end() ) {
|
||||
spec.insert(make_pair(*it, gf->getDim(it)));
|
||||
|
@ -78,7 +78,7 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
GaussianFactorGraph GaussianFactorGraph::clone() const {
|
||||
GaussianFactorGraph result;
|
||||
for(const sharedFactor& f: *this) {
|
||||
for (const sharedFactor& f : *this) {
|
||||
if (f)
|
||||
result.push_back(f->clone());
|
||||
else
|
||||
|
@ -90,9 +90,9 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
GaussianFactorGraph GaussianFactorGraph::negate() const {
|
||||
GaussianFactorGraph result;
|
||||
for(const sharedFactor& f: *this) {
|
||||
if (f)
|
||||
result.push_back(f->negate());
|
||||
for (const sharedFactor& factor: *this) {
|
||||
if (factor)
|
||||
result.push_back(factor->negate());
|
||||
else
|
||||
result.push_back(sharedFactor()); // Passes on null factors so indices remain valid
|
||||
}
|
||||
|
@ -104,8 +104,9 @@ namespace gtsam {
|
|||
// First find dimensions of each variable
|
||||
typedef std::map<Key, size_t> KeySizeMap;
|
||||
KeySizeMap dims;
|
||||
for(const sharedFactor& factor: *this) {
|
||||
if (!static_cast<bool>(factor)) continue;
|
||||
for (const sharedFactor& factor : *this) {
|
||||
if (!static_cast<bool>(factor))
|
||||
continue;
|
||||
|
||||
for (GaussianFactor::const_iterator key = factor->begin();
|
||||
key != factor->end(); ++key) {
|
||||
|
@ -116,7 +117,7 @@ namespace gtsam {
|
|||
// Compute first scalar column of each variable
|
||||
size_t currentColIndex = 0;
|
||||
KeySizeMap columnIndices = dims;
|
||||
for(const KeySizeMap::value_type& col: dims) {
|
||||
for (const KeySizeMap::value_type& col : dims) {
|
||||
columnIndices[col.first] = currentColIndex;
|
||||
currentColIndex += dims[col.first];
|
||||
}
|
||||
|
@ -125,7 +126,7 @@ namespace gtsam {
|
|||
typedef boost::tuple<size_t, size_t, double> triplet;
|
||||
vector<triplet> entries;
|
||||
size_t row = 0;
|
||||
for(const sharedFactor& factor: *this) {
|
||||
for (const sharedFactor& factor : *this) {
|
||||
if (!static_cast<bool>(factor)) continue;
|
||||
|
||||
// Convert to JacobianFactor if necessary
|
||||
|
@ -209,25 +210,21 @@ namespace gtsam {
|
|||
// combine all factors and get upper-triangular part of Hessian
|
||||
Scatter scatter(*this, optionalOrdering);
|
||||
HessianFactor combined(*this, scatter);
|
||||
Matrix result = combined.info();
|
||||
// Fill in lower-triangular part of Hessian
|
||||
result.triangularView<Eigen::StrictlyLower>() = result.transpose();
|
||||
return result;
|
||||
return combined.info().selfadjointView();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
pair<Matrix, Vector> GaussianFactorGraph::hessian(
|
||||
boost::optional<const Ordering&> optionalOrdering) const {
|
||||
Matrix augmented = augmentedHessian(optionalOrdering);
|
||||
return make_pair(
|
||||
augmented.topLeftCorner(augmented.rows() - 1, augmented.rows() - 1),
|
||||
augmented.col(augmented.rows() - 1).head(augmented.rows() - 1));
|
||||
size_t n = augmented.rows() - 1;
|
||||
return make_pair(augmented.topLeftCorner(n, n), augmented.topRightCorner(n, 1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
VectorValues GaussianFactorGraph::hessianDiagonal() const {
|
||||
VectorValues d;
|
||||
for(const sharedFactor& factor: *this) {
|
||||
for (const sharedFactor& factor : *this) {
|
||||
if (factor) {
|
||||
VectorValues di = factor->hessianDiagonal();
|
||||
d.addInPlace_(di);
|
||||
|
@ -239,11 +236,11 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
map<Key,Matrix> GaussianFactorGraph::hessianBlockDiagonal() const {
|
||||
map<Key,Matrix> blocks;
|
||||
for(const sharedFactor& factor: *this) {
|
||||
for (const sharedFactor& factor : *this) {
|
||||
if (!factor) continue;
|
||||
map<Key,Matrix> BD = factor->hessianBlockDiagonal();
|
||||
map<Key,Matrix>::const_iterator it = BD.begin();
|
||||
for(;it!=BD.end();it++) {
|
||||
for (; it != BD.end(); ++it) {
|
||||
Key j = it->first; // variable key for this block
|
||||
const Matrix& Bj = it->second;
|
||||
if (blocks.count(j))
|
||||
|
@ -262,6 +259,30 @@ namespace gtsam {
|
|||
return BaseEliminateable::eliminateMultifrontal(ordering, function)->optimize();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// TODO(frank): can we cache memory across invocations
|
||||
VectorValues GaussianFactorGraph::optimizeDensely() const {
|
||||
gttic(GaussianFactorGraph_optimizeDensely);
|
||||
|
||||
// Combine all factors in a single HessianFactor (as done in augmentedHessian)
|
||||
Scatter scatter(*this);
|
||||
HessianFactor combined(*this, scatter);
|
||||
|
||||
// TODO(frank): cast to large dynamic matrix :-(
|
||||
// NOTE(frank): info only valid (I think) in upper triangle. No problem for LLT...
|
||||
Matrix augmented = combined.info().selfadjointView();
|
||||
|
||||
// Do Cholesky Factorization
|
||||
size_t n = augmented.rows() - 1;
|
||||
auto AtA = augmented.topLeftCorner(n, n);
|
||||
auto eta = augmented.topRightCorner(n, 1);
|
||||
Eigen::LLT<Matrix, Eigen::Upper> llt(AtA);
|
||||
|
||||
// Solve and convert, re-using scatter data structure
|
||||
Vector solution = llt.solve(eta);
|
||||
return VectorValues(solution, scatter);
|
||||
}
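A hedged usage sketch comparing the two solve paths (graph setup elided; both calls are declared in GaussianFactorGraph.h below):

    GaussianFactorGraph graph;                       // assume a well-posed problem has been built
    VectorValues multifrontal = graph.optimize();    // sparse multifrontal elimination
    VectorValues dense = graph.optimizeDensely();    // one dense Cholesky factorization of A'A
    // The two solutions should agree to numerical precision; the dense path
    // can be preferable for small or nearly fully-connected problems.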
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace {
|
||||
JacobianFactor::shared_ptr convertToJacobianFactorPtr(const GaussianFactor::shared_ptr &gf) {
|
||||
|
@ -277,8 +298,8 @@ namespace gtsam {
|
|||
VectorValues GaussianFactorGraph::gradient(const VectorValues& x0) const
|
||||
{
|
||||
VectorValues g = VectorValues::Zero(x0);
|
||||
for(const sharedFactor& Ai_G: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
|
||||
for (const sharedFactor& factor: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(factor);
|
||||
Vector e = Ai->error_vector(x0);
|
||||
Ai->transposeMultiplyAdd(1.0, e, g);
|
||||
}
|
||||
|
@ -289,7 +310,7 @@ namespace gtsam {
|
|||
VectorValues GaussianFactorGraph::gradientAtZero() const {
|
||||
// Zero-out the gradient
|
||||
VectorValues g;
|
||||
for(const sharedFactor& factor: *this) {
|
||||
for (const sharedFactor& factor: *this) {
|
||||
if (!factor) continue;
|
||||
VectorValues gi = factor->gradientAtZero();
|
||||
g.addInPlace_(gi);
|
||||
|
@ -329,8 +350,8 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
Errors GaussianFactorGraph::operator*(const VectorValues& x) const {
|
||||
Errors e;
|
||||
for(const GaussianFactor::shared_ptr& Ai_G: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
|
||||
for (const GaussianFactor::shared_ptr& factor: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(factor);
|
||||
e.push_back((*Ai) * x);
|
||||
}
|
||||
return e;
|
||||
|
@ -339,7 +360,7 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
void GaussianFactorGraph::multiplyHessianAdd(double alpha,
|
||||
const VectorValues& x, VectorValues& y) const {
|
||||
for(const GaussianFactor::shared_ptr& f: *this)
|
||||
for (const GaussianFactor::shared_ptr& f: *this)
|
||||
f->multiplyHessianAdd(alpha, x, y);
|
||||
}
|
||||
|
||||
|
@ -351,8 +372,8 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
void GaussianFactorGraph::multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const {
|
||||
Errors::iterator ei = e;
|
||||
for(const GaussianFactor::shared_ptr& Ai_G: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
|
||||
for (const GaussianFactor::shared_ptr& factor: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(factor);
|
||||
*ei = (*Ai)*x;
|
||||
ei++;
|
||||
}
|
||||
|
@ -361,7 +382,7 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
bool hasConstraints(const GaussianFactorGraph& factors) {
|
||||
typedef JacobianFactor J;
|
||||
for(const GaussianFactor::shared_ptr& factor: factors) {
|
||||
for (const GaussianFactor::shared_ptr& factor: factors) {
|
||||
J::shared_ptr jacobian(boost::dynamic_pointer_cast<J>(factor));
|
||||
if (jacobian && jacobian->get_model() && jacobian->get_model()->isConstrained()) {
|
||||
return true;
|
||||
|
@ -376,8 +397,8 @@ namespace gtsam {
|
|||
VectorValues& x) const {
|
||||
// For each factor add the gradient contribution
|
||||
Errors::const_iterator ei = e.begin();
|
||||
for(const sharedFactor& Ai_G: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
|
||||
for (const sharedFactor& factor: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(factor);
|
||||
Ai->transposeMultiplyAdd(alpha, *(ei++), x);
|
||||
}
|
||||
}
|
||||
|
@ -385,8 +406,8 @@ namespace gtsam {
|
|||
///* ************************************************************************* */
|
||||
//void residual(const GaussianFactorGraph& fg, const VectorValues &x, VectorValues &r) {
|
||||
// Key i = 0 ;
|
||||
// for(const GaussianFactor::shared_ptr& Ai_G: fg) {
|
||||
// JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
|
||||
// for (const GaussianFactor::shared_ptr& factor: fg) {
|
||||
// JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(factor);
|
||||
// r[i] = Ai->getb();
|
||||
// i++;
|
||||
// }
|
||||
|
@ -399,10 +420,10 @@ namespace gtsam {
|
|||
//void multiply(const GaussianFactorGraph& fg, const VectorValues &x, VectorValues &r) {
|
||||
// r.setZero();
|
||||
// Key i = 0;
|
||||
// for(const GaussianFactor::shared_ptr& Ai_G: fg) {
|
||||
// JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
|
||||
// for (const GaussianFactor::shared_ptr& factor: fg) {
|
||||
// JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(factor);
|
||||
// Vector &y = r[i];
|
||||
// for(JacobianFactor::const_iterator j = Ai->begin(); j != Ai->end(); ++j) {
|
||||
// for (JacobianFactor::const_iterator j = Ai->begin(); j != Ai->end(); ++j) {
|
||||
// y += Ai->getA(j) * x[*j];
|
||||
// }
|
||||
// ++i;
|
||||
|
@ -414,9 +435,9 @@ namespace gtsam {
|
|||
{
|
||||
VectorValues x;
|
||||
Errors::const_iterator ei = e.begin();
|
||||
for(const sharedFactor& Ai_G: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
|
||||
for(JacobianFactor::const_iterator j = Ai->begin(); j != Ai->end(); ++j) {
|
||||
for (const sharedFactor& factor: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(factor);
|
||||
for (JacobianFactor::const_iterator j = Ai->begin(); j != Ai->end(); ++j) {
|
||||
// Create the value as a zero vector if it does not exist.
|
||||
pair<VectorValues::iterator, bool> xi = x.tryInsert(*j, Vector());
|
||||
if(xi.second)
|
||||
|
@ -432,8 +453,8 @@ namespace gtsam {
|
|||
Errors GaussianFactorGraph::gaussianErrors(const VectorValues& x) const
|
||||
{
|
||||
Errors e;
|
||||
for(const sharedFactor& Ai_G: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
|
||||
for (const sharedFactor& factor: *this) {
|
||||
JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(factor);
|
||||
e.push_back(Ai->error_vector(x));
|
||||
}
|
||||
return e;
|
||||
|
|
|
@ -246,6 +246,11 @@ namespace gtsam {
|
|||
VectorValues optimize(OptionalOrdering ordering = boost::none,
|
||||
const Eliminate& function = EliminationTraitsType::DefaultEliminate) const;
|
||||
|
||||
/**
|
||||
* Optimize using Eigen's dense Cholesky factorization
|
||||
*/
|
||||
VectorValues optimizeDensely() const;
|
||||
|
||||
/**
|
||||
* Compute the gradient of the energy function,
|
||||
* \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} A x - b \right\Vert^2 \f$,
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
namespace gtsam {
|
||||
|
||||
// Instantiate base classes
|
||||
template class ClusterTree<GaussianBayesTree, GaussianFactorGraph>;
|
||||
template class EliminatableClusterTree<GaussianBayesTree, GaussianFactorGraph>;
|
||||
template class JunctionTree<GaussianBayesTree, GaussianFactorGraph>;
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -26,21 +26,9 @@ namespace gtsam {
|
|||
class GaussianEliminationTree;
|
||||
|
||||
/**
|
||||
* A ClusterTree, i.e., a set of variable clusters with factors, arranged in a tree, with
|
||||
* the additional property that it represents the clique tree associated with a Bayes net.
|
||||
*
|
||||
* In GTSAM a junction tree is an intermediate data structure in multifrontal
|
||||
* variable elimination. Each node is a cluster of factors, along with a
|
||||
* clique of variables that are eliminated all at once. In detail, every node k represents
|
||||
* a clique (maximal fully connected subset) of an associated chordal graph, such as a
|
||||
* chordal Bayes net resulting from elimination.
|
||||
*
|
||||
* The difference with the BayesTree is that a JunctionTree stores factors, whereas a
|
||||
* BayesTree stores conditionals, that are the product of eliminating the factors in the
|
||||
* corresponding JunctionTree cliques.
|
||||
*
|
||||
* The tree structure and elimination method are exactly analogous to the EliminationTree,
|
||||
* except that in the JunctionTree, at each node multiple variables are eliminated at a time.
|
||||
* A junction tree specialized to Gaussian factors, i.e., it is a cluster tree with Gaussian
|
||||
* factors stored in each cluster. It can be eliminated into a Gaussian Bayes tree with the same
|
||||
* structure, which is essentially doing multifrontal sparse matrix factorization.
|
||||
*
|
||||
* \addtogroup Multifrontal
|
||||
* \nosubgrouping
|
||||
|
@ -51,7 +39,7 @@ namespace gtsam {
|
|||
typedef JunctionTree<GaussianBayesTree, GaussianFactorGraph> Base; ///< Base class
|
||||
typedef GaussianJunctionTree This; ///< This class
|
||||
typedef boost::shared_ptr<This> shared_ptr; ///< Shared pointer to this class
|
||||
|
||||
|
||||
/**
|
||||
* Build the elimination tree of a factor graph using pre-computed column structure.
|
||||
* @param factorGraph The factor graph for which to build the elimination tree
|
||||
|
|
|
@ -29,10 +29,10 @@ namespace gtsam {
|
|||
if((DenseIndex)Base::keys_.size() != augmentedInformation.nBlocks() - 1)
|
||||
throw std::invalid_argument(
|
||||
"Error in HessianFactor constructor input. Number of provided keys plus\n"
|
||||
"one for the information vector must equal the number of provided matrix blocks.");
|
||||
"one for the information vector must equal the number of provided matrix blocks. ");
|
||||
|
||||
// Check RHS dimension
|
||||
if(augmentedInformation(0, augmentedInformation.nBlocks() - 1).cols() != 1)
|
||||
if(augmentedInformation.getDim(augmentedInformation.nBlocks() - 1) != 1)
|
||||
throw std::invalid_argument(
|
||||
"Error in HessianFactor constructor input. The last provided matrix block\n"
|
||||
"must be the information vector, but the last provided block had more than one column.");
|
||||
|
|
|
@ -57,10 +57,33 @@ using namespace boost::adaptors;
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
void HessianFactor::Allocate(const Scatter& scatter) {
|
||||
gttic(HessianFactor_Allocate);
|
||||
|
||||
// Allocate with dimensions for each variable plus 1 at the end for the information vector
|
||||
const size_t n = scatter.size();
|
||||
keys_.resize(n);
|
||||
FastVector<DenseIndex> dims(n + 1);
|
||||
DenseIndex slot = 0;
|
||||
for(const SlotEntry& slotentry: scatter) {
|
||||
keys_[slot] = slotentry.key;
|
||||
dims[slot] = slotentry.dimension;
|
||||
++slot;
|
||||
}
|
||||
dims.back() = 1;
|
||||
info_ = SymmetricBlockMatrix(dims);
|
||||
}
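For instance (illustrative only): a Scatter holding two variables of dimensions 3 and 2 yields dims = {3, 2, 1}, one block per variable plus the trailing single column reserved for the information vector.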
|
||||
|
||||
/* ************************************************************************* */
|
||||
HessianFactor::HessianFactor(const Scatter& scatter) {
|
||||
Allocate(scatter);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
HessianFactor::HessianFactor() :
|
||||
info_(cref_list_of<1>(1)) {
|
||||
linearTerm().setZero();
|
||||
assert(info_.rows() == 1);
|
||||
constantTerm() = 0.0;
|
||||
}
|
||||
|
||||
|
@ -70,9 +93,9 @@ HessianFactor::HessianFactor(Key j, const Matrix& G, const Vector& g, double f)
|
|||
if (G.rows() != G.cols() || G.rows() != g.size())
|
||||
throw invalid_argument(
|
||||
"Attempting to construct HessianFactor with inconsistent matrix and/or vector dimensions");
|
||||
info_(0, 0) = G;
|
||||
info_(0, 1) = g;
|
||||
info_(1, 1)(0, 0) = f;
|
||||
info_.setDiagonalBlock(0, G);
|
||||
linearTerm() = g;
|
||||
constantTerm() = f;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -83,9 +106,9 @@ HessianFactor::HessianFactor(Key j, const Vector& mu, const Matrix& Sigma) :
|
|||
if (Sigma.rows() != Sigma.cols() || Sigma.rows() != mu.size())
|
||||
throw invalid_argument(
|
||||
"Attempting to construct HessianFactor with inconsistent matrix and/or vector dimensions");
|
||||
info_(0, 0) = Sigma.inverse(); // G
|
||||
info_(0, 1) = info_(0, 0).selfadjointView() * mu; // g
|
||||
info_(1, 1)(0, 0) = mu.dot(info_(0, 1).knownOffDiagonal().col(0)); // f
|
||||
info_.setDiagonalBlock(0, Sigma.inverse()); // G
|
||||
linearTerm() = info_.diagonalBlock(0) * mu; // g
|
||||
constantTerm() = mu.dot(linearTerm().col(0)); // f
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -94,12 +117,11 @@ HessianFactor::HessianFactor(Key j1, Key j2, const Matrix& G11,
|
|||
double f) :
|
||||
GaussianFactor(cref_list_of<2>(j1)(j2)), info_(
|
||||
cref_list_of<3>(G11.cols())(G22.cols())(1)) {
|
||||
info_(0, 0) = G11;
|
||||
info_(0, 1) = G12;
|
||||
info_(0, 2) = g1;
|
||||
info_(1, 1) = G22;
|
||||
info_(1, 2) = g2;
|
||||
info_(2, 2)(0, 0) = f;
|
||||
info_.setDiagonalBlock(0, G11);
|
||||
info_.setOffDiagonalBlock(0, 1, G12);
|
||||
info_.setDiagonalBlock(1, G22);
|
||||
linearTerm() << g1, g2;
|
||||
constantTerm() = f;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -115,16 +137,14 @@ HessianFactor::HessianFactor(Key j1, Key j2, Key j3, const Matrix& G11,
|
|||
|| G22.cols() != g2.size() || G33.cols() != g3.size())
|
||||
throw invalid_argument(
|
||||
"Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
|
||||
info_(0, 0) = G11;
|
||||
info_(0, 1) = G12;
|
||||
info_(0, 2) = G13;
|
||||
info_(0, 3) = g1;
|
||||
info_(1, 1) = G22;
|
||||
info_(1, 2) = G23;
|
||||
info_(1, 3) = g2;
|
||||
info_(2, 2) = G33;
|
||||
info_(2, 3) = g3;
|
||||
info_(3, 3)(0, 0) = f;
|
||||
info_.setDiagonalBlock(0, G11);
|
||||
info_.setOffDiagonalBlock(0, 1, G12);
|
||||
info_.setOffDiagonalBlock(0, 2, G13);
|
||||
info_.setDiagonalBlock(1, G22);
|
||||
info_.setOffDiagonalBlock(1, 2, G23);
|
||||
info_.setDiagonalBlock(2, G33);
|
||||
linearTerm() << g1, g2, g3;
|
||||
constantTerm() = f;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -174,11 +194,16 @@ HessianFactor::HessianFactor(const std::vector<Key>& js,
|
|||
size_t index = 0;
|
||||
for (size_t i = 0; i < variable_count; ++i) {
|
||||
for (size_t j = i; j < variable_count; ++j) {
|
||||
info_(i, j) = Gs[index++];
|
||||
if (i == j) {
|
||||
info_.setDiagonalBlock(i, Gs[index]);
|
||||
} else {
|
||||
info_.setOffDiagonalBlock(i, j, Gs[index]);
|
||||
}
|
||||
index++;
|
||||
}
|
||||
info_(i, variable_count) = gs[i];
|
||||
info_.setOffDiagonalBlock(i, variable_count, gs[i]);
|
||||
}
|
||||
info_(variable_count, variable_count)(0, 0) = f;
|
||||
constantTerm() = f;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -186,17 +211,17 @@ namespace {
|
|||
void _FromJacobianHelper(const JacobianFactor& jf, SymmetricBlockMatrix& info) {
|
||||
gttic(HessianFactor_fromJacobian);
|
||||
const SharedDiagonal& jfModel = jf.get_model();
|
||||
auto A = jf.matrixObject().full();
|
||||
if (jfModel) {
|
||||
if (jf.get_model()->isConstrained())
|
||||
throw invalid_argument(
|
||||
"Cannot construct HessianFactor from JacobianFactor with constrained noise model");
|
||||
info.full().triangularView() =
|
||||
jf.matrixObject().full().transpose()
|
||||
* (jfModel->invsigmas().array() * jfModel->invsigmas().array()).matrix().asDiagonal()
|
||||
* jf.matrixObject().full();
|
||||
|
||||
auto D = (jfModel->invsigmas().array() * jfModel->invsigmas().array()).matrix().asDiagonal();
|
||||
|
||||
info.setFullMatrix(A.transpose() * D * A);
|
||||
} else {
|
||||
info.full().triangularView() = jf.matrixObject().full().transpose()
|
||||
* jf.matrixObject().full();
|
||||
info.setFullMatrix(A.transpose() * A);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -227,32 +252,13 @@ HessianFactor::HessianFactor(const GaussianFactor& gf) :
|
|||
HessianFactor::HessianFactor(const GaussianFactorGraph& factors,
|
||||
boost::optional<const Scatter&> scatter) {
|
||||
gttic(HessianFactor_MergeConstructor);
|
||||
boost::optional<Scatter> computedScatter;
|
||||
if (!scatter) {
|
||||
computedScatter = Scatter(factors);
|
||||
scatter = computedScatter;
|
||||
}
|
||||
|
||||
// Allocate and copy keys
|
||||
gttic(allocate);
|
||||
// Allocate with dimensions for each variable plus 1 at the end for the information vector
|
||||
const size_t n = scatter->size();
|
||||
keys_.resize(n);
|
||||
FastVector<DenseIndex> dims(n + 1);
|
||||
DenseIndex slot = 0;
|
||||
for(const SlotEntry& slotentry: *scatter) {
|
||||
keys_[slot] = slotentry.key;
|
||||
dims[slot] = slotentry.dimension;
|
||||
++slot;
|
||||
}
|
||||
dims.back() = 1;
|
||||
info_ = SymmetricBlockMatrix(dims);
|
||||
info_.full().triangularView().setZero();
|
||||
gttoc(allocate);
|
||||
Allocate(scatter ? *scatter : Scatter(factors));
|
||||
|
||||
// Form A' * A
|
||||
gttic(update);
|
||||
for(const GaussianFactor::shared_ptr& factor: factors)
|
||||
info_.setZero();
|
||||
for(const auto& factor: factors)
|
||||
if (factor)
|
||||
factor->updateHessian(keys_, &info_);
|
||||
gttoc(update);
|
||||
|
@ -266,7 +272,7 @@ void HessianFactor::print(const std::string& s,
|
|||
for (const_iterator key = begin(); key != end(); ++key)
|
||||
cout << formatter(*key) << "(" << getDim(key) << ") ";
|
||||
cout << "\n";
|
||||
gtsam::print(Matrix(info_.full().selfadjointView()),
|
||||
gtsam::print(Matrix(info_.selfadjointView()),
|
||||
"Augmented information matrix: ");
|
||||
}
|
||||
|
||||
|
@ -281,22 +287,25 @@ bool HessianFactor::equals(const GaussianFactor& lf, double tol) const {
|
|||
|
||||
/* ************************************************************************* */
|
||||
Matrix HessianFactor::augmentedInformation() const {
|
||||
return info_.full().selfadjointView();
|
||||
return info_.selfadjointView();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Eigen::SelfAdjointView<SymmetricBlockMatrix::constBlock, Eigen::Upper>
|
||||
HessianFactor::informationView() const {
|
||||
return info_.selfadjointView(0, size());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Matrix HessianFactor::information() const {
|
||||
return info_.range(0, size(), 0, size()).selfadjointView();
|
||||
return informationView();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
VectorValues HessianFactor::hessianDiagonal() const {
|
||||
VectorValues d;
|
||||
// Loop over all variables
|
||||
for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
|
||||
// Get the diagonal block, and insert its diagonal
|
||||
Matrix B = info_(j, j).selfadjointView();
|
||||
d.insert(keys_[j], B.diagonal());
|
||||
for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
|
||||
d.insert(keys_[j], info_.diagonal(j));
|
||||
}
|
||||
return d;
|
||||
}
|
||||
|
@ -314,8 +323,7 @@ map<Key, Matrix> HessianFactor::hessianBlockDiagonal() const {
|
|||
// Loop over all variables
|
||||
for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
|
||||
// Get the diagonal block, and insert it
|
||||
Matrix B = info_(j, j).selfadjointView();
|
||||
blocks.insert(make_pair(keys_[j], B));
|
||||
blocks.emplace(keys_[j], info_.diagonalBlock(j));
|
||||
}
|
||||
return blocks;
|
||||
}
|
||||
|
@ -334,28 +342,44 @@ std::pair<Matrix, Vector> HessianFactor::jacobian() const {
|
|||
double HessianFactor::error(const VectorValues& c) const {
|
||||
// error 0.5*(f - 2*x'*g + x'*G*x)
|
||||
const double f = constantTerm();
|
||||
if (empty()) {
|
||||
return 0.5 * f;
|
||||
}
|
||||
double xtg = 0, xGx = 0;
|
||||
// extract the relevant subset of the VectorValues
|
||||
// NOTE may not be as efficient
|
||||
const Vector x = c.vector(keys());
|
||||
xtg = x.dot(linearTerm());
|
||||
xGx = x.transpose() * info_.range(0, size(), 0, size()).selfadjointView() * x;
|
||||
xtg = x.dot(linearTerm().col(0));
|
||||
auto AtA = informationView();
|
||||
xGx = x.transpose() * AtA * x;
|
||||
return 0.5 * (f - 2.0 * xtg + xGx);
|
||||
}
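Aside (illustrative only): a hand-worked check of the error formula in the comment above, assuming the single-key constructor shown earlier in this diff. With G = 2, g = 4, f = 10 and x = 1, the error is 0.5*(f - 2*x'*g + x'*G*x) = 0.5*(10 - 8 + 2) = 2.

gtsam::HessianFactor factor(1, 2.0 * gtsam::I_1x1, gtsam::Vector1(4.0), 10.0);
gtsam::VectorValues x;
x.insert(1, gtsam::Vector1(1.0));
const double e = factor.error(x);  // expected: 2.0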
|
||||
|
||||
/* ************************************************************************* */
|
||||
void HessianFactor::updateHessian(const FastVector<Key>& infoKeys,
|
||||
SymmetricBlockMatrix* info) const {
|
||||
SymmetricBlockMatrix* info) const {
|
||||
gttic(updateHessian_HessianFactor);
|
||||
assert(info);
|
||||
// Apply updates to the upper triangle
|
||||
DenseIndex n = size(), N = info->nBlocks() - 1;
|
||||
vector<DenseIndex> slots(n + 1);
|
||||
for (DenseIndex j = 0; j <= n; ++j) {
|
||||
const DenseIndex J = (j == n) ? N : Slot(infoKeys, keys_[j]);
|
||||
DenseIndex nrVariablesInThisFactor = size(), nrBlocksInInfo = info->nBlocks() - 1;
|
||||
vector<DenseIndex> slots(nrVariablesInThisFactor + 1);
|
||||
// Loop over this factor's blocks with indices (i,j)
|
||||
// For every block (i,j), we determine the block (I,J) in info.
|
||||
for (DenseIndex j = 0; j <= nrVariablesInThisFactor; ++j) {
|
||||
const bool rhs = (j == nrVariablesInThisFactor);
|
||||
const DenseIndex J = rhs ? nrBlocksInInfo : Slot(infoKeys, keys_[j]);
|
||||
slots[j] = J;
|
||||
for (DenseIndex i = 0; i <= j; ++i) {
|
||||
const DenseIndex I = slots[i]; // because i<=j, slots[i] is valid.
|
||||
(*info)(I, J) += info_(i, j);
|
||||
const DenseIndex I = slots[i]; // because i<=j, slots[i] is valid.
|
||||
|
||||
if (i == j) {
|
||||
assert(I == J);
|
||||
info->updateDiagonalBlock(I, info_.diagonalBlock(i));
|
||||
} else {
|
||||
assert(i < j);
|
||||
assert(I != J);
|
||||
info->updateOffDiagonalBlock(I, J, info_.aboveDiagonalBlock(i, j));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
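Aside: a hypothetical, self-contained sketch of the slot lookup performed in the loop above; the name computeSlots is illustrative, not GTSAM API. Each key of this factor is located in the target matrix's ordered key list, and the trailing RHS block always maps to the last block of the target.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<std::ptrdiff_t> computeSlots(const std::vector<std::uint64_t>& factorKeys,
                                         const std::vector<std::uint64_t>& infoKeys) {
  std::vector<std::ptrdiff_t> slots(factorKeys.size() + 1);
  for (std::size_t j = 0; j < factorKeys.size(); ++j) {
    // Position of this factor's j-th key inside the larger information matrix
    const auto it = std::find(infoKeys.begin(), infoKeys.end(), factorKeys[j]);
    slots[j] = std::distance(infoKeys.begin(), it);
  }
  slots.back() = static_cast<std::ptrdiff_t>(infoKeys.size());  // RHS block maps to last block
  return slots;
}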
|
||||
|
@ -363,8 +387,8 @@ void HessianFactor::updateHessian(const FastVector<Key>& infoKeys,
|
|||
/* ************************************************************************* */
|
||||
GaussianFactor::shared_ptr HessianFactor::negate() const {
|
||||
shared_ptr result = boost::make_shared<This>(*this);
|
||||
result->info_.full().triangularView() =
|
||||
-result->info_.full().triangularView().nestedExpression(); // Negate the information matrix of the result
|
||||
// Negate the information matrix of the result
|
||||
result->info_.negate();
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -386,12 +410,13 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
|
|||
Vector xj = x.at(keys_[j]);
|
||||
DenseIndex i = 0;
|
||||
for (; i < j; ++i)
|
||||
y[i] += info_(i, j).knownOffDiagonal() * xj;
|
||||
y[i] += info_.aboveDiagonalBlock(i, j) * xj;
|
||||
|
||||
// blocks on the diagonal are only half
|
||||
y[i] += info_(j, j).selfadjointView() * xj;
|
||||
y[i] += info_.diagonalBlock(j) * xj;
|
||||
// for below diagonal, we take transpose block from upper triangular part
|
||||
for (i = j + 1; i < (DenseIndex) size(); ++i)
|
||||
y[i] += info_(i, j).knownOffDiagonal() * xj;
|
||||
y[i] += info_.aboveDiagonalBlock(j, i).transpose() * xj;
|
||||
}
|
||||
|
||||
// copy to yvalues
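Aside: the loop above only ever reads the stored upper triangle, mirroring it for the lower part. With plain Eigen the same symmetric product can be sketched as follows (illustrative, not GTSAM API).

#include <Eigen/Dense>

// upperH holds data only in its upper triangle; selfadjointView supplies the mirror.
Eigen::VectorXd symmetricMultiply(const Eigen::MatrixXd& upperH, const Eigen::VectorXd& x) {
  return upperH.selfadjointView<Eigen::Upper>() * x;
}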
|
||||
|
@ -411,7 +436,7 @@ VectorValues HessianFactor::gradientAtZero() const {
|
|||
VectorValues g;
|
||||
size_t n = size();
|
||||
for (size_t j = 0; j < n; ++j)
|
||||
g.insert(keys_[j], -info_(j, n).knownOffDiagonal());
|
||||
g.insert(keys_[j], -info_.aboveDiagonalBlock(j, n));
|
||||
return g;
|
||||
}
|
||||
|
||||
|
@ -424,31 +449,80 @@ void HessianFactor::gradientAtZero(double* d) const {
|
|||
|
||||
/* ************************************************************************* */
|
||||
Vector HessianFactor::gradient(Key key, const VectorValues& x) const {
|
||||
Factor::const_iterator i = find(key);
|
||||
const Factor::const_iterator it_i = find(key);
|
||||
const DenseIndex I = std::distance(begin(), it_i);
|
||||
|
||||
// Sum over G_ij*xj for all xj connecting to xi
|
||||
Vector b = Vector::Zero(x.at(key).size());
|
||||
for (Factor::const_iterator j = begin(); j != end(); ++j) {
|
||||
for (Factor::const_iterator it_j = begin(); it_j != end(); ++it_j) {
|
||||
const DenseIndex J = std::distance(begin(), it_j);
|
||||
|
||||
// Obtain Gij from the Hessian factor
|
||||
// Hessian factor only stores an upper triangular matrix, so be careful when i>j
|
||||
Matrix Gij;
|
||||
if (i > j) {
|
||||
Matrix Gji = info(j, i);
|
||||
Gij = Gji.transpose();
|
||||
} else {
|
||||
Gij = info(i, j);
|
||||
}
|
||||
const Matrix Gij = info_.block(I, J);
|
||||
// Accumulate Gij*xj to gradf
|
||||
b += Gij * x.at(*j);
|
||||
b += Gij * x.at(*it_j);
|
||||
}
|
||||
// Subtract the linear term gi
|
||||
b += -linearTerm(i);
|
||||
b += -linearTerm(it_i);
|
||||
return b;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::pair<boost::shared_ptr<GaussianConditional>,
|
||||
boost::shared_ptr<HessianFactor> > EliminateCholesky(
|
||||
const GaussianFactorGraph& factors, const Ordering& keys) {
|
||||
boost::shared_ptr<GaussianConditional> HessianFactor::eliminateCholesky(const Ordering& keys) {
|
||||
gttic(HessianFactor_eliminateCholesky);
|
||||
|
||||
GaussianConditional::shared_ptr conditional;
|
||||
|
||||
try {
|
||||
// Do dense elimination
|
||||
size_t nFrontals = keys.size();
|
||||
assert(nFrontals <= size());
|
||||
info_.choleskyPartial(nFrontals);
|
||||
|
||||
// TODO(frank): pre-allocate GaussianConditional and write into it
|
||||
const VerticalBlockMatrix Ab = info_.split(nFrontals);
|
||||
conditional = boost::make_shared<GaussianConditional>(keys_, nFrontals, Ab);
|
||||
|
||||
// Erase the eliminated keys in this factor
|
||||
keys_.erase(begin(), begin() + nFrontals);
|
||||
} catch (const CholeskyFailed& e) {
|
||||
throw IndeterminantLinearSystemException(keys.front());
|
||||
}
|
||||
|
||||
// Return result
|
||||
return conditional;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
VectorValues HessianFactor::solve() {
|
||||
gttic(HessianFactor_solve);
|
||||
|
||||
// Do Cholesky Factorization
|
||||
const size_t n = size();
|
||||
assert(info_.nBlocks() == n + 1);
|
||||
info_.choleskyPartial(n);
|
||||
auto R = info_.triangularView(0, n);
|
||||
auto eta = linearTerm();
|
||||
|
||||
// Solve
|
||||
const Vector solution = R.solve(eta);
|
||||
|
||||
// Parse into VectorValues
|
||||
VectorValues delta;
|
||||
std::size_t offset = 0;
|
||||
for (DenseIndex j = 0; j < (DenseIndex)n; ++j) {
|
||||
const DenseIndex dim = info_.getDim(j);
|
||||
delta.insert(keys_[j], solution.segment(offset, dim));
|
||||
offset += dim;
|
||||
}
|
||||
|
||||
return delta;
|
||||
}
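Aside: a hedged usage sketch of the new in-place solve(); for a single-variable factor it solves G*delta = g. With G = 2 and g = 4 the expected solution is delta = 2 (hand-worked, illustrative only).

gtsam::HessianFactor factor(1, 2.0 * gtsam::I_1x1, gtsam::Vector1(4.0), 10.0);
gtsam::VectorValues delta = factor.solve();  // note: modifies the factor in place
// delta.at(1) -> [2.0]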
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<HessianFactor> >
|
||||
EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys) {
|
||||
gttic(EliminateCholesky);
|
||||
|
||||
// Build joint factor
|
||||
|
@ -459,23 +533,11 @@ std::pair<boost::shared_ptr<GaussianConditional>,
|
|||
} catch (std::invalid_argument&) {
|
||||
throw InvalidDenseElimination(
|
||||
"EliminateCholesky was called with a request to eliminate variables that are not\n"
|
||||
"involved in the provided factors.");
|
||||
"involved in the provided factors.");
|
||||
}
|
||||
|
||||
// Do dense elimination
|
||||
GaussianConditional::shared_ptr conditional;
|
||||
try {
|
||||
size_t numberOfKeysToEliminate = keys.size();
|
||||
VerticalBlockMatrix Ab = jointFactor->info_.choleskyPartial(
|
||||
numberOfKeysToEliminate);
|
||||
conditional = boost::make_shared<GaussianConditional>(jointFactor->keys(),
|
||||
numberOfKeysToEliminate, Ab);
|
||||
// Erase the eliminated keys in the remaining factor
|
||||
jointFactor->keys_.erase(jointFactor->begin(),
|
||||
jointFactor->begin() + numberOfKeysToEliminate);
|
||||
} catch (const CholeskyFailed& e) {
|
||||
throw IndeterminantLinearSystemException(keys.front());
|
||||
}
|
||||
auto conditional = jointFactor->eliminateCholesky(keys);
|
||||
|
||||
// Return result
|
||||
return make_pair(conditional, jointFactor);
|
||||
|
|
|
@ -35,12 +35,6 @@ namespace gtsam {
|
|||
class GaussianBayesNet;
|
||||
class GaussianFactorGraph;
|
||||
|
||||
GTSAM_EXPORT std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<GaussianFactor> >
|
||||
EliminatePreferCholesky(const GaussianFactorGraph& factors, const Ordering& keys);
|
||||
|
||||
GTSAM_EXPORT std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<HessianFactor> >
|
||||
EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys);
|
||||
|
||||
/**
|
||||
* @brief A Gaussian factor using the canonical parameters (information form)
|
||||
*
|
||||
|
@ -206,7 +200,9 @@ namespace gtsam {
|
|||
* @param variable An iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
*/
|
||||
virtual DenseIndex getDim(const_iterator variable) const { return info_(variable-this->begin(), 0).rows(); }
|
||||
virtual DenseIndex getDim(const_iterator variable) const {
|
||||
return info_.getDim(std::distance(begin(), variable));
|
||||
}
|
||||
|
||||
/** Return the number of columns and rows of the Hessian matrix, including the information vector. */
|
||||
size_t rows() const { return info_.rows(); }
|
||||
|
@ -217,80 +213,54 @@ namespace gtsam {
|
|||
* @return a HessianFactor with negated Hessian matrices
|
||||
*/
|
||||
virtual GaussianFactor::shared_ptr negate() const;
|
||||
|
||||
|
||||
/** Check if the factor is empty. TODO: How should this be defined? */
|
||||
virtual bool empty() const { return size() == 0 /*|| rows() == 0*/; }
|
||||
|
||||
/** Return a view of the block at (j1,j2) of the <em>upper-triangular part</em> of the
|
||||
* information matrix \f$ H \f$, no data is copied. See HessianFactor class documentation
|
||||
* above to explain that only the upper-triangular part of the information matrix is stored
|
||||
* and returned by this function.
|
||||
* @param j1 Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @param j2 Which block column to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return A view of the requested block, not a copy.
|
||||
/** Return the constant term \f$ f \f$ as described above
|
||||
* @return The constant term \f$ f \f$
|
||||
*/
|
||||
constBlock info(const_iterator j1, const_iterator j2) const { return info_(j1-begin(), j2-begin()); }
|
||||
|
||||
/** Return a view of the block at (j1,j2) of the <em>upper-triangular part</em> of the
|
||||
* information matrix \f$ H \f$, no data is copied. See HessianFactor class documentation
|
||||
* above to explain that only the upper-triangular part of the information matrix is stored
|
||||
* and returned by this function.
|
||||
* @param j1 Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @param j2 Which block column to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return A view of the requested block, not a copy.
|
||||
*/
|
||||
Block info(iterator j1, iterator j2) { return info_(j1-begin(), j2-begin()); }
|
||||
|
||||
/** Return the <em>upper-triangular part</em> of the full *augmented* information matrix,
|
||||
* as described above. See HessianFactor class documentation above to explain that only the
|
||||
* upper-triangular part of the information matrix is stored and returned by this function.
|
||||
*/
|
||||
SymmetricBlockMatrix::constBlock info() const { return info_.full(); }
|
||||
|
||||
/** Return the <em>upper-triangular part</em> of the full *augmented* information matrix,
|
||||
* as described above. See HessianFactor class documentation above to explain that only the
|
||||
* upper-triangular part of the information matrix is stored and returned by this function.
|
||||
*/
|
||||
SymmetricBlockMatrix::Block info() { return info_.full(); }
|
||||
double constantTerm() const {
|
||||
const auto view = info_.diagonalBlock(size());
|
||||
return view(0, 0);
|
||||
}
|
||||
|
||||
/** Return the constant term \f$ f \f$ as described above
|
||||
* @return The constant term \f$ f \f$
|
||||
*/
|
||||
double constantTerm() const { return info_(this->size(), this->size())(0,0); }
|
||||
|
||||
/** Return the constant term \f$ f \f$ as described above
|
||||
* @return The constant term \f$ f \f$
|
||||
*/
|
||||
double& constantTerm() { return info_(this->size(), this->size())(0,0); }
|
||||
double& constantTerm() { return info_.diagonalBlock(size())(0, 0); }
|
||||
|
||||
/** Return the part of linear term \f$ g \f$ as described above corresponding to the requested variable.
|
||||
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
constBlock::OffDiagonal::ColXpr linearTerm(const_iterator j) const {
|
||||
return info_(j-begin(), size()).knownOffDiagonal().col(0); }
|
||||
|
||||
/** Return the part of linear term \f$ g \f$ as described above corresponding to the requested variable.
|
||||
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
Block::OffDiagonal::ColXpr linearTerm(iterator j) {
|
||||
return info_(j-begin(), size()).knownOffDiagonal().col(0); }
|
||||
SymmetricBlockMatrix::constBlock linearTerm(const_iterator j) const {
|
||||
assert(!empty());
|
||||
return info_.aboveDiagonalBlock(j - begin(), size());
|
||||
}
|
||||
|
||||
/** Return the complete linear term \f$ g \f$ as described above.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
constBlock::OffDiagonal::ColXpr linearTerm() const {
|
||||
return info_.range(0, this->size(), this->size(), this->size() + 1).knownOffDiagonal().col(0); }
|
||||
SymmetricBlockMatrix::constBlock linearTerm() const {
|
||||
assert(!empty());
|
||||
// get the last column (except the bottom right block)
|
||||
return info_.aboveDiagonalRange(0, size(), size(), size() + 1);
|
||||
}
|
||||
|
||||
/** Return the complete linear term \f$ g \f$ as described above.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
Block::OffDiagonal::ColXpr linearTerm() {
|
||||
return info_.range(0, this->size(), this->size(), this->size() + 1).knownOffDiagonal().col(0); }
|
||||
|
||||
SymmetricBlockMatrix::Block linearTerm() {
|
||||
assert(!empty());
|
||||
return info_.aboveDiagonalRange(0, size(), size(), size() + 1);
|
||||
}
|
||||
|
||||
/// Return underlying information matrix.
|
||||
const SymmetricBlockMatrix& info() const { return info_; }
|
||||
|
||||
/// Return non-const information matrix.
|
||||
/// TODO(gareth): Review the sanity of having non-const access to this.
|
||||
SymmetricBlockMatrix& info() { return info_; }
|
||||
|
||||
/** Return the augmented information matrix represented by this GaussianFactor.
|
||||
* The augmented information matrix contains the information matrix with an
|
||||
* additional column holding the information vector, and an additional row
|
||||
|
@ -308,6 +278,9 @@ namespace gtsam {
|
|||
*/
|
||||
virtual Matrix augmentedInformation() const;
|
||||
|
||||
/// Return self-adjoint view onto the information matrix (NOT augmented).
|
||||
Eigen::SelfAdjointView<SymmetricBlockMatrix::constBlock, Eigen::Upper> informationView() const;
|
||||
|
||||
/** Return the non-augmented information matrix represented by this
|
||||
* GaussianFactor.
|
||||
*/
|
||||
|
@ -322,11 +295,7 @@ namespace gtsam {
|
|||
/// Return the block diagonal of the Hessian for this factor
|
||||
virtual std::map<Key,Matrix> hessianBlockDiagonal() const;
|
||||
|
||||
/**
|
||||
* Return (dense) matrix associated with factor
|
||||
* @param ordering of variables needed for matrix column order
|
||||
* @param set weight to true to bake in the weights
|
||||
*/
|
||||
/// Return (dense) matrix associated with factor
|
||||
virtual std::pair<Matrix, Vector> jacobian() const;
|
||||
|
||||
/**
|
||||
|
@ -336,16 +305,21 @@ namespace gtsam {
|
|||
*/
|
||||
virtual Matrix augmentedJacobian() const;
|
||||
|
||||
/** Return the full augmented Hessian matrix of this factor as a SymmetricBlockMatrix object. */
|
||||
const SymmetricBlockMatrix& matrixObject() const { return info_; }
|
||||
|
||||
/** Update an information matrix by adding the information corresponding to this factor
|
||||
* (used internally during elimination).
|
||||
* @param keys The ordered vector of keys for the information matrix to be updated
|
||||
* @param info The information matrix to be updated
|
||||
*/
|
||||
void updateHessian(const FastVector<Key>& keys, SymmetricBlockMatrix* info) const;
|
||||
|
||||
/** Update another Hessian factor
|
||||
* @param other the HessianFactor to be updated
|
||||
*/
|
||||
void updateHessian(HessianFactor* other) const {
|
||||
assert(other);
|
||||
updateHessian(other->keys_, &other->info_);
|
||||
}
|
||||
|
||||
/** y += alpha * A'*A*x */
|
||||
void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;
|
||||
|
||||
|
@ -362,44 +336,33 @@ namespace gtsam {
|
|||
Vector gradient(Key key, const VectorValues& x) const;
|
||||
|
||||
/**
|
||||
* Densely partially eliminate with Cholesky factorization. JacobianFactors are
|
||||
* left-multiplied with their transpose to form the Hessian using the conversion constructor
|
||||
* HessianFactor(const JacobianFactor&).
|
||||
*
|
||||
* If any factors contain constrained noise models, this function will fail because our current
|
||||
* implementation cannot handle constrained noise models in Cholesky factorization. The
|
||||
* function EliminatePreferCholesky() automatically does QR instead when this is the case.
|
||||
*
|
||||
* Variables are eliminated in the order specified in \c keys.
|
||||
*
|
||||
* @param factors Factors to combine and eliminate
|
||||
* @param keys The variables to eliminate and their elimination ordering
|
||||
* @return The conditional and remaining factor
|
||||
*
|
||||
* \addtogroup LinearSolving */
|
||||
friend GTSAM_EXPORT std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<HessianFactor> >
|
||||
EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys);
|
||||
* In-place elimination that returns a conditional on (ordered) keys specified, and leaves
|
||||
* this factor to be on the remaining keys (separator) only. Does dense partial Cholesky.
|
||||
*/
|
||||
boost::shared_ptr<GaussianConditional> eliminateCholesky(const Ordering& keys);
|
||||
|
||||
/**
|
||||
* Densely partially eliminate with Cholesky factorization. JacobianFactors are
|
||||
* left-multiplied with their transpose to form the Hessian using the conversion constructor
|
||||
* HessianFactor(const JacobianFactor&).
|
||||
*
|
||||
* This function will fall back on QR factorization for any cliques containing JacobianFactor's
|
||||
* with constrained noise models.
|
||||
*
|
||||
* Variables are eliminated in the order specified in \c keys.
|
||||
*
|
||||
* @param factors Factors to combine and eliminate
|
||||
* @param keys The variables to eliminate and their elimination ordering
|
||||
* @return The conditional and remaining factor
|
||||
*
|
||||
* \addtogroup LinearSolving */
|
||||
friend GTSAM_EXPORT std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<GaussianFactor> >
|
||||
EliminatePreferCholesky(const GaussianFactorGraph& factors, const Ordering& keys);
|
||||
/// Solve the system A'*A delta = A'*b in-place, return delta as VectorValues
|
||||
VectorValues solve();
|
||||
|
||||
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||
/// @name Deprecated
|
||||
/// @{
|
||||
const SymmetricBlockMatrix& matrixObject() const { return info_; }
|
||||
/// @}
|
||||
#endif
|
||||
|
||||
private:
|
||||
|
||||
/// Allocate for given scatter pattern
|
||||
void Allocate(const Scatter& scatter);
|
||||
|
||||
/// Constructor with given scatter pattern, allocating but not initializing storage.
|
||||
HessianFactor(const Scatter& scatter);
|
||||
|
||||
friend class NonlinearFactorGraph;
|
||||
friend class NonlinearClusterTree;
|
||||
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
template<class ARCHIVE>
|
||||
|
@ -409,6 +372,43 @@ namespace gtsam {
|
|||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Densely partially eliminate with Cholesky factorization. JacobianFactors are
|
||||
* left-multiplied with their transpose to form the Hessian using the conversion constructor
|
||||
* HessianFactor(const JacobianFactor&).
|
||||
*
|
||||
* If any factors contain constrained noise models, this function will fail because our current
|
||||
* implementation cannot handle constrained noise models in Cholesky factorization. The
|
||||
* function EliminatePreferCholesky() automatically does QR instead when this is the case.
|
||||
*
|
||||
* Variables are eliminated in the order specified in \c keys.
|
||||
*
|
||||
* @param factors Factors to combine and eliminate
|
||||
* @param keys The variables to eliminate and their elimination ordering
|
||||
* @return The conditional and remaining factor
|
||||
*
|
||||
* \addtogroup LinearSolving */
|
||||
GTSAM_EXPORT std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<HessianFactor> >
|
||||
EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys);
|
||||
|
||||
/**
|
||||
* Densely partially eliminate with Cholesky factorization. JacobianFactors are
|
||||
* left-multiplied with their transpose to form the Hessian using the conversion constructor
|
||||
* HessianFactor(const JacobianFactor&).
|
||||
*
|
||||
* This function will fall back on QR factorization for any cliques containing JacobianFactor's
|
||||
* with constrained noise models.
|
||||
*
|
||||
* Variables are eliminated in the order specified in \c keys.
|
||||
*
|
||||
* @param factors Factors to combine and eliminate
|
||||
* @param keys The variables to eliminate and their elimination ordering
|
||||
* @return The conditional and remaining factor
|
||||
*
|
||||
* \addtogroup LinearSolving */
|
||||
GTSAM_EXPORT std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<GaussianFactor> >
|
||||
EliminatePreferCholesky(const GaussianFactorGraph& factors, const Ordering& keys);
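Aside: a minimal usage sketch of the elimination entry points declared above, assuming the GaussianFactorGraph and Ordering APIs from GTSAM 4 (illustrative only).

gtsam::GaussianFactorGraph graph;
graph.add(1, gtsam::I_1x1, gtsam::Vector1(1.0), gtsam::noiseModel::Unit::Create(1));
gtsam::Ordering ordering;
ordering.push_back(1);
auto result = gtsam::EliminateCholesky(graph, ordering);
// result.first  : GaussianConditional on key 1
// result.second : remaining HessianFactor on the (empty) separator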
|
||||
|
||||
/// traits
|
||||
template<>
|
||||
struct traits<HessianFactor> : public Testable<HessianFactor> {};
|
||||
@ -104,10 +104,10 @@ JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
|
|||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const HessianFactor& factor) :
|
||||
Base(factor), Ab_(
|
||||
VerticalBlockMatrix::LikeActiveViewOf(factor.matrixObject(),
|
||||
VerticalBlockMatrix::LikeActiveViewOf(factor.info(),
|
||||
factor.rows())) {
|
||||
// Copy Hessian into our matrix and then do in-place Cholesky
|
||||
Ab_.full() = factor.matrixObject().full();
|
||||
Ab_.full() = factor.info().selfadjointView();
|
||||
|
||||
// Do Cholesky to get a Jacobian
|
||||
size_t maxrank;
|
||||
|
@ -532,10 +532,10 @@ void JacobianFactor::updateHessian(const FastVector<Key>& infoKeys,
|
|||
// Fill off-diagonal blocks with Ai'*Aj
|
||||
for (DenseIndex i = 0; i < j; ++i) {
|
||||
const DenseIndex I = slots[i]; // because i<j, slots[i] is valid.
|
||||
(*info)(I, J).knownOffDiagonal() += Ab_(i).transpose() * Ab_j;
|
||||
info->updateOffDiagonalBlock(I, J, Ab_(i).transpose() * Ab_j);
|
||||
}
|
||||
// Fill diagonal block with Aj'*Aj
|
||||
(*info)(J, J).selfadjointView().rankUpdate(Ab_j.transpose());
|
||||
info->diagonalBlock(J).rankUpdate(Ab_j.transpose());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -37,6 +37,11 @@ namespace gtsam {
|
|||
class Ordering;
|
||||
class JacobianFactor;
|
||||
|
||||
/**
|
||||
* Multiply all factors and eliminate the given keys from the resulting factor using a QR
|
||||
* variant that handles constraints (zero sigmas). Computation happens in noiseModel::Gaussian::QR
|
||||
* Returns a conditional on those keys, and a new factor on the separator.
|
||||
*/
|
||||
GTSAM_EXPORT std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<JacobianFactor> >
|
||||
EliminateQR(const GaussianFactorGraph& factors, const Ordering& keys);
|
||||
|
||||
|
@ -178,12 +183,12 @@ namespace gtsam {
|
|||
* which in fact stores an augmented information matrix.
|
||||
*/
|
||||
virtual Matrix augmentedInformation() const;
|
||||
|
||||
|
||||
/** Return the non-augmented information matrix represented by this
|
||||
* GaussianFactor.
|
||||
*/
|
||||
virtual Matrix information() const;
|
||||
|
||||
|
||||
/// Return the diagonal of the Hessian for this factor
|
||||
virtual VectorValues hessianDiagonal() const;
|
||||
|
||||
|
@ -197,7 +202,7 @@ namespace gtsam {
|
|||
* @brief Returns (dense) A,b pair associated with factor, bakes in the weights
|
||||
*/
|
||||
virtual std::pair<Matrix, Vector> jacobian() const;
|
||||
|
||||
|
||||
/**
|
||||
* @brief Returns (dense) A,b pair associated with factor, does not bake in weights
|
||||
*/
|
||||
|
@ -319,7 +324,7 @@ namespace gtsam {
|
|||
|
||||
/** set noiseModel correctly */
|
||||
void setModel(bool anyConstrained, const Vector& sigmas);
|
||||
|
||||
|
||||
/**
|
||||
* Densely partially eliminate with QR factorization, this is usually provided as an argument to
|
||||
* one of the factor graph elimination functions (see EliminateableFactorGraph). HessianFactors
|
||||
|
@ -348,7 +353,7 @@ namespace gtsam {
|
|||
/// Internal function to fill blocks and set dimensions
|
||||
template<typename TERMS>
|
||||
void fillTerms(const TERMS& terms, const Vector& b, const SharedDiagonal& noiseModel);
|
||||
|
||||
|
||||
private:
|
||||
|
||||
/** Unsafe Constructor that creates an uninitialized Jacobian of right size
|
||||
@ -886,6 +886,30 @@ DCS::shared_ptr DCS::Create(double c, const ReweightScheme reweight) {
|
|||
return shared_ptr(new DCS(c, reweight));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// L2WithDeadZone
|
||||
/* ************************************************************************* */
|
||||
|
||||
L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
|
||||
if (k_ <= 0) {
|
||||
throw runtime_error("mEstimator L2WithDeadZone takes only positive double in constructor.");
|
||||
}
|
||||
}
|
||||
|
||||
void L2WithDeadZone::print(const std::string &s) const {
|
||||
std::cout << s << ": L2WithDeadZone (" << k_ << ")" << std::endl;
|
||||
}
|
||||
|
||||
bool L2WithDeadZone::equals(const Base &expected, double tol) const {
|
||||
const L2WithDeadZone* p = dynamic_cast<const L2WithDeadZone*>(&expected);
|
||||
if (p == NULL) return false;
|
||||
return fabs(k_ - p->k_) < tol;
|
||||
}
|
||||
|
||||
L2WithDeadZone::shared_ptr L2WithDeadZone::Create(double k, const ReweightScheme reweight) {
|
||||
return shared_ptr(new L2WithDeadZone(k, reweight));
|
||||
}
|
||||
|
||||
} // namespace mEstimator
|
||||
|
||||
/* ************************************************************************* */
|
||||
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -662,7 +662,29 @@ namespace gtsam {
|
|||
Base(const ReweightScheme reweight = Block):reweight_(reweight) {}
|
||||
virtual ~Base() {}
|
||||
|
||||
/// robust error function to implement
|
||||
/*
|
||||
* This method is responsible for returning the total penalty for a given amount of error.
|
||||
* For example, this method is responsible for implementing the quadratic function for an
|
||||
* L2 penalty, the absolute value function for an L1 penalty, etc.
|
||||
*
|
||||
* TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes
|
||||
* implement a residual function, and GTSAM calls the weight function to evaluate the
|
||||
* total penalty, rather than calling the residual function. The weight function should be
|
||||
* used during iteratively reweighted least squares optimization, but should not be used to
|
||||
* evaluate the total penalty. The long-term solution is for all mEstimators to implement
|
||||
* both a weight and a residual function, and for GTSAM to call the residual function when
|
||||
* evaluating the total penalty. But for now, I'm leaving this residual method with a
* trivial default implementation that returns 0, so the existing mEstimators can
* inherit this fallback behavior.
*/
virtual double residual(double error) const { return 0; }
|
||||
|
||||
/*
|
||||
* This method is responsible for returning the weight function for a given amount of error.
|
||||
* The weight function is related to the analytic derivative of the residual function. See
|
||||
* http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node24.html
|
||||
* for details. This method is required when optimizing cost functions with robust penalties
|
||||
* using iteratively re-weighted least squares.
|
||||
*/
|
||||
virtual double weight(double error) const = 0;
|
||||
|
||||
virtual void print(const std::string &s) const = 0;
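Aside: an illustrative (non-GTSAM) pairing of residual and weight for the familiar Huber penalty with threshold k, showing the relationship described above: weight(e) = psi(e)/e, where psi is the derivative of the residual.

#include <cmath>

double huberResidual(double e, double k) {
  const double a = std::abs(e);
  return (a <= k) ? 0.5 * e * e : k * (a - 0.5 * k);  // quadratic core, linear tails
}

double huberWeight(double e, double k) {
  const double a = std::abs(e);
  return (a <= k) ? 1.0 : k / a;  // reweighting factor used in IRLS
}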
|
||||
|
@ -916,6 +938,45 @@ namespace gtsam {
|
|||
}
|
||||
};
|
||||
|
||||
/// L2WithDeadZone implements a standard L2 penalty, but with a dead zone of width 2*k,
|
||||
/// centered at the origin. The resulting penalty within the dead zone is always zero, and
|
||||
/// grows quadratically outside the dead zone. In this sense, the L2WithDeadZone penalty is
|
||||
/// "robust to inliers", rather than being robust to outliers. This penalty can be used to
|
||||
/// create barrier functions in a general way.
|
||||
class GTSAM_EXPORT L2WithDeadZone : public Base {
|
||||
protected:
|
||||
double k_;
|
||||
|
||||
public:
|
||||
typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;
|
||||
|
||||
L2WithDeadZone(double k, const ReweightScheme reweight = Block);
|
||||
double residual(double error) const {
|
||||
const double abs_error = fabs(error);
|
||||
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
|
||||
}
|
||||
double weight(double error) const {
|
||||
// note that this code is slightly uglier than above, because there are three distinct
|
||||
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
|
||||
// cases (deadzone, non-deadzone) above.
|
||||
if (fabs(error) <= k_) return 0.0;
|
||||
else if (error > k_) return (-k_+error)/error;
|
||||
else return (k_+error)/error;
|
||||
}
|
||||
void print(const std::string &s) const;
|
||||
bool equals(const Base& expected, double tol=1e-8) const;
|
||||
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
|
||||
|
||||
private:
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
template<class ARCHIVE>
|
||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
||||
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar & BOOST_SERIALIZATION_NVP(k_);
|
||||
}
|
||||
};
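Aside: a hand-worked numeric sketch of the dead-zone behaviour described above, with k = 1 (illustrative only; the values follow directly from the residual and weight definitions in this class).

auto dz = gtsam::noiseModel::mEstimator::L2WithDeadZone::Create(1.0);
double r_in  = dz->residual(0.5);  // 0.0, inside the dead zone
double r_out = dz->residual(3.0);  // 0.5*(1-3)^2 = 2.0
double w_out = dz->weight(3.0);    // (-1+3)/3 = 2.0/3.0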
|
||||
|
||||
} ///\namespace mEstimator
|
||||
|
||||
/**
|
||||
|
@ -976,7 +1037,9 @@ namespace gtsam {
|
|||
{ throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); }
|
||||
inline virtual double distance(const Vector& v) const
|
||||
{ return this->whiten(v).squaredNorm(); }
|
||||
|
||||
// TODO(mike): fold the use of the m-estimator residual(...) function into distance(...)
|
||||
inline virtual double distance_non_whitened(const Vector& v) const
|
||||
{ return robust_->residual(v.norm()); }
|
||||
// TODO: these are really robust iterated re-weighting support functions
|
||||
virtual void WhitenSystem(Vector& b) const;
|
||||
virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const;
|
||||
|
@ -997,7 +1060,7 @@ namespace gtsam {
|
|||
ar & boost::serialization::make_nvp("noise_", const_cast<NoiseModel::shared_ptr&>(noise_));
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// Helper function
|
||||
GTSAM_EXPORT boost::optional<Vector> checkIfDiagonal(const Matrix M);
|
||||
|
||||
@ -125,12 +125,12 @@ public:
|
|||
const double* xj = x + key * D;
|
||||
DenseIndex i = 0;
|
||||
for (; i < j; ++i)
|
||||
y_[i] += info_(i, j).knownOffDiagonal() * ConstDMap(xj);
|
||||
y_[i] += info_.aboveDiagonalBlock(i, j) * ConstDMap(xj);
|
||||
// blocks on the diagonal are only half
|
||||
y_[i] += info_(j, j).selfadjointView() * ConstDMap(xj);
|
||||
y_[i] += info_.diagonalBlock(j) * ConstDMap(xj);
|
||||
// for below diagonal, we take transpose block from upper triangular part
|
||||
for (i = j + 1; i < (DenseIndex) size(); ++i)
|
||||
y_[i] += info_(i, j).knownOffDiagonal() * ConstDMap(xj);
|
||||
y_[i] += info_.aboveDiagonalBlock(j, i).transpose() * ConstDMap(xj);
|
||||
}
|
||||
|
||||
// copy to yvalues
|
||||
|
@ -155,16 +155,16 @@ public:
|
|||
for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
|
||||
DenseIndex i = 0;
|
||||
for (; i < j; ++i)
|
||||
y_[i] += info_(i, j).knownOffDiagonal()
|
||||
y_[i] += info_.aboveDiagonalBlock(i, j)
|
||||
* ConstDMap(x + offsets[keys_[j]],
|
||||
offsets[keys_[j] + 1] - offsets[keys_[j]]);
|
||||
// blocks on the diagonal are only half
|
||||
y_[i] += info_(j, j).selfadjointView()
|
||||
y_[i] += info_.diagonalBlock(j)
|
||||
* ConstDMap(x + offsets[keys_[j]],
|
||||
offsets[keys_[j] + 1] - offsets[keys_[j]]);
|
||||
// for below diagonal, we take transpose block from upper triangular part
|
||||
for (i = j + 1; i < (DenseIndex) size(); ++i)
|
||||
y_[i] += info_(i, j).knownOffDiagonal()
|
||||
y_[i] += info_.aboveDiagonalBlock(j, i).transpose()
|
||||
* ConstDMap(x + offsets[keys_[j]],
|
||||
offsets[keys_[j] + 1] - offsets[keys_[j]]);
|
||||
}
|
||||
|
@ -182,8 +182,7 @@ public:
|
|||
for (DenseIndex pos = 0; pos < (DenseIndex) size(); ++pos) {
|
||||
Key j = keys_[pos];
|
||||
// Get the diagonal block, and insert its diagonal
|
||||
const Matrix& B = info_(pos, pos).selfadjointView();
|
||||
DMap(d + D * j) += B.diagonal();
|
||||
DMap(d + D * j) += info_.diagonal(pos);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -194,8 +193,7 @@ public:
|
|||
for (DenseIndex pos = 0; pos < (DenseIndex) size(); ++pos) {
|
||||
Key j = keys_[pos];
|
||||
// Accumulate the gradient at zero for this variable, i.e. the negated linear term -g
|
||||
VectorD dj = -info_(pos, size()).knownOffDiagonal();
|
||||
DMap(d + D * j) += dj;
|
||||
DMap(d + D * j) -= info_.aboveDiagonalBlock(pos, size());
|
||||
}
|
||||
}
|
||||
|
||||
@ -40,14 +40,15 @@ Scatter::Scatter(const GaussianFactorGraph& gfg,
|
|||
|
||||
// If we have an ordering, pre-fill the ordered variables first
|
||||
if (ordering) {
|
||||
for (Key key: *ordering) {
|
||||
push_back(SlotEntry(key, 0));
|
||||
for (Key key : *ordering) {
|
||||
add(key, 0);
|
||||
}
|
||||
}
|
||||
|
||||
// Now, find dimensions of variables and/or extend
|
||||
for (const GaussianFactor::shared_ptr& factor: gfg) {
|
||||
if (!factor) continue;
|
||||
for (const auto& factor : gfg) {
|
||||
if (!factor)
|
||||
continue;
|
||||
|
||||
// TODO: Fix this hack to cope with zero-row Jacobians that come from BayesTreeOrphanWrappers
|
||||
const JacobianFactor* asJacobian = dynamic_cast<const JacobianFactor*>(factor.get());
|
||||
|
@ -61,7 +62,7 @@ Scatter::Scatter(const GaussianFactorGraph& gfg,
|
|||
if (it!=end())
|
||||
it->dimension = factor->getDim(variable);
|
||||
else
|
||||
push_back(SlotEntry(key, factor->getDim(variable)));
|
||||
add(key, factor->getDim(variable));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -74,6 +75,11 @@ Scatter::Scatter(const GaussianFactorGraph& gfg,
|
|||
erase(std::remove_if(begin(), end(), SlotEntry::Zero), end());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void Scatter::add(Key key, size_t dim) {
|
||||
emplace_back(SlotEntry(key, dim));
|
||||
}
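Aside: a minimal usage sketch of the new add() helper (illustrative only).

gtsam::Scatter scatter;
scatter.add(0, 3);  // key 0 has dimension 3
scatter.add(1, 2);  // key 1 has dimension 2
// scatter now holds two SlotEntry records in insertion order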
|
||||
|
||||
/* ************************************************************************* */
|
||||
FastVector<SlotEntry>::iterator Scatter::find(Key key) {
|
||||
iterator it = begin();
|
||||
@ -50,10 +50,16 @@ struct GTSAM_EXPORT SlotEntry {
|
|||
*/
|
||||
class Scatter : public FastVector<SlotEntry> {
|
||||
public:
|
||||
/// Constructor
|
||||
/// Default Constructor
|
||||
Scatter() {}
|
||||
|
||||
/// Construct from gaussian factor graph, with optional (partial or complete) ordering
|
||||
Scatter(const GaussianFactorGraph& gfg,
|
||||
boost::optional<const Ordering&> ordering = boost::none);
|
||||
|
||||
/// Add a key/dim pair
|
||||
void add(Key key, size_t dim);
|
||||
|
||||
private:
|
||||
|
||||
/// Find the SlotEntry with the right key (linear time worst case)
|
||||
|
|
|
@ -144,12 +144,12 @@ SubgraphSolver::splitGraph(const GaussianFactorGraph &jfg) {
|
|||
if (gf->keys().size() == 1)
|
||||
augment = true;
|
||||
else {
|
||||
const Key u = gf->keys()[0], v = gf->keys()[1], u_root = D.findSet(u),
|
||||
v_root = D.findSet(v);
|
||||
const Key u = gf->keys()[0], v = gf->keys()[1], u_root = D.find(u),
|
||||
v_root = D.find(v);
|
||||
if (u_root != v_root) {
|
||||
t++;
|
||||
augment = true;
|
||||
D.makeUnionInPlace(u_root, v_root);
|
||||
D.merge(u_root, v_root);
|
||||
}
|
||||
}
|
||||
if (augment)
|
||||
|
|
|
@ -47,15 +47,24 @@ namespace gtsam {
|
|||
VectorValues::VectorValues(const Vector& x, const Dims& dims) {
|
||||
typedef pair<Key, size_t> Pair;
|
||||
size_t j = 0;
|
||||
for(const Pair& v: dims) {
|
||||
for (const Pair& v : dims) {
|
||||
Key key;
|
||||
size_t n;
|
||||
boost::tie(key, n) = v;
|
||||
values_.insert(make_pair(key, x.segment(j,n)));
|
||||
values_.insert(make_pair(key, x.segment(j, n)));
|
||||
j += n;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
VectorValues::VectorValues(const Vector& x, const Scatter& scatter) {
|
||||
size_t j = 0;
|
||||
for (const SlotEntry& v : scatter) {
|
||||
values_.insert(make_pair(v.key, x.segment(j, v.dimension)));
|
||||
j += v.dimension;
|
||||
}
|
||||
}
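Aside: a hedged sketch of the new Scatter-based constructor; the flat vector is split into per-key segments using the dimensions recorded in the Scatter (illustrative only).

gtsam::Scatter scatter;
scatter.add(0, 2);
scatter.add(1, 1);
gtsam::Vector x(3);
x << 1.0, 2.0, 3.0;
gtsam::VectorValues values(x, scatter);
// values.at(0) -> [1, 2], values.at(1) -> [3]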
|
||||
|
||||
/* ************************************************************************* */
|
||||
VectorValues VectorValues::Zero(const VectorValues& other)
|
||||
{
|
||||
|
|
|
@ -17,11 +17,12 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/linear/Scatter.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/base/ConcurrentMap.h>
|
||||
#include <gtsam/base/FastVector.h>
|
||||
#include <gtsam/global_includes.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
|
@ -124,9 +125,12 @@ namespace gtsam {
|
|||
template<typename ITERATOR>
|
||||
VectorValues(ITERATOR first, ITERATOR last) : values_(first, last) {}
|
||||
|
||||
/** Constructor from Vector. */
|
||||
/// Constructor from Vector, with Dims
|
||||
VectorValues(const Vector& c, const Dims& dims);
|
||||
|
||||
/// Constructor from Vector, with Scatter
|
||||
VectorValues(const Vector& c, const Scatter& scatter);
|
||||
|
||||
/** Create a VectorValues with the same structure as \c other, but filled with zeros. */
|
||||
static VectorValues Zero(const VectorValues& other);
|
||||
|
||||
|
|
|
@ -57,8 +57,8 @@ namespace gtsam
|
|||
// Take any ancestor results we'll need
|
||||
for(Key parent: clique->conditional_->parents())
|
||||
myData.cliqueResults.insert(std::make_pair(parent, myData.parentData->cliqueResults.at(parent)));
|
||||
|
||||
// Solve and store in our results
|
||||
//collectedResult.insert(clique->conditional()->solve(collectedResult/*myData.ancestorResults*/));
|
||||
{
|
||||
GaussianConditional& c = *clique->conditional();
|
||||
// Solve matrix
|
||||
|
@ -82,17 +82,24 @@ namespace gtsam
|
|||
vectorPos += parentVector.size();
|
||||
}
|
||||
}
|
||||
xS = c.getb() - c.get_S() * xS;
|
||||
Vector soln = c.get_R().triangularView<Eigen::Upper>().solve(xS);
|
||||
|
||||
// NOTE(gareth): We can no longer write: xS = b - S * xS
|
||||
// This is because Eigen (as of 3.3) no longer evaluates S * xS into
|
||||
// a temporary, and the operation trashes values in xS.
|
||||
// See: http://eigen.tuxfamily.org/index.php?title=3.3
|
||||
const Vector rhs = c.getb() - c.get_S() * xS;
|
||||
|
||||
// TODO(gareth): Inline instantiation of Eigen::Solve and check flag
|
||||
const Vector solution = c.get_R().triangularView<Eigen::Upper>().solve(rhs);
|
||||
|
||||
// Check for indeterminant solution
|
||||
if(soln.hasNaN()) throw IndeterminantLinearSystemException(c.keys().front());
|
||||
if(solution.hasNaN()) throw IndeterminantLinearSystemException(c.keys().front());
|
||||
|
||||
// Insert solution into a VectorValues
|
||||
DenseIndex vectorPosition = 0;
|
||||
for(GaussianConditional::const_iterator frontal = c.beginFrontals(); frontal != c.endFrontals(); ++frontal) {
|
||||
VectorValues::const_iterator r =
|
||||
collectedResult.insert(*frontal, soln.segment(vectorPosition, c.getDim(frontal)));
|
||||
collectedResult.insert(*frontal, solution.segment(vectorPosition, c.getDim(frontal)));
|
||||
myData.cliqueResults.insert(make_pair(r->first, r));
|
||||
vectorPosition += c.getDim(frontal);
|
||||
}
|
||||
|
|
|
@ -36,17 +36,23 @@ using namespace gtsam;
|
|||
|
||||
static const Key _x_=0, _y_=1;
|
||||
|
||||
static GaussianBayesNet smallBayesNet = list_of
|
||||
(GaussianConditional(_x_, (Vector(1) << 9.0).finished(), (Matrix(1, 1) << 1.0).finished(), _y_, (Matrix(1, 1) << 1.0).finished()))
|
||||
(GaussianConditional(_y_, (Vector(1) << 5.0).finished(), (Matrix(1, 1) << 1.0).finished()));
|
||||
static GaussianBayesNet smallBayesNet =
|
||||
list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1))(
|
||||
GaussianConditional(_y_, Vector1::Constant(5), I_1x1));
|
||||
|
||||
static GaussianBayesNet noisyBayesNet =
|
||||
list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1,
|
||||
noiseModel::Diagonal::Sigmas(Vector1::Constant(2))))(
|
||||
GaussianConditional(_y_, Vector1::Constant(5), I_1x1,
|
||||
noiseModel::Diagonal::Sigmas(Vector1::Constant(3))));
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( GaussianBayesNet, matrix )
|
||||
TEST( GaussianBayesNet, Matrix )
|
||||
{
|
||||
Matrix R; Vector d;
|
||||
boost::tie(R,d) = smallBayesNet.matrix(); // find matrix and RHS
|
||||
|
||||
Matrix R1 = (Matrix(2, 2) <<
|
||||
Matrix R1 = (Matrix2() <<
|
||||
1.0, 1.0,
|
||||
0.0, 1.0
|
||||
).finished();
|
||||
|
@ -57,31 +63,55 @@ TEST( GaussianBayesNet, matrix )
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( GaussianBayesNet, optimize )
|
||||
TEST( GaussianBayesNet, NoisyMatrix )
|
||||
{
|
||||
Matrix R; Vector d;
|
||||
boost::tie(R,d) = noisyBayesNet.matrix(); // find matrix and RHS
|
||||
|
||||
Matrix R1 = (Matrix2() <<
|
||||
0.5, 0.5,
|
||||
0.0, 1./3.
|
||||
).finished();
|
||||
Vector d1 = Vector2(9./2., 5./3.);
|
||||
|
||||
EXPECT(assert_equal(R,R1));
|
||||
EXPECT(assert_equal(d,d1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(GaussianBayesNet, Optimize) {
|
||||
VectorValues expected =
|
||||
map_list_of<Key, Vector>(_x_, Vector1::Constant(4))(_y_, Vector1::Constant(5));
|
||||
VectorValues actual = smallBayesNet.optimize();
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
VectorValues expected = map_list_of<Key, Vector>
|
||||
(_x_, (Vector(1) << 4.0).finished())
|
||||
(_y_, (Vector(1) << 5.0).finished());
|
||||
/* ************************************************************************* */
|
||||
TEST(GaussianBayesNet, NoisyOptimize) {
|
||||
Matrix R;
|
||||
Vector d;
|
||||
boost::tie(R, d) = noisyBayesNet.matrix(); // find matrix and RHS
|
||||
const Vector x = R.inverse() * d;
|
||||
VectorValues expected = map_list_of<Key, Vector>(_x_, x.head(1))(_y_, x.tail(1));
|
||||
|
||||
EXPECT(assert_equal(expected,actual));
|
||||
VectorValues actual = noisyBayesNet.optimize();
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( GaussianBayesNet, optimizeIncomplete )
|
||||
{
|
||||
static GaussianBayesNet incompleteBayesNet = list_of
|
||||
(GaussianConditional(_x_, (Vector(1) << 9.0).finished(), (Matrix(1, 1) << 1.0).finished(), _y_, (Matrix(1, 1) << 1.0).finished()));
|
||||
(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1));
|
||||
|
||||
VectorValues solutionForMissing = map_list_of<Key, Vector>
|
||||
(_y_, (Vector(1) << 5.0).finished());
|
||||
(_y_, Vector1::Constant(5));
|
||||
|
||||
VectorValues actual = incompleteBayesNet.optimize(solutionForMissing);
|
||||
|
||||
VectorValues expected = map_list_of<Key, Vector>
|
||||
(_x_, (Vector(1) << 4.0).finished())
|
||||
(_y_, (Vector(1) << 5.0).finished());
|
||||
(_x_, Vector1::Constant(4))
|
||||
(_y_, Vector1::Constant(5));
|
||||
|
||||
EXPECT(assert_equal(expected,actual));
|
||||
}
|
||||
|
@ -95,13 +125,13 @@ TEST( GaussianBayesNet, optimize3 )
|
|||
// NOTE: we are supplying a new RHS here
|
||||
|
||||
VectorValues expected = map_list_of<Key, Vector>
|
||||
(_x_, (Vector(1) << -1.0).finished())
|
||||
(_y_, (Vector(1) << 5.0).finished());
|
||||
(_x_, Vector1::Constant(-1))
|
||||
(_y_, Vector1::Constant(5));
|
||||
|
||||
// Test different RHS version
|
||||
VectorValues gx = map_list_of<Key, Vector>
|
||||
(_x_, (Vector(1) << 4.0).finished())
|
||||
(_y_, (Vector(1) << 5.0).finished());
|
||||
(_x_, Vector1::Constant(4))
|
||||
(_y_, Vector1::Constant(5));
|
||||
VectorValues actual = smallBayesNet.backSubstitute(gx);
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
@ -114,11 +144,11 @@ TEST( GaussianBayesNet, backSubstituteTranspose )
|
|||
// 5 1 1 3
|
||||
VectorValues
|
||||
x = map_list_of<Key, Vector>
|
||||
(_x_, (Vector(1) << 2.0).finished())
|
||||
(_y_, (Vector(1) << 5.0).finished()),
|
||||
(_x_, Vector1::Constant(2))
|
||||
(_y_, Vector1::Constant(5)),
|
||||
expected = map_list_of<Key, Vector>
|
||||
(_x_, (Vector(1) << 2.0).finished())
|
||||
(_y_, (Vector(1) << 3.0).finished());
|
||||
(_x_, Vector1::Constant(2))
|
||||
(_y_, Vector1::Constant(3));
|
||||
|
||||
VectorValues actual = smallBayesNet.backSubstituteTranspose(x);
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
|
@ -130,15 +160,15 @@ TEST( GaussianBayesNet, DeterminantTest )
|
|||
{
|
||||
GaussianBayesNet cbn;
|
||||
cbn += GaussianConditional(
|
||||
0, Vector2(3.0, 4.0 ), (Matrix(2, 2) << 1.0, 3.0, 0.0, 4.0 ).finished(),
|
||||
1, (Matrix(2, 2) << 2.0, 1.0, 2.0, 3.0).finished(), noiseModel::Isotropic::Sigma(2, 2.0));
|
||||
0, Vector2(3.0, 4.0), (Matrix2() << 1.0, 3.0, 0.0, 4.0).finished(),
|
||||
1, (Matrix2() << 2.0, 1.0, 2.0, 3.0).finished(), noiseModel::Isotropic::Sigma(2, 2.0));
|
||||
|
||||
cbn += GaussianConditional(
|
||||
1, Vector2(5.0, 6.0 ), (Matrix(2, 2) << 1.0, 1.0, 0.0, 3.0 ).finished(),
|
||||
2, (Matrix(2, 2) << 1.0, 0.0, 5.0, 2.0).finished(), noiseModel::Isotropic::Sigma(2, 2.0));
|
||||
1, Vector2(5.0, 6.0), (Matrix2() << 1.0, 1.0, 0.0, 3.0).finished(),
|
||||
2, (Matrix2() << 1.0, 0.0, 5.0, 2.0).finished(), noiseModel::Isotropic::Sigma(2, 2.0));
|
||||
|
||||
cbn += GaussianConditional(
|
||||
3, Vector2(7.0, 8.0 ), (Matrix(2, 2) << 1.0, 1.0, 0.0, 5.0 ).finished(), noiseModel::Isotropic::Sigma(2, 2.0));
|
||||
3, Vector2(7.0, 8.0), (Matrix2() << 1.0, 1.0, 0.0, 5.0).finished(), noiseModel::Isotropic::Sigma(2, 2.0));
|
||||
|
||||
double expectedDeterminant = 60.0 / 64.0;
|
||||
double actualDeterminant = cbn.determinant();

@ -161,21 +191,21 @@ TEST(GaussianBayesNet, ComputeSteepestDescentPoint) {
// Create an arbitrary Bayes Net
GaussianBayesNet gbn;
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
0, Vector2(1.0,2.0), (Matrix(2, 2) << 3.0,4.0,0.0,6.0).finished(),
3, (Matrix(2, 2) << 7.0,8.0,9.0,10.0).finished(),
4, (Matrix(2, 2) << 11.0,12.0,13.0,14.0).finished()));
0, Vector2(1.0,2.0), (Matrix2() << 3.0,4.0,0.0,6.0).finished(),
3, (Matrix2() << 7.0,8.0,9.0,10.0).finished(),
4, (Matrix2() << 11.0,12.0,13.0,14.0).finished()));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
1, Vector2(15.0,16.0), (Matrix(2, 2) << 17.0,18.0,0.0,20.0).finished(),
2, (Matrix(2, 2) << 21.0,22.0,23.0,24.0).finished(),
4, (Matrix(2, 2) << 25.0,26.0,27.0,28.0).finished()));
1, Vector2(15.0,16.0), (Matrix2() << 17.0,18.0,0.0,20.0).finished(),
2, (Matrix2() << 21.0,22.0,23.0,24.0).finished(),
4, (Matrix2() << 25.0,26.0,27.0,28.0).finished()));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
2, Vector2(29.0,30.0), (Matrix(2, 2) << 31.0,32.0,0.0,34.0).finished(),
3, (Matrix(2, 2) << 35.0,36.0,37.0,38.0).finished()));
2, Vector2(29.0,30.0), (Matrix2() << 31.0,32.0,0.0,34.0).finished(),
3, (Matrix2() << 35.0,36.0,37.0,38.0).finished()));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
3, Vector2(39.0,40.0), (Matrix(2, 2) << 41.0,42.0,0.0,44.0).finished(),
4, (Matrix(2, 2) << 45.0,46.0,47.0,48.0).finished()));
3, Vector2(39.0,40.0), (Matrix2() << 41.0,42.0,0.0,44.0).finished(),
4, (Matrix2() << 45.0,46.0,47.0,48.0).finished()));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
4, Vector2(49.0,50.0), (Matrix(2, 2) << 51.0,52.0,0.0,54.0).finished()));
4, Vector2(49.0,50.0), (Matrix2() << 51.0,52.0,0.0,54.0).finished()));

// Compute the Hessian numerically
Matrix hessian = numericalHessian<Vector10>(

@ -130,7 +130,7 @@ TEST(GaussianBayesTree, complicatedMarginal) {
// Create the conditionals to go in the BayesTree
GaussianBayesTree bt;
bt.insertRoot(
MakeClique(GaussianConditional(pair_list_of<Key, Matrix> (11, (Matrix(3,1) << 0.0971, 0, 0).finished())
MakeClique(GaussianConditional(pair_list_of<Key, Matrix> (11, (Matrix(3,1) << 0.0971, 0, 0).finished())
(12, (Matrix(3,2) << 0.3171, 0.4387, 0.9502, 0.3816, 0, 0.7655).finished()),
2, Vector3(0.2638, 0.1455, 0.1361)), list_of
(MakeClique(GaussianConditional(pair_list_of<Key, Matrix> (9, (Matrix(3,1) << 0.7952, 0, 0).finished())

@ -1,6 +1,6 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

@ -27,7 +27,7 @@
#include <gtsam/base/VerticalBlockMatrix.h>

#include <boost/assign/list_of.hpp>
#include <boost/assign/std/list.hpp> // for operator +=
#include <boost/assign/std/list.hpp> // for operator +=
using namespace boost::assign;

#include <gtsam/base/TestableAssertions.h>

@ -36,7 +36,7 @@ using namespace boost::assign;
using namespace std;
using namespace gtsam;

//static SharedDiagonal
// static SharedDiagonal
// sigma0_1 = noiseModel::Isotropic::Sigma(2,0.1), sigma_02 = noiseModel::Isotropic::Sigma(2,0.2),
// constraintModel = noiseModel::Constrained::All(2);

@ -56,11 +56,11 @@ TEST(GaussianFactorGraph, initialization) {

// Test sparse, which takes a vector and returns a matrix, used in MATLAB
// Note that this the augmented vector and the RHS is in column 7
Matrix expectedIJS = (Matrix(3, 22) <<
1., 2., 1., 2., 3., 4., 3., 4., 3., 4., 5., 6., 5., 6., 5., 6., 7., 8., 7., 8., 7., 8.,
1., 2., 7., 7., 1., 2., 3., 4., 7., 7., 1., 2., 5., 6., 7., 7., 3., 4., 5., 6., 7., 7.,
10., 10., -1., -1., -10., -10., 10., 10., 2., -1., -5., -5., 5., 5., 0., 1., -5., -5., 5., 5., -1., 1.5
).finished();
Matrix expectedIJS =
(Matrix(3, 22) << 1., 2., 1., 2., 3., 4., 3., 4., 3., 4., 5., 6., 5., 6., 5., 6., 7., 8., 7.,
8., 7., 8., 1., 2., 7., 7., 1., 2., 3., 4., 7., 7., 1., 2., 5., 6., 7., 7., 3., 4., 5., 6.,
7., 7., 10., 10., -1., -1., -10., -10., 10., 10., 2., -1., -5., -5., 5., 5., 0., 1., -5.,
-5., 5., 5., -1., 1.5).finished();
Matrix actualIJS = fg.sparseJacobian_();
EQUALITY(expectedIJS, actualIJS);
}
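
Reading of the expected matrix above (an interpretation of the data, since the diff only shows the numbers): each column of the 3 x 22 matrix appears to be a one-indexed MATLAB-style (row, column, value) triplet of the augmented Jacobian [A b], with column 7 of that augmented matrix holding the right-hand side b; for example the first triplet (1, 1, 10) says entry (1,1) of the augmented Jacobian is 10.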

@ -98,7 +98,8 @@ TEST(GaussianFactorGraph, sparseJacobian) {
GaussianFactorGraph gfg;
SharedDiagonal model = noiseModel::Isotropic::Sigma(2, 0.5);
gfg.add(0, (Matrix(2, 3) << 1., 2., 3., 5., 6., 7.).finished(), Vector2(4., 8.), model);
gfg.add(0, (Matrix(2, 3) << 9.,10., 0., 0., 0., 0.).finished(), 1, (Matrix(2, 2) << 11., 12., 14., 15.).finished(), Vector2(13.,16.), model);
gfg.add(0, (Matrix(2, 3) << 9., 10., 0., 0., 0., 0.).finished(), 1,
(Matrix(2, 2) << 11., 12., 14., 15.).finished(), Vector2(13., 16.), model);

Matrix actual = gfg.sparseJacobian_();

@ -121,73 +122,73 @@ TEST(GaussianFactorGraph, matrices) {
GaussianFactorGraph gfg;
SharedDiagonal model = noiseModel::Unit::Create(2);
gfg.add(0, A00, Vector2(4., 8.), model);
gfg.add(0, A10, 1, A11, Vector2(13.,16.), model);
gfg.add(0, A10, 1, A11, Vector2(13., 16.), model);

Matrix Ab(4,6);
Ab <<
1, 2, 3, 0, 0, 4,
5, 6, 7, 0, 0, 8,
9,10, 0,11,12,13,
0, 0, 0,14,15,16;
Matrix Ab(4, 6);
Ab << 1, 2, 3, 0, 0, 4, 5, 6, 7, 0, 0, 8, 9, 10, 0, 11, 12, 13, 0, 0, 0, 14, 15, 16;

// augmented versions
EXPECT(assert_equal(Ab, gfg.augmentedJacobian()));
EXPECT(assert_equal(Ab.transpose() * Ab, gfg.augmentedHessian()));

// jacobian
Matrix A = Ab.leftCols(Ab.cols()-1);
Vector b = Ab.col(Ab.cols()-1);
Matrix actualA; Vector actualb; boost::tie(actualA,actualb) = gfg.jacobian();
Matrix A = Ab.leftCols(Ab.cols() - 1);
Vector b = Ab.col(Ab.cols() - 1);
Matrix actualA;
Vector actualb;
boost::tie(actualA, actualb) = gfg.jacobian();
EXPECT(assert_equal(A, actualA));
EXPECT(assert_equal(b, actualb));

// hessian
Matrix L = A.transpose() * A;
Vector eta = A.transpose() * b;
Matrix actualL; Vector actualeta; boost::tie(actualL,actualeta) = gfg.hessian();
Matrix actualL;
Vector actualeta;
boost::tie(actualL, actualeta) = gfg.hessian();
EXPECT(assert_equal(L, actualL));
EXPECT(assert_equal(eta, actualeta));

// hessianBlockDiagonal
VectorValues expectLdiagonal; // Make explicit that diagonal is sum-squares of columns
expectLdiagonal.insert(0, Vector3(1+25+81, 4+36+100, 9+49));
expectLdiagonal.insert(1, Vector2(121+196, 144+225));
VectorValues expectLdiagonal; // Make explicit that diagonal is sum-squares of columns
expectLdiagonal.insert(0, Vector3(1 + 25 + 81, 4 + 36 + 100, 9 + 49));
expectLdiagonal.insert(1, Vector2(121 + 196, 144 + 225));
EXPECT(assert_equal(expectLdiagonal, gfg.hessianDiagonal()));

// hessianBlockDiagonal
map<Key,Matrix> actualBD = gfg.hessianBlockDiagonal();
LONGS_EQUAL(2,actualBD.size());
EXPECT(assert_equal(A00.transpose()*A00 + A10.transpose()*A10,actualBD[0]));
EXPECT(assert_equal(A11.transpose()*A11,actualBD[1]));
map<Key, Matrix> actualBD = gfg.hessianBlockDiagonal();
LONGS_EQUAL(2, actualBD.size());
EXPECT(assert_equal(A00.transpose() * A00 + A10.transpose() * A10, actualBD[0]));
EXPECT(assert_equal(A11.transpose() * A11, actualBD[1]));
}

/* ************************************************************************* */
/// Factor graph with 2 2D factors on 3 2D variables
static GaussianFactorGraph createSimpleGaussianFactorGraph() {
GaussianFactorGraph fg;
Key x1 = 2, x2 = 0, l1 = 1;
SharedDiagonal unit2 = noiseModel::Unit::Create(2);
// linearized prior on x1: c[_x1_]+x1=0 i.e. x1=-c[_x1_]
fg += JacobianFactor(2, 10*I_2x2, -1.0*Vector::Ones(2), unit2);
fg += JacobianFactor(x1, 10 * I_2x2, -1.0 * Vector::Ones(2), unit2);
// odometry between x1 and x2: x2-x1=[0.2;-0.1]
fg += JacobianFactor(0, 10*I_2x2, 2, -10*I_2x2, Vector2(2.0, -1.0), unit2);
fg += JacobianFactor(x2, 10 * I_2x2, x1, -10 * I_2x2, Vector2(2.0, -1.0), unit2);
// measurement between x1 and l1: l1-x1=[0.0;0.2]
fg += JacobianFactor(1, 5*I_2x2, 2, -5*I_2x2, Vector2(0.0, 1.0), unit2);
fg += JacobianFactor(l1, 5 * I_2x2, x1, -5 * I_2x2, Vector2(0.0, 1.0), unit2);
// measurement between x2 and l1: l1-x2=[-0.2;0.3]
fg += JacobianFactor(0, -5*I_2x2, 1, 5*I_2x2, Vector2(-1.0, 1.5), unit2);
fg += JacobianFactor(x2, -5 * I_2x2, l1, 5 * I_2x2, Vector2(-1.0, 1.5), unit2);
return fg;
}
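
For orientation, a hypothetical usage sketch of this helper (illustrative only; it simply mirrors the DenseSolve test shown further down, which is where both calls appear in this change):

GaussianFactorGraph fg = createSimpleGaussianFactorGraph();
VectorValues solution = fg.optimize();        // sparse elimination and back-substitution
VectorValues dense = fg.optimizeDensely();    // dense alternative, expected to agree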

/* ************************************************************************* */
TEST( GaussianFactorGraph, gradient )
{
TEST(GaussianFactorGraph, gradient) {
GaussianFactorGraph fg = createSimpleGaussianFactorGraph();

// Construct expected gradient
// 2*f(x) = 100*(x1+c[X(1)])^2 + 100*(x2-x1-[0.2;-0.1])^2 + 25*(l1-x1-[0.0;0.2])^2 + 25*(l1-x2-[-0.2;0.3])^2
// 2*f(x) = 100*(x1+c[X(1)])^2 + 100*(x2-x1-[0.2;-0.1])^2 + 25*(l1-x1-[0.0;0.2])^2 +
// 25*(l1-x2-[-0.2;0.3])^2
// worked out: df/dx1 = 100*[0.1;0.1] + 100*[0.2;-0.1]) + 25*[0.0;0.2] = [10+20;10-10+5] = [30;5]
VectorValues expected = map_list_of<Key, Vector>
(1, Vector2(5.0, -12.5))
(2, Vector2(30.0, 5.0))
(0, Vector2(-25.0, 17.5));
VectorValues expected = map_list_of<Key, Vector>(1, Vector2(5.0, -12.5))(2, Vector2(30.0, 5.0))(
0, Vector2(-25.0, 17.5));

// Check the gradient at delta=0
VectorValues zero = VectorValues::Zero(expected);

@ -202,18 +203,14 @@ TEST( GaussianFactorGraph, gradient )
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, transposeMultiplication )
{
TEST(GaussianFactorGraph, transposeMultiplication) {
GaussianFactorGraph A = createSimpleGaussianFactorGraph();

Errors e; e +=
Vector2(0.0, 0.0),
Vector2(15.0, 0.0),
Vector2(0.0,-5.0),
Vector2(-7.5,-5.0);
Errors e;
e += Vector2(0.0, 0.0), Vector2(15.0, 0.0), Vector2(0.0, -5.0), Vector2(-7.5, -5.0);

VectorValues expected;
expected.insert(1, Vector2(-37.5,-50.0));
expected.insert(1, Vector2(-37.5, -50.0));
expected.insert(2, Vector2(-150.0, 25.0));
expected.insert(0, Vector2(187.5, 25.0));

@ -222,8 +219,7 @@ TEST( GaussianFactorGraph, transposeMultiplication )
}

/* ************************************************************************* */
TEST(GaussianFactorGraph, eliminate_empty )
{
TEST(GaussianFactorGraph, eliminate_empty) {
// eliminate an empty factor
GaussianFactorGraph gfg;
gfg.add(JacobianFactor());

@ -243,25 +239,31 @@ TEST(GaussianFactorGraph, eliminate_empty )
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, matrices2 )
{
TEST(GaussianFactorGraph, matrices2) {
GaussianFactorGraph gfg = createSimpleGaussianFactorGraph();
Matrix A; Vector b; boost::tie(A,b) = gfg.jacobian();
Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian();
EXPECT(assert_equal(A.transpose()*A, AtA));
EXPECT(assert_equal(A.transpose()*b, eta));
Matrix A;
Vector b;
boost::tie(A, b) = gfg.jacobian();
Matrix AtA;
Vector eta;
boost::tie(AtA, eta) = gfg.hessian();
EXPECT(assert_equal(A.transpose() * A, AtA));
EXPECT(assert_equal(A.transpose() * b, eta));
Matrix expectedAtA(6, 6);
expectedAtA << 125, 0, -25, 0, -100, 0, //
0, 125, 0, -25, 0, -100, //
-25, 0, 50, 0, -25, 0, //
0, -25, 0, 50, 0, -25, //
-100, 0, -25, 0, 225, 0, //
0, -100, 0, -25, 0, 225;
EXPECT(assert_equal(expectedAtA, AtA));
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, multiplyHessianAdd )
{
TEST(GaussianFactorGraph, multiplyHessianAdd) {
GaussianFactorGraph gfg = createSimpleGaussianFactorGraph();

VectorValues x = map_list_of<Key, Vector>
(0, Vector2(1,2))
(1, Vector2(3,4))
(2, Vector2(5,6));
VectorValues x = map_list_of<Key, Vector>(0, Vector2(1, 2))(1, Vector2(3, 4))(2, Vector2(5, 6));

VectorValues expected;
expected.insert(0, Vector2(-450, -450));

@ -274,7 +276,7 @@ TEST( GaussianFactorGraph, multiplyHessianAdd )

// now, do it with non-zero y
gfg.multiplyHessianAdd(1.0, x, actual);
EXPECT(assert_equal(2*expected, actual));
EXPECT(assert_equal(2 * expected, actual));
}

/* ************************************************************************* */

@ -286,20 +288,20 @@ static GaussianFactorGraph createGaussianFactorGraphWithHessianFactor() {
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, multiplyHessianAdd2 )
{
TEST(GaussianFactorGraph, multiplyHessianAdd2) {
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();

// brute force
Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian();
Vector X(6); X<<1,2,3,4,5,6;
Vector Y(6); Y<<-450, -450, 300, 400, 2950, 3450;
EXPECT(assert_equal(Y,AtA*X));
Matrix AtA;
Vector eta;
boost::tie(AtA, eta) = gfg.hessian();
Vector X(6);
X << 1, 2, 3, 4, 5, 6;
Vector Y(6);
Y << -450, -450, 300, 400, 2950, 3450;
EXPECT(assert_equal(Y, AtA * X));

VectorValues x = map_list_of<Key, Vector>
(0, Vector2(1,2))
(1, Vector2(3,4))
(2, Vector2(5,6));
VectorValues x = map_list_of<Key, Vector>(0, Vector2(1, 2))(1, Vector2(3, 4))(2, Vector2(5, 6));

VectorValues expected;
expected.insert(0, Vector2(-450, -450));

@ -312,24 +314,26 @@ TEST( GaussianFactorGraph, multiplyHessianAdd2 )

// now, do it with non-zero y
gfg.multiplyHessianAdd(1.0, x, actual);
EXPECT(assert_equal(2*expected, actual));
EXPECT(assert_equal(2 * expected, actual));
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, matricesMixed )
{
TEST(GaussianFactorGraph, matricesMixed) {
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
Matrix A; Vector b; boost::tie(A,b) = gfg.jacobian(); // incorrect !
Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian(); // correct
EXPECT(assert_equal(A.transpose()*A, AtA));
Vector expected = - (Vector(6) << -25, 17.5, 5, -13.5, 29, 4).finished();
Matrix A;
Vector b;
boost::tie(A, b) = gfg.jacobian();  // incorrect !
Matrix AtA;
Vector eta;
boost::tie(AtA, eta) = gfg.hessian();  // correct
EXPECT(assert_equal(A.transpose() * A, AtA));
Vector expected = -(Vector(6) << -25, 17.5, 5, -13.5, 29, 4).finished();
EXPECT(assert_equal(expected, eta));
EXPECT(assert_equal(A.transpose()*b, eta));
EXPECT(assert_equal(A.transpose() * b, eta));
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, gradientAtZero )
{
TEST(GaussianFactorGraph, gradientAtZero) {
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
VectorValues expected;
VectorValues actual = gfg.gradientAtZero();

@ -340,29 +344,28 @@ TEST( GaussianFactorGraph, gradientAtZero )
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, clone ) {
TEST(GaussianFactorGraph, clone) {
// 2 variables, frontal has dim=4
VerticalBlockMatrix blockMatrix(list_of(4)(2)(1), 4);
blockMatrix.matrix() <<
1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1,
0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2,
0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.3,
0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4;
blockMatrix.matrix() << 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1, 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2, 0.0,
0.0, 3.0, 0.0, 4.0, 0.0, 0.3, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4;
GaussianConditional cg(list_of(1)(2), 1, blockMatrix);

GaussianFactorGraph init_graph = createGaussianFactorGraphWithHessianFactor();
init_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor
init_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor
init_graph.push_back(GaussianConditional(cg));

GaussianFactorGraph exp_graph = createGaussianFactorGraphWithHessianFactor(); // Created separately
exp_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor
GaussianFactorGraph exp_graph =
createGaussianFactorGraphWithHessianFactor(); // Created separately
exp_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor
exp_graph.push_back(GaussianConditional(cg));

GaussianFactorGraph actCloned = init_graph.clone();
EXPECT(assert_equal(init_graph, actCloned)); // Same as the original version
EXPECT(assert_equal(init_graph, actCloned)); // Same as the original version

// Apply an in-place change to init_graph and compare
JacobianFactor::shared_ptr jacFactor0 = boost::dynamic_pointer_cast<JacobianFactor>(init_graph.at(0));
JacobianFactor::shared_ptr jacFactor0 =
boost::dynamic_pointer_cast<JacobianFactor>(init_graph.at(0));
CHECK(jacFactor0);
jacFactor0->getA(jacFactor0->begin()) *= 7.;
EXPECT(assert_inequal(init_graph, exp_graph));

@ -370,9 +373,9 @@ TEST( GaussianFactorGraph, clone ) {
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, negate ) {
TEST(GaussianFactorGraph, negate) {
GaussianFactorGraph init_graph = createGaussianFactorGraphWithHessianFactor();
init_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor
init_graph.push_back(GaussianFactor::shared_ptr()); /// Add null factor
GaussianFactorGraph actNegation = init_graph.negate();
GaussianFactorGraph expNegation;
expNegation.push_back(init_graph.at(0)->negate());

@ -385,8 +388,7 @@ TEST( GaussianFactorGraph, negate ) {
}

/* ************************************************************************* */
TEST( GaussianFactorGraph, hessianDiagonal )
{
TEST(GaussianFactorGraph, hessianDiagonal) {
GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
VectorValues expected;
Matrix infoMatrix = gfg.hessian().first;

@ -399,6 +401,16 @@ TEST( GaussianFactorGraph, hessianDiagonal )
EXPECT(assert_equal(expected, actual));
}

TEST(GaussianFactorGraph, DenseSolve) {
GaussianFactorGraph fg = createSimpleGaussianFactorGraph();
VectorValues expected = fg.optimize();
VectorValues actual = fg.optimizeDensely();
EXPECT(assert_equal(expected, actual));
}

/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

@ -53,8 +53,8 @@ TEST(HessianFactor, emptyConstructor)
{
HessianFactor f;
DOUBLES_EQUAL(0.0, f.constantTerm(), 1e-9); // Constant term should be zero
EXPECT(assert_equal(Vector(), f.linearTerm())); // Linear term should be empty
EXPECT(assert_equal((Matrix) Z_1x1, f.info())); // Full matrix should be 1-by-1 zero matrix
//EXPECT(assert_equal(Vector(), f.linearTerm())); // Linear term should be empty
EXPECT(assert_equal((Matrix) Z_1x1, f.info().selfadjointView())); // Full matrix should be 1-by-1 zero matrix
DOUBLES_EQUAL(0.0, f.error(VectorValues()), 1e-9); // Should have zero error
}

@ -103,7 +103,7 @@ TEST(HessianFactor, Constructor1)
HessianFactor factor(0, G, g, f);

// extract underlying parts
EXPECT(assert_equal(G, Matrix(factor.info(factor.begin(), factor.begin()))));
EXPECT(assert_equal(G, Matrix(factor.info().diagonalBlock(0))));
EXPECT_DOUBLES_EQUAL(f, factor.constantTerm(), 1e-10);
EXPECT(assert_equal(g, Vector(factor.linearTerm())));
EXPECT_LONGS_EQUAL(1, (long)factor.size());

@ -118,7 +118,6 @@ TEST(HessianFactor, Constructor1)
EXPECT_DOUBLES_EQUAL(expected, actual, 1e-10);
}

/* ************************************************************************* */
TEST(HessianFactor, Constructor1b)
{

@ -132,7 +131,7 @@ TEST(HessianFactor, Constructor1b)
double f = dot(g,mu);

// Check
EXPECT(assert_equal(G, Matrix(factor.info(factor.begin(), factor.begin()))));
EXPECT(assert_equal(G, Matrix(factor.info().diagonalBlock(0))));
EXPECT_DOUBLES_EQUAL(f, factor.constantTerm(), 1e-10);
EXPECT(assert_equal(g, Vector(factor.linearTerm())));
EXPECT_LONGS_EQUAL(1, (long)factor.size());

@ -167,9 +166,9 @@ TEST(HessianFactor, Constructor2)
Vector linearExpected(3); linearExpected << g1, g2;
EXPECT(assert_equal(linearExpected, factor.linearTerm()));

EXPECT(assert_equal(G11, factor.info(factor.begin(), factor.begin())));
EXPECT(assert_equal(G12, factor.info(factor.begin(), factor.begin()+1)));
EXPECT(assert_equal(G22, factor.info(factor.begin()+1, factor.begin()+1)));
EXPECT(assert_equal(G11, factor.info().diagonalBlock(0)));
EXPECT(assert_equal(G12, factor.info().aboveDiagonalBlock(0, 1)));
EXPECT(assert_equal(G22, factor.info().diagonalBlock(1)));

// Check case when vector values is larger than factor
VectorValues dxLarge = pair_list_of<Key, Vector>

@ -218,12 +217,12 @@ TEST(HessianFactor, Constructor3)
Vector linearExpected(6); linearExpected << g1, g2, g3;
EXPECT(assert_equal(linearExpected, factor.linearTerm()));

EXPECT(assert_equal(G11, factor.info(factor.begin()+0, factor.begin()+0)));
EXPECT(assert_equal(G12, factor.info(factor.begin()+0, factor.begin()+1)));
EXPECT(assert_equal(G13, factor.info(factor.begin()+0, factor.begin()+2)));
EXPECT(assert_equal(G22, factor.info(factor.begin()+1, factor.begin()+1)));
EXPECT(assert_equal(G23, factor.info(factor.begin()+1, factor.begin()+2)));
EXPECT(assert_equal(G33, factor.info(factor.begin()+2, factor.begin()+2)));
EXPECT(assert_equal(G11, factor.info().diagonalBlock(0)));
EXPECT(assert_equal(G12, factor.info().aboveDiagonalBlock(0, 1)));
EXPECT(assert_equal(G13, factor.info().aboveDiagonalBlock(0, 2)));
EXPECT(assert_equal(G22, factor.info().diagonalBlock(1)));
EXPECT(assert_equal(G23, factor.info().aboveDiagonalBlock(1, 2)));
EXPECT(assert_equal(G33, factor.info().diagonalBlock(2)));
}
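
A pattern worth noting across these HessianFactor hunks (an observation about this diff, not a full description of the API): block access to the augmented information matrix moves from info(iterator, iterator) to the block-matrix object returned by info(), with diagonalBlock(i) for the (i, i) block and aboveDiagonalBlock(i, j) for blocks above the diagonal, and the dense view obtained via selfadjointView().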

/* ************************************************************************* */

@ -271,12 +270,12 @@ TEST(HessianFactor, ConstructorNWay)
Vector linearExpected(6); linearExpected << g1, g2, g3;
EXPECT(assert_equal(linearExpected, factor.linearTerm()));

EXPECT(assert_equal(G11, factor.info(factor.begin()+0, factor.begin()+0)));
EXPECT(assert_equal(G12, factor.info(factor.begin()+0, factor.begin()+1)));
EXPECT(assert_equal(G13, factor.info(factor.begin()+0, factor.begin()+2)));
EXPECT(assert_equal(G22, factor.info(factor.begin()+1, factor.begin()+1)));
EXPECT(assert_equal(G23, factor.info(factor.begin()+1, factor.begin()+2)));
EXPECT(assert_equal(G33, factor.info(factor.begin()+2, factor.begin()+2)));
EXPECT(assert_equal(G11, factor.info().diagonalBlock(0)));
EXPECT(assert_equal(G12, factor.info().aboveDiagonalBlock(0, 1)));
EXPECT(assert_equal(G13, factor.info().aboveDiagonalBlock(0, 2)));
EXPECT(assert_equal(G22, factor.info().diagonalBlock(1)));
EXPECT(assert_equal(G23, factor.info().aboveDiagonalBlock(1, 2)));
EXPECT(assert_equal(G33, factor.info().diagonalBlock(2)));
}

/* ************************************************************************* */

@ -499,7 +498,7 @@ TEST(HessianFactor, combine) {
-100.0000, 0.0, 20.0000, 0.0, 80.0000, 0.0, -20.0000,
0.0, -100.0000, 0.0, 20.0000, 0.0, 80.0000, 14.0000,
25.0000, -17.5000, -5.0000, 3.5000, -20.0000, 14.0000, 7.4500).finished();
EXPECT(assert_equal(expected, Matrix(actual.matrixObject().full()), tol));
EXPECT(assert_equal(expected, Matrix(actual.info().selfadjointView()), tol));

}

@ -575,6 +574,23 @@ TEST(HessianFactor, hessianDiagonal)
EXPECT(assert_equal(G22,actualBD[1]));
}

/* ************************************************************************* */
TEST(HessianFactor, Solve)
{
Matrix2 A;
A << 1, 2, 3, 4;
Matrix2 G = A.transpose() * A;
Vector2 b(5, 6);
Vector2 g = A.transpose() * b;
double f = 0;
Key key(55);
HessianFactor factor(key, G, g, f);

VectorValues expected;
expected.insert(key, A.inverse() * b);
EXPECT(assert_equal(expected, factor.solve()));
}
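
Worked reasoning behind the expected value in this test: with G = A^T A and g = A^T b, the normal equations G x = g reduce to x = (A^T A)^-1 A^T b = A^-1 b because the 2 x 2 matrix A is invertible, so factor.solve() is checked against the directly computed A.inverse() * b.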

/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
/* ************************************************************************* */

@ -448,6 +448,15 @@ TEST(NoiseModel, WhitenInPlace)
}

/* ************************************************************************* */

/*
* These tests are responsible for testing the weight functions for the m-estimators in GTSAM.
* The weight function is related to the analytic derivative of the residual function. See
* http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node24.html
* for details. This weight function is required when optimizing cost functions with robust
* penalties using iteratively re-weighted least squares.
*/
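
For reference while reading the tests below, a minimal sketch of the textbook Huber weight (an assumed definition given for illustration, not code taken from this change; the helper name huberWeight is hypothetical and it needs <cmath>):

// Huber weight: full weight inside the threshold k, k/|e| outside.
double huberWeight(double error, double k) {
  const double ae = std::abs(error);
  return (ae <= k) ? 1.0 : k / ae;
}

Under this definition, with k = 5 an error of 1 keeps weight 1 while an error of 10 is down-weighted to 0.5.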

TEST(NoiseModel, robustFunctionHuber)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0;

@ -478,6 +487,26 @@ TEST(NoiseModel, robustFunctionDCS)
DOUBLES_EQUAL(0.00039211, weight2, 1e-8);
}

TEST(NoiseModel, robustFunctionL2WithDeadZone)
{
const double k = 1.0, e0 = -10.0, e1 = -1.01, e2 = -0.99, e3 = 0.99, e4 = 1.01, e5 = 10.0;
const mEstimator::L2WithDeadZone::shared_ptr lsdz = mEstimator::L2WithDeadZone::Create(k);

DOUBLES_EQUAL(0.9, lsdz->weight(e0), 1e-8);
DOUBLES_EQUAL(0.00990099009, lsdz->weight(e1), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->weight(e2), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->weight(e3), 1e-8);
DOUBLES_EQUAL(0.00990099009, lsdz->weight(e4), 1e-8);
DOUBLES_EQUAL(0.9, lsdz->weight(e5), 1e-8);

DOUBLES_EQUAL(40.5, lsdz->residual(e0), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->residual(e1), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->residual(e2), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->residual(e3), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->residual(e4), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->residual(e5), 1e-8);
}
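
The expected values above are consistent with a dead zone of half-width k around zero: for |e| <= k both weight and residual are 0, and outside the zone residual(e) = 0.5*(|e| - k)^2 and weight(e) = (|e| - k)/|e|. For example, e = -10 gives residual 0.5 * 9^2 = 40.5 and weight 9/10 = 0.9, and e = -1.01 gives residual 0.5 * 0.01^2 = 0.00005 and weight 0.01/1.01 = 0.00990099.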

/* ************************************************************************* */
TEST(NoiseModel, robustNoiseHuber)
{

@ -550,6 +579,31 @@ TEST(NoiseModel, robustNoiseDCS)
DOUBLES_EQUAL(sqrt_weight*a11, A(1,1), 1e-8);
}

TEST(NoiseModel, robustNoiseL2WithDeadZone)
{
double dead_zone_size = 1.0;
SharedNoiseModel robust = noiseModel::Robust::Create(
noiseModel::mEstimator::L2WithDeadZone::Create(dead_zone_size),
Unit::Create(3));

/*
* TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes
* implement a residual function, and GTSAM calls the weight function to evaluate the
* total penalty, rather than calling the residual function. The weight function should be
* used during iteratively reweighted least squares optimization, but should not be used to
* evaluate the total penalty. The long-term solution is for all mEstimators to implement
* both a weight and a residual function, and for GTSAM to call the residual function when
* evaluating the total penalty. This bug causes the test below to fail, so I'm leaving it
* commented out until the underlying bug in GTSAM is fixed.
*
* for (int i = 0; i < 5; i++) {
* Vector3 error = Vector3(i, 0, 0);
* DOUBLES_EQUAL(0.5*max(0,i-1)*max(0,i-1), robust->distance(error), 1e-8);
* }
*/

}

/* ************************************************************************* */
#define TEST_GAUSSIAN(gaussian)\
EQUALITY(info, gaussian->information());\

@ -128,9 +128,8 @@ TEST(RegularHessianFactor, Constructors)
EXPECT(assert_equal(2*expected_y, fast_y));

// check some expressions
EXPECT(assert_equal(G12,factor.info(i1,i2).knownOffDiagonal()));
EXPECT(assert_equal(G22,factor.info(i2,i2).selfadjointView()));
EXPECT(assert_equal((Matrix)G12.transpose(),factor.info(i2,i1).knownOffDiagonal()));
EXPECT(assert_equal(G12,factor.info().aboveDiagonalBlock(i1 - factor.begin(), i2 - factor.begin())));
EXPECT(assert_equal(G22,factor.info().diagonalBlock(i2 - factor.begin())));
}
}

@ -1,6 +1,6 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

@ -188,6 +188,14 @@ TEST(VectorValues, convert)
VectorValues actual(x,dims);
EXPECT(assert_equal(expected, actual));

Scatter scatter;
scatter.emplace_back(0,1);
scatter.emplace_back(1,2);
scatter.emplace_back(2,2);
scatter.emplace_back(5,2);
VectorValues actual2(x,scatter);
EXPECT(assert_equal(expected, actual2));

// Test other direction, note vector() is not guaranteed to give right result
FastVector<Key> keys = list_of(0)(1)(2)(5);
EXPECT(assert_equal(x, actual.vector(keys)));

@ -96,8 +96,9 @@ void PreintegratedImuMeasurements::integrateMeasurements(
void PreintegratedImuMeasurements::mergeWith(const PreintegratedImuMeasurements& pim12, //
Matrix9* H1, Matrix9* H2) {
PreintegrationType::mergeWith(pim12, H1, H2);
preintMeasCov_ =
*H1 * preintMeasCov_ * H1->transpose() + *H2 * pim12.preintMeasCov_ * H2->transpose();
// NOTE(gareth): Temporary P is needed as of Eigen 3.3
const Matrix9 P = *H1 * preintMeasCov_ * H1->transpose();
preintMeasCov_ = P + *H2 * pim12.preintMeasCov_ * H2->transpose();
}
#endif
//------------------------------------------------------------------------------
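
One way to read the reshuffle above (an interpretation of the NOTE, not a statement from the diff): preintMeasCov_ appears on both sides of the update Sigma <- H1 * Sigma * H1^T + H2 * Sigma12 * H2^T, so evaluating the H1 term into the temporary P first avoids reading preintMeasCov_ while it is being overwritten.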

@ -75,7 +75,7 @@ public:
/// @{
NavState deltaXij() const override { return deltaXij_; }
Rot3 deltaRij() const override { return deltaXij_.attitude(); }
Vector3 deltaPij() const override { return deltaXij_.position().vector(); }
Vector3 deltaPij() const override { return deltaXij_.position(); }
Vector3 deltaVij() const override { return deltaXij_.velocity(); }

Matrix3 delRdelBiasOmega() const { return delRdelBiasOmega_; }
Some files were not shown because too many files have changed in this diff