From c0c2564ac629e6d6919d2759421fd5682a74c62e Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 27 Jul 2020 09:29:28 -0400 Subject: [PATCH 001/142] Copy cython files --- python/gtsam_py/python/__init__.py | 26 ++ .../python/examples/DogLegOptimizerExample.py | 118 ++++++ .../python/examples/GPSFactorExample.py | 56 +++ .../python/examples/ImuFactorExample.py | 153 ++++++++ .../python/examples/ImuFactorExample2.py | 172 ++++++++ .../python/examples/OdometryExample.py | 69 ++++ .../examples/PlanarManipulatorExample.py | 334 ++++++++++++++++ .../python/examples/PlanarSLAMExample.py | 81 ++++ .../python/examples/Pose2SLAMExample.py | 97 +++++ .../python/examples/Pose2SLAMExample_g2o.py | 88 +++++ .../python/examples/Pose3SLAMExample_g2o.py | 71 ++++ ...Pose3SLAMExample_initializePose3Chordal.py | 35 ++ .../python/examples/PreintegrationExample.py | 140 +++++++ python/gtsam_py/python/examples/README.md | 57 +++ python/gtsam_py/python/examples/SFMExample.py | 121 ++++++ python/gtsam_py/python/examples/SFMdata.py | 39 ++ .../python/examples/SimpleRotation.py | 85 ++++ .../python/examples/VisualISAM2Example.py | 154 ++++++++ .../python/examples/VisualISAMExample.py | 103 +++++ python/gtsam_py/python/examples/__init__.py | 0 python/gtsam_py/python/tests/__init__.py | 0 .../python/tests/testScenarioRunner.py | 47 +++ .../gtsam_py/python/tests/test_Cal3Unified.py | 38 ++ .../python/tests/test_FrobeniusFactor.py | 56 +++ .../python/tests/test_GaussianFactorGraph.py | 92 +++++ .../python/tests/test_JacobianFactor.py | 92 +++++ .../python/tests/test_KalmanFilter.py | 83 ++++ .../python/tests/test_KarcherMeanFactor.py | 80 ++++ .../python/tests/test_LocalizationExample.py | 64 +++ .../python/tests/test_NonlinearOptimizer.py | 83 ++++ .../python/tests/test_OdometryExample.py | 59 +++ .../python/tests/test_PlanarSLAMExample.py | 78 ++++ python/gtsam_py/python/tests/test_Pose2.py | 32 ++ .../python/tests/test_Pose2SLAMExample.py | 76 ++++ python/gtsam_py/python/tests/test_Pose3.py | 70 ++++ .../python/tests/test_Pose3SLAMExample.py | 60 +++ .../gtsam_py/python/tests/test_PriorFactor.py | 61 +++ .../gtsam_py/python/tests/test_SFMExample.py | 82 ++++ python/gtsam_py/python/tests/test_SO4.py | 59 +++ python/gtsam_py/python/tests/test_SOn.py | 59 +++ python/gtsam_py/python/tests/test_Scenario.py | 53 +++ .../python/tests/test_SimpleCamera.py | 45 +++ .../python/tests/test_StereoVOExample.py | 82 ++++ python/gtsam_py/python/tests/test_Values.py | 86 ++++ .../python/tests/test_VisualISAMExample.py | 57 +++ python/gtsam_py/python/tests/test_dataset.py | 45 +++ python/gtsam_py/python/tests/test_dsf_map.py | 40 ++ .../python/tests/test_initialize_pose3.py | 89 +++++ .../python/tests/test_logging_optimizer.py | 108 +++++ python/gtsam_py/python/utils/__init__.py | 0 python/gtsam_py/python/utils/circlePose3.py | 38 ++ .../python/utils/logging_optimizer.py | 52 +++ python/gtsam_py/python/utils/plot.py | 369 ++++++++++++++++++ python/gtsam_py/python/utils/test_case.py | 27 ++ .../python/utils/visual_data_generator.py | 118 ++++++ python/gtsam_py/python/utils/visual_isam.py | 131 +++++++ 56 files changed, 4610 insertions(+) create mode 100644 python/gtsam_py/python/__init__.py create mode 100644 python/gtsam_py/python/examples/DogLegOptimizerExample.py create mode 100644 python/gtsam_py/python/examples/GPSFactorExample.py create mode 100644 python/gtsam_py/python/examples/ImuFactorExample.py create mode 100644 python/gtsam_py/python/examples/ImuFactorExample2.py create mode 100644 
python/gtsam_py/python/examples/OdometryExample.py create mode 100644 python/gtsam_py/python/examples/PlanarManipulatorExample.py create mode 100644 python/gtsam_py/python/examples/PlanarSLAMExample.py create mode 100644 python/gtsam_py/python/examples/Pose2SLAMExample.py create mode 100644 python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py create mode 100644 python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py create mode 100644 python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py create mode 100644 python/gtsam_py/python/examples/PreintegrationExample.py create mode 100644 python/gtsam_py/python/examples/README.md create mode 100644 python/gtsam_py/python/examples/SFMExample.py create mode 100644 python/gtsam_py/python/examples/SFMdata.py create mode 100644 python/gtsam_py/python/examples/SimpleRotation.py create mode 100644 python/gtsam_py/python/examples/VisualISAM2Example.py create mode 100644 python/gtsam_py/python/examples/VisualISAMExample.py create mode 100644 python/gtsam_py/python/examples/__init__.py create mode 100644 python/gtsam_py/python/tests/__init__.py create mode 100644 python/gtsam_py/python/tests/testScenarioRunner.py create mode 100644 python/gtsam_py/python/tests/test_Cal3Unified.py create mode 100644 python/gtsam_py/python/tests/test_FrobeniusFactor.py create mode 100644 python/gtsam_py/python/tests/test_GaussianFactorGraph.py create mode 100644 python/gtsam_py/python/tests/test_JacobianFactor.py create mode 100644 python/gtsam_py/python/tests/test_KalmanFilter.py create mode 100644 python/gtsam_py/python/tests/test_KarcherMeanFactor.py create mode 100644 python/gtsam_py/python/tests/test_LocalizationExample.py create mode 100644 python/gtsam_py/python/tests/test_NonlinearOptimizer.py create mode 100644 python/gtsam_py/python/tests/test_OdometryExample.py create mode 100644 python/gtsam_py/python/tests/test_PlanarSLAMExample.py create mode 100644 python/gtsam_py/python/tests/test_Pose2.py create mode 100644 python/gtsam_py/python/tests/test_Pose2SLAMExample.py create mode 100644 python/gtsam_py/python/tests/test_Pose3.py create mode 100644 python/gtsam_py/python/tests/test_Pose3SLAMExample.py create mode 100644 python/gtsam_py/python/tests/test_PriorFactor.py create mode 100644 python/gtsam_py/python/tests/test_SFMExample.py create mode 100644 python/gtsam_py/python/tests/test_SO4.py create mode 100644 python/gtsam_py/python/tests/test_SOn.py create mode 100644 python/gtsam_py/python/tests/test_Scenario.py create mode 100644 python/gtsam_py/python/tests/test_SimpleCamera.py create mode 100644 python/gtsam_py/python/tests/test_StereoVOExample.py create mode 100644 python/gtsam_py/python/tests/test_Values.py create mode 100644 python/gtsam_py/python/tests/test_VisualISAMExample.py create mode 100644 python/gtsam_py/python/tests/test_dataset.py create mode 100644 python/gtsam_py/python/tests/test_dsf_map.py create mode 100644 python/gtsam_py/python/tests/test_initialize_pose3.py create mode 100644 python/gtsam_py/python/tests/test_logging_optimizer.py create mode 100644 python/gtsam_py/python/utils/__init__.py create mode 100644 python/gtsam_py/python/utils/circlePose3.py create mode 100644 python/gtsam_py/python/utils/logging_optimizer.py create mode 100644 python/gtsam_py/python/utils/plot.py create mode 100644 python/gtsam_py/python/utils/test_case.py create mode 100644 python/gtsam_py/python/utils/visual_data_generator.py create mode 100644 python/gtsam_py/python/utils/visual_isam.py diff --git a/python/gtsam_py/python/__init__.py 
b/python/gtsam_py/python/__init__.py new file mode 100644 index 000000000..d40ee4502 --- /dev/null +++ b/python/gtsam_py/python/__init__.py @@ -0,0 +1,26 @@ +from .gtsam import * + +try: + import gtsam_unstable + + + def _deprecated_wrapper(item, name): + def wrapper(*args, **kwargs): + from warnings import warn + message = ('importing the unstable item "{}" directly from gtsam is deprecated. '.format(name) + + 'Please import it from gtsam_unstable.') + warn(message) + return item(*args, **kwargs) + return wrapper + + + for name in dir(gtsam_unstable): + if not name.startswith('__'): + item = getattr(gtsam_unstable, name) + if callable(item): + item = _deprecated_wrapper(item, name) + globals()[name] = item + +except ImportError: + pass + diff --git a/python/gtsam_py/python/examples/DogLegOptimizerExample.py b/python/gtsam_py/python/examples/DogLegOptimizerExample.py new file mode 100644 index 000000000..776ceedc4 --- /dev/null +++ b/python/gtsam_py/python/examples/DogLegOptimizerExample.py @@ -0,0 +1,118 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Example comparing DoglegOptimizer with Levenberg-Marquardt. +Author: Frank Dellaert +""" +# pylint: disable=no-member, invalid-name + +import math +import argparse + +import gtsam +import matplotlib.pyplot as plt +import numpy as np + + +def run(args): + """Test Dogleg vs LM, inspired by issue #452.""" + + # print parameters + print("num samples = {}, deltaInitial = {}".format( + args.num_samples, args.delta)) + + # Ground truth solution + T11 = gtsam.Pose2(0, 0, 0) + T12 = gtsam.Pose2(1, 0, 0) + T21 = gtsam.Pose2(0, 1, 0) + T22 = gtsam.Pose2(1, 1, 0) + + # Factor graph + graph = gtsam.NonlinearFactorGraph() + + # Priors + prior = gtsam.noiseModel_Isotropic.Sigma(3, 1) + graph.add(gtsam.PriorFactorPose2(11, T11, prior)) + graph.add(gtsam.PriorFactorPose2(21, T21, prior)) + + # Odometry + model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.01, 0.01, 0.3])) + graph.add(gtsam.BetweenFactorPose2(11, 12, T11.between(T12), model)) + graph.add(gtsam.BetweenFactorPose2(21, 22, T21.between(T22), model)) + + # Range + model_rho = gtsam.noiseModel_Isotropic.Sigma(1, 0.01) + graph.add(gtsam.RangeFactorPose2(12, 22, 1.0, model_rho)) + + params = gtsam.DoglegParams() + params.setDeltaInitial(args.delta) # default is 10 + + # Add progressively more noise to ground truth + sigmas = [0.01, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20] + n = len(sigmas) + p_dl, s_dl, p_lm, s_lm = [0]*n, [0]*n, [0]*n, [0]*n + for i, sigma in enumerate(sigmas): + dl_fails, lm_fails = 0, 0 + # Attempt num_samples optimizations for both DL and LM + for _attempt in range(args.num_samples): + initial = gtsam.Values() + initial.insert(11, T11.retract(np.random.normal(0, sigma, 3))) + initial.insert(12, T12.retract(np.random.normal(0, sigma, 3))) + initial.insert(21, T21.retract(np.random.normal(0, sigma, 3))) + initial.insert(22, T22.retract(np.random.normal(0, sigma, 3))) + + # Run dogleg optimizer + dl = gtsam.DoglegOptimizer(graph, initial, params) + result = dl.optimize() + dl_fails += graph.error(result) > 1e-9 + + # Run + lm = gtsam.LevenbergMarquardtOptimizer(graph, initial) + result = lm.optimize() + lm_fails += graph.error(result) > 1e-9 + + # Calculate Bayes estimate of success probability + # using a beta prior of alpha=0.5, beta=0.5 + alpha, beta = 0.5, 0.5 + v = args.num_samples+alpha+beta + p_dl[i] = (args.num_samples-dl_fails+alpha)/v + p_lm[i] = 
(args.num_samples-lm_fails+alpha)/v
+
+        def stddev(p):
+            """Calculate standard deviation."""
+            return math.sqrt(p*(1-p)/(1+v))
+
+        s_dl[i] = stddev(p_dl[i])
+        s_lm[i] = stddev(p_lm[i])
+
+        fmt = "sigma= {}:\tDL success {:.2f}% +/- {:.2f}%, LM success {:.2f}% +/- {:.2f}%"
+        print(fmt.format(sigma,
+                         100*p_dl[i], 100*s_dl[i],
+                         100*p_lm[i], 100*s_lm[i]))
+
+    if args.plot:
+        fig, ax = plt.subplots()
+        dl_plot = plt.errorbar(sigmas, p_dl, yerr=s_dl, label="Dogleg")
+        lm_plot = plt.errorbar(sigmas, p_lm, yerr=s_lm, label="LM")
+        plt.title("Dogleg empirical success vs. LM")
+        plt.legend(handles=[dl_plot, lm_plot])
+        ax.set_xlim(0, sigmas[-1]+1)
+        ax.set_ylim(0, 1)
+        plt.show()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="Compare Dogleg and LM success rates")
+    parser.add_argument("-n", "--num_samples", type=int, default=1000,
+                        help="Number of samples for each sigma")
+    parser.add_argument("-d", "--delta", type=float, default=10.0,
+                        help="Initial delta for dogleg")
+    parser.add_argument("-p", "--plot", action="store_true",
+                        help="Flag to plot results")
+    args = parser.parse_args()
+    run(args)
diff --git a/python/gtsam_py/python/examples/GPSFactorExample.py b/python/gtsam_py/python/examples/GPSFactorExample.py
new file mode 100644
index 000000000..493a07725
--- /dev/null
+++ b/python/gtsam_py/python/examples/GPSFactorExample.py
@@ -0,0 +1,56 @@
+"""
+GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
+Atlanta, Georgia 30332-0415
+All Rights Reserved
+Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+See LICENSE for the license information
+
+Simple robot motion example, with prior and one GPS measurement
+Author: Mandy Xie
+"""
+# pylint: disable=invalid-name, E1101
+
+from __future__ import print_function
+
+import numpy as np
+
+import gtsam
+
+import matplotlib.pyplot as plt
+import gtsam.utils.plot as gtsam_plot
+
+# ENU Origin is where the plane was in hold next to runway
+lat0 = 33.86998
+lon0 = -84.30626
+h0 = 274
+
+# Create noise models
+GPS_NOISE = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
+PRIOR_NOISE = gtsam.noiseModel_Isotropic.Sigma(6, 0.25)
+
+# Create an empty nonlinear factor graph
+graph = gtsam.NonlinearFactorGraph()
+
+# Add a prior on the first point, setting it to the origin
+# A prior factor consists of a mean and a noise model (covariance matrix)
+priorMean = gtsam.Pose3() # prior at origin
+graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE))
+
+# Add GPS factors
+gps = gtsam.Point3(lat0, lon0, h0)
+graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE))
+print("\nFactor Graph:\n{}".format(graph))
+
+# Create the data structure to hold the initialEstimate estimate to the solution
+# For illustrative purposes, these have been deliberately set to incorrect values
+initial = gtsam.Values()
+initial.insert(1, gtsam.Pose3())
+print("\nInitial Estimate:\n{}".format(initial))
+
+# optimize using Levenberg-Marquardt optimization
+params = gtsam.LevenbergMarquardtParams()
+optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
+result = optimizer.optimize()
+print("\nFinal Result:\n{}".format(result))
+
diff --git a/python/gtsam_py/python/examples/ImuFactorExample.py b/python/gtsam_py/python/examples/ImuFactorExample.py
new file mode 100644
index 000000000..0e01766e7
--- /dev/null
+++ b/python/gtsam_py/python/examples/ImuFactorExample.py
@@ -0,0 +1,153 @@
+"""
+GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
+Atlanta, Georgia 30332-0415
+All Rights Reserved
+
+See LICENSE for the 
license information + +A script validating and demonstrating the ImuFactor inference. + +Author: Frank Dellaert, Varun Agrawal +""" + +from __future__ import print_function + +import math + +import gtsam +import matplotlib.pyplot as plt +import numpy as np +from gtsam import symbol_shorthand_B as B +from gtsam import symbol_shorthand_V as V +from gtsam import symbol_shorthand_X as X +from gtsam.utils.plot import plot_pose3 +from mpl_toolkits.mplot3d import Axes3D + +from PreintegrationExample import POSES_FIG, PreintegrationExample + +BIAS_KEY = B(0) + + +np.set_printoptions(precision=3, suppress=True) + + +class ImuFactorExample(PreintegrationExample): + + def __init__(self): + self.velocity = np.array([2, 0, 0]) + self.priorNoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) + self.velNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + + # Choose one of these twists to change scenario: + zero_twist = (np.zeros(3), np.zeros(3)) + forward_twist = (np.zeros(3), self.velocity) + loop_twist = (np.array([0, -math.radians(30), 0]), self.velocity) + sick_twist = ( + np.array([math.radians(30), -math.radians(30), 0]), self.velocity) + + accBias = np.array([-0.3, 0.1, 0.2]) + gyroBias = np.array([0.1, 0.3, -0.1]) + bias = gtsam.imuBias_ConstantBias(accBias, gyroBias) + + dt = 1e-2 + super(ImuFactorExample, self).__init__(sick_twist, bias, dt) + + def addPrior(self, i, graph): + state = self.scenario.navState(i) + graph.push_back(gtsam.PriorFactorPose3( + X(i), state.pose(), self.priorNoise)) + graph.push_back(gtsam.PriorFactorVector( + V(i), state.velocity(), self.velNoise)) + + def run(self): + graph = gtsam.NonlinearFactorGraph() + + # initialize data structure for pre-integrated IMU measurements + pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias) + + T = 12 + num_poses = T + 1 # assumes 1 factor per second + initial = gtsam.Values() + initial.insert(BIAS_KEY, self.actualBias) + for i in range(num_poses): + state_i = self.scenario.navState(float(i)) + + poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) + pose = state_i.pose().compose(poseNoise) + + velocity = state_i.velocity() + np.random.randn(3)*0.1 + + initial.insert(X(i), pose) + initial.insert(V(i), velocity) + + # simulate the loop + i = 0 # state index + actual_state_i = self.scenario.navState(0) + for k, t in enumerate(np.arange(0, T, self.dt)): + # get measurements and add them to PIM + measuredOmega = self.runner.measuredAngularVelocity(t) + measuredAcc = self.runner.measuredSpecificForce(t) + pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt) + + poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) + + actual_state_i = gtsam.NavState( + actual_state_i.pose().compose(poseNoise), + actual_state_i.velocity() + np.random.randn(3)*0.1) + + # Plot IMU many times + if k % 10 == 0: + self.plotImu(t, measuredOmega, measuredAcc) + + # Plot every second + if k % int(1 / self.dt) == 0: + self.plotGroundTruthPose(t) + + # create IMU factor every second + if (k + 1) % int(1 / self.dt) == 0: + factor = gtsam.ImuFactor(X(i), V(i), X( + i + 1), V(i + 1), BIAS_KEY, pim) + graph.push_back(factor) + if True: + print(factor) + print(pim.predict(actual_state_i, self.actualBias)) + pim.resetIntegration() + actual_state_i = self.scenario.navState(t + self.dt) + i += 1 + + # add priors on beginning and end + self.addPrior(0, graph) + self.addPrior(num_poses - 1, graph) + + # optimize using Levenberg-Marquardt optimization + params = gtsam.LevenbergMarquardtParams() + params.setVerbosityLM("SUMMARY") + optimizer = 
gtsam.LevenbergMarquardtOptimizer(graph, initial, params) + result = optimizer.optimize() + + # Calculate and print marginal covariances + marginals = gtsam.Marginals(graph, result) + print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY)) + for i in range(num_poses): + print("Covariance on pose {}:\n{}\n".format( + i, marginals.marginalCovariance(X(i)))) + print("Covariance on vel {}:\n{}\n".format( + i, marginals.marginalCovariance(V(i)))) + + # Plot resulting poses + i = 0 + while result.exists(X(i)): + pose_i = result.atPose3(X(i)) + plot_pose3(POSES_FIG, pose_i, 0.1) + i += 1 + + gtsam.utils.plot.set_axes_equal(POSES_FIG) + + print(result.atimuBias_ConstantBias(BIAS_KEY)) + + plt.ioff() + plt.show() + + +if __name__ == '__main__': + ImuFactorExample().run() diff --git a/python/gtsam_py/python/examples/ImuFactorExample2.py b/python/gtsam_py/python/examples/ImuFactorExample2.py new file mode 100644 index 000000000..0d98f08fe --- /dev/null +++ b/python/gtsam_py/python/examples/ImuFactorExample2.py @@ -0,0 +1,172 @@ +""" +iSAM2 example with ImuFactor. +Author: Robert Truax (C++), Frank Dellaert (Python) +""" +# pylint: disable=invalid-name, E1101 + +from __future__ import print_function + +import math + +import gtsam +import gtsam.utils.plot as gtsam_plot +import matplotlib.pyplot as plt +import numpy as np +from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, + ConstantTwistScenario, ImuFactor, NonlinearFactorGraph, + PinholeCameraCal3_S2, Point3, Pose3, + PriorFactorConstantBias, PriorFactorPose3, + PriorFactorVector, Rot3, Values) +from gtsam import symbol_shorthand_B as B +from gtsam import symbol_shorthand_V as V +from gtsam import symbol_shorthand_X as X +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 + + +def vector3(x, y, z): + """Create 3d double numpy array.""" + return np.array([x, y, z], dtype=np.float) + + +def ISAM2_plot(values, fignum=0): + """Plot poses.""" + fig = plt.figure(fignum) + axes = fig.gca(projection='3d') + plt.cla() + + i = 0 + min_bounds = 0, 0, 0 + max_bounds = 0, 0, 0 + while values.exists(X(i)): + pose_i = values.atPose3(X(i)) + gtsam_plot.plot_pose3(fignum, pose_i, 10) + position = pose_i.translation().vector() + min_bounds = [min(v1, v2) for (v1, v2) in zip(position, min_bounds)] + max_bounds = [max(v1, v2) for (v1, v2) in zip(position, max_bounds)] + # max_bounds = min(pose_i.x(), max_bounds[0]), 0, 0 + i += 1 + + # draw + axes.set_xlim3d(min_bounds[0]-1, max_bounds[0]+1) + axes.set_ylim3d(min_bounds[1]-1, max_bounds[1]+1) + axes.set_zlim3d(min_bounds[2]-1, max_bounds[2]+1) + plt.pause(1) + + +# IMU preintegration parameters +# Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis +g = 9.81 +n_gravity = vector3(0, 0, -g) +PARAMS = gtsam.PreintegrationParams.MakeSharedU(g) +I = np.eye(3) +PARAMS.setAccelerometerCovariance(I * 0.1) +PARAMS.setGyroscopeCovariance(I * 0.1) +PARAMS.setIntegrationCovariance(I * 0.1) +PARAMS.setUse2ndOrderCoriolis(False) +PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) + +BIAS_COVARIANCE = gtsam.noiseModel_Isotropic.Variance(6, 0.1) +DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), + Point3(0.05, -0.10, 0.20)) + + +def IMU_example(): + """Run iSAM 2 example with IMU factor.""" + + # Start with a camera on x-axis looking at origin + radius = 30 + up = Point3(0, 0, 1) + target = Point3(0, 0, 0) + position = Point3(radius, 0, 0) + camera = PinholeCameraCal3_S2.Lookat(position, target, up, Cal3_S2()) + pose_0 = camera.pose() + + # Create the set of ground-truth 
landmarks and poses + angular_velocity = math.radians(180) # rad/sec + delta_t = 1.0/18 # makes for 10 degrees per step + + angular_velocity_vector = vector3(0, -angular_velocity, 0) + linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) + scenario = ConstantTwistScenario( + angular_velocity_vector, linear_velocity_vector, pose_0) + + # Create a factor graph + newgraph = NonlinearFactorGraph() + + # Create (incremental) ISAM2 solver + isam = ISAM2() + + # Create the initial estimate to the solution + # Intentionally initialize the variables off from the ground truth + initialEstimate = Values() + + # Add a prior on pose x0. This indirectly specifies where the origin is. + # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw + noise = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) + newgraph.push_back(PriorFactorPose3(X(0), pose_0, noise)) + + # Add imu priors + biasKey = B(0) + biasnoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) + biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias_ConstantBias(), + biasnoise) + newgraph.push_back(biasprior) + initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) + velnoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + + # Calculate with correct initial velocity + n_velocity = vector3(0, angular_velocity * radius, 0) + velprior = PriorFactorVector(V(0), n_velocity, velnoise) + newgraph.push_back(velprior) + initialEstimate.insert(V(0), n_velocity) + + accum = gtsam.PreintegratedImuMeasurements(PARAMS) + + # Simulate poses and imu measurements, adding them to the factor graph + for i in range(80): + t = i * delta_t # simulation time + if i == 0: # First time add two poses + pose_1 = scenario.pose(delta_t) + initialEstimate.insert(X(0), pose_0.compose(DELTA)) + initialEstimate.insert(X(1), pose_1.compose(DELTA)) + elif i >= 2: # Add more poses as necessary + pose_i = scenario.pose(t) + initialEstimate.insert(X(i), pose_i.compose(DELTA)) + + if i > 0: + # Add Bias variables periodically + if i % 5 == 0: + biasKey += 1 + factor = BetweenFactorConstantBias( + biasKey - 1, biasKey, gtsam.imuBias_ConstantBias(), BIAS_COVARIANCE) + newgraph.add(factor) + initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) + + # Predict acceleration and gyro measurements in (actual) body frame + nRb = scenario.rotation(t).matrix() + bRn = np.transpose(nRb) + measuredAcc = scenario.acceleration_b(t) - np.dot(bRn, n_gravity) + measuredOmega = scenario.omega_b(t) + accum.integrateMeasurement(measuredAcc, measuredOmega, delta_t) + + # Add Imu Factor + imufac = ImuFactor(X(i - 1), V(i - 1), X(i), V(i), biasKey, accum) + newgraph.add(imufac) + + # insert new velocity, which is wrong + initialEstimate.insert(V(i), n_velocity) + accum.resetIntegration() + + # Incremental solution + isam.update(newgraph, initialEstimate) + result = isam.calculateEstimate() + ISAM2_plot(result) + + # reset + newgraph = NonlinearFactorGraph() + initialEstimate.clear() + + +if __name__ == '__main__': + IMU_example() diff --git a/python/gtsam_py/python/examples/OdometryExample.py b/python/gtsam_py/python/examples/OdometryExample.py new file mode 100644 index 000000000..e778e3f85 --- /dev/null +++ b/python/gtsam_py/python/examples/OdometryExample.py @@ -0,0 +1,69 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + +See LICENSE for the license information + +Simple robot motion example, with prior and two odometry measurements +Author: Frank Dellaert +""" +# pylint: disable=invalid-name, E1101 + +from __future__ import print_function + +import numpy as np + +import gtsam + +import matplotlib.pyplot as plt +import gtsam.utils.plot as gtsam_plot + +# Create noise models +ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + +# Create an empty nonlinear factor graph +graph = gtsam.NonlinearFactorGraph() + +# Add a prior on the first pose, setting it to the origin +# A prior factor consists of a mean and a noise model (covariance matrix) +priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin +graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE)) + +# Add odometry factors +odometry = gtsam.Pose2(2.0, 0.0, 0.0) +# For simplicity, we will use the same noise model for each odometry factor +# Create odometry (Between) factors between consecutive poses +graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, ODOMETRY_NOISE)) +graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, ODOMETRY_NOISE)) +print("\nFactor Graph:\n{}".format(graph)) + +# Create the data structure to hold the initialEstimate estimate to the solution +# For illustrative purposes, these have been deliberately set to incorrect values +initial = gtsam.Values() +initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) +initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) +initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1)) +print("\nInitial Estimate:\n{}".format(initial)) + +# optimize using Levenberg-Marquardt optimization +params = gtsam.LevenbergMarquardtParams() +optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) +result = optimizer.optimize() +print("\nFinal Result:\n{}".format(result)) + +# 5. Calculate and print marginal covariances for all variables +marginals = gtsam.Marginals(graph, result) +for i in range(1, 4): + print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i))) + +fig = plt.figure(0) +for i in range(1, 4): + gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i)) +plt.axis('equal') +plt.show() + + + diff --git a/python/gtsam_py/python/examples/PlanarManipulatorExample.py b/python/gtsam_py/python/examples/PlanarManipulatorExample.py new file mode 100644 index 000000000..e42ae09d7 --- /dev/null +++ b/python/gtsam_py/python/examples/PlanarManipulatorExample.py @@ -0,0 +1,334 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +Kinematics of three-link manipulator with GTSAM poses and product of exponential maps. 
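A minimal sketch of that product-of-exponentials model, as implemented in ThreeLinkArm.poe below,
with each planar screw axis written as xi = (v_x, v_y, omega):

    sTt(q) = Expmap(xi1*q1) * Expmap(xi2*q2) * Expmap(xi3*q3) * sXt0
    xi1 = (0, 0, 1), xi2 = (L1, 0, 1), xi3 = (L1+L2, 0, 1)

where sXt0 is the tool pose at the zero configuration and Expmap is gtsam.Pose2.Expmap.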
+Author: Frank Dellaert +""" +# pylint: disable=invalid-name, E1101 + +from __future__ import print_function + +import math +import unittest +from functools import reduce + +import matplotlib.pyplot as plt +import numpy as np +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 + +import gtsam +import gtsam.utils.plot as gtsam_plot +from gtsam import Pose2 +from gtsam.utils.test_case import GtsamTestCase + + +def vector3(x, y, z): + """Create 3D double numpy array.""" + return np.array([x, y, z], dtype=np.float) + + +def compose(*poses): + """Compose all Pose2 transforms given as arguments from left to right.""" + return reduce((lambda x, y: x.compose(y)), poses) + + +def vee(M): + """Pose2 vee operator.""" + return vector3(M[0, 2], M[1, 2], M[1, 0]) + + +def delta(g0, g1): + """Difference between x,y,,theta components of SE(2) poses.""" + return vector3(g1.x() - g0.x(), g1.y() - g0.y(), g1.theta() - g0.theta()) + + +def trajectory(g0, g1, N=20): + """ Create an interpolated trajectory in SE(2), treating x,y, and theta separately. + g0 and g1 are the initial and final pose, respectively. + N is the number of *intervals* + Returns N+1 poses + """ + e = delta(g0, g1) + return [Pose2(g0.x()+e[0]*t, g0.y()+e[1]*t, g0.theta()+e[2]*t) for t in np.linspace(0, 1, N)] + + +class ThreeLinkArm(object): + """Three-link arm class.""" + + def __init__(self): + self.L1 = 3.5 + self.L2 = 3.5 + self.L3 = 2.5 + self.xi1 = vector3(0, 0, 1) + self.xi2 = vector3(self.L1, 0, 1) + self.xi3 = vector3(self.L1+self.L2, 0, 1) + self.sXt0 = Pose2(0, self.L1+self.L2 + self.L3, math.radians(90)) + + def fk(self, q): + """ Forward kinematics. + Takes numpy array of joint angles, in radians. + """ + sXl1 = Pose2(0, 0, math.radians(90)) + l1Zl1 = Pose2(0, 0, q[0]) + l1Xl2 = Pose2(self.L1, 0, 0) + l2Zl2 = Pose2(0, 0, q[1]) + l2Xl3 = Pose2(self.L2, 0, 0) + l3Zl3 = Pose2(0, 0, q[2]) + l3Xt = Pose2(self.L3, 0, 0) + return compose(sXl1, l1Zl1, l1Xl2, l2Zl2, l2Xl3, l3Zl3, l3Xt) + + def jacobian(self, q): + """ Calculate manipulator Jacobian. + Takes numpy array of joint angles, in radians. + """ + a = q[0]+q[1] + b = a+q[2] + return np.array([[-self.L1*math.cos(q[0]) - self.L2*math.cos(a)-self.L3*math.cos(b), + -self.L1*math.cos(a)-self.L3*math.cos(b), + - self.L3*math.cos(b)], + [-self.L1*math.sin(q[0]) - self.L2*math.sin(a)-self.L3*math.sin(b), + -self.L1*math.sin(a)-self.L3*math.sin(b), + - self.L3*math.sin(b)], + [1, 1, 1]], np.float) + + def poe(self, q): + """ Forward kinematics. + Takes numpy array of joint angles, in radians. + """ + l1Zl1 = Pose2.Expmap(self.xi1 * q[0]) + l2Zl2 = Pose2.Expmap(self.xi2 * q[1]) + l3Zl3 = Pose2.Expmap(self.xi3 * q[2]) + return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0) + + def con(self, q): + """ Forward kinematics, conjugation form. + Takes numpy array of joint angles, in radians. + """ + def expmap(x, y, theta): + """Implement exponential map via conjugation with axis (x,y).""" + return compose(Pose2(x, y, 0), Pose2(0, 0, theta), Pose2(-x, -y, 0)) + + l1Zl1 = expmap(0.0, 0.0, q[0]) + l2Zl2 = expmap(0.0, self.L1, q[1]) + l3Zl3 = expmap(0.0, self.L1+self.L2, q[2]) + return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0) + + def ik(self, sTt_desired, e=1e-9): + """ Inverse kinematics. + Takes desired Pose2 of tool T with respect to base S. 
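As a sketch of the iteration used below: q <- q - pinv(J(q)) * delta(sTt_desired, fk(q)),
repeated until the error norm falls below e.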
+ Optional: mu, gradient descent rate; e: error norm threshold + """ + q = np.radians(vector3(30, -30, 45)) # well within workspace + error = vector3(100, 100, 100) + + while np.linalg.norm(error) > e: + error = delta(sTt_desired, self.fk(q)) + J = self.jacobian(q) + q -= np.dot(np.linalg.pinv(J), error) + + # return result in interval [-pi,pi) + return np.remainder(q+math.pi, 2*math.pi)-math.pi + + def manipulator_jacobian(self, q): + """ Calculate manipulator Jacobian. + Takes numpy array of joint angles, in radians. + Returns the manipulator Jacobian of differential twists. When multiplied with + a vector of joint velocities, will yield a single differential twist which is + the spatial velocity d(sTt)/dt * inv(sTt) of the end-effector pose. + Just like always, differential twists can be hatted and multiplied with spatial + coordinates of a point to give the spatial velocity of the point. + """ + l1Zl1 = Pose2.Expmap(self.xi1 * q[0]) + l2Zl2 = Pose2.Expmap(self.xi2 * q[1]) + # l3Zl3 = Pose2.Expmap(self.xi3 * q[2]) + + p1 = self.xi1 + # p1 = Pose2().Adjoint(self.xi1) + + sTl1 = l1Zl1 + p2 = sTl1.Adjoint(self.xi2) + + sTl2 = compose(l1Zl1, l2Zl2) + p3 = sTl2.Adjoint(self.xi3) + + differential_twists = [p1, p2, p3] + return np.stack(differential_twists, axis=1) + + def plot(self, fignum, q): + """ Plot arm. + Takes figure number, and numpy array of joint angles, in radians. + """ + fig = plt.figure(fignum) + axes = fig.gca() + + sXl1 = Pose2(0, 0, math.radians(90)) + t = sXl1.translation() + p1 = np.array([t.x(), t.y()]) + gtsam_plot.plot_pose2_on_axes(axes, sXl1) + + def plot_line(p, g, color): + t = g.translation() + q = np.array([t.x(), t.y()]) + line = np.append(p[np.newaxis], q[np.newaxis], axis=0) + axes.plot(line[:, 0], line[:, 1], color) + return q + + l1Zl1 = Pose2(0, 0, q[0]) + l1Xl2 = Pose2(self.L1, 0, 0) + sTl2 = compose(sXl1, l1Zl1, l1Xl2) + p2 = plot_line(p1, sTl2, 'r-') + gtsam_plot.plot_pose2_on_axes(axes, sTl2) + + l2Zl2 = Pose2(0, 0, q[1]) + l2Xl3 = Pose2(self.L2, 0, 0) + sTl3 = compose(sTl2, l2Zl2, l2Xl3) + p3 = plot_line(p2, sTl3, 'g-') + gtsam_plot.plot_pose2_on_axes(axes, sTl3) + + l3Zl3 = Pose2(0, 0, q[2]) + l3Xt = Pose2(self.L3, 0, 0) + sTt = compose(sTl3, l3Zl3, l3Xt) + plot_line(p3, sTt, 'b-') + gtsam_plot.plot_pose2_on_axes(axes, sTt) + + +# Create common example configurations. 
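# Illustrative sketch (not from the original example; relies on the definitions above):
# the manipulator Jacobian documented in ThreeLinkArm.manipulator_jacobian maps joint
# velocities dq to a spatial twist xi_s = J_s(q) * dq of the end-effector. The names
# _sketch_arm, _dq and _xi_s are made up for this illustration only.
_sketch_arm = ThreeLinkArm()
_dq = vector3(0.1, 0.0, -0.05)  # joint velocities in rad/s
_xi_s = np.dot(_sketch_arm.manipulator_jacobian(vector3(0, 0, 0)), _dq)  # twist (vx, vy, omega) at rest
# (The common example configurations referred to above follow.)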
+Q0 = vector3(0, 0, 0) +Q1 = np.radians(vector3(-30, -45, -90)) +Q2 = np.radians(vector3(-90, 90, 0)) + + +class TestPose2SLAMExample(GtsamTestCase): + """Unit tests for functions used below.""" + + def setUp(self): + self.arm = ThreeLinkArm() + + def assertPose2Equals(self, actual, expected, tol=1e-2): + """Helper function that prints out actual and expected if not equal.""" + equal = actual.equals(expected, tol) + if not equal: + raise self.failureException( + "Poses are not equal:\n{}!={}".format(actual, expected)) + + def test_fk_arm(self): + """Make sure forward kinematics is correct for some known test configurations.""" + # at rest + expected = Pose2(0, 2*3.5 + 2.5, math.radians(90)) + sTt = self.arm.fk(Q0) + self.assertIsInstance(sTt, Pose2) + self.assertPose2Equals(sTt, expected) + + # -30, -45, -90 + expected = Pose2(5.78, 1.52, math.radians(-75)) + sTt = self.arm.fk(Q1) + self.assertPose2Equals(sTt, expected) + + def test_jacobian(self): + """Test Jacobian calculation.""" + # at rest + expected = np.array([[-9.5, -6, -2.5], [0, 0, 0], [1, 1, 1]], np.float) + J = self.arm.jacobian(Q0) + np.testing.assert_array_almost_equal(J, expected) + + # at -90, 90, 0 + expected = np.array([[-6, -6, -2.5], [3.5, 0, 0], [1, 1, 1]], np.float) + J = self.arm.jacobian(Q2) + np.testing.assert_array_almost_equal(J, expected) + + def test_con_arm(self): + """Make sure POE is correct for some known test configurations.""" + # at rest + expected = Pose2(0, 2*3.5 + 2.5, math.radians(90)) + sTt = self.arm.con(Q0) + self.assertIsInstance(sTt, Pose2) + self.assertPose2Equals(sTt, expected) + + # -30, -45, -90 + expected = Pose2(5.78, 1.52, math.radians(-75)) + sTt = self.arm.con(Q1) + self.assertPose2Equals(sTt, expected) + + def test_poe_arm(self): + """Make sure POE is correct for some known test configurations.""" + # at rest + expected = Pose2(0, 2*3.5 + 2.5, math.radians(90)) + sTt = self.arm.poe(Q0) + self.assertIsInstance(sTt, Pose2) + self.assertPose2Equals(sTt, expected) + + # -30, -45, -90 + expected = Pose2(5.78, 1.52, math.radians(-75)) + sTt = self.arm.poe(Q1) + self.assertPose2Equals(sTt, expected) + + def test_ik(self): + """Check iterative inverse kinematics function.""" + # at rest + actual = self.arm.ik(Pose2(0, 2*3.5 + 2.5, math.radians(90))) + np.testing.assert_array_almost_equal(actual, Q0, decimal=2) + + # -30, -45, -90 + sTt_desired = Pose2(5.78, 1.52, math.radians(-75)) + actual = self.arm.ik(sTt_desired) + self.assertPose2Equals(self.arm.fk(actual), sTt_desired) + np.testing.assert_array_almost_equal(actual, Q1, decimal=2) + + def test_manipulator_jacobian(self): + """Test Jacobian calculation.""" + # at rest + expected = np.array([[0, 3.5, 7], [0, 0, 0], [1, 1, 1]], np.float) + J = self.arm.manipulator_jacobian(Q0) + np.testing.assert_array_almost_equal(J, expected) + + # at -90, 90, 0 + expected = np.array( + [[0, 0, 3.5], [0, -3.5, -3.5], [1, 1, 1]], np.float) + J = self.arm.manipulator_jacobian(Q2) + np.testing.assert_array_almost_equal(J, expected) + + +def run_example(): + """ Use trajectory interpolation and then trajectory tracking a la Murray + to move a 3-link arm on a straight line. 
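As a sketch, the tracking law applied once per interpolated pose in the loop below is
    dq = inv(J(q)) * delta(fk(q), pose), followed by q <- q + dq.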
+ """ + # Create arm + arm = ThreeLinkArm() + + # Get initial pose using forward kinematics + q = np.radians(vector3(30, -30, 45)) + sTt_initial = arm.fk(q) + + # Create interpolated trajectory in task space to desired goal pose + sTt_goal = Pose2(2.4, 4.3, math.radians(0)) + poses = trajectory(sTt_initial, sTt_goal, 50) + + # Setup figure and plot initial pose + fignum = 0 + fig = plt.figure(fignum) + axes = fig.gca() + axes.set_xlim(-5, 5) + axes.set_ylim(0, 10) + gtsam_plot.plot_pose2(fignum, arm.fk(q)) + + # For all poses in interpolated trajectory, calculate dq to move to next pose. + # We do this by calculating the local Jacobian J and doing dq = inv(J)*delta(sTt, pose). + for pose in poses: + sTt = arm.fk(q) + error = delta(sTt, pose) + J = arm.jacobian(q) + q += np.dot(np.linalg.inv(J), error) + arm.plot(fignum, q) + plt.pause(0.01) + + plt.pause(10) + + +if __name__ == "__main__": + run_example() + unittest.main() diff --git a/python/gtsam_py/python/examples/PlanarSLAMExample.py b/python/gtsam_py/python/examples/PlanarSLAMExample.py new file mode 100644 index 000000000..c84f0f834 --- /dev/null +++ b/python/gtsam_py/python/examples/PlanarSLAMExample.py @@ -0,0 +1,81 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +Simple robotics example using odometry measurements and bearing-range (laser) measurements +Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python) +""" +# pylint: disable=invalid-name, E1101 + +from __future__ import print_function + +import gtsam +import numpy as np +from gtsam import symbol_shorthand_L as L +from gtsam import symbol_shorthand_X as X + +# Create noise models +PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) +ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +MEASUREMENT_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.2])) + +# Create an empty nonlinear factor graph +graph = gtsam.NonlinearFactorGraph() + +# Create the keys corresponding to unknown variables in the factor graph +X1 = X(1) +X2 = X(2) +X3 = X(3) +L1 = L(4) +L2 = L(5) + +# Add a prior on pose X1 at the origin. 
A prior factor consists of a mean and a noise model +graph.add(gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE)) + +# Add odometry factors between X1,X2 and X2,X3, respectively +graph.add(gtsam.BetweenFactorPose2( + X1, X2, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE)) +graph.add(gtsam.BetweenFactorPose2( + X2, X3, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE)) + +# Add Range-Bearing measurements to two different landmarks L1 and L2 +graph.add(gtsam.BearingRangeFactor2D( + X1, L1, gtsam.Rot2.fromDegrees(45), np.sqrt(4.0+4.0), MEASUREMENT_NOISE)) +graph.add(gtsam.BearingRangeFactor2D( + X2, L1, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE)) +graph.add(gtsam.BearingRangeFactor2D( + X3, L2, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE)) + +# Print graph +print("Factor Graph:\n{}".format(graph)) + +# Create (deliberately inaccurate) initial estimate +initial_estimate = gtsam.Values() +initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15)) +initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20)) +initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10)) +initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10)) +initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80)) + +# Print +print("Initial Estimate:\n{}".format(initial_estimate)) + +# Optimize using Levenberg-Marquardt optimization. The optimizer +# accepts an optional set of configuration parameters, controlling +# things like convergence criteria, the type of linear system solver +# to use, and the amount of information displayed during optimization. +# Here we will use the default set of parameters. See the +# documentation for the full set of parameters. +params = gtsam.LevenbergMarquardtParams() +optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params) +result = optimizer.optimize() +print("\nFinal Result:\n{}".format(result)) + +# Calculate and print marginal covariances for all variables +marginals = gtsam.Marginals(graph, result) +for (key, str) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"), (L2, "L2")]: + print("{} covariance:\n{}\n".format(str, marginals.marginalCovariance(key))) diff --git a/python/gtsam_py/python/examples/Pose2SLAMExample.py b/python/gtsam_py/python/examples/Pose2SLAMExample.py new file mode 100644 index 000000000..680f2209f --- /dev/null +++ b/python/gtsam_py/python/examples/Pose2SLAMExample.py @@ -0,0 +1,97 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +Simple robotics example using odometry measurements and bearing-range (laser) measurements +Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python) +""" +# pylint: disable=invalid-name, E1101 + +from __future__ import print_function + +import math + +import numpy as np + +import gtsam + +import matplotlib.pyplot as plt +import gtsam.utils.plot as gtsam_plot + + +def vector3(x, y, z): + """Create 3d double numpy array.""" + return np.array([x, y, z], dtype=np.float) + + +# Create noise models +PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) +ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) + +# 1. Create a factor graph container and add factors to it +graph = gtsam.NonlinearFactorGraph() + +# 2a. 
Add a prior on the first pose, setting it to the origin +# A prior factor consists of a mean and a noise ODOMETRY_NOISE (covariance matrix) +graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE)) + +# 2b. Add odometry factors +# Create odometry (Between) factors between consecutive poses +graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE)) +graph.add(gtsam.BetweenFactorPose2( + 2, 3, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) +graph.add(gtsam.BetweenFactorPose2( + 3, 4, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) +graph.add(gtsam.BetweenFactorPose2( + 4, 5, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) + +# 2c. Add the loop closure constraint +# This factor encodes the fact that we have returned to the same pose. In real +# systems, these constraints may be identified in many ways, such as appearance-based +# techniques with camera images. We will use another Between Factor to enforce this constraint: +graph.add(gtsam.BetweenFactorPose2( + 5, 2, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) +print("\nFactor Graph:\n{}".format(graph)) # print + +# 3. Create the data structure to hold the initial_estimate estimate to the +# solution. For illustrative purposes, these have been deliberately set to incorrect values +initial_estimate = gtsam.Values() +initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) +initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) +initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2)) +initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi)) +initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2)) +print("\nInitial Estimate:\n{}".format(initial_estimate)) # print + +# 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer +# The optimizer accepts an optional set of configuration parameters, +# controlling things like convergence criteria, the type of linear +# system solver to use, and the amount of information displayed during +# optimization. We will set a few parameters as a demonstration. +parameters = gtsam.GaussNewtonParams() + +# Stop iterating once the change in error between steps is less than this value +parameters.setRelativeErrorTol(1e-5) +# Do not perform more than N iteration steps +parameters.setMaxIterations(100) +# Create the optimizer ... +optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters) +# ... and optimize +result = optimizer.optimize() +print("Final Result:\n{}".format(result)) + +# 5. Calculate and print marginal covariances for all variables +marginals = gtsam.Marginals(graph, result) +for i in range(1, 6): + print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i))) + +fig = plt.figure(0) +for i in range(1, 6): + gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i)) + +plt.axis('equal') +plt.show() diff --git a/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py b/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py new file mode 100644 index 000000000..09114370d --- /dev/null +++ b/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py @@ -0,0 +1,88 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +A 2D Pose SLAM example that reads input from g2o, converts it to a factor graph +and does the optimization. 
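As used below, gtsam.readG2o returns the parsed factor graph and the initial values as a pair:
    graph, initial = gtsam.readG2o(g2oFile, is3D)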
Output is written on a file, in g2o format +""" +# pylint: disable=invalid-name, E1101 + +from __future__ import print_function +import argparse +import math +import numpy as np +import matplotlib.pyplot as plt + +import gtsam +from gtsam.utils import plot + + +def vector3(x, y, z): + """Create 3d double numpy array.""" + return np.array([x, y, z], dtype=np.float) + + +parser = argparse.ArgumentParser( + description="A 2D Pose SLAM example that reads input from g2o, " + "converts it to a factor graph and does the optimization. " + "Output is written on a file, in g2o format") +parser.add_argument('-i', '--input', help='input file g2o format') +parser.add_argument('-o', '--output', + help="the path to the output file with optimized graph") +parser.add_argument('-m', '--maxiter', type=int, + help="maximum number of iterations for optimizer") +parser.add_argument('-k', '--kernel', choices=['none', 'huber', 'tukey'], + default="none", help="Type of kernel used") +parser.add_argument("-p", "--plot", action="store_true", + help="Flag to plot results") +args = parser.parse_args() + +g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\ + else args.input + +maxIterations = 100 if args.maxiter is None else args.maxiter + +is3D = False + +graph, initial = gtsam.readG2o(g2oFile, is3D) + +assert args.kernel == "none", "Supplied kernel type is not yet implemented" + +# Add prior on the pose having index (key) = 0 +priorModel = gtsam.noiseModel_Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) +graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) + +params = gtsam.GaussNewtonParams() +params.setVerbosity("Termination") +params.setMaxIterations(maxIterations) +# parameters.setRelativeErrorTol(1e-5) +# Create the optimizer ... +optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) +# ... 
and optimize +result = optimizer.optimize() + +print("Optimization complete") +print("initial error = ", graph.error(initial)) +print("final error = ", graph.error(result)) + + +if args.output is None: + print("\nFactor Graph:\n{}".format(graph)) + print("\nInitial Estimate:\n{}".format(initial)) + print("Final Result:\n{}".format(result)) +else: + outputFile = args.output + print("Writing results to file: ", outputFile) + graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D) + gtsam.writeG2o(graphNoKernel, result, outputFile) + print ("Done!") + +if args.plot: + resultPoses = gtsam.utilities_extractPose2(result) + for i in range(resultPoses.shape[0]): + plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :])) + plt.show() diff --git a/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py b/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py new file mode 100644 index 000000000..3c1a54f7b --- /dev/null +++ b/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py @@ -0,0 +1,71 @@ +""" + * @file Pose3SLAMExample_initializePose3.cpp + * @brief A 3D Pose SLAM example that reads input from g2o, and initializes the + * Pose3 using InitializePose3 + * @date Jan 17, 2019 + * @author Vikrant Shah based on CPP example by Luca Carlone +""" +# pylint: disable=invalid-name, E1101 + +from __future__ import print_function +import argparse +import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D + +import gtsam +from gtsam.utils import plot + + +def vector6(x, y, z, a, b, c): + """Create 6d double numpy array.""" + return np.array([x, y, z, a, b, c], dtype=np.float) + + +parser = argparse.ArgumentParser( + description="A 3D Pose SLAM example that reads input from g2o, and " + "initializes Pose3") +parser.add_argument('-i', '--input', help='input file g2o format') +parser.add_argument('-o', '--output', + help="the path to the output file with optimized graph") +parser.add_argument("-p", "--plot", action="store_true", + help="Flag to plot results") +args = parser.parse_args() + +g2oFile = gtsam.findExampleDataFile("pose3example.txt") if args.input is None \ + else args.input + +is3D = True +graph, initial = gtsam.readG2o(g2oFile, is3D) + +# Add Prior on the first key +priorModel = gtsam.noiseModel_Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, + 1e-4, 1e-4, 1e-4)) + +print("Adding prior to g2o file ") +firstKey = initial.keys().at(0) +graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) + +params = gtsam.GaussNewtonParams() +params.setVerbosity("Termination") # this will show info about stopping conds +optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) +result = optimizer.optimize() +print("Optimization complete") + +print("initial error = ", graph.error(initial)) +print("final error = ", graph.error(result)) + +if args.output is None: + print("Final Result:\n{}".format(result)) +else: + outputFile = args.output + print("Writing results to file: ", outputFile) + graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D) + gtsam.writeG2o(graphNoKernel, result, outputFile) + print ("Done!") + +if args.plot: + resultPoses = gtsam.utilities_allPose3s(result) + for i in range(resultPoses.size()): + plot.plot_pose3(1, resultPoses.atPose3(i)) + plt.show() diff --git a/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py b/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py new file mode 100644 index 000000000..02c696905 --- /dev/null +++ b/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py @@ -0,0 
+1,35 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +Initialize PoseSLAM with Chordal init +Author: Luca Carlone, Frank Dellaert (python port) +""" +# pylint: disable=invalid-name, E1101 + +from __future__ import print_function + +import numpy as np + +import gtsam + +# Read graph from file +g2oFile = gtsam.findExampleDataFile("pose3example.txt") + +is3D = True +graph, initial = gtsam.readG2o(g2oFile, is3D) + +# Add prior on the first key. TODO: assumes first key ios z +priorModel = gtsam.noiseModel_Diagonal.Variances( + np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4])) +firstKey = initial.keys().at(0) +graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel)) + +# Initializing Pose3 - chordal relaxation" +initialization = gtsam.InitializePose3.initialize(graph) + +print(initialization) diff --git a/python/gtsam_py/python/examples/PreintegrationExample.py b/python/gtsam_py/python/examples/PreintegrationExample.py new file mode 100644 index 000000000..76520b355 --- /dev/null +++ b/python/gtsam_py/python/examples/PreintegrationExample.py @@ -0,0 +1,140 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +A script validating the Preintegration of IMU measurements +""" + +import math + +import matplotlib.pyplot as plt +import numpy as np +from mpl_toolkits.mplot3d import Axes3D + +import gtsam +from gtsam.utils.plot import plot_pose3 + +IMU_FIG = 1 +POSES_FIG = 2 + + +class PreintegrationExample(object): + + @staticmethod + def defaultParams(g): + """Create default parameters with Z *up* and realistic noise parameters""" + params = gtsam.PreintegrationParams.MakeSharedU(g) + kGyroSigma = math.radians(0.5) / 60 # 0.5 degree ARW + kAccelSigma = 0.1 / 60 # 10 cm VRW + params.setGyroscopeCovariance( + kGyroSigma ** 2 * np.identity(3, np.float)) + params.setAccelerometerCovariance( + kAccelSigma ** 2 * np.identity(3, np.float)) + params.setIntegrationCovariance( + 0.0000001 ** 2 * np.identity(3, np.float)) + return params + + def __init__(self, twist=None, bias=None, dt=1e-2): + """Initialize with given twist, a pair(angularVelocityVector, velocityVector).""" + + # setup interactive plotting + plt.ion() + + # Setup loop as default scenario + if twist is not None: + (W, V) = twist + else: + # default = loop with forward velocity 2m/s, while pitching up + # with angular velocity 30 degree/sec (negative in FLU) + W = np.array([0, -math.radians(30), 0]) + V = np.array([2, 0, 0]) + + self.scenario = gtsam.ConstantTwistScenario(W, V) + self.dt = dt + + self.maxDim = 5 + self.labels = list('xyz') + self.colors = list('rgb') + + # Create runner + self.g = 10 # simple gravity constant + self.params = self.defaultParams(self.g) + + if bias is not None: + self.actualBias = bias + else: + accBias = np.array([0, 0.1, 0]) + gyroBias = np.array([0, 0, 0]) + self.actualBias = gtsam.imuBias_ConstantBias(accBias, gyroBias) + + self.runner = gtsam.ScenarioRunner( + self.scenario, self.params, self.dt, self.actualBias) + + def plotImu(self, t, measuredOmega, measuredAcc): + plt.figure(IMU_FIG) + + # plot angular velocity + omega_b = self.scenario.omega_b(t) + for i, (label, color) in enumerate(zip(self.labels, self.colors)): + plt.subplot(4, 3, i + 1) + plt.scatter(t, omega_b[i], color='k', marker='.') + 
plt.scatter(t, measuredOmega[i], color=color, marker='.') + plt.xlabel('angular velocity ' + label) + + # plot acceleration in nav + acceleration_n = self.scenario.acceleration_n(t) + for i, (label, color) in enumerate(zip(self.labels, self.colors)): + plt.subplot(4, 3, i + 4) + plt.scatter(t, acceleration_n[i], color=color, marker='.') + plt.xlabel('acceleration in nav ' + label) + + # plot acceleration in body + acceleration_b = self.scenario.acceleration_b(t) + for i, (label, color) in enumerate(zip(self.labels, self.colors)): + plt.subplot(4, 3, i + 7) + plt.scatter(t, acceleration_b[i], color=color, marker='.') + plt.xlabel('acceleration in body ' + label) + + # plot actual specific force, as well as corrupted + actual = self.runner.actualSpecificForce(t) + for i, (label, color) in enumerate(zip(self.labels, self.colors)): + plt.subplot(4, 3, i + 10) + plt.scatter(t, actual[i], color='k', marker='.') + plt.scatter(t, measuredAcc[i], color=color, marker='.') + plt.xlabel('specific force ' + label) + + def plotGroundTruthPose(self, t): + # plot ground truth pose, as well as prediction from integrated IMU measurements + actualPose = self.scenario.pose(t) + plot_pose3(POSES_FIG, actualPose, 0.3) + t = actualPose.translation() + self.maxDim = max([abs(t.x()), abs(t.y()), abs(t.z()), self.maxDim]) + ax = plt.gca() + ax.set_xlim3d(-self.maxDim, self.maxDim) + ax.set_ylim3d(-self.maxDim, self.maxDim) + ax.set_zlim3d(-self.maxDim, self.maxDim) + + plt.pause(0.01) + + def run(self): + # simulate the loop + T = 12 + for i, t in enumerate(np.arange(0, T, self.dt)): + measuredOmega = self.runner.measuredAngularVelocity(t) + measuredAcc = self.runner.measuredSpecificForce(t) + if i % 25 == 0: + self.plotImu(t, measuredOmega, measuredAcc) + self.plotGroundTruthPose(t) + pim = self.runner.integrate(t, self.actualBias, True) + predictedNavState = self.runner.predict(pim, self.actualBias) + plot_pose3(POSES_FIG, predictedNavState.pose(), 0.1) + + plt.ioff() + plt.show() + + +if __name__ == '__main__': + PreintegrationExample().run() diff --git a/python/gtsam_py/python/examples/README.md b/python/gtsam_py/python/examples/README.md new file mode 100644 index 000000000..99bce00e2 --- /dev/null +++ b/python/gtsam_py/python/examples/README.md @@ -0,0 +1,57 @@ +These examples are almost identical to the old handwritten python wrapper +examples. However, there are just some slight name changes, for example +`noiseModel.Diagonal` becomes `noiseModel_Diagonal` etc... +Also, instead of `gtsam.Symbol('b', 0)` we can simply say `gtsam.symbol_shorthand_B(0)` or `B(0)` if we use python aliasing. + +# Porting Progress + +| C++ Example Name | Ported | +|-------------------------------------------------------|--------| +| CameraResectioning | | +| CreateSFMExampleData | | +| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through cython | +| easyPoint2KalmanFilter | ExtendedKalmanFilter not exposed through cython | +| elaboratePoint2KalmanFilter | GaussianSequentialSolver not exposed through cython | +| ImuFactorExample2 | X | +| ImuFactorsExample | | +| ISAM2Example_SmartFactor | | +| ISAM2_SmartFactorStereo_IMU | | +| LocalizationExample | impossible? 
| +| METISOrderingExample | | +| OdometryExample | X | +| PlanarSLAMExample | X | +| Pose2SLAMExample | X | +| Pose2SLAMExampleExpressions | | +| Pose2SLAMExample_g2o | X | +| Pose2SLAMExample_graph | | +| Pose2SLAMExample_graphviz | | +| Pose2SLAMExample_lago | lago not exposed through cython | +| Pose2SLAMStressTest | | +| Pose2SLAMwSPCG | | +| Pose3SLAMExample_changeKeys | | +| Pose3SLAMExampleExpressions_BearingRangeWithTransform | | +| Pose3SLAMExample_g2o | X | +| Pose3SLAMExample_initializePose3Chordal | | +| Pose3SLAMExample_initializePose3Gradient | | +| RangeISAMExample_plaza2 | | +| SelfCalibrationExample | | +| SFMExample_bal_COLAMD_METIS | | +| SFMExample_bal | | +| SFMExample | X | +| SFMExampleExpressions_bal | | +| SFMExampleExpressions | | +| SFMExample_SmartFactor | | +| SFMExample_SmartFactorPCG | | +| SimpleRotation | X | +| SolverComparer | | +| StereoVOExample | | +| StereoVOExample_large | | +| TimeTBB | | +| UGM_chain | discrete functionality not exposed | +| UGM_small | discrete functionality not exposed | +| VisualISAM2Example | X | +| VisualISAMExample | X | + +Extra Examples (with no C++ equivalent) +- PlanarManipulatorExample +- SFMData diff --git a/python/gtsam_py/python/examples/SFMExample.py b/python/gtsam_py/python/examples/SFMExample.py new file mode 100644 index 000000000..e02def2f9 --- /dev/null +++ b/python/gtsam_py/python/examples/SFMExample.py @@ -0,0 +1,121 @@ +""" +GTSAM Copyright 2010, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +A structure-from-motion problem on a simulated dataset +""" +from __future__ import print_function + +import gtsam +import matplotlib.pyplot as plt +import numpy as np +from gtsam import symbol_shorthand_L as L +from gtsam import symbol_shorthand_X as X +from gtsam.examples import SFMdata +from gtsam.gtsam import (Cal3_S2, DoglegOptimizer, + GenericProjectionFactorCal3_S2, Marginals, + NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, + Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, + SimpleCamera, Values) +from gtsam.utils import plot + + +def main(): + """ + Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y). + + Each variable in the system (poses and landmarks) must be identified with a unique key. + We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1). + Here we will use Symbols + + In GTSAM, measurement functions are represented as 'factors'. Several common factors + have been provided with the library for solving robotics/SLAM/Bundle Adjustment problems. + Here we will use Projection factors to model the camera's landmark observations. + Also, we will initialize the robot at some location using a Prior factor. + + When the factors are created, we will add them to a Factor Graph. As the factors we are using + are nonlinear factors, we will need a Nonlinear Factor Graph. + + Finally, once all of the factors have been added to our factor graph, we will want to + solve/optimize to graph to find the best (Maximum A Posteriori) set of variable values. + GTSAM includes several nonlinear optimizers to perform this step. Here we will use a + trust-region method known as Powell's Degleg + + The nonlinear solvers within GTSAM are iterative solvers, meaning they linearize the + nonlinear functions around an initial linearization point, then solve the linear system + to update the linearization point. 
This happens repeatedly until the solver converges + to a consistent set of variable values. This requires us to specify an initial guess + for each variable, held in a Values container. + """ + + # Define the camera calibration parameters + K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) + + # Define the camera observation noise model + measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + + # Create the set of ground-truth landmarks + points = SFMdata.createPoints() + + # Create the set of ground-truth poses + poses = SFMdata.createPoses(K) + + # Create a factor graph + graph = NonlinearFactorGraph() + + # Add a prior on pose x1. This indirectly specifies where the origin is. + # 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z + pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + factor = PriorFactorPose3(X(0), poses[0], pose_noise) + graph.push_back(factor) + + # Simulated measurements from each camera pose, adding them to the factor graph + for i, pose in enumerate(poses): + camera = PinholeCameraCal3_S2(pose, K) + for j, point in enumerate(points): + measurement = camera.project(point) + factor = GenericProjectionFactorCal3_S2( + measurement, measurement_noise, X(i), L(j), K) + graph.push_back(factor) + + # Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained + # Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance + # between the first camera and the first landmark. All other landmark positions are interpreted using this scale. + point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + factor = PriorFactorPoint3(L(0), points[0], point_noise) + graph.push_back(factor) + graph.print_('Factor Graph:\n') + + # Create the data structure to hold the initial estimate to the solution + # Intentionally initialize the variables off from the ground truth + initial_estimate = Values() + for i, pose in enumerate(poses): + transformed_pose = pose.retract(0.1*np.random.randn(6,1)) + initial_estimate.insert(X(i), transformed_pose) + for j, point in enumerate(points): + transformed_point = Point3(point.vector() + 0.1*np.random.randn(3)) + initial_estimate.insert(L(j), transformed_point) + initial_estimate.print_('Initial Estimates:\n') + + # Optimize the graph and print results + params = gtsam.DoglegParams() + params.setVerbosity('TERMINATION') + optimizer = DoglegOptimizer(graph, initial_estimate, params) + print('Optimizing:') + result = optimizer.optimize() + result.print_('Final results:\n') + print('initial error = {}'.format(graph.error(initial_estimate))) + print('final error = {}'.format(graph.error(result))) + + marginals = Marginals(graph, result) + plot.plot_3d_points(1, result, marginals=marginals) + plot.plot_trajectory(1, result, marginals=marginals, scale=8) + plot.set_axes_equal(1) + plt.show() + +if __name__ == '__main__': + main() diff --git a/python/gtsam_py/python/examples/SFMdata.py b/python/gtsam_py/python/examples/SFMdata.py new file mode 100644 index 000000000..c586f7e52 --- /dev/null +++ b/python/gtsam_py/python/examples/SFMdata.py @@ -0,0 +1,39 @@ +""" +A structure-from-motion example with landmarks + - The landmarks form a 10 meter cube + - The robot rotates around the landmarks, always facing towards the cube +""" +# pylint: disable=invalid-name, E1101 + +import numpy as np + +import gtsam + + +def createPoints(): + # Create the set of ground-truth landmarks + points = [gtsam.Point3(10.0, 10.0, 10.0), + 
gtsam.Point3(-10.0, 10.0, 10.0), + gtsam.Point3(-10.0, -10.0, 10.0), + gtsam.Point3(10.0, -10.0, 10.0), + gtsam.Point3(10.0, 10.0, -10.0), + gtsam.Point3(-10.0, 10.0, -10.0), + gtsam.Point3(-10.0, -10.0, -10.0), + gtsam.Point3(10.0, -10.0, -10.0)] + return points + + +def createPoses(K): + # Create the set of ground-truth poses + radius = 40.0 + height = 10.0 + angles = np.linspace(0, 2*np.pi, 8, endpoint=False) + up = gtsam.Point3(0, 0, 1) + target = gtsam.Point3(0, 0, 0) + poses = [] + for theta in angles: + position = gtsam.Point3(radius*np.cos(theta), + radius*np.sin(theta), height) + camera = gtsam.PinholeCameraCal3_S2.Lookat(position, target, up, K) + poses.append(camera.pose()) + return poses diff --git a/python/gtsam_py/python/examples/SimpleRotation.py b/python/gtsam_py/python/examples/SimpleRotation.py new file mode 100644 index 000000000..4e82d3778 --- /dev/null +++ b/python/gtsam_py/python/examples/SimpleRotation.py @@ -0,0 +1,85 @@ +""" +GTSAM Copyright 2010, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +This example will perform a relatively trivial optimization on +a single variable with a single factor. +""" + +import gtsam +import numpy as np +from gtsam import symbol_shorthand_X as X + + +def main(): + """ + Step 1: Create a factor to express a unary constraint + + The "prior" in this case is the measurement from a sensor, + with a model of the noise on the measurement. + + The "Key" created here is a label used to associate parts of the + state (stored in "RotValues") with particular factors. They require + an index to allow for lookup, and should be unique. + + In general, creating a factor requires: + - A key or set of keys labeling the variables that are acted upon + - A measurement value + - A measurement model with the correct dimensionality for the factor + """ + prior = gtsam.Rot2.fromAngle(np.deg2rad(30)) + prior.print_('goal angle') + model = gtsam.noiseModel_Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) + key = X(1) + factor = gtsam.PriorFactorRot2(key, prior, model) + + """ + Step 2: Create a graph container and add the factor to it + + Before optimizing, all factors need to be added to a Graph container, + which provides the necessary top-level functionality for defining a + system of constraints. + + In this case, there is only one factor, but in a practical scenario, + many more factors would be added. + """ + graph = gtsam.NonlinearFactorGraph() + graph.push_back(factor) + graph.print_('full graph') + + """ + Step 3: Create an initial estimate + + An initial estimate of the solution for the system is necessary to + start optimization. This system state is the "Values" instance, + which is similar in structure to a dictionary, in that it maps + keys (the label created in step 1) to specific values. + + The initial estimate provided to optimization will be used as + a linearization point for optimization, so it is important that + all of the variables in the graph have a corresponding value in + this structure. + """ + initial = gtsam.Values() + initial.insert(key, gtsam.Rot2.fromAngle(np.deg2rad(20))) + initial.print_('initial estimate') + + """ + Step 4: Optimize + + After formulating the problem with a graph of constraints + and an initial estimate, executing optimization is as simple + as calling a general optimization function with the graph and + initial estimate. 
This will yield a new RotValues structure + with the final state of the optimization. + """ + result = gtsam.LevenbergMarquardtOptimizer(graph, initial).optimize() + result.print_('final result') + + +if __name__ == '__main__': + main() diff --git a/python/gtsam_py/python/examples/VisualISAM2Example.py b/python/gtsam_py/python/examples/VisualISAM2Example.py new file mode 100644 index 000000000..49e6ca95c --- /dev/null +++ b/python/gtsam_py/python/examples/VisualISAM2Example.py @@ -0,0 +1,154 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +An example of running visual SLAM using iSAM2. +Author: Duy-Nguyen Ta (C++), Frank Dellaert (Python) +""" +# pylint: disable=invalid-name, E1101 + +from __future__ import print_function + +import gtsam +import gtsam.utils.plot as gtsam_plot +import matplotlib.pyplot as plt +import numpy as np +from gtsam import symbol_shorthand_L as L +from gtsam import symbol_shorthand_X as X +from gtsam.examples import SFMdata +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 + + +def visual_ISAM2_plot(result): + """ + VisualISAMPlot plots current state of ISAM2 object + Author: Ellon Paiva + Based on MATLAB version by: Duy Nguyen Ta and Frank Dellaert + """ + + # Declare an id for the figure + fignum = 0 + + fig = plt.figure(fignum) + axes = fig.gca(projection='3d') + plt.cla() + + # Plot points + # Can't use data because current frame might not see all points + # marginals = Marginals(isam.getFactorsUnsafe(), isam.calculateEstimate()) + # gtsam.plot_3d_points(result, [], marginals) + gtsam_plot.plot_3d_points(fignum, result, 'rx') + + # Plot cameras + i = 0 + while result.exists(X(i)): + pose_i = result.atPose3(X(i)) + gtsam_plot.plot_pose3(fignum, pose_i, 10) + i += 1 + + # draw + axes.set_xlim3d(-40, 40) + axes.set_ylim3d(-40, 40) + axes.set_zlim3d(-40, 40) + plt.pause(1) + + +def visual_ISAM2_example(): + plt.ion() + + # Define the camera calibration parameters + K = gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) + + # Define the camera observation noise model + measurement_noise = gtsam.noiseModel_Isotropic.Sigma( + 2, 1.0) # one pixel in u and v + + # Create the set of ground-truth landmarks + points = SFMdata.createPoints() + + # Create the set of ground-truth poses + poses = SFMdata.createPoses(K) + + # Create an iSAM2 object. Unlike iSAM1, which performs periodic batch steps + # to maintain proper linearization and efficient variable ordering, iSAM2 + # performs partial relinearization/reordering at each step. A parameter + # structure is available that allows the user to set various properties, such + # as the relinearization threshold and type of linear solver. For this + # example, we we set the relinearization threshold small so the iSAM2 result + # will approach the batch result. 
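+    # relinearizeThreshold: only variables whose linear update exceeds this
+    # magnitude are relinearized; relinearizeSkip=1 runs that check on every
+    # call to update().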
+ parameters = gtsam.ISAM2Params() + parameters.setRelinearizeThreshold(0.01) + parameters.setRelinearizeSkip(1) + isam = gtsam.ISAM2(parameters) + + # Create a Factor Graph and Values to hold the new data + graph = gtsam.NonlinearFactorGraph() + initial_estimate = gtsam.Values() + + # Loop over the different poses, adding the observations to iSAM incrementally + for i, pose in enumerate(poses): + + # Add factors for each landmark observation + for j, point in enumerate(points): + camera = gtsam.PinholeCameraCal3_S2(pose, K) + measurement = camera.project(point) + graph.push_back(gtsam.GenericProjectionFactorCal3_S2( + measurement, measurement_noise, X(i), L(j), K)) + + # Add an initial guess for the current pose + # Intentionally initialize the variables off from the ground truth + initial_estimate.insert(X(i), pose.compose(gtsam.Pose3( + gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25), gtsam.Point3(0.05, -0.10, 0.20)))) + + # If this is the first iteration, add a prior on the first pose to set the + # coordinate frame and a prior on the first landmark to set the scale. + # Also, as iSAM solves incrementally, we must wait until each is observed + # at least twice before adding it to iSAM. + if i == 0: + # Add a prior on pose x0 + pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array( + [0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw + graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], pose_noise)) + + # Add a prior on landmark l0 + point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + graph.push_back(gtsam.PriorFactorPoint3( + L(0), points[0], point_noise)) # add directly to graph + + # Add initial guesses to all observed landmarks + # Intentionally initialize the variables off from the ground truth + for j, point in enumerate(points): + initial_estimate.insert(L(j), gtsam.Point3( + point.x()-0.25, point.y()+0.20, point.z()+0.15)) + else: + # Update iSAM with the new factors + isam.update(graph, initial_estimate) + # Each call to iSAM2 update(*) performs one iteration of the iterative nonlinear solver. + # If accuracy is desired at the expense of time, update(*) can be called additional + # times to perform multiple optimizer iterations every step. + isam.update() + current_estimate = isam.calculateEstimate() + print("****************************************************") + print("Frame", i, ":") + for j in range(i + 1): + print(X(j), ":", current_estimate.atPose3(X(j))) + + for j in range(len(points)): + print(L(j), ":", current_estimate.atPoint3(L(j))) + + visual_ISAM2_plot(current_estimate) + + # Clear the factor graph and values for the next iteration + graph.resize(0) + initial_estimate.clear() + + plt.ioff() + plt.show() + + +if __name__ == '__main__': + visual_ISAM2_example() diff --git a/python/gtsam_py/python/examples/VisualISAMExample.py b/python/gtsam_py/python/examples/VisualISAMExample.py new file mode 100644 index 000000000..5cc37867b --- /dev/null +++ b/python/gtsam_py/python/examples/VisualISAMExample.py @@ -0,0 +1,103 @@ +""" +GTSAM Copyright 2010, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + +See LICENSE for the license information + +A visualSLAM example for the structure-from-motion problem on a simulated dataset +This version uses iSAM to solve the problem incrementally +""" + +from __future__ import print_function + +import numpy as np +import gtsam +from gtsam.examples import SFMdata +from gtsam.gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, + NonlinearFactorGraph, NonlinearISAM, Point3, Pose3, + PriorFactorPoint3, PriorFactorPose3, Rot3, + PinholeCameraCal3_S2, Values) +from gtsam import symbol_shorthand_L as L +from gtsam import symbol_shorthand_X as X + + +def main(): + """ + A structure-from-motion example with landmarks + - The landmarks form a 10 meter cube + - The robot rotates around the landmarks, always facing towards the cube + """ + + # Define the camera calibration parameters + K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) + + # Define the camera observation noise model + camera_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + + # Create the set of ground-truth landmarks + points = SFMdata.createPoints() + # Create the set of ground-truth poses + poses = SFMdata.createPoses(K) + + # Create a NonlinearISAM object which will relinearize and reorder the variables + # every "reorderInterval" updates + isam = NonlinearISAM(reorderInterval=3) + + # Create a Factor Graph and Values to hold the new data + graph = NonlinearFactorGraph() + initial_estimate = Values() + + # Loop over the different poses, adding the observations to iSAM incrementally + for i, pose in enumerate(poses): + camera = PinholeCameraCal3_S2(pose, K) + # Add factors for each landmark observation + for j, point in enumerate(points): + measurement = camera.project(point) + factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, X(i), L(j), K) + graph.push_back(factor) + + # Intentionally initialize the variables off from the ground truth + noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), t=Point3(0.05, -0.10, 0.20)) + initial_xi = pose.compose(noise) + + # Add an initial guess for the current pose + initial_estimate.insert(X(i), initial_xi) + + # If this is the first iteration, add a prior on the first pose to set the coordinate frame + # and a prior on the first landmark to set the scale + # Also, as iSAM solves incrementally, we must wait until each is observed at least twice before + # adding it to iSAM. 
+ if i == 0: + # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z + pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + factor = PriorFactorPose3(X(0), poses[0], pose_noise) + graph.push_back(factor) + + # Add a prior on landmark l0 + point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + factor = PriorFactorPoint3(L(0), points[0], point_noise) + graph.push_back(factor) + + # Add initial guesses to all observed landmarks + noise = np.array([-0.25, 0.20, 0.15]) + for j, point in enumerate(points): + # Intentionally initialize the variables off from the ground truth + initial_lj = points[j].vector() + noise + initial_estimate.insert(L(j), Point3(initial_lj)) + else: + # Update iSAM with the new factors + isam.update(graph, initial_estimate) + current_estimate = isam.estimate() + print('*' * 50) + print('Frame {}:'.format(i)) + current_estimate.print_('Current estimate: ') + + # Clear the factor graph and values for the next iteration + graph.resize(0) + initial_estimate.clear() + + +if __name__ == '__main__': + main() diff --git a/python/gtsam_py/python/examples/__init__.py b/python/gtsam_py/python/examples/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/gtsam_py/python/tests/__init__.py b/python/gtsam_py/python/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/gtsam_py/python/tests/testScenarioRunner.py b/python/gtsam_py/python/tests/testScenarioRunner.py new file mode 100644 index 000000000..97a97b0ec --- /dev/null +++ b/python/gtsam_py/python/tests/testScenarioRunner.py @@ -0,0 +1,47 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +ScenarioRunner unit tests. +Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import math +import unittest + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestScenarioRunner(GtsamTestCase): + def setUp(self): + self.g = 10 # simple gravity constant + + def test_loop_runner(self): + # Forward velocity 2m/s + # Pitch up with angular velocity 6 degree/sec (negative in FLU) + v = 2 + w = math.radians(6) + W = np.array([0, -w, 0]) + V = np.array([v, 0, 0]) + scenario = gtsam.ConstantTwistScenario(W, V) + + dt = 0.1 + params = gtsam.PreintegrationParams.MakeSharedU(self.g) + bias = gtsam.imuBias_ConstantBias() + runner = gtsam.ScenarioRunner( + scenario, params, dt, bias) + + # Test specific force at time 0: a is pointing up + t = 0.0 + a = w * v + np.testing.assert_almost_equal( + np.array([0, 0, a + self.g]), runner.actualSpecificForce(t)) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/gtsam_py/python/tests/test_Cal3Unified.py b/python/gtsam_py/python/tests/test_Cal3Unified.py new file mode 100644 index 000000000..fbf5f3565 --- /dev/null +++ b/python/gtsam_py/python/tests/test_Cal3Unified.py @@ -0,0 +1,38 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Cal3Unified unit tests. +Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestCal3Unified(GtsamTestCase): + + def test_Cal3Unified(self): + K = gtsam.Cal3Unified() + self.assertEqual(K.fx(), 1.) + self.assertEqual(K.fx(), 1.) 
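+    # retract()/localCoordinates() below use a 10-dof tangent vector ordered
+    # like the constructor arguments: (fx, fy, s, u0, v0, k1, k2, p1, p2, xi)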
+ + def test_retract(self): + expected = gtsam.Cal3Unified(100 + 2, 105 + 3, 0.0 + 4, 320 + 5, 240 + 6, + 1e-3 + 7, 2.0*1e-3 + 8, 3.0*1e-3 + 9, 4.0*1e-3 + 10, 0.1 + 1) + K = gtsam.Cal3Unified(100, 105, 0.0, 320, 240, + 1e-3, 2.0*1e-3, 3.0*1e-3, 4.0*1e-3, 0.1) + d = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10, 1], order='F') + actual = K.retract(d) + self.gtsamAssertEquals(actual, expected) + np.testing.assert_allclose(d, K.localCoordinates(actual)) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_FrobeniusFactor.py b/python/gtsam_py/python/tests/test_FrobeniusFactor.py new file mode 100644 index 000000000..f3f5354bb --- /dev/null +++ b/python/gtsam_py/python/tests/test_FrobeniusFactor.py @@ -0,0 +1,56 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +FrobeniusFactor unit tests. +Author: Frank Dellaert +""" +# pylint: disable=no-name-in-module, import-error, invalid-name +import unittest + +import numpy as np +from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4, + FrobeniusWormholeFactor, SOn) + +id = SO4() +v1 = np.array([0, 0, 0, 0.1, 0, 0]) +Q1 = SO4.Expmap(v1) +v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03]) +Q2 = SO4.Expmap(v2) + + +class TestFrobeniusFactorSO4(unittest.TestCase): + """Test FrobeniusFactor factors.""" + + def test_frobenius_factor(self): + """Test creation of a factor that calculates the Frobenius norm.""" + factor = FrobeniusFactorSO4(1, 2) + actual = factor.evaluateError(Q1, Q2) + expected = (Q2.matrix()-Q1.matrix()).transpose().reshape((16,)) + np.testing.assert_array_equal(actual, expected) + + def test_frobenius_between_factor(self): + """Test creation of a Frobenius BetweenFactor.""" + factor = FrobeniusBetweenFactorSO4(1, 2, Q1.between(Q2)) + actual = factor.evaluateError(Q1, Q2) + expected = np.zeros((16,)) + np.testing.assert_array_almost_equal(actual, expected) + + def test_frobenius_wormhole_factor(self): + """Test creation of a factor that calculates Shonan error.""" + R1 = SO3.Expmap(v1[3:]) + R2 = SO3.Expmap(v2[3:]) + factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4) + I4 = SOn(4) + Q1 = I4.retract(v1) + Q2 = I4.retract(v2) + actual = factor.evaluateError(Q1, Q2) + expected = np.zeros((12,)) + np.testing.assert_array_almost_equal(actual, expected, decimal=4) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_GaussianFactorGraph.py b/python/gtsam_py/python/tests/test_GaussianFactorGraph.py new file mode 100644 index 000000000..983825d8b --- /dev/null +++ b/python/gtsam_py/python/tests/test_GaussianFactorGraph.py @@ -0,0 +1,92 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for Linear Factor Graphs. 
+Author: Frank Dellaert & Gerry Chen +""" +# pylint: disable=invalid-name, no-name-in-module, no-member + +from __future__ import print_function + +import unittest + +import gtsam +import numpy as np +from gtsam import symbol_shorthand_X as X +from gtsam.utils.test_case import GtsamTestCase + + +def create_graph(): + """Create a basic linear factor graph for testing""" + graph = gtsam.GaussianFactorGraph() + + x0 = X(0) + x1 = X(1) + x2 = X(2) + + BETWEEN_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) + PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) + + graph.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE) + graph.add(x2, np.eye(1), x1, -np.eye(1), 2*np.ones(1), BETWEEN_NOISE) + graph.add(x0, np.eye(1), np.zeros(1), PRIOR_NOISE) + + return graph, (x0, x1, x2) + +class TestGaussianFactorGraph(GtsamTestCase): + """Tests for Gaussian Factor Graphs.""" + + def test_fg(self): + """Test solving a linear factor graph""" + graph, X = create_graph() + result = graph.optimize() + + EXPECTEDX = [0, 1, 3] + + # check solutions + self.assertAlmostEqual(EXPECTEDX[0], result.at(X[0]), delta=1e-8) + self.assertAlmostEqual(EXPECTEDX[1], result.at(X[1]), delta=1e-8) + self.assertAlmostEqual(EXPECTEDX[2], result.at(X[2]), delta=1e-8) + + def test_convertNonlinear(self): + """Test converting a linear factor graph to a nonlinear one""" + graph, X = create_graph() + + EXPECTEDM = [1, 2, 3] + + # create nonlinear factor graph for marginalization + nfg = gtsam.LinearContainerFactor.ConvertLinearGraph(graph) + optimizer = gtsam.LevenbergMarquardtOptimizer(nfg, gtsam.Values()) + nlresult = optimizer.optimizeSafely() + + # marginalize + marginals = gtsam.Marginals(nfg, nlresult) + m = [marginals.marginalCovariance(x) for x in X] + + # check linear marginalizations + self.assertAlmostEqual(EXPECTEDM[0], m[0], delta=1e-8) + self.assertAlmostEqual(EXPECTEDM[1], m[1], delta=1e-8) + self.assertAlmostEqual(EXPECTEDM[2], m[2], delta=1e-8) + + def test_linearMarginalization(self): + """Marginalize a linear factor graph""" + graph, X = create_graph() + result = graph.optimize() + + EXPECTEDM = [1, 2, 3] + + # linear factor graph marginalize + marginals = gtsam.Marginals(graph, result) + m = [marginals.marginalCovariance(x) for x in X] + + # check linear marginalizations + self.assertAlmostEqual(EXPECTEDM[0], m[0], delta=1e-8) + self.assertAlmostEqual(EXPECTEDM[1], m[1], delta=1e-8) + self.assertAlmostEqual(EXPECTEDM[2], m[2], delta=1e-8) + +if __name__ == '__main__': + unittest.main() diff --git a/python/gtsam_py/python/tests/test_JacobianFactor.py b/python/gtsam_py/python/tests/test_JacobianFactor.py new file mode 100644 index 000000000..04433492b --- /dev/null +++ b/python/gtsam_py/python/tests/test_JacobianFactor.py @@ -0,0 +1,92 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +JacobianFactor unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestJacobianFactor(GtsamTestCase): + + def test_eliminate(self): + # Recommended way to specify a matrix (see cython/README) + Ax2 = np.array( + [[-5., 0.], + [+0., -5.], + [10., 0.], + [+0., 10.]], order='F') + + # This is good too + Al1 = np.array( + [[5, 0], + [0, 5], + [0, 0], + [0, 0]], dtype=float, order = 'F') + + # Not recommended for performance reasons, but should still work + # as the wrapper should convert it to the correct type and storage order + Ax1 = np.array( + [[0, 0], # f4 + [0, 0], # f4 + [-10, 0], # f2 + [0, -10]]) # f2 + + x2 = 1 + l1 = 2 + x1 = 3 + + # the RHS + b2 = np.array([-1., 1.5, 2., -1.]) + sigmas = np.array([1., 1., 1., 1.]) + model4 = gtsam.noiseModel_Diagonal.Sigmas(sigmas) + combined = gtsam.JacobianFactor(x2, Ax2, l1, Al1, x1, Ax1, b2, model4) + + # eliminate the first variable (x2) in the combined factor, destructive + # ! + ord = gtsam.Ordering() + ord.push_back(x2) + actualCG, lf = combined.eliminate(ord) + + # create expected Conditional Gaussian + R11 = np.array([[11.1803, 0.00], + [0.00, 11.1803]]) + S12 = np.array([[-2.23607, 0.00], + [+0.00, -2.23607]]) + S13 = np.array([[-8.94427, 0.00], + [+0.00, -8.94427]]) + d = np.array([2.23607, -1.56525]) + expectedCG = gtsam.GaussianConditional( + x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel_Unit.Create(2)) + # check if the result matches + self.gtsamAssertEquals(actualCG, expectedCG, 1e-4) + + # the expected linear factor + Bl1 = np.array([[4.47214, 0.00], + [0.00, 4.47214]]) + + Bx1 = np.array( + # x1 + [[-4.47214, 0.00], + [+0.00, -4.47214]]) + + # the RHS + b1 = np.array([0.0, 0.894427]) + + model2 = gtsam.noiseModel_Diagonal.Sigmas(np.array([1., 1.])) + expectedLF = gtsam.JacobianFactor(l1, Bl1, x1, Bx1, b1, model2) + + # check if the result matches the combined (reduced) factor + self.gtsamAssertEquals(lf, expectedLF, 1e-4) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_KalmanFilter.py b/python/gtsam_py/python/tests/test_KalmanFilter.py new file mode 100644 index 000000000..94c41df72 --- /dev/null +++ b/python/gtsam_py/python/tests/test_KalmanFilter.py @@ -0,0 +1,83 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +KalmanFilter unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestKalmanFilter(GtsamTestCase): + + def test_KalmanFilter(self): + F = np.eye(2) + B = np.eye(2) + u = np.array([1.0, 0.0]) + modelQ = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + Q = 0.01 * np.eye(2) + H = np.eye(2) + z1 = np.array([1.0, 0.0]) + z2 = np.array([2.0, 0.0]) + z3 = np.array([3.0, 0.0]) + modelR = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + R = 0.01 * np.eye(2) + + # Create the set of expected output TestValues + expected0 = np.array([0.0, 0.0]) + P00 = 0.01 * np.eye(2) + + expected1 = np.array([1.0, 0.0]) + P01 = P00 + Q + I11 = np.linalg.inv(P01) + np.linalg.inv(R) + + expected2 = np.array([2.0, 0.0]) + P12 = np.linalg.inv(I11) + Q + I22 = np.linalg.inv(P12) + np.linalg.inv(R) + + expected3 = np.array([3.0, 0.0]) + P23 = np.linalg.inv(I22) + Q + I33 = np.linalg.inv(P23) + np.linalg.inv(R) + + # Create an KalmanFilter object + KF = gtsam.KalmanFilter(n=2) + + # Create the Kalman Filter initialization point + x_initial = np.array([0.0, 0.0]) + P_initial = 0.01 * np.eye(2) + + # Create an KF object + state = KF.init(x_initial, P_initial) + self.assertTrue(np.allclose(expected0, state.mean())) + self.assertTrue(np.allclose(P00, state.covariance())) + + # Run iteration 1 + state = KF.predict(state, F, B, u, modelQ) + self.assertTrue(np.allclose(expected1, state.mean())) + self.assertTrue(np.allclose(P01, state.covariance())) + state = KF.update(state, H, z1, modelR) + self.assertTrue(np.allclose(expected1, state.mean())) + self.assertTrue(np.allclose(I11, state.information())) + + # Run iteration 2 + state = KF.predict(state, F, B, u, modelQ) + self.assertTrue(np.allclose(expected2, state.mean())) + state = KF.update(state, H, z2, modelR) + self.assertTrue(np.allclose(expected2, state.mean())) + + # Run iteration 3 + state = KF.predict(state, F, B, u, modelQ) + self.assertTrue(np.allclose(expected3, state.mean())) + state = KF.update(state, H, z3, modelR) + self.assertTrue(np.allclose(expected3, state.mean())) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_KarcherMeanFactor.py b/python/gtsam_py/python/tests/test_KarcherMeanFactor.py new file mode 100644 index 000000000..6976decc1 --- /dev/null +++ b/python/gtsam_py/python/tests/test_KarcherMeanFactor.py @@ -0,0 +1,80 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +KarcherMeanFactor unit tests. +Author: Frank Dellaert +""" + +# pylint: disable=invalid-name, no-name-in-module, no-member + +import unittest + +import gtsam +import numpy as np +from gtsam.utils.test_case import GtsamTestCase + +KEY = 0 +MODEL = gtsam.noiseModel_Unit.Create(3) + + +def find_Karcher_mean_Rot3(rotations): + """Find the Karcher mean of given values.""" + # Cost function C(R) = \sum PriorFactor(R_i)::error(R) + # No closed form solution. 
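+    # so the mean is found iteratively: Gauss-Newton on a graph of Rot3 priors,
+    # starting from the identity rotation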
+ graph = gtsam.NonlinearFactorGraph() + for R in rotations: + graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL)) + initial = gtsam.Values() + initial.insert(KEY, gtsam.Rot3()) + result = gtsam.GaussNewtonOptimizer(graph, initial).optimize() + return result.atRot3(KEY) + + +# Rot3 version +R = gtsam.Rot3.Expmap(np.array([0.1, 0, 0])) + + +class TestKarcherMean(GtsamTestCase): + + def test_find(self): + # Check that optimizing for Karcher mean (which minimizes Between distance) + # gets correct result. + rotations = {R, R.inverse()} + expected = gtsam.Rot3() + actual = find_Karcher_mean_Rot3(rotations) + self.gtsamAssertEquals(expected, actual) + + def test_factor(self): + """Check that the InnerConstraint factor leaves the mean unchanged.""" + # Make a graph with two variables, one between, and one InnerConstraint + # The optimal result should satisfy the between, while moving the other + # variable to make the mean the same as before. + # Mean of R and R' is identity. Let's make a BetweenFactor making R21 = + # R*R*R, i.e. geodesic length is 3 rather than 2. + graph = gtsam.NonlinearFactorGraph() + R12 = R.compose(R.compose(R)) + graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL)) + keys = gtsam.KeyVector() + keys.push_back(1) + keys.push_back(2) + graph.add(gtsam.KarcherMeanFactorRot3(keys)) + + initial = gtsam.Values() + initial.insert(1, R.inverse()) + initial.insert(2, R) + expected = find_Karcher_mean_Rot3([R, R.inverse()]) + + result = gtsam.GaussNewtonOptimizer(graph, initial).optimize() + actual = find_Karcher_mean_Rot3( + [result.atRot3(1), result.atRot3(2)]) + self.gtsamAssertEquals(expected, actual) + self.gtsamAssertEquals( + R12, result.atRot3(1).between(result.atRot3(2))) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_LocalizationExample.py b/python/gtsam_py/python/tests/test_LocalizationExample.py new file mode 100644 index 000000000..6ce65f087 --- /dev/null +++ b/python/gtsam_py/python/tests/test_LocalizationExample.py @@ -0,0 +1,64 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Localization unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestLocalizationExample(GtsamTestCase): + + def test_LocalizationExample(self): + # Create the graph (defined in pose2SLAM.h, derived from + # NonlinearFactorGraph) + graph = gtsam.NonlinearFactorGraph() + + # Add two odometry factors + # create a measurement for both factors (the same in this case) + odometry = gtsam.Pose2(2.0, 0.0, 0.0) + odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta + graph.add(gtsam.BetweenFactorPose2(0, 1, odometry, odometryNoise)) + graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) + + # Add three "GPS" measurements + # We use Pose2 Priors here with high variance on theta + groundTruth = gtsam.Values() + groundTruth.insert(0, gtsam.Pose2(0.0, 0.0, 0.0)) + groundTruth.insert(1, gtsam.Pose2(2.0, 0.0, 0.0)) + groundTruth.insert(2, gtsam.Pose2(4.0, 0.0, 0.0)) + model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1, 10.])) + for i in range(3): + graph.add(gtsam.PriorFactorPose2(i, groundTruth.atPose2(i), model)) + + # Initialize to noisy points + initialEstimate = gtsam.Values() + initialEstimate.insert(0, gtsam.Pose2(0.5, 0.0, 0.2)) + initialEstimate.insert(1, gtsam.Pose2(2.3, 0.1, -0.2)) + initialEstimate.insert(2, gtsam.Pose2(4.1, 0.1, 0.1)) + + # Optimize using Levenberg-Marquardt optimization with an ordering from + # colamd + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) + result = optimizer.optimizeSafely() + + # Plot Covariance Ellipses + marginals = gtsam.Marginals(graph, result) + P = [None] * result.size() + for i in range(0, result.size()): + pose_i = result.atPose2(i) + self.gtsamAssertEquals(pose_i, groundTruth.atPose2(i), 1e-4) + P[i] = marginals.marginalCovariance(i) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_NonlinearOptimizer.py b/python/gtsam_py/python/tests/test_NonlinearOptimizer.py new file mode 100644 index 000000000..985dc30a2 --- /dev/null +++ b/python/gtsam_py/python/tests/test_NonlinearOptimizer.py @@ -0,0 +1,83 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for IMU testing scenarios. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +# pylint: disable=invalid-name, no-name-in-module + +from __future__ import print_function + +import unittest + +import gtsam +from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer, + GaussNewtonParams, LevenbergMarquardtOptimizer, + LevenbergMarquardtParams, PCGSolverParameters, + DummyPreconditionerParameters, NonlinearFactorGraph, Ordering, + Point2, PriorFactorPoint2, Values) +from gtsam.utils.test_case import GtsamTestCase + +KEY1 = 1 +KEY2 = 2 + + +class TestScenario(GtsamTestCase): + def test_optimize(self): + """Do trivial test with three optimizer variants.""" + fg = NonlinearFactorGraph() + model = gtsam.noiseModel_Unit.Create(2) + fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model)) + + # test error at minimum + xstar = Point2(0, 0) + optimal_values = Values() + optimal_values.insert(KEY1, xstar) + self.assertEqual(0.0, fg.error(optimal_values), 0.0) + + # test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 = + x0 = Point2(3, 3) + initial_values = Values() + initial_values.insert(KEY1, x0) + self.assertEqual(9.0, fg.error(initial_values), 1e-3) + + # optimize parameters + ordering = Ordering() + ordering.push_back(KEY1) + + # Gauss-Newton + gnParams = GaussNewtonParams() + gnParams.setOrdering(ordering) + actual1 = GaussNewtonOptimizer(fg, initial_values, gnParams).optimize() + self.assertAlmostEqual(0, fg.error(actual1)) + + # Levenberg-Marquardt + lmParams = LevenbergMarquardtParams.CeresDefaults() + lmParams.setOrdering(ordering) + actual2 = LevenbergMarquardtOptimizer( + fg, initial_values, lmParams).optimize() + self.assertAlmostEqual(0, fg.error(actual2)) + + # Levenberg-Marquardt + lmParams = LevenbergMarquardtParams.CeresDefaults() + lmParams.setLinearSolverType("ITERATIVE") + cgParams = PCGSolverParameters() + cgParams.setPreconditionerParams(DummyPreconditionerParameters()) + lmParams.setIterativeParams(cgParams) + actual2 = LevenbergMarquardtOptimizer( + fg, initial_values, lmParams).optimize() + self.assertAlmostEqual(0, fg.error(actual2)) + + # Dogleg + dlParams = DoglegParams() + dlParams.setOrdering(ordering) + actual3 = DoglegOptimizer(fg, initial_values, dlParams).optimize() + self.assertAlmostEqual(0, fg.error(actual3)) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_OdometryExample.py b/python/gtsam_py/python/tests/test_OdometryExample.py new file mode 100644 index 000000000..c8ea95588 --- /dev/null +++ b/python/gtsam_py/python/tests/test_OdometryExample.py @@ -0,0 +1,59 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Odometry unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestOdometryExample(GtsamTestCase): + + def test_OdometryExample(self): + # Create the graph (defined in pose2SLAM.h, derived from + # NonlinearFactorGraph) + graph = gtsam.NonlinearFactorGraph() + + # Add a Gaussian prior on pose x_1 + priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior mean is at origin + priorNoise = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.3, 0.3, 0.1])) # 30cm std on x,y, 0.1 rad on theta + # add directly to graph + graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) + + # Add two odometry factors + # create a measurement for both factors (the same in this case) + odometry = gtsam.Pose2(2.0, 0.0, 0.0) + odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta + graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) + graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise)) + + # Initialize to noisy points + initialEstimate = gtsam.Values() + initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) + initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) + initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, 0.1)) + + # Optimize using Levenberg-Marquardt optimization with an ordering from + # colamd + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) + result = optimizer.optimizeSafely() + marginals = gtsam.Marginals(graph, result) + marginals.marginalCovariance(1) + + # Check first pose equality + pose_1 = result.atPose2(1) + self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_PlanarSLAMExample.py b/python/gtsam_py/python/tests/test_PlanarSLAMExample.py new file mode 100644 index 000000000..ae813d35c --- /dev/null +++ b/python/gtsam_py/python/tests/test_PlanarSLAMExample.py @@ -0,0 +1,78 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +PlanarSLAM unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest +from math import pi + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestPlanarSLAM(GtsamTestCase): + + def test_PlanarSLAM(self): + # Assumptions + # - All values are axis aligned + # - Robot poses are facing along the X axis (horizontal, to the right in images) + # - We have full odometry for measurements + # - The robot is on a grid, moving 2 meters each step + + # Create graph container and add factors to it + graph = gtsam.NonlinearFactorGraph() + + # Add prior + # gaussian for prior + priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin + priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + # add directly to graph + graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) + + # Add odometry + # general noisemodel for odometry + odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + graph.add(gtsam.BetweenFactorPose2( + 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) + graph.add(gtsam.BetweenFactorPose2( + 2, 3, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) + graph.add(gtsam.BetweenFactorPose2( + 3, 4, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) + graph.add(gtsam.BetweenFactorPose2( + 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) + + # Add pose constraint + model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) + + # Initialize to noisy points + initialEstimate = gtsam.Values() + initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) + initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) + initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, pi / 2)) + initialEstimate.insert(4, gtsam.Pose2(4.0, 2.0, pi)) + initialEstimate.insert(5, gtsam.Pose2(2.1, 2.1, -pi / 2)) + + # Optimize using Levenberg-Marquardt optimization with an ordering from + # colamd + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) + result = optimizer.optimizeSafely() + + # Plot Covariance Ellipses + marginals = gtsam.Marginals(graph, result) + P = marginals.marginalCovariance(1) + + pose_1 = result.atPose2(1) + self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4) + + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_Pose2.py b/python/gtsam_py/python/tests/test_Pose2.py new file mode 100644 index 000000000..9652b594a --- /dev/null +++ b/python/gtsam_py/python/tests/test_Pose2.py @@ -0,0 +1,32 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Pose2 unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +from gtsam import Pose2 +from gtsam.utils.test_case import GtsamTestCase + + +class TestPose2(GtsamTestCase): + """Test selected Pose2 methods.""" + + def test_adjoint(self): + """Test adjoint method.""" + xi = np.array([1, 2, 3]) + expected = np.dot(Pose2.adjointMap_(xi), xi) + actual = Pose2.adjoint_(xi, xi) + np.testing.assert_array_equal(actual, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_Pose2SLAMExample.py b/python/gtsam_py/python/tests/test_Pose2SLAMExample.py new file mode 100644 index 000000000..a79b6b18c --- /dev/null +++ b/python/gtsam_py/python/tests/test_Pose2SLAMExample.py @@ -0,0 +1,76 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Pose2SLAM unit tests. +Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest +from math import pi + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestPose2SLAMExample(GtsamTestCase): + + def test_Pose2SLAMExample(self): + # Assumptions + # - All values are axis aligned + # - Robot poses are facing along the X axis (horizontal, to the right in images) + # - We have full odometry for measurements + # - The robot is on a grid, moving 2 meters each step + + # Create graph container and add factors to it + graph = gtsam.NonlinearFactorGraph() + + # Add prior + # gaussian for prior + priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin + priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + # add directly to graph + graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) + + # Add odometry + # general noisemodel for odometry + odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + graph.add(gtsam.BetweenFactorPose2( + 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) + graph.add(gtsam.BetweenFactorPose2( + 2, 3, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) + graph.add(gtsam.BetweenFactorPose2( + 3, 4, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) + graph.add(gtsam.BetweenFactorPose2( + 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) + + # Add pose constraint + model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) + + # Initialize to noisy points + initialEstimate = gtsam.Values() + initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) + initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) + initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, pi / 2)) + initialEstimate.insert(4, gtsam.Pose2(4.0, 2.0, pi)) + initialEstimate.insert(5, gtsam.Pose2(2.1, 2.1, -pi / 2)) + + # Optimize using Levenberg-Marquardt optimization with an ordering from + # colamd + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) + result = optimizer.optimizeSafely() + + # Plot Covariance Ellipses + marginals = gtsam.Marginals(graph, result) + P = marginals.marginalCovariance(1) + + pose_1 = result.atPose2(1) + self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_Pose3.py b/python/gtsam_py/python/tests/test_Pose3.py new file mode 100644 index 000000000..138f1ff13 --- /dev/null +++ b/python/gtsam_py/python/tests/test_Pose3.py @@ -0,0 +1,70 @@ +""" +GTSAM Copyright 
2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Pose3 unit tests. +Author: Frank Dellaert, Duy Nguyen Ta +""" +# pylint: disable=no-name-in-module +import math +import unittest + +import numpy as np + +import gtsam +from gtsam import Point3, Pose3, Rot3 +from gtsam.utils.test_case import GtsamTestCase + + +class TestPose3(GtsamTestCase): + """Test selected Pose3 methods.""" + + def test_between(self): + """Test between method.""" + T2 = Pose3(Rot3.Rodrigues(0.3, 0.2, 0.1), Point3(3.5, -8.2, 4.2)) + T3 = Pose3(Rot3.Rodrigues(-90, 0, 0), Point3(1, 2, 3)) + expected = T2.inverse().compose(T3) + actual = T2.between(T3) + self.gtsamAssertEquals(actual, expected, 1e-6) + + def test_transform_to(self): + """Test transformTo method.""" + transform = Pose3(Rot3.Rodrigues(0, 0, -1.570796), Point3(2, 4, 0)) + actual = transform.transformTo(Point3(3, 2, 10)) + expected = Point3(2, 1, 10) + self.gtsamAssertEquals(actual, expected, 1e-6) + + def test_range(self): + """Test range method.""" + l1 = Point3(1, 0, 0) + l2 = Point3(1, 1, 0) + x1 = Pose3() + + xl1 = Pose3(Rot3.Ypr(0.0, 0.0, 0.0), Point3(1, 0, 0)) + xl2 = Pose3(Rot3.Ypr(0.0, 1.0, 0.0), Point3(1, 1, 0)) + + # establish range is indeed zero + self.assertEqual(1, x1.range(point=l1)) + + # establish range is indeed sqrt2 + self.assertEqual(math.sqrt(2.0), x1.range(point=l2)) + + # establish range is indeed zero + self.assertEqual(1, x1.range(pose=xl1)) + + # establish range is indeed sqrt2 + self.assertEqual(math.sqrt(2.0), x1.range(pose=xl2)) + + def test_adjoint(self): + """Test adjoint method.""" + xi = np.array([1, 2, 3, 4, 5, 6]) + expected = np.dot(Pose3.adjointMap_(xi), xi) + actual = Pose3.adjoint_(xi, xi) + np.testing.assert_array_equal(actual, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_Pose3SLAMExample.py b/python/gtsam_py/python/tests/test_Pose3SLAMExample.py new file mode 100644 index 000000000..1e9eaac67 --- /dev/null +++ b/python/gtsam_py/python/tests/test_Pose3SLAMExample.py @@ -0,0 +1,60 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +PoseSLAM unit tests. +Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest +from math import pi + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase +from gtsam.utils.circlePose3 import * + + +class TestPose3SLAMExample(GtsamTestCase): + + def test_Pose3SLAMExample(self): + # Create a hexagon of poses + hexagon = circlePose3(6, 1.0) + p0 = hexagon.atPose3(0) + p1 = hexagon.atPose3(1) + + # create a Pose graph with one equality constraint and one measurement + fg = gtsam.NonlinearFactorGraph() + fg.add(gtsam.NonlinearEqualityPose3(0, p0)) + delta = p0.between(p1) + covariance = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.05, 0.05, 0.05, 5. * pi / 180, 5. * pi / 180, 5. 
* pi / 180])) + fg.add(gtsam.BetweenFactorPose3(0, 1, delta, covariance)) + fg.add(gtsam.BetweenFactorPose3(1, 2, delta, covariance)) + fg.add(gtsam.BetweenFactorPose3(2, 3, delta, covariance)) + fg.add(gtsam.BetweenFactorPose3(3, 4, delta, covariance)) + fg.add(gtsam.BetweenFactorPose3(4, 5, delta, covariance)) + fg.add(gtsam.BetweenFactorPose3(5, 0, delta, covariance)) + + # Create initial config + initial = gtsam.Values() + s = 0.10 + initial.insert(0, p0) + initial.insert(1, hexagon.atPose3(1).retract(s * np.random.randn(6, 1))) + initial.insert(2, hexagon.atPose3(2).retract(s * np.random.randn(6, 1))) + initial.insert(3, hexagon.atPose3(3).retract(s * np.random.randn(6, 1))) + initial.insert(4, hexagon.atPose3(4).retract(s * np.random.randn(6, 1))) + initial.insert(5, hexagon.atPose3(5).retract(s * np.random.randn(6, 1))) + + # optimize + optimizer = gtsam.LevenbergMarquardtOptimizer(fg, initial) + result = optimizer.optimizeSafely() + + pose_1 = result.atPose3(1) + self.gtsamAssertEquals(pose_1, p1, 1e-4) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_PriorFactor.py b/python/gtsam_py/python/tests/test_PriorFactor.py new file mode 100644 index 000000000..66207b800 --- /dev/null +++ b/python/gtsam_py/python/tests/test_PriorFactor.py @@ -0,0 +1,61 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +PriorFactor unit tests. +Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestPriorFactor(GtsamTestCase): + + def test_PriorFactor(self): + values = gtsam.Values() + + key = 5 + priorPose3 = gtsam.Pose3() + model = gtsam.noiseModel_Unit.Create(6) + factor = gtsam.PriorFactorPose3(key, priorPose3, model) + values.insert(key, priorPose3) + self.assertEqual(factor.error(values), 0) + + key = 3 + priorVector = np.array([0., 0., 0.]) + model = gtsam.noiseModel_Unit.Create(3) + factor = gtsam.PriorFactorVector(key, priorVector, model) + values.insert(key, priorVector) + self.assertEqual(factor.error(values), 0) + + def test_AddPrior(self): + """ + Test adding prior factors directly to factor graph via the .addPrior method. + """ + # define factor graph + graph = gtsam.NonlinearFactorGraph() + + # define and add Pose3 prior + key = 5 + priorPose3 = gtsam.Pose3() + model = gtsam.noiseModel_Unit.Create(6) + graph.addPriorPose3(key, priorPose3, model) + self.assertEqual(graph.size(), 1) + + # define and add Vector prior + key = 3 + priorVector = np.array([0., 0., 0.]) + model = gtsam.noiseModel_Unit.Create(3) + graph.addPriorVector(key, priorVector, model) + self.assertEqual(graph.size(), 2) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_SFMExample.py b/python/gtsam_py/python/tests/test_SFMExample.py new file mode 100644 index 000000000..e8fa46186 --- /dev/null +++ b/python/gtsam_py/python/tests/test_SFMExample.py @@ -0,0 +1,82 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +SFM unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +import gtsam.utils.visual_data_generator as generator +from gtsam import symbol +from gtsam.utils.test_case import GtsamTestCase + + +class TestSFMExample(GtsamTestCase): + + def test_SFMExample(self): + options = generator.Options() + options.triangle = False + options.nrCameras = 10 + + [data, truth] = generator.generate_data(options) + + measurementNoiseSigma = 1.0 + pointNoiseSigma = 0.1 + poseNoiseSigmas = np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]) + + graph = gtsam.NonlinearFactorGraph() + + # Add factors for all measurements + measurementNoise = gtsam.noiseModel_Isotropic.Sigma(2, measurementNoiseSigma) + for i in range(len(data.Z)): + for k in range(len(data.Z[i])): + j = data.J[i][k] + graph.add(gtsam.GenericProjectionFactorCal3_S2( + data.Z[i][k], measurementNoise, + symbol(ord('x'), i), symbol(ord('p'), j), data.K)) + + posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas) + graph.add(gtsam.PriorFactorPose3(symbol(ord('x'), 0), + truth.cameras[0].pose(), posePriorNoise)) + pointPriorNoise = gtsam.noiseModel_Isotropic.Sigma(3, pointNoiseSigma) + graph.add(gtsam.PriorFactorPoint3(symbol(ord('p'), 0), + truth.points[0], pointPriorNoise)) + + # Initial estimate + initialEstimate = gtsam.Values() + for i in range(len(truth.cameras)): + pose_i = truth.cameras[i].pose() + initialEstimate.insert(symbol(ord('x'), i), pose_i) + for j in range(len(truth.points)): + point_j = truth.points[j] + initialEstimate.insert(symbol(ord('p'), j), point_j) + + # Optimization + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) + for i in range(5): + optimizer.iterate() + result = optimizer.values() + + # Marginalization + marginals = gtsam.Marginals(graph, result) + marginals.marginalCovariance(symbol(ord('p'), 0)) + marginals.marginalCovariance(symbol(ord('x'), 0)) + + # Check optimized results, should be equal to ground truth + for i in range(len(truth.cameras)): + pose_i = result.atPose3(symbol(ord('x'), i)) + self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) + + for j in range(len(truth.points)): + point_j = result.atPoint3(symbol(ord('p'), j)) + self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_SO4.py b/python/gtsam_py/python/tests/test_SO4.py new file mode 100644 index 000000000..648bd1710 --- /dev/null +++ b/python/gtsam_py/python/tests/test_SO4.py @@ -0,0 +1,59 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +SO4 unit tests. 
+Author: Frank Dellaert +""" +# pylint: disable=no-name-in-module, import-error +import unittest + +import numpy as np +from gtsam import SO4 + +I4 = SO4() +v1 = np.array([0, 0, 0, .1, 0, 0]) +v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03]) +Q1 = SO4.Expmap(v1) +Q2 = SO4.Expmap(v2) + + +class TestSO4(unittest.TestCase): + """Test selected SO4 methods.""" + + def test_constructor(self): + """Construct from matrix.""" + matrix = np.eye(4) + so4 = SO4(matrix) + self.assertIsInstance(so4, SO4) + + def test_retract(self): + """Test retraction to manifold.""" + v = np.zeros((6,), np.float) + actual = I4.retract(v) + self.assertTrue(actual.equals(I4, 1e-9)) + + def test_local(self): + """Check localCoordinates for trivial case.""" + v0 = np.zeros((6,), np.float) + actual = I4.localCoordinates(I4) + np.testing.assert_array_almost_equal(actual, v0) + + def test_compose(self): + """Check compose works in subgroup.""" + expected = SO4.Expmap(2*v1) + actual = Q1.compose(Q1) + self.assertTrue(actual.equals(expected, 1e-9)) + + def test_vec(self): + """Check wrapping of vec.""" + expected = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]) + actual = I4.vec() + np.testing.assert_array_equal(actual, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_SOn.py b/python/gtsam_py/python/tests/test_SOn.py new file mode 100644 index 000000000..7599363e2 --- /dev/null +++ b/python/gtsam_py/python/tests/test_SOn.py @@ -0,0 +1,59 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Dynamic SO(n) unit tests. +Author: Frank Dellaert +""" +# pylint: disable=no-name-in-module, import-error +import unittest + +import numpy as np +from gtsam import SOn + +I4 = SOn(4) +v1 = np.array([0, 0, 0, .1, 0, 0]) +v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03]) +Q1 = I4.retract(v1) +Q2 = I4.retract(v2) + + +class TestSO4(unittest.TestCase): + """Test selected SOn methods.""" + + def test_constructor(self): + """Construct from matrix.""" + matrix = np.eye(4) + so4 = SOn.FromMatrix(matrix) + self.assertIsInstance(so4, SOn) + + def test_retract(self): + """Test retraction to manifold.""" + v = np.zeros((6,), np.float) + actual = I4.retract(v) + self.assertTrue(actual.equals(I4, 1e-9)) + + def test_local(self): + """Check localCoordinates for trivial case.""" + v0 = np.zeros((6,), np.float) + actual = I4.localCoordinates(I4) + np.testing.assert_array_almost_equal(actual, v0) + + def test_compose(self): + """Check compose works in subgroup.""" + expected = I4.retract(2*v1) + actual = Q1.compose(Q1) + self.assertTrue(actual.equals(expected, 1e-3)) # not exmap so only approximate + + def test_vec(self): + """Check wrapping of vec.""" + expected = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]) + actual = I4.vec() + np.testing.assert_array_equal(actual, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_Scenario.py b/python/gtsam_py/python/tests/test_Scenario.py new file mode 100644 index 000000000..09601fba5 --- /dev/null +++ b/python/gtsam_py/python/tests/test_Scenario.py @@ -0,0 +1,53 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Scenario unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +from __future__ import print_function + +import math +import unittest + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + +# pylint: disable=invalid-name, E1101 + + +class TestScenario(GtsamTestCase): + def setUp(self): + pass + + def test_loop(self): + # Forward velocity 2m/s + # Pitch up with angular velocity 6 degree/sec (negative in FLU) + v = 2 + w = math.radians(6) + W = np.array([0, -w, 0]) + V = np.array([v, 0, 0]) + scenario = gtsam.ConstantTwistScenario(W, V) + + T = 30 + np.testing.assert_almost_equal(W, scenario.omega_b(T)) + np.testing.assert_almost_equal(V, scenario.velocity_b(T)) + np.testing.assert_almost_equal( + np.cross(W, V), scenario.acceleration_b(T)) + + # R = v/w, so test if loop crests at 2*R + R = v / w + T30 = scenario.pose(T) + np.testing.assert_almost_equal( + np.array([math.pi, 0, math.pi]), T30.rotation().xyz()) + self.gtsamAssertEquals(gtsam.Point3( + 0, 0, 2.0 * R), T30.translation(), 1e-9) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/gtsam_py/python/tests/test_SimpleCamera.py b/python/gtsam_py/python/tests/test_SimpleCamera.py new file mode 100644 index 000000000..a3654a5f1 --- /dev/null +++ b/python/gtsam_py/python/tests/test_SimpleCamera.py @@ -0,0 +1,45 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +PinholeCameraCal3_S2 unit tests. +Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import math +import unittest + +import numpy as np + +import gtsam +from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, PinholeCameraCal3_S2 +from gtsam.utils.test_case import GtsamTestCase + +K = Cal3_S2(625, 625, 0, 0, 0) + +class TestSimpleCamera(GtsamTestCase): + + def test_constructor(self): + pose1 = Pose3(Rot3(np.diag([1, -1, -1])), Point3(0, 0, 0.5)) + camera = PinholeCameraCal3_S2(pose1, K) + self.gtsamAssertEquals(camera.calibration(), K, 1e-9) + self.gtsamAssertEquals(camera.pose(), pose1, 1e-9) + + def test_level2(self): + # Create a level camera, looking in Y-direction + pose2 = Pose2(0.4,0.3,math.pi/2.0) + camera = PinholeCameraCal3_S2.Level(K, pose2, 0.1) + + # expected + x = Point3(1,0,0) + y = Point3(0,0,-1) + z = Point3(0,1,0) + wRc = Rot3(x,y,z) + expected = Pose3(wRc,Point3(0.4,0.3,0.1)) + self.gtsamAssertEquals(camera.pose(), expected, 1e-9) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_StereoVOExample.py b/python/gtsam_py/python/tests/test_StereoVOExample.py new file mode 100644 index 000000000..3f5f57522 --- /dev/null +++ b/python/gtsam_py/python/tests/test_StereoVOExample.py @@ -0,0 +1,82 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Stereo VO unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +from gtsam import symbol +from gtsam.utils.test_case import GtsamTestCase + + +class TestStereoVOExample(GtsamTestCase): + + def test_StereoVOExample(self): + ## Assumptions + # - For simplicity this example is in the camera's coordinate frame + # - X: right, Y: down, Z: forward + # - Pose x1 is at the origin, Pose 2 is 1 meter forward (along Z-axis) + # - x1 is fixed with a constraint, x2 is initialized with noisy values + # - No noise on measurements + + ## Create keys for variables + x1 = symbol(ord('x'),1) + x2 = symbol(ord('x'),2) + l1 = symbol(ord('l'),1) + l2 = symbol(ord('l'),2) + l3 = symbol(ord('l'),3) + + ## Create graph container and add factors to it + graph = gtsam.NonlinearFactorGraph() + + ## add a constraint on the starting pose + first_pose = gtsam.Pose3() + graph.add(gtsam.NonlinearEqualityPose3(x1, first_pose)) + + ## Create realistic calibration and measurement noise model + # format: fx fy skew cx cy baseline + K = gtsam.Cal3_S2Stereo(1000, 1000, 0, 320, 240, 0.2) + stereo_model = gtsam.noiseModel_Diagonal.Sigmas(np.array([1.0, 1.0, 1.0])) + + ## Add measurements + # pose 1 + graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(520, 480, 440), stereo_model, x1, l1, K)) + graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(120, 80, 440), stereo_model, x1, l2, K)) + graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(320, 280, 140), stereo_model, x1, l3, K)) + + #pose 2 + graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(570, 520, 490), stereo_model, x2, l1, K)) + graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2( 70, 20, 490), stereo_model, x2, l2, K)) + graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(320, 270, 115), stereo_model, x2, l3, K)) + + ## Create initial estimate for camera poses and landmarks + initialEstimate = gtsam.Values() + initialEstimate.insert(x1, first_pose) + # noisy estimate for pose 2 + initialEstimate.insert(x2, gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(0.1,-.1,1.1))) + expected_l1 = gtsam.Point3( 1, 1, 5) + initialEstimate.insert(l1, expected_l1) + initialEstimate.insert(l2, gtsam.Point3(-1, 1, 5)) + initialEstimate.insert(l3, gtsam.Point3( 0,-.5, 5)) + + ## optimize + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) + result = optimizer.optimize() + + ## check equality for the first pose and point + pose_x1 = result.atPose3(x1) + self.gtsamAssertEquals(pose_x1, first_pose,1e-4) + + point_l1 = result.atPoint3(l1) + self.gtsamAssertEquals(point_l1, expected_l1,1e-4) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_Values.py b/python/gtsam_py/python/tests/test_Values.py new file mode 100644 index 000000000..20634a21c --- /dev/null +++ b/python/gtsam_py/python/tests/test_Values.py @@ -0,0 +1,86 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Values unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +# pylint: disable=invalid-name, E1101, E0611 +import unittest + +import numpy as np + +import gtsam +from gtsam import (Cal3_S2, Cal3Bundler, Cal3DS2, EssentialMatrix, Point2, + Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values, + imuBias_ConstantBias) +from gtsam.utils.test_case import GtsamTestCase + + +class TestValues(GtsamTestCase): + + def test_values(self): + values = Values() + E = EssentialMatrix(Rot3(), Unit3()) + tol = 1e-9 + + values.insert(0, Point2(0, 0)) + values.insert(1, Point3(0, 0, 0)) + values.insert(2, Rot2()) + values.insert(3, Pose2()) + values.insert(4, Rot3()) + values.insert(5, Pose3()) + values.insert(6, Cal3_S2()) + values.insert(7, Cal3DS2()) + values.insert(8, Cal3Bundler()) + values.insert(9, E) + values.insert(10, imuBias_ConstantBias()) + + # Special cases for Vectors and Matrices + # Note that gtsam's Eigen Vectors and Matrices requires double-precision + # floating point numbers in column-major (Fortran style) storage order, + # whereas by default, numpy.array is in row-major order and the type is + # in whatever the number input type is, e.g. np.array([1,2,3]) + # will have 'int' type. + # + # The wrapper will automatically fix the type and storage order for you, + # but for performance reasons, it's recommended to specify the correct + # type and storage order. + # for vectors, the order is not important, but dtype still is + vec = np.array([1., 2., 3.]) + values.insert(11, vec) + mat = np.array([[1., 2.], [3., 4.]], order='F') + values.insert(12, mat) + # Test with dtype int and the default order='C' + # This still works as the wrapper converts to the correct type and order for you + # but is nornally not recommended! + mat2 = np.array([[1, 2, ], [3, 5]]) + values.insert(13, mat2) + + self.gtsamAssertEquals(values.atPoint2(0), Point2(0,0), tol) + self.gtsamAssertEquals(values.atPoint3(1), Point3(0,0,0), tol) + self.gtsamAssertEquals(values.atRot2(2), Rot2(), tol) + self.gtsamAssertEquals(values.atPose2(3), Pose2(), tol) + self.gtsamAssertEquals(values.atRot3(4), Rot3(), tol) + self.gtsamAssertEquals(values.atPose3(5), Pose3(), tol) + self.gtsamAssertEquals(values.atCal3_S2(6), Cal3_S2(), tol) + self.gtsamAssertEquals(values.atCal3DS2(7), Cal3DS2(), tol) + self.gtsamAssertEquals(values.atCal3Bundler(8), Cal3Bundler(), tol) + self.gtsamAssertEquals(values.atEssentialMatrix(9), E, tol) + self.gtsamAssertEquals(values.atimuBias_ConstantBias( + 10), imuBias_ConstantBias(), tol) + + # special cases for Vector and Matrix: + actualVector = values.atVector(11) + np.testing.assert_allclose(vec, actualVector, tol) + actualMatrix = values.atMatrix(12) + np.testing.assert_allclose(mat, actualMatrix, tol) + actualMatrix2 = values.atMatrix(13) + np.testing.assert_allclose(mat2, actualMatrix2, tol) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_VisualISAMExample.py b/python/gtsam_py/python/tests/test_VisualISAMExample.py new file mode 100644 index 000000000..99d7e6160 --- /dev/null +++ b/python/gtsam_py/python/tests/test_VisualISAMExample.py @@ -0,0 +1,57 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +visual_isam unit tests. 
+Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +import gtsam.utils.visual_data_generator as generator +import gtsam.utils.visual_isam as visual_isam +from gtsam import symbol +from gtsam.utils.test_case import GtsamTestCase + + +class TestVisualISAMExample(GtsamTestCase): + + def test_VisualISAMExample(self): + # Data Options + options = generator.Options() + options.triangle = False + options.nrCameras = 20 + + # iSAM Options + isamOptions = visual_isam.Options() + isamOptions.hardConstraint = False + isamOptions.pointPriors = False + isamOptions.batchInitialization = True + isamOptions.reorderInterval = 10 + isamOptions.alwaysRelinearize = False + + # Generate data + data, truth = generator.generate_data(options) + + # Initialize iSAM with the first pose and points + isam, result, nextPose = visual_isam.initialize(data, truth, isamOptions) + + # Main loop for iSAM: stepping through all poses + for currentPose in range(nextPose, options.nrCameras): + isam, result = visual_isam.step(data, isam, result, truth, currentPose) + + for i in range(len(truth.cameras)): + pose_i = result.atPose3(symbol(ord('x'), i)) + self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) + + for j in range(len(truth.points)): + point_j = result.atPoint3(symbol(ord('l'), j)) + self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_dataset.py b/python/gtsam_py/python/tests/test_dataset.py new file mode 100644 index 000000000..60fb9450d --- /dev/null +++ b/python/gtsam_py/python/tests/test_dataset.py @@ -0,0 +1,45 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for testing dataset access. +Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +# pylint: disable=invalid-name, no-name-in-module, no-member + +from __future__ import print_function + +import unittest + +import gtsam +from gtsam import BetweenFactorPose3, BetweenFactorPose3s +from gtsam.utils.test_case import GtsamTestCase + + +class TestDataset(GtsamTestCase): + """Tests for datasets.h wrapper.""" + + def setUp(self): + """Get some common paths.""" + self.pose3_example_g2o_file = gtsam.findExampleDataFile( + "pose3example.txt") + + def test_readG2o3D(self): + """Test reading directly into factor graph.""" + is3D = True + graph, initial = gtsam.readG2o(self.pose3_example_g2o_file, is3D) + self.assertEqual(graph.size(), 6) + self.assertEqual(initial.size(), 5) + + def test_parse3Dfactors(self): + """Test parsing into data structure.""" + factors = gtsam.parse3DFactors(self.pose3_example_g2o_file) + self.assertEqual(factors.size(), 6) + self.assertIsInstance(factors.at(0), BetweenFactorPose3) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/gtsam_py/python/tests/test_dsf_map.py b/python/gtsam_py/python/tests/test_dsf_map.py new file mode 100644 index 000000000..73ddcb050 --- /dev/null +++ b/python/gtsam_py/python/tests/test_dsf_map.py @@ -0,0 +1,40 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for Disjoint Set Forest. 
+Author: Frank Dellaert & Varun Agrawal +""" +# pylint: disable=invalid-name, no-name-in-module, no-member + +from __future__ import print_function + +import unittest + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestDSFMap(GtsamTestCase): + """Tests for DSFMap.""" + + def test_all(self): + """Test everything in DFSMap.""" + def key(index_pair): + return index_pair.i(), index_pair.j() + + dsf = gtsam.DSFMapIndexPair() + pair1 = gtsam.IndexPair(1, 18) + self.assertEqual(key(dsf.find(pair1)), key(pair1)) + pair2 = gtsam.IndexPair(2, 2) + + # testing the merge feature of dsf + dsf.merge(pair1, pair2) + self.assertEqual(key(dsf.find(pair1)), key(dsf.find(pair2))) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/gtsam_py/python/tests/test_initialize_pose3.py b/python/gtsam_py/python/tests/test_initialize_pose3.py new file mode 100644 index 000000000..3aa7e3470 --- /dev/null +++ b/python/gtsam_py/python/tests/test_initialize_pose3.py @@ -0,0 +1,89 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for 3D SLAM initialization, using rotation relaxation. +Author: Luca Carlone and Frank Dellaert (Python) +""" +# pylint: disable=invalid-name, E1101, E0611 +import unittest + +import numpy as np + +import gtsam +from gtsam import NonlinearFactorGraph, Point3, Pose3, Rot3, Values +from gtsam.utils.test_case import GtsamTestCase + +x0, x1, x2, x3 = 0, 1, 2, 3 + + +class TestValues(GtsamTestCase): + + def setUp(self): + + model = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) + + # We consider a small graph: + # symbolic FG + # x2 0 1 + # / | \ 1 2 + # / | \ 2 3 + # x3 | x1 2 0 + # \ | / 0 3 + # \ | / + # x0 + # + p0 = Point3(0, 0, 0) + self.R0 = Rot3.Expmap(np.array([0.0, 0.0, 0.0])) + p1 = Point3(1, 2, 0) + self.R1 = Rot3.Expmap(np.array([0.0, 0.0, 1.570796])) + p2 = Point3(0, 2, 0) + self.R2 = Rot3.Expmap(np.array([0.0, 0.0, 3.141593])) + p3 = Point3(-1, 1, 0) + self.R3 = Rot3.Expmap(np.array([0.0, 0.0, 4.712389])) + + pose0 = Pose3(self.R0, p0) + pose1 = Pose3(self.R1, p1) + pose2 = Pose3(self.R2, p2) + pose3 = Pose3(self.R3, p3) + + g = NonlinearFactorGraph() + g.add(gtsam.BetweenFactorPose3(x0, x1, pose0.between(pose1), model)) + g.add(gtsam.BetweenFactorPose3(x1, x2, pose1.between(pose2), model)) + g.add(gtsam.BetweenFactorPose3(x2, x3, pose2.between(pose3), model)) + g.add(gtsam.BetweenFactorPose3(x2, x0, pose2.between(pose0), model)) + g.add(gtsam.BetweenFactorPose3(x0, x3, pose0.between(pose3), model)) + g.add(gtsam.PriorFactorPose3(x0, pose0, model)) + self.graph = g + + def test_buildPose3graph(self): + pose3graph = gtsam.InitializePose3.buildPose3graph(self.graph) + + def test_orientations(self): + pose3Graph = gtsam.InitializePose3.buildPose3graph(self.graph) + + initial = gtsam.InitializePose3.computeOrientationsChordal(pose3Graph) + + # comparison is up to M_PI, that's why we add some multiples of 2*M_PI + self.gtsamAssertEquals(initial.atRot3(x0), self.R0, 1e-6) + self.gtsamAssertEquals(initial.atRot3(x1), self.R1, 1e-6) + self.gtsamAssertEquals(initial.atRot3(x2), self.R2, 1e-6) + self.gtsamAssertEquals(initial.atRot3(x3), self.R3, 1e-6) + + def test_initializePoses(self): + g2oFile = gtsam.findExampleDataFile("pose3example-grid") + is3D = True + inputGraph, expectedValues = gtsam.readG2o(g2oFile, is3D) + priorModel = gtsam.noiseModel_Unit.Create(6) + inputGraph.add(gtsam.PriorFactorPose3(0, Pose3(), priorModel)) + + 
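+        # The g2o file provides only between-factors and initial guesses, so the
+        # problem has a gauge freedom; anchoring pose 0 at the origin with this
+        # prior makes the initialization below well-posed.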
initial = gtsam.InitializePose3.initialize(inputGraph) + # TODO(frank): very loose !! + self.gtsamAssertEquals(initial, expectedValues, 0.1) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_logging_optimizer.py b/python/gtsam_py/python/tests/test_logging_optimizer.py new file mode 100644 index 000000000..2560a72a2 --- /dev/null +++ b/python/gtsam_py/python/tests/test_logging_optimizer.py @@ -0,0 +1,108 @@ +""" +Unit tests for optimization that logs to comet.ml. +Author: Jing Wu and Frank Dellaert +""" +# pylint: disable=invalid-name + +import sys +if sys.version_info.major >= 3: + from io import StringIO +else: + from cStringIO import StringIO + +import unittest +from datetime import datetime + +import gtsam +import numpy as np +from gtsam import Rot3 +from gtsam.utils.test_case import GtsamTestCase + +from gtsam.utils.logging_optimizer import gtsam_optimize + +KEY = 0 +MODEL = gtsam.noiseModel_Unit.Create(3) + + +class TestOptimizeComet(GtsamTestCase): + """Check correct logging to comet.ml.""" + + def setUp(self): + """Set up a small Karcher mean optimization example.""" + # Grabbed from KarcherMeanFactor unit tests. + R = Rot3.Expmap(np.array([0.1, 0, 0])) + rotations = {R, R.inverse()} # mean is the identity + self.expected = Rot3() + + graph = gtsam.NonlinearFactorGraph() + for R in rotations: + graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL)) + initial = gtsam.Values() + initial.insert(KEY, R) + self.params = gtsam.GaussNewtonParams() + self.optimizer = gtsam.GaussNewtonOptimizer( + graph, initial, self.params) + + self.lmparams = gtsam.LevenbergMarquardtParams() + self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer( + graph, initial, self.lmparams + ) + + # setup output capture + self.capturedOutput = StringIO() + sys.stdout = self.capturedOutput + + def tearDown(self): + """Reset print capture.""" + sys.stdout = sys.__stdout__ + + def test_simple_printing(self): + """Test with a simple hook.""" + + # Provide a hook that just prints + def hook(_, error): + print(error) + + # Only thing we require from optimizer is an iterate method + gtsam_optimize(self.optimizer, self.params, hook) + + # Check that optimizing yields the identity. 
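+        # values() returns the optimizer's current (converged) estimate.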
+ actual = self.optimizer.values() + self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6) + + def test_lm_simple_printing(self): + """Make sure we are properly terminating LM""" + def hook(_, error): + print(error) + + gtsam_optimize(self.lmoptimizer, self.lmparams, hook) + + actual = self.lmoptimizer.values() + self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6) + + @unittest.skip("Not a test we want run every time, as needs comet.ml account") + def test_comet(self): + """Test with a comet hook.""" + from comet_ml import Experiment + comet = Experiment(project_name="Testing", + auto_output_logging="native") + comet.log_dataset_info(name="Karcher", path="shonan") + comet.add_tag("GaussNewton") + comet.log_parameter("method", "GaussNewton") + time = datetime.now() + comet.set_name("GaussNewton-" + str(time.month) + "/" + str(time.day) + " " + + str(time.hour)+":"+str(time.minute)+":"+str(time.second)) + + # I want to do some comet thing here + def hook(optimizer, error): + comet.log_metric("Karcher error", + error, optimizer.iterations()) + + gtsam_optimize(self.optimizer, self.params, hook) + comet.end() + + actual = self.optimizer.values() + self.gtsamAssertEquals(actual.atRot3(KEY), self.expected) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/utils/__init__.py b/python/gtsam_py/python/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/gtsam_py/python/utils/circlePose3.py b/python/gtsam_py/python/utils/circlePose3.py new file mode 100644 index 000000000..7012548f4 --- /dev/null +++ b/python/gtsam_py/python/utils/circlePose3.py @@ -0,0 +1,38 @@ +import gtsam +import numpy as np +from math import pi, cos, sin + + +def circlePose3(numPoses=8, radius=1.0, symbolChar=0): + """ + circlePose3 generates a set of poses in a circle. This function + returns those poses inside a gtsam.Values object, with sequential + keys starting from 0. An optional character may be provided, which + will be stored in the msb of each key (i.e. gtsam.Symbol). + + We use aerospace/navlab convention, X forward, Y right, Z down + First pose will be at (R,0,0) + ^y ^ X + | | + z-->xZ--> Y (z pointing towards viewer, Z pointing away from viewer) + Vehicle at p0 is looking towards y axis (X-axis points towards world y) + """ + + # Force symbolChar to be a single character + if type(symbolChar) is str: + symbolChar = ord(symbolChar[0]) + + values = gtsam.Values() + theta = 0.0 + dtheta = 2 * pi / numPoses + gRo = gtsam.Rot3( + np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]], order='F')) + for i in range(numPoses): + key = gtsam.symbol(symbolChar, i) + gti = gtsam.Point3(radius * cos(theta), radius * sin(theta), 0) + oRi = gtsam.Rot3.Yaw( + -theta) # negative yaw goes counterclockwise, with Z down ! + gTi = gtsam.Pose3(gRo.compose(oRi), gti) + values.insert(key, gTi) + theta = theta + dtheta + return values diff --git a/python/gtsam_py/python/utils/logging_optimizer.py b/python/gtsam_py/python/utils/logging_optimizer.py new file mode 100644 index 000000000..3d9175951 --- /dev/null +++ b/python/gtsam_py/python/utils/logging_optimizer.py @@ -0,0 +1,52 @@ +""" +Optimization with logging via a hook. +Author: Jing Wu and Frank Dellaert +""" +# pylint: disable=invalid-name + +from gtsam import NonlinearOptimizer, NonlinearOptimizerParams +import gtsam + + +def optimize(optimizer, check_convergence, hook): + """ Given an optimizer and a convergence check, iterate until convergence. 
+ After each iteration, hook(optimizer, error) is called. + After the function, use values and errors to get the result. + Arguments: + optimizer (T): needs an iterate and an error function. + check_convergence: T * float * float -> bool + hook -- hook function to record the error + """ + # the optimizer is created with default values which incur the error below + current_error = optimizer.error() + hook(optimizer, current_error) + + # Iterative loop + while True: + # Do next iteration + optimizer.iterate() + new_error = optimizer.error() + hook(optimizer, new_error) + if check_convergence(optimizer, current_error, new_error): + return + current_error = new_error + + +def gtsam_optimize(optimizer, + params, + hook): + """ Given an optimizer and params, iterate until convergence. + After each iteration, hook(optimizer) is called. + After the function, use values and errors to get the result. + Arguments: + optimizer {NonlinearOptimizer} -- Nonlinear optimizer + params {NonlinearOptimizarParams} -- Nonlinear optimizer parameters + hook -- hook function to record the error + """ + def check_convergence(optimizer, current_error, new_error): + return (optimizer.iterations() >= params.getMaxIterations()) or ( + gtsam.checkConvergence(params.getRelativeErrorTol(), params.getAbsoluteErrorTol(), params.getErrorTol(), + current_error, new_error)) or ( + isinstance(optimizer, gtsam.LevenbergMarquardtOptimizer) and optimizer.lambda_() > params.getlambdaUpperBound()) + optimize(optimizer, check_convergence, hook) + return optimizer.values() diff --git a/python/gtsam_py/python/utils/plot.py b/python/gtsam_py/python/utils/plot.py new file mode 100644 index 000000000..1e976a69e --- /dev/null +++ b/python/gtsam_py/python/utils/plot.py @@ -0,0 +1,369 @@ +"""Various plotting utlities.""" + +import numpy as np +import matplotlib.pyplot as plt +from matplotlib import patches +from mpl_toolkits.mplot3d import Axes3D + +import gtsam + + +def set_axes_equal(fignum): + """ + Make axes of 3D plot have equal scale so that spheres appear as spheres, + cubes as cubes, etc.. This is one possible solution to Matplotlib's + ax.set_aspect('equal') and ax.axis('equal') not working for 3D. + + Args: + fignum (int): An integer representing the figure number for Matplotlib. + """ + fig = plt.figure(fignum) + ax = fig.gca(projection='3d') + + limits = np.array([ + ax.get_xlim3d(), + ax.get_ylim3d(), + ax.get_zlim3d(), + ]) + + origin = np.mean(limits, axis=1) + radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0])) + + ax.set_xlim3d([origin[0] - radius, origin[0] + radius]) + ax.set_ylim3d([origin[1] - radius, origin[1] + radius]) + ax.set_zlim3d([origin[2] - radius, origin[2] + radius]) + + +def ellipsoid(xc, yc, zc, rx, ry, rz, n): + """ + Numpy equivalent of Matlab's ellipsoid function. + + Args: + xc (double): Center of ellipsoid in X-axis. + yc (double): Center of ellipsoid in Y-axis. + zc (double): Center of ellipsoid in Z-axis. + rx (double): Radius of ellipsoid in X-axis. + ry (double): Radius of ellipsoid in Y-axis. + rz (double): Radius of ellipsoid in Z-axis. + n (int): The granularity of the ellipsoid plotted. + + Returns: + tuple[numpy.ndarray]: The points in the x, y and z axes to use for the surface plot. 
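+
+    Example (illustrative): ellipsoid(0, 0, 0, 1, 1, 1, 16) returns three
+    (17, 17) arrays tracing a unit sphere centred at the origin, suitable
+    for Axes3D.plot_surface.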
+ """ + u = np.linspace(0, 2*np.pi, n+1) + v = np.linspace(0, np.pi, n+1) + x = -rx * np.outer(np.cos(u), np.sin(v)).T + y = -ry * np.outer(np.sin(u), np.sin(v)).T + z = -rz * np.outer(np.ones_like(u), np.cos(v)).T + + return x, y, z + + +def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5): + """ + Plots a Gaussian as an uncertainty ellipse + + Based on Maybeck Vol 1, page 366 + k=2.296 corresponds to 1 std, 68.26% of all probability + k=11.82 corresponds to 3 std, 99.74% of all probability + + Args: + axes (matplotlib.axes.Axes): Matplotlib axes. + origin (gtsam.Point3): The origin in the world frame. + P (numpy.ndarray): The marginal covariance matrix of the 3D point which will be represented as an ellipse. + scale (float): Scaling factor of the radii of the covariance ellipse. + n (int): Defines the granularity of the ellipse. Higher values indicate finer ellipses. + alpha (float): Transparency value for the plotted surface in the range [0, 1]. + """ + k = 11.82 + U, S, _ = np.linalg.svd(P) + + radii = k * np.sqrt(S) + radii = radii * scale + rx, ry, rz = radii + + # generate data for "unrotated" ellipsoid + xc, yc, zc = ellipsoid(0, 0, 0, rx, ry, rz, n) + + # rotate data with orientation matrix U and center c + data = np.kron(U[:, 0:1], xc) + np.kron(U[:, 1:2], yc) + \ + np.kron(U[:, 2:3], zc) + n = data.shape[1] + x = data[0:n, :] + origin[0] + y = data[n:2*n, :] + origin[1] + z = data[2*n:, :] + origin[2] + + axes.plot_surface(x, y, z, alpha=alpha, cmap='hot') + + +def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): + """ + Plot a 2D pose on given axis `axes` with given `axis_length`. + + Args: + axes (matplotlib.axes.Axes): Matplotlib axes. + pose (gtsam.Pose2): The pose to be plotted. + axis_length (float): The length of the camera axes. + covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + """ + # get rotation and translation (center) + gRp = pose.rotation().matrix() # rotation from pose to global + t = pose.translation() + origin = np.array([t.x(), t.y()]) + + # draw the camera axes + x_axis = origin + gRp[:, 0] * axis_length + line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0) + axes.plot(line[:, 0], line[:, 1], 'r-') + + y_axis = origin + gRp[:, 1] * axis_length + line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0) + axes.plot(line[:, 0], line[:, 1], 'g-') + + if covariance is not None: + pPp = covariance[0:2, 0:2] + gPp = np.matmul(np.matmul(gRp, pPp), gRp.T) + + w, v = np.linalg.eig(gPp) + + # k = 2.296 + k = 5.0 + + angle = np.arctan2(v[1, 0], v[0, 0]) + e1 = patches.Ellipse(origin, np.sqrt(w[0]*k), np.sqrt(w[1]*k), + np.rad2deg(angle), fill=False) + axes.add_patch(e1) + + +def plot_pose2(fignum, pose, axis_length=0.1, covariance=None, + axis_labels=('X axis', 'Y axis', 'Z axis')): + """ + Plot a 2D pose on given figure with given `axis_length`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + pose (gtsam.Pose2): The pose to be plotted. + axis_length (float): The length of the camera axes. + covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + axis_labels (iterable[string]): List of axis labels to set. 
+ """ + # get figure object + fig = plt.figure(fignum) + axes = fig.gca() + plot_pose2_on_axes(axes, pose, axis_length=axis_length, + covariance=covariance) + + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) + + return fig + + +def plot_point3_on_axes(axes, point, linespec, P=None): + """ + Plot a 3D point on given axis `axes` with given `linespec`. + + Args: + axes (matplotlib.axes.Axes): Matplotlib axes. + point (gtsam.Point3): The point to be plotted. + linespec (string): String representing formatting options for Matplotlib. + P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + """ + axes.plot([point.x()], [point.y()], [point.z()], linespec) + if P is not None: + plot_covariance_ellipse_3d(axes, point.vector(), P) + + +def plot_point3(fignum, point, linespec, P=None, + axis_labels=('X axis', 'Y axis', 'Z axis')): + """ + Plot a 3D point on given figure with given `linespec`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + point (gtsam.Point3): The point to be plotted. + linespec (string): String representing formatting options for Matplotlib. + P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + axis_labels (iterable[string]): List of axis labels to set. + + Returns: + fig: The matplotlib figure. + + """ + fig = plt.figure(fignum) + axes = fig.gca(projection='3d') + plot_point3_on_axes(axes, point, linespec, P) + + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) + + return fig + + +def plot_3d_points(fignum, values, linespec="g*", marginals=None, + title="3D Points", axis_labels=('X axis', 'Y axis', 'Z axis')): + """ + Plots the Point3s in `values`, with optional covariances. + Finds all the Point3 objects in the given Values object and plots them. + If a Marginals object is given, this function will also plot marginal + covariance ellipses for each point. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + values (gtsam.Values): Values dictionary consisting of points to be plotted. + linespec (string): String representing formatting options for Matplotlib. + marginals (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + title (string): The title of the plot. + axis_labels (iterable[string]): List of axis labels to set. + """ + + keys = values.keys() + + # Plot points and covariance matrices + for i in range(keys.size()): + try: + key = keys.at(i) + point = values.atPoint3(key) + if marginals is not None: + covariance = marginals.marginalCovariance(key) + else: + covariance = None + + fig = plot_point3(fignum, point, linespec, covariance, + axis_labels=axis_labels) + + except RuntimeError: + continue + # I guess it's not a Point3 + + fig.suptitle(title) + fig.canvas.set_window_title(title.lower()) + + +def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): + """ + Plot a 3D pose on given axis `axes` with given `axis_length`. + + Args: + axes (matplotlib.axes.Axes): Matplotlib axes. + point (gtsam.Point3): The point to be plotted. + linespec (string): String representing formatting options for Matplotlib. + P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. 
+ """ + # get rotation and translation (center) + gRp = pose.rotation().matrix() # rotation from pose to global + origin = pose.translation().vector() + + # draw the camera axes + x_axis = origin + gRp[:, 0] * axis_length + line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0) + axes.plot(line[:, 0], line[:, 1], line[:, 2], 'r-') + + y_axis = origin + gRp[:, 1] * axis_length + line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0) + axes.plot(line[:, 0], line[:, 1], line[:, 2], 'g-') + + z_axis = origin + gRp[:, 2] * axis_length + line = np.append(origin[np.newaxis], z_axis[np.newaxis], axis=0) + axes.plot(line[:, 0], line[:, 1], line[:, 2], 'b-') + + # plot the covariance + if P is not None: + # covariance matrix in pose coordinate frame + pPp = P[3:6, 3:6] + # convert the covariance matrix to global coordinate frame + gPp = gRp @ pPp @ gRp.T + plot_covariance_ellipse_3d(axes, origin, gPp) + + +def plot_pose3(fignum, pose, axis_length=0.1, P=None, + axis_labels=('X axis', 'Y axis', 'Z axis')): + """ + Plot a 3D pose on given figure with given `axis_length`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + pose (gtsam.Pose3): 3D pose to be plotted. + linespec (string): String representing formatting options for Matplotlib. + P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + axis_labels (iterable[string]): List of axis labels to set. + + Returns: + fig: The matplotlib figure. + """ + # get figure object + fig = plt.figure(fignum) + axes = fig.gca(projection='3d') + plot_pose3_on_axes(axes, pose, P=P, + axis_length=axis_length) + + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) + + return fig + + +def plot_trajectory(fignum, values, scale=1, marginals=None, + title="Plot Trajectory", axis_labels=('X axis', 'Y axis', 'Z axis')): + """ + Plot a complete 3D trajectory using poses in `values`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + values (gtsam.Values): Values dict containing the poses. + scale (float): Value to scale the poses by. + marginals (gtsam.Marginals): Marginalized probability values of the estimation. + Used to plot uncertainty bounds. + title (string): The title of the plot. + axis_labels (iterable[string]): List of axis labels to set. 
+ """ + pose3Values = gtsam.utilities_allPose3s(values) + keys = gtsam.KeyVector(pose3Values.keys()) + lastIndex = None + + for i in range(keys.size()): + key = keys.at(i) + try: + pose = pose3Values.atPose3(key) + except: + print("Warning: no Pose3 at key: {0}".format(key)) + + if lastIndex is not None: + lastKey = keys.at(lastIndex) + try: + lastPose = pose3Values.atPose3(lastKey) + except: + print("Warning: no Pose3 at key: {0}".format(lastKey)) + pass + + if marginals: + covariance = marginals.marginalCovariance(lastKey) + else: + covariance = None + + fig = plot_pose3(fignum, lastPose, P=covariance, + axis_length=scale, axis_labels=axis_labels) + + lastIndex = i + + # Draw final pose + if lastIndex is not None: + lastKey = keys.at(lastIndex) + try: + lastPose = pose3Values.atPose3(lastKey) + if marginals: + covariance = marginals.marginalCovariance(lastKey) + else: + covariance = None + + fig = plot_pose3(fignum, lastPose, P=covariance, + axis_length=scale, axis_labels=axis_labels) + + except: + pass + + fig.suptitle(title) + fig.canvas.set_window_title(title.lower()) diff --git a/python/gtsam_py/python/utils/test_case.py b/python/gtsam_py/python/utils/test_case.py new file mode 100644 index 000000000..7df1e6ee9 --- /dev/null +++ b/python/gtsam_py/python/utils/test_case.py @@ -0,0 +1,27 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +TestCase class with GTSAM assert utils. +Author: Frank Dellaert +""" +import unittest + + +class GtsamTestCase(unittest.TestCase): + """TestCase class with GTSAM assert utils.""" + + def gtsamAssertEquals(self, actual, expected, tol=1e-9): + """ AssertEqual function that prints out actual and expected if not equal. 
+ Usage: + self.gtsamAssertEqual(actual,expected) + Keyword Arguments: + tol {float} -- tolerance passed to 'equals', default 1e-9 + """ + equal = actual.equals(expected, tol) + if not equal: + raise self.failureException( + "Values are not equal:\n{}!={}".format(actual, expected)) diff --git a/python/gtsam_py/python/utils/visual_data_generator.py b/python/gtsam_py/python/utils/visual_data_generator.py new file mode 100644 index 000000000..f04588e70 --- /dev/null +++ b/python/gtsam_py/python/utils/visual_data_generator.py @@ -0,0 +1,118 @@ +from __future__ import print_function + +import numpy as np + +import gtsam +from gtsam import Cal3_S2, PinholeCameraCal3_S2, Point2, Point3, Pose3 + + +class Options: + """ + Options to generate test scenario + """ + + def __init__(self, triangle=False, nrCameras=3, K=Cal3_S2()): + """ + Options to generate test scenario + @param triangle: generate a triangle scene with 3 points if True, otherwise + a cube with 8 points + @param nrCameras: number of cameras to generate + @param K: camera calibration object + """ + self.triangle = triangle + self.nrCameras = nrCameras + + +class GroundTruth: + """ + Object holding generated ground-truth data + """ + + def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): + self.K = K + self.cameras = [Pose3()] * nrCameras + self.points = [Point3()] * nrPoints + + def print_(self, s=""): + print(s) + print("K = ", self.K) + print("Cameras: ", len(self.cameras)) + for camera in self.cameras: + print("\t", camera) + print("Points: ", len(self.points)) + for point in self.points: + print("\t", point) + pass + + +class Data: + """ + Object holding generated measurement data + """ + + class NoiseModels: + pass + + def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): + self.K = K + self.Z = [x[:] for x in [[Point2()] * nrPoints] * nrCameras] + self.J = [x[:] for x in [[0] * nrPoints] * nrCameras] + self.odometry = [Pose3()] * nrCameras + + # Set Noise parameters + self.noiseModels = Data.NoiseModels() + self.noiseModels.posePrior = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])) + # noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + # np.array([0.001,0.001,0.001,0.1,0.1,0.1])) + self.noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.05, 0.05, 0.05, 0.2, 0.2, 0.2])) + self.noiseModels.pointPrior = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + self.noiseModels.measurement = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) + + +def generate_data(options): + """ Generate ground-truth and measurement data. """ + + K = Cal3_S2(500, 500, 0, 640. / 2., 480. / 2.) 
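+    # The calibration above is Cal3_S2(fx, fy, skew, u0, v0): 500-pixel focal
+    # lengths, zero skew, and the principal point at the centre of a 640x480 image.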
+ nrPoints = 3 if options.triangle else 8 + + truth = GroundTruth(K=K, nrCameras=options.nrCameras, nrPoints=nrPoints) + data = Data(K, nrCameras=options.nrCameras, nrPoints=nrPoints) + + # Generate simulated data + if options.triangle: # Create a triangle target, just 3 points on a plane + r = 10 + for j in range(len(truth.points)): + theta = j * 2 * np.pi / nrPoints + truth.points[j] = Point3(r * np.cos(theta), r * np.sin(theta), 0) + else: # 3D landmarks as vertices of a cube + truth.points = [ + Point3(10, 10, 10), Point3(-10, 10, 10), + Point3(-10, -10, 10), Point3(10, -10, 10), + Point3(10, 10, -10), Point3(-10, 10, -10), + Point3(-10, -10, -10), Point3(10, -10, -10) + ] + + # Create camera cameras on a circle around the triangle + height = 10 + r = 40 + for i in range(options.nrCameras): + theta = i * 2 * np.pi / options.nrCameras + t = Point3(r * np.cos(theta), r * np.sin(theta), height) + truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, + Point3(), + Point3(0, 0, 1), + truth.K) + # Create measurements + for j in range(nrPoints): + # All landmarks seen in every frame + data.Z[i][j] = truth.cameras[i].project(truth.points[j]) + data.J[i][j] = j + + # Calculate odometry between cameras + for i in range(1, options.nrCameras): + data.odometry[i] = truth.cameras[i - 1].pose().between( + truth.cameras[i].pose()) + + return data, truth diff --git a/python/gtsam_py/python/utils/visual_isam.py b/python/gtsam_py/python/utils/visual_isam.py new file mode 100644 index 000000000..b0ebe68c3 --- /dev/null +++ b/python/gtsam_py/python/utils/visual_isam.py @@ -0,0 +1,131 @@ +import gtsam +from gtsam import symbol + + +class Options: + """ Options for visual isam example. """ + + def __init__(self): + self.hardConstraint = False + self.pointPriors = False + self.batchInitialization = True + self.reorderInterval = 10 + self.alwaysRelinearize = False + + +def initialize(data, truth, options): + # Initialize iSAM + params = gtsam.ISAM2Params() + if options.alwaysRelinearize: + params.setRelinearizeSkip(1) + isam = gtsam.ISAM2(params=params) + + # Add constraints/priors + # TODO: should not be from ground truth! + newFactors = gtsam.NonlinearFactorGraph() + initialEstimates = gtsam.Values() + for i in range(2): + ii = symbol(ord('x'), i) + if i == 0: + if options.hardConstraint: # add hard constraint + newFactors.add( + gtsam.NonlinearEqualityPose3(ii, truth.cameras[0].pose())) + else: + newFactors.add( + gtsam.PriorFactorPose3(ii, truth.cameras[i].pose(), + data.noiseModels.posePrior)) + initialEstimates.insert(ii, truth.cameras[i].pose()) + + nextPoseIndex = 2 + + # Add visual measurement factors from two first poses and initialize + # observed landmarks + for i in range(2): + ii = symbol(ord('x'), i) + for k in range(len(data.Z[i])): + j = data.J[i][k] + jj = symbol(ord('l'), j) + newFactors.add( + gtsam.GenericProjectionFactorCal3_S2(data.Z[i][ + k], data.noiseModels.measurement, ii, jj, data.K)) + # TODO: initial estimates should not be from ground truth! 
+ if not initialEstimates.exists(jj): + initialEstimates.insert(jj, truth.points[j]) + if options.pointPriors: # add point priors + newFactors.add( + gtsam.PriorFactorPoint3(jj, truth.points[j], + data.noiseModels.pointPrior)) + + # Add odometry between frames 0 and 1 + newFactors.add( + gtsam.BetweenFactorPose3( + symbol(ord('x'), 0), + symbol(ord('x'), 1), data.odometry[1], data.noiseModels.odometry)) + + # Update ISAM + if options.batchInitialization: # Do a full optimize for first two poses + batchOptimizer = gtsam.LevenbergMarquardtOptimizer(newFactors, + initialEstimates) + fullyOptimized = batchOptimizer.optimize() + isam.update(newFactors, fullyOptimized) + else: + isam.update(newFactors, initialEstimates) + + # figure(1)tic + # t=toc plot(frame_i,t,'r.') tic + result = isam.calculateEstimate() + # t=toc plot(frame_i,t,'g.') + + return isam, result, nextPoseIndex + + +def step(data, isam, result, truth, currPoseIndex): + ''' + Do one step isam update + @param[in] data: measurement data (odometry and visual measurements and their noiseModels) + @param[in/out] isam: current isam object, will be updated + @param[in/out] result: current result object, will be updated + @param[in] truth: ground truth data, used to initialize new variables + @param[currPoseIndex]: index of the current pose + ''' + # iSAM expects us to give it a new set of factors + # along with initial estimates for any new variables introduced. + newFactors = gtsam.NonlinearFactorGraph() + initialEstimates = gtsam.Values() + + # Add odometry + prevPoseIndex = currPoseIndex - 1 + odometry = data.odometry[prevPoseIndex] + newFactors.add( + gtsam.BetweenFactorPose3( + symbol(ord('x'), prevPoseIndex), + symbol(ord('x'), currPoseIndex), odometry, + data.noiseModels.odometry)) + + # Add visual measurement factors and initializations as necessary + for k in range(len(data.Z[currPoseIndex])): + zij = data.Z[currPoseIndex][k] + j = data.J[currPoseIndex][k] + jj = symbol(ord('l'), j) + newFactors.add( + gtsam.GenericProjectionFactorCal3_S2( + zij, data.noiseModels.measurement, + symbol(ord('x'), currPoseIndex), jj, data.K)) + # TODO: initialize with something other than truth + if not result.exists(jj) and not initialEstimates.exists(jj): + lmInit = truth.points[j] + initialEstimates.insert(jj, lmInit) + + # Initial estimates for the new pose. 
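+    # Dead-reckon from the previous estimate: compose the last optimized pose
+    # with the measured odometry to get an initial guess for the new pose.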
+ prevPose = result.atPose3(symbol(ord('x'), prevPoseIndex)) + initialEstimates.insert( + symbol(ord('x'), currPoseIndex), prevPose.compose(odometry)) + + # Update ISAM + # figure(1)tic + isam.update(newFactors, initialEstimates) + # t=toc plot(frame_i,t,'r.') tic + newResult = isam.calculateEstimate() + # t=toc plot(frame_i,t,'g.') + + return isam, newResult From 7873c3608878bbba99860a36423afa4ee0a480d8 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 27 Jul 2020 09:30:24 -0400 Subject: [PATCH 002/142] Add unstable files --- python/gtsam_unstable_py/python/__init__.py | 1 + .../examples/FixedLagSmootherExample.py | 102 +++++++++++++ .../python/examples/TimeOfArrivalExample.py | 128 +++++++++++++++++ .../python/examples/__init__.py | 0 .../python/tests/__init__.py | 0 .../tests/test_FixedLagSmootherExample.py | 135 ++++++++++++++++++ 6 files changed, 366 insertions(+) create mode 100644 python/gtsam_unstable_py/python/__init__.py create mode 100644 python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py create mode 100644 python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py create mode 100644 python/gtsam_unstable_py/python/examples/__init__.py create mode 100644 python/gtsam_unstable_py/python/tests/__init__.py create mode 100644 python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py diff --git a/python/gtsam_unstable_py/python/__init__.py b/python/gtsam_unstable_py/python/__init__.py new file mode 100644 index 000000000..3195e6da4 --- /dev/null +++ b/python/gtsam_unstable_py/python/__init__.py @@ -0,0 +1 @@ +from .gtsam_unstable import * diff --git a/python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py b/python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py new file mode 100644 index 000000000..786701e0f --- /dev/null +++ b/python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py @@ -0,0 +1,102 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + +See LICENSE for the license information + +Demonstration of the fixed-lag smoothers using a planar robot example +and multiple odometry-like sensors +Author: Frank Dellaert (C++), Jeremy Aguilon (Python) +""" + +import numpy as np + +import gtsam +import gtsam_unstable + + +def _timestamp_key_value(key, value): + """ + + """ + return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( + key, value + ) + + +def BatchFixedLagSmootherExample(): + """ + Runs a batch fixed smoother on an agent with two odometry + sensors that is simply moving to the + """ + + # Define a batch fixed lag smoother, which uses + # Levenberg-Marquardt to perform the nonlinear optimization + lag = 2.0 + smoother_batch = gtsam_unstable.BatchFixedLagSmoother(lag) + + # Create containers to store the factors and linearization points + # that will be sent to the smoothers + new_factors = gtsam.NonlinearFactorGraph() + new_values = gtsam.Values() + new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap() + + # Create a prior on the first pose, placing it at the origin + prior_mean = gtsam.Pose2(0, 0, 0) + prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + X1 = 0 + new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise)) + new_values.insert(X1, prior_mean) + new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + + delta_time = 0.25 + time = 0.25 + + while time <= 3.0: + previous_key = 1000 * (time - delta_time) + current_key = 1000 * time + + # assign current key to the current timestamp + new_timestamps.insert(_timestamp_key_value(current_key, time)) + + # Add a guess for this pose to the new values + # Assume that the robot moves at 2 m/s. Position is time[s] * 2[m/s] + current_pose = gtsam.Pose2(time * 2, 0, 0) + new_values.insert(current_key, current_pose) + + # Add odometry factors from two different sources with different error + # stats + odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) + odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.1, 0.1, 0.05])) + new_factors.push_back(gtsam.BetweenFactorPose2( + previous_key, current_key, odometry_measurement_1, odometry_noise_1 + )) + + odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) + odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.05, 0.05, 0.05])) + new_factors.push_back(gtsam.BetweenFactorPose2( + previous_key, current_key, odometry_measurement_2, odometry_noise_2 + )) + + # Update the smoothers with the new factors. In this case, + # one iteration must pass for Levenberg-Marquardt to accurately + # estimate + if time >= 0.50: + smoother_batch.update(new_factors, new_values, new_timestamps) + print("Timestamp = " + str(time) + ", Key = " + str(current_key)) + print(smoother_batch.calculateEstimatePose2(current_key)) + + new_timestamps.clear() + new_values.clear() + new_factors.resize(0) + + time += delta_time + + +if __name__ == '__main__': + BatchFixedLagSmootherExample() + print("Example complete") diff --git a/python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py b/python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py new file mode 100644 index 000000000..6ba06f0f2 --- /dev/null +++ b/python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py @@ -0,0 +1,128 @@ +""" +GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + +See LICENSE for the license information + +Track a moving object "Time of Arrival" measurements at 4 microphones. +Author: Frank Dellaert +""" +# pylint: disable=invalid-name, no-name-in-module + +from gtsam import (LevenbergMarquardtOptimizer, LevenbergMarquardtParams, + NonlinearFactorGraph, Point3, Values, noiseModel_Isotropic) +from gtsam_unstable import Event, TimeOfArrival, TOAFactor + +# units +MS = 1e-3 +CM = 1e-2 + +# Instantiate functor with speed of sound value +TIME_OF_ARRIVAL = TimeOfArrival(330) + + +def define_microphones(): + """Create microphones.""" + height = 0.5 + microphones = [] + microphones.append(Point3(0, 0, height)) + microphones.append(Point3(403 * CM, 0, height)) + microphones.append(Point3(403 * CM, 403 * CM, height)) + microphones.append(Point3(0, 403 * CM, 2 * height)) + return microphones + + +def create_trajectory(n): + """Create ground truth trajectory.""" + trajectory = [] + timeOfEvent = 10 + # simulate emitting a sound every second while moving on straight line + for key in range(n): + trajectory.append( + Event(timeOfEvent, 245 * CM + key * 1.0, 201.5 * CM, (212 - 45) * CM)) + timeOfEvent += 1 + + return trajectory + + +def simulate_one_toa(microphones, event): + """Simulate time-of-arrival measurements for a single event.""" + return [TIME_OF_ARRIVAL.measure(event, microphones[i]) + for i in range(len(microphones))] + + +def simulate_toa(microphones, trajectory): + """Simulate time-of-arrival measurements for an entire trajectory.""" + return [simulate_one_toa(microphones, event) + for event in trajectory] + + +def create_graph(microphones, simulatedTOA): + """Create factor graph.""" + graph = NonlinearFactorGraph() + + # Create a noise model for the TOA error + model = noiseModel_Isotropic.Sigma(1, 0.5 * MS) + + K = len(microphones) + key = 0 + for toa in simulatedTOA: + for i in range(K): + factor = TOAFactor(key, microphones[i], toa[i], model) + graph.push_back(factor) + key += 1 + + return graph + + +def create_initial_estimate(n): + """Create initial estimate for n events.""" + initial = Values() + zero = Event() + for key in range(n): + TOAFactor.InsertEvent(key, zero, initial) + return initial + + +def toa_example(): + """Run example with 4 microphones and 5 events in a straight line.""" + # Create microphones + microphones = define_microphones() + K = len(microphones) + for i in range(K): + print("mic {} = {}".format(i, microphones[i])) + + # Create a ground truth trajectory + n = 5 + groundTruth = create_trajectory(n) + for event in groundTruth: + print(event) + + # Simulate time-of-arrival measurements + simulatedTOA = simulate_toa(microphones, groundTruth) + for key in range(n): + for i in range(K): + print("z_{}{} = {} ms".format(key, i, simulatedTOA[key][i] / MS)) + + # create factor graph + graph = create_graph(microphones, simulatedTOA) + print(graph.at(0)) + + # Create initial estimate + initial_estimate = create_initial_estimate(n) + print(initial_estimate) + + # Optimize using Levenberg-Marquardt optimization. 
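+    # A very tight absolute error tolerance makes LM iterate until the TOA
+    # residuals are essentially zero; "SUMMARY" verbosity prints one line per iteration.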
+ params = LevenbergMarquardtParams() + params.setAbsoluteErrorTol(1e-10) + params.setVerbosityLM("SUMMARY") + optimizer = LevenbergMarquardtOptimizer(graph, initial_estimate, params) + result = optimizer.optimize() + print("Final Result:\n", result) + + +if __name__ == '__main__': + toa_example() + print("Example complete") diff --git a/python/gtsam_unstable_py/python/examples/__init__.py b/python/gtsam_unstable_py/python/examples/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/gtsam_unstable_py/python/tests/__init__.py b/python/gtsam_unstable_py/python/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py b/python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py new file mode 100644 index 000000000..8d3af311f --- /dev/null +++ b/python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py @@ -0,0 +1,135 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Cal3Unified unit tests. +Author: Frank Dellaert & Duy Nguyen Ta (Python) +""" +import unittest + +import numpy as np + +import gtsam +import gtsam_unstable +from gtsam.utils.test_case import GtsamTestCase + + +def _timestamp_key_value(key, value): + return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( + key, value + ) + + +class TestFixedLagSmootherExample(GtsamTestCase): + ''' + Tests the fixed lag smoother wrapper + ''' + + def test_FixedLagSmootherExample(self): + ''' + Simple test that checks for equality between C++ example + file and the Python implementation. See + gtsam_unstable/examples/FixedLagSmootherExample.cpp + ''' + # Define a batch fixed lag smoother, which uses + # Levenberg-Marquardt to perform the nonlinear optimization + lag = 2.0 + smoother_batch = gtsam_unstable.BatchFixedLagSmoother(lag) + + # Create containers to store the factors and linearization points + # that will be sent to the smoothers + new_factors = gtsam.NonlinearFactorGraph() + new_values = gtsam.Values() + new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap() + + # Create a prior on the first pose, placing it at the origin + prior_mean = gtsam.Pose2(0, 0, 0) + prior_noise = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.3, 0.3, 0.1])) + X1 = 0 + new_factors.push_back( + gtsam.PriorFactorPose2( + X1, prior_mean, prior_noise)) + new_values.insert(X1, prior_mean) + new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + + delta_time = 0.25 + time = 0.25 + + i = 0 + + ground_truth = [ + gtsam.Pose2(0.995821, 0.0231012, 0.0300001), + gtsam.Pose2(1.49284, 0.0457247, 0.045), + gtsam.Pose2(1.98981, 0.0758879, 0.06), + gtsam.Pose2(2.48627, 0.113502, 0.075), + gtsam.Pose2(2.98211, 0.158558, 0.09), + gtsam.Pose2(3.47722, 0.211047, 0.105), + gtsam.Pose2(3.97149, 0.270956, 0.12), + gtsam.Pose2(4.4648, 0.338272, 0.135), + gtsam.Pose2(4.95705, 0.41298, 0.15), + gtsam.Pose2(5.44812, 0.495063, 0.165), + gtsam.Pose2(5.9379, 0.584503, 0.18), + ] + + # Iterates from 0.25s to 3.0s, adding 0.25s each loop + # In each iteration, the agent moves at a constant speed + # and its two odometers measure the change. 
The smoothed + # result is then compared to the ground truth + while time <= 3.0: + previous_key = 1000 * (time - delta_time) + current_key = 1000 * time + + # assign current key to the current timestamp + new_timestamps.insert(_timestamp_key_value(current_key, time)) + + # Add a guess for this pose to the new values + # Assume that the robot moves at 2 m/s. Position is time[s] * + # 2[m/s] + current_pose = gtsam.Pose2(time * 2, 0, 0) + new_values.insert(current_key, current_pose) + + # Add odometry factors from two different sources with different + # error stats + odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) + odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.1, 0.1, 0.05])) + new_factors.push_back( + gtsam.BetweenFactorPose2( + previous_key, + current_key, + odometry_measurement_1, + odometry_noise_1)) + + odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) + odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + np.array([0.05, 0.05, 0.05])) + new_factors.push_back( + gtsam.BetweenFactorPose2( + previous_key, + current_key, + odometry_measurement_2, + odometry_noise_2)) + + # Update the smoothers with the new factors. In this case, + # one iteration must pass for Levenberg-Marquardt to accurately + # estimate + if time >= 0.50: + smoother_batch.update(new_factors, new_values, new_timestamps) + + estimate = smoother_batch.calculateEstimatePose2(current_key) + self.assertTrue(estimate.equals(ground_truth[i], 1e-4)) + i += 1 + + new_timestamps.clear() + new_values.clear() + new_factors.resize(0) + + time += delta_time + + +if __name__ == "__main__": + unittest.main() From 9216934ca8295a00200121a687d4378abbcb8350 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 27 Jul 2020 09:32:31 -0400 Subject: [PATCH 003/142] Replace with new python tests --- python/gtsam_py/python/__init__.py | 29 +-- .../python/examples/DogLegOptimizerExample.py | 6 +- .../python/examples/GPSFactorExample.py | 4 +- .../python/examples/ImuFactorExample.py | 50 ++--- .../python/examples/ImuFactorExample2.py | 77 +++---- .../python/examples/OdometryExample.py | 4 +- .../examples/PlanarManipulatorExample.py | 6 +- .../python/examples/PlanarSLAMExample.py | 21 +- .../python/examples/Pose2SLAMExample.py | 4 +- .../python/examples/Pose2SLAMExample_g2o.py | 9 +- .../python/examples/Pose3SLAMExample_g2o.py | 13 +- ...Pose3SLAMExample_initializePose3Chordal.py | 2 +- .../python/examples/PreintegrationExample.py | 4 +- python/gtsam_py/python/examples/README.md | 9 +- python/gtsam_py/python/examples/SFMExample.py | 33 +-- python/gtsam_py/python/examples/SFMdata.py | 3 +- .../python/examples/SimpleRotation.py | 7 +- .../python/examples/VisualISAM2Example.py | 27 ++- .../python/examples/VisualISAMExample.py | 33 +-- python/gtsam_py/python/tests/__init__.py | 0 .../python/tests/testScenarioRunner.py | 2 +- .../python/tests/test_GaussianFactorGraph.py | 13 +- .../python/tests/test_JacobianFactor.py | 6 +- .../python/tests/test_KalmanFilter.py | 4 +- .../python/tests/test_KarcherMeanFactor.py | 6 +- .../python/tests/test_LocalizationExample.py | 4 +- .../python/tests/test_NonlinearOptimizer.py | 15 +- .../python/tests/test_OdometryExample.py | 4 +- .../python/tests/test_PlanarSLAMExample.py | 6 +- .../python/tests/test_Pose2SLAMExample.py | 6 +- .../python/tests/test_Pose3SLAMExample.py | 2 +- .../gtsam_py/python/tests/test_PriorFactor.py | 26 +-- .../gtsam_py/python/tests/test_SFMExample.py | 24 +-- python/gtsam_py/python/tests/test_Scenario.py | 5 +- .../python/tests/test_SimpleCamera.py | 8 +- 
.../python/tests/test_StereoVOExample.py | 12 +- .../python/tests/test_Triangulation.py | 80 +++++++ python/gtsam_py/python/tests/test_Values.py | 9 +- .../python/tests/test_VisualISAMExample.py | 4 +- python/gtsam_py/python/tests/test_dataset.py | 6 +- .../python/tests/test_initialize_pose3.py | 7 +- .../python/tests/test_logging_optimizer.py | 2 +- python/gtsam_py/python/utils/circlePose3.py | 6 +- python/gtsam_py/python/utils/plot.py | 200 +++--------------- python/gtsam_py/python/utils/test_case.py | 6 +- .../python/utils/visual_data_generator.py | 53 +++-- python/gtsam_py/python/utils/visual_isam.py | 22 +- .../examples/FixedLagSmootherExample.py | 24 +-- .../python/examples/TimeOfArrivalExample.py | 4 +- .../tests/test_FixedLagSmootherExample.py | 21 +- 50 files changed, 408 insertions(+), 520 deletions(-) delete mode 100644 python/gtsam_py/python/tests/__init__.py create mode 100644 python/gtsam_py/python/tests/test_Triangulation.py diff --git a/python/gtsam_py/python/__init__.py b/python/gtsam_py/python/__init__.py index d40ee4502..9e3e19187 100644 --- a/python/gtsam_py/python/__init__.py +++ b/python/gtsam_py/python/__init__.py @@ -1,26 +1,9 @@ from .gtsam import * -try: - import gtsam_unstable - - - def _deprecated_wrapper(item, name): - def wrapper(*args, **kwargs): - from warnings import warn - message = ('importing the unstable item "{}" directly from gtsam is deprecated. '.format(name) + - 'Please import it from gtsam_unstable.') - warn(message) - return item(*args, **kwargs) - return wrapper - - - for name in dir(gtsam_unstable): - if not name.startswith('__'): - item = getattr(gtsam_unstable, name) - if callable(item): - item = _deprecated_wrapper(item, name) - globals()[name] = item - -except ImportError: - pass +def Point2(x=0, y=0): + import numpy as np + return np.array([x, y], dtype=float) +def Point3(x=0, y=0, z=0): + import numpy as np + return np.array([x, y, z], dtype=float) diff --git a/python/gtsam_py/python/examples/DogLegOptimizerExample.py b/python/gtsam_py/python/examples/DogLegOptimizerExample.py index 776ceedc4..26f4fef84 100644 --- a/python/gtsam_py/python/examples/DogLegOptimizerExample.py +++ b/python/gtsam_py/python/examples/DogLegOptimizerExample.py @@ -35,17 +35,17 @@ def run(args): graph = gtsam.NonlinearFactorGraph() # Priors - prior = gtsam.noiseModel_Isotropic.Sigma(3, 1) + prior = gtsam.noiseModel.Isotropic.Sigma(3, 1) graph.add(gtsam.PriorFactorPose2(11, T11, prior)) graph.add(gtsam.PriorFactorPose2(21, T21, prior)) # Odometry - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.01, 0.01, 0.3])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.01, 0.01, 0.3])) graph.add(gtsam.BetweenFactorPose2(11, 12, T11.between(T12), model)) graph.add(gtsam.BetweenFactorPose2(21, 22, T21.between(T22), model)) # Range - model_rho = gtsam.noiseModel_Isotropic.Sigma(1, 0.01) + model_rho = gtsam.noiseModel.Isotropic.Sigma(1, 0.01) graph.add(gtsam.RangeFactorPose2(12, 22, 1.0, model_rho)) params = gtsam.DoglegParams() diff --git a/python/gtsam_py/python/examples/GPSFactorExample.py b/python/gtsam_py/python/examples/GPSFactorExample.py index 493a07725..0bc0d1bf3 100644 --- a/python/gtsam_py/python/examples/GPSFactorExample.py +++ b/python/gtsam_py/python/examples/GPSFactorExample.py @@ -26,8 +26,8 @@ lon0 = -84.30626 h0 = 274 # Create noise models -GPS_NOISE = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) -PRIOR_NOISE = gtsam.noiseModel_Isotropic.Sigma(6, 0.25) +GPS_NOISE = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) +PRIOR_NOISE = 
gtsam.noiseModel.Isotropic.Sigma(6, 0.25) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/python/gtsam_py/python/examples/ImuFactorExample.py b/python/gtsam_py/python/examples/ImuFactorExample.py index 0e01766e7..adaa3c08a 100644 --- a/python/gtsam_py/python/examples/ImuFactorExample.py +++ b/python/gtsam_py/python/examples/ImuFactorExample.py @@ -5,27 +5,32 @@ All Rights Reserved See LICENSE for the license information -A script validating and demonstrating the ImuFactor inference. - -Author: Frank Dellaert, Varun Agrawal +A script validating the ImuFactor inference. """ from __future__ import print_function import math -import gtsam import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_B as B -from gtsam import symbol_shorthand_V as V -from gtsam import symbol_shorthand_X as X -from gtsam.utils.plot import plot_pose3 from mpl_toolkits.mplot3d import Axes3D +import gtsam +from gtsam.utils.plot import plot_pose3 from PreintegrationExample import POSES_FIG, PreintegrationExample -BIAS_KEY = B(0) +BIAS_KEY = int(gtsam.symbol('b', 0)) + + +def X(key): + """Create symbol for pose key.""" + return gtsam.symbol('x', key) + + +def V(key): + """Create symbol for velocity key.""" + return gtsam.symbol('v', key) np.set_printoptions(precision=3, suppress=True) @@ -35,8 +40,8 @@ class ImuFactorExample(PreintegrationExample): def __init__(self): self.velocity = np.array([2, 0, 0]) - self.priorNoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) - self.velNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) + self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) # Choose one of these twists to change scenario: zero_twist = (np.zeros(3), np.zeros(3)) @@ -47,7 +52,7 @@ class ImuFactorExample(PreintegrationExample): accBias = np.array([-0.3, 0.1, 0.2]) gyroBias = np.array([0.1, 0.3, -0.1]) - bias = gtsam.imuBias_ConstantBias(accBias, gyroBias) + bias = gtsam.imuBias.ConstantBias(accBias, gyroBias) dt = 1e-2 super(ImuFactorExample, self).__init__(sick_twist, bias, dt) @@ -71,14 +76,8 @@ class ImuFactorExample(PreintegrationExample): initial.insert(BIAS_KEY, self.actualBias) for i in range(num_poses): state_i = self.scenario.navState(float(i)) - - poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) - pose = state_i.pose().compose(poseNoise) - - velocity = state_i.velocity() + np.random.randn(3)*0.1 - - initial.insert(X(i), pose) - initial.insert(V(i), velocity) + initial.insert(X(i), state_i.pose()) + initial.insert(V(i), state_i.velocity()) # simulate the loop i = 0 # state index @@ -89,12 +88,6 @@ class ImuFactorExample(PreintegrationExample): measuredAcc = self.runner.measuredSpecificForce(t) pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt) - poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) - - actual_state_i = gtsam.NavState( - actual_state_i.pose().compose(poseNoise), - actual_state_i.velocity() + np.random.randn(3)*0.1) - # Plot IMU many times if k % 10 == 0: self.plotImu(t, measuredOmega, measuredAcc) @@ -140,10 +133,7 @@ class ImuFactorExample(PreintegrationExample): pose_i = result.atPose3(X(i)) plot_pose3(POSES_FIG, pose_i, 0.1) i += 1 - - gtsam.utils.plot.set_axes_equal(POSES_FIG) - - print(result.atimuBias_ConstantBias(BIAS_KEY)) + print(result.atimuBias.ConstantBias(BIAS_KEY)) plt.ioff() plt.show() diff --git a/python/gtsam_py/python/examples/ImuFactorExample2.py b/python/gtsam_py/python/examples/ImuFactorExample2.py index 0d98f08fe..39e22eb17 
100644 --- a/python/gtsam_py/python/examples/ImuFactorExample2.py +++ b/python/gtsam_py/python/examples/ImuFactorExample2.py @@ -8,20 +8,23 @@ from __future__ import print_function import math -import gtsam -import gtsam.utils.plot as gtsam_plot import matplotlib.pyplot as plt import numpy as np -from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, - ConstantTwistScenario, ImuFactor, NonlinearFactorGraph, - PinholeCameraCal3_S2, Point3, Pose3, - PriorFactorConstantBias, PriorFactorPose3, - PriorFactorVector, Rot3, Values) -from gtsam import symbol_shorthand_B as B -from gtsam import symbol_shorthand_V as V -from gtsam import symbol_shorthand_X as X from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 +import gtsam +import gtsam.utils.plot as gtsam_plot + + +def X(key): + """Create symbol for pose key.""" + return gtsam.symbol('x', key) + + +def V(key): + """Create symbol for velocity key.""" + return gtsam.symbol('v', key) + def vector3(x, y, z): """Create 3d double numpy array.""" @@ -40,7 +43,7 @@ def ISAM2_plot(values, fignum=0): while values.exists(X(i)): pose_i = values.atPose3(X(i)) gtsam_plot.plot_pose3(fignum, pose_i, 10) - position = pose_i.translation().vector() + position = pose_i.translation() min_bounds = [min(v1, v2) for (v1, v2) in zip(position, min_bounds)] max_bounds = [max(v1, v2) for (v1, v2) in zip(position, max_bounds)] # max_bounds = min(pose_i.x(), max_bounds[0]), 0, 0 @@ -65,20 +68,21 @@ PARAMS.setIntegrationCovariance(I * 0.1) PARAMS.setUse2ndOrderCoriolis(False) PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) -BIAS_COVARIANCE = gtsam.noiseModel_Isotropic.Variance(6, 0.1) -DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), - Point3(0.05, -0.10, 0.20)) +BIAS_COVARIANCE = gtsam.noiseModel.Isotropic.Variance(6, 0.1) +DELTA = gtsam.Pose3(gtsam.Rot3.Rodrigues(0, 0, 0), + gtsam.Point3(0.05, -0.10, 0.20)) def IMU_example(): """Run iSAM 2 example with IMU factor.""" + ZERO_BIAS = gtsam.imuBias.ConstantBias() # Start with a camera on x-axis looking at origin radius = 30 - up = Point3(0, 0, 1) - target = Point3(0, 0, 0) - position = Point3(radius, 0, 0) - camera = PinholeCameraCal3_S2.Lookat(position, target, up, Cal3_S2()) + up = gtsam.Point3(0, 0, 1) + target = gtsam.Point3(0, 0, 0) + position = gtsam.Point3(radius, 0, 0) + camera = gtsam.SimpleCamera.Lookat(position, target, up, gtsam.Cal3_S2()) pose_0 = camera.pose() # Create the set of ground-truth landmarks and poses @@ -87,37 +91,37 @@ def IMU_example(): angular_velocity_vector = vector3(0, -angular_velocity, 0) linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) - scenario = ConstantTwistScenario( + scenario = gtsam.ConstantTwistScenario( angular_velocity_vector, linear_velocity_vector, pose_0) # Create a factor graph - newgraph = NonlinearFactorGraph() + newgraph = gtsam.NonlinearFactorGraph() # Create (incremental) ISAM2 solver - isam = ISAM2() + isam = gtsam.ISAM2() # Create the initial estimate to the solution # Intentionally initialize the variables off from the ground truth - initialEstimate = Values() + initialEstimate = gtsam.Values() # Add a prior on pose x0. This indirectly specifies where the origin is. 
# 30cm std on x,y,z 0.1 rad on roll,pitch,yaw - noise = gtsam.noiseModel_Diagonal.Sigmas( + noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) - newgraph.push_back(PriorFactorPose3(X(0), pose_0, noise)) + newgraph.push_back(gtsam.PriorFactorPose3(X(0), pose_0, noise)) # Add imu priors - biasKey = B(0) - biasnoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) - biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias_ConstantBias(), - biasnoise) + biasKey = gtsam.symbol('b', 0) + biasnoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) + biasprior = gtsam.PriorFactorConstantBias(biasKey, ZERO_BIAS, + biasnoise) newgraph.push_back(biasprior) - initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) - velnoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + initialEstimate.insert(biasKey, ZERO_BIAS) + velnoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) # Calculate with correct initial velocity n_velocity = vector3(0, angular_velocity * radius, 0) - velprior = PriorFactorVector(V(0), n_velocity, velnoise) + velprior = gtsam.PriorFactorVector(V(0), n_velocity, velnoise) newgraph.push_back(velprior) initialEstimate.insert(V(0), n_velocity) @@ -138,10 +142,10 @@ def IMU_example(): # Add Bias variables periodically if i % 5 == 0: biasKey += 1 - factor = BetweenFactorConstantBias( - biasKey - 1, biasKey, gtsam.imuBias_ConstantBias(), BIAS_COVARIANCE) + factor = gtsam.BetweenFactorConstantBias( + biasKey - 1, biasKey, ZERO_BIAS, BIAS_COVARIANCE) newgraph.add(factor) - initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) + initialEstimate.insert(biasKey, ZERO_BIAS) # Predict acceleration and gyro measurements in (actual) body frame nRb = scenario.rotation(t).matrix() @@ -151,7 +155,8 @@ def IMU_example(): accum.integrateMeasurement(measuredAcc, measuredOmega, delta_t) # Add Imu Factor - imufac = ImuFactor(X(i - 1), V(i - 1), X(i), V(i), biasKey, accum) + imufac = gtsam.ImuFactor( + X(i - 1), V(i - 1), X(i), V(i), biasKey, accum) newgraph.add(imufac) # insert new velocity, which is wrong @@ -164,7 +169,7 @@ def IMU_example(): ISAM2_plot(result) # reset - newgraph = NonlinearFactorGraph() + newgraph = gtsam.NonlinearFactorGraph() initialEstimate.clear() diff --git a/python/gtsam_py/python/examples/OdometryExample.py b/python/gtsam_py/python/examples/OdometryExample.py index e778e3f85..8b519ce9a 100644 --- a/python/gtsam_py/python/examples/OdometryExample.py +++ b/python/gtsam_py/python/examples/OdometryExample.py @@ -21,8 +21,8 @@ import matplotlib.pyplot as plt import gtsam.utils.plot as gtsam_plot # Create noise models -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/python/gtsam_py/python/examples/PlanarManipulatorExample.py b/python/gtsam_py/python/examples/PlanarManipulatorExample.py index e42ae09d7..9af4f7fcc 100644 --- a/python/gtsam_py/python/examples/PlanarManipulatorExample.py +++ b/python/gtsam_py/python/examples/PlanarManipulatorExample.py @@ -167,13 +167,11 @@ class ThreeLinkArm(object): axes = fig.gca() sXl1 = Pose2(0, 0, math.radians(90)) - t = sXl1.translation() - p1 = np.array([t.x(), t.y()]) + p1 = sXl1.translation() gtsam_plot.plot_pose2_on_axes(axes, sXl1) def plot_line(p, g, color): - t = 
g.translation() - q = np.array([t.x(), t.y()]) + q = g.translation() line = np.append(p[np.newaxis], q[np.newaxis], axis=0) axes.plot(line[:, 0], line[:, 1], color) return q diff --git a/python/gtsam_py/python/examples/PlanarSLAMExample.py b/python/gtsam_py/python/examples/PlanarSLAMExample.py index c84f0f834..0baf2fa9a 100644 --- a/python/gtsam_py/python/examples/PlanarSLAMExample.py +++ b/python/gtsam_py/python/examples/PlanarSLAMExample.py @@ -13,25 +13,24 @@ Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python) from __future__ import print_function -import gtsam import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X + +import gtsam # Create noise models -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) -MEASUREMENT_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.2])) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2])) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() # Create the keys corresponding to unknown variables in the factor graph -X1 = X(1) -X2 = X(2) -X3 = X(3) -L1 = L(4) -L2 = L(5) +X1 = gtsam.symbol('x', 1) +X2 = gtsam.symbol('x', 2) +X3 = gtsam.symbol('x', 3) +L1 = gtsam.symbol('l', 4) +L2 = gtsam.symbol('l', 5) # Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model graph.add(gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE)) diff --git a/python/gtsam_py/python/examples/Pose2SLAMExample.py b/python/gtsam_py/python/examples/Pose2SLAMExample.py index 680f2209f..686da4db3 100644 --- a/python/gtsam_py/python/examples/Pose2SLAMExample.py +++ b/python/gtsam_py/python/examples/Pose2SLAMExample.py @@ -29,8 +29,8 @@ def vector3(x, y, z): # Create noise models -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) # 1. Create a factor graph container and add factors to it graph = gtsam.NonlinearFactorGraph() diff --git a/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py b/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py index 09114370d..d27adfc2c 100644 --- a/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py +++ b/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py @@ -53,15 +53,16 @@ graph, initial = gtsam.readG2o(g2oFile, is3D) assert args.kernel == "none", "Supplied kernel type is not yet implemented" # Add prior on the pose having index (key) = 0 -priorModel = gtsam.noiseModel_Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) -graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) +graphWithPrior = graph +priorModel = gtsam.noiseModel.Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) +graphWithPrior.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) params = gtsam.GaussNewtonParams() params.setVerbosity("Termination") params.setMaxIterations(maxIterations) # parameters.setRelativeErrorTol(1e-5) # Create the optimizer ... -optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) +optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params) # ... 
and optimize result = optimizer.optimize() @@ -82,7 +83,7 @@ else: print ("Done!") if args.plot: - resultPoses = gtsam.utilities_extractPose2(result) + resultPoses = gtsam.extractPose2(result) for i in range(resultPoses.shape[0]): plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :])) plt.show() diff --git a/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py b/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py index 3c1a54f7b..20b02500e 100644 --- a/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py +++ b/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py @@ -39,21 +39,22 @@ is3D = True graph, initial = gtsam.readG2o(g2oFile, is3D) # Add Prior on the first key -priorModel = gtsam.noiseModel_Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, +priorModel = gtsam.noiseModel.Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4)) print("Adding prior to g2o file ") +graphWithPrior = graph firstKey = initial.keys().at(0) -graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) +graphWithPrior.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) params = gtsam.GaussNewtonParams() params.setVerbosity("Termination") # this will show info about stopping conds -optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) +optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params) result = optimizer.optimize() print("Optimization complete") -print("initial error = ", graph.error(initial)) -print("final error = ", graph.error(result)) +print("initial error = ", graphWithPrior.error(initial)) +print("final error = ", graphWithPrior.error(result)) if args.output is None: print("Final Result:\n{}".format(result)) @@ -65,7 +66,7 @@ else: print ("Done!") if args.plot: - resultPoses = gtsam.utilities_allPose3s(result) + resultPoses = gtsam.allPose3s(result) for i in range(resultPoses.size()): plot.plot_pose3(1, resultPoses.atPose3(i)) plt.show() diff --git a/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py b/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py index 02c696905..140669c95 100644 --- a/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py +++ b/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py @@ -24,7 +24,7 @@ is3D = True graph, initial = gtsam.readG2o(g2oFile, is3D) # Add prior on the first key. 
TODO: assumes first key ios z -priorModel = gtsam.noiseModel_Diagonal.Variances( +priorModel = gtsam.noiseModel.Diagonal.Variances( np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4])) firstKey = initial.keys().at(0) graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel)) diff --git a/python/gtsam_py/python/examples/PreintegrationExample.py b/python/gtsam_py/python/examples/PreintegrationExample.py index 76520b355..e543e5d82 100644 --- a/python/gtsam_py/python/examples/PreintegrationExample.py +++ b/python/gtsam_py/python/examples/PreintegrationExample.py @@ -68,7 +68,7 @@ class PreintegrationExample(object): else: accBias = np.array([0, 0.1, 0]) gyroBias = np.array([0, 0, 0]) - self.actualBias = gtsam.imuBias_ConstantBias(accBias, gyroBias) + self.actualBias = gtsam.imuBias.ConstantBias(accBias, gyroBias) self.runner = gtsam.ScenarioRunner( self.scenario, self.params, self.dt, self.actualBias) @@ -111,7 +111,7 @@ class PreintegrationExample(object): actualPose = self.scenario.pose(t) plot_pose3(POSES_FIG, actualPose, 0.3) t = actualPose.translation() - self.maxDim = max([abs(t.x()), abs(t.y()), abs(t.z()), self.maxDim]) + self.maxDim = max([max(t), self.maxDim]) ax = plt.gca() ax.set_xlim3d(-self.maxDim, self.maxDim) ax.set_ylim3d(-self.maxDim, self.maxDim) diff --git a/python/gtsam_py/python/examples/README.md b/python/gtsam_py/python/examples/README.md index 99bce00e2..ec4437d4c 100644 --- a/python/gtsam_py/python/examples/README.md +++ b/python/gtsam_py/python/examples/README.md @@ -1,7 +1,8 @@ -These examples are almost identical to the old handwritten python wrapper -examples. However, there are just some slight name changes, for example -`noiseModel.Diagonal` becomes `noiseModel_Diagonal` etc... -Also, instead of `gtsam.Symbol('b', 0)` we can simply say `gtsam.symbol_shorthand_B(0)` or `B(0)` if we use python aliasing. +# THIS FILE IS OUTDATED! + +~~These examples are almost identical to the old handwritten python wrapper examples. However, there are just some slight name changes, for example `noiseModel.Diagonal` becomes `noiseModel.Diagonal` etc...~~ + +~~Also, annoyingly, instead of `gtsam.Symbol('b', 0)` we now need to say `gtsam.symbol('b', 0))`~~ # Porting Progress diff --git a/python/gtsam_py/python/examples/SFMExample.py b/python/gtsam_py/python/examples/SFMExample.py index e02def2f9..0c03a105d 100644 --- a/python/gtsam_py/python/examples/SFMExample.py +++ b/python/gtsam_py/python/examples/SFMExample.py @@ -10,20 +10,24 @@ A structure-from-motion problem on a simulated dataset """ from __future__ import print_function -import gtsam import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X + +import gtsam from gtsam.examples import SFMdata from gtsam.gtsam import (Cal3_S2, DoglegOptimizer, GenericProjectionFactorCal3_S2, Marginals, - NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, - Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, + NonlinearFactorGraph, Point3, Pose3, + PriorFactorPoint3, PriorFactorPose3, Rot3, SimpleCamera, Values) from gtsam.utils import plot +def symbol(name: str, index: int) -> int: + """ helper for creating a symbol without explicitly casting 'name' from str to int """ + return gtsam.symbol(name, index) + + def main(): """ Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y). 
@@ -56,7 +60,7 @@ def main(): K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + measurement_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() @@ -69,24 +73,24 @@ def main(): # Add a prior on pose x1. This indirectly specifies where the origin is. # 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) - factor = PriorFactorPose3(X(0), poses[0], pose_noise) + pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + factor = PriorFactorPose3(symbol('x', 0), poses[0], pose_noise) graph.push_back(factor) # Simulated measurements from each camera pose, adding them to the factor graph for i, pose in enumerate(poses): - camera = PinholeCameraCal3_S2(pose, K) + camera = SimpleCamera(pose, K) for j, point in enumerate(points): measurement = camera.project(point) factor = GenericProjectionFactorCal3_S2( - measurement, measurement_noise, X(i), L(j), K) + measurement, measurement_noise, symbol('x', i), symbol('l', j), K) graph.push_back(factor) # Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained # Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance # between the first camera and the first landmark. All other landmark positions are interpreted using this scale. - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) - factor = PriorFactorPoint3(L(0), points[0], point_noise) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) + factor = PriorFactorPoint3(symbol('l', 0), points[0], point_noise) graph.push_back(factor) graph.print_('Factor Graph:\n') @@ -95,10 +99,10 @@ def main(): initial_estimate = Values() for i, pose in enumerate(poses): transformed_pose = pose.retract(0.1*np.random.randn(6,1)) - initial_estimate.insert(X(i), transformed_pose) + initial_estimate.insert(symbol('x', i), transformed_pose) for j, point in enumerate(points): transformed_point = Point3(point.vector() + 0.1*np.random.randn(3)) - initial_estimate.insert(L(j), transformed_point) + initial_estimate.insert(symbol('l', j), transformed_point) initial_estimate.print_('Initial Estimates:\n') # Optimize the graph and print results @@ -117,5 +121,6 @@ def main(): plot.set_axes_equal(1) plt.show() + if __name__ == '__main__': main() diff --git a/python/gtsam_py/python/examples/SFMdata.py b/python/gtsam_py/python/examples/SFMdata.py index c586f7e52..6ac9c5726 100644 --- a/python/gtsam_py/python/examples/SFMdata.py +++ b/python/gtsam_py/python/examples/SFMdata.py @@ -33,7 +33,8 @@ def createPoses(K): poses = [] for theta in angles: position = gtsam.Point3(radius*np.cos(theta), - radius*np.sin(theta), height) + radius*np.sin(theta), + height) camera = gtsam.PinholeCameraCal3_S2.Lookat(position, target, up, K) poses.append(camera.pose()) return poses diff --git a/python/gtsam_py/python/examples/SimpleRotation.py b/python/gtsam_py/python/examples/SimpleRotation.py index 4e82d3778..2dd7b1194 100644 --- a/python/gtsam_py/python/examples/SimpleRotation.py +++ b/python/gtsam_py/python/examples/SimpleRotation.py @@ -10,9 +10,8 @@ This example will perform a relatively trivial optimization on a single variable with a single factor. 
""" -import gtsam import numpy as np -from gtsam import symbol_shorthand_X as X +import gtsam def main(): @@ -33,8 +32,8 @@ def main(): """ prior = gtsam.Rot2.fromAngle(np.deg2rad(30)) prior.print_('goal angle') - model = gtsam.noiseModel_Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) - key = X(1) + model = gtsam.noiseModel.Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) + key = gtsam.symbol('x', 1) factor = gtsam.PriorFactorRot2(key, prior, model) """ diff --git a/python/gtsam_py/python/examples/VisualISAM2Example.py b/python/gtsam_py/python/examples/VisualISAM2Example.py index 49e6ca95c..d33ec98bf 100644 --- a/python/gtsam_py/python/examples/VisualISAM2Example.py +++ b/python/gtsam_py/python/examples/VisualISAM2Example.py @@ -13,15 +13,24 @@ Author: Duy-Nguyen Ta (C++), Frank Dellaert (Python) from __future__ import print_function -import gtsam -import gtsam.utils.plot as gtsam_plot import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X -from gtsam.examples import SFMdata from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 +import gtsam +import gtsam.utils.plot as gtsam_plot +from gtsam.examples import SFMdata + + +def X(i): + """Create key for pose i.""" + return int(gtsam.symbol('x', i)) + + +def L(j): + """Create key for landmark j.""" + return int(gtsam.symbol('l', j)) + def visual_ISAM2_plot(result): """ @@ -64,7 +73,7 @@ def visual_ISAM2_example(): K = gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - measurement_noise = gtsam.noiseModel_Isotropic.Sigma( + measurement_noise = gtsam.noiseModel.Isotropic.Sigma( 2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks @@ -110,12 +119,12 @@ def visual_ISAM2_example(): # at least twice before adding it to iSAM. 
if i == 0: # Add a prior on pose x0 - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array( + pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array( [0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], pose_noise)) # Add a prior on landmark l0 - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) graph.push_back(gtsam.PriorFactorPoint3( L(0), points[0], point_noise)) # add directly to graph @@ -123,7 +132,7 @@ def visual_ISAM2_example(): # Intentionally initialize the variables off from the ground truth for j, point in enumerate(points): initial_estimate.insert(L(j), gtsam.Point3( - point.x()-0.25, point.y()+0.20, point.z()+0.15)) + point[0]-0.25, point[1]+0.20, point[2]+0.15)) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) diff --git a/python/gtsam_py/python/examples/VisualISAMExample.py b/python/gtsam_py/python/examples/VisualISAMExample.py index 5cc37867b..2e90ce21b 100644 --- a/python/gtsam_py/python/examples/VisualISAMExample.py +++ b/python/gtsam_py/python/examples/VisualISAMExample.py @@ -15,12 +15,15 @@ from __future__ import print_function import numpy as np import gtsam from gtsam.examples import SFMdata -from gtsam.gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, - NonlinearFactorGraph, NonlinearISAM, Point3, Pose3, +from gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, + NonlinearFactorGraph, NonlinearISAM, Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, - PinholeCameraCal3_S2, Values) -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X + SimpleCamera, Values, Point3) + + +def symbol(name: str, index: int) -> int: + """ helper for creating a symbol without explicitly casting 'name' from str to int """ + return gtsam.symbol(name, index) def main(): @@ -34,7 +37,7 @@ def main(): K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - camera_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + camera_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() @@ -51,11 +54,11 @@ def main(): # Loop over the different poses, adding the observations to iSAM incrementally for i, pose in enumerate(poses): - camera = PinholeCameraCal3_S2(pose, K) + camera = SimpleCamera(pose, K) # Add factors for each landmark observation for j, point in enumerate(points): measurement = camera.project(point) - factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, X(i), L(j), K) + factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, symbol('x', i), symbol('l', j), K) graph.push_back(factor) # Intentionally initialize the variables off from the ground truth @@ -63,7 +66,7 @@ def main(): initial_xi = pose.compose(noise) # Add an initial guess for the current pose - initial_estimate.insert(X(i), initial_xi) + initial_estimate.insert(symbol('x', i), initial_xi) # If this is the first iteration, add a prior on the first pose to set the coordinate frame # and a prior on the first landmark to set the scale @@ -71,21 +74,21 @@ def main(): # adding it to iSAM. 
if i == 0: # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) - factor = PriorFactorPose3(X(0), poses[0], pose_noise) + pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + factor = PriorFactorPose3(symbol('x', 0), poses[0], pose_noise) graph.push_back(factor) # Add a prior on landmark l0 - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) - factor = PriorFactorPoint3(L(0), points[0], point_noise) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) + factor = PriorFactorPoint3(symbol('l', 0), points[0], point_noise) graph.push_back(factor) # Add initial guesses to all observed landmarks noise = np.array([-0.25, 0.20, 0.15]) for j, point in enumerate(points): # Intentionally initialize the variables off from the ground truth - initial_lj = points[j].vector() + noise - initial_estimate.insert(L(j), Point3(initial_lj)) + initial_lj = points[j] + noise + initial_estimate.insert(symbol('l', j), initial_lj) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) diff --git a/python/gtsam_py/python/tests/__init__.py b/python/gtsam_py/python/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/python/gtsam_py/python/tests/testScenarioRunner.py b/python/gtsam_py/python/tests/testScenarioRunner.py index 97a97b0ec..2af16a794 100644 --- a/python/gtsam_py/python/tests/testScenarioRunner.py +++ b/python/gtsam_py/python/tests/testScenarioRunner.py @@ -32,7 +32,7 @@ class TestScenarioRunner(GtsamTestCase): dt = 0.1 params = gtsam.PreintegrationParams.MakeSharedU(self.g) - bias = gtsam.imuBias_ConstantBias() + bias = gtsam.imuBias.ConstantBias() runner = gtsam.ScenarioRunner( scenario, params, dt, bias) diff --git a/python/gtsam_py/python/tests/test_GaussianFactorGraph.py b/python/gtsam_py/python/tests/test_GaussianFactorGraph.py index 983825d8b..759de0f3b 100644 --- a/python/gtsam_py/python/tests/test_GaussianFactorGraph.py +++ b/python/gtsam_py/python/tests/test_GaussianFactorGraph.py @@ -15,21 +15,20 @@ from __future__ import print_function import unittest import gtsam -import numpy as np -from gtsam import symbol_shorthand_X as X from gtsam.utils.test_case import GtsamTestCase +import numpy as np def create_graph(): """Create a basic linear factor graph for testing""" graph = gtsam.GaussianFactorGraph() - x0 = X(0) - x1 = X(1) - x2 = X(2) + x0 = gtsam.symbol('x', 0) + x1 = gtsam.symbol('x', 1) + x2 = gtsam.symbol('x', 2) - BETWEEN_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) - PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) + BETWEEN_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) + PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) graph.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE) graph.add(x2, np.eye(1), x1, -np.eye(1), 2*np.ones(1), BETWEEN_NOISE) diff --git a/python/gtsam_py/python/tests/test_JacobianFactor.py b/python/gtsam_py/python/tests/test_JacobianFactor.py index 04433492b..6e049ed47 100644 --- a/python/gtsam_py/python/tests/test_JacobianFactor.py +++ b/python/gtsam_py/python/tests/test_JacobianFactor.py @@ -48,7 +48,7 @@ class TestJacobianFactor(GtsamTestCase): # the RHS b2 = np.array([-1., 1.5, 2., -1.]) sigmas = np.array([1., 1., 1., 1.]) - model4 = gtsam.noiseModel_Diagonal.Sigmas(sigmas) + model4 = gtsam.noiseModel.Diagonal.Sigmas(sigmas) combined = gtsam.JacobianFactor(x2, Ax2, l1, Al1, x1, Ax1, b2, model4) # eliminate the 
first variable (x2) in the combined factor, destructive @@ -66,7 +66,7 @@ class TestJacobianFactor(GtsamTestCase): [+0.00, -8.94427]]) d = np.array([2.23607, -1.56525]) expectedCG = gtsam.GaussianConditional( - x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel_Unit.Create(2)) + x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel.Unit.Create(2)) # check if the result matches self.gtsamAssertEquals(actualCG, expectedCG, 1e-4) @@ -82,7 +82,7 @@ class TestJacobianFactor(GtsamTestCase): # the RHS b1 = np.array([0.0, 0.894427]) - model2 = gtsam.noiseModel_Diagonal.Sigmas(np.array([1., 1.])) + model2 = gtsam.noiseModel.Diagonal.Sigmas(np.array([1., 1.])) expectedLF = gtsam.JacobianFactor(l1, Bl1, x1, Bx1, b1, model2) # check if the result matches the combined (reduced) factor diff --git a/python/gtsam_py/python/tests/test_KalmanFilter.py b/python/gtsam_py/python/tests/test_KalmanFilter.py index 94c41df72..48a91b96c 100644 --- a/python/gtsam_py/python/tests/test_KalmanFilter.py +++ b/python/gtsam_py/python/tests/test_KalmanFilter.py @@ -22,13 +22,13 @@ class TestKalmanFilter(GtsamTestCase): F = np.eye(2) B = np.eye(2) u = np.array([1.0, 0.0]) - modelQ = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + modelQ = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1])) Q = 0.01 * np.eye(2) H = np.eye(2) z1 = np.array([1.0, 0.0]) z2 = np.array([2.0, 0.0]) z3 = np.array([3.0, 0.0]) - modelR = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + modelR = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1])) R = 0.01 * np.eye(2) # Create the set of expected output TestValues diff --git a/python/gtsam_py/python/tests/test_KarcherMeanFactor.py b/python/gtsam_py/python/tests/test_KarcherMeanFactor.py index 6976decc1..a315a506c 100644 --- a/python/gtsam_py/python/tests/test_KarcherMeanFactor.py +++ b/python/gtsam_py/python/tests/test_KarcherMeanFactor.py @@ -18,7 +18,7 @@ import numpy as np from gtsam.utils.test_case import GtsamTestCase KEY = 0 -MODEL = gtsam.noiseModel_Unit.Create(3) +MODEL = gtsam.noiseModel.Unit.Create(3) def find_Karcher_mean_Rot3(rotations): @@ -59,8 +59,8 @@ class TestKarcherMean(GtsamTestCase): R12 = R.compose(R.compose(R)) graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL)) keys = gtsam.KeyVector() - keys.push_back(1) - keys.push_back(2) + keys.append(1) + keys.append(2) graph.add(gtsam.KarcherMeanFactorRot3(keys)) initial = gtsam.Values() diff --git a/python/gtsam_py/python/tests/test_LocalizationExample.py b/python/gtsam_py/python/tests/test_LocalizationExample.py index 6ce65f087..8ae3583f0 100644 --- a/python/gtsam_py/python/tests/test_LocalizationExample.py +++ b/python/gtsam_py/python/tests/test_LocalizationExample.py @@ -26,7 +26,7 @@ class TestLocalizationExample(GtsamTestCase): # Add two odometry factors # create a measurement for both factors (the same in this case) odometry = gtsam.Pose2(2.0, 0.0, 0.0) - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta graph.add(gtsam.BetweenFactorPose2(0, 1, odometry, odometryNoise)) graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) @@ -37,7 +37,7 @@ class TestLocalizationExample(GtsamTestCase): groundTruth.insert(0, gtsam.Pose2(0.0, 0.0, 0.0)) groundTruth.insert(1, gtsam.Pose2(2.0, 0.0, 0.0)) groundTruth.insert(2, gtsam.Pose2(4.0, 0.0, 0.0)) - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1, 10.])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 10.])) for i in range(3): 
graph.add(gtsam.PriorFactorPose2(i, groundTruth.atPose2(i), model)) diff --git a/python/gtsam_py/python/tests/test_NonlinearOptimizer.py b/python/gtsam_py/python/tests/test_NonlinearOptimizer.py index 985dc30a2..3041e325c 100644 --- a/python/gtsam_py/python/tests/test_NonlinearOptimizer.py +++ b/python/gtsam_py/python/tests/test_NonlinearOptimizer.py @@ -17,8 +17,7 @@ import unittest import gtsam from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer, GaussNewtonParams, LevenbergMarquardtOptimizer, - LevenbergMarquardtParams, PCGSolverParameters, - DummyPreconditionerParameters, NonlinearFactorGraph, Ordering, + LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, Point2, PriorFactorPoint2, Values) from gtsam.utils.test_case import GtsamTestCase @@ -30,7 +29,7 @@ class TestScenario(GtsamTestCase): def test_optimize(self): """Do trivial test with three optimizer variants.""" fg = NonlinearFactorGraph() - model = gtsam.noiseModel_Unit.Create(2) + model = gtsam.noiseModel.Unit.Create(2) fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model)) # test error at minimum @@ -62,16 +61,6 @@ class TestScenario(GtsamTestCase): fg, initial_values, lmParams).optimize() self.assertAlmostEqual(0, fg.error(actual2)) - # Levenberg-Marquardt - lmParams = LevenbergMarquardtParams.CeresDefaults() - lmParams.setLinearSolverType("ITERATIVE") - cgParams = PCGSolverParameters() - cgParams.setPreconditionerParams(DummyPreconditionerParameters()) - lmParams.setIterativeParams(cgParams) - actual2 = LevenbergMarquardtOptimizer( - fg, initial_values, lmParams).optimize() - self.assertAlmostEqual(0, fg.error(actual2)) - # Dogleg dlParams = DoglegParams() dlParams.setOrdering(ordering) diff --git a/python/gtsam_py/python/tests/test_OdometryExample.py b/python/gtsam_py/python/tests/test_OdometryExample.py index c8ea95588..72e532f20 100644 --- a/python/gtsam_py/python/tests/test_OdometryExample.py +++ b/python/gtsam_py/python/tests/test_OdometryExample.py @@ -25,7 +25,7 @@ class TestOdometryExample(GtsamTestCase): # Add a Gaussian prior on pose x_1 priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior mean is at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas( + priorNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.1])) # 30cm std on x,y, 0.1 rad on theta # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) @@ -33,7 +33,7 @@ class TestOdometryExample(GtsamTestCase): # Add two odometry factors # create a measurement for both factors (the same in this case) odometry = gtsam.Pose2(2.0, 0.0, 0.0) - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise)) diff --git a/python/gtsam_py/python/tests/test_PlanarSLAMExample.py b/python/gtsam_py/python/tests/test_PlanarSLAMExample.py index ae813d35c..8cb3ad2ac 100644 --- a/python/gtsam_py/python/tests/test_PlanarSLAMExample.py +++ b/python/gtsam_py/python/tests/test_PlanarSLAMExample.py @@ -32,13 +32,13 @@ class TestPlanarSLAM(GtsamTestCase): # Add prior # gaussian for prior priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) # Add odometry # 
general noisemodel for odometry - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) graph.add(gtsam.BetweenFactorPose2( @@ -49,7 +49,7 @@ class TestPlanarSLAM(GtsamTestCase): 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) # Add pose constraint - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) # Initialize to noisy points diff --git a/python/gtsam_py/python/tests/test_Pose2SLAMExample.py b/python/gtsam_py/python/tests/test_Pose2SLAMExample.py index a79b6b18c..e47b9fbff 100644 --- a/python/gtsam_py/python/tests/test_Pose2SLAMExample.py +++ b/python/gtsam_py/python/tests/test_Pose2SLAMExample.py @@ -32,13 +32,13 @@ class TestPose2SLAMExample(GtsamTestCase): # Add prior # gaussian for prior priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) # Add odometry # general noisemodel for odometry - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) graph.add(gtsam.BetweenFactorPose2( @@ -49,7 +49,7 @@ class TestPose2SLAMExample(GtsamTestCase): 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) # Add pose constraint - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) # Initialize to noisy points diff --git a/python/gtsam_py/python/tests/test_Pose3SLAMExample.py b/python/gtsam_py/python/tests/test_Pose3SLAMExample.py index 1e9eaac67..fce171b55 100644 --- a/python/gtsam_py/python/tests/test_Pose3SLAMExample.py +++ b/python/gtsam_py/python/tests/test_Pose3SLAMExample.py @@ -30,7 +30,7 @@ class TestPose3SLAMExample(GtsamTestCase): fg = gtsam.NonlinearFactorGraph() fg.add(gtsam.NonlinearEqualityPose3(0, p0)) delta = p0.between(p1) - covariance = gtsam.noiseModel_Diagonal.Sigmas( + covariance = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05, 5. * pi / 180, 5. * pi / 180, 5. 
* pi / 180])) fg.add(gtsam.BetweenFactorPose3(0, 1, delta, covariance)) fg.add(gtsam.BetweenFactorPose3(1, 2, delta, covariance)) diff --git a/python/gtsam_py/python/tests/test_PriorFactor.py b/python/gtsam_py/python/tests/test_PriorFactor.py index 66207b800..da072d0bd 100644 --- a/python/gtsam_py/python/tests/test_PriorFactor.py +++ b/python/gtsam_py/python/tests/test_PriorFactor.py @@ -23,39 +23,17 @@ class TestPriorFactor(GtsamTestCase): key = 5 priorPose3 = gtsam.Pose3() - model = gtsam.noiseModel_Unit.Create(6) + model = gtsam.noiseModel.Unit.Create(6) factor = gtsam.PriorFactorPose3(key, priorPose3, model) values.insert(key, priorPose3) self.assertEqual(factor.error(values), 0) key = 3 priorVector = np.array([0., 0., 0.]) - model = gtsam.noiseModel_Unit.Create(3) + model = gtsam.noiseModel.Unit.Create(3) factor = gtsam.PriorFactorVector(key, priorVector, model) values.insert(key, priorVector) self.assertEqual(factor.error(values), 0) - def test_AddPrior(self): - """ - Test adding prior factors directly to factor graph via the .addPrior method. - """ - # define factor graph - graph = gtsam.NonlinearFactorGraph() - - # define and add Pose3 prior - key = 5 - priorPose3 = gtsam.Pose3() - model = gtsam.noiseModel_Unit.Create(6) - graph.addPriorPose3(key, priorPose3, model) - self.assertEqual(graph.size(), 1) - - # define and add Vector prior - key = 3 - priorVector = np.array([0., 0., 0.]) - model = gtsam.noiseModel_Unit.Create(3) - graph.addPriorVector(key, priorVector, model) - self.assertEqual(graph.size(), 2) - - if __name__ == "__main__": unittest.main() diff --git a/python/gtsam_py/python/tests/test_SFMExample.py b/python/gtsam_py/python/tests/test_SFMExample.py index e8fa46186..8b5473ff0 100644 --- a/python/gtsam_py/python/tests/test_SFMExample.py +++ b/python/gtsam_py/python/tests/test_SFMExample.py @@ -34,29 +34,29 @@ class TestSFMExample(GtsamTestCase): graph = gtsam.NonlinearFactorGraph() # Add factors for all measurements - measurementNoise = gtsam.noiseModel_Isotropic.Sigma(2, measurementNoiseSigma) + measurementNoise = gtsam.noiseModel.Isotropic.Sigma(2, measurementNoiseSigma) for i in range(len(data.Z)): for k in range(len(data.Z[i])): j = data.J[i][k] graph.add(gtsam.GenericProjectionFactorCal3_S2( data.Z[i][k], measurementNoise, - symbol(ord('x'), i), symbol(ord('p'), j), data.K)) + symbol('x', i), symbol('p', j), data.K)) - posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas) - graph.add(gtsam.PriorFactorPose3(symbol(ord('x'), 0), + posePriorNoise = gtsam.noiseModel.Diagonal.Sigmas(poseNoiseSigmas) + graph.add(gtsam.PriorFactorPose3(symbol('x', 0), truth.cameras[0].pose(), posePriorNoise)) - pointPriorNoise = gtsam.noiseModel_Isotropic.Sigma(3, pointNoiseSigma) - graph.add(gtsam.PriorFactorPoint3(symbol(ord('p'), 0), + pointPriorNoise = gtsam.noiseModel.Isotropic.Sigma(3, pointNoiseSigma) + graph.add(gtsam.PriorFactorPoint3(symbol('p', 0), truth.points[0], pointPriorNoise)) # Initial estimate initialEstimate = gtsam.Values() for i in range(len(truth.cameras)): pose_i = truth.cameras[i].pose() - initialEstimate.insert(symbol(ord('x'), i), pose_i) + initialEstimate.insert(symbol('x', i), pose_i) for j in range(len(truth.points)): point_j = truth.points[j] - initialEstimate.insert(symbol(ord('p'), j), point_j) + initialEstimate.insert(symbol('p', j), point_j) # Optimization optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) @@ -66,16 +66,16 @@ class TestSFMExample(GtsamTestCase): # Marginalization marginals = gtsam.Marginals(graph, result) - 
marginals.marginalCovariance(symbol(ord('p'), 0)) - marginals.marginalCovariance(symbol(ord('x'), 0)) + marginals.marginalCovariance(symbol('p', 0)) + marginals.marginalCovariance(symbol('x', 0)) # Check optimized results, should be equal to ground truth for i in range(len(truth.cameras)): - pose_i = result.atPose3(symbol(ord('x'), i)) + pose_i = result.atPose3(symbol('x', i)) self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) for j in range(len(truth.points)): - point_j = result.atPoint3(symbol(ord('p'), j)) + point_j = result.atPoint3(symbol('p', j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) if __name__ == "__main__": diff --git a/python/gtsam_py/python/tests/test_Scenario.py b/python/gtsam_py/python/tests/test_Scenario.py index 09601fba5..fc5965829 100644 --- a/python/gtsam_py/python/tests/test_Scenario.py +++ b/python/gtsam_py/python/tests/test_Scenario.py @@ -43,8 +43,11 @@ class TestScenario(GtsamTestCase): # R = v/w, so test if loop crests at 2*R R = v / w T30 = scenario.pose(T) + xyz = T30.rotation().xyz() + if xyz[0] < 0: + xyz = -xyz np.testing.assert_almost_equal( - np.array([math.pi, 0, math.pi]), T30.rotation().xyz()) + np.array([math.pi, 0, math.pi]), xyz) self.gtsamAssertEquals(gtsam.Point3( 0, 0, 2.0 * R), T30.translation(), 1e-9) diff --git a/python/gtsam_py/python/tests/test_SimpleCamera.py b/python/gtsam_py/python/tests/test_SimpleCamera.py index a3654a5f1..efdfec561 100644 --- a/python/gtsam_py/python/tests/test_SimpleCamera.py +++ b/python/gtsam_py/python/tests/test_SimpleCamera.py @@ -5,7 +5,7 @@ All Rights Reserved See LICENSE for the license information -PinholeCameraCal3_S2 unit tests. +SimpleCamera unit tests. Author: Frank Dellaert & Duy Nguyen Ta (Python) """ import math @@ -14,7 +14,7 @@ import unittest import numpy as np import gtsam -from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, PinholeCameraCal3_S2 +from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, SimpleCamera from gtsam.utils.test_case import GtsamTestCase K = Cal3_S2(625, 625, 0, 0, 0) @@ -23,14 +23,14 @@ class TestSimpleCamera(GtsamTestCase): def test_constructor(self): pose1 = Pose3(Rot3(np.diag([1, -1, -1])), Point3(0, 0, 0.5)) - camera = PinholeCameraCal3_S2(pose1, K) + camera = SimpleCamera(pose1, K) self.gtsamAssertEquals(camera.calibration(), K, 1e-9) self.gtsamAssertEquals(camera.pose(), pose1, 1e-9) def test_level2(self): # Create a level camera, looking in Y-direction pose2 = Pose2(0.4,0.3,math.pi/2.0) - camera = PinholeCameraCal3_S2.Level(K, pose2, 0.1) + camera = SimpleCamera.Level(K, pose2, 0.1) # expected x = Point3(1,0,0) diff --git a/python/gtsam_py/python/tests/test_StereoVOExample.py b/python/gtsam_py/python/tests/test_StereoVOExample.py index 3f5f57522..cefc08aab 100644 --- a/python/gtsam_py/python/tests/test_StereoVOExample.py +++ b/python/gtsam_py/python/tests/test_StereoVOExample.py @@ -28,11 +28,11 @@ class TestStereoVOExample(GtsamTestCase): # - No noise on measurements ## Create keys for variables - x1 = symbol(ord('x'),1) - x2 = symbol(ord('x'),2) - l1 = symbol(ord('l'),1) - l2 = symbol(ord('l'),2) - l3 = symbol(ord('l'),3) + x1 = symbol('x',1) + x2 = symbol('x',2) + l1 = symbol('l',1) + l2 = symbol('l',2) + l3 = symbol('l',3) ## Create graph container and add factors to it graph = gtsam.NonlinearFactorGraph() @@ -44,7 +44,7 @@ class TestStereoVOExample(GtsamTestCase): ## Create realistic calibration and measurement noise model # format: fx fy skew cx cy baseline K = gtsam.Cal3_S2Stereo(1000, 1000, 0, 320, 240, 0.2) - stereo_model = 
gtsam.noiseModel_Diagonal.Sigmas(np.array([1.0, 1.0, 1.0])) + stereo_model = gtsam.noiseModel.Diagonal.Sigmas(np.array([1.0, 1.0, 1.0])) ## Add measurements # pose 1 diff --git a/python/gtsam_py/python/tests/test_Triangulation.py b/python/gtsam_py/python/tests/test_Triangulation.py new file mode 100644 index 000000000..b43ad9b57 --- /dev/null +++ b/python/gtsam_py/python/tests/test_Triangulation.py @@ -0,0 +1,80 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Test Triangulation +Author: Frank Dellaert & Fan Jiang (Python) +""" +import unittest + +import numpy as np + +import gtsam as g +from gtsam.utils.test_case import GtsamTestCase +from gtsam import Cal3_S2, Cal3Bundler, Rot3, Pose3, \ + PinholeCameraCal3_S2, Point3, Point2Vector, Pose3Vector, triangulatePoint3 + +class TestVisualISAMExample(GtsamTestCase): + def test_TriangulationExample(self): + # Some common constants + sharedCal = Cal3_S2(1500, 1200, 0, 640, 480) + + # Looking along X-axis, 1 meter above ground plane (x-y) + upright = Rot3.Ypr(-np.pi / 2, 0., -np.pi / 2) + pose1 = Pose3(upright, Point3(0, 0, 1)) + camera1 = PinholeCameraCal3_S2(pose1, sharedCal) + + # create second camera 1 meter to the right of first camera + pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0))) + camera2 = PinholeCameraCal3_S2(pose2, sharedCal) + + # landmark ~5 meters infront of camera + landmark = Point3(5, 0.5, 1.2) + + # 1. Project two landmarks into two cameras and triangulate + z1 = camera1.project(landmark) + z2 = camera2.project(landmark) + + # twoPoses + poses = Pose3Vector() + measurements = Point2Vector() + + poses.append(pose1) + poses.append(pose2) + measurements.append(z1) + measurements.append(z2) + + optimize = True + rank_tol = 1e-9 + + triangulated_landmark = triangulatePoint3(poses,sharedCal, measurements, rank_tol, optimize) + self.gtsamAssertEquals(landmark, triangulated_landmark,1e-9) + + # 2. 
Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) + measurements = Point2Vector() + measurements.append(z1 - np.array([0.1, 0.5])) + measurements.append(z2 - np.array([-0.2, 0.3])) + + triangulated_landmark = triangulatePoint3(poses,sharedCal, measurements, rank_tol, optimize) + self.gtsamAssertEquals(landmark, triangulated_landmark,1e-2) + # + # # two Poses with Bundler Calibration + # bundlerCal = Cal3Bundler(1500, 0, 0, 640, 480) + # camera1 = PinholeCameraCal3Bundler(pose1, bundlerCal) + # camera2 = PinholeCameraCal3Bundler(pose2, bundlerCal) + # + # z1 = camera1.project(landmark) + # z2 = camera2.project(landmark) + # + # measurements = Point2Vector() + # measurements.append(z1) + # measurements.append(z2) + # + # triangulated_landmark = triangulatePoint3(poses,bundlerCal, measurements, rank_tol, optimize) + # self.gtsamAssertEquals(landmark, triangulated_landmark,1e-9) + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam_py/python/tests/test_Values.py b/python/gtsam_py/python/tests/test_Values.py index 20634a21c..dddd11c40 100644 --- a/python/gtsam_py/python/tests/test_Values.py +++ b/python/gtsam_py/python/tests/test_Values.py @@ -15,8 +15,7 @@ import numpy as np import gtsam from gtsam import (Cal3_S2, Cal3Bundler, Cal3DS2, EssentialMatrix, Point2, - Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values, - imuBias_ConstantBias) + Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values, imuBias) from gtsam.utils.test_case import GtsamTestCase @@ -37,7 +36,7 @@ class TestValues(GtsamTestCase): values.insert(7, Cal3DS2()) values.insert(8, Cal3Bundler()) values.insert(9, E) - values.insert(10, imuBias_ConstantBias()) + values.insert(10, imuBias.ConstantBias()) # Special cases for Vectors and Matrices # Note that gtsam's Eigen Vectors and Matrices requires double-precision @@ -70,8 +69,8 @@ class TestValues(GtsamTestCase): self.gtsamAssertEquals(values.atCal3DS2(7), Cal3DS2(), tol) self.gtsamAssertEquals(values.atCal3Bundler(8), Cal3Bundler(), tol) self.gtsamAssertEquals(values.atEssentialMatrix(9), E, tol) - self.gtsamAssertEquals(values.atimuBias_ConstantBias( - 10), imuBias_ConstantBias(), tol) + self.gtsamAssertEquals(values.atConstantBias( + 10), imuBias.ConstantBias(), tol) # special cases for Vector and Matrix: actualVector = values.atVector(11) diff --git a/python/gtsam_py/python/tests/test_VisualISAMExample.py b/python/gtsam_py/python/tests/test_VisualISAMExample.py index 99d7e6160..6eb05eeee 100644 --- a/python/gtsam_py/python/tests/test_VisualISAMExample.py +++ b/python/gtsam_py/python/tests/test_VisualISAMExample.py @@ -46,11 +46,11 @@ class TestVisualISAMExample(GtsamTestCase): isam, result = visual_isam.step(data, isam, result, truth, currentPose) for i in range(len(truth.cameras)): - pose_i = result.atPose3(symbol(ord('x'), i)) + pose_i = result.atPose3(symbol('x', i)) self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) for j in range(len(truth.points)): - point_j = result.atPoint3(symbol(ord('l'), j)) + point_j = result.atPoint3(symbol('l', j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) if __name__ == "__main__": diff --git a/python/gtsam_py/python/tests/test_dataset.py b/python/gtsam_py/python/tests/test_dataset.py index 60fb9450d..87fc2ad54 100644 --- a/python/gtsam_py/python/tests/test_dataset.py +++ b/python/gtsam_py/python/tests/test_dataset.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest import gtsam -from gtsam import BetweenFactorPose3, BetweenFactorPose3s +from gtsam import 
BetweenFactorPose3 from gtsam.utils.test_case import GtsamTestCase @@ -37,8 +37,8 @@ class TestDataset(GtsamTestCase): def test_parse3Dfactors(self): """Test parsing into data structure.""" factors = gtsam.parse3DFactors(self.pose3_example_g2o_file) - self.assertEqual(factors.size(), 6) - self.assertIsInstance(factors.at(0), BetweenFactorPose3) + self.assertEqual(len(factors), 6) + self.assertIsInstance(factors[0], BetweenFactorPose3) if __name__ == '__main__': diff --git a/python/gtsam_py/python/tests/test_initialize_pose3.py b/python/gtsam_py/python/tests/test_initialize_pose3.py index 3aa7e3470..6d7f66653 100644 --- a/python/gtsam_py/python/tests/test_initialize_pose3.py +++ b/python/gtsam_py/python/tests/test_initialize_pose3.py @@ -24,7 +24,7 @@ class TestValues(GtsamTestCase): def setUp(self): - model = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) + model = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) # We consider a small graph: # symbolic FG @@ -64,9 +64,8 @@ class TestValues(GtsamTestCase): def test_orientations(self): pose3Graph = gtsam.InitializePose3.buildPose3graph(self.graph) - initial = gtsam.InitializePose3.computeOrientationsChordal(pose3Graph) - + # comparison is up to M_PI, that's why we add some multiples of 2*M_PI self.gtsamAssertEquals(initial.atRot3(x0), self.R0, 1e-6) self.gtsamAssertEquals(initial.atRot3(x1), self.R1, 1e-6) @@ -77,7 +76,7 @@ class TestValues(GtsamTestCase): g2oFile = gtsam.findExampleDataFile("pose3example-grid") is3D = True inputGraph, expectedValues = gtsam.readG2o(g2oFile, is3D) - priorModel = gtsam.noiseModel_Unit.Create(6) + priorModel = gtsam.noiseModel.Unit.Create(6) inputGraph.add(gtsam.PriorFactorPose3(0, Pose3(), priorModel)) initial = gtsam.InitializePose3.initialize(inputGraph) diff --git a/python/gtsam_py/python/tests/test_logging_optimizer.py b/python/gtsam_py/python/tests/test_logging_optimizer.py index 2560a72a2..47eb32e7b 100644 --- a/python/gtsam_py/python/tests/test_logging_optimizer.py +++ b/python/gtsam_py/python/tests/test_logging_optimizer.py @@ -21,7 +21,7 @@ from gtsam.utils.test_case import GtsamTestCase from gtsam.utils.logging_optimizer import gtsam_optimize KEY = 0 -MODEL = gtsam.noiseModel_Unit.Create(3) +MODEL = gtsam.noiseModel.Unit.Create(3) class TestOptimizeComet(GtsamTestCase): diff --git a/python/gtsam_py/python/utils/circlePose3.py b/python/gtsam_py/python/utils/circlePose3.py index 7012548f4..37e7c5568 100644 --- a/python/gtsam_py/python/utils/circlePose3.py +++ b/python/gtsam_py/python/utils/circlePose3.py @@ -3,7 +3,7 @@ import numpy as np from math import pi, cos, sin -def circlePose3(numPoses=8, radius=1.0, symbolChar=0): +def circlePose3(numPoses=8, radius=1.0, symbolChar='\0'): """ circlePose3 generates a set of poses in a circle. This function returns those poses inside a gtsam.Values object, with sequential @@ -18,10 +18,6 @@ def circlePose3(numPoses=8, radius=1.0, symbolChar=0): Vehicle at p0 is looking towards y axis (X-axis points towards world y) """ - # Force symbolChar to be a single character - if type(symbolChar) is str: - symbolChar = ord(symbolChar[0]) - values = gtsam.Values() theta = 0.0 dtheta = 2 * pi / numPoses diff --git a/python/gtsam_py/python/utils/plot.py b/python/gtsam_py/python/utils/plot.py index 1e976a69e..7b8da0f06 100644 --- a/python/gtsam_py/python/utils/plot.py +++ b/python/gtsam_py/python/utils/plot.py @@ -13,9 +13,8 @@ def set_axes_equal(fignum): Make axes of 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc.. 
This is one possible solution to Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not working for 3D. - - Args: - fignum (int): An integer representing the figure number for Matplotlib. + Input + ax: a matplotlib axis, e.g., as output from plt.gca(). """ fig = plt.figure(fignum) ax = fig.gca(projection='3d') @@ -35,21 +34,7 @@ def set_axes_equal(fignum): def ellipsoid(xc, yc, zc, rx, ry, rz, n): - """ - Numpy equivalent of Matlab's ellipsoid function. - - Args: - xc (double): Center of ellipsoid in X-axis. - yc (double): Center of ellipsoid in Y-axis. - zc (double): Center of ellipsoid in Z-axis. - rx (double): Radius of ellipsoid in X-axis. - ry (double): Radius of ellipsoid in Y-axis. - rz (double): Radius of ellipsoid in Z-axis. - n (int): The granularity of the ellipsoid plotted. - - Returns: - tuple[numpy.ndarray]: The points in the x, y and z axes to use for the surface plot. - """ + """Numpy equivalent of Matlab's ellipsoid function""" u = np.linspace(0, 2*np.pi, n+1) v = np.linspace(0, np.pi, n+1) x = -rx * np.outer(np.cos(u), np.sin(v)).T @@ -62,18 +47,9 @@ def ellipsoid(xc, yc, zc, rx, ry, rz, n): def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5): """ Plots a Gaussian as an uncertainty ellipse - Based on Maybeck Vol 1, page 366 k=2.296 corresponds to 1 std, 68.26% of all probability k=11.82 corresponds to 3 std, 99.74% of all probability - - Args: - axes (matplotlib.axes.Axes): Matplotlib axes. - origin (gtsam.Point3): The origin in the world frame. - P (numpy.ndarray): The marginal covariance matrix of the 3D point which will be represented as an ellipse. - scale (float): Scaling factor of the radii of the covariance ellipse. - n (int): Defines the granularity of the ellipse. Higher values indicate finer ellipses. - alpha (float): Transparency value for the plotted surface in the range [0, 1]. """ k = 11.82 U, S, _ = np.linalg.svd(P) @@ -97,19 +73,10 @@ def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5): def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): - """ - Plot a 2D pose on given axis `axes` with given `axis_length`. - - Args: - axes (matplotlib.axes.Axes): Matplotlib axes. - pose (gtsam.Pose2): The pose to be plotted. - axis_length (float): The length of the camera axes. - covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. - """ + """Plot a 2D pose on given axis 'axes' with given 'axis_length'.""" # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global - t = pose.translation() - origin = np.array([t.x(), t.y()]) + origin = pose.translation() # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -134,89 +101,34 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): np.rad2deg(angle), fill=False) axes.add_patch(e1) - -def plot_pose2(fignum, pose, axis_length=0.1, covariance=None, - axis_labels=('X axis', 'Y axis', 'Z axis')): - """ - Plot a 2D pose on given figure with given `axis_length`. - - Args: - fignum (int): Integer representing the figure number to use for plotting. - pose (gtsam.Pose2): The pose to be plotted. - axis_length (float): The length of the camera axes. - covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. - axis_labels (iterable[string]): List of axis labels to set. 
- """ +def plot_pose2(fignum, pose, axis_length=0.1, covariance=None): + """Plot a 2D pose on given figure with given 'axis_length'.""" # get figure object fig = plt.figure(fignum) axes = fig.gca() - plot_pose2_on_axes(axes, pose, axis_length=axis_length, - covariance=covariance) - - axes.set_xlabel(axis_labels[0]) - axes.set_ylabel(axis_labels[1]) - axes.set_zlabel(axis_labels[2]) - - return fig + plot_pose2_on_axes(axes, pose, axis_length, covariance) def plot_point3_on_axes(axes, point, linespec, P=None): - """ - Plot a 3D point on given axis `axes` with given `linespec`. - - Args: - axes (matplotlib.axes.Axes): Matplotlib axes. - point (gtsam.Point3): The point to be plotted. - linespec (string): String representing formatting options for Matplotlib. - P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. - """ - axes.plot([point.x()], [point.y()], [point.z()], linespec) + """Plot a 3D point on given axis 'axes' with given 'linespec' and optional covariance 'P'.""" + axes.plot([point[0]], [point[1]], [point[2]], linespec) if P is not None: plot_covariance_ellipse_3d(axes, point.vector(), P) -def plot_point3(fignum, point, linespec, P=None, - axis_labels=('X axis', 'Y axis', 'Z axis')): - """ - Plot a 3D point on given figure with given `linespec`. - - Args: - fignum (int): Integer representing the figure number to use for plotting. - point (gtsam.Point3): The point to be plotted. - linespec (string): String representing formatting options for Matplotlib. - P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. - axis_labels (iterable[string]): List of axis labels to set. - - Returns: - fig: The matplotlib figure. - - """ +def plot_point3(fignum, point, linespec, P=None): + """Plot a 3D point on given figure with given 'linespec' and optional covariance 'P'.""" fig = plt.figure(fignum) axes = fig.gca(projection='3d') plot_point3_on_axes(axes, point, linespec, P) - axes.set_xlabel(axis_labels[0]) - axes.set_ylabel(axis_labels[1]) - axes.set_zlabel(axis_labels[2]) - return fig - - -def plot_3d_points(fignum, values, linespec="g*", marginals=None, - title="3D Points", axis_labels=('X axis', 'Y axis', 'Z axis')): +def plot_3d_points(fignum, values, linespec="g*", marginals=None): """ - Plots the Point3s in `values`, with optional covariances. + Plots the Point3s in 'values', with optional covariances. Finds all the Point3 objects in the given Values object and plots them. If a Marginals object is given, this function will also plot marginal covariance ellipses for each point. - - Args: - fignum (int): Integer representing the figure number to use for plotting. - values (gtsam.Values): Values dictionary consisting of points to be plotted. - linespec (string): String representing formatting options for Matplotlib. - marginals (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. - title (string): The title of the plot. - axis_labels (iterable[string]): List of axis labels to set. 
""" keys = values.keys() @@ -227,34 +139,25 @@ def plot_3d_points(fignum, values, linespec="g*", marginals=None, key = keys.at(i) point = values.atPoint3(key) if marginals is not None: - covariance = marginals.marginalCovariance(key) + P = marginals.marginalCovariance(key); else: - covariance = None + P = None - fig = plot_point3(fignum, point, linespec, covariance, - axis_labels=axis_labels) + plot_point3(fignum, point, linespec, P) except RuntimeError: continue # I guess it's not a Point3 - fig.suptitle(title) - fig.canvas.set_window_title(title.lower()) - -def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): +def plot_pose3_on_axes(axes, pose, P=None, scale=1, axis_length=0.1): """ - Plot a 3D pose on given axis `axes` with given `axis_length`. - - Args: - axes (matplotlib.axes.Axes): Matplotlib axes. - point (gtsam.Point3): The point to be plotted. - linespec (string): String representing formatting options for Matplotlib. - P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + Plot a 3D pose on given axis 'axes' with given 'axis_length'. + Optional 'scale' the pose and plot the covariance 'P'. """ # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global - origin = pose.translation().vector() + origin = pose.translation() # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -269,7 +172,6 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): line = np.append(origin[np.newaxis], z_axis[np.newaxis], axis=0) axes.plot(line[:, 0], line[:, 1], line[:, 2], 'b-') - # plot the covariance if P is not None: # covariance matrix in pose coordinate frame pPp = P[3:6, 3:6] @@ -278,49 +180,16 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): plot_covariance_ellipse_3d(axes, origin, gPp) -def plot_pose3(fignum, pose, axis_length=0.1, P=None, - axis_labels=('X axis', 'Y axis', 'Z axis')): - """ - Plot a 3D pose on given figure with given `axis_length`. - - Args: - fignum (int): Integer representing the figure number to use for plotting. - pose (gtsam.Pose3): 3D pose to be plotted. - linespec (string): String representing formatting options for Matplotlib. - P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. - axis_labels (iterable[string]): List of axis labels to set. - - Returns: - fig: The matplotlib figure. - """ +def plot_pose3(fignum, pose, P, axis_length=0.1): + """Plot a 3D pose on given figure with given 'axis_length'.""" # get figure object fig = plt.figure(fignum) axes = fig.gca(projection='3d') - plot_pose3_on_axes(axes, pose, P=P, - axis_length=axis_length) - - axes.set_xlabel(axis_labels[0]) - axes.set_ylabel(axis_labels[1]) - axes.set_zlabel(axis_labels[2]) - - return fig + plot_pose3_on_axes(axes, pose, P=P, axis_length=axis_length) -def plot_trajectory(fignum, values, scale=1, marginals=None, - title="Plot Trajectory", axis_labels=('X axis', 'Y axis', 'Z axis')): - """ - Plot a complete 3D trajectory using poses in `values`. - - Args: - fignum (int): Integer representing the figure number to use for plotting. - values (gtsam.Values): Values dict containing the poses. - scale (float): Value to scale the poses by. - marginals (gtsam.Marginals): Marginalized probability values of the estimation. - Used to plot uncertainty bounds. - title (string): The title of the plot. - axis_labels (iterable[string]): List of axis labels to set. 
- """ - pose3Values = gtsam.utilities_allPose3s(values) +def plot_trajectory(fignum, values, scale=1, marginals=None): + pose3Values = gtsam.allPose3s(values) keys = gtsam.KeyVector(pose3Values.keys()) lastIndex = None @@ -340,12 +209,11 @@ def plot_trajectory(fignum, values, scale=1, marginals=None, pass if marginals: - covariance = marginals.marginalCovariance(lastKey) + P = marginals.marginalCovariance(lastKey) else: - covariance = None + P = None - fig = plot_pose3(fignum, lastPose, P=covariance, - axis_length=scale, axis_labels=axis_labels) + plot_pose3(fignum, lastPose, P, scale) lastIndex = i @@ -355,15 +223,11 @@ def plot_trajectory(fignum, values, scale=1, marginals=None, try: lastPose = pose3Values.atPose3(lastKey) if marginals: - covariance = marginals.marginalCovariance(lastKey) + P = marginals.marginalCovariance(lastKey) else: - covariance = None + P = None - fig = plot_pose3(fignum, lastPose, P=covariance, - axis_length=scale, axis_labels=axis_labels) + plot_pose3(fignum, lastPose, P, scale) except: pass - - fig.suptitle(title) - fig.canvas.set_window_title(title.lower()) diff --git a/python/gtsam_py/python/utils/test_case.py b/python/gtsam_py/python/utils/test_case.py index 7df1e6ee9..3effd7f65 100644 --- a/python/gtsam_py/python/utils/test_case.py +++ b/python/gtsam_py/python/utils/test_case.py @@ -21,7 +21,11 @@ class GtsamTestCase(unittest.TestCase): Keyword Arguments: tol {float} -- tolerance passed to 'equals', default 1e-9 """ - equal = actual.equals(expected, tol) + import numpy + if isinstance(expected, numpy.ndarray): + equal = numpy.allclose(actual, expected, atol=tol) + else: + equal = actual.equals(expected, tol) if not equal: raise self.failureException( "Values are not equal:\n{}!={}".format(actual, expected)) diff --git a/python/gtsam_py/python/utils/visual_data_generator.py b/python/gtsam_py/python/utils/visual_data_generator.py index f04588e70..bc60dcd70 100644 --- a/python/gtsam_py/python/utils/visual_data_generator.py +++ b/python/gtsam_py/python/utils/visual_data_generator.py @@ -1,9 +1,8 @@ from __future__ import print_function import numpy as np - +from math import pi, cos, sin import gtsam -from gtsam import Cal3_S2, PinholeCameraCal3_S2, Point2, Point3, Pose3 class Options: @@ -11,7 +10,7 @@ class Options: Options to generate test scenario """ - def __init__(self, triangle=False, nrCameras=3, K=Cal3_S2()): + def __init__(self, triangle=False, nrCameras=3, K=gtsam.Cal3_S2()): """ Options to generate test scenario @param triangle: generate a triangle scene with 3 points if True, otherwise @@ -28,10 +27,10 @@ class GroundTruth: Object holding generated ground-truth data """ - def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): + def __init__(self, K=gtsam.Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K - self.cameras = [Pose3()] * nrCameras - self.points = [Point3()] * nrPoints + self.cameras = [gtsam.Pose3()] * nrCameras + self.points = [gtsam.Point3(0, 0, 0)] * nrPoints def print_(self, s=""): print(s) @@ -53,28 +52,28 @@ class Data: class NoiseModels: pass - def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): + def __init__(self, K=gtsam.Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K - self.Z = [x[:] for x in [[Point2()] * nrPoints] * nrCameras] + self.Z = [x[:] for x in [[gtsam.Point2()] * nrPoints] * nrCameras] self.J = [x[:] for x in [[0] * nrPoints] * nrCameras] - self.odometry = [Pose3()] * nrCameras + self.odometry = [gtsam.Pose3()] * nrCameras # Set Noise parameters self.noiseModels = Data.NoiseModels() - self.noiseModels.posePrior 
= gtsam.noiseModel_Diagonal.Sigmas( + self.noiseModels.posePrior = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])) - # noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + # noiseModels.odometry = gtsam.noiseModel.Diagonal.Sigmas( # np.array([0.001,0.001,0.001,0.1,0.1,0.1])) - self.noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + self.noiseModels.odometry = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05, 0.2, 0.2, 0.2])) - self.noiseModels.pointPrior = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) - self.noiseModels.measurement = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) + self.noiseModels.pointPrior = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) + self.noiseModels.measurement = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) def generate_data(options): """ Generate ground-truth and measurement data. """ - K = Cal3_S2(500, 500, 0, 640. / 2., 480. / 2.) + K = gtsam.Cal3_S2(500, 500, 0, 640. / 2., 480. / 2.) nrPoints = 3 if options.triangle else 8 truth = GroundTruth(K=K, nrCameras=options.nrCameras, nrPoints=nrPoints) @@ -84,26 +83,26 @@ def generate_data(options): if options.triangle: # Create a triangle target, just 3 points on a plane r = 10 for j in range(len(truth.points)): - theta = j * 2 * np.pi / nrPoints - truth.points[j] = Point3(r * np.cos(theta), r * np.sin(theta), 0) + theta = j * 2 * pi / nrPoints + truth.points[j] = gtsam.Point3(r * cos(theta), r * sin(theta), 0) else: # 3D landmarks as vertices of a cube truth.points = [ - Point3(10, 10, 10), Point3(-10, 10, 10), - Point3(-10, -10, 10), Point3(10, -10, 10), - Point3(10, 10, -10), Point3(-10, 10, -10), - Point3(-10, -10, -10), Point3(10, -10, -10) + gtsam.Point3(10, 10, 10), gtsam.Point3(-10, 10, 10), + gtsam.Point3(-10, -10, 10), gtsam.Point3(10, -10, 10), + gtsam.Point3(10, 10, -10), gtsam.Point3(-10, 10, -10), + gtsam.Point3(-10, -10, -10), gtsam.Point3(10, -10, -10) ] # Create camera cameras on a circle around the triangle height = 10 r = 40 for i in range(options.nrCameras): - theta = i * 2 * np.pi / options.nrCameras - t = Point3(r * np.cos(theta), r * np.sin(theta), height) - truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, - Point3(), - Point3(0, 0, 1), - truth.K) + theta = i * 2 * pi / options.nrCameras + t = gtsam.Point3(r * cos(theta), r * sin(theta), height) + truth.cameras[i] = gtsam.SimpleCamera.Lookat(t, + gtsam.Point3(0, 0, 0), + gtsam.Point3(0, 0, 1), + truth.K) # Create measurements for j in range(nrPoints): # All landmarks seen in every frame diff --git a/python/gtsam_py/python/utils/visual_isam.py b/python/gtsam_py/python/utils/visual_isam.py index b0ebe68c3..a8fed4b23 100644 --- a/python/gtsam_py/python/utils/visual_isam.py +++ b/python/gtsam_py/python/utils/visual_isam.py @@ -25,7 +25,7 @@ def initialize(data, truth, options): newFactors = gtsam.NonlinearFactorGraph() initialEstimates = gtsam.Values() for i in range(2): - ii = symbol(ord('x'), i) + ii = symbol('x', i) if i == 0: if options.hardConstraint: # add hard constraint newFactors.add( @@ -41,10 +41,10 @@ def initialize(data, truth, options): # Add visual measurement factors from two first poses and initialize # observed landmarks for i in range(2): - ii = symbol(ord('x'), i) + ii = symbol('x', i) for k in range(len(data.Z[i])): j = data.J[i][k] - jj = symbol(ord('l'), j) + jj = symbol('l', j) newFactors.add( gtsam.GenericProjectionFactorCal3_S2(data.Z[i][ k], data.noiseModels.measurement, ii, jj, data.K)) @@ -59,8 +59,8 @@ def initialize(data, truth, options): # Add odometry between frames 0 and 1 
newFactors.add( gtsam.BetweenFactorPose3( - symbol(ord('x'), 0), - symbol(ord('x'), 1), data.odometry[1], data.noiseModels.odometry)) + symbol('x', 0), + symbol('x', 1), data.odometry[1], data.noiseModels.odometry)) # Update ISAM if options.batchInitialization: # Do a full optimize for first two poses @@ -98,28 +98,28 @@ def step(data, isam, result, truth, currPoseIndex): odometry = data.odometry[prevPoseIndex] newFactors.add( gtsam.BetweenFactorPose3( - symbol(ord('x'), prevPoseIndex), - symbol(ord('x'), currPoseIndex), odometry, + symbol('x', prevPoseIndex), + symbol('x', currPoseIndex), odometry, data.noiseModels.odometry)) # Add visual measurement factors and initializations as necessary for k in range(len(data.Z[currPoseIndex])): zij = data.Z[currPoseIndex][k] j = data.J[currPoseIndex][k] - jj = symbol(ord('l'), j) + jj = symbol('l', j) newFactors.add( gtsam.GenericProjectionFactorCal3_S2( zij, data.noiseModels.measurement, - symbol(ord('x'), currPoseIndex), jj, data.K)) + symbol('x', currPoseIndex), jj, data.K)) # TODO: initialize with something other than truth if not result.exists(jj) and not initialEstimates.exists(jj): lmInit = truth.points[j] initialEstimates.insert(jj, lmInit) # Initial estimates for the new pose. - prevPose = result.atPose3(symbol(ord('x'), prevPoseIndex)) + prevPose = result.atPose3(symbol('x', prevPoseIndex)) initialEstimates.insert( - symbol(ord('x'), currPoseIndex), prevPose.compose(odometry)) + symbol('x', currPoseIndex), prevPose.compose(odometry)) # Update ISAM # figure(1)tic diff --git a/python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py b/python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py index 786701e0f..7d2cea8ae 100644 --- a/python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py +++ b/python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py @@ -16,16 +16,6 @@ import numpy as np import gtsam import gtsam_unstable - -def _timestamp_key_value(key, value): - """ - - """ - return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( - key, value - ) - - def BatchFixedLagSmootherExample(): """ Runs a batch fixed smoother on an agent with two odometry @@ -45,21 +35,21 @@ def BatchFixedLagSmootherExample(): # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) - prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) X1 = 0 new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + new_timestamps.insert((X1, 0.0)) delta_time = 0.25 time = 0.25 while time <= 3.0: - previous_key = 1000 * (time - delta_time) - current_key = 1000 * time + previous_key = int(1000 * (time - delta_time)) + current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert(_timestamp_key_value(current_key, time)) + new_timestamps.insert((current_key, time)) # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. 
Position is time[s] * 2[m/s] @@ -69,14 +59,14 @@ def BatchFixedLagSmootherExample(): # Add odometry factors from two different sources with different error # stats odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) - odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.05])) new_factors.push_back(gtsam.BetweenFactorPose2( previous_key, current_key, odometry_measurement_1, odometry_noise_1 )) odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) - odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05])) new_factors.push_back(gtsam.BetweenFactorPose2( previous_key, current_key, odometry_measurement_2, odometry_noise_2 diff --git a/python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py b/python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py index 6ba06f0f2..59f008a05 100644 --- a/python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py +++ b/python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py @@ -12,7 +12,7 @@ Author: Frank Dellaert # pylint: disable=invalid-name, no-name-in-module from gtsam import (LevenbergMarquardtOptimizer, LevenbergMarquardtParams, - NonlinearFactorGraph, Point3, Values, noiseModel_Isotropic) + NonlinearFactorGraph, Point3, Values, noiseModel) from gtsam_unstable import Event, TimeOfArrival, TOAFactor # units @@ -64,7 +64,7 @@ def create_graph(microphones, simulatedTOA): graph = NonlinearFactorGraph() # Create a noise model for the TOA error - model = noiseModel_Isotropic.Sigma(1, 0.5 * MS) + model = noiseModel.Isotropic.Sigma(1, 0.5 * MS) K = len(microphones) key = 0 diff --git a/python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py b/python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py index 8d3af311f..c1ccd1ea1 100644 --- a/python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py +++ b/python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py @@ -16,13 +16,6 @@ import gtsam import gtsam_unstable from gtsam.utils.test_case import GtsamTestCase - -def _timestamp_key_value(key, value): - return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( - key, value - ) - - class TestFixedLagSmootherExample(GtsamTestCase): ''' Tests the fixed lag smoother wrapper @@ -47,14 +40,14 @@ class TestFixedLagSmootherExample(GtsamTestCase): # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) - prior_noise = gtsam.noiseModel_Diagonal.Sigmas( + prior_noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.1])) X1 = 0 new_factors.push_back( gtsam.PriorFactorPose2( X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + new_timestamps.insert((X1, 0.0)) delta_time = 0.25 time = 0.25 @@ -80,11 +73,11 @@ class TestFixedLagSmootherExample(GtsamTestCase): # and its two odometers measure the change. The smoothed # result is then compared to the ground truth while time <= 3.0: - previous_key = 1000 * (time - delta_time) - current_key = 1000 * time + previous_key = int(1000 * (time - delta_time)) + current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert(_timestamp_key_value(current_key, time)) + new_timestamps.insert((current_key, time)) # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. 
Position is time[s] * @@ -95,7 +88,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): # Add odometry factors from two different sources with different # error stats odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) - odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.05])) new_factors.push_back( gtsam.BetweenFactorPose2( @@ -105,7 +98,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): odometry_noise_1)) odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) - odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05])) new_factors.push_back( gtsam.BetweenFactorPose2( From 2bda74950a6fc2f97f4323029d7340a67de15958 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 27 Jul 2020 10:56:09 -0400 Subject: [PATCH 004/142] Sync ImuFactorExample --- .../python/examples/ImuFactorExample.py | 46 ++-- python/gtsam_py/python/utils/plot.py | 200 +++++++++++++++--- 2 files changed, 197 insertions(+), 49 deletions(-) diff --git a/python/gtsam_py/python/examples/ImuFactorExample.py b/python/gtsam_py/python/examples/ImuFactorExample.py index adaa3c08a..e041afd87 100644 --- a/python/gtsam_py/python/examples/ImuFactorExample.py +++ b/python/gtsam_py/python/examples/ImuFactorExample.py @@ -5,32 +5,29 @@ All Rights Reserved See LICENSE for the license information -A script validating the ImuFactor inference. +A script validating and demonstrating the ImuFactor inference. + +Author: Frank Dellaert, Varun Agrawal """ from __future__ import print_function import math +import gtsam import matplotlib.pyplot as plt import numpy as np +from gtsam import symbol_shorthand +B = symbol_shorthand.B +V = symbol_shorthand.V +X = symbol_shorthand.X + +from gtsam.utils.plot import plot_pose3 from mpl_toolkits.mplot3d import Axes3D -import gtsam -from gtsam.utils.plot import plot_pose3 from PreintegrationExample import POSES_FIG, PreintegrationExample -BIAS_KEY = int(gtsam.symbol('b', 0)) - - -def X(key): - """Create symbol for pose key.""" - return gtsam.symbol('x', key) - - -def V(key): - """Create symbol for velocity key.""" - return gtsam.symbol('v', key) +BIAS_KEY = B(0) np.set_printoptions(precision=3, suppress=True) @@ -76,8 +73,14 @@ class ImuFactorExample(PreintegrationExample): initial.insert(BIAS_KEY, self.actualBias) for i in range(num_poses): state_i = self.scenario.navState(float(i)) - initial.insert(X(i), state_i.pose()) - initial.insert(V(i), state_i.velocity()) + + poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) + pose = state_i.pose().compose(poseNoise) + + velocity = state_i.velocity() + np.random.randn(3)*0.1 + + initial.insert(X(i), pose) + initial.insert(V(i), velocity) # simulate the loop i = 0 # state index @@ -88,6 +91,12 @@ class ImuFactorExample(PreintegrationExample): measuredAcc = self.runner.measuredSpecificForce(t) pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt) + poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) + + actual_state_i = gtsam.NavState( + actual_state_i.pose().compose(poseNoise), + actual_state_i.velocity() + np.random.randn(3)*0.1) + # Plot IMU many times if k % 10 == 0: self.plotImu(t, measuredOmega, measuredAcc) @@ -133,7 +142,10 @@ class ImuFactorExample(PreintegrationExample): pose_i = result.atPose3(X(i)) plot_pose3(POSES_FIG, pose_i, 0.1) i += 1 - print(result.atimuBias.ConstantBias(BIAS_KEY)) + + gtsam.utils.plot.set_axes_equal(POSES_FIG) + + print(result.atConstantBias(BIAS_KEY)) 
plt.ioff() plt.show() diff --git a/python/gtsam_py/python/utils/plot.py b/python/gtsam_py/python/utils/plot.py index 7b8da0f06..276cefd54 100644 --- a/python/gtsam_py/python/utils/plot.py +++ b/python/gtsam_py/python/utils/plot.py @@ -13,8 +13,9 @@ def set_axes_equal(fignum): Make axes of 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc.. This is one possible solution to Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not working for 3D. - Input - ax: a matplotlib axis, e.g., as output from plt.gca(). + + Args: + fignum (int): An integer representing the figure number for Matplotlib. """ fig = plt.figure(fignum) ax = fig.gca(projection='3d') @@ -34,7 +35,21 @@ def set_axes_equal(fignum): def ellipsoid(xc, yc, zc, rx, ry, rz, n): - """Numpy equivalent of Matlab's ellipsoid function""" + """ + Numpy equivalent of Matlab's ellipsoid function. + + Args: + xc (double): Center of ellipsoid in X-axis. + yc (double): Center of ellipsoid in Y-axis. + zc (double): Center of ellipsoid in Z-axis. + rx (double): Radius of ellipsoid in X-axis. + ry (double): Radius of ellipsoid in Y-axis. + rz (double): Radius of ellipsoid in Z-axis. + n (int): The granularity of the ellipsoid plotted. + + Returns: + tuple[numpy.ndarray]: The points in the x, y and z axes to use for the surface plot. + """ u = np.linspace(0, 2*np.pi, n+1) v = np.linspace(0, np.pi, n+1) x = -rx * np.outer(np.cos(u), np.sin(v)).T @@ -47,9 +62,18 @@ def ellipsoid(xc, yc, zc, rx, ry, rz, n): def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5): """ Plots a Gaussian as an uncertainty ellipse + Based on Maybeck Vol 1, page 366 k=2.296 corresponds to 1 std, 68.26% of all probability k=11.82 corresponds to 3 std, 99.74% of all probability + + Args: + axes (matplotlib.axes.Axes): Matplotlib axes. + origin (gtsam.Point3): The origin in the world frame. + P (numpy.ndarray): The marginal covariance matrix of the 3D point which will be represented as an ellipse. + scale (float): Scaling factor of the radii of the covariance ellipse. + n (int): Defines the granularity of the ellipse. Higher values indicate finer ellipses. + alpha (float): Transparency value for the plotted surface in the range [0, 1]. """ k = 11.82 U, S, _ = np.linalg.svd(P) @@ -73,10 +97,19 @@ def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5): def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): - """Plot a 2D pose on given axis 'axes' with given 'axis_length'.""" + """ + Plot a 2D pose on given axis `axes` with given `axis_length`. + + Args: + axes (matplotlib.axes.Axes): Matplotlib axes. + pose (gtsam.Pose2): The pose to be plotted. + axis_length (float): The length of the camera axes. + covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. 
+ """ # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global - origin = pose.translation() + t = pose.translation() + origin = np.array([t.x(), t.y()]) # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -101,34 +134,89 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): np.rad2deg(angle), fill=False) axes.add_patch(e1) -def plot_pose2(fignum, pose, axis_length=0.1, covariance=None): - """Plot a 2D pose on given figure with given 'axis_length'.""" + +def plot_pose2(fignum, pose, axis_length=0.1, covariance=None, + axis_labels=('X axis', 'Y axis', 'Z axis')): + """ + Plot a 2D pose on given figure with given `axis_length`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + pose (gtsam.Pose2): The pose to be plotted. + axis_length (float): The length of the camera axes. + covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + axis_labels (iterable[string]): List of axis labels to set. + """ # get figure object fig = plt.figure(fignum) axes = fig.gca() - plot_pose2_on_axes(axes, pose, axis_length, covariance) + plot_pose2_on_axes(axes, pose, axis_length=axis_length, + covariance=covariance) + + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) + + return fig def plot_point3_on_axes(axes, point, linespec, P=None): - """Plot a 3D point on given axis 'axes' with given 'linespec' and optional covariance 'P'.""" - axes.plot([point[0]], [point[1]], [point[2]], linespec) + """ + Plot a 3D point on given axis `axes` with given `linespec`. + + Args: + axes (matplotlib.axes.Axes): Matplotlib axes. + point (gtsam.Point3): The point to be plotted. + linespec (string): String representing formatting options for Matplotlib. + P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + """ + axes.plot([point.x()], [point.y()], [point.z()], linespec) if P is not None: - plot_covariance_ellipse_3d(axes, point.vector(), P) + plot_covariance_ellipse_3d(axes, point, P) -def plot_point3(fignum, point, linespec, P=None): - """Plot a 3D point on given figure with given 'linespec' and optional covariance 'P'.""" +def plot_point3(fignum, point, linespec, P=None, + axis_labels=('X axis', 'Y axis', 'Z axis')): + """ + Plot a 3D point on given figure with given `linespec`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + point (gtsam.Point3): The point to be plotted. + linespec (string): String representing formatting options for Matplotlib. + P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + axis_labels (iterable[string]): List of axis labels to set. + + Returns: + fig: The matplotlib figure. + + """ fig = plt.figure(fignum) axes = fig.gca(projection='3d') plot_point3_on_axes(axes, point, linespec, P) + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) -def plot_3d_points(fignum, values, linespec="g*", marginals=None): + return fig + + +def plot_3d_points(fignum, values, linespec="g*", marginals=None, + title="3D Points", axis_labels=('X axis', 'Y axis', 'Z axis')): """ - Plots the Point3s in 'values', with optional covariances. + Plots the Point3s in `values`, with optional covariances. Finds all the Point3 objects in the given Values object and plots them. 
If a Marginals object is given, this function will also plot marginal covariance ellipses for each point. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + values (gtsam.Values): Values dictionary consisting of points to be plotted. + linespec (string): String representing formatting options for Matplotlib. + marginals (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + title (string): The title of the plot. + axis_labels (iterable[string]): List of axis labels to set. """ keys = values.keys() @@ -139,21 +227,30 @@ def plot_3d_points(fignum, values, linespec="g*", marginals=None): key = keys.at(i) point = values.atPoint3(key) if marginals is not None: - P = marginals.marginalCovariance(key); + covariance = marginals.marginalCovariance(key) else: - P = None + covariance = None - plot_point3(fignum, point, linespec, P) + fig = plot_point3(fignum, point, linespec, covariance, + axis_labels=axis_labels) except RuntimeError: continue # I guess it's not a Point3 + fig.suptitle(title) + fig.canvas.set_window_title(title.lower()) -def plot_pose3_on_axes(axes, pose, P=None, scale=1, axis_length=0.1): + +def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): """ - Plot a 3D pose on given axis 'axes' with given 'axis_length'. - Optional 'scale' the pose and plot the covariance 'P'. + Plot a 3D pose on given axis `axes` with given `axis_length`. + + Args: + axes (matplotlib.axes.Axes): Matplotlib axes. + point (gtsam.Point3): The point to be plotted. + linespec (string): String representing formatting options for Matplotlib. + P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. """ # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global @@ -172,6 +269,7 @@ def plot_pose3_on_axes(axes, pose, P=None, scale=1, axis_length=0.1): line = np.append(origin[np.newaxis], z_axis[np.newaxis], axis=0) axes.plot(line[:, 0], line[:, 1], line[:, 2], 'b-') + # plot the covariance if P is not None: # covariance matrix in pose coordinate frame pPp = P[3:6, 3:6] @@ -180,16 +278,49 @@ def plot_pose3_on_axes(axes, pose, P=None, scale=1, axis_length=0.1): plot_covariance_ellipse_3d(axes, origin, gPp) -def plot_pose3(fignum, pose, P, axis_length=0.1): - """Plot a 3D pose on given figure with given 'axis_length'.""" +def plot_pose3(fignum, pose, axis_length=0.1, P=None, + axis_labels=('X axis', 'Y axis', 'Z axis')): + """ + Plot a 3D pose on given figure with given `axis_length`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + pose (gtsam.Pose3): 3D pose to be plotted. + linespec (string): String representing formatting options for Matplotlib. + P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + axis_labels (iterable[string]): List of axis labels to set. + + Returns: + fig: The matplotlib figure. 
+ """ # get figure object fig = plt.figure(fignum) axes = fig.gca(projection='3d') - plot_pose3_on_axes(axes, pose, P=P, axis_length=axis_length) + plot_pose3_on_axes(axes, pose, P=P, + axis_length=axis_length) + + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) + + return fig -def plot_trajectory(fignum, values, scale=1, marginals=None): - pose3Values = gtsam.allPose3s(values) +def plot_trajectory(fignum, values, scale=1, marginals=None, + title="Plot Trajectory", axis_labels=('X axis', 'Y axis', 'Z axis')): + """ + Plot a complete 3D trajectory using poses in `values`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + values (gtsam.Values): Values dict containing the poses. + scale (float): Value to scale the poses by. + marginals (gtsam.Marginals): Marginalized probability values of the estimation. + Used to plot uncertainty bounds. + title (string): The title of the plot. + axis_labels (iterable[string]): List of axis labels to set. + """ + pose3Values = gtsam.utilities_allPose3s(values) keys = gtsam.KeyVector(pose3Values.keys()) lastIndex = None @@ -209,11 +340,12 @@ def plot_trajectory(fignum, values, scale=1, marginals=None): pass if marginals: - P = marginals.marginalCovariance(lastKey) + covariance = marginals.marginalCovariance(lastKey) else: - P = None + covariance = None - plot_pose3(fignum, lastPose, P, scale) + fig = plot_pose3(fignum, lastPose, P=covariance, + axis_length=scale, axis_labels=axis_labels) lastIndex = i @@ -223,11 +355,15 @@ def plot_trajectory(fignum, values, scale=1, marginals=None): try: lastPose = pose3Values.atPose3(lastKey) if marginals: - P = marginals.marginalCovariance(lastKey) + covariance = marginals.marginalCovariance(lastKey) else: - P = None + covariance = None - plot_pose3(fignum, lastPose, P, scale) + fig = plot_pose3(fignum, lastPose, P=covariance, + axis_length=scale, axis_labels=axis_labels) except: pass + + fig.suptitle(title) + fig.canvas.set_window_title(title.lower()) From 48b0c845dc92d46389c8bd57097cd2ce80f644fe Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 27 Jul 2020 11:01:59 -0400 Subject: [PATCH 005/142] Sync ImuFactorExample2 --- .../python/examples/ImuFactorExample2.py | 70 +++++++++---------- 1 file changed, 33 insertions(+), 37 deletions(-) diff --git a/python/gtsam_py/python/examples/ImuFactorExample2.py b/python/gtsam_py/python/examples/ImuFactorExample2.py index 39e22eb17..4f4b02f63 100644 --- a/python/gtsam_py/python/examples/ImuFactorExample2.py +++ b/python/gtsam_py/python/examples/ImuFactorExample2.py @@ -8,22 +8,20 @@ from __future__ import print_function import math -import matplotlib.pyplot as plt -import numpy as np -from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 - import gtsam import gtsam.utils.plot as gtsam_plot - - -def X(key): - """Create symbol for pose key.""" - return gtsam.symbol('x', key) - - -def V(key): - """Create symbol for velocity key.""" - return gtsam.symbol('v', key) +import matplotlib.pyplot as plt +import numpy as np +from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, + ConstantTwistScenario, ImuFactor, NonlinearFactorGraph, + PinholeCameraCal3_S2, Point3, Pose3, + PriorFactorConstantBias, PriorFactorPose3, + PriorFactorVector, Rot3, Values) +from gtsam import symbol_shorthand +B = symbol_shorthand.B +V = symbol_shorthand.V +X = symbol_shorthand.X +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 def vector3(x, y, z): @@ -69,20 +67,19 @@ 
PARAMS.setUse2ndOrderCoriolis(False) PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) BIAS_COVARIANCE = gtsam.noiseModel.Isotropic.Variance(6, 0.1) -DELTA = gtsam.Pose3(gtsam.Rot3.Rodrigues(0, 0, 0), - gtsam.Point3(0.05, -0.10, 0.20)) +DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), + Point3(0.05, -0.10, 0.20)) def IMU_example(): """Run iSAM 2 example with IMU factor.""" - ZERO_BIAS = gtsam.imuBias.ConstantBias() # Start with a camera on x-axis looking at origin radius = 30 - up = gtsam.Point3(0, 0, 1) - target = gtsam.Point3(0, 0, 0) - position = gtsam.Point3(radius, 0, 0) - camera = gtsam.SimpleCamera.Lookat(position, target, up, gtsam.Cal3_S2()) + up = Point3(0, 0, 1) + target = Point3(0, 0, 0) + position = Point3(radius, 0, 0) + camera = PinholeCameraCal3_S2.Lookat(position, target, up, Cal3_S2()) pose_0 = camera.pose() # Create the set of ground-truth landmarks and poses @@ -91,37 +88,37 @@ def IMU_example(): angular_velocity_vector = vector3(0, -angular_velocity, 0) linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) - scenario = gtsam.ConstantTwistScenario( + scenario = ConstantTwistScenario( angular_velocity_vector, linear_velocity_vector, pose_0) # Create a factor graph - newgraph = gtsam.NonlinearFactorGraph() + newgraph = NonlinearFactorGraph() # Create (incremental) ISAM2 solver - isam = gtsam.ISAM2() + isam = ISAM2() # Create the initial estimate to the solution # Intentionally initialize the variables off from the ground truth - initialEstimate = gtsam.Values() + initialEstimate = Values() # Add a prior on pose x0. This indirectly specifies where the origin is. # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) - newgraph.push_back(gtsam.PriorFactorPose3(X(0), pose_0, noise)) + newgraph.push_back(PriorFactorPose3(X(0), pose_0, noise)) # Add imu priors - biasKey = gtsam.symbol('b', 0) + biasKey = B(0) biasnoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) - biasprior = gtsam.PriorFactorConstantBias(biasKey, ZERO_BIAS, - biasnoise) + biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias.ConstantBias(), + biasnoise) newgraph.push_back(biasprior) - initialEstimate.insert(biasKey, ZERO_BIAS) + initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) velnoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) # Calculate with correct initial velocity n_velocity = vector3(0, angular_velocity * radius, 0) - velprior = gtsam.PriorFactorVector(V(0), n_velocity, velnoise) + velprior = PriorFactorVector(V(0), n_velocity, velnoise) newgraph.push_back(velprior) initialEstimate.insert(V(0), n_velocity) @@ -142,10 +139,10 @@ def IMU_example(): # Add Bias variables periodically if i % 5 == 0: biasKey += 1 - factor = gtsam.BetweenFactorConstantBias( - biasKey - 1, biasKey, ZERO_BIAS, BIAS_COVARIANCE) + factor = BetweenFactorConstantBias( + biasKey - 1, biasKey, gtsam.imuBias.ConstantBias(), BIAS_COVARIANCE) newgraph.add(factor) - initialEstimate.insert(biasKey, ZERO_BIAS) + initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) # Predict acceleration and gyro measurements in (actual) body frame nRb = scenario.rotation(t).matrix() @@ -155,8 +152,7 @@ def IMU_example(): accum.integrateMeasurement(measuredAcc, measuredOmega, delta_t) # Add Imu Factor - imufac = gtsam.ImuFactor( - X(i - 1), V(i - 1), X(i), V(i), biasKey, accum) + imufac = ImuFactor(X(i - 1), V(i - 1), X(i), V(i), biasKey, accum) newgraph.add(imufac) # insert new velocity, which is wrong @@ -169,7 +165,7 @@ def IMU_example(): ISAM2_plot(result) # 
reset - newgraph = gtsam.NonlinearFactorGraph() + newgraph = NonlinearFactorGraph() initialEstimate.clear() From 128db80fec87a261a27bc3b55377b2c834ca9457 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 27 Jul 2020 11:17:06 -0400 Subject: [PATCH 006/142] Fix Pose2 plot --- python/gtsam_py/python/utils/plot.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/python/gtsam_py/python/utils/plot.py b/python/gtsam_py/python/utils/plot.py index 276cefd54..eef08ac88 100644 --- a/python/gtsam_py/python/utils/plot.py +++ b/python/gtsam_py/python/utils/plot.py @@ -109,7 +109,7 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global t = pose.translation() - origin = np.array([t.x(), t.y()]) + origin = np.array(t) # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -155,7 +155,6 @@ def plot_pose2(fignum, pose, axis_length=0.1, covariance=None, axes.set_xlabel(axis_labels[0]) axes.set_ylabel(axis_labels[1]) - axes.set_zlabel(axis_labels[2]) return fig @@ -170,7 +169,7 @@ def plot_point3_on_axes(axes, point, linespec, P=None): linespec (string): String representing formatting options for Matplotlib. P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. """ - axes.plot([point.x()], [point.y()], [point.z()], linespec) + axes.plot([point[0]], [point[1]], [point[2]], linespec) if P is not None: plot_covariance_ellipse_3d(axes, point, P) From 7b4266ed6be3b601b04dc7edba1df9957a270321 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 27 Jul 2020 14:35:28 -0400 Subject: [PATCH 007/142] Update to match cython --- .../python/examples/ImuFactorExample.py | 5 +-- .../python/examples/ImuFactorExample2.py | 5 +-- .../python/examples/PlanarSLAMExample.py | 11 ++++--- .../python/examples/Pose2SLAMExample_g2o.py | 2 +- .../python/examples/Pose3SLAMExample_g2o.py | 4 +-- .../python/examples/PreintegrationExample.py | 2 +- python/gtsam_py/python/examples/README.md | 14 +++----- python/gtsam_py/python/examples/SFMExample.py | 32 ++++++++----------- python/gtsam_py/python/utils/plot.py | 14 ++++---- 9 files changed, 37 insertions(+), 52 deletions(-) diff --git a/python/gtsam_py/python/examples/ImuFactorExample.py b/python/gtsam_py/python/examples/ImuFactorExample.py index e041afd87..1f8dfca07 100644 --- a/python/gtsam_py/python/examples/ImuFactorExample.py +++ b/python/gtsam_py/python/examples/ImuFactorExample.py @@ -17,10 +17,7 @@ import math import gtsam import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand -B = symbol_shorthand.B -V = symbol_shorthand.V -X = symbol_shorthand.X +from gtsam.gtsam.symbol_shorthand import B, V, X from gtsam.utils.plot import plot_pose3 from mpl_toolkits.mplot3d import Axes3D diff --git a/python/gtsam_py/python/examples/ImuFactorExample2.py b/python/gtsam_py/python/examples/ImuFactorExample2.py index 4f4b02f63..d5be69f47 100644 --- a/python/gtsam_py/python/examples/ImuFactorExample2.py +++ b/python/gtsam_py/python/examples/ImuFactorExample2.py @@ -17,10 +17,7 @@ from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, PinholeCameraCal3_S2, Point3, Pose3, PriorFactorConstantBias, PriorFactorPose3, PriorFactorVector, Rot3, Values) -from gtsam import symbol_shorthand -B = symbol_shorthand.B -V = symbol_shorthand.V -X = symbol_shorthand.X +from gtsam.gtsam.symbol_shorthand import B, V, X from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 diff --git 
a/python/gtsam_py/python/examples/PlanarSLAMExample.py b/python/gtsam_py/python/examples/PlanarSLAMExample.py index 0baf2fa9a..ba0529479 100644 --- a/python/gtsam_py/python/examples/PlanarSLAMExample.py +++ b/python/gtsam_py/python/examples/PlanarSLAMExample.py @@ -16,6 +16,7 @@ from __future__ import print_function import numpy as np import gtsam +from gtsam.gtsam.symbol_shorthand import X, L # Create noise models PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) @@ -26,11 +27,11 @@ MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2])) graph = gtsam.NonlinearFactorGraph() # Create the keys corresponding to unknown variables in the factor graph -X1 = gtsam.symbol('x', 1) -X2 = gtsam.symbol('x', 2) -X3 = gtsam.symbol('x', 3) -L1 = gtsam.symbol('l', 4) -L2 = gtsam.symbol('l', 5) +X1 = X(1) +X2 = X(2) +X3 = X(3) +L1 = L(4) +L2 = L(5) # Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model graph.add(gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE)) diff --git a/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py b/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py index d27adfc2c..1ea7fc300 100644 --- a/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py +++ b/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py @@ -83,7 +83,7 @@ else: print ("Done!") if args.plot: - resultPoses = gtsam.extractPose2(result) + resultPoses = gtsam.utilities.extractPose2(result) for i in range(resultPoses.shape[0]): plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :])) plt.show() diff --git a/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py b/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py index 20b02500e..c06a9d12f 100644 --- a/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py +++ b/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py @@ -44,7 +44,7 @@ priorModel = gtsam.noiseModel.Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, print("Adding prior to g2o file ") graphWithPrior = graph -firstKey = initial.keys().at(0) +firstKey = initial.keys()[0] graphWithPrior.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) params = gtsam.GaussNewtonParams() @@ -66,7 +66,7 @@ else: print ("Done!") if args.plot: - resultPoses = gtsam.allPose3s(result) + resultPoses = gtsam.utilities.allPose3s(result) for i in range(resultPoses.size()): plot.plot_pose3(1, resultPoses.atPose3(i)) plt.show() diff --git a/python/gtsam_py/python/examples/PreintegrationExample.py b/python/gtsam_py/python/examples/PreintegrationExample.py index e543e5d82..ee7f9cd8f 100644 --- a/python/gtsam_py/python/examples/PreintegrationExample.py +++ b/python/gtsam_py/python/examples/PreintegrationExample.py @@ -111,7 +111,7 @@ class PreintegrationExample(object): actualPose = self.scenario.pose(t) plot_pose3(POSES_FIG, actualPose, 0.3) t = actualPose.translation() - self.maxDim = max([max(t), self.maxDim]) + self.maxDim = max([max(np.abs(t)), self.maxDim]) ax = plt.gca() ax.set_xlim3d(-self.maxDim, self.maxDim) ax.set_ylim3d(-self.maxDim, self.maxDim) diff --git a/python/gtsam_py/python/examples/README.md b/python/gtsam_py/python/examples/README.md index ec4437d4c..46b87f079 100644 --- a/python/gtsam_py/python/examples/README.md +++ b/python/gtsam_py/python/examples/README.md @@ -1,18 +1,12 @@ -# THIS FILE IS OUTDATED! - -~~These examples are almost identical to the old handwritten python wrapper examples. 
However, there are just some slight name changes, for example `noiseModel.Diagonal` becomes `noiseModel.Diagonal` etc...~~ - -~~Also, annoyingly, instead of `gtsam.Symbol('b', 0)` we now need to say `gtsam.symbol('b', 0))`~~ - # Porting Progress | C++ Example Name | Ported | |-------------------------------------------------------|--------| | CameraResectioning | | | CreateSFMExampleData | | -| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through cython | -| easyPoint2KalmanFilter | ExtendedKalmanFilter not exposed through cython | -| elaboratePoint2KalmanFilter | GaussianSequentialSolver not exposed through cython | +| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through Python | +| easyPoint2KalmanFilter | ExtendedKalmanFilter not exposed through Python | +| elaboratePoint2KalmanFilter | GaussianSequentialSolver not exposed through Python | | ImuFactorExample2 | X | | ImuFactorsExample | | | ISAM2Example_SmartFactor | | @@ -26,7 +20,7 @@ | Pose2SLAMExample_g2o | X | | Pose2SLAMExample_graph | | | Pose2SLAMExample_graphviz | | -| Pose2SLAMExample_lago | lago not exposed through cython | +| Pose2SLAMExample_lago | lago not exposed through Python | | Pose2SLAMStressTest | | | Pose2SLAMwSPCG | | | Pose3SLAMExample_changeKeys | | diff --git a/python/gtsam_py/python/examples/SFMExample.py b/python/gtsam_py/python/examples/SFMExample.py index 0c03a105d..f0c4c82ba 100644 --- a/python/gtsam_py/python/examples/SFMExample.py +++ b/python/gtsam_py/python/examples/SFMExample.py @@ -10,24 +10,21 @@ A structure-from-motion problem on a simulated dataset """ from __future__ import print_function +import gtsam import matplotlib.pyplot as plt import numpy as np +from gtsam import symbol_shorthand +L = symbol_shorthand.L +X = symbol_shorthand.X -import gtsam from gtsam.examples import SFMdata -from gtsam.gtsam import (Cal3_S2, DoglegOptimizer, +from gtsam import (Cal3_S2, DoglegOptimizer, GenericProjectionFactorCal3_S2, Marginals, - NonlinearFactorGraph, Point3, Pose3, - PriorFactorPoint3, PriorFactorPose3, Rot3, - SimpleCamera, Values) + NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, + Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, Values) from gtsam.utils import plot -def symbol(name: str, index: int) -> int: - """ helper for creating a symbol without explicitly casting 'name' from str to int """ - return gtsam.symbol(name, index) - - def main(): """ Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y). @@ -74,23 +71,23 @@ def main(): # Add a prior on pose x1. This indirectly specifies where the origin is. # 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) - factor = PriorFactorPose3(symbol('x', 0), poses[0], pose_noise) + factor = PriorFactorPose3(X(0), poses[0], pose_noise) graph.push_back(factor) # Simulated measurements from each camera pose, adding them to the factor graph for i, pose in enumerate(poses): - camera = SimpleCamera(pose, K) + camera = PinholeCameraCal3_S2(pose, K) for j, point in enumerate(points): measurement = camera.project(point) factor = GenericProjectionFactorCal3_S2( - measurement, measurement_noise, symbol('x', i), symbol('l', j), K) + measurement, measurement_noise, X(i), L(j), K) graph.push_back(factor) # Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained # Here we add a prior on the position of the first landmark. 
This fixes the scale by indicating the distance # between the first camera and the first landmark. All other landmark positions are interpreted using this scale. point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) - factor = PriorFactorPoint3(symbol('l', 0), points[0], point_noise) + factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) graph.print_('Factor Graph:\n') @@ -99,10 +96,10 @@ def main(): initial_estimate = Values() for i, pose in enumerate(poses): transformed_pose = pose.retract(0.1*np.random.randn(6,1)) - initial_estimate.insert(symbol('x', i), transformed_pose) + initial_estimate.insert(X(i), transformed_pose) for j, point in enumerate(points): - transformed_point = Point3(point.vector() + 0.1*np.random.randn(3)) - initial_estimate.insert(symbol('l', j), transformed_point) + transformed_point = point + 0.1*np.random.randn(3) + initial_estimate.insert(L(j), transformed_point) initial_estimate.print_('Initial Estimates:\n') # Optimize the graph and print results @@ -121,6 +118,5 @@ def main(): plot.set_axes_equal(1) plt.show() - if __name__ == '__main__': main() diff --git a/python/gtsam_py/python/utils/plot.py b/python/gtsam_py/python/utils/plot.py index eef08ac88..ce602f506 100644 --- a/python/gtsam_py/python/utils/plot.py +++ b/python/gtsam_py/python/utils/plot.py @@ -221,9 +221,9 @@ def plot_3d_points(fignum, values, linespec="g*", marginals=None, keys = values.keys() # Plot points and covariance matrices - for i in range(keys.size()): + for i in range(len(keys)): try: - key = keys.at(i) + key = keys[i] point = values.atPoint3(key) if marginals is not None: covariance = marginals.marginalCovariance(key) @@ -319,19 +319,19 @@ def plot_trajectory(fignum, values, scale=1, marginals=None, title (string): The title of the plot. axis_labels (iterable[string]): List of axis labels to set. 
""" - pose3Values = gtsam.utilities_allPose3s(values) + pose3Values = gtsam.utilities.allPose3s(values) keys = gtsam.KeyVector(pose3Values.keys()) lastIndex = None - for i in range(keys.size()): - key = keys.at(i) + for i in range(len(keys)): + key = keys[i] try: pose = pose3Values.atPose3(key) except: print("Warning: no Pose3 at key: {0}".format(key)) if lastIndex is not None: - lastKey = keys.at(lastIndex) + lastKey = keys[lastIndex] try: lastPose = pose3Values.atPose3(lastKey) except: @@ -350,7 +350,7 @@ def plot_trajectory(fignum, values, scale=1, marginals=None, # Draw final pose if lastIndex is not None: - lastKey = keys.at(lastIndex) + lastKey = keys[lastIndex] try: lastPose = pose3Values.atPose3(lastKey) if marginals: From bc95b41efcb7f70eba92c13c3a5bad33ebeb6e75 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 27 Jul 2020 15:16:26 -0400 Subject: [PATCH 008/142] Update more to match cython --- .../python/examples/Pose2SLAMExample_g2o.py | 5 ++--- .../python/examples/Pose3SLAMExample_g2o.py | 9 ++++----- .../gtsam_py/python/examples/SimpleRotation.py | 4 ++-- .../python/examples/VisualISAM2Example.py | 18 ++++-------------- 4 files changed, 12 insertions(+), 24 deletions(-) diff --git a/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py b/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py index 1ea7fc300..b2ba9c5bc 100644 --- a/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py +++ b/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py @@ -53,16 +53,15 @@ graph, initial = gtsam.readG2o(g2oFile, is3D) assert args.kernel == "none", "Supplied kernel type is not yet implemented" # Add prior on the pose having index (key) = 0 -graphWithPrior = graph priorModel = gtsam.noiseModel.Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) -graphWithPrior.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) +graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) params = gtsam.GaussNewtonParams() params.setVerbosity("Termination") params.setMaxIterations(maxIterations) # parameters.setRelativeErrorTol(1e-5) # Create the optimizer ... -optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params) +optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) # ... 
and optimize result = optimizer.optimize() diff --git a/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py b/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py index c06a9d12f..82b3bda98 100644 --- a/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py +++ b/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py @@ -43,18 +43,17 @@ priorModel = gtsam.noiseModel.Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4)) print("Adding prior to g2o file ") -graphWithPrior = graph firstKey = initial.keys()[0] -graphWithPrior.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) +graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) params = gtsam.GaussNewtonParams() params.setVerbosity("Termination") # this will show info about stopping conds -optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params) +optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) result = optimizer.optimize() print("Optimization complete") -print("initial error = ", graphWithPrior.error(initial)) -print("final error = ", graphWithPrior.error(result)) +print("initial error = ", graph.error(initial)) +print("final error = ", graph.error(result)) if args.output is None: print("Final Result:\n{}".format(result)) diff --git a/python/gtsam_py/python/examples/SimpleRotation.py b/python/gtsam_py/python/examples/SimpleRotation.py index 2dd7b1194..de8ab07c3 100644 --- a/python/gtsam_py/python/examples/SimpleRotation.py +++ b/python/gtsam_py/python/examples/SimpleRotation.py @@ -12,7 +12,7 @@ a single variable with a single factor. import numpy as np import gtsam - +from gtsam.gtsam.symbol_shorthand import X def main(): """ @@ -33,7 +33,7 @@ def main(): prior = gtsam.Rot2.fromAngle(np.deg2rad(30)) prior.print_('goal angle') model = gtsam.noiseModel.Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) - key = gtsam.symbol('x', 1) + key = X(1) factor = gtsam.PriorFactorRot2(key, prior, model) """ diff --git a/python/gtsam_py/python/examples/VisualISAM2Example.py b/python/gtsam_py/python/examples/VisualISAM2Example.py index d33ec98bf..e4c0c8b8d 100644 --- a/python/gtsam_py/python/examples/VisualISAM2Example.py +++ b/python/gtsam_py/python/examples/VisualISAM2Example.py @@ -13,23 +13,13 @@ Author: Duy-Nguyen Ta (C++), Frank Dellaert (Python) from __future__ import print_function -import matplotlib.pyplot as plt -import numpy as np -from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 - import gtsam import gtsam.utils.plot as gtsam_plot +import matplotlib.pyplot as plt +import numpy as np +from gtsam.gtsam.symbol_shorthand import L, X from gtsam.examples import SFMdata - - -def X(i): - """Create key for pose i.""" - return int(gtsam.symbol('x', i)) - - -def L(j): - """Create key for landmark j.""" - return int(gtsam.symbol('l', j)) +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 def visual_ISAM2_plot(result): From 7114cf93d336ddf08d8e368e34882ed65de9d759 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 27 Jul 2020 20:35:23 -0500 Subject: [PATCH 009/142] update ImuFactorExample.py --- .../python/examples/ImuFactorExample.py | 138 +++++++++++------- 1 file changed, 84 insertions(+), 54 deletions(-) diff --git a/python/gtsam_py/python/examples/ImuFactorExample.py b/python/gtsam_py/python/examples/ImuFactorExample.py index 1f8dfca07..03b16098d 100644 --- a/python/gtsam_py/python/examples/ImuFactorExample.py +++ b/python/gtsam_py/python/examples/ImuFactorExample.py @@ -12,15 +12,17 @@ Author: Frank Dellaert, Varun Agrawal from __future__ import 
print_function +import argparse import math -import gtsam import matplotlib.pyplot as plt -import numpy as np -from gtsam.gtsam.symbol_shorthand import B, V, X - -from gtsam.utils.plot import plot_pose3 from mpl_toolkits.mplot3d import Axes3D +import numpy as np + +import gtsam +from gtsam.gtsam.symbol_shorthand import B, V, X +from gtsam.utils.plot import plot_pose3 + from PreintegrationExample import POSES_FIG, PreintegrationExample @@ -32,24 +34,27 @@ np.set_printoptions(precision=3, suppress=True) class ImuFactorExample(PreintegrationExample): - def __init__(self): + def __init__(self, twist_scenario="sick_twist"): self.velocity = np.array([2, 0, 0]) self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) # Choose one of these twists to change scenario: - zero_twist = (np.zeros(3), np.zeros(3)) - forward_twist = (np.zeros(3), self.velocity) - loop_twist = (np.array([0, -math.radians(30), 0]), self.velocity) - sick_twist = ( - np.array([math.radians(30), -math.radians(30), 0]), self.velocity) + twist_scenarios = dict( + zero_twist=(np.zeros(3), np.zeros(3)), + forward_twist=(np.zeros(3), self.velocity), + loop_twist=(np.array([0, -math.radians(30), 0]), self.velocity), + sick_twist=(np.array([math.radians(30), -math.radians(30), 0]), + self.velocity) + ) accBias = np.array([-0.3, 0.1, 0.2]) gyroBias = np.array([0.1, 0.3, -0.1]) bias = gtsam.imuBias.ConstantBias(accBias, gyroBias) dt = 1e-2 - super(ImuFactorExample, self).__init__(sick_twist, bias, dt) + super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario], + bias, dt) def addPrior(self, i, graph): state = self.scenario.navState(i) @@ -58,65 +63,73 @@ class ImuFactorExample(PreintegrationExample): graph.push_back(gtsam.PriorFactorVector( V(i), state.velocity(), self.velNoise)) - def run(self): + def run(self, T=12, compute_covariances=False, verbose=True): graph = gtsam.NonlinearFactorGraph() # initialize data structure for pre-integrated IMU measurements pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias) T = 12 - num_poses = T + 1 # assumes 1 factor per second + num_poses = T # assumes 1 factor per second initial = gtsam.Values() initial.insert(BIAS_KEY, self.actualBias) - for i in range(num_poses): - state_i = self.scenario.navState(float(i)) - - poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) - pose = state_i.pose().compose(poseNoise) - - velocity = state_i.velocity() + np.random.randn(3)*0.1 - - initial.insert(X(i), pose) - initial.insert(V(i), velocity) # simulate the loop i = 0 # state index - actual_state_i = self.scenario.navState(0) + initial_state_i = self.scenario.navState(0) + initial.insert(X(i), initial_state_i.pose()) + initial.insert(V(i), initial_state_i.velocity()) + + # add prior on beginning + self.addPrior(0, graph) + for k, t in enumerate(np.arange(0, T, self.dt)): # get measurements and add them to PIM measuredOmega = self.runner.measuredAngularVelocity(t) measuredAcc = self.runner.measuredSpecificForce(t) pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt) - poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) - - actual_state_i = gtsam.NavState( - actual_state_i.pose().compose(poseNoise), - actual_state_i.velocity() + np.random.randn(3)*0.1) - # Plot IMU many times if k % 10 == 0: self.plotImu(t, measuredOmega, measuredAcc) - # Plot every second - if k % int(1 / self.dt) == 0: - self.plotGroundTruthPose(t) + if (k+1) % int(1 / self.dt) == 0: + # Plot every second + self.plotGroundTruthPose(t, 
scale=1) + plt.title("Ground Truth Trajectory") - # create IMU factor every second - if (k + 1) % int(1 / self.dt) == 0: - factor = gtsam.ImuFactor(X(i), V(i), X( - i + 1), V(i + 1), BIAS_KEY, pim) + # create IMU factor every second + factor = gtsam.ImuFactor(X(i), V(i), + X(i + 1), V(i + 1), + BIAS_KEY, pim) graph.push_back(factor) - if True: + + if verbose: print(factor) print(pim.predict(actual_state_i, self.actualBias)) + pim.resetIntegration() + + rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3)*0.1) + translationNoise = gtsam.Point3(*np.random.randn(3)*1) + poseNoise = gtsam.Pose3(rotationNoise, translationNoise) + actual_state_i = self.scenario.navState(t + self.dt) + print("Actual state at {0}:\n{1}".format( + t+self.dt, actual_state_i)) + + noisy_state_i = gtsam.NavState( + actual_state_i.pose().compose(poseNoise), + actual_state_i.velocity() + np.random.randn(3)*0.1) + + initial.insert(X(i+1), noisy_state_i.pose()) + initial.insert(V(i+1), noisy_state_i.velocity()) i += 1 - # add priors on beginning and end - self.addPrior(0, graph) - self.addPrior(num_poses - 1, graph) + # add priors on end + # self.addPrior(num_poses - 1, graph) + + initial.print_("Initial values:") # optimize using Levenberg-Marquardt optimization params = gtsam.LevenbergMarquardtParams() @@ -124,29 +137,46 @@ class ImuFactorExample(PreintegrationExample): optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) result = optimizer.optimize() - # Calculate and print marginal covariances - marginals = gtsam.Marginals(graph, result) - print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY)) - for i in range(num_poses): - print("Covariance on pose {}:\n{}\n".format( - i, marginals.marginalCovariance(X(i)))) - print("Covariance on vel {}:\n{}\n".format( - i, marginals.marginalCovariance(V(i)))) + result.print_("Optimized values:") + + if compute_covariances: + # Calculate and print marginal covariances + marginals = gtsam.Marginals(graph, result) + print("Covariance on bias:\n", + marginals.marginalCovariance(BIAS_KEY)) + for i in range(num_poses): + print("Covariance on pose {}:\n{}\n".format( + i, marginals.marginalCovariance(X(i)))) + print("Covariance on vel {}:\n{}\n".format( + i, marginals.marginalCovariance(V(i)))) # Plot resulting poses i = 0 while result.exists(X(i)): pose_i = result.atPose3(X(i)) - plot_pose3(POSES_FIG, pose_i, 0.1) + plot_pose3(POSES_FIG+1, pose_i, 1) i += 1 + plt.title("Estimated Trajectory") - gtsam.utils.plot.set_axes_equal(POSES_FIG) + gtsam.utils.plot.set_axes_equal(POSES_FIG+1) - print(result.atConstantBias(BIAS_KEY)) + print("Bias Values", result.atConstantBias(BIAS_KEY)) plt.ioff() plt.show() if __name__ == '__main__': - ImuFactorExample().run() + parser = argparse.ArgumentParser("ImuFactorExample.py") + parser.add_argument("--twist_scenario", + default="sick_twist", + choices=("zero_twist", "forward_twist", "loop_twist", "sick_twist")) + parser.add_argument("--time", "-T", default=12, + type=int, help="Total time in seconds") + parser.add_argument("--compute_covariances", + default=False, action='store_true') + parser.add_argument("--verbose", default=False, action='store_true') + args = parser.parse_args() + + ImuFactorExample(args.twist_scenario).run( + args.time, args.compute_covariances, args.verbose) From e92c5e2ed42640df1991c675a83cc66ef988c2a9 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 27 Jul 2020 21:00:08 -0500 Subject: [PATCH 010/142] fix warnings from subplots and improve code --- .../python/examples/PreintegrationExample.py 
| 38 ++++++++++--------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/python/gtsam_py/python/examples/PreintegrationExample.py b/python/gtsam_py/python/examples/PreintegrationExample.py index ee7f9cd8f..e35722571 100644 --- a/python/gtsam_py/python/examples/PreintegrationExample.py +++ b/python/gtsam_py/python/examples/PreintegrationExample.py @@ -73,40 +73,43 @@ class PreintegrationExample(object): self.runner = gtsam.ScenarioRunner( self.scenario, self.params, self.dt, self.actualBias) + fig, self.axes = plt.subplots(4, 3) + fig.set_tight_layout(True) + def plotImu(self, t, measuredOmega, measuredAcc): plt.figure(IMU_FIG) # plot angular velocity omega_b = self.scenario.omega_b(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 1) - plt.scatter(t, omega_b[i], color='k', marker='.') - plt.scatter(t, measuredOmega[i], color=color, marker='.') - plt.xlabel('angular velocity ' + label) + ax = self.axes[0][i] + ax.scatter(t, omega_b[i], color='k', marker='.') + ax.scatter(t, measuredOmega[i], color=color, marker='.') + ax.set_xlabel('angular velocity ' + label) # plot acceleration in nav acceleration_n = self.scenario.acceleration_n(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 4) - plt.scatter(t, acceleration_n[i], color=color, marker='.') - plt.xlabel('acceleration in nav ' + label) + ax = self.axes[1][i] + ax.scatter(t, acceleration_n[i], color=color, marker='.') + ax.set_xlabel('acceleration in nav ' + label) # plot acceleration in body acceleration_b = self.scenario.acceleration_b(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 7) - plt.scatter(t, acceleration_b[i], color=color, marker='.') - plt.xlabel('acceleration in body ' + label) + ax = self.axes[2][i] + ax.scatter(t, acceleration_b[i], color=color, marker='.') + ax.set_xlabel('acceleration in body ' + label) # plot actual specific force, as well as corrupted actual = self.runner.actualSpecificForce(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 10) - plt.scatter(t, actual[i], color='k', marker='.') - plt.scatter(t, measuredAcc[i], color=color, marker='.') - plt.xlabel('specific force ' + label) + ax = self.axes[3][i] + ax.scatter(t, actual[i], color='k', marker='.') + ax.scatter(t, measuredAcc[i], color=color, marker='.') + ax.set_xlabel('specific force ' + label) - def plotGroundTruthPose(self, t): + def plotGroundTruthPose(self, t, scale=0.3, time_interval=0.01): # plot ground truth pose, as well as prediction from integrated IMU measurements actualPose = self.scenario.pose(t) plot_pose3(POSES_FIG, actualPose, 0.3) @@ -117,11 +120,10 @@ class PreintegrationExample(object): ax.set_ylim3d(-self.maxDim, self.maxDim) ax.set_zlim3d(-self.maxDim, self.maxDim) - plt.pause(0.01) + plt.pause(time_interval) - def run(self): + def run(self, T=12): # simulate the loop - T = 12 for i, t in enumerate(np.arange(0, T, self.dt)): measuredOmega = self.runner.measuredAngularVelocity(t) measuredAcc = self.runner.measuredSpecificForce(t) From 858f5d42d3757372eaf5835f4d513ade436918fa Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 27 Jul 2020 21:00:24 -0500 Subject: [PATCH 011/142] add incremental plotting function --- python/gtsam_py/python/utils/plot.py | 38 ++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/python/gtsam_py/python/utils/plot.py b/python/gtsam_py/python/utils/plot.py index ce602f506..90f5fe5e7 100644 --- 
a/python/gtsam_py/python/utils/plot.py +++ b/python/gtsam_py/python/utils/plot.py @@ -366,3 +366,41 @@ def plot_trajectory(fignum, values, scale=1, marginals=None, fig.suptitle(title) fig.canvas.set_window_title(title.lower()) + + +def plot_incremental_trajectory(fignum, values, start=0, + scale=1, marginals=None, + time_interval=0.0): + """ + Incrementally plot a complete 3D trajectory using poses in `values`. + + Args: + fignum (int): Integer representing the figure number to use for plotting. + values (gtsam.Values): Values dict containing the poses. + start (int): Starting index to start plotting from. + scale (float): Value to scale the poses by. + marginals (gtsam.Marginals): Marginalized probability values of the estimation. + Used to plot uncertainty bounds. + time_interval (float): Time in seconds to pause between each rendering. + Used to create animation effect. + """ + fig = plt.figure(fignum) + axes = fig.gca(projection='3d') + + pose3Values = gtsam.utilities.allPose3s(values) + keys = gtsam.KeyVector(pose3Values.keys()) + + for i in range(start, len(keys)): + key = keys[i] + if values.exists(key): + pose_i = values.atPose3(keys[i]) + plot_pose3(fignum, pose_i, scale) + + # Update the plot space to encompass all plotted points + axes.autoscale() + + # Set the 3 axes equal + set_axes_equal(fignum) + + # Pause for a fixed amount of seconds + plt.pause(time_interval) \ No newline at end of file From 0b550b355fa040a3916605eb859efdf8dbbb7e7c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 27 Jul 2020 21:00:44 -0500 Subject: [PATCH 012/142] update ImuFactorExample2.py --- .../python/examples/ImuFactorExample2.py | 117 +++++++++--------- 1 file changed, 57 insertions(+), 60 deletions(-) diff --git a/python/gtsam_py/python/examples/ImuFactorExample2.py b/python/gtsam_py/python/examples/ImuFactorExample2.py index d5be69f47..969d4fe6b 100644 --- a/python/gtsam_py/python/examples/ImuFactorExample2.py +++ b/python/gtsam_py/python/examples/ImuFactorExample2.py @@ -1,6 +1,6 @@ """ iSAM2 example with ImuFactor. 
-Author: Robert Truax (C++), Frank Dellaert (Python) +Author: Frank Dellaert, Varun Agrawal """ # pylint: disable=invalid-name, E1101 @@ -8,17 +8,18 @@ from __future__ import print_function import math -import gtsam -import gtsam.utils.plot as gtsam_plot import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 import numpy as np + +import gtsam from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, ConstantTwistScenario, ImuFactor, NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, Pose3, PriorFactorConstantBias, PriorFactorPose3, PriorFactorVector, Rot3, Values) from gtsam.gtsam.symbol_shorthand import B, V, X -from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 +from gtsam.utils import plot def vector3(x, y, z): @@ -26,46 +27,45 @@ def vector3(x, y, z): return np.array([x, y, z], dtype=np.float) -def ISAM2_plot(values, fignum=0): - """Plot poses.""" - fig = plt.figure(fignum) - axes = fig.gca(projection='3d') - plt.cla() - - i = 0 - min_bounds = 0, 0, 0 - max_bounds = 0, 0, 0 - while values.exists(X(i)): - pose_i = values.atPose3(X(i)) - gtsam_plot.plot_pose3(fignum, pose_i, 10) - position = pose_i.translation() - min_bounds = [min(v1, v2) for (v1, v2) in zip(position, min_bounds)] - max_bounds = [max(v1, v2) for (v1, v2) in zip(position, max_bounds)] - # max_bounds = min(pose_i.x(), max_bounds[0]), 0, 0 - i += 1 - - # draw - axes.set_xlim3d(min_bounds[0]-1, max_bounds[0]+1) - axes.set_ylim3d(min_bounds[1]-1, max_bounds[1]+1) - axes.set_zlim3d(min_bounds[2]-1, max_bounds[2]+1) - plt.pause(1) - - -# IMU preintegration parameters -# Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis g = 9.81 n_gravity = vector3(0, 0, -g) -PARAMS = gtsam.PreintegrationParams.MakeSharedU(g) -I = np.eye(3) -PARAMS.setAccelerometerCovariance(I * 0.1) -PARAMS.setGyroscopeCovariance(I * 0.1) -PARAMS.setIntegrationCovariance(I * 0.1) -PARAMS.setUse2ndOrderCoriolis(False) -PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) -BIAS_COVARIANCE = gtsam.noiseModel.Isotropic.Variance(6, 0.1) -DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), - Point3(0.05, -0.10, 0.20)) + +def preintegration_parameters(): + # IMU preintegration parameters + # Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis + PARAMS = gtsam.PreintegrationParams.MakeSharedU(g) + I = np.eye(3) + PARAMS.setAccelerometerCovariance(I * 0.1) + PARAMS.setGyroscopeCovariance(I * 0.1) + PARAMS.setIntegrationCovariance(I * 0.1) + PARAMS.setUse2ndOrderCoriolis(False) + PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) + + BIAS_COVARIANCE = gtsam.noiseModel.Isotropic.Variance(6, 0.1) + DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), + Point3(0.05, -0.10, 0.20)) + + return PARAMS, BIAS_COVARIANCE, DELTA + + +def get_camera(radius): + up = Point3(0, 0, 1) + target = Point3(0, 0, 0) + position = Point3(radius, 0, 0) + camera = PinholeCameraCal3_S2.Lookat(position, target, up, Cal3_S2()) + return camera + + +def get_scenario(radius, pose_0, angular_velocity, delta_t): + """Create the set of ground-truth landmarks and poses""" + + angular_velocity_vector = vector3(0, -angular_velocity, 0) + linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) + scenario = ConstantTwistScenario( + angular_velocity_vector, linear_velocity_vector, pose_0) + + return scenario def IMU_example(): @@ -73,23 +73,17 @@ def IMU_example(): # Start with a camera on x-axis looking at origin radius = 30 - up = Point3(0, 0, 1) - target = Point3(0, 0, 0) - position = Point3(radius, 
0, 0) - camera = PinholeCameraCal3_S2.Lookat(position, target, up, Cal3_S2()) + camera = get_camera(radius) pose_0 = camera.pose() - # Create the set of ground-truth landmarks and poses - angular_velocity = math.radians(180) # rad/sec delta_t = 1.0/18 # makes for 10 degrees per step + angular_velocity = math.radians(180) # rad/sec + scenario = get_scenario(radius, pose_0, angular_velocity, delta_t) - angular_velocity_vector = vector3(0, -angular_velocity, 0) - linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) - scenario = ConstantTwistScenario( - angular_velocity_vector, linear_velocity_vector, pose_0) + PARAMS, BIAS_COVARIANCE, DELTA = preintegration_parameters() # Create a factor graph - newgraph = NonlinearFactorGraph() + graph = NonlinearFactorGraph() # Create (incremental) ISAM2 solver isam = ISAM2() @@ -102,21 +96,21 @@ def IMU_example(): # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) - newgraph.push_back(PriorFactorPose3(X(0), pose_0, noise)) + graph.push_back(PriorFactorPose3(X(0), pose_0, noise)) # Add imu priors biasKey = B(0) biasnoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias.ConstantBias(), biasnoise) - newgraph.push_back(biasprior) + graph.push_back(biasprior) initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) velnoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) # Calculate with correct initial velocity n_velocity = vector3(0, angular_velocity * radius, 0) velprior = PriorFactorVector(V(0), n_velocity, velnoise) - newgraph.push_back(velprior) + graph.push_back(velprior) initialEstimate.insert(V(0), n_velocity) accum = gtsam.PreintegratedImuMeasurements(PARAMS) @@ -138,7 +132,7 @@ def IMU_example(): biasKey += 1 factor = BetweenFactorConstantBias( biasKey - 1, biasKey, gtsam.imuBias.ConstantBias(), BIAS_COVARIANCE) - newgraph.add(factor) + graph.add(factor) initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) # Predict acceleration and gyro measurements in (actual) body frame @@ -150,21 +144,24 @@ def IMU_example(): # Add Imu Factor imufac = ImuFactor(X(i - 1), V(i - 1), X(i), V(i), biasKey, accum) - newgraph.add(imufac) + graph.add(imufac) # insert new velocity, which is wrong initialEstimate.insert(V(i), n_velocity) accum.resetIntegration() # Incremental solution - isam.update(newgraph, initialEstimate) + isam.update(graph, initialEstimate) result = isam.calculateEstimate() - ISAM2_plot(result) + plot.plot_incremental_trajectory(0, result, + start=i, scale=3, time_interval=0.01) # reset - newgraph = NonlinearFactorGraph() + graph = NonlinearFactorGraph() initialEstimate.clear() + plt.show() + if __name__ == '__main__': IMU_example() From c8806dcb24f0aef8ed4c4712db1af323980fc535 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 27 Jul 2020 21:01:38 -0500 Subject: [PATCH 013/142] rename ImuFactorExample2 to more descriptive name --- .../examples/{ImuFactorExample2.py => ImuFactorISAM2Example.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename python/gtsam_py/python/examples/{ImuFactorExample2.py => ImuFactorISAM2Example.py} (100%) diff --git a/python/gtsam_py/python/examples/ImuFactorExample2.py b/python/gtsam_py/python/examples/ImuFactorISAM2Example.py similarity index 100% rename from python/gtsam_py/python/examples/ImuFactorExample2.py rename to python/gtsam_py/python/examples/ImuFactorISAM2Example.py From 95d3582c2eddc8b477fd511b91c7d222d548b010 Mon Sep 17 00:00:00 2001 From: Varun 
Agrawal Date: Mon, 27 Jul 2020 21:25:27 -0500 Subject: [PATCH 014/142] replaced SimpleCamera with PinholeCamera and updated tests --- cython/gtsam/examples/SFMExample.py | 2 +- .../python/tests/test_GaussianFactorGraph.py | 9 ++++---- .../python/tests/test_NonlinearOptimizer.py | 15 +++++++++++-- .../gtsam_py/python/tests/test_PriorFactor.py | 22 +++++++++++++++++++ .../gtsam_py/python/tests/test_SFMExample.py | 7 +++--- .../python/utils/visual_data_generator.py | 9 ++++---- 6 files changed, 50 insertions(+), 14 deletions(-) diff --git a/cython/gtsam/examples/SFMExample.py b/cython/gtsam/examples/SFMExample.py index e02def2f9..68053f862 100644 --- a/cython/gtsam/examples/SFMExample.py +++ b/cython/gtsam/examples/SFMExample.py @@ -20,7 +20,7 @@ from gtsam.gtsam import (Cal3_S2, DoglegOptimizer, GenericProjectionFactorCal3_S2, Marginals, NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, - SimpleCamera, Values) + PinholeCameraCal3_S2, Values) from gtsam.utils import plot diff --git a/python/gtsam_py/python/tests/test_GaussianFactorGraph.py b/python/gtsam_py/python/tests/test_GaussianFactorGraph.py index 759de0f3b..d520b0fa4 100644 --- a/python/gtsam_py/python/tests/test_GaussianFactorGraph.py +++ b/python/gtsam_py/python/tests/test_GaussianFactorGraph.py @@ -15,17 +15,18 @@ from __future__ import print_function import unittest import gtsam +import numpy as np +from gtsam.gtsam.symbol_shorthand import X from gtsam.utils.test_case import GtsamTestCase -import numpy as np def create_graph(): """Create a basic linear factor graph for testing""" graph = gtsam.GaussianFactorGraph() - x0 = gtsam.symbol('x', 0) - x1 = gtsam.symbol('x', 1) - x2 = gtsam.symbol('x', 2) + x0 = X(0) + x1 = X(1) + x2 = X(2) BETWEEN_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) diff --git a/python/gtsam_py/python/tests/test_NonlinearOptimizer.py b/python/gtsam_py/python/tests/test_NonlinearOptimizer.py index 3041e325c..e9234a43b 100644 --- a/python/gtsam_py/python/tests/test_NonlinearOptimizer.py +++ b/python/gtsam_py/python/tests/test_NonlinearOptimizer.py @@ -15,10 +15,11 @@ from __future__ import print_function import unittest import gtsam -from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer, +from gtsam import (DoglegOptimizer, DoglegParams, + DummyPreconditionerParameters, GaussNewtonOptimizer, GaussNewtonParams, LevenbergMarquardtOptimizer, LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, - Point2, PriorFactorPoint2, Values) + PCGSolverParameters, Point2, PriorFactorPoint2, Values) from gtsam.utils.test_case import GtsamTestCase KEY1 = 1 @@ -61,6 +62,16 @@ class TestScenario(GtsamTestCase): fg, initial_values, lmParams).optimize() self.assertAlmostEqual(0, fg.error(actual2)) + # Levenberg-Marquardt + lmParams = LevenbergMarquardtParams.CeresDefaults() + lmParams.setLinearSolverType("ITERATIVE") + cgParams = PCGSolverParameters() + cgParams.setPreconditionerParams(DummyPreconditionerParameters()) + lmParams.setIterativeParams(cgParams) + actual2 = LevenbergMarquardtOptimizer( + fg, initial_values, lmParams).optimize() + self.assertAlmostEqual(0, fg.error(actual2)) + # Dogleg dlParams = DoglegParams() dlParams.setOrdering(ordering) diff --git a/python/gtsam_py/python/tests/test_PriorFactor.py b/python/gtsam_py/python/tests/test_PriorFactor.py index da072d0bd..0582cf5d7 100644 --- a/python/gtsam_py/python/tests/test_PriorFactor.py +++ 
b/python/gtsam_py/python/tests/test_PriorFactor.py @@ -35,5 +35,27 @@ class TestPriorFactor(GtsamTestCase): values.insert(key, priorVector) self.assertEqual(factor.error(values), 0) + def test_AddPrior(self): + """ + Test adding prior factors directly to factor graph via the .addPrior method. + """ + # define factor graph + graph = gtsam.NonlinearFactorGraph() + + # define and add Pose3 prior + key = 5 + priorPose3 = gtsam.Pose3() + model = gtsam.noiseModel.Unit.Create(6) + graph.addPriorPose3(key, priorPose3, model) + self.assertEqual(graph.size(), 1) + + # define and add Vector prior + key = 3 + priorVector = np.array([0., 0., 0.]) + model = gtsam.noiseModel.Unit.Create(3) + graph.addPriorVector(key, priorVector, model) + self.assertEqual(graph.size(), 2) + + if __name__ == "__main__": unittest.main() diff --git a/python/gtsam_py/python/tests/test_SFMExample.py b/python/gtsam_py/python/tests/test_SFMExample.py index 8b5473ff0..1425902cf 100644 --- a/python/gtsam_py/python/tests/test_SFMExample.py +++ b/python/gtsam_py/python/tests/test_SFMExample.py @@ -15,6 +15,7 @@ import numpy as np import gtsam import gtsam.utils.visual_data_generator as generator from gtsam import symbol +from gtsam.gtsam.noiseModel improt Isotropic, Diagonal from gtsam.utils.test_case import GtsamTestCase @@ -34,7 +35,7 @@ class TestSFMExample(GtsamTestCase): graph = gtsam.NonlinearFactorGraph() # Add factors for all measurements - measurementNoise = gtsam.noiseModel.Isotropic.Sigma(2, measurementNoiseSigma) + measurementNoise = Isotropic.Sigma(2, measurementNoiseSigma) for i in range(len(data.Z)): for k in range(len(data.Z[i])): j = data.J[i][k] @@ -42,10 +43,10 @@ class TestSFMExample(GtsamTestCase): data.Z[i][k], measurementNoise, symbol('x', i), symbol('p', j), data.K)) - posePriorNoise = gtsam.noiseModel.Diagonal.Sigmas(poseNoiseSigmas) + posePriorNoise = Diagonal.Sigmas(poseNoiseSigmas) graph.add(gtsam.PriorFactorPose3(symbol('x', 0), truth.cameras[0].pose(), posePriorNoise)) - pointPriorNoise = gtsam.noiseModel.Isotropic.Sigma(3, pointNoiseSigma) + pointPriorNoise = Isotropic.Sigma(3, pointNoiseSigma) graph.add(gtsam.PriorFactorPoint3(symbol('p', 0), truth.points[0], pointPriorNoise)) diff --git a/python/gtsam_py/python/utils/visual_data_generator.py b/python/gtsam_py/python/utils/visual_data_generator.py index bc60dcd70..24404ca03 100644 --- a/python/gtsam_py/python/utils/visual_data_generator.py +++ b/python/gtsam_py/python/utils/visual_data_generator.py @@ -3,6 +3,7 @@ from __future__ import print_function import numpy as np from math import pi, cos, sin import gtsam +from gtsam import PinholeCameraCal3_S2 class Options: @@ -99,10 +100,10 @@ def generate_data(options): for i in range(options.nrCameras): theta = i * 2 * pi / options.nrCameras t = gtsam.Point3(r * cos(theta), r * sin(theta), height) - truth.cameras[i] = gtsam.SimpleCamera.Lookat(t, - gtsam.Point3(0, 0, 0), - gtsam.Point3(0, 0, 1), - truth.K) + truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, + gtsam.Point3(0, 0, 0), + gtsam.Point3(0, 0, 1), + truth.K) # Create measurements for j in range(nrPoints): # All landmarks seen in every frame From 95b77f76347a653536cec52507241f766c54edee Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 27 Jul 2020 21:25:44 -0500 Subject: [PATCH 015/142] sort imports in python examples --- .../python/examples/ImuFactorExample.py | 8 +++----- .../python/examples/ImuFactorISAM2Example.py | 7 +++---- .../python/examples/PreintegrationExample.py | 5 ++--- .../python/examples/VisualISAMExample.py | 20 
+++++++++++-------- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/python/gtsam_py/python/examples/ImuFactorExample.py b/python/gtsam_py/python/examples/ImuFactorExample.py index 03b16098d..f9ff44738 100644 --- a/python/gtsam_py/python/examples/ImuFactorExample.py +++ b/python/gtsam_py/python/examples/ImuFactorExample.py @@ -15,14 +15,12 @@ from __future__ import print_function import argparse import math -import matplotlib.pyplot as plt -from mpl_toolkits.mplot3d import Axes3D -import numpy as np - import gtsam +import matplotlib.pyplot as plt +import numpy as np from gtsam.gtsam.symbol_shorthand import B, V, X from gtsam.utils.plot import plot_pose3 - +from mpl_toolkits.mplot3d import Axes3D from PreintegrationExample import POSES_FIG, PreintegrationExample diff --git a/python/gtsam_py/python/examples/ImuFactorISAM2Example.py b/python/gtsam_py/python/examples/ImuFactorISAM2Example.py index 969d4fe6b..6556eb554 100644 --- a/python/gtsam_py/python/examples/ImuFactorISAM2Example.py +++ b/python/gtsam_py/python/examples/ImuFactorISAM2Example.py @@ -8,11 +8,9 @@ from __future__ import print_function import math -import matplotlib.pyplot as plt -from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 -import numpy as np - import gtsam +import matplotlib.pyplot as plt +import numpy as np from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, ConstantTwistScenario, ImuFactor, NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, Pose3, @@ -20,6 +18,7 @@ from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, PriorFactorVector, Rot3, Values) from gtsam.gtsam.symbol_shorthand import B, V, X from gtsam.utils import plot +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 def vector3(x, y, z): diff --git a/python/gtsam_py/python/examples/PreintegrationExample.py b/python/gtsam_py/python/examples/PreintegrationExample.py index e35722571..e308bc933 100644 --- a/python/gtsam_py/python/examples/PreintegrationExample.py +++ b/python/gtsam_py/python/examples/PreintegrationExample.py @@ -10,12 +10,11 @@ A script validating the Preintegration of IMU measurements import math +import gtsam import matplotlib.pyplot as plt import numpy as np -from mpl_toolkits.mplot3d import Axes3D - -import gtsam from gtsam.utils.plot import plot_pose3 +from mpl_toolkits.mplot3d import Axes3D IMU_FIG = 1 POSES_FIG = 2 diff --git a/python/gtsam_py/python/examples/VisualISAMExample.py b/python/gtsam_py/python/examples/VisualISAMExample.py index 2e90ce21b..7d562c9b4 100644 --- a/python/gtsam_py/python/examples/VisualISAMExample.py +++ b/python/gtsam_py/python/examples/VisualISAMExample.py @@ -16,9 +16,9 @@ import numpy as np import gtsam from gtsam.examples import SFMdata from gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, - NonlinearFactorGraph, NonlinearISAM, Pose3, - PriorFactorPoint3, PriorFactorPose3, Rot3, - SimpleCamera, Values, Point3) + NonlinearFactorGraph, NonlinearISAM, Pose3, + PriorFactorPoint3, PriorFactorPose3, Rot3, + PinholeCameraCal3_S2, Values, Point3) def symbol(name: str, index: int) -> int: @@ -37,7 +37,8 @@ def main(): K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - camera_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v + camera_noise = gtsam.noiseModel.Isotropic.Sigma( + 2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() @@ -54,15 +55,17 @@ def main(): # Loop over the different poses, adding the observations to iSAM incrementally for 
i, pose in enumerate(poses): - camera = SimpleCamera(pose, K) + camera = PinholeCameraCal3_S2(pose, K) # Add factors for each landmark observation for j, point in enumerate(points): measurement = camera.project(point) - factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, symbol('x', i), symbol('l', j), K) + factor = GenericProjectionFactorCal3_S2( + measurement, camera_noise, symbol('x', i), symbol('l', j), K) graph.push_back(factor) # Intentionally initialize the variables off from the ground truth - noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), t=Point3(0.05, -0.10, 0.20)) + noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), + t=Point3(0.05, -0.10, 0.20)) initial_xi = pose.compose(noise) # Add an initial guess for the current pose @@ -74,7 +77,8 @@ def main(): # adding it to iSAM. if i == 0: # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z - pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + pose_noise = gtsam.noiseModel.Diagonal.Sigmas( + np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) factor = PriorFactorPose3(symbol('x', 0), poses[0], pose_noise) graph.push_back(factor) From b6ab778e96ff299d0853c4a4e89d834c3cca48b3 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Tue, 28 Jul 2020 11:05:24 -0400 Subject: [PATCH 016/142] Fix import typo --- python/gtsam_py/python/tests/test_SFMExample.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/gtsam_py/python/tests/test_SFMExample.py b/python/gtsam_py/python/tests/test_SFMExample.py index 1425902cf..295bf0b5e 100644 --- a/python/gtsam_py/python/tests/test_SFMExample.py +++ b/python/gtsam_py/python/tests/test_SFMExample.py @@ -15,7 +15,7 @@ import numpy as np import gtsam import gtsam.utils.visual_data_generator as generator from gtsam import symbol -from gtsam.gtsam.noiseModel improt Isotropic, Diagonal +from gtsam.gtsam.noiseModel import Isotropic, Diagonal from gtsam.utils.test_case import GtsamTestCase From 08da0ab5a607086d605cc1470bff0344c5549542 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Tue, 28 Jul 2020 11:16:58 -0400 Subject: [PATCH 017/142] Change to new KeyVector accessor --- .../python/examples/Pose3SLAMExample_initializePose3Chordal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py b/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py index 140669c95..2b2c5f991 100644 --- a/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py +++ b/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py @@ -26,7 +26,7 @@ graph, initial = gtsam.readG2o(g2oFile, is3D) # Add prior on the first key. 
TODO: assumes first key ios z priorModel = gtsam.noiseModel.Diagonal.Variances( np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4])) -firstKey = initial.keys().at(0) +firstKey = initial.keys()[0] graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel)) # Initializing Pose3 - chordal relaxation" From e9deca590ae3809b824e226a23af6bf1a7332058 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Wed, 29 Jul 2020 13:54:49 -0400 Subject: [PATCH 018/142] Sync with varun's pr --- python/gtsam_py/python/__init__.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/python/gtsam_py/python/__init__.py b/python/gtsam_py/python/__init__.py index 9e3e19187..ee00905cf 100644 --- a/python/gtsam_py/python/__init__.py +++ b/python/gtsam_py/python/__init__.py @@ -1,9 +1,23 @@ from .gtsam import * -def Point2(x=0, y=0): - import numpy as np - return np.array([x, y], dtype=float) -def Point3(x=0, y=0, z=0): +def _init(): import numpy as np - return np.array([x, y, z], dtype=float) + + global Point2 # export function + + def Point2(x=0, y=0): + return np.array([x, y], dtype=float) + + global Point3 # export function + + def Point3(x=0, y=0, z=0): + return np.array([x, y, z], dtype=float) + + # for interactive debugging + if __name__ == "__main__": + # we want all definitions accessible + globals().update(locals()) + + +_init() From 851d4a4af4732ef6ebbca0583b592ad393e8006c Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Thu, 30 Jul 2020 00:12:13 -0700 Subject: [PATCH 019/142] add binary measurement class --- gtsam/sfm/BinaryMeasurement.h | 92 +++++++++++++++++++++++ gtsam/sfm/TranslationFactor.h | 2 + gtsam/sfm/TranslationRecovery.cpp | 28 +++---- gtsam/sfm/TranslationRecovery.h | 13 ++-- gtsam/sfm/tests/testBinaryMeasurement.cpp | 61 +++++++++++++++ 5 files changed, 174 insertions(+), 22 deletions(-) create mode 100644 gtsam/sfm/BinaryMeasurement.h create mode 100644 gtsam/sfm/tests/testBinaryMeasurement.cpp diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h new file mode 100644 index 000000000..41ffa38d6 --- /dev/null +++ b/gtsam/sfm/BinaryMeasurement.h @@ -0,0 +1,92 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +#pragma once + +/** + * @file BinaryMeasurement.h + * @author Akshay Krishnan + * @date July 2020 + * @brief Binary measurement for representing edges in the epipolar graph + */ + +#include +#include + +namespace gtsam { + +template +class BinaryMeasurement { + // Check that VALUE type is testable + BOOST_CONCEPT_ASSERT((IsTestable)); + +public: + typedef VALUE T; + + // shorthand for a smart pointer to a measurement + typedef typename boost::shared_ptr shared_ptr; + +private: + Key key1_, key2_; /** Keys */ + + VALUE measured_; /** The measurement */ + + SharedNoiseModel noiseModel_; /** Noise model */ + +public: + /** default constructor - only use for serialization */ + BinaryMeasurement() {} + + /** Constructor */ + BinaryMeasurement(Key key1, Key key2, const VALUE& measured, + const SharedNoiseModel& model = nullptr) : + key1_(key1), key2_(key2), measured_(measured), noiseModel_(model) { + } + + virtual ~BinaryMeasurement() {} + + Key key1() const { return key1_; } + + Key key2() const { return key2_; } + + const SharedNoiseModel& noiseModel() const { return noiseModel_; } + + /** implement functions needed for Testable */ + + /** print */ + void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + std::cout << s << "BinaryMeasurement(" + << keyFormatter(this->key1()) << "," + << keyFormatter(this->key2()) << ")\n"; + traits::Print(measured_, " measured: "); + this->noiseModel_->print(" noise model: "); + } + + /** equals */ + bool equals(const BetweenMeasurement& expected, double tol=1e-9) const { + const BetweenMeasurement *e = dynamic_cast*> (&expected); + return e != nullptr && key1_ == expected.key1() && + key2_ == expected.key2() + && traits::Equals(this->measured_, e->measured_, tol) && + noiseModel_.equals(expected.noiseModel()); + } + + /** return the measured */ + const VALUE& measured() const { + return measured_; + } + + /** number of variables attached to this measurement */ + std::size_t size() const { + return 2; + } + }; // \class BetweenMeasurement +} \ No newline at end of file diff --git a/gtsam/sfm/TranslationFactor.h b/gtsam/sfm/TranslationFactor.h index d63633d7e..d1f85cf01 100644 --- a/gtsam/sfm/TranslationFactor.h +++ b/gtsam/sfm/TranslationFactor.h @@ -36,6 +36,8 @@ namespace gtsam { * normalized(Tb - Ta) - w_aZb.point3() * * @addtogroup SFM + * + * */ class TranslationFactor : public NoiseModelFactor2 { private: diff --git a/gtsam/sfm/TranslationRecovery.cpp b/gtsam/sfm/TranslationRecovery.cpp index aeeae688f..c3be003ba 100644 --- a/gtsam/sfm/TranslationRecovery.cpp +++ b/gtsam/sfm/TranslationRecovery.cpp @@ -33,15 +33,12 @@ using namespace gtsam; using namespace std; NonlinearFactorGraph TranslationRecovery::buildGraph() const { - auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01); NonlinearFactorGraph graph; // Add all relative translation edges for (auto edge : relativeTranslations_) { - Key a, b; - tie(a, b) = edge.first; - const Unit3 w_aZb = edge.second; - graph.emplace_shared(a, b, w_aZb, noiseModel); + graph.emplace_shared(edge.key1(), edge.key2(), + edge.measured(), edge.noiseModel()); } return graph; @@ -49,14 +46,12 @@ NonlinearFactorGraph TranslationRecovery::buildGraph() const { void TranslationRecovery::addPrior(const double scale, NonlinearFactorGraph* graph) const { - auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01); + 
//TODO(akshay-krishnan): make this an input argument + auto priorNoiseModel = noiseModel::Isotropic::Sigma(3, 0.01); auto edge = relativeTranslations_.begin(); - Key a, b; - tie(a, b) = edge->first; - const Unit3 w_aZb = edge->second; - graph->emplace_shared >(a, Point3(0, 0, 0), noiseModel); - graph->emplace_shared >(b, scale * w_aZb.point3(), - noiseModel); + graph->emplace_shared >(edge->key1(), Point3(0, 0, 0), priorNoiseModel); + graph->emplace_shared >(edge->key2(), scale * edge->measured().point3(), + edge->noiseModel()); } Values TranslationRecovery::initalizeRandomly() const { @@ -71,10 +66,8 @@ Values TranslationRecovery::initalizeRandomly() const { // Loop over measurements and add a random translation for (auto edge : relativeTranslations_) { - Key a, b; - tie(a, b) = edge.first; - insert(a); - insert(b); + insert(edge.key1()); + insert(edge.key2()); } return initial; } @@ -90,6 +83,7 @@ Values TranslationRecovery::run(const double scale) const { TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements( const Values& poses, const vector& edges) { + auto edgeNoiseModel = noiseModel::Isotropic::Sigma(3, 0.01); TranslationEdges relativeTranslations; for (auto edge : edges) { Key a, b; @@ -97,7 +91,7 @@ TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements( const Pose3 wTa = poses.at(a), wTb = poses.at(b); const Point3 Ta = wTa.translation(), Tb = wTb.translation(); const Unit3 w_aZb(Tb - Ta); - relativeTranslations[edge] = w_aZb; + relativeTranslations.emplace_back(a, b, w_aZb, edgeNoiseModel); } return relativeTranslations; } diff --git a/gtsam/sfm/TranslationRecovery.h b/gtsam/sfm/TranslationRecovery.h index bb3c3cdb1..1392817b2 100644 --- a/gtsam/sfm/TranslationRecovery.h +++ b/gtsam/sfm/TranslationRecovery.h @@ -19,9 +19,10 @@ #include #include #include +#include -#include #include +#include namespace gtsam { @@ -48,7 +49,7 @@ namespace gtsam { class TranslationRecovery { public: using KeyPair = std::pair; - using TranslationEdges = std::map; + using TranslationEdges = std::vector>; private: TranslationEdges relativeTranslations_; @@ -59,7 +60,8 @@ class TranslationRecovery { * @brief Construct a new Translation Recovery object * * @param relativeTranslations the relative translations, in world coordinate - * frames, indexed in a map by a pair of Pose keys. + * frames, vector of BinaryMeasurements of Unit3, where each key of a measurement + * is a point in 3D. * @param lmParams (optional) gtsam::LavenbergMarquardtParams that can be * used to modify the parameters for the LM optimizer. By default, uses the * default LM parameters. @@ -105,8 +107,9 @@ class TranslationRecovery { * * @param poses SE(3) ground truth poses stored as Values * @param edges pairs (a,b) for which a measurement w_aZb will be generated. - * @return TranslationEdges map from a KeyPair to the simulated Unit3 - * translation direction measurement between the cameras in KeyPair. + * @return TranslationEdges vector of binary measurements where the keys are + * the cameras and the measurement is the simulated Unit3 translation + * direction between the cameras. 
*/ static TranslationEdges SimulateMeasurements( const Values& poses, const std::vector& edges); diff --git a/gtsam/sfm/tests/testBinaryMeasurement.cpp b/gtsam/sfm/tests/testBinaryMeasurement.cpp new file mode 100644 index 000000000..ae3ba23e5 --- /dev/null +++ b/gtsam/sfm/tests/testBinaryMeasurement.cpp @@ -0,0 +1,61 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testBinaryMeasurement.cpp + * @brief Unit tests for BinaryMeasurement class + * @author Akshay Krishnan + * @date July 2020 + */ + +#include + +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +// Keys are deliberately *not* in sorted order to test that case. +static const Key kKey1(2), kKey2(1); + +// Create noise models for unit3 and rot3 +static SharedNoiseModel unit3_model(noiseModel::Isotropic::Sigma(2, 0.05)); +static SharedNoiseModel rot3_model(noiseModel::Isotropic::Sigma(3, 0.05)); + +const Unit3 unit3Measured(Vector3(1, 1, 1)); +const Rot3 rot3Measured; + +TEST(BinaryMeasurement, Unit3) { + BinaryMeasurement unit3Measurement(kKey1, kKey2, unit3Measured, + unit3_model); + EXPECT_LONGS_EQUAL(unit3Measurement.key1(), kKey1); + EXPECT_LONGS_EQUAL(unit3Measurement.key2(), kKey2); + EXPECT(unit3Measurement.measured().equals(unit3Measured)); +} + +TEST(BinaryMeasurement, Rot3) { + BinaryMeasurement rot3Measurement(kKey1, kKey2, rot3Measured, + rot3_model); + EXPECT_LONGS_EQUAL(rot3Measurement.key1(), kKey1); + EXPECT_LONGS_EQUAL(rot3Measurement.key2(), kKey2); + EXPECT(rot3Measurement.measured().equals(rot3Measured)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ From 3d4a8e16a26907df941af55ad9da0c3173be6bf2 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 31 Jul 2020 11:49:31 -0400 Subject: [PATCH 020/142] Moved python files --- python/{gtsam_py/python => gtsam}/__init__.py | 0 .../{gtsam_py/python => gtsam}/examples/DogLegOptimizerExample.py | 0 python/{gtsam_py/python => gtsam}/examples/GPSFactorExample.py | 0 python/{gtsam_py/python => gtsam}/examples/ImuFactorExample.py | 0 .../{gtsam_py/python => gtsam}/examples/ImuFactorISAM2Example.py | 0 python/{gtsam_py/python => gtsam}/examples/OdometryExample.py | 0 .../python => gtsam}/examples/PlanarManipulatorExample.py | 0 python/{gtsam_py/python => gtsam}/examples/PlanarSLAMExample.py | 0 python/{gtsam_py/python => gtsam}/examples/Pose2SLAMExample.py | 0 .../{gtsam_py/python => gtsam}/examples/Pose2SLAMExample_g2o.py | 0 .../{gtsam_py/python => gtsam}/examples/Pose3SLAMExample_g2o.py | 0 .../examples/Pose3SLAMExample_initializePose3Chordal.py | 0 .../{gtsam_py/python => gtsam}/examples/PreintegrationExample.py | 0 python/{gtsam_py/python => gtsam}/examples/README.md | 0 python/{gtsam_py/python => gtsam}/examples/SFMExample.py | 0 python/{gtsam_py/python => gtsam}/examples/SFMdata.py | 0 python/{gtsam_py/python => gtsam}/examples/SimpleRotation.py | 0 python/{gtsam_py/python => gtsam}/examples/VisualISAM2Example.py | 0 python/{gtsam_py/python => 
gtsam}/examples/VisualISAMExample.py | 0 python/{gtsam_py/python => gtsam}/examples/__init__.py | 0 python/{gtsam_py/python => gtsam}/tests/testScenarioRunner.py | 0 python/{gtsam_py/python => gtsam}/tests/test_Cal3Unified.py | 0 python/{gtsam_py/python => gtsam}/tests/test_FrobeniusFactor.py | 0 .../{gtsam_py/python => gtsam}/tests/test_GaussianFactorGraph.py | 0 python/{gtsam_py/python => gtsam}/tests/test_JacobianFactor.py | 0 python/{gtsam_py/python => gtsam}/tests/test_KalmanFilter.py | 0 python/{gtsam_py/python => gtsam}/tests/test_KarcherMeanFactor.py | 0 .../{gtsam_py/python => gtsam}/tests/test_LocalizationExample.py | 0 .../{gtsam_py/python => gtsam}/tests/test_NonlinearOptimizer.py | 0 python/{gtsam_py/python => gtsam}/tests/test_OdometryExample.py | 0 python/{gtsam_py/python => gtsam}/tests/test_PlanarSLAMExample.py | 0 python/{gtsam_py/python => gtsam}/tests/test_Pose2.py | 0 python/{gtsam_py/python => gtsam}/tests/test_Pose2SLAMExample.py | 0 python/{gtsam_py/python => gtsam}/tests/test_Pose3.py | 0 python/{gtsam_py/python => gtsam}/tests/test_Pose3SLAMExample.py | 0 python/{gtsam_py/python => gtsam}/tests/test_PriorFactor.py | 0 python/{gtsam_py/python => gtsam}/tests/test_SFMExample.py | 0 python/{gtsam_py/python => gtsam}/tests/test_SO4.py | 0 python/{gtsam_py/python => gtsam}/tests/test_SOn.py | 0 python/{gtsam_py/python => gtsam}/tests/test_Scenario.py | 0 python/{gtsam_py/python => gtsam}/tests/test_SimpleCamera.py | 0 python/{gtsam_py/python => gtsam}/tests/test_StereoVOExample.py | 0 python/{gtsam_py/python => gtsam}/tests/test_Triangulation.py | 0 python/{gtsam_py/python => gtsam}/tests/test_Values.py | 0 python/{gtsam_py/python => gtsam}/tests/test_VisualISAMExample.py | 0 python/{gtsam_py/python => gtsam}/tests/test_dataset.py | 0 python/{gtsam_py/python => gtsam}/tests/test_dsf_map.py | 0 python/{gtsam_py/python => gtsam}/tests/test_initialize_pose3.py | 0 python/{gtsam_py/python => gtsam}/tests/test_logging_optimizer.py | 0 python/{gtsam_py/python => gtsam}/utils/__init__.py | 0 python/{gtsam_py/python => gtsam}/utils/circlePose3.py | 0 python/{gtsam_py/python => gtsam}/utils/logging_optimizer.py | 0 python/{gtsam_py/python => gtsam}/utils/plot.py | 0 python/{gtsam_py/python => gtsam}/utils/test_case.py | 0 python/{gtsam_py/python => gtsam}/utils/visual_data_generator.py | 0 python/{gtsam_py/python => gtsam}/utils/visual_isam.py | 0 python/{gtsam_unstable_py/python => gtsam_unstable}/__init__.py | 0 .../python => gtsam_unstable}/examples/FixedLagSmootherExample.py | 0 .../python => gtsam_unstable}/examples/TimeOfArrivalExample.py | 0 .../python => gtsam_unstable}/examples/__init__.py | 0 .../python => gtsam_unstable}/tests/__init__.py | 0 .../tests/test_FixedLagSmootherExample.py | 0 62 files changed, 0 insertions(+), 0 deletions(-) rename python/{gtsam_py/python => gtsam}/__init__.py (100%) rename python/{gtsam_py/python => gtsam}/examples/DogLegOptimizerExample.py (100%) rename python/{gtsam_py/python => gtsam}/examples/GPSFactorExample.py (100%) rename python/{gtsam_py/python => gtsam}/examples/ImuFactorExample.py (100%) rename python/{gtsam_py/python => gtsam}/examples/ImuFactorISAM2Example.py (100%) rename python/{gtsam_py/python => gtsam}/examples/OdometryExample.py (100%) rename python/{gtsam_py/python => gtsam}/examples/PlanarManipulatorExample.py (100%) rename python/{gtsam_py/python => gtsam}/examples/PlanarSLAMExample.py (100%) rename python/{gtsam_py/python => gtsam}/examples/Pose2SLAMExample.py (100%) rename python/{gtsam_py/python => 
gtsam}/examples/Pose2SLAMExample_g2o.py (100%) rename python/{gtsam_py/python => gtsam}/examples/Pose3SLAMExample_g2o.py (100%) rename python/{gtsam_py/python => gtsam}/examples/Pose3SLAMExample_initializePose3Chordal.py (100%) rename python/{gtsam_py/python => gtsam}/examples/PreintegrationExample.py (100%) rename python/{gtsam_py/python => gtsam}/examples/README.md (100%) rename python/{gtsam_py/python => gtsam}/examples/SFMExample.py (100%) rename python/{gtsam_py/python => gtsam}/examples/SFMdata.py (100%) rename python/{gtsam_py/python => gtsam}/examples/SimpleRotation.py (100%) rename python/{gtsam_py/python => gtsam}/examples/VisualISAM2Example.py (100%) rename python/{gtsam_py/python => gtsam}/examples/VisualISAMExample.py (100%) rename python/{gtsam_py/python => gtsam}/examples/__init__.py (100%) rename python/{gtsam_py/python => gtsam}/tests/testScenarioRunner.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_Cal3Unified.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_FrobeniusFactor.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_GaussianFactorGraph.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_JacobianFactor.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_KalmanFilter.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_KarcherMeanFactor.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_LocalizationExample.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_NonlinearOptimizer.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_OdometryExample.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_PlanarSLAMExample.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_Pose2.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_Pose2SLAMExample.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_Pose3.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_Pose3SLAMExample.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_PriorFactor.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_SFMExample.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_SO4.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_SOn.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_Scenario.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_SimpleCamera.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_StereoVOExample.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_Triangulation.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_Values.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_VisualISAMExample.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_dataset.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_dsf_map.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_initialize_pose3.py (100%) rename python/{gtsam_py/python => gtsam}/tests/test_logging_optimizer.py (100%) rename python/{gtsam_py/python => gtsam}/utils/__init__.py (100%) rename python/{gtsam_py/python => gtsam}/utils/circlePose3.py (100%) rename python/{gtsam_py/python => gtsam}/utils/logging_optimizer.py (100%) rename python/{gtsam_py/python => gtsam}/utils/plot.py (100%) rename python/{gtsam_py/python => gtsam}/utils/test_case.py (100%) rename python/{gtsam_py/python => gtsam}/utils/visual_data_generator.py (100%) rename python/{gtsam_py/python => gtsam}/utils/visual_isam.py (100%) rename python/{gtsam_unstable_py/python => 
gtsam_unstable}/__init__.py (100%) rename python/{gtsam_unstable_py/python => gtsam_unstable}/examples/FixedLagSmootherExample.py (100%) rename python/{gtsam_unstable_py/python => gtsam_unstable}/examples/TimeOfArrivalExample.py (100%) rename python/{gtsam_unstable_py/python => gtsam_unstable}/examples/__init__.py (100%) rename python/{gtsam_unstable_py/python => gtsam_unstable}/tests/__init__.py (100%) rename python/{gtsam_unstable_py/python => gtsam_unstable}/tests/test_FixedLagSmootherExample.py (100%) diff --git a/python/gtsam_py/python/__init__.py b/python/gtsam/__init__.py similarity index 100% rename from python/gtsam_py/python/__init__.py rename to python/gtsam/__init__.py diff --git a/python/gtsam_py/python/examples/DogLegOptimizerExample.py b/python/gtsam/examples/DogLegOptimizerExample.py similarity index 100% rename from python/gtsam_py/python/examples/DogLegOptimizerExample.py rename to python/gtsam/examples/DogLegOptimizerExample.py diff --git a/python/gtsam_py/python/examples/GPSFactorExample.py b/python/gtsam/examples/GPSFactorExample.py similarity index 100% rename from python/gtsam_py/python/examples/GPSFactorExample.py rename to python/gtsam/examples/GPSFactorExample.py diff --git a/python/gtsam_py/python/examples/ImuFactorExample.py b/python/gtsam/examples/ImuFactorExample.py similarity index 100% rename from python/gtsam_py/python/examples/ImuFactorExample.py rename to python/gtsam/examples/ImuFactorExample.py diff --git a/python/gtsam_py/python/examples/ImuFactorISAM2Example.py b/python/gtsam/examples/ImuFactorISAM2Example.py similarity index 100% rename from python/gtsam_py/python/examples/ImuFactorISAM2Example.py rename to python/gtsam/examples/ImuFactorISAM2Example.py diff --git a/python/gtsam_py/python/examples/OdometryExample.py b/python/gtsam/examples/OdometryExample.py similarity index 100% rename from python/gtsam_py/python/examples/OdometryExample.py rename to python/gtsam/examples/OdometryExample.py diff --git a/python/gtsam_py/python/examples/PlanarManipulatorExample.py b/python/gtsam/examples/PlanarManipulatorExample.py similarity index 100% rename from python/gtsam_py/python/examples/PlanarManipulatorExample.py rename to python/gtsam/examples/PlanarManipulatorExample.py diff --git a/python/gtsam_py/python/examples/PlanarSLAMExample.py b/python/gtsam/examples/PlanarSLAMExample.py similarity index 100% rename from python/gtsam_py/python/examples/PlanarSLAMExample.py rename to python/gtsam/examples/PlanarSLAMExample.py diff --git a/python/gtsam_py/python/examples/Pose2SLAMExample.py b/python/gtsam/examples/Pose2SLAMExample.py similarity index 100% rename from python/gtsam_py/python/examples/Pose2SLAMExample.py rename to python/gtsam/examples/Pose2SLAMExample.py diff --git a/python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py b/python/gtsam/examples/Pose2SLAMExample_g2o.py similarity index 100% rename from python/gtsam_py/python/examples/Pose2SLAMExample_g2o.py rename to python/gtsam/examples/Pose2SLAMExample_g2o.py diff --git a/python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py b/python/gtsam/examples/Pose3SLAMExample_g2o.py similarity index 100% rename from python/gtsam_py/python/examples/Pose3SLAMExample_g2o.py rename to python/gtsam/examples/Pose3SLAMExample_g2o.py diff --git a/python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py b/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py similarity index 100% rename from python/gtsam_py/python/examples/Pose3SLAMExample_initializePose3Chordal.py rename to 
python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py diff --git a/python/gtsam_py/python/examples/PreintegrationExample.py b/python/gtsam/examples/PreintegrationExample.py similarity index 100% rename from python/gtsam_py/python/examples/PreintegrationExample.py rename to python/gtsam/examples/PreintegrationExample.py diff --git a/python/gtsam_py/python/examples/README.md b/python/gtsam/examples/README.md similarity index 100% rename from python/gtsam_py/python/examples/README.md rename to python/gtsam/examples/README.md diff --git a/python/gtsam_py/python/examples/SFMExample.py b/python/gtsam/examples/SFMExample.py similarity index 100% rename from python/gtsam_py/python/examples/SFMExample.py rename to python/gtsam/examples/SFMExample.py diff --git a/python/gtsam_py/python/examples/SFMdata.py b/python/gtsam/examples/SFMdata.py similarity index 100% rename from python/gtsam_py/python/examples/SFMdata.py rename to python/gtsam/examples/SFMdata.py diff --git a/python/gtsam_py/python/examples/SimpleRotation.py b/python/gtsam/examples/SimpleRotation.py similarity index 100% rename from python/gtsam_py/python/examples/SimpleRotation.py rename to python/gtsam/examples/SimpleRotation.py diff --git a/python/gtsam_py/python/examples/VisualISAM2Example.py b/python/gtsam/examples/VisualISAM2Example.py similarity index 100% rename from python/gtsam_py/python/examples/VisualISAM2Example.py rename to python/gtsam/examples/VisualISAM2Example.py diff --git a/python/gtsam_py/python/examples/VisualISAMExample.py b/python/gtsam/examples/VisualISAMExample.py similarity index 100% rename from python/gtsam_py/python/examples/VisualISAMExample.py rename to python/gtsam/examples/VisualISAMExample.py diff --git a/python/gtsam_py/python/examples/__init__.py b/python/gtsam/examples/__init__.py similarity index 100% rename from python/gtsam_py/python/examples/__init__.py rename to python/gtsam/examples/__init__.py diff --git a/python/gtsam_py/python/tests/testScenarioRunner.py b/python/gtsam/tests/testScenarioRunner.py similarity index 100% rename from python/gtsam_py/python/tests/testScenarioRunner.py rename to python/gtsam/tests/testScenarioRunner.py diff --git a/python/gtsam_py/python/tests/test_Cal3Unified.py b/python/gtsam/tests/test_Cal3Unified.py similarity index 100% rename from python/gtsam_py/python/tests/test_Cal3Unified.py rename to python/gtsam/tests/test_Cal3Unified.py diff --git a/python/gtsam_py/python/tests/test_FrobeniusFactor.py b/python/gtsam/tests/test_FrobeniusFactor.py similarity index 100% rename from python/gtsam_py/python/tests/test_FrobeniusFactor.py rename to python/gtsam/tests/test_FrobeniusFactor.py diff --git a/python/gtsam_py/python/tests/test_GaussianFactorGraph.py b/python/gtsam/tests/test_GaussianFactorGraph.py similarity index 100% rename from python/gtsam_py/python/tests/test_GaussianFactorGraph.py rename to python/gtsam/tests/test_GaussianFactorGraph.py diff --git a/python/gtsam_py/python/tests/test_JacobianFactor.py b/python/gtsam/tests/test_JacobianFactor.py similarity index 100% rename from python/gtsam_py/python/tests/test_JacobianFactor.py rename to python/gtsam/tests/test_JacobianFactor.py diff --git a/python/gtsam_py/python/tests/test_KalmanFilter.py b/python/gtsam/tests/test_KalmanFilter.py similarity index 100% rename from python/gtsam_py/python/tests/test_KalmanFilter.py rename to python/gtsam/tests/test_KalmanFilter.py diff --git a/python/gtsam_py/python/tests/test_KarcherMeanFactor.py b/python/gtsam/tests/test_KarcherMeanFactor.py similarity index 100% 
rename from python/gtsam_py/python/tests/test_KarcherMeanFactor.py rename to python/gtsam/tests/test_KarcherMeanFactor.py diff --git a/python/gtsam_py/python/tests/test_LocalizationExample.py b/python/gtsam/tests/test_LocalizationExample.py similarity index 100% rename from python/gtsam_py/python/tests/test_LocalizationExample.py rename to python/gtsam/tests/test_LocalizationExample.py diff --git a/python/gtsam_py/python/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py similarity index 100% rename from python/gtsam_py/python/tests/test_NonlinearOptimizer.py rename to python/gtsam/tests/test_NonlinearOptimizer.py diff --git a/python/gtsam_py/python/tests/test_OdometryExample.py b/python/gtsam/tests/test_OdometryExample.py similarity index 100% rename from python/gtsam_py/python/tests/test_OdometryExample.py rename to python/gtsam/tests/test_OdometryExample.py diff --git a/python/gtsam_py/python/tests/test_PlanarSLAMExample.py b/python/gtsam/tests/test_PlanarSLAMExample.py similarity index 100% rename from python/gtsam_py/python/tests/test_PlanarSLAMExample.py rename to python/gtsam/tests/test_PlanarSLAMExample.py diff --git a/python/gtsam_py/python/tests/test_Pose2.py b/python/gtsam/tests/test_Pose2.py similarity index 100% rename from python/gtsam_py/python/tests/test_Pose2.py rename to python/gtsam/tests/test_Pose2.py diff --git a/python/gtsam_py/python/tests/test_Pose2SLAMExample.py b/python/gtsam/tests/test_Pose2SLAMExample.py similarity index 100% rename from python/gtsam_py/python/tests/test_Pose2SLAMExample.py rename to python/gtsam/tests/test_Pose2SLAMExample.py diff --git a/python/gtsam_py/python/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py similarity index 100% rename from python/gtsam_py/python/tests/test_Pose3.py rename to python/gtsam/tests/test_Pose3.py diff --git a/python/gtsam_py/python/tests/test_Pose3SLAMExample.py b/python/gtsam/tests/test_Pose3SLAMExample.py similarity index 100% rename from python/gtsam_py/python/tests/test_Pose3SLAMExample.py rename to python/gtsam/tests/test_Pose3SLAMExample.py diff --git a/python/gtsam_py/python/tests/test_PriorFactor.py b/python/gtsam/tests/test_PriorFactor.py similarity index 100% rename from python/gtsam_py/python/tests/test_PriorFactor.py rename to python/gtsam/tests/test_PriorFactor.py diff --git a/python/gtsam_py/python/tests/test_SFMExample.py b/python/gtsam/tests/test_SFMExample.py similarity index 100% rename from python/gtsam_py/python/tests/test_SFMExample.py rename to python/gtsam/tests/test_SFMExample.py diff --git a/python/gtsam_py/python/tests/test_SO4.py b/python/gtsam/tests/test_SO4.py similarity index 100% rename from python/gtsam_py/python/tests/test_SO4.py rename to python/gtsam/tests/test_SO4.py diff --git a/python/gtsam_py/python/tests/test_SOn.py b/python/gtsam/tests/test_SOn.py similarity index 100% rename from python/gtsam_py/python/tests/test_SOn.py rename to python/gtsam/tests/test_SOn.py diff --git a/python/gtsam_py/python/tests/test_Scenario.py b/python/gtsam/tests/test_Scenario.py similarity index 100% rename from python/gtsam_py/python/tests/test_Scenario.py rename to python/gtsam/tests/test_Scenario.py diff --git a/python/gtsam_py/python/tests/test_SimpleCamera.py b/python/gtsam/tests/test_SimpleCamera.py similarity index 100% rename from python/gtsam_py/python/tests/test_SimpleCamera.py rename to python/gtsam/tests/test_SimpleCamera.py diff --git a/python/gtsam_py/python/tests/test_StereoVOExample.py b/python/gtsam/tests/test_StereoVOExample.py similarity 
index 100% rename from python/gtsam_py/python/tests/test_StereoVOExample.py rename to python/gtsam/tests/test_StereoVOExample.py diff --git a/python/gtsam_py/python/tests/test_Triangulation.py b/python/gtsam/tests/test_Triangulation.py similarity index 100% rename from python/gtsam_py/python/tests/test_Triangulation.py rename to python/gtsam/tests/test_Triangulation.py diff --git a/python/gtsam_py/python/tests/test_Values.py b/python/gtsam/tests/test_Values.py similarity index 100% rename from python/gtsam_py/python/tests/test_Values.py rename to python/gtsam/tests/test_Values.py diff --git a/python/gtsam_py/python/tests/test_VisualISAMExample.py b/python/gtsam/tests/test_VisualISAMExample.py similarity index 100% rename from python/gtsam_py/python/tests/test_VisualISAMExample.py rename to python/gtsam/tests/test_VisualISAMExample.py diff --git a/python/gtsam_py/python/tests/test_dataset.py b/python/gtsam/tests/test_dataset.py similarity index 100% rename from python/gtsam_py/python/tests/test_dataset.py rename to python/gtsam/tests/test_dataset.py diff --git a/python/gtsam_py/python/tests/test_dsf_map.py b/python/gtsam/tests/test_dsf_map.py similarity index 100% rename from python/gtsam_py/python/tests/test_dsf_map.py rename to python/gtsam/tests/test_dsf_map.py diff --git a/python/gtsam_py/python/tests/test_initialize_pose3.py b/python/gtsam/tests/test_initialize_pose3.py similarity index 100% rename from python/gtsam_py/python/tests/test_initialize_pose3.py rename to python/gtsam/tests/test_initialize_pose3.py diff --git a/python/gtsam_py/python/tests/test_logging_optimizer.py b/python/gtsam/tests/test_logging_optimizer.py similarity index 100% rename from python/gtsam_py/python/tests/test_logging_optimizer.py rename to python/gtsam/tests/test_logging_optimizer.py diff --git a/python/gtsam_py/python/utils/__init__.py b/python/gtsam/utils/__init__.py similarity index 100% rename from python/gtsam_py/python/utils/__init__.py rename to python/gtsam/utils/__init__.py diff --git a/python/gtsam_py/python/utils/circlePose3.py b/python/gtsam/utils/circlePose3.py similarity index 100% rename from python/gtsam_py/python/utils/circlePose3.py rename to python/gtsam/utils/circlePose3.py diff --git a/python/gtsam_py/python/utils/logging_optimizer.py b/python/gtsam/utils/logging_optimizer.py similarity index 100% rename from python/gtsam_py/python/utils/logging_optimizer.py rename to python/gtsam/utils/logging_optimizer.py diff --git a/python/gtsam_py/python/utils/plot.py b/python/gtsam/utils/plot.py similarity index 100% rename from python/gtsam_py/python/utils/plot.py rename to python/gtsam/utils/plot.py diff --git a/python/gtsam_py/python/utils/test_case.py b/python/gtsam/utils/test_case.py similarity index 100% rename from python/gtsam_py/python/utils/test_case.py rename to python/gtsam/utils/test_case.py diff --git a/python/gtsam_py/python/utils/visual_data_generator.py b/python/gtsam/utils/visual_data_generator.py similarity index 100% rename from python/gtsam_py/python/utils/visual_data_generator.py rename to python/gtsam/utils/visual_data_generator.py diff --git a/python/gtsam_py/python/utils/visual_isam.py b/python/gtsam/utils/visual_isam.py similarity index 100% rename from python/gtsam_py/python/utils/visual_isam.py rename to python/gtsam/utils/visual_isam.py diff --git a/python/gtsam_unstable_py/python/__init__.py b/python/gtsam_unstable/__init__.py similarity index 100% rename from python/gtsam_unstable_py/python/__init__.py rename to python/gtsam_unstable/__init__.py diff --git 
a/python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py b/python/gtsam_unstable/examples/FixedLagSmootherExample.py similarity index 100% rename from python/gtsam_unstable_py/python/examples/FixedLagSmootherExample.py rename to python/gtsam_unstable/examples/FixedLagSmootherExample.py diff --git a/python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py b/python/gtsam_unstable/examples/TimeOfArrivalExample.py similarity index 100% rename from python/gtsam_unstable_py/python/examples/TimeOfArrivalExample.py rename to python/gtsam_unstable/examples/TimeOfArrivalExample.py diff --git a/python/gtsam_unstable_py/python/examples/__init__.py b/python/gtsam_unstable/examples/__init__.py similarity index 100% rename from python/gtsam_unstable_py/python/examples/__init__.py rename to python/gtsam_unstable/examples/__init__.py diff --git a/python/gtsam_unstable_py/python/tests/__init__.py b/python/gtsam_unstable/tests/__init__.py similarity index 100% rename from python/gtsam_unstable_py/python/tests/__init__.py rename to python/gtsam_unstable/tests/__init__.py diff --git a/python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py b/python/gtsam_unstable/tests/test_FixedLagSmootherExample.py similarity index 100% rename from python/gtsam_unstable_py/python/tests/test_FixedLagSmootherExample.py rename to python/gtsam_unstable/tests/test_FixedLagSmootherExample.py From fda79057e4e764e3d5028202c1af0571bc2ddaae Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 31 Jul 2020 12:16:25 -0400 Subject: [PATCH 021/142] import from is working --- python/gtsam/__init__.py | 1 - python/gtsam/examples/ImuFactorExample.py | 2 +- python/gtsam/examples/ImuFactorISAM2Example.py | 2 +- python/gtsam/examples/PlanarSLAMExample.py | 2 +- python/gtsam/examples/Pose2SLAMExample.py | 1 - python/gtsam/examples/SimpleRotation.py | 2 +- python/gtsam/examples/VisualISAM2Example.py | 2 +- python/gtsam/imuBias.py | 1 + python/gtsam/noiseModel.py | 1 + python/gtsam/symbol_shorthand.py | 1 + python/gtsam/tests/test_GaussianFactorGraph.py | 2 +- python/gtsam/tests/test_SFMExample.py | 2 +- 12 files changed, 10 insertions(+), 9 deletions(-) create mode 100644 python/gtsam/imuBias.py create mode 100644 python/gtsam/noiseModel.py create mode 100644 python/gtsam/symbol_shorthand.py diff --git a/python/gtsam/__init__.py b/python/gtsam/__init__.py index ee00905cf..555808d5a 100644 --- a/python/gtsam/__init__.py +++ b/python/gtsam/__init__.py @@ -1,6 +1,5 @@ from .gtsam import * - def _init(): import numpy as np diff --git a/python/gtsam/examples/ImuFactorExample.py b/python/gtsam/examples/ImuFactorExample.py index f9ff44738..eec7c5ebd 100644 --- a/python/gtsam/examples/ImuFactorExample.py +++ b/python/gtsam/examples/ImuFactorExample.py @@ -18,7 +18,7 @@ import math import gtsam import matplotlib.pyplot as plt import numpy as np -from gtsam.gtsam.symbol_shorthand import B, V, X +from gtsam.symbol_shorthand import B, V, X from gtsam.utils.plot import plot_pose3 from mpl_toolkits.mplot3d import Axes3D diff --git a/python/gtsam/examples/ImuFactorISAM2Example.py b/python/gtsam/examples/ImuFactorISAM2Example.py index 6556eb554..e0999b25b 100644 --- a/python/gtsam/examples/ImuFactorISAM2Example.py +++ b/python/gtsam/examples/ImuFactorISAM2Example.py @@ -16,7 +16,7 @@ from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, PinholeCameraCal3_S2, Point3, Pose3, PriorFactorConstantBias, PriorFactorPose3, PriorFactorVector, Rot3, Values) -from gtsam.gtsam.symbol_shorthand import B, V, X +from 
gtsam.symbol_shorthand import B, V, X from gtsam.utils import plot from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 diff --git a/python/gtsam/examples/PlanarSLAMExample.py b/python/gtsam/examples/PlanarSLAMExample.py index ba0529479..5ffdf048d 100644 --- a/python/gtsam/examples/PlanarSLAMExample.py +++ b/python/gtsam/examples/PlanarSLAMExample.py @@ -16,7 +16,7 @@ from __future__ import print_function import numpy as np import gtsam -from gtsam.gtsam.symbol_shorthand import X, L +from gtsam.symbol_shorthand import X, L # Create noise models PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) diff --git a/python/gtsam/examples/Pose2SLAMExample.py b/python/gtsam/examples/Pose2SLAMExample.py index 686da4db3..2569f0953 100644 --- a/python/gtsam/examples/Pose2SLAMExample.py +++ b/python/gtsam/examples/Pose2SLAMExample.py @@ -27,7 +27,6 @@ def vector3(x, y, z): """Create 3d double numpy array.""" return np.array([x, y, z], dtype=np.float) - # Create noise models PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) diff --git a/python/gtsam/examples/SimpleRotation.py b/python/gtsam/examples/SimpleRotation.py index de8ab07c3..0fef261f8 100644 --- a/python/gtsam/examples/SimpleRotation.py +++ b/python/gtsam/examples/SimpleRotation.py @@ -12,7 +12,7 @@ a single variable with a single factor. import numpy as np import gtsam -from gtsam.gtsam.symbol_shorthand import X +from gtsam.symbol_shorthand import X def main(): """ diff --git a/python/gtsam/examples/VisualISAM2Example.py b/python/gtsam/examples/VisualISAM2Example.py index e4c0c8b8d..bacf510ec 100644 --- a/python/gtsam/examples/VisualISAM2Example.py +++ b/python/gtsam/examples/VisualISAM2Example.py @@ -17,7 +17,7 @@ import gtsam import gtsam.utils.plot as gtsam_plot import matplotlib.pyplot as plt import numpy as np -from gtsam.gtsam.symbol_shorthand import L, X +from gtsam.symbol_shorthand import L, X from gtsam.examples import SFMdata from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 diff --git a/python/gtsam/imuBias.py b/python/gtsam/imuBias.py new file mode 100644 index 000000000..1cb367b9f --- /dev/null +++ b/python/gtsam/imuBias.py @@ -0,0 +1 @@ +from .gtsam.imuBias import * diff --git a/python/gtsam/noiseModel.py b/python/gtsam/noiseModel.py new file mode 100644 index 000000000..9b1929a8e --- /dev/null +++ b/python/gtsam/noiseModel.py @@ -0,0 +1 @@ +from .gtsam.noiseModel import * \ No newline at end of file diff --git a/python/gtsam/symbol_shorthand.py b/python/gtsam/symbol_shorthand.py new file mode 100644 index 000000000..956ed693a --- /dev/null +++ b/python/gtsam/symbol_shorthand.py @@ -0,0 +1 @@ +from .gtsam.symbol_shorthand import * \ No newline at end of file diff --git a/python/gtsam/tests/test_GaussianFactorGraph.py b/python/gtsam/tests/test_GaussianFactorGraph.py index d520b0fa4..a29b0f263 100644 --- a/python/gtsam/tests/test_GaussianFactorGraph.py +++ b/python/gtsam/tests/test_GaussianFactorGraph.py @@ -16,7 +16,7 @@ import unittest import gtsam import numpy as np -from gtsam.gtsam.symbol_shorthand import X +from gtsam.symbol_shorthand import X from gtsam.utils.test_case import GtsamTestCase diff --git a/python/gtsam/tests/test_SFMExample.py b/python/gtsam/tests/test_SFMExample.py index 295bf0b5e..5e787f5ae 100644 --- a/python/gtsam/tests/test_SFMExample.py +++ b/python/gtsam/tests/test_SFMExample.py @@ -15,7 +15,7 @@ import numpy as np import gtsam import 
gtsam.utils.visual_data_generator as generator from gtsam import symbol -from gtsam.gtsam.noiseModel import Isotropic, Diagonal +from gtsam.noiseModel import Isotropic, Diagonal from gtsam.utils.test_case import GtsamTestCase From 1cabd2000fcd580625395d02f926e0a216f95f39 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 31 Jul 2020 12:39:04 -0400 Subject: [PATCH 022/142] Sync with new_wrapper develop branch --- python/gtsam/__init__.py | 5 ++++ .../gtsam/examples/ImuFactorISAM2Example.py | 2 +- python/gtsam/examples/README.md | 12 +++++----- python/gtsam/examples/VisualISAMExample.py | 17 +++++--------- python/gtsam/tests/test_SFMExample.py | 20 ++++++++-------- python/gtsam/utils/plot.py | 23 ++++++++----------- 6 files changed, 37 insertions(+), 42 deletions(-) diff --git a/python/gtsam/__init__.py b/python/gtsam/__init__.py index 555808d5a..e6fd8c9c8 100644 --- a/python/gtsam/__init__.py +++ b/python/gtsam/__init__.py @@ -1,16 +1,21 @@ from .gtsam import * + def _init(): + """This function is to add shims for the long-gone Point2 and Point3 types""" + import numpy as np global Point2 # export function def Point2(x=0, y=0): + """Shim for the deleted Point2 type.""" return np.array([x, y], dtype=float) global Point3 # export function def Point3(x=0, y=0, z=0): + """Shim for the deleted Point3 type.""" return np.array([x, y, z], dtype=float) # for interactive debugging diff --git a/python/gtsam/examples/ImuFactorISAM2Example.py b/python/gtsam/examples/ImuFactorISAM2Example.py index e0999b25b..98f6218ea 100644 --- a/python/gtsam/examples/ImuFactorISAM2Example.py +++ b/python/gtsam/examples/ImuFactorISAM2Example.py @@ -1,6 +1,6 @@ """ iSAM2 example with ImuFactor. -Author: Frank Dellaert, Varun Agrawal +Author: Robert Truax (C++), Frank Dellaert, Varun Agrawal """ # pylint: disable=invalid-name, E1101 diff --git a/python/gtsam/examples/README.md b/python/gtsam/examples/README.md index 46b87f079..e998e4dcd 100644 --- a/python/gtsam/examples/README.md +++ b/python/gtsam/examples/README.md @@ -5,8 +5,8 @@ | CameraResectioning | | | CreateSFMExampleData | | | DiscreteBayesNet_FG | none of the required discrete functionality is exposed through Python | -| easyPoint2KalmanFilter | ExtendedKalmanFilter not exposed through Python | -| elaboratePoint2KalmanFilter | GaussianSequentialSolver not exposed through Python | +| easyPoint2KalmanFilter | ExtendedKalmanFilter not yet exposed through Python | +| elaboratePoint2KalmanFilter | GaussianSequentialSolver not yet exposed through Python | | ImuFactorExample2 | X | | ImuFactorsExample | | | ISAM2Example_SmartFactor | | @@ -20,7 +20,7 @@ | Pose2SLAMExample_g2o | X | | Pose2SLAMExample_graph | | | Pose2SLAMExample_graphviz | | -| Pose2SLAMExample_lago | lago not exposed through Python | +| Pose2SLAMExample_lago | lago not yet exposed through Python | | Pose2SLAMStressTest | | | Pose2SLAMwSPCG | | | Pose3SLAMExample_changeKeys | | @@ -42,11 +42,11 @@ | StereoVOExample | | | StereoVOExample_large | | | TimeTBB | | -| UGM_chain | discrete functionality not exposed | -| UGM_small | discrete functionality not exposed | +| UGM_chain | discrete functionality not yet exposed | +| UGM_small | discrete functionality not yet exposed | | VisualISAM2Example | X | | VisualISAMExample | X | Extra Examples (with no C++ equivalent) - PlanarManipulatorExample -- SFMData +- SFMData \ No newline at end of file diff --git a/python/gtsam/examples/VisualISAMExample.py b/python/gtsam/examples/VisualISAMExample.py index 7d562c9b4..f99d3f3e6 100644 --- 
a/python/gtsam/examples/VisualISAMExample.py +++ b/python/gtsam/examples/VisualISAMExample.py @@ -19,12 +19,7 @@ from gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, NonlinearFactorGraph, NonlinearISAM, Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, PinholeCameraCal3_S2, Values, Point3) - - -def symbol(name: str, index: int) -> int: - """ helper for creating a symbol without explicitly casting 'name' from str to int """ - return gtsam.symbol(name, index) - +from gtsam.symbol_shorthand import X, L def main(): """ @@ -60,7 +55,7 @@ def main(): for j, point in enumerate(points): measurement = camera.project(point) factor = GenericProjectionFactorCal3_S2( - measurement, camera_noise, symbol('x', i), symbol('l', j), K) + measurement, camera_noise, X(i), L(j), K) graph.push_back(factor) # Intentionally initialize the variables off from the ground truth @@ -69,7 +64,7 @@ def main(): initial_xi = pose.compose(noise) # Add an initial guess for the current pose - initial_estimate.insert(symbol('x', i), initial_xi) + initial_estimate.insert(X(i), initial_xi) # If this is the first iteration, add a prior on the first pose to set the coordinate frame # and a prior on the first landmark to set the scale @@ -79,12 +74,12 @@ def main(): # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z pose_noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) - factor = PriorFactorPose3(symbol('x', 0), poses[0], pose_noise) + factor = PriorFactorPose3(X(0), poses[0], pose_noise) graph.push_back(factor) # Add a prior on landmark l0 point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) - factor = PriorFactorPoint3(symbol('l', 0), points[0], point_noise) + factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) # Add initial guesses to all observed landmarks @@ -92,7 +87,7 @@ def main(): for j, point in enumerate(points): # Intentionally initialize the variables off from the ground truth initial_lj = points[j] + noise - initial_estimate.insert(symbol('l', j), initial_lj) + initial_estimate.insert(L(j), initial_lj) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) diff --git a/python/gtsam/tests/test_SFMExample.py b/python/gtsam/tests/test_SFMExample.py index 5e787f5ae..47a3cbe3e 100644 --- a/python/gtsam/tests/test_SFMExample.py +++ b/python/gtsam/tests/test_SFMExample.py @@ -17,7 +17,7 @@ import gtsam.utils.visual_data_generator as generator from gtsam import symbol from gtsam.noiseModel import Isotropic, Diagonal from gtsam.utils.test_case import GtsamTestCase - +from gtsam.symbol_shorthand import X, P class TestSFMExample(GtsamTestCase): @@ -41,23 +41,23 @@ class TestSFMExample(GtsamTestCase): j = data.J[i][k] graph.add(gtsam.GenericProjectionFactorCal3_S2( data.Z[i][k], measurementNoise, - symbol('x', i), symbol('p', j), data.K)) + X(i), P(j), data.K)) posePriorNoise = Diagonal.Sigmas(poseNoiseSigmas) - graph.add(gtsam.PriorFactorPose3(symbol('x', 0), + graph.add(gtsam.PriorFactorPose3(X(0), truth.cameras[0].pose(), posePriorNoise)) pointPriorNoise = Isotropic.Sigma(3, pointNoiseSigma) - graph.add(gtsam.PriorFactorPoint3(symbol('p', 0), + graph.add(gtsam.PriorFactorPoint3(P(0), truth.points[0], pointPriorNoise)) # Initial estimate initialEstimate = gtsam.Values() for i in range(len(truth.cameras)): pose_i = truth.cameras[i].pose() - initialEstimate.insert(symbol('x', i), pose_i) + initialEstimate.insert(X(i), pose_i) for j in range(len(truth.points)): point_j = truth.points[j] - 
initialEstimate.insert(symbol('p', j), point_j) + initialEstimate.insert(P(j), point_j) # Optimization optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) @@ -67,16 +67,16 @@ class TestSFMExample(GtsamTestCase): # Marginalization marginals = gtsam.Marginals(graph, result) - marginals.marginalCovariance(symbol('p', 0)) - marginals.marginalCovariance(symbol('x', 0)) + marginals.marginalCovariance(P(0)) + marginals.marginalCovariance(X(0)) # Check optimized results, should be equal to ground truth for i in range(len(truth.cameras)): - pose_i = result.atPose3(symbol('x', i)) + pose_i = result.atPose3(X(i)) self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) for j in range(len(truth.points)): - point_j = result.atPoint3(symbol('p', j)) + point_j = result.atPoint3(P(j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) if __name__ == "__main__": diff --git a/python/gtsam/utils/plot.py b/python/gtsam/utils/plot.py index 90f5fe5e7..8b34ad2bf 100644 --- a/python/gtsam/utils/plot.py +++ b/python/gtsam/utils/plot.py @@ -109,7 +109,7 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global t = pose.translation() - origin = np.array(t) + origin = t # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -221,9 +221,8 @@ def plot_3d_points(fignum, values, linespec="g*", marginals=None, keys = values.keys() # Plot points and covariance matrices - for i in range(len(keys)): + for key in keys: try: - key = keys[i] point = values.atPoint3(key) if marginals is not None: covariance = marginals.marginalCovariance(key) @@ -321,17 +320,15 @@ def plot_trajectory(fignum, values, scale=1, marginals=None, """ pose3Values = gtsam.utilities.allPose3s(values) keys = gtsam.KeyVector(pose3Values.keys()) - lastIndex = None + lastKey = None - for i in range(len(keys)): - key = keys[i] + for key in keys: try: pose = pose3Values.atPose3(key) except: print("Warning: no Pose3 at key: {0}".format(key)) - if lastIndex is not None: - lastKey = keys[lastIndex] + if lastKey is not None: try: lastPose = pose3Values.atPose3(lastKey) except: @@ -346,11 +343,10 @@ def plot_trajectory(fignum, values, scale=1, marginals=None, fig = plot_pose3(fignum, lastPose, P=covariance, axis_length=scale, axis_labels=axis_labels) - lastIndex = i + lastKey = key # Draw final pose - if lastIndex is not None: - lastKey = keys[lastIndex] + if lastKey is not None: try: lastPose = pose3Values.atPose3(lastKey) if marginals: @@ -390,10 +386,9 @@ def plot_incremental_trajectory(fignum, values, start=0, pose3Values = gtsam.utilities.allPose3s(values) keys = gtsam.KeyVector(pose3Values.keys()) - for i in range(start, len(keys)): - key = keys[i] + for key in keys[start:]: if values.exists(key): - pose_i = values.atPose3(keys[i]) + pose_i = values.atPose3(key) plot_pose3(fignum, pose_i, scale) # Update the plot space to encompass all plotted points From b02cc3f7e3aba926392d29e4b734e05ef2d4c6ee Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 31 Jul 2020 12:42:09 -0400 Subject: [PATCH 023/142] remove function import --- python/gtsam/utils/circlePose3.py | 5 +++-- python/gtsam/utils/visual_data_generator.py | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/python/gtsam/utils/circlePose3.py b/python/gtsam/utils/circlePose3.py index 37e7c5568..e1def9427 100644 --- a/python/gtsam/utils/circlePose3.py +++ b/python/gtsam/utils/circlePose3.py @@ -1,6 +1,7 @@ import gtsam +import math 
import numpy as np -from math import pi, cos, sin +from math import pi def circlePose3(numPoses=8, radius=1.0, symbolChar='\0'): @@ -25,7 +26,7 @@ def circlePose3(numPoses=8, radius=1.0, symbolChar='\0'): np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]], order='F')) for i in range(numPoses): key = gtsam.symbol(symbolChar, i) - gti = gtsam.Point3(radius * cos(theta), radius * sin(theta), 0) + gti = gtsam.Point3(radius * math.cos(theta), radius * math.sin(theta), 0) oRi = gtsam.Rot3.Yaw( -theta) # negative yaw goes counterclockwise, with Z down ! gTi = gtsam.Pose3(gRo.compose(oRi), gti) diff --git a/python/gtsam/utils/visual_data_generator.py b/python/gtsam/utils/visual_data_generator.py index 24404ca03..c6e5b8c63 100644 --- a/python/gtsam/utils/visual_data_generator.py +++ b/python/gtsam/utils/visual_data_generator.py @@ -1,7 +1,8 @@ from __future__ import print_function import numpy as np -from math import pi, cos, sin +import math +from math import pi import gtsam from gtsam import PinholeCameraCal3_S2 @@ -85,7 +86,7 @@ def generate_data(options): r = 10 for j in range(len(truth.points)): theta = j * 2 * pi / nrPoints - truth.points[j] = gtsam.Point3(r * cos(theta), r * sin(theta), 0) + truth.points[j] = gtsam.Point3(r * math.cos(theta), r * math.sin(theta), 0) else: # 3D landmarks as vertices of a cube truth.points = [ gtsam.Point3(10, 10, 10), gtsam.Point3(-10, 10, 10), @@ -99,7 +100,7 @@ def generate_data(options): r = 40 for i in range(options.nrCameras): theta = i * 2 * pi / options.nrCameras - t = gtsam.Point3(r * cos(theta), r * sin(theta), height) + t = gtsam.Point3(r * math.cos(theta), r * math.sin(theta), height) truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, gtsam.Point3(0, 0, 0), gtsam.Point3(0, 0, 1), From c0486d39a0d7ef05e421f787a77008248b2d64ff Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 31 Jul 2020 12:47:08 -0400 Subject: [PATCH 024/142] Import classes used more than once --- python/gtsam/utils/visual_data_generator.py | 32 ++++++++++----------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/python/gtsam/utils/visual_data_generator.py b/python/gtsam/utils/visual_data_generator.py index c6e5b8c63..32ccbc8fa 100644 --- a/python/gtsam/utils/visual_data_generator.py +++ b/python/gtsam/utils/visual_data_generator.py @@ -4,7 +4,7 @@ import numpy as np import math from math import pi import gtsam -from gtsam import PinholeCameraCal3_S2 +from gtsam import Point3, Pose3, PinholeCameraCal3_S2, Cal3_S2 class Options: @@ -12,7 +12,7 @@ class Options: Options to generate test scenario """ - def __init__(self, triangle=False, nrCameras=3, K=gtsam.Cal3_S2()): + def __init__(self, triangle=False, nrCameras=3, K=Cal3_S2()): """ Options to generate test scenario @param triangle: generate a triangle scene with 3 points if True, otherwise @@ -29,10 +29,10 @@ class GroundTruth: Object holding generated ground-truth data """ - def __init__(self, K=gtsam.Cal3_S2(), nrCameras=3, nrPoints=4): + def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K - self.cameras = [gtsam.Pose3()] * nrCameras - self.points = [gtsam.Point3(0, 0, 0)] * nrPoints + self.cameras = [Pose3()] * nrCameras + self.points = [Point3(0, 0, 0)] * nrPoints def print_(self, s=""): print(s) @@ -54,11 +54,11 @@ class Data: class NoiseModels: pass - def __init__(self, K=gtsam.Cal3_S2(), nrCameras=3, nrPoints=4): + def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K self.Z = [x[:] for x in [[gtsam.Point2()] * nrPoints] * nrCameras] self.J = [x[:] for x in [[0] * nrPoints] 
* nrCameras] - self.odometry = [gtsam.Pose3()] * nrCameras + self.odometry = [Pose3()] * nrCameras # Set Noise parameters self.noiseModels = Data.NoiseModels() @@ -75,7 +75,7 @@ class Data: def generate_data(options): """ Generate ground-truth and measurement data. """ - K = gtsam.Cal3_S2(500, 500, 0, 640. / 2., 480. / 2.) + K = Cal3_S2(500, 500, 0, 640. / 2., 480. / 2.) nrPoints = 3 if options.triangle else 8 truth = GroundTruth(K=K, nrCameras=options.nrCameras, nrPoints=nrPoints) @@ -86,13 +86,13 @@ def generate_data(options): r = 10 for j in range(len(truth.points)): theta = j * 2 * pi / nrPoints - truth.points[j] = gtsam.Point3(r * math.cos(theta), r * math.sin(theta), 0) + truth.points[j] = Point3(r * math.cos(theta), r * math.sin(theta), 0) else: # 3D landmarks as vertices of a cube truth.points = [ - gtsam.Point3(10, 10, 10), gtsam.Point3(-10, 10, 10), - gtsam.Point3(-10, -10, 10), gtsam.Point3(10, -10, 10), - gtsam.Point3(10, 10, -10), gtsam.Point3(-10, 10, -10), - gtsam.Point3(-10, -10, -10), gtsam.Point3(10, -10, -10) + Point3(10, 10, 10), Point3(-10, 10, 10), + Point3(-10, -10, 10), Point3(10, -10, 10), + Point3(10, 10, -10), Point3(-10, 10, -10), + Point3(-10, -10, -10), Point3(10, -10, -10) ] # Create camera cameras on a circle around the triangle @@ -100,10 +100,10 @@ def generate_data(options): r = 40 for i in range(options.nrCameras): theta = i * 2 * pi / options.nrCameras - t = gtsam.Point3(r * math.cos(theta), r * math.sin(theta), height) + t = Point3(r * math.cos(theta), r * math.sin(theta), height) truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, - gtsam.Point3(0, 0, 0), - gtsam.Point3(0, 0, 1), + Point3(0, 0, 0), + Point3(0, 0, 1), truth.K) # Create measurements for j in range(nrPoints): From fa97bc23727c63a42810629b6235e5c1cf250ae8 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 31 Jul 2020 12:48:50 -0400 Subject: [PATCH 025/142] Newline --- python/gtsam/utils/plot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/gtsam/utils/plot.py b/python/gtsam/utils/plot.py index 8b34ad2bf..0267da8c3 100644 --- a/python/gtsam/utils/plot.py +++ b/python/gtsam/utils/plot.py @@ -398,4 +398,4 @@ def plot_incremental_trajectory(fignum, values, start=0, set_axes_equal(fignum) # Pause for a fixed amount of seconds - plt.pause(time_interval) \ No newline at end of file + plt.pause(time_interval) From 9326bc1ce63e1ae34c3bb3808f16dd443593bf04 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 2 Aug 2020 16:23:50 -0400 Subject: [PATCH 026/142] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 0b3ecb11d..015d65e3d 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # README - Georgia Tech Smoothing and Mapping Library +**As of August 1, develop is officially in "Pre 4.1" mode, and features deprecated in 4.0 were removed. Use the last 4.0.3 release if you need those features. However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag GTSAM_ALLOW_DEPRECATED_SINCE_V4** + ## What is GTSAM? 
GTSAM is a C++ library that implements smoothing and From 00a95642993be561421dec2a50add5be53e04d2c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 2 Aug 2020 20:55:50 -0500 Subject: [PATCH 027/142] fix warning in FrobeniusWormholeFactor --- gtsam/slam/FrobeniusFactor.cpp | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/gtsam/slam/FrobeniusFactor.cpp b/gtsam/slam/FrobeniusFactor.cpp index 6981a6a1c..6d73a73d3 100644 --- a/gtsam/slam/FrobeniusFactor.cpp +++ b/gtsam/slam/FrobeniusFactor.cpp @@ -32,7 +32,7 @@ namespace gtsam { //****************************************************************************** boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel& model, size_t d, bool defaultToUnit) { + const SharedNoiseModel &model, size_t d, bool defaultToUnit) { double sigma = 1.0; if (model != nullptr) { if (model->dim() != 6) { @@ -56,26 +56,29 @@ FrobeniusWormholeFactor::FrobeniusWormholeFactor( Key j1, Key j2, const Rot3 &R12, size_t p, const SharedNoiseModel &model, const boost::shared_ptr &G) : NoiseModelFactor2(ConvertPose3NoiseModel(model, p * 3), j1, j2), - M_(R12.matrix()), // 3*3 in all cases - p_(p), // 4 for SO(4) - pp_(p * p), // 16 for SO(4) + M_(R12.matrix()), // 3*3 in all cases + p_(p), // 4 for SO(4) + pp_(p * p), // 16 for SO(4) G_(G) { if (noiseModel()->dim() != 3 * p_) throw std::invalid_argument( "FrobeniusWormholeFactor: model with incorrect dimension."); if (!G) { G_ = boost::make_shared(); - *G_ = SOn::VectorizedGenerators(p); // expensive! + *G_ = SOn::VectorizedGenerators(p); // expensive! } - if (G_->rows() != pp_ || G_->cols() != SOn::Dimension(p)) - throw std::invalid_argument("FrobeniusWormholeFactor: passed in generators " - "of incorrect dimension."); + if (static_cast(G_->rows()) != pp_ || + static_cast(G_->cols()) != SOn::Dimension(p)) + throw std::invalid_argument( + "FrobeniusWormholeFactor: passed in generators " + "of incorrect dimension."); } //****************************************************************************** -void FrobeniusWormholeFactor::print(const std::string &s, const KeyFormatter &keyFormatter) const { - std::cout << s << "FrobeniusWormholeFactor<" << p_ << ">(" << keyFormatter(key1()) << "," - << keyFormatter(key2()) << ")\n"; +void FrobeniusWormholeFactor::print(const std::string &s, + const KeyFormatter &keyFormatter) const { + std::cout << s << "FrobeniusWormholeFactor<" << p_ << ">(" + << keyFormatter(key1()) << "," << keyFormatter(key2()) << ")\n"; traits::Print(M_, " M: "); noiseModel_->print(" noise model: "); } @@ -90,12 +93,12 @@ bool FrobeniusWormholeFactor::equals(const NonlinearFactor &expected, //****************************************************************************** Vector FrobeniusWormholeFactor::evaluateError( - const SOn& Q1, const SOn& Q2, boost::optional H1, - boost::optional H2) const { + const SOn &Q1, const SOn &Q2, boost::optional H1, + boost::optional H2) const { gttic(FrobeniusWormholeFactorP_evaluateError); - const Matrix& M1 = Q1.matrix(); - const Matrix& M2 = Q2.matrix(); + const Matrix &M1 = Q1.matrix(); + const Matrix &M2 = Q2.matrix(); assert(M1.rows() == p_ && M2.rows() == p_); const size_t dim = 3 * p_; // Stiefel manifold dimension From ae5956bd10edd2d0e1b92f5778f12b4ac6ff0dd7 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Wed, 5 Aug 2020 00:18:42 -0700 Subject: [PATCH 028/142] changes with passing unit tests --- gtsam/sfm/BinaryMeasurement.h | 24 +++++++++++++----------- tests/testTranslationRecovery.cpp | 28 
+++++++++++++++++----------- 2 files changed, 30 insertions(+), 22 deletions(-) diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h index 41ffa38d6..99f0e6882 100644 --- a/gtsam/sfm/BinaryMeasurement.h +++ b/gtsam/sfm/BinaryMeasurement.h @@ -18,6 +18,13 @@ * @brief Binary measurement for representing edges in the epipolar graph */ +#include + + +#include +#include +#include + #include #include @@ -71,22 +78,17 @@ public: } /** equals */ - bool equals(const BetweenMeasurement& expected, double tol=1e-9) const { - const BetweenMeasurement *e = dynamic_cast*> (&expected); - return e != nullptr && key1_ == expected.key1() && - key2_ == expected.key2() + bool equals(const BinaryMeasurement& expected, double tol=1e-9) const { + const BinaryMeasurement *e = dynamic_cast*> (&expected); + return e != nullptr && key1_ == e->key1_ && + key2_ == e->key2_ && traits::Equals(this->measured_, e->measured_, tol) && - noiseModel_.equals(expected.noiseModel()); + noiseModel_->equals(expected.noiseModel()); } /** return the measured */ - const VALUE& measured() const { + VALUE measured() const { return measured_; } - - /** number of variables attached to this measurement */ - std::size_t size() const { - return 2; - } }; // \class BetweenMeasurement } \ No newline at end of file diff --git a/tests/testTranslationRecovery.cpp b/tests/testTranslationRecovery.cpp index 5a98c3bf5..eb34ba803 100644 --- a/tests/testTranslationRecovery.cpp +++ b/tests/testTranslationRecovery.cpp @@ -49,15 +49,17 @@ TEST(TranslationRecovery, BAL) { poses, {{0, 1}, {0, 2}, {1, 2}}); // Check - const Pose3 wTa = poses.at(0), wTb = poses.at(1), - wTc = poses.at(2); - const Point3 Ta = wTa.translation(), Tb = wTb.translation(), - Tc = wTc.translation(); - const Rot3 aRw = wTa.rotation().inverse(); - const Unit3 w_aZb = relativeTranslations.at({0, 1}); - EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb)); - const Unit3 w_aZc = relativeTranslations.at({0, 2}); - EXPECT(assert_equal(Unit3(Tc - Ta), w_aZc)); + Unit3 w_aZb_stored; // measurement between 0 and 1 stored for next unit test + for(auto& unitTranslation : relativeTranslations) { + const Pose3 wTa = poses.at(unitTranslation.key1()), + wTb = poses.at(unitTranslation.key2()); + const Point3 Ta = wTa.translation(), Tb = wTb.translation(); + const Unit3 w_aZb = unitTranslation.measured(); + EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb)); + if(unitTranslation.key1() == 0 && unitTranslation.key2() == 1) { + w_aZb_stored = unitTranslation.measured(); + } + } TranslationRecovery algorithm(relativeTranslations); const auto graph = algorithm.buildGraph(); @@ -69,10 +71,14 @@ TEST(TranslationRecovery, BAL) { // Check result for first two translations, determined by prior EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); - EXPECT(assert_equal(Point3(2 * w_aZb.point3()), result.at(1))); + EXPECT(assert_equal(Point3(2 * w_aZb_stored.point3()), result.at(1))); // Check that the third translations is correct - Point3 expected = (Tc - Ta) * (scale / (Tb - Ta).norm()); + Point3 Ta = poses.at(0).translation(); + Point3 Tb = poses.at(1).translation(); + Point3 Tc = poses.at(2).translation(); + Point3 expected = + (Tc - Ta) * (scale / (Tb - Ta).norm()); EXPECT(assert_equal(expected, result.at(2), 1e-4)); // TODO(frank): how to get stats back? 
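
Note on the TranslationRecovery API change carried by PATCH 019 and PATCH 028 above: the input type changed from a map keyed by pose-key pairs to a std::vector of BinaryMeasurement<Unit3>, so each edge now carries its own keys, measured direction, and noise model, and buildGraph() anchors the first edge's key1 at the origin and its key2 at scale times the measured direction. The sketch below is only an illustration of driving that new interface, not code from the patches: the keys (0, 1, 2), the direction values, and the scale are made-up assumptions, and the 0.01 sigma simply mirrors the one used in SimulateMeasurements.

#include <gtsam/geometry/Unit3.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/sfm/BinaryMeasurement.h>
#include <gtsam/sfm/TranslationRecovery.h>

using namespace gtsam;

int main() {
  // One isotropic noise model shared by all edges (same sigma as SimulateMeasurements).
  auto noise = noiseModel::Isotropic::Sigma(3, 0.01);

  // Each BinaryMeasurement<Unit3> stores key1, key2, the unit translation
  // direction from key1 to key2, and its noise model (illustrative values).
  TranslationRecovery::TranslationEdges edges;
  edges.emplace_back(0, 1, Unit3(1, 0, 0), noise);
  edges.emplace_back(0, 2, Unit3(0, 1, 0), noise);
  edges.emplace_back(1, 2, Unit3(-1, 1, 0), noise);

  // Recover translations; key 0 is fixed at the origin and key 1 at
  // scale * measured direction of the first edge, per buildGraph()/addPrior().
  TranslationRecovery algorithm(edges);
  const Values result = algorithm.run(2.0);  // scale = 2.0
  result.print("recovered translations:\n");
  return 0;
}
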
From 68671427e67d29ca51e988a32c192a3299057a64 Mon Sep 17 00:00:00 2001 From: mawallace Date: Wed, 5 Aug 2020 22:15:10 -0400 Subject: [PATCH 029/142] Remove set_zlabel from plot_pose2 --- cython/gtsam/utils/plot.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cython/gtsam/utils/plot.py b/cython/gtsam/utils/plot.py index 1e976a69e..77031eec4 100644 --- a/cython/gtsam/utils/plot.py +++ b/cython/gtsam/utils/plot.py @@ -155,7 +155,6 @@ def plot_pose2(fignum, pose, axis_length=0.1, covariance=None, axes.set_xlabel(axis_labels[0]) axes.set_ylabel(axis_labels[1]) - axes.set_zlabel(axis_labels[2]) return fig From 667fb9a4b956f04baf619126a4e33d4a4869f207 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Thu, 6 Aug 2020 06:28:34 -0700 Subject: [PATCH 030/142] binary measurement wrap --- gtsam.h | 44 +++++++++++++++++++++++++++++++---- gtsam/sfm/BinaryMeasurement.h | 3 +++ 2 files changed, 43 insertions(+), 4 deletions(-) diff --git a/gtsam.h b/gtsam.h index 2cd30be42..80fbddec7 100644 --- a/gtsam.h +++ b/gtsam.h @@ -2568,10 +2568,12 @@ virtual class BetweenFactor : gtsam::NoiseModelFactor { void serialize() const; }; - - #include -template +template virtual class NonlinearEquality : gtsam::NoiseModelFactor { // Constructor - forces exact evaluation NonlinearEquality(size_t j, const T& feasible); @@ -2582,7 +2584,6 @@ virtual class NonlinearEquality : gtsam::NoiseModelFactor { void serialize() const; }; - #include template virtual class RangeFactor : gtsam::NoiseModelFactor { @@ -2880,6 +2881,41 @@ virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor { Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2); }; +#include +template +class BinaryMeasurement { + BinaryMeasurement(size_t key1, size_t key2, const T& measured, + const gtsam::noiseModel::Base* model); + size_t key1() const; + size_t key2() const; + T measured() const; +}; + +typedef gtsam::BinaryMeasurement BinaryMeasurementUnit3; +typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; + +// std::vector::shared_ptr> +class BinaryMeasurementUnit3s +{ + BinaryMeasurementUnit3s(); + size_t size() const; + gtsam::BinaryMeasurementUnit3* at(size_t i) const; + void push_back(const gtsam::BinaryMeasurementUnit3* measurement); +}; + +// std::vector::shared_ptr> +class BinaryMeasurementRot3s +{ + BinaryMeasurementRot3s(); + size_t size() const; + gtsam::BinaryMeasurementRot3* at(size_t i) const; + void push_back(const gtsam::BinaryMeasurementRot3* measurement); +}; + +#include +class TranslationRecovery { + TranslationRecovery() +} //************************************************************************* // Navigation //************************************************************************* diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h index 99f0e6882..c5bb9c625 100644 --- a/gtsam/sfm/BinaryMeasurement.h +++ b/gtsam/sfm/BinaryMeasurement.h @@ -91,4 +91,7 @@ public: return measured_; } }; // \class BetweenMeasurement + + using BinaryMeasurementUnit3s = std::vector::shared_ptr>; + using BinaryMeasurementRot3s = std::vector::shared_ptr>; } \ No newline at end of file From ccdd1471ed148aa0df051c59b4a9f043ce7aa459 Mon Sep 17 00:00:00 2001 From: ss Date: Thu, 6 Aug 2020 14:26:22 -0400 Subject: [PATCH 031/142] Fix pose2 align bug. 
--- gtsam/geometry/Pose2.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gtsam/geometry/Pose2.cpp b/gtsam/geometry/Pose2.cpp index 9c41a76c8..71df0f753 100644 --- a/gtsam/geometry/Pose2.cpp +++ b/gtsam/geometry/Pose2.cpp @@ -322,10 +322,10 @@ boost::optional align(const vector& pairs) { // calculate cos and sin double c=0,s=0; for(const Point2Pair& pair: pairs) { - Point2 dq = pair.first - cp; - Point2 dp = pair.second - cq; - c += dp.x() * dq.x() + dp.y() * dq.y(); - s += dp.y() * dq.x() - dp.x() * dq.y(); // this works but is negative from formula above !! :-( + Point2 dp = pair.first - cp; + Point2 dq = pair.second - cq; + c += dp.x() * dq.x() + dp.y() * dq.y(); + s += -dp.y() * dq.x() + dp.x() * dq.y(); } // calculate angle and translation From 87499699776e6cea7e9a08b02ac78ea1a93e0d9c Mon Sep 17 00:00:00 2001 From: Sam Bateman Date: Thu, 6 Aug 2020 16:59:57 -0700 Subject: [PATCH 032/142] added mutex to BayesTreeCliqueBase for access to cached variable and added copy/copy assignment constructors to allow for full previous functionality while adding thread safety to marginalCovariance. --- gtsam/inference/BayesTreeCliqueBase-inst.h | 4 +++ gtsam/inference/BayesTreeCliqueBase.h | 30 +++++++++++++++++++--- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/gtsam/inference/BayesTreeCliqueBase-inst.h b/gtsam/inference/BayesTreeCliqueBase-inst.h index a02fe274e..df775557c 100644 --- a/gtsam/inference/BayesTreeCliqueBase-inst.h +++ b/gtsam/inference/BayesTreeCliqueBase-inst.h @@ -91,6 +91,7 @@ namespace gtsam { template size_t BayesTreeCliqueBase::numCachedSeparatorMarginals() const { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); if (!cachedSeparatorMarginal_) return 0; @@ -144,6 +145,7 @@ namespace gtsam { typename BayesTreeCliqueBase::FactorGraphType BayesTreeCliqueBase::separatorMarginal( Eliminate function) const { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); gttic(BayesTreeCliqueBase_separatorMarginal); // Check if the Separator marginal was already calculated if (!cachedSeparatorMarginal_) { @@ -206,6 +208,8 @@ namespace gtsam { // When a shortcut is requested, all of the shortcuts between it and the // root are also generated. So, if this clique's cached shortcut is set, // recursively call over all child cliques. Otherwise, it is unnecessary. 
+ + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); if (cachedSeparatorMarginal_) { for(derived_ptr& child: children) { child->deleteCachedShortcuts(); diff --git a/gtsam/inference/BayesTreeCliqueBase.h b/gtsam/inference/BayesTreeCliqueBase.h index 7aca84635..be71801b8 100644 --- a/gtsam/inference/BayesTreeCliqueBase.h +++ b/gtsam/inference/BayesTreeCliqueBase.h @@ -24,6 +24,7 @@ #include #include +#include namespace gtsam { @@ -75,10 +76,28 @@ namespace gtsam { /** Construct from a conditional, leaving parent and child pointers uninitialized */ BayesTreeCliqueBase(const sharedConditional& conditional) : conditional_(conditional), problemSize_(1) {} + /** Shallow copy constructor */ + BayesTreeCliqueBase(const BayesTreeCliqueBase& c) : conditional_(c.conditional_), parent_(c.parent_), children(c.children), problemSize_(c.problemSize_), is_root(c.is_root) {} + + /** Shallow copy assignment constructor */ + BayesTreeCliqueBase& operator=(const BayesTreeCliqueBase& c) { + conditional_ = c.conditional_; + parent_ = c.parent_; + children = c.children; + problemSize_ = c.problemSize_; + is_root = c.is_root; + return *this; + } + /// @} - /// This stores the Cached separator margnal P(S) + /// This stores the Cached separator marginal P(S) mutable boost::optional cachedSeparatorMarginal_; + /// This protects Cached seperator marginal P(S) from concurent read/writes + /// as many the functions which access it are const (hence the mutable) + /// leading to the false impression that these const functions are thread-safe + /// which is not true due to these mutable values. This is fixed by applying this mutex. + mutable std::mutex cachedSeparatorMarginalMutex_; public: sharedConditional conditional_; @@ -144,7 +163,9 @@ namespace gtsam { void deleteCachedShortcuts(); const boost::optional& cachedSeparatorMarginal() const { - return cachedSeparatorMarginal_; } + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); + return cachedSeparatorMarginal_; + } friend class BayesTree; @@ -159,7 +180,10 @@ namespace gtsam { KeyVector shortcut_indices(const derived_ptr& B, const FactorGraphType& p_Cp_B) const; /** Non-recursive delete cached shortcuts and marginals - internal only. 
*/ - void deleteCachedShortcutsNonRecursive() { cachedSeparatorMarginal_ = boost::none; } + void deleteCachedShortcutsNonRecursive() { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); + cachedSeparatorMarginal_ = boost::none; + } private: From 9564c4873b62ce7a4b6e3a467da10f1ad1138b5d Mon Sep 17 00:00:00 2001 From: Sam Bateman Date: Thu, 6 Aug 2020 17:48:21 -0700 Subject: [PATCH 033/142] update spelling/indent --- gtsam/inference/BayesTreeCliqueBase-inst.h | 2 +- gtsam/inference/BayesTreeCliqueBase.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gtsam/inference/BayesTreeCliqueBase-inst.h b/gtsam/inference/BayesTreeCliqueBase-inst.h index df775557c..dfcc7c318 100644 --- a/gtsam/inference/BayesTreeCliqueBase-inst.h +++ b/gtsam/inference/BayesTreeCliqueBase-inst.h @@ -91,7 +91,7 @@ namespace gtsam { template size_t BayesTreeCliqueBase::numCachedSeparatorMarginals() const { - std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); if (!cachedSeparatorMarginal_) return 0; diff --git a/gtsam/inference/BayesTreeCliqueBase.h b/gtsam/inference/BayesTreeCliqueBase.h index be71801b8..7b79ccf68 100644 --- a/gtsam/inference/BayesTreeCliqueBase.h +++ b/gtsam/inference/BayesTreeCliqueBase.h @@ -93,7 +93,7 @@ namespace gtsam { /// This stores the Cached separator marginal P(S) mutable boost::optional cachedSeparatorMarginal_; - /// This protects Cached seperator marginal P(S) from concurent read/writes + /// This protects Cached seperator marginal P(S) from concurrent read/writes /// as many the functions which access it are const (hence the mutable) /// leading to the false impression that these const functions are thread-safe /// which is not true due to these mutable values. This is fixed by applying this mutex. From 9579db73ead0e154260e338b0af7a15dab07c999 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 6 Aug 2020 20:56:18 -0500 Subject: [PATCH 034/142] Revert "fix warning in FrobeniusWormholeFactor" This reverts commit 00a95642993be561421dec2a50add5be53e04d2c. --- gtsam/slam/FrobeniusFactor.cpp | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/gtsam/slam/FrobeniusFactor.cpp b/gtsam/slam/FrobeniusFactor.cpp index 6d73a73d3..6981a6a1c 100644 --- a/gtsam/slam/FrobeniusFactor.cpp +++ b/gtsam/slam/FrobeniusFactor.cpp @@ -32,7 +32,7 @@ namespace gtsam { //****************************************************************************** boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel &model, size_t d, bool defaultToUnit) { + const SharedNoiseModel& model, size_t d, bool defaultToUnit) { double sigma = 1.0; if (model != nullptr) { if (model->dim() != 6) { @@ -56,29 +56,26 @@ FrobeniusWormholeFactor::FrobeniusWormholeFactor( Key j1, Key j2, const Rot3 &R12, size_t p, const SharedNoiseModel &model, const boost::shared_ptr &G) : NoiseModelFactor2(ConvertPose3NoiseModel(model, p * 3), j1, j2), - M_(R12.matrix()), // 3*3 in all cases - p_(p), // 4 for SO(4) - pp_(p * p), // 16 for SO(4) + M_(R12.matrix()), // 3*3 in all cases + p_(p), // 4 for SO(4) + pp_(p * p), // 16 for SO(4) G_(G) { if (noiseModel()->dim() != 3 * p_) throw std::invalid_argument( "FrobeniusWormholeFactor: model with incorrect dimension."); if (!G) { G_ = boost::make_shared(); - *G_ = SOn::VectorizedGenerators(p); // expensive! + *G_ = SOn::VectorizedGenerators(p); // expensive! 
} - if (static_cast(G_->rows()) != pp_ || - static_cast(G_->cols()) != SOn::Dimension(p)) - throw std::invalid_argument( - "FrobeniusWormholeFactor: passed in generators " - "of incorrect dimension."); + if (G_->rows() != pp_ || G_->cols() != SOn::Dimension(p)) + throw std::invalid_argument("FrobeniusWormholeFactor: passed in generators " + "of incorrect dimension."); } //****************************************************************************** -void FrobeniusWormholeFactor::print(const std::string &s, - const KeyFormatter &keyFormatter) const { - std::cout << s << "FrobeniusWormholeFactor<" << p_ << ">(" - << keyFormatter(key1()) << "," << keyFormatter(key2()) << ")\n"; +void FrobeniusWormholeFactor::print(const std::string &s, const KeyFormatter &keyFormatter) const { + std::cout << s << "FrobeniusWormholeFactor<" << p_ << ">(" << keyFormatter(key1()) << "," + << keyFormatter(key2()) << ")\n"; traits::Print(M_, " M: "); noiseModel_->print(" noise model: "); } @@ -93,12 +90,12 @@ bool FrobeniusWormholeFactor::equals(const NonlinearFactor &expected, //****************************************************************************** Vector FrobeniusWormholeFactor::evaluateError( - const SOn &Q1, const SOn &Q2, boost::optional H1, - boost::optional H2) const { + const SOn& Q1, const SOn& Q2, boost::optional H1, + boost::optional H2) const { gttic(FrobeniusWormholeFactorP_evaluateError); - const Matrix &M1 = Q1.matrix(); - const Matrix &M2 = Q2.matrix(); + const Matrix& M1 = Q1.matrix(); + const Matrix& M2 = Q2.matrix(); assert(M1.rows() == p_ && M2.rows() == p_); const size_t dim = 3 * p_; // Stiefel manifold dimension From b9ce41ce025f5199f2844a21b800e4346d800206 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 6 Aug 2020 20:58:14 -0500 Subject: [PATCH 035/142] fix warning in FrobeniusWormholeFactor --- gtsam/slam/FrobeniusFactor.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gtsam/slam/FrobeniusFactor.cpp b/gtsam/slam/FrobeniusFactor.cpp index 6981a6a1c..80aeea947 100644 --- a/gtsam/slam/FrobeniusFactor.cpp +++ b/gtsam/slam/FrobeniusFactor.cpp @@ -67,7 +67,8 @@ FrobeniusWormholeFactor::FrobeniusWormholeFactor( G_ = boost::make_shared(); *G_ = SOn::VectorizedGenerators(p); // expensive! 
} - if (G_->rows() != pp_ || G_->cols() != SOn::Dimension(p)) + if (static_cast(G_->rows()) != pp_ || + static_cast(G_->cols()) != SOn::Dimension(p)) throw std::invalid_argument("FrobeniusWormholeFactor: passed in generators " "of incorrect dimension."); } From 3ea989772375b0a78c8dd8e9bd68bbc22de38982 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Fri, 7 Aug 2020 16:11:05 -0500 Subject: [PATCH 036/142] function for consistent width printing of CMake flags --- CMakeLists.txt | 104 +++++++++++++++++++------------------- cmake/GtsamPrinting.cmake | 32 ++++++++---- 2 files changed, 73 insertions(+), 63 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 61730d45a..d2b9bc75e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -509,104 +509,104 @@ set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>= # Print configuration variables message(STATUS "===============================================================") message(STATUS "================ Configuration Options ======================") -message(STATUS " CMAKE_CXX_COMPILER_ID type : ${CMAKE_CXX_COMPILER_ID}") -message(STATUS " CMAKE_CXX_COMPILER_VERSION : ${CMAKE_CXX_COMPILER_VERSION}") -message(STATUS " CMake version : ${CMAKE_VERSION}") -message(STATUS " CMake generator : ${CMAKE_GENERATOR}") -message(STATUS " CMake build tool : ${CMAKE_BUILD_TOOL}") +print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}") +print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}") +print_config("CMake version" "${CMAKE_VERSION}") +print_config("CMake generator" "${CMAKE_GENERATOR}") +print_config("CMake build tool" "${CMAKE_BUILD_TOOL}") message(STATUS "Build flags ") -print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ") -print_config_flag(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all' ") -print_config_flag(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") +print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests") +print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'") +print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") if (DOXYGEN_FOUND) - print_config_flag(${GTSAM_BUILD_DOCS} "Build Docs ") + print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs") endif() -print_config_flag(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries ") -print_config_flag(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name ") +print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries") +print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name") if(GTSAM_UNSTABLE_AVAILABLE) - print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") + print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable") endif() if(NOT MSVC AND NOT XCODE_VERSION) - print_config_flag(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") - message(STATUS " Build type : ${CMAKE_BUILD_TYPE}") - message(STATUS " C compilation flags : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") - message(STATUS " C++ compilation flags : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") + print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") + print_config("Build type" "${CMAKE_BUILD_TYPE}") + print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") + print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") endif() 
print_build_options_for_target(gtsam) -message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") +print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") if(GTSAM_USE_TBB) - message(STATUS " Use Intel TBB : Yes") + print_config("Use Intel TBB" "Yes") elseif(TBB_FOUND) - message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled") + print_config("Use Intel TBB" "TBB found but GTSAM_WITH_TBB is disabled") else() - message(STATUS " Use Intel TBB : TBB not found") + print_config("Use Intel TBB" "TBB not found") endif() if(GTSAM_USE_EIGEN_MKL) - message(STATUS " Eigen will use MKL : Yes") + print_config("Eigen will use MKL" "Yes") elseif(MKL_FOUND) - message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled") + print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled") else() - message(STATUS " Eigen will use MKL : MKL not found") + print_config("Eigen will use MKL" "MKL not found") endif() if(GTSAM_USE_EIGEN_MKL_OPENMP) - message(STATUS " Eigen will use MKL and OpenMP : Yes") + print_config("Eigen will use MKL and OpenMP" "Yes") elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") + print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") elseif(OPENMP_FOUND AND NOT MKL_FOUND) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found") + print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found") elseif(OPENMP_FOUND) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") + print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") else() - message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found") + print_config("Eigen will use MKL and OpenMP" "OpenMP not found") endif() -message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}") +print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}") if(GTSAM_THROW_CHEIRALITY_EXCEPTION) - message(STATUS " Cheirality exceptions enabled : YES") + print_config("Cheirality exceptions enabled" "YES") else() - message(STATUS " Cheirality exceptions enabled : NO") + print_config("Cheirality exceptions enabled" "NO") endif() if(NOT MSVC AND NOT XCODE_VERSION) if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) - message(STATUS " Build with ccache : Yes") + print_config("Build with ccache" "Yes") elseif(CCACHE_FOUND) - message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") + print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") else() - message(STATUS " Build with ccache : No") + print_config("Build with ccache" "No") endif() endif() -message(STATUS "Packaging flags ") -message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}") -message(STATUS " CPack Generator : ${CPACK_GENERATOR}") +message(STATUS "Packaging flags") +print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}") +print_config("CPack Generator" "${CPACK_GENERATOR}") message(STATUS "GTSAM flags ") -print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") -print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") -print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") 
-print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") -print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1") -print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ") -print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") -print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") -print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ") +print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") +print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") +print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") +print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") +print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1") +print_enabled_config(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ") +print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") +print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") +print_enabled_config(${GTSAM_BUILD_WRAP} "Build Wrap ") -message(STATUS "MATLAB toolbox flags ") -print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") +message(STATUS "MATLAB toolbox flags") +print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") if (${GTSAM_INSTALL_MATLAB_TOOLBOX}) - message(STATUS " MATLAB root : ${MATLAB_ROOT}") - message(STATUS " MEX binary : ${MEX_COMMAND}") + print_config("MATLAB root" "${MATLAB_ROOT}") + print_config("MEX binary" "${MEX_COMMAND}") endif() message(STATUS "Cython toolbox flags ") -print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ") +print_enabled_config(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ") if(GTSAM_INSTALL_CYTHON_TOOLBOX) - message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}") + print_config("Python version" "${GTSAM_PYTHON_VERSION}") endif() message(STATUS "===============================================================") diff --git a/cmake/GtsamPrinting.cmake b/cmake/GtsamPrinting.cmake index e53f9c54f..5757f5ee1 100644 --- a/cmake/GtsamPrinting.cmake +++ b/cmake/GtsamPrinting.cmake @@ -1,14 +1,3 @@ -# print configuration variables -# Usage: -#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ") -function(print_config_flag flag msg) - if (flag) - message(STATUS " ${msg}: Enabled") - else () - message(STATUS " ${msg}: Disabled") - endif () -endfunction() - # Based on https://github.com/jimbraun/XCDF/blob/master/cmake/CMakePadString.cmake function(string_pad RESULT_NAME DESIRED_LENGTH VALUE) string(LENGTH "${VALUE}" VALUE_LENGTH) @@ -26,6 +15,27 @@ endfunction() set(GTSAM_PRINT_SUMMARY_PADDING_LENGTH 50 CACHE STRING "Padding of cmake summary report lines after configuring.") mark_as_advanced(GTSAM_PRINT_SUMMARY_PADDING_LENGTH) +# print configuration variables with automatic padding +# Usage: +# print_config(${GTSAM_BUILD_TESTS} "Build Tests") +function(print_config config msg) + string_pad(padded_config ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${config}") + message(STATUS "${padded_config}: ${msg}") +endfunction() + +# print configuration variable with enabled/disabled value +# Usage: +# print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests ") +function(print_enabled_config config msg) + string_pad(padded_msg ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${msg}") + if (config) + message(STATUS 
"${padded_msg}: Enabled") + else () + message(STATUS "${padded_msg}: Disabled") + endif () +endfunction() + + # Print " var: ${var}" padding with spaces as needed function(print_padded variable_name) string_pad(padded_prop ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${variable_name}") From 2540285afe0ab2d252fba83ea7afd1ea4a3ad88a Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Sat, 8 Aug 2020 12:09:30 -0700 Subject: [PATCH 037/142] sfm changes to docs, format, wrapper --- gtsam.h | 22 ----- gtsam/sfm/BinaryMeasurement.h | 97 +++++++++++------------ gtsam/sfm/TranslationRecovery.cpp | 12 +-- gtsam/sfm/TranslationRecovery.h | 10 +-- gtsam/sfm/tests/testBinaryMeasurement.cpp | 13 ++- gtsam/sfm/tests/testTranslationFactor.cpp | 8 +- 6 files changed, 71 insertions(+), 91 deletions(-) diff --git a/gtsam.h b/gtsam.h index 80fbddec7..4cac956b6 100644 --- a/gtsam.h +++ b/gtsam.h @@ -2894,28 +2894,6 @@ class BinaryMeasurement { typedef gtsam::BinaryMeasurement BinaryMeasurementUnit3; typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; -// std::vector::shared_ptr> -class BinaryMeasurementUnit3s -{ - BinaryMeasurementUnit3s(); - size_t size() const; - gtsam::BinaryMeasurementUnit3* at(size_t i) const; - void push_back(const gtsam::BinaryMeasurementUnit3* measurement); -}; - -// std::vector::shared_ptr> -class BinaryMeasurementRot3s -{ - BinaryMeasurementRot3s(); - size_t size() const; - gtsam::BinaryMeasurementRot3* at(size_t i) const; - void push_back(const gtsam::BinaryMeasurementRot3* measurement); -}; - -#include -class TranslationRecovery { - TranslationRecovery() -} //************************************************************************* // Navigation //************************************************************************* diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h index c5bb9c625..9b0874106 100644 --- a/gtsam/sfm/BinaryMeasurement.h +++ b/gtsam/sfm/BinaryMeasurement.h @@ -15,12 +15,15 @@ * @file BinaryMeasurement.h * @author Akshay Krishnan * @date July 2020 - * @brief Binary measurement for representing edges in the epipolar graph + * @brief Binary measurement for representing edges in the epipolar graph. + * A binary measurement is similar to a BetweenFactor, except that it does not contain + * an error function. It is a measurement (along with a noise model) from one key to + * another. Note that the direction is important. A measurement from key1 to key2 is not + * the same as the same measurement from key2 to key1. 
*/ #include - #include #include #include @@ -30,68 +33,60 @@ namespace gtsam { -template +template class BinaryMeasurement { - // Check that VALUE type is testable - BOOST_CONCEPT_ASSERT((IsTestable)); + // Check that VALUE type is testable + BOOST_CONCEPT_ASSERT((IsTestable)); -public: - typedef VALUE T; + public: + typedef VALUE T; - // shorthand for a smart pointer to a measurement - typedef typename boost::shared_ptr shared_ptr; + // shorthand for a smart pointer to a measurement + typedef typename boost::shared_ptr shared_ptr; -private: - Key key1_, key2_; /** Keys */ + private: + Key key1_, key2_; /** Keys */ - VALUE measured_; /** The measurement */ + VALUE measured_; /** The measurement */ - SharedNoiseModel noiseModel_; /** Noise model */ + SharedNoiseModel noiseModel_; /** Noise model */ -public: - /** default constructor - only use for serialization */ - BinaryMeasurement() {} - - /** Constructor */ - BinaryMeasurement(Key key1, Key key2, const VALUE& measured, - const SharedNoiseModel& model = nullptr) : + public: + /** Constructor */ + BinaryMeasurement(Key key1, Key key2, const VALUE &measured, + const SharedNoiseModel &model = nullptr) : key1_(key1), key2_(key2), measured_(measured), noiseModel_(model) { - } + } - virtual ~BinaryMeasurement() {} + Key key1() const { return key1_; } - Key key1() const { return key1_; } + Key key2() const { return key2_; } - Key key2() const { return key2_; } + const SharedNoiseModel &noiseModel() const { return noiseModel_; } - const SharedNoiseModel& noiseModel() const { return noiseModel_; } + /** implement functions needed for Testable */ - /** implement functions needed for Testable */ + /** print */ + void print(const std::string &s, const KeyFormatter &keyFormatter = DefaultKeyFormatter) const { + std::cout << s << "BinaryMeasurement(" + << keyFormatter(this->key1()) << "," + << keyFormatter(this->key2()) << ")\n"; + traits::Print(measured_, " measured: "); + this->noiseModel_->print(" noise model: "); + } - /** print */ - void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { - std::cout << s << "BinaryMeasurement(" - << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << ")\n"; - traits::Print(measured_, " measured: "); - this->noiseModel_->print(" noise model: "); - } + /** equals */ + bool equals(const BinaryMeasurement &expected, double tol = 1e-9) const { + const BinaryMeasurement *e = dynamic_cast *> (&expected); + return e != nullptr && key1_ == e->key1_ && + key2_ == e->key2_ + && traits::Equals(this->measured_, e->measured_, tol) && + noiseModel_->equals(*expected.noiseModel()); + } - /** equals */ - bool equals(const BinaryMeasurement& expected, double tol=1e-9) const { - const BinaryMeasurement *e = dynamic_cast*> (&expected); - return e != nullptr && key1_ == e->key1_ && - key2_ == e->key2_ - && traits::Equals(this->measured_, e->measured_, tol) && - noiseModel_->equals(expected.noiseModel()); - } - - /** return the measured */ - VALUE measured() const { - return measured_; - } - }; // \class BetweenMeasurement - - using BinaryMeasurementUnit3s = std::vector::shared_ptr>; - using BinaryMeasurementRot3s = std::vector::shared_ptr>; + /** return the measured value */ + VALUE measured() const { + return measured_; + } +}; // \class BetweenMeasurement } \ No newline at end of file diff --git a/gtsam/sfm/TranslationRecovery.cpp b/gtsam/sfm/TranslationRecovery.cpp index c3be003ba..665336cc4 100644 --- a/gtsam/sfm/TranslationRecovery.cpp +++ 
b/gtsam/sfm/TranslationRecovery.cpp @@ -10,10 +10,10 @@ * -------------------------------------------------------------------------- */ /** - * @file TranslationRecovery.h + * @file TranslationRecovery.cpp * @author Frank Dellaert * @date March 2020 - * @brief test recovering translations when rotations are given. + * @brief Source code for recovering translations when rotations are given */ #include @@ -37,15 +37,15 @@ NonlinearFactorGraph TranslationRecovery::buildGraph() const { // Add all relative translation edges for (auto edge : relativeTranslations_) { - graph.emplace_shared(edge.key1(), edge.key2(), - edge.measured(), edge.noiseModel()); + graph.emplace_shared(edge.key1(), edge.key2(), + edge.measured(), edge.noiseModel()); } return graph; } void TranslationRecovery::addPrior(const double scale, - NonlinearFactorGraph* graph) const { + NonlinearFactorGraph *graph) const { //TODO(akshay-krishnan): make this an input argument auto priorNoiseModel = noiseModel::Isotropic::Sigma(3, 0.01); auto edge = relativeTranslations_.begin(); @@ -82,7 +82,7 @@ Values TranslationRecovery::run(const double scale) const { } TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements( - const Values& poses, const vector& edges) { + const Values &poses, const vector &edges) { auto edgeNoiseModel = noiseModel::Isotropic::Sigma(3, 0.01); TranslationEdges relativeTranslations; for (auto edge : edges) { diff --git a/gtsam/sfm/TranslationRecovery.h b/gtsam/sfm/TranslationRecovery.h index 1392817b2..4c6a95cbe 100644 --- a/gtsam/sfm/TranslationRecovery.h +++ b/gtsam/sfm/TranslationRecovery.h @@ -13,7 +13,7 @@ * @file TranslationRecovery.h * @author Frank Dellaert * @date March 2020 - * @brief test recovering translations when rotations are given. + * @brief Recovering translations in an epipolar graph when rotations are given. */ #include @@ -66,8 +66,8 @@ class TranslationRecovery { * used to modify the parameters for the LM optimizer. By default, uses the * default LM parameters. */ - TranslationRecovery(const TranslationEdges& relativeTranslations, - const LevenbergMarquardtParams& lmParams = LevenbergMarquardtParams()) + TranslationRecovery(const TranslationEdges &relativeTranslations, + const LevenbergMarquardtParams &lmParams = LevenbergMarquardtParams()) : relativeTranslations_(relativeTranslations), params_(lmParams) { params_.setVerbosityLM("Summary"); } @@ -85,7 +85,7 @@ class TranslationRecovery { * @param scale scale for first relative translation which fixes gauge. * @param graph factor graph to which prior is added. */ - void addPrior(const double scale, NonlinearFactorGraph* graph) const; + void addPrior(const double scale, NonlinearFactorGraph *graph) const; /** * @brief Create random initial translations. @@ -112,6 +112,6 @@ class TranslationRecovery { * direction between the cameras. */ static TranslationEdges SimulateMeasurements( - const Values& poses, const std::vector& edges); + const Values &poses, const std::vector &edges); }; } // namespace gtsam diff --git a/gtsam/sfm/tests/testBinaryMeasurement.cpp b/gtsam/sfm/tests/testBinaryMeasurement.cpp index ae3ba23e5..3dd81c2c1 100644 --- a/gtsam/sfm/tests/testBinaryMeasurement.cpp +++ b/gtsam/sfm/tests/testBinaryMeasurement.cpp @@ -27,7 +27,6 @@ using namespace std; using namespace gtsam; -// Keys are deliberately *not* in sorted order to test that case. 
static const Key kKey1(2), kKey2(1); // Create noise models for unit3 and rot3 @@ -43,14 +42,24 @@ TEST(BinaryMeasurement, Unit3) { EXPECT_LONGS_EQUAL(unit3Measurement.key1(), kKey1); EXPECT_LONGS_EQUAL(unit3Measurement.key2(), kKey2); EXPECT(unit3Measurement.measured().equals(unit3Measured)); + + BinaryMeasurement unit3MeasurementCopy(kKey1, kKey2, unit3Measured, + unit3_model); + EXPECT(unit3Measurement.equals(unit3MeasurementCopy)); } TEST(BinaryMeasurement, Rot3) { + // testing the accessors BinaryMeasurement rot3Measurement(kKey1, kKey2, rot3Measured, - rot3_model); + rot3_model); EXPECT_LONGS_EQUAL(rot3Measurement.key1(), kKey1); EXPECT_LONGS_EQUAL(rot3Measurement.key2(), kKey2); EXPECT(rot3Measurement.measured().equals(rot3Measured)); + + // testing the equals function + BinaryMeasurement rot3MeasurementCopy(kKey1, kKey2, rot3Measured, + rot3_model); + EXPECT(rot3Measurement.equals(rot3MeasurementCopy)); } /* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testTranslationFactor.cpp b/gtsam/sfm/tests/testTranslationFactor.cpp index 37e8b6c0f..3ff76ac5c 100644 --- a/gtsam/sfm/tests/testTranslationFactor.cpp +++ b/gtsam/sfm/tests/testTranslationFactor.cpp @@ -51,8 +51,6 @@ TEST(TranslationFactor, ZeroError) { // Verify we get the expected error Vector expected = (Vector3() << 0, 0, 0).finished(); EXPECT(assert_equal(expected, actualError, 1e-9)); - - } /* ************************************************************************* */ @@ -67,13 +65,13 @@ TEST(TranslationFactor, NonZeroError) { Vector actualError(factor.evaluateError(T1, T2)); // verify we get the expected error - Vector expected = (Vector3() << -1, 1/sqrt(2), 1/sqrt(2)).finished(); + Vector expected = (Vector3() << -1, 1 / sqrt(2), 1 / sqrt(2)).finished(); EXPECT(assert_equal(expected, actualError, 1e-9)); } /* ************************************************************************* */ -Vector factorError(const Point3& T1, const Point3& T2, - const TranslationFactor& factor) { +Vector factorError(const Point3 &T1, const Point3 &T2, + const TranslationFactor &factor) { return factor.evaluateError(T1, T2); } From 1159032d89f39b791e2b557f841349babbf4422f Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Sun, 9 Aug 2020 22:03:01 -0700 Subject: [PATCH 038/142] update binary measurement brief --- gtsam/sfm/BinaryMeasurement.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h index 9b0874106..c4b4a9804 100644 --- a/gtsam/sfm/BinaryMeasurement.h +++ b/gtsam/sfm/BinaryMeasurement.h @@ -15,7 +15,7 @@ * @file BinaryMeasurement.h * @author Akshay Krishnan * @date July 2020 - * @brief Binary measurement for representing edges in the epipolar graph. + * @brief Binary measurement represents a measurement between two keys in a graph. * A binary measurement is similar to a BetweenFactor, except that it does not contain * an error function. It is a measurement (along with a noise model) from one key to * another. Note that the direction is important. 
A measurement from key1 to key2 is not From 7cfc5c352254b5bbc710f3e1d5b1f9bf19cfd84f Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 10 Aug 2020 22:56:43 -0400 Subject: [PATCH 039/142] Added Spectra 0.9.0 to 3rdparty --- LICENSE | 2 + gtsam/3rdparty/CMakeLists.txt | 3 +- gtsam/3rdparty/Spectra/GenEigsBase.h | 482 +++++++++++++ .../Spectra/GenEigsComplexShiftSolver.h | 157 ++++ .../3rdparty/Spectra/GenEigsRealShiftSolver.h | 89 +++ gtsam/3rdparty/Spectra/GenEigsSolver.h | 158 +++++ gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h | 284 ++++++++ gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h | 530 ++++++++++++++ gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h | 384 ++++++++++ gtsam/3rdparty/Spectra/LinAlg/Lanczos.h | 167 +++++ gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h | 219 ++++++ .../Spectra/LinAlg/UpperHessenbergEigen.h | 319 +++++++++ .../Spectra/LinAlg/UpperHessenbergQR.h | 670 ++++++++++++++++++ gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h | 108 +++ .../Spectra/MatOp/DenseGenComplexShiftSolve.h | 102 +++ .../3rdparty/Spectra/MatOp/DenseGenMatProd.h | 80 +++ .../Spectra/MatOp/DenseGenRealShiftSolve.h | 88 +++ .../3rdparty/Spectra/MatOp/DenseSymMatProd.h | 73 ++ .../Spectra/MatOp/DenseSymShiftSolve.h | 92 +++ gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h | 109 +++ .../3rdparty/Spectra/MatOp/SparseGenMatProd.h | 74 ++ .../Spectra/MatOp/SparseGenRealShiftSolve.h | 93 +++ .../Spectra/MatOp/SparseRegularInverse.h | 100 +++ .../3rdparty/Spectra/MatOp/SparseSymMatProd.h | 73 ++ .../Spectra/MatOp/SparseSymShiftSolve.h | 95 +++ .../Spectra/MatOp/internal/ArnoldiOp.h | 150 ++++ .../MatOp/internal/SymGEigsCholeskyOp.h | 75 ++ .../Spectra/MatOp/internal/SymGEigsRegInvOp.h | 72 ++ gtsam/3rdparty/Spectra/SymEigsBase.h | 448 ++++++++++++ gtsam/3rdparty/Spectra/SymEigsShiftSolver.h | 203 ++++++ gtsam/3rdparty/Spectra/SymEigsSolver.h | 171 +++++ gtsam/3rdparty/Spectra/SymGEigsSolver.h | 326 +++++++++ gtsam/3rdparty/Spectra/Util/CompInfo.h | 35 + gtsam/3rdparty/Spectra/Util/GEigsMode.h | 32 + gtsam/3rdparty/Spectra/Util/SelectionRule.h | 275 +++++++ gtsam/3rdparty/Spectra/Util/SimpleRandom.h | 91 +++ gtsam/3rdparty/Spectra/Util/TypeTraits.h | 71 ++ gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h | 552 +++++++++++++++ .../Spectra/contrib/PartialSVDSolver.h | 202 ++++++ gtsam/CMakeLists.txt | 2 + 40 files changed, 7255 insertions(+), 1 deletion(-) create mode 100644 gtsam/3rdparty/Spectra/GenEigsBase.h create mode 100644 gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h create mode 100644 gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h create mode 100644 gtsam/3rdparty/Spectra/GenEigsSolver.h create mode 100644 gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h create mode 100644 gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h create mode 100644 gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h create mode 100644 gtsam/3rdparty/Spectra/LinAlg/Lanczos.h create mode 100644 gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h create mode 100644 gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h create mode 100644 gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h create mode 
100644 gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h create mode 100644 gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h create mode 100644 gtsam/3rdparty/Spectra/SymEigsBase.h create mode 100644 gtsam/3rdparty/Spectra/SymEigsShiftSolver.h create mode 100644 gtsam/3rdparty/Spectra/SymEigsSolver.h create mode 100644 gtsam/3rdparty/Spectra/SymGEigsSolver.h create mode 100644 gtsam/3rdparty/Spectra/Util/CompInfo.h create mode 100644 gtsam/3rdparty/Spectra/Util/GEigsMode.h create mode 100644 gtsam/3rdparty/Spectra/Util/SelectionRule.h create mode 100644 gtsam/3rdparty/Spectra/Util/SimpleRandom.h create mode 100644 gtsam/3rdparty/Spectra/Util/TypeTraits.h create mode 100644 gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h create mode 100644 gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h diff --git a/LICENSE b/LICENSE index d828deb55..228a6b942 100644 --- a/LICENSE +++ b/LICENSE @@ -23,3 +23,5 @@ ordering library - Included unmodified in gtsam/3rdparty/metis - Licenced under Apache License v 2.0, provided in gtsam/3rdparty/metis/LICENSE.txt +- Spectra v0.9.0: Sparse Eigenvalue Computation Toolkit as a Redesigned ARPACK. + - Licenced under MPL2, provided at https://github.com/yixuan/spectra diff --git a/gtsam/3rdparty/CMakeLists.txt b/gtsam/3rdparty/CMakeLists.txt index c8fecc339..8b356393b 100644 --- a/gtsam/3rdparty/CMakeLists.txt +++ b/gtsam/3rdparty/CMakeLists.txt @@ -1,7 +1,8 @@ -# install CCOLAMD headers +# install CCOLAMD and SuiteSparse_config headers install(FILES CCOLAMD/Include/ccolamd.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/CCOLAMD) install(FILES SuiteSparse_config/SuiteSparse_config.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/SuiteSparse_config) +# install Eigen unless System Eigen is used if(NOT GTSAM_USE_SYSTEM_EIGEN) # Find plain .h files file(GLOB_RECURSE eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/Eigen/*.h") diff --git a/gtsam/3rdparty/Spectra/GenEigsBase.h b/gtsam/3rdparty/Spectra/GenEigsBase.h new file mode 100644 index 000000000..e084033d7 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsBase.h @@ -0,0 +1,482 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_BASE_H +#define GEN_EIGS_BASE_H + +#include +#include // std::vector +#include // std::abs, std::pow, std::sqrt +#include // std::min, std::copy +#include // std::complex, std::conj, std::norm, std::abs +#include // std::invalid_argument + +#include "Util/TypeTraits.h" +#include "Util/SelectionRule.h" +#include "Util/CompInfo.h" +#include "Util/SimpleRandom.h" +#include "MatOp/internal/ArnoldiOp.h" +#include "LinAlg/UpperHessenbergQR.h" +#include "LinAlg/DoubleShiftQR.h" +#include "LinAlg/UpperHessenbergEigen.h" +#include "LinAlg/Arnoldi.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This is the base class for general eigen solvers, mainly for internal use. 
+/// It is kept here to provide the documentation for member functions of concrete eigen solvers +/// such as GenEigsSolver and GenEigsRealShiftSolver. +/// +template +class GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Array Array; + typedef Eigen::Array BoolArray; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef ArnoldiOp ArnoldiOpType; + typedef Arnoldi ArnoldiFac; + +protected: + // clang-format off + OpType* m_op; // object to conduct matrix operation, + // e.g. matrix-vector product + const Index m_n; // dimension of matrix A + const Index m_nev; // number of eigenvalues requested + const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method + Index m_nmatop; // number of matrix operations called + Index m_niter; // number of restarting iterations + + ArnoldiFac m_fac; // Arnoldi factorization + + ComplexVector m_ritz_val; // Ritz values + ComplexMatrix m_ritz_vec; // Ritz vectors + ComplexVector m_ritz_est; // last row of m_ritz_vec + +private: + BoolArray m_ritz_conv; // indicator of the convergence of Ritz values + int m_info; // status of the computation + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + const Scalar m_eps23; // m_eps^(2/3), used to test the convergence + // clang-format on + + // Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part + // Complex Ritz values have exact conjugate pairs + // So we use exact tests here + static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); } + static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); } + + // Implicitly restarted Arnoldi factorization + void restart(Index k) + { + using std::norm; + + if (k >= m_ncv) + return; + + DoubleShiftQR decomp_ds(m_ncv); + UpperHessenbergQR decomp_hb(m_ncv); + Matrix Q = Matrix::Identity(m_ncv, m_ncv); + + for (Index i = k; i < m_ncv; i++) + { + if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1])) + { + // H - mu * I = Q1 * R1 + // H <- R1 * Q1 + mu * I = Q1' * H * Q1 + // H - conj(mu) * I = Q2 * R2 + // H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2 + // + // (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R + const Scalar s = Scalar(2) * m_ritz_val[i].real(); + const Scalar t = norm(m_ritz_val[i]); + + decomp_ds.compute(m_fac.matrix_H(), s, t); + + // Q -> Q * Qi + decomp_ds.apply_YQ(Q); + // H -> Q'HQ + // Matrix Q = Matrix::Identity(m_ncv, m_ncv); + // decomp_ds.apply_YQ(Q); + // m_fac_H = Q.transpose() * m_fac_H * Q; + m_fac.compress_H(decomp_ds); + + i++; + } + else + { + // QR decomposition of H - mu * I, mu is real + decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real()); + + // Q -> Q * Qi + decomp_hb.apply_YQ(Q); + // H -> Q'HQ = RQ + mu * I + m_fac.compress_H(decomp_hb); + } + } + + m_fac.compress_V(Q); + m_fac.factorize_from(k, m_ncv, m_nmatop); + + retrieve_ritzpair(); + } + + // Calculates the number of converged Ritz values + Index num_converged(Scalar tol) + { + // thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value + Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23); + Array resid = 
m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); + // Converged "wanted" Ritz values + m_ritz_conv = (resid < thresh); + + return m_ritz_conv.cast().sum(); + } + + // Returns the adjusted nev for restarting + Index nev_adjusted(Index nconv) + { + using std::abs; + + Index nev_new = m_nev; + for (Index i = m_nev; i < m_ncv; i++) + if (abs(m_ritz_est[i]) < m_near_0) + nev_new++; + + // Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK + nev_new += std::min(nconv, (m_ncv - nev_new) / 2); + if (nev_new == 1 && m_ncv >= 6) + nev_new = m_ncv / 2; + else if (nev_new == 1 && m_ncv > 3) + nev_new = 2; + + if (nev_new > m_ncv - 2) + nev_new = m_ncv - 2; + + // Increase nev by one if ritz_val[nev - 1] and + // ritz_val[nev] are conjugate pairs + if (is_complex(m_ritz_val[nev_new - 1]) && + is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new])) + { + nev_new++; + } + + return nev_new; + } + + // Retrieves and sorts Ritz values and Ritz vectors + void retrieve_ritzpair() + { + UpperHessenbergEigen decomp(m_fac.matrix_H()); + const ComplexVector& evals = decomp.eigenvalues(); + ComplexMatrix evecs = decomp.eigenvectors(); + + SortEigenvalue sorting(evals.data(), evals.size()); + std::vector ind = sorting.index(); + + // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively + for (Index i = 0; i < m_ncv; i++) + { + m_ritz_val[i] = evals[ind[i]]; + m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); + } + for (Index i = 0; i < m_nev; i++) + { + m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); + } + } + +protected: + // Sorts the first nev Ritz pairs in the specified order + // This is used to return the final results + virtual void sort_ritzpair(int sort_rule) + { + // First make sure that we have a valid index vector + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + std::vector ind = sorting.index(); + + switch (sort_rule) + { + case LARGEST_MAGN: + break; + case LARGEST_REAL: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case LARGEST_IMAG: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_REAL: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_IMAG: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + default: + throw std::invalid_argument("unsupported sorting rule"); + } + + ComplexVector new_ritz_val(m_ncv); + ComplexMatrix new_ritz_vec(m_ncv, m_nev); + BoolArray new_ritz_conv(m_nev); + + for (Index i = 0; i < m_nev; i++) + { + new_ritz_val[i] = m_ritz_val[ind[i]]; + new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); + new_ritz_conv[i] = m_ritz_conv[ind[i]]; + } + + m_ritz_val.swap(new_ritz_val); + m_ritz_vec.swap(new_ritz_vec); + m_ritz_conv.swap(new_ritz_conv); + } + +public: + /// \cond + + GenEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) : + m_op(op), + m_n(m_op->rows()), + m_nev(nev), + m_ncv(ncv > m_n ? 
m_n : ncv), + m_nmatop(0), + m_niter(0), + m_fac(ArnoldiOpType(op, Bop), m_ncv), + m_info(NOT_COMPUTED), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3)) + { + if (nev < 1 || nev > m_n - 2) + throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix"); + + if (ncv < nev + 2 || ncv > m_n) + throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix"); + } + + /// + /// Virtual destructor + /// + virtual ~GenEigsBase() {} + + /// \endcond + + /// + /// Initializes the solver by providing an initial residual vector. + /// + /// \param init_resid Pointer to the initial residual vector. + /// + /// **Spectra** (and also **ARPACK**) uses an iterative algorithm + /// to find eigenvalues. This function allows the user to provide the initial + /// residual vector. + /// + void init(const Scalar* init_resid) + { + // Reset all matrices/vectors to zero + m_ritz_val.resize(m_ncv); + m_ritz_vec.resize(m_ncv, m_nev); + m_ritz_est.resize(m_ncv); + m_ritz_conv.resize(m_nev); + + m_ritz_val.setZero(); + m_ritz_vec.setZero(); + m_ritz_est.setZero(); + m_ritz_conv.setZero(); + + m_nmatop = 0; + m_niter = 0; + + // Initialize the Arnoldi factorization + MapConstVec v0(init_resid, m_n); + m_fac.init(v0, m_nmatop); + } + + /// + /// Initializes the solver by providing a random initial residual vector. + /// + /// This overloaded function generates a random initial residual vector + /// (with a fixed random seed) for the algorithm. Elements in the vector + /// follow independent Uniform(-0.5, 0.5) distribution. + /// + void init() + { + SimpleRandom rng(0); + Vector init_resid = rng.random_vec(m_n); + init(init_resid.data()); + } + + /// + /// Conducts the major computation procedure. + /// + /// \param maxit Maximum number of iterations allowed in the algorithm. + /// \param tol Precision parameter for the calculated eigenvalues. + /// \param sort_rule Rule to sort the eigenvalues and eigenvectors. + /// Supported values are + /// `Spectra::LARGEST_MAGN`, `Spectra::LARGEST_REAL`, + /// `Spectra::LARGEST_IMAG`, `Spectra::SMALLEST_MAGN`, + /// `Spectra::SMALLEST_REAL` and `Spectra::SMALLEST_IMAG`, + /// for example `LARGEST_MAGN` indicates that eigenvalues + /// with largest magnitude come first. + /// Note that this argument is only used to + /// **sort** the final result, and the **selection** rule + /// (e.g. selecting the largest or smallest eigenvalues in the + /// full spectrum) is specified by the template parameter + /// `SelectionRule` of GenEigsSolver. + /// + /// \return Number of converged eigenvalues. + /// + Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_MAGN) + { + // The m-step Arnoldi factorization + m_fac.factorize_from(1, m_ncv, m_nmatop); + retrieve_ritzpair(); + // Restarting + Index i, nconv = 0, nev_adj; + for (i = 0; i < maxit; i++) + { + nconv = num_converged(tol); + if (nconv >= m_nev) + break; + + nev_adj = nev_adjusted(nconv); + restart(nev_adj); + } + // Sorting results + sort_ritzpair(sort_rule); + + m_niter += i + 1; + m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING; + + return std::min(m_nev, nconv); + } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Returns the number of iterations used in the computation. 
+ /// + Index num_iterations() const { return m_niter; } + + /// + /// Returns the number of matrix operations used in the computation. + /// + Index num_operations() const { return m_nmatop; } + + /// + /// Returns the converged eigenvalues. + /// + /// \return A complex-valued vector containing the eigenvalues. + /// Returned vector type will be `Eigen::Vector, ...>`, depending on + /// the template parameter `Scalar` defined. + /// + ComplexVector eigenvalues() const + { + const Index nconv = m_ritz_conv.cast().sum(); + ComplexVector res(nconv); + + if (!nconv) + return res; + + Index j = 0; + for (Index i = 0; i < m_nev; i++) + { + if (m_ritz_conv[i]) + { + res[j] = m_ritz_val[i]; + j++; + } + } + + return res; + } + + /// + /// Returns the eigenvectors associated with the converged eigenvalues. + /// + /// \param nvec The number of eigenvectors to return. + /// + /// \return A complex-valued matrix containing the eigenvectors. + /// Returned matrix type will be `Eigen::Matrix, ...>`, + /// depending on the template parameter `Scalar` defined. + /// + ComplexMatrix eigenvectors(Index nvec) const + { + const Index nconv = m_ritz_conv.cast().sum(); + nvec = std::min(nvec, nconv); + ComplexMatrix res(m_n, nvec); + + if (!nvec) + return res; + + ComplexMatrix ritz_vec_conv(m_ncv, nvec); + Index j = 0; + for (Index i = 0; i < m_nev && j < nvec; i++) + { + if (m_ritz_conv[i]) + { + ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); + j++; + } + } + + res.noalias() = m_fac.matrix_V() * ritz_vec_conv; + + return res; + } + + /// + /// Returns all converged eigenvectors. + /// + ComplexMatrix eigenvectors() const + { + return eigenvectors(m_nev); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_BASE_H diff --git a/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h b/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h new file mode 100644 index 000000000..da7c1b422 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h @@ -0,0 +1,157 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_COMPLEX_SHIFT_SOLVER_H +#define GEN_EIGS_COMPLEX_SHIFT_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenComplexShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices with +/// a complex shift value in the **shift-and-invert mode**. The background +/// knowledge of the shift-and-invert mode can be found in the documentation +/// of the SymEigsShiftSolver class. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the DenseGenComplexShiftSolve wrapper class, or define their +/// own that implements all the public member functions as in +/// DenseGenComplexShiftSolve. 
+/// +template > +class GenEigsComplexShiftSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef std::complex Complex; + typedef Eigen::Matrix Vector; + typedef Eigen::Matrix ComplexVector; + + const Scalar m_sigmar; + const Scalar m_sigmai; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + using std::abs; + using std::sqrt; + using std::norm; + + // The eigenvalues we get from the iteration is + // nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma))) + // So the eigenvalues of the original problem is + // 1 \pm sqrt(1 - 4 * nu^2 * sigmai^2) + // lambda = sigmar + ----------------------------------- + // 2 * nu + // We need to pick the correct root + // Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj + // and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj + // where r is any shift value. + // We can use this identity to determine lambdaj + // + // op(v) computes Re(inv(A - r * I) * v) for any real v + // If r is real, then op(v) is also real. Let a = Re(vj), b = Im(vj), + // then op(vj) = op(a) + op(b) * i + // By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine + // which one is the correct root + + // Select a random shift value + SimpleRandom rng(0); + const Scalar shiftr = rng.random() * m_sigmar + rng.random(); + const Complex shift = Complex(shiftr, Scalar(0)); + this->m_op->set_shift(shiftr, Scalar(0)); + + // Calculate inv(A - r * I) * vj + Vector v_real(this->m_n), v_imag(this->m_n), OPv_real(this->m_n), OPv_imag(this->m_n); + const Scalar eps = Eigen::NumTraits::epsilon(); + for (Index i = 0; i < this->m_nev; i++) + { + v_real.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).real(); + v_imag.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).imag(); + this->m_op->perform_op(v_real.data(), OPv_real.data()); + this->m_op->perform_op(v_imag.data(), OPv_imag.data()); + + // Two roots computed from the quadratic equation + const Complex nu = this->m_ritz_val[i]; + const Complex root_part1 = m_sigmar + Scalar(0.5) / nu; + const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu; + const Complex root1 = root_part1 + root_part2; + const Complex root2 = root_part1 - root_part2; + + // Test roots + Scalar err1 = Scalar(0), err2 = Scalar(0); + for (int k = 0; k < this->m_n; k++) + { + const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift); + const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift); + const Complex OPv = Complex(OPv_real[k], OPv_imag[k]); + err1 += norm(OPv - rhs1); + err2 += norm(OPv - rhs2); + } + + const Complex lambdaj = (err1 < err2) ? root1 : root2; + this->m_ritz_val[i] = lambdaj; + + if (abs(Eigen::numext::imag(lambdaj)) > eps) + { + this->m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj); + i++; + } + else + { + this->m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0)); + } + } + + GenEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object. This class should implement + /// the complex shift-solve operation of \f$A\f$: calculating + /// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. 
Users could either + /// create the object from the DenseGenComplexShiftSolve wrapper class, or + /// define their own that implements all the public member functions + /// as in DenseGenComplexShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. + /// \param sigmar The real part of the shift. + /// \param sigmai The imaginary part of the shift. + /// + GenEigsComplexShiftSolver(OpType* op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) : + GenEigsBase(op, NULL, nev, ncv), + m_sigmar(sigmar), m_sigmai(sigmai) + { + this->m_op->set_shift(m_sigmar, m_sigmai); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_COMPLEX_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h b/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h new file mode 100644 index 000000000..6f28308f1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h @@ -0,0 +1,89 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_REAL_SHIFT_SOLVER_H +#define GEN_EIGS_REAL_SHIFT_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenRealShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices with +/// a real shift value in the **shift-and-invert mode**. The background +/// knowledge of the shift-and-invert mode can be found in the documentation +/// of the SymEigsShiftSolver class. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseGenRealShiftSolve and +/// SparseGenRealShiftSolve, or define their +/// own that implements all the public member functions as in +/// DenseGenRealShiftSolve. +/// +template > +class GenEigsRealShiftSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef std::complex Complex; + typedef Eigen::Array ComplexArray; + + const Scalar m_sigma; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + // The eigenvalues we get from the iteration is nu = 1 / (lambda - sigma) + // So the eigenvalues of the original problem is lambda = 1 / nu + sigma + ComplexArray ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma; + this->m_ritz_val.head(this->m_nev) = ritz_val_org; + GenEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object. 
This class should implement + /// the shift-solve operation of \f$A\f$: calculating + /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseGenRealShiftSolve, or + /// define their own that implements all the public member functions + /// as in DenseGenRealShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. + /// \param sigma The real-valued shift. + /// + GenEigsRealShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) : + GenEigsBase(op, NULL, nev, ncv), + m_sigma(sigma) + { + this->m_op->set_shift(m_sigma); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_REAL_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/GenEigsSolver.h b/gtsam/3rdparty/Spectra/GenEigsSolver.h new file mode 100644 index 000000000..3ead1dc4d --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsSolver.h @@ -0,0 +1,158 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_SOLVER_H +#define GEN_EIGS_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenMatProd.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices, i.e., +/// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix. +/// +/// Most of the background information documented in the SymEigsSolver class +/// also applies to the GenEigsSolver class here, except that the eigenvalues +/// and eigenvectors of a general matrix can now be complex-valued. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseGenMatProd and +/// SparseGenMatProd, or define their +/// own that implements all the public member functions as in +/// DenseGenMatProd. 
+///
+/// An example that illustrates the usage of GenEigsSolver is given below:
+///
+/// \code{.cpp}
+/// #include <Eigen/Core>
+/// #include <Spectra/GenEigsSolver.h>
+/// // <Spectra/MatOp/DenseGenMatProd.h> is implicitly included
+/// #include <iostream>
+///
+/// using namespace Spectra;
+///
+/// int main()
+/// {
+///     // We are going to calculate the eigenvalues of M
+///     Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
+///
+///     // Construct matrix operation object using the wrapper class
+///     DenseGenMatProd<double> op(M);
+///
+///     // Construct eigen solver object, requesting the largest
+///     // (in magnitude, or norm) three eigenvalues
+///     GenEigsSolver< double, LARGEST_MAGN, DenseGenMatProd<double> > eigs(&op, 3, 6);
+///
+///     // Initialize and compute
+///     eigs.init();
+///     int nconv = eigs.compute();
+///
+///     // Retrieve results
+///     Eigen::VectorXcd evalues;
+///     if(eigs.info() == SUCCESSFUL)
+///         evalues = eigs.eigenvalues();
+///
+///     std::cout << "Eigenvalues found:\n" << evalues << std::endl;
+///
+///     return 0;
+/// }
+/// \endcode
+///
+/// And also an example for sparse matrices:
+///
+/// \code{.cpp}
+/// #include <Eigen/Core>
+/// #include <Eigen/SparseCore>
+/// #include <Spectra/GenEigsSolver.h>
+/// #include <Spectra/MatOp/SparseGenMatProd.h>
+/// #include <iostream>
+///
+/// using namespace Spectra;
+///
+/// int main()
+/// {
+///     // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal,
+///     // and 3 on the above-main subdiagonal
+///     const int n = 10;
+///     Eigen::SparseMatrix<double> M(n, n);
+///     M.reserve(Eigen::VectorXi::Constant(n, 3));
+///     for(int i = 0; i < n; i++)
+///     {
+///         M.insert(i, i) = 1.0;
+///         if(i > 0)
+///             M.insert(i - 1, i) = 3.0;
+///         if(i < n - 1)
+///             M.insert(i + 1, i) = 2.0;
+///     }
+///
+///     // Construct matrix operation object using the wrapper class SparseGenMatProd
+///     SparseGenMatProd<double> op(M);
+///
+///     // Construct eigen solver object, requesting the largest three eigenvalues
+///     GenEigsSolver< double, LARGEST_MAGN, SparseGenMatProd<double> > eigs(&op, 3, 6);
+///
+///     // Initialize and compute
+///     eigs.init();
+///     int nconv = eigs.compute();
+///
+///     // Retrieve results
+///     Eigen::VectorXcd evalues;
+///     if(eigs.info() == SUCCESSFUL)
+///         evalues = eigs.eigenvalues();
+///
+///     std::cout << "Eigenvalues found:\n" << evalues << std::endl;
+///
+///     return 0;
+/// }
+/// \endcode
+template <typename Scalar = double,
+          int SelectionRule = LARGEST_MAGN,
+          typename OpType = DenseGenMatProd<double>>
+class GenEigsSolver : public GenEigsBase<Scalar, SelectionRule, OpType, IdentityBOp>
+{
+private:
+    typedef Eigen::Index Index;
+
+public:
+    ///
+    /// Constructor to create a solver object.
+    ///
+    /// \param op   Pointer to the matrix operation object, which should implement
+    ///             the matrix-vector multiplication operation of \f$A\f$:
+    ///             calculating \f$Av\f$ for any vector \f$v\f$. Users could either
+    ///             create the object from the wrapper class such as DenseGenMatProd, or
+    ///             define their own that implements all the public member functions
+    ///             as in DenseGenMatProd.
+    /// \param nev  Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$,
+    ///             where \f$n\f$ is the size of matrix.
+    /// \param ncv  Parameter that controls the convergence speed of the algorithm.
+    ///             Typically a larger `ncv` means faster convergence, but it may
+    ///             also result in greater memory use and more matrix operations
+    ///             in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$,
+    ///             and is advised to take \f$ncv \ge 2\cdot nev + 1\f$.
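+    ///             As a concrete illustration, `nev = 3` suggests `ncv >= 7`;
+    ///             the examples above use `ncv = 6`, which still satisfies the
+    ///             hard constraint \f$nev+2 \le ncv \le n\f$.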
+ /// + GenEigsSolver(OpType* op, Index nev, Index ncv) : + GenEigsBase(op, NULL, nev, ncv) + {} +}; + +} // namespace Spectra + +#endif // GEN_EIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h b/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h new file mode 100644 index 000000000..730ba9556 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h @@ -0,0 +1,284 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef ARNOLDI_H +#define ARNOLDI_H + +#include +#include // std::sqrt +#include // std::invalid_argument +#include // std::stringstream + +#include "../MatOp/internal/ArnoldiOp.h" +#include "../Util/TypeTraits.h" +#include "../Util/SimpleRandom.h" +#include "UpperHessenbergQR.h" +#include "DoubleShiftQR.h" + +namespace Spectra { + +// Arnoldi factorization A * V = V * H + f * e' +// A: n x n +// V: n x k +// H: k x k +// f: n x 1 +// e: [0, ..., 0, 1] +// V and H are allocated of dimension m, so the maximum value of k is m +template +class Arnoldi +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + +protected: + // clang-format off + ArnoldiOpType m_op; // Operators for the Arnoldi factorization + + const Index m_n; // dimension of A + const Index m_m; // maximum dimension of subspace V + Index m_k; // current dimension of subspace V + + Matrix m_fac_V; // V matrix in the Arnoldi factorization + Matrix m_fac_H; // H matrix in the Arnoldi factorization + Vector m_fac_f; // residual in the Arnoldi factorization + Scalar m_beta; // ||f||, B-norm of f + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + // clang-format on + + // Given orthonormal basis functions V, find a nonzero vector f such that V'Bf = 0 + // Assume that f has been properly allocated + void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm) + { + using std::sqrt; + + const Scalar thresh = m_eps * sqrt(Scalar(m_n)); + Vector Vf(V.cols()); + for (Index iter = 0; iter < 5; iter++) + { + // Randomly generate a new vector and orthogonalize it against V + SimpleRandom rng(seed + 123 * iter); + f.noalias() = rng.random_vec(m_n); + // f <- f - V * V'Bf, so that f is orthogonal to V in B-norm + m_op.trans_product(V, f, Vf); + f.noalias() -= V * Vf; + // fnorm <- ||f|| + fnorm = m_op.norm(f); + + // If fnorm is too close to zero, we try a new random vector, + // otherwise return the result + if (fnorm >= thresh) + return; + } + } + +public: + Arnoldi(const ArnoldiOpType& op, Index m) : + m_op(op), m_n(op.rows()), m_m(m), m_k(0), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()) + {} + + virtual ~Arnoldi() {} + + // Const-reference to internal structures + const Matrix& matrix_V() const { return m_fac_V; } + const Matrix& matrix_H() const { return m_fac_H; } + const Vector& vector_f() const { return m_fac_f; } + Scalar f_norm() const { return m_beta; } + Index subspace_dim() const { return m_k; } + + // Initialize with an operator and an initial vector + void init(MapConstVec& v0, Index& op_counter) + { + m_fac_V.resize(m_n, m_m); + 
m_fac_H.resize(m_m, m_m); + m_fac_f.resize(m_n); + m_fac_H.setZero(); + + // Verify the initial vector + const Scalar v0norm = m_op.norm(v0); + if (v0norm < m_near_0) + throw std::invalid_argument("initial residual vector cannot be zero"); + + // Points to the first column of V + MapVec v(m_fac_V.data(), m_n); + + // Normalize + v.noalias() = v0 / v0norm; + + // Compute H and f + Vector w(m_n); + m_op.perform_op(v.data(), w.data()); + op_counter++; + + m_fac_H(0, 0) = m_op.inner_product(v, w); + m_fac_f.noalias() = w - v * m_fac_H(0, 0); + + // In some cases f is zero in exact arithmetics, but due to rounding errors + // it may contain tiny fluctuations. When this happens, we force f to be zero + if (m_fac_f.cwiseAbs().maxCoeff() < m_eps) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + } + else + { + m_beta = m_op.norm(m_fac_f); + } + + // Indicate that this is a step-1 factorization + m_k = 1; + } + + // Arnoldi factorization starting from step-k + virtual void factorize_from(Index from_k, Index to_m, Index& op_counter) + { + using std::sqrt; + + if (to_m <= from_k) + return; + + if (from_k > m_k) + { + std::stringstream msg; + msg << "Arnoldi: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")"; + throw std::invalid_argument(msg.str()); + } + + const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); + + // Pre-allocate vectors + Vector Vf(to_m); + Vector w(m_n); + + // Keep the upperleft k x k submatrix of H and set other elements to 0 + m_fac_H.rightCols(m_m - from_k).setZero(); + m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); + + for (Index i = from_k; i <= to_m - 1; i++) + { + bool restart = false; + // If beta = 0, then the next V is not full rank + // We need to generate a new residual vector that is orthogonal + // to the current V, which we call a restart + if (m_beta < m_near_0) + { + MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns + expand_basis(V, 2 * i, m_fac_f, m_beta); + restart = true; + } + + // v <- f / ||f|| + m_fac_V.col(i).noalias() = m_fac_f / m_beta; // The (i+1)-th column + + // Note that H[i+1, i] equals to the unrestarted beta + m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta; + + // w <- A * v, v = m_fac_V.col(i) + m_op.perform_op(&m_fac_V(0, i), w.data()); + op_counter++; + + const Index i1 = i + 1; + // First i+1 columns of V + MapConstMat Vs(m_fac_V.data(), m_n, i1); + // h = m_fac_H(0:i, i) + MapVec h(&m_fac_H(0, i), i1); + // h <- V'Bw + m_op.trans_product(Vs, w, h); + + // f <- w - V * h + m_fac_f.noalias() = w - Vs * h; + m_beta = m_op.norm(m_fac_f); + + if (m_beta > Scalar(0.717) * m_op.norm(h)) + continue; + + // f/||f|| is going to be the next column of V, so we need to test + // whether V'B(f/||f||) ~= 0 + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + // If not, iteratively correct the residual + int count = 0; + while (count < 5 && ortho_err > m_eps * m_beta) + { + // There is an edge case: when beta=||f|| is close to zero, f mostly consists + // of noises of rounding errors, so the test [ortho_err < eps * beta] is very + // likely to fail. In particular, if beta=0, then the test is ensured to fail. + // Hence when this happens, we force f to be zero, and then restart in the + // next iteration. 
+ if (m_beta < beta_thresh) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + break; + } + + // f <- f - V * Vf + m_fac_f.noalias() -= Vs * Vf.head(i1); + // h <- h + Vf + h.noalias() += Vf.head(i1); + // beta <- ||f|| + m_beta = m_op.norm(m_fac_f); + + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + count++; + } + } + + // Indicate that this is a step-m factorization + m_k = to_m; + } + + // Apply H -> Q'HQ, where Q is from a double shift QR decomposition + void compress_H(const DoubleShiftQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k -= 2; + } + + // Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition + void compress_H(const UpperHessenbergQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k--; + } + + // Apply V -> VQ and compute the new f. + // Should be called after compress_H(), since m_k is updated there. + // Only need to update the first k+1 columns of V + // The first (m - k + i) elements of the i-th column of Q are non-zero, + // and the rest are zero + void compress_V(const Matrix& Q) + { + Matrix Vs(m_n, m_k + 1); + for (Index i = 0; i < m_k; i++) + { + const Index nnz = m_m - m_k + i + 1; + MapConstVec q(&Q(0, i), nnz); + Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q; + } + Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k); + m_fac_V.leftCols(m_k + 1).noalias() = Vs; + + Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1); + m_fac_f.swap(fk); + m_beta = m_op.norm(m_fac_f); + } +}; + +} // namespace Spectra + +#endif // ARNOLDI_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h b/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h new file mode 100644 index 000000000..2345e0fda --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h @@ -0,0 +1,530 @@ +// Copyright (C) 2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef BK_LDLT_H +#define BK_LDLT_H + +#include +#include +#include + +#include "../Util/CompInfo.h" + +namespace Spectra { + +// Bunch-Kaufman LDLT decomposition +// References: +// 1. Bunch, J. R., & Kaufman, L. (1977). Some stable methods for calculating inertia and solving symmetric linear systems. +// Mathematics of computation, 31(137), 163-179. +// 2. Golub, G. H., & Van Loan, C. F. (2012). Matrix computations (Vol. 3). JHU press. Section 4.4. +// 3. Bunch-Parlett diagonal pivoting +// 4. Ashcraft, C., Grimes, R. G., & Lewis, J. G. (1998). Accurate symmetric indefinite linear equation solvers. +// SIAM Journal on Matrix Analysis and Applications, 20(2), 513-561. 
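+//
+// A minimal usage sketch (illustrative only; the matrix, right-hand side and
+// shift below are hypothetical, and <Eigen/Core> is assumed to be available):
+//
+//   Eigen::MatrixXd R = Eigen::MatrixXd::Random(5, 5);
+//   Eigen::MatrixXd A = R + R.transpose();        // BKLDLT expects a symmetric matrix
+//   Eigen::VectorXd b = Eigen::VectorXd::Random(5);
+//
+//   BKLDLT<double> solver(A, Eigen::Lower, 0.0);  // factorize P (A - 0.0 * I) P' = L D L'
+//   if (solver.info() == SUCCESSFUL)
+//       Eigen::VectorXd x = solver.solve(b);      // x solves (A - shift * I) x = b
+//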
+template +class BKLDLT +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef Eigen::Matrix IntVector; + typedef Eigen::Ref GenericVector; + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + typedef const Eigen::Ref ConstGenericVector; + + Index m_n; + Vector m_data; // storage for a lower-triangular matrix + std::vector m_colptr; // pointers to columns + IntVector m_perm; // [-2, -1, 3, 1, 4, 5]: 0 <-> 2, 1 <-> 1, 2 <-> 3, 3 <-> 1, 4 <-> 4, 5 <-> 5 + std::vector > m_permc; // compressed version of m_perm: [(0, 2), (2, 3), (3, 1)] + + bool m_computed; + int m_info; + + // Access to elements + // Pointer to the k-th column + Scalar* col_pointer(Index k) { return m_colptr[k]; } + // A[i, j] -> m_colptr[j][i - j], i >= j + Scalar& coeff(Index i, Index j) { return m_colptr[j][i - j]; } + const Scalar& coeff(Index i, Index j) const { return m_colptr[j][i - j]; } + // A[i, i] -> m_colptr[i][0] + Scalar& diag_coeff(Index i) { return m_colptr[i][0]; } + const Scalar& diag_coeff(Index i) const { return m_colptr[i][0]; } + + // Compute column pointers + void compute_pointer() + { + m_colptr.clear(); + m_colptr.reserve(m_n); + Scalar* head = m_data.data(); + + for (Index i = 0; i < m_n; i++) + { + m_colptr.push_back(head); + head += (m_n - i); + } + } + + // Copy mat - shift * I to m_data + void copy_data(ConstGenericMatrix& mat, int uplo, const Scalar& shift) + { + if (uplo == Eigen::Lower) + { + for (Index j = 0; j < m_n; j++) + { + const Scalar* begin = &mat.coeffRef(j, j); + const Index len = m_n - j; + std::copy(begin, begin + len, col_pointer(j)); + diag_coeff(j) -= shift; + } + } + else + { + Scalar* dest = m_data.data(); + for (Index i = 0; i < m_n; i++) + { + for (Index j = i; j < m_n; j++, dest++) + { + *dest = mat.coeff(i, j); + } + diag_coeff(i) -= shift; + } + } + } + + // Compute compressed permutations + void compress_permutation() + { + for (Index i = 0; i < m_n; i++) + { + // Recover the permutation action + const Index perm = (m_perm[i] >= 0) ? 
(m_perm[i]) : (-m_perm[i] - 1); + if (perm != i) + m_permc.push_back(std::make_pair(i, perm)); + } + } + + // Working on the A[k:end, k:end] submatrix + // Exchange k <-> r + // Assume r >= k + void pivoting_1x1(Index k, Index r) + { + // No permutation + if (k == r) + { + m_perm[k] = r; + return; + } + + // A[k, k] <-> A[r, r] + std::swap(diag_coeff(k), diag_coeff(r)); + + // A[(r+1):end, k] <-> A[(r+1):end, r] + std::swap_ranges(&coeff(r + 1, k), col_pointer(k + 1), &coeff(r + 1, r)); + + // A[(k+1):(r-1), k] <-> A[r, (k+1):(r-1)] + Scalar* src = &coeff(k + 1, k); + for (Index j = k + 1; j < r; j++, src++) + { + std::swap(*src, coeff(r, j)); + } + + m_perm[k] = r; + } + + // Working on the A[k:end, k:end] submatrix + // Exchange [k+1, k] <-> [r, p] + // Assume p >= k, r >= k+1 + void pivoting_2x2(Index k, Index r, Index p) + { + pivoting_1x1(k, p); + pivoting_1x1(k + 1, r); + + // A[k+1, k] <-> A[r, k] + std::swap(coeff(k + 1, k), coeff(r, k)); + + // Use negative signs to indicate a 2x2 block + // Also minus one to distinguish a negative zero from a positive zero + m_perm[k] = -m_perm[k] - 1; + m_perm[k + 1] = -m_perm[k + 1] - 1; + } + + // A[r1, c1:c2] <-> A[r2, c1:c2] + // Assume r2 >= r1 > c2 >= c1 + void interchange_rows(Index r1, Index r2, Index c1, Index c2) + { + if (r1 == r2) + return; + + for (Index j = c1; j <= c2; j++) + { + std::swap(coeff(r1, j), coeff(r2, j)); + } + } + + // lambda = |A[r, k]| = max{|A[k+1, k]|, ..., |A[end, k]|} + // Largest (in magnitude) off-diagonal element in the first column of the current reduced matrix + // r is the row index + // Assume k < end + Scalar find_lambda(Index k, Index& r) + { + using std::abs; + + const Scalar* head = col_pointer(k); // => A[k, k] + const Scalar* end = col_pointer(k + 1); + // Start with r=k+1, lambda=A[k+1, k] + r = k + 1; + Scalar lambda = abs(head[1]); + // Scan remaining elements + for (const Scalar* ptr = head + 2; ptr < end; ptr++) + { + const Scalar abs_elem = abs(*ptr); + if (lambda < abs_elem) + { + lambda = abs_elem; + r = k + (ptr - head); + } + } + + return lambda; + } + + // sigma = |A[p, r]| = max {|A[k, r]|, ..., |A[end, r]|} \ {A[r, r]} + // Largest (in magnitude) off-diagonal element in the r-th column of the current reduced matrix + // p is the row index + // Assume k < r < end + Scalar find_sigma(Index k, Index r, Index& p) + { + using std::abs; + + // First search A[r+1, r], ..., A[end, r], which has the same task as find_lambda() + // If r == end, we skip this search + Scalar sigma = Scalar(-1); + if (r < m_n - 1) + sigma = find_lambda(r, p); + + // Then search A[k, r], ..., A[r-1, r], which maps to A[r, k], ..., A[r, r-1] + for (Index j = k; j < r; j++) + { + const Scalar abs_elem = abs(coeff(r, j)); + if (sigma < abs_elem) + { + sigma = abs_elem; + p = j; + } + } + + return sigma; + } + + // Generate permutations and apply to A + // Return true if the resulting pivoting is 1x1, and false if 2x2 + bool permutate_mat(Index k, const Scalar& alpha) + { + using std::abs; + + Index r = k, p = k; + const Scalar lambda = find_lambda(k, r); + + // If lambda=0, no need to interchange + if (lambda > Scalar(0)) + { + const Scalar abs_akk = abs(diag_coeff(k)); + // If |A[k, k]| >= alpha * lambda, no need to interchange + if (abs_akk < alpha * lambda) + { + const Scalar sigma = find_sigma(k, r, p); + + // If sigma * |A[k, k]| >= alpha * lambda^2, no need to interchange + if (sigma * abs_akk < alpha * lambda * lambda) + { + if (abs_akk >= alpha * sigma) + { + // Permutation on A + pivoting_1x1(k, r); + + // 
Permutation on L + interchange_rows(k, r, 0, k - 1); + return true; + } + else + { + // There are two versions of permutation here + // 1. A[k+1, k] <-> A[r, k] + // 2. A[k+1, k] <-> A[r, p], where p >= k and r >= k+1 + // + // Version 1 and 2 are used by Ref[1] and Ref[2], respectively + + // Version 1 implementation + p = k; + + // Version 2 implementation + // [r, p] and [p, r] are symmetric, but we need to make sure + // p >= k and r >= k+1, so it is safe to always make r > p + // One exception is when min{r,p} == k+1, in which case we make + // r = k+1, so that only one permutation needs to be performed + /* const Index rp_min = std::min(r, p); + const Index rp_max = std::max(r, p); + if(rp_min == k + 1) + { + r = rp_min; p = rp_max; + } else { + r = rp_max; p = rp_min; + } */ + + // Right now we use Version 1 since it reduces the overhead of interchange + + // Permutation on A + pivoting_2x2(k, r, p); + // Permutation on L + interchange_rows(k, p, 0, k - 1); + interchange_rows(k + 1, r, 0, k - 1); + return false; + } + } + } + } + + return true; + } + + // E = [e11, e12] + // [e21, e22] + // Overwrite E with inv(E) + void inverse_inplace_2x2(Scalar& e11, Scalar& e21, Scalar& e22) const + { + // inv(E) = [d11, d12], d11 = e22/delta, d21 = -e21/delta, d22 = e11/delta + // [d21, d22] + const Scalar delta = e11 * e22 - e21 * e21; + std::swap(e11, e22); + e11 /= delta; + e22 /= delta; + e21 = -e21 / delta; + } + + // Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE + int gaussian_elimination_1x1(Index k) + { + // D = 1 / A[k, k] + const Scalar akk = diag_coeff(k); + // Return NUMERICAL_ISSUE if not invertible + if (akk == Scalar(0)) + return NUMERICAL_ISSUE; + + diag_coeff(k) = Scalar(1) / akk; + + // B -= l * l' / A[k, k], B := A[(k+1):end, (k+1):end], l := L[(k+1):end, k] + Scalar* lptr = col_pointer(k) + 1; + const Index ldim = m_n - k - 1; + MapVec l(lptr, ldim); + for (Index j = 0; j < ldim; j++) + { + MapVec(col_pointer(j + k + 1), ldim - j).noalias() -= (lptr[j] / akk) * l.tail(ldim - j); + } + + // l /= A[k, k] + l /= akk; + + return SUCCESSFUL; + } + + // Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE + int gaussian_elimination_2x2(Index k) + { + // D = inv(E) + Scalar& e11 = diag_coeff(k); + Scalar& e21 = coeff(k + 1, k); + Scalar& e22 = diag_coeff(k + 1); + // Return NUMERICAL_ISSUE if not invertible + if (e11 * e22 - e21 * e21 == Scalar(0)) + return NUMERICAL_ISSUE; + + inverse_inplace_2x2(e11, e21, e22); + + // X = l * inv(E), l := L[(k+2):end, k:(k+1)] + Scalar* l1ptr = &coeff(k + 2, k); + Scalar* l2ptr = &coeff(k + 2, k + 1); + const Index ldim = m_n - k - 2; + MapVec l1(l1ptr, ldim), l2(l2ptr, ldim); + + Eigen::Matrix X(ldim, 2); + X.col(0).noalias() = l1 * e11 + l2 * e21; + X.col(1).noalias() = l1 * e21 + l2 * e22; + + // B -= l * inv(E) * l' = X * l', B = A[(k+2):end, (k+2):end] + for (Index j = 0; j < ldim; j++) + { + MapVec(col_pointer(j + k + 2), ldim - j).noalias() -= (X.col(0).tail(ldim - j) * l1ptr[j] + X.col(1).tail(ldim - j) * l2ptr[j]); + } + + // l = X + l1.noalias() = X.col(0); + l2.noalias() = X.col(1); + + return SUCCESSFUL; + } + +public: + BKLDLT() : + m_n(0), m_computed(false), m_info(NOT_COMPUTED) + {} + + // Factorize mat - shift * I + BKLDLT(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) : + m_n(mat.rows()), m_computed(false), m_info(NOT_COMPUTED) + { + compute(mat, uplo, shift); + } + + void compute(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) + { + using std::abs; 
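+
+        // Overall flow: copy the requested triangle of (mat - shift * I) into
+        // packed column-major storage, run Bunch-Kaufman pivoting with 1x1 / 2x2
+        // elimination steps, and record the permutations so that solve_inplace()
+        // can later apply P, L, D and P' in sequence.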
+ + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("BKLDLT: matrix must be square"); + + m_perm.setLinSpaced(m_n, 0, m_n - 1); + m_permc.clear(); + + // Copy data + m_data.resize((m_n * (m_n + 1)) / 2); + compute_pointer(); + copy_data(mat, uplo, shift); + + const Scalar alpha = (1.0 + std::sqrt(17.0)) / 8.0; + Index k = 0; + for (k = 0; k < m_n - 1; k++) + { + // 1. Interchange rows and columns of A, and save the result to m_perm + bool is_1x1 = permutate_mat(k, alpha); + + // 2. Gaussian elimination + if (is_1x1) + { + m_info = gaussian_elimination_1x1(k); + } + else + { + m_info = gaussian_elimination_2x2(k); + k++; + } + + // 3. Check status + if (m_info != SUCCESSFUL) + break; + } + // Invert the last 1x1 block if it exists + if (k == m_n - 1) + { + const Scalar akk = diag_coeff(k); + if (akk == Scalar(0)) + m_info = NUMERICAL_ISSUE; + + diag_coeff(k) = Scalar(1) / diag_coeff(k); + } + + compress_permutation(); + + m_computed = true; + } + + // Solve Ax=b + void solve_inplace(GenericVector b) const + { + if (!m_computed) + throw std::logic_error("BKLDLT: need to call compute() first"); + + // PAP' = LDL' + // 1. b -> Pb + Scalar* x = b.data(); + MapVec res(x, m_n); + Index npermc = m_permc.size(); + for (Index i = 0; i < npermc; i++) + { + std::swap(x[m_permc[i].first], x[m_permc[i].second]); + } + + // 2. Lz = Pb + // If m_perm[end] < 0, then end with m_n - 3, otherwise end with m_n - 2 + const Index end = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2); + for (Index i = 0; i <= end; i++) + { + const Index b1size = m_n - i - 1; + const Index b2size = b1size - 1; + if (m_perm[i] >= 0) + { + MapConstVec l(&coeff(i + 1, i), b1size); + res.segment(i + 1, b1size).noalias() -= l * x[i]; + } + else + { + MapConstVec l1(&coeff(i + 2, i), b2size); + MapConstVec l2(&coeff(i + 2, i + 1), b2size); + res.segment(i + 2, b2size).noalias() -= (l1 * x[i] + l2 * x[i + 1]); + i++; + } + } + + // 3. Dw = z + for (Index i = 0; i < m_n; i++) + { + const Scalar e11 = diag_coeff(i); + if (m_perm[i] >= 0) + { + x[i] *= e11; + } + else + { + const Scalar e21 = coeff(i + 1, i), e22 = diag_coeff(i + 1); + const Scalar wi = x[i] * e11 + x[i + 1] * e21; + x[i + 1] = x[i] * e21 + x[i + 1] * e22; + x[i] = wi; + i++; + } + } + + // 4. L'y = w + // If m_perm[end] < 0, then start with m_n - 3, otherwise start with m_n - 2 + Index i = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2); + for (; i >= 0; i--) + { + const Index ldim = m_n - i - 1; + MapConstVec l(&coeff(i + 1, i), ldim); + x[i] -= res.segment(i + 1, ldim).dot(l); + + if (m_perm[i] < 0) + { + MapConstVec l2(&coeff(i + 1, i - 1), ldim); + x[i - 1] -= res.segment(i + 1, ldim).dot(l2); + i--; + } + } + + // 5. x = P'y + for (Index i = npermc - 1; i >= 0; i--) + { + std::swap(x[m_permc[i].first], x[m_permc[i].second]); + } + } + + Vector solve(ConstGenericVector& b) const + { + Vector res = b; + solve_inplace(res); + return res; + } + + int info() const { return m_info; } +}; + +} // namespace Spectra + +#endif // BK_LDLT_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h b/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h new file mode 100644 index 000000000..2845688b4 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h @@ -0,0 +1,384 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
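+
+// Usage sketch (illustrative only; H, s, t and Y below are placeholders for an
+// upper Hessenberg matrix, the two real shift constants chosen by the caller,
+// and a matrix with as many columns as H has rows):
+//
+//   Spectra::DoubleShiftQR<double> decomp(H, s, t);  // one implicit double-shift QR sweep
+//   Eigen::MatrixXd QtHQ(H.rows(), H.cols());
+//   decomp.matrix_QtHQ(QtHQ);                        // QtHQ = Q' * H * Q
+//   decomp.apply_YQ(Y);                              // Y = Y * Q, applied in place
+//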
+ +#ifndef DOUBLE_SHIFT_QR_H +#define DOUBLE_SHIFT_QR_H + +#include +#include // std::vector +#include // std::min, std::fill, std::copy +#include // std::abs, std::sqrt, std::pow +#include // std::invalid_argument, std::logic_error + +#include "../Util/TypeTraits.h" + +namespace Spectra { + +template +class DoubleShiftQR +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Matrix3X; + typedef Eigen::Matrix Vector; + typedef Eigen::Array IntArray; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Index m_n; // Dimension of the matrix + Matrix m_mat_H; // A copy of the matrix to be factorized + Scalar m_shift_s; // Shift constant + Scalar m_shift_t; // Shift constant + Matrix3X m_ref_u; // Householder reflectors + IntArray m_ref_nr; // How many rows does each reflector affects + // 3 - A general reflector + // 2 - A Givens rotation + // 1 - An identity transformation + const Scalar m_near_0; // a very small value, but 1.0 / m_safe_min does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, + // e.g. ~= 1e-16 for the "double" type + const Scalar m_eps_rel; + const Scalar m_eps_abs; + bool m_computed; // Whether matrix has been factorized + + void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind) + { + using std::abs; + + Scalar* u = &m_ref_u.coeffRef(0, ind); + unsigned char* nr = m_ref_nr.data(); + // In general case the reflector affects 3 rows + nr[ind] = 3; + Scalar x2x3 = Scalar(0); + // If x3 is zero, decrease nr by 1 + if (abs(x3) < m_near_0) + { + // If x2 is also zero, nr will be 1, and we can exit this function + if (abs(x2) < m_near_0) + { + nr[ind] = 1; + return; + } + else + { + nr[ind] = 2; + } + x2x3 = abs(x2); + } + else + { + x2x3 = Eigen::numext::hypot(x2, x3); + } + + // x1' = x1 - rho * ||x|| + // rho = -sign(x1), if x1 == 0, we choose rho = 1 + Scalar x1_new = x1 - ((x1 <= 0) - (x1 > 0)) * Eigen::numext::hypot(x1, x2x3); + Scalar x_norm = Eigen::numext::hypot(x1_new, x2x3); + // Double check the norm of new x + if (x_norm < m_near_0) + { + nr[ind] = 1; + return; + } + u[0] = x1_new / x_norm; + u[1] = x2 / x_norm; + u[2] = x3 / x_norm; + } + + void compute_reflector(const Scalar* x, Index ind) + { + compute_reflector(x[0], x[1], x[2], ind); + } + + // Update the block X = H(il:iu, il:iu) + void update_block(Index il, Index iu) + { + // Block size + const Index bsize = iu - il + 1; + + // If block size == 1, there is no need to apply reflectors + if (bsize == 1) + { + m_ref_nr.coeffRef(il) = 1; + return; + } + + const Scalar x00 = m_mat_H.coeff(il, il), + x01 = m_mat_H.coeff(il, il + 1), + x10 = m_mat_H.coeff(il + 1, il), + x11 = m_mat_H.coeff(il + 1, il + 1); + // m00 = x00 * (x00 - s) + x01 * x10 + t + const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t; + // m10 = x10 * (x00 + x11 - s) + const Scalar m10 = x10 * (x00 + x11 - m_shift_s); + + // For block size == 2, do a Givens rotation on M = X * X - s * X + t * I + if (bsize == 2) + { + // This causes nr=2 + compute_reflector(m00, m10, 0, il); + // Apply the reflector to X + apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il); + apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il); + + m_ref_nr.coeffRef(il + 1) = 1; + return; + } + + // For block size >=3, use the regular strategy + // m20 = x21 * x10 + const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il); + compute_reflector(m00, m10, m20, il); + + // Apply the first 
reflector + apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il); + apply_XP(m_mat_H.block(0, il, il + std::min(bsize, Index(4)), 3), m_n, il); + + // Calculate the following reflectors + // If entering this loop, block size is at least 4. + for (Index i = 1; i < bsize - 2; i++) + { + compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i); + // Apply the reflector to X + apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i); + apply_XP(m_mat_H.block(0, il + i, il + std::min(bsize, Index(i + 4)), 3), m_n, il + i); + } + + // The last reflector + // This causes nr=2 + compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1); + // Apply the reflector to X + apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1); + apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1); + + m_ref_nr.coeffRef(iu) = 1; + } + + // P = I - 2 * u * u' = P' + // PX = X - 2 * u * (u'X) + void apply_PX(GenericMatrix X, Index stride, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind); + const Scalar u0_2 = Scalar(2) * u0, + u1_2 = Scalar(2) * u1; + + const Index nrow = X.rows(); + const Index ncol = X.cols(); + + Scalar* xptr = X.data(); + if (nr == 2 || nrow == 2) + { + for (Index i = 0; i < ncol; i++, xptr += stride) + { + const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1]; + xptr[0] -= tmp * u0; + xptr[1] -= tmp * u1; + } + } + else + { + const Scalar u2 = m_ref_u.coeff(2, u_ind); + const Scalar u2_2 = Scalar(2) * u2; + for (Index i = 0; i < ncol; i++, xptr += stride) + { + const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2]; + xptr[0] -= tmp * u0; + xptr[1] -= tmp * u1; + xptr[2] -= tmp * u2; + } + } + } + + // x is a pointer to a vector + // Px = x - 2 * dot(x, u) * u + void apply_PX(Scalar* x, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind), + u2 = m_ref_u.coeff(2, u_ind); + + // When the reflector only contains two elements, u2 has been set to zero + const bool nr_is_2 = (nr == 2); + const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 
0 : (x[2] * u2))); + x[0] -= dot2 * u0; + x[1] -= dot2 * u1; + if (!nr_is_2) + x[2] -= dot2 * u2; + } + + // XP = X - 2 * (X * u) * u' + void apply_XP(GenericMatrix X, Index stride, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind); + const Scalar u0_2 = Scalar(2) * u0, + u1_2 = Scalar(2) * u1; + + const int nrow = X.rows(); + const int ncol = X.cols(); + Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1) + + if (nr == 2 || ncol == 2) + { + // tmp = 2 * u0 * X0 + 2 * u1 * X1 + // X0 => X0 - u0 * tmp + // X1 => X1 - u1 * tmp + for (Index i = 0; i < nrow; i++) + { + const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i]; + X0[i] -= tmp * u0; + X1[i] -= tmp * u1; + } + } + else + { + Scalar* X2 = X1 + stride; // X2 => X.col(2) + const Scalar u2 = m_ref_u.coeff(2, u_ind); + const Scalar u2_2 = Scalar(2) * u2; + for (Index i = 0; i < nrow; i++) + { + const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i]; + X0[i] -= tmp * u0; + X1[i] -= tmp * u1; + X2[i] -= tmp * u2; + } + } + } + +public: + DoubleShiftQR(Index size) : + m_n(size), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps_rel(m_eps), + m_eps_abs(m_near_0 * (m_n / m_eps)), + m_computed(false) + {} + + DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) : + m_n(mat.rows()), + m_mat_H(m_n, m_n), + m_shift_s(s), + m_shift_t(t), + m_ref_u(3, m_n), + m_ref_nr(m_n), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps_rel(m_eps), + m_eps_abs(m_near_0 * (m_n / m_eps)), + m_computed(false) + { + compute(mat, s, t); + } + + void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) + { + using std::abs; + + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("DoubleShiftQR: matrix must be square"); + + m_mat_H.resize(m_n, m_n); + m_shift_s = s; + m_shift_t = t; + m_ref_u.resize(3, m_n); + m_ref_nr.resize(m_n); + + // Make a copy of mat + std::copy(mat.data(), mat.data() + mat.size(), m_mat_H.data()); + + // Obtain the indices of zero elements in the subdiagonal, + // so that H can be divided into several blocks + std::vector zero_ind; + zero_ind.reserve(m_n - 1); + zero_ind.push_back(0); + Scalar* Hii = m_mat_H.data(); + for (Index i = 0; i < m_n - 2; i++, Hii += (m_n + 1)) + { + // Hii[1] => m_mat_H(i + 1, i) + const Scalar h = abs(Hii[1]); + if (h <= 0 || h <= m_eps_rel * (abs(Hii[0]) + abs(Hii[m_n + 1]))) + { + Hii[1] = 0; + zero_ind.push_back(i + 1); + } + // Make sure m_mat_H is upper Hessenberg + // Zero the elements below m_mat_H(i + 1, i) + std::fill(Hii + 2, Hii + m_n - i, Scalar(0)); + } + zero_ind.push_back(m_n); + + for (std::vector::size_type i = 0; i < zero_ind.size() - 1; i++) + { + const Index start = zero_ind[i]; + const Index end = zero_ind[i + 1] - 1; + // Compute refelctors and update each block + update_block(start, end); + } + + m_computed = true; + } + + void matrix_QtHQ(Matrix& dest) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + dest.noalias() = m_mat_H; + } + + // Q = P0 * P1 * ... + // Q'y = P_{n-2} * ... 
* P1 * P0 * y + void apply_QtY(Vector& y) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + Scalar* y_ptr = y.data(); + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++, y_ptr++) + { + apply_PX(y_ptr, i); + } + } + + // Q = P0 * P1 * ... + // YQ = Y * P0 * P1 * ... + void apply_YQ(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + const Index nrow = Y.rows(); + const Index n2 = m_n - 2; + for (Index i = 0; i < n2; i++) + { + apply_XP(Y.block(0, i, nrow, 3), nrow, i); + } + apply_XP(Y.block(0, n2, nrow, 2), nrow, n2); + } +}; + +} // namespace Spectra + +#endif // DOUBLE_SHIFT_QR_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h b/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h new file mode 100644 index 000000000..53cf52be8 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h @@ -0,0 +1,167 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef LANCZOS_H +#define LANCZOS_H + +#include +#include // std::sqrt +#include // std::invalid_argument +#include // std::stringstream + +#include "Arnoldi.h" + +namespace Spectra { + +// Lanczos factorization A * V = V * H + f * e' +// A: n x n +// V: n x k +// H: k x k +// f: n x 1 +// e: [0, ..., 0, 1] +// V and H are allocated of dimension m, so the maximum value of k is m +template +class Lanczos : public Arnoldi +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + + using Arnoldi::m_op; + using Arnoldi::m_n; + using Arnoldi::m_m; + using Arnoldi::m_k; + using Arnoldi::m_fac_V; + using Arnoldi::m_fac_H; + using Arnoldi::m_fac_f; + using Arnoldi::m_beta; + using Arnoldi::m_near_0; + using Arnoldi::m_eps; + +public: + Lanczos(const ArnoldiOpType& op, Index m) : + Arnoldi(op, m) + {} + + // Lanczos factorization starting from step-k + void factorize_from(Index from_k, Index to_m, Index& op_counter) + { + using std::sqrt; + + if (to_m <= from_k) + return; + + if (from_k > m_k) + { + std::stringstream msg; + msg << "Lanczos: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")"; + throw std::invalid_argument(msg.str()); + } + + const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); + + // Pre-allocate vectors + Vector Vf(to_m); + Vector w(m_n); + + // Keep the upperleft k x k submatrix of H and set other elements to 0 + m_fac_H.rightCols(m_m - from_k).setZero(); + m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); + + for (Index i = from_k; i <= to_m - 1; i++) + { + bool restart = false; + // If beta = 0, then the next V is not full rank + // We need to generate a new residual vector that is orthogonal + // to the current V, which we call a restart + if (m_beta < m_near_0) + { + MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns + this->expand_basis(V, 2 * i, m_fac_f, m_beta); + restart = true; + } + + // v <- f / ||f|| + MapVec v(&m_fac_V(0, i), m_n); // The (i+1)-th column + v.noalias() = m_fac_f / m_beta; + + // Note that H[i+1, i] equals to the unrestarted beta + m_fac_H(i, i - 1) = restart ? 
Scalar(0) : m_beta; + + // w <- A * v + m_op.perform_op(v.data(), w.data()); + op_counter++; + + // H[i+1, i+1] = = v'Bw + m_fac_H(i - 1, i) = m_fac_H(i, i - 1); // Due to symmetry + m_fac_H(i, i) = m_op.inner_product(v, w); + + // f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1} + // If restarting, we know that H[i+1, i] = 0 + if (restart) + m_fac_f.noalias() = w - m_fac_H(i, i) * v; + else + m_fac_f.noalias() = w - m_fac_H(i, i - 1) * m_fac_V.col(i - 1) - m_fac_H(i, i) * v; + + m_beta = m_op.norm(m_fac_f); + + // f/||f|| is going to be the next column of V, so we need to test + // whether V'B(f/||f||) ~= 0 + const Index i1 = i + 1; + MapMat Vs(m_fac_V.data(), m_n, i1); // The first (i+1) columns + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + // If not, iteratively correct the residual + int count = 0; + while (count < 5 && ortho_err > m_eps * m_beta) + { + // There is an edge case: when beta=||f|| is close to zero, f mostly consists + // of noises of rounding errors, so the test [ortho_err < eps * beta] is very + // likely to fail. In particular, if beta=0, then the test is ensured to fail. + // Hence when this happens, we force f to be zero, and then restart in the + // next iteration. + if (m_beta < beta_thresh) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + break; + } + + // f <- f - V * Vf + m_fac_f.noalias() -= Vs * Vf.head(i1); + // h <- h + Vf + m_fac_H(i - 1, i) += Vf[i - 1]; + m_fac_H(i, i - 1) = m_fac_H(i - 1, i); + m_fac_H(i, i) += Vf[i]; + // beta <- ||f|| + m_beta = m_op.norm(m_fac_f); + + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + count++; + } + } + + // Indicate that this is a step-m factorization + m_k = to_m; + } + + // Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition + void compress_H(const TridiagQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k--; + } +}; + +} // namespace Spectra + +#endif // LANCZOS_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h b/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h new file mode 100644 index 000000000..122e0e551 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h @@ -0,0 +1,219 @@ +// The code was adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h +// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2010 Jitse Niesen +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
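+
+// Usage sketch (illustrative only; the tridiagonal matrix below is made up,
+// and only its main diagonal and first sub-diagonal are read):
+//
+//   Eigen::MatrixXd T = Eigen::MatrixXd::Zero(4, 4);
+//   T.diagonal().setConstant(2.0);
+//   T.diagonal(-1).setConstant(-1.0);
+//
+//   Spectra::TridiagEigen<double> eig(T);
+//   Eigen::VectorXd evals = eig.eigenvalues();   // eigenvalues of T
+//   Eigen::MatrixXd evecs = eig.eigenvectors();  // corresponding eigenvectors, one per column
+//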
+ +#ifndef TRIDIAG_EIGEN_H +#define TRIDIAG_EIGEN_H + +#include +#include +#include + +#include "../Util/TypeTraits.h" + +namespace Spectra { + +template +class TridiagEigen +{ +private: + typedef Eigen::Index Index; + // For convenience in adapting the tridiagonal_qr_step() function + typedef Scalar RealScalar; + + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Index m_n; + Vector m_main_diag; // Main diagonal elements of the matrix + Vector m_sub_diag; // Sub-diagonal elements of the matrix + Matrix m_evecs; // To store eigenvectors + + bool m_computed; + const Scalar m_near_0; // a very small value, ~= 1e-307 for the "double" type + + // Adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h + static void tridiagonal_qr_step(RealScalar* diag, + RealScalar* subdiag, Index start, + Index end, Scalar* matrixQ, + Index n) + { + using std::abs; + + RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5); + RealScalar e = subdiag[end - 1]; + // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still + // underflow thus leading to inf/NaN values when using the following commented code: + // RealScalar e2 = numext::abs2(subdiag[end-1]); + // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2)); + // This explain the following, somewhat more complicated, version: + RealScalar mu = diag[end]; + if (td == Scalar(0)) + mu -= abs(e); + else + { + RealScalar e2 = Eigen::numext::abs2(subdiag[end - 1]); + RealScalar h = Eigen::numext::hypot(td, e); + if (e2 == RealScalar(0)) + mu -= (e / (td + (td > RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h); + else + mu -= e2 / (td + (td > RealScalar(0) ? 
h : -h)); + } + + RealScalar x = diag[start] - mu; + RealScalar z = subdiag[start]; + Eigen::Map q(matrixQ, n, n); + for (Index k = start; k < end; ++k) + { + Eigen::JacobiRotation rot; + rot.makeGivens(x, z); + + const RealScalar s = rot.s(); + const RealScalar c = rot.c(); + + // do T = G' T G + RealScalar sdk = s * diag[k] + c * subdiag[k]; + RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1]; + + diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]); + diag[k + 1] = s * sdk + c * dkp1; + subdiag[k] = c * sdk - s * dkp1; + + if (k > start) + subdiag[k - 1] = c * subdiag[k - 1] - s * z; + + x = subdiag[k]; + + if (k < end - 1) + { + z = -s * subdiag[k + 1]; + subdiag[k + 1] = c * subdiag[k + 1]; + } + + // apply the givens rotation to the unit matrix Q = Q * G + if (matrixQ) + q.applyOnTheRight(k, k + 1, rot); + } + } + +public: + TridiagEigen() : + m_n(0), m_computed(false), + m_near_0(TypeTraits::min() * Scalar(10)) + {} + + TridiagEigen(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_computed(false), + m_near_0(TypeTraits::min() * Scalar(10)) + { + compute(mat); + } + + void compute(ConstGenericMatrix& mat) + { + using std::abs; + + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("TridiagEigen: matrix must be square"); + + m_main_diag.resize(m_n); + m_sub_diag.resize(m_n - 1); + m_evecs.resize(m_n, m_n); + m_evecs.setIdentity(); + + // Scale matrix to improve stability + const Scalar scale = std::max(mat.diagonal().cwiseAbs().maxCoeff(), + mat.diagonal(-1).cwiseAbs().maxCoeff()); + // If scale=0, mat is a zero matrix, so we can early stop + if (scale < m_near_0) + { + // m_main_diag contains eigenvalues + m_main_diag.setZero(); + // m_evecs has been set identity + // m_evecs.setIdentity(); + m_computed = true; + return; + } + m_main_diag.noalias() = mat.diagonal() / scale; + m_sub_diag.noalias() = mat.diagonal(-1) / scale; + + Scalar* diag = m_main_diag.data(); + Scalar* subdiag = m_sub_diag.data(); + + Index end = m_n - 1; + Index start = 0; + Index iter = 0; // total number of iterations + int info = 0; // 0 for success, 1 for failure + + const Scalar considerAsZero = TypeTraits::min(); + const Scalar precision = Scalar(2) * Eigen::NumTraits::epsilon(); + + while (end > 0) + { + for (Index i = start; i < end; i++) + if (abs(subdiag[i]) <= considerAsZero || + abs(subdiag[i]) <= (abs(diag[i]) + abs(diag[i + 1])) * precision) + subdiag[i] = 0; + + // find the largest unreduced block + while (end > 0 && subdiag[end - 1] == Scalar(0)) + end--; + + if (end <= 0) + break; + + // if we spent too many iterations, we give up + iter++; + if (iter > 30 * m_n) + { + info = 1; + break; + } + + start = end - 1; + while (start > 0 && subdiag[start - 1] != Scalar(0)) + start--; + + tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n); + } + + if (info > 0) + throw std::runtime_error("TridiagEigen: eigen decomposition failed"); + + // Scale eigenvalues back + m_main_diag *= scale; + + m_computed = true; + } + + const Vector& eigenvalues() const + { + if (!m_computed) + throw std::logic_error("TridiagEigen: need to call compute() first"); + + // After calling compute(), main_diag will contain the eigenvalues. 
+ return m_main_diag; + } + + const Matrix& eigenvectors() const + { + if (!m_computed) + throw std::logic_error("TridiagEigen: need to call compute() first"); + + return m_evecs; + } +}; + +} // namespace Spectra + +#endif // TRIDIAG_EIGEN_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h new file mode 100644 index 000000000..4865e9db8 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h @@ -0,0 +1,319 @@ +// The code was adapted from Eigen/src/Eigenvaleus/EigenSolver.h +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2010,2012 Jitse Niesen +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef UPPER_HESSENBERG_EIGEN_H +#define UPPER_HESSENBERG_EIGEN_H + +#include +#include +#include + +namespace Spectra { + +template +class UpperHessenbergEigen +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + Index m_n; // Size of the matrix + Eigen::RealSchur m_realSchur; // Schur decomposition solver + Matrix m_matT; // Schur T matrix + Matrix m_eivec; // Storing eigenvectors + ComplexVector m_eivalues; // Eigenvalues + + bool m_computed; + + void doComputeEigenvectors() + { + using std::abs; + + const Index size = m_eivec.cols(); + const Scalar eps = Eigen::NumTraits::epsilon(); + + // inefficient! this is already computed in RealSchur + Scalar norm(0); + for (Index j = 0; j < size; ++j) + { + norm += m_matT.row(j).segment((std::max)(j - 1, Index(0)), size - (std::max)(j - 1, Index(0))).cwiseAbs().sum(); + } + + // Backsubstitute to find vectors of upper triangular form + if (norm == Scalar(0)) + return; + + for (Index n = size - 1; n >= 0; n--) + { + Scalar p = m_eivalues.coeff(n).real(); + Scalar q = m_eivalues.coeff(n).imag(); + + // Scalar vector + if (q == Scalar(0)) + { + Scalar lastr(0), lastw(0); + Index l = n; + + m_matT.coeffRef(n, n) = Scalar(1); + for (Index i = n - 1; i >= 0; i--) + { + Scalar w = m_matT.coeff(i, i) - p; + Scalar r = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastr = r; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == Scalar(0)) + { + if (w != Scalar(0)) + m_matT.coeffRef(i, n) = -r / w; + else + m_matT.coeffRef(i, n) = -r / (eps * norm); + } + else // Solve real equations + { + Scalar x = m_matT.coeff(i, i + 1); + Scalar y = m_matT.coeff(i + 1, i); + Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); + Scalar t = (x * lastr - lastw * r) / denom; + m_matT.coeffRef(i, n) = t; + if (abs(x) > abs(lastw)) + m_matT.coeffRef(i + 1, n) = (-r - w * t) / x; + else + m_matT.coeffRef(i + 1, n) = (-lastr - y * t) / lastw; + } + + // Overflow control + Scalar t = abs(m_matT.coeff(i, n)); + if ((eps * t) * t > Scalar(1)) + m_matT.col(n).tail(size - i) /= t; + } + } + } + else if (q < Scalar(0) && n > 0) + { // Complex vector + Scalar lastra(0), lastsa(0), lastw(0); + Index l = n - 1; + + // Last vector component imaginary so 
matrix is triangular + if (abs(m_matT.coeff(n, n - 1)) > abs(m_matT.coeff(n - 1, n))) + { + m_matT.coeffRef(n - 1, n - 1) = q / m_matT.coeff(n, n - 1); + m_matT.coeffRef(n - 1, n) = -(m_matT.coeff(n, n) - p) / m_matT.coeff(n, n - 1); + } + else + { + Complex cc = Complex(Scalar(0), -m_matT.coeff(n - 1, n)) / Complex(m_matT.coeff(n - 1, n - 1) - p, q); + m_matT.coeffRef(n - 1, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(n - 1, n) = Eigen::numext::imag(cc); + } + m_matT.coeffRef(n, n - 1) = Scalar(0); + m_matT.coeffRef(n, n) = Scalar(1); + for (Index i = n - 2; i >= 0; i--) + { + Scalar ra = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n - 1).segment(l, n - l + 1)); + Scalar sa = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); + Scalar w = m_matT.coeff(i, i) - p; + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastra = ra; + lastsa = sa; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == Scalar(0)) + { + Complex cc = Complex(-ra, -sa) / Complex(w, q); + m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); + } + else + { + // Solve complex equations + Scalar x = m_matT.coeff(i, i + 1); + Scalar y = m_matT.coeff(i + 1, i); + Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; + Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; + if ((vr == Scalar(0)) && (vi == Scalar(0))) + vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw)); + + Complex cc = Complex(x * lastra - lastw * ra + q * sa, x * lastsa - lastw * sa - q * ra) / Complex(vr, vi); + m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); + if (abs(x) > (abs(lastw) + abs(q))) + { + m_matT.coeffRef(i + 1, n - 1) = (-ra - w * m_matT.coeff(i, n - 1) + q * m_matT.coeff(i, n)) / x; + m_matT.coeffRef(i + 1, n) = (-sa - w * m_matT.coeff(i, n) - q * m_matT.coeff(i, n - 1)) / x; + } + else + { + cc = Complex(-lastra - y * m_matT.coeff(i, n - 1), -lastsa - y * m_matT.coeff(i, n)) / Complex(lastw, q); + m_matT.coeffRef(i + 1, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i + 1, n) = Eigen::numext::imag(cc); + } + } + + // Overflow control + Scalar t = std::max(abs(m_matT.coeff(i, n - 1)), abs(m_matT.coeff(i, n))); + if ((eps * t) * t > Scalar(1)) + m_matT.block(i, n - 1, size - i, 2) /= t; + } + } + + // We handled a pair of complex conjugate eigenvalues, so need to skip them both + n--; + } + } + + // Back transformation to get eigenvectors of original matrix + Vector m_tmp(size); + for (Index j = size - 1; j >= 0; j--) + { + m_tmp.noalias() = m_eivec.leftCols(j + 1) * m_matT.col(j).segment(0, j + 1); + m_eivec.col(j) = m_tmp; + } + } + +public: + UpperHessenbergEigen() : + m_n(0), m_computed(false) + {} + + UpperHessenbergEigen(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_computed(false) + { + compute(mat); + } + + void compute(ConstGenericMatrix& mat) + { + using std::abs; + using std::sqrt; + + if (mat.rows() != mat.cols()) + throw std::invalid_argument("UpperHessenbergEigen: matrix must be square"); + + m_n = mat.rows(); + // Scale matrix prior to the Schur decomposition + const Scalar scale = mat.cwiseAbs().maxCoeff(); + + // Reduce to real Schur form + Matrix Q = Matrix::Identity(m_n, m_n); + m_realSchur.computeFromHessenberg(mat / scale, Q, true); + if (m_realSchur.info() != Eigen::Success) + throw std::runtime_error("UpperHessenbergEigen: eigen 
decomposition failed"); + + m_matT = m_realSchur.matrixT(); + m_eivec = m_realSchur.matrixU(); + + // Compute eigenvalues from matT + m_eivalues.resize(m_n); + Index i = 0; + while (i < m_n) + { + // Real eigenvalue + if (i == m_n - 1 || m_matT.coeff(i + 1, i) == Scalar(0)) + { + m_eivalues.coeffRef(i) = m_matT.coeff(i, i); + ++i; + } + else // Complex eigenvalues + { + Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i + 1, i + 1)); + Scalar z; + // Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); + // without overflow + { + Scalar t0 = m_matT.coeff(i + 1, i); + Scalar t1 = m_matT.coeff(i, i + 1); + Scalar maxval = std::max(abs(p), std::max(abs(t0), abs(t1))); + t0 /= maxval; + t1 /= maxval; + Scalar p0 = p / maxval; + z = maxval * sqrt(abs(p0 * p0 + t0 * t1)); + } + m_eivalues.coeffRef(i) = Complex(m_matT.coeff(i + 1, i + 1) + p, z); + m_eivalues.coeffRef(i + 1) = Complex(m_matT.coeff(i + 1, i + 1) + p, -z); + i += 2; + } + } + + // Compute eigenvectors + doComputeEigenvectors(); + + // Scale eigenvalues back + m_eivalues *= scale; + + m_computed = true; + } + + const ComplexVector& eigenvalues() const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); + + return m_eivalues; + } + + ComplexMatrix eigenvectors() + { + using std::abs; + + if (!m_computed) + throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); + + Index n = m_eivec.cols(); + ComplexMatrix matV(n, n); + for (Index j = 0; j < n; ++j) + { + // imaginary part of real eigenvalue is already set to exact zero + if (Eigen::numext::imag(m_eivalues.coeff(j)) == Scalar(0) || j + 1 == n) + { + // we have a real eigen value + matV.col(j) = m_eivec.col(j).template cast(); + matV.col(j).normalize(); + } + else + { + // we have a pair of complex eigen values + for (Index i = 0; i < n; ++i) + { + matV.coeffRef(i, j) = Complex(m_eivec.coeff(i, j), m_eivec.coeff(i, j + 1)); + matV.coeffRef(i, j + 1) = Complex(m_eivec.coeff(i, j), -m_eivec.coeff(i, j + 1)); + } + matV.col(j).normalize(); + matV.col(j + 1).normalize(); + ++j; + } + } + + return matV; + } +}; + +} // namespace Spectra + +#endif // UPPER_HESSENBERG_EIGEN_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h new file mode 100644 index 000000000..5778f11dc --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h @@ -0,0 +1,670 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef UPPER_HESSENBERG_QR_H +#define UPPER_HESSENBERG_QR_H + +#include +#include // std::sqrt +#include // std::fill, std::copy +#include // std::logic_error + +namespace Spectra { + +/// +/// \defgroup Internals Internal Classes +/// +/// Classes for internal use. May be useful to developers. +/// + +/// +/// \ingroup Internals +/// @{ +/// + +/// +/// \defgroup LinearAlgebra Linear Algebra +/// +/// A number of classes for linear algebra operations. +/// + +/// +/// \ingroup LinearAlgebra +/// +/// Perform the QR decomposition of an upper Hessenberg matrix. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. 
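+///
+/// A small usage sketch is given below (illustrative only; the matrix and the
+/// shift value are made up for the example):
+///
+/// \code{.cpp}
+/// const int n = 10;
+/// Eigen::MatrixXd H = Eigen::MatrixXd::Random(n, n);
+/// H.triangularView<Eigen::StrictlyLower>().setZero();
+/// H.diagonal(-1) = Eigen::VectorXd::Random(n - 1);  // H is now upper Hessenberg
+///
+/// UpperHessenbergQR<double> decomp(H, 0.5);         // factorize H - 0.5 * I = Q * R
+/// Eigen::MatrixXd R = decomp.matrix_R();            // the upper triangular factor R
+///
+/// Eigen::MatrixXd next_H(n, n);
+/// decomp.matrix_QtHQ(next_H);                       // next_H = R * Q + 0.5 * I = Q' * H * Q
+/// \endcode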
+/// +template +class UpperHessenbergQR +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Matrix RowVector; + typedef Eigen::Array Array; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Matrix m_mat_T; + +protected: + Index m_n; + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + // Q = G1 * G2 * ... * G_{n-1} + Scalar m_shift; + Array m_rot_cos; + Array m_rot_sin; + bool m_computed; + + // Given x and y, compute 1) r = sqrt(x^2 + y^2), 2) c = x / r, 3) s = -y / r + // If both x and y are zero, set c = 1 and s = 0 + // We must implement it in a numerically stable way + static void compute_rotation(const Scalar& x, const Scalar& y, Scalar& r, Scalar& c, Scalar& s) + { + using std::sqrt; + + const Scalar xsign = (x > Scalar(0)) - (x < Scalar(0)); + const Scalar ysign = (y > Scalar(0)) - (y < Scalar(0)); + const Scalar xabs = x * xsign; + const Scalar yabs = y * ysign; + if (xabs > yabs) + { + // In this case xabs != 0 + const Scalar ratio = yabs / xabs; // so that 0 <= ratio < 1 + const Scalar common = sqrt(Scalar(1) + ratio * ratio); + c = xsign / common; + r = xabs * common; + s = -y / r; + } + else + { + if (yabs == Scalar(0)) + { + r = Scalar(0); + c = Scalar(1); + s = Scalar(0); + return; + } + const Scalar ratio = xabs / yabs; // so that 0 <= ratio <= 1 + const Scalar common = sqrt(Scalar(1) + ratio * ratio); + s = -ysign / common; + r = yabs * common; + c = x / r; + } + } + +public: + /// + /// Constructor to preallocate memory. Computation can + /// be performed later by calling the compute() method. + /// + UpperHessenbergQR(Index size) : + m_n(size), + m_rot_cos(m_n - 1), + m_rot_sin(m_n - 1), + m_computed(false) + {} + + /// + /// Constructor to create an object that performs and stores the + /// QR decomposition of an upper Hessenberg matrix `mat`, with an + /// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix + /// `mat`, and \f$s\f$ is the shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the upper triangular and the lower subdiagonal parts of + /// the matrix are used. + /// + UpperHessenbergQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : + m_n(mat.rows()), + m_shift(shift), + m_rot_cos(m_n - 1), + m_rot_sin(m_n - 1), + m_computed(false) + { + compute(mat, shift); + } + + /// + /// Virtual destructor. + /// + virtual ~UpperHessenbergQR(){}; + + /// + /// Conduct the QR factorization of an upper Hessenberg matrix with + /// an optional shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the upper triangular and the lower subdiagonal parts of + /// the matrix are used. 
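The compute_rotation() helper above builds a Givens rotation without overflow by scaling with the larger of |x| and |y|. A standalone restatement of that logic (plain C++, hypothetical helper name givens(), not taken from the patch) which checks that applying the transposed rotation really zeroes the second component:

#include <cmath>
#include <cstdio>

// Find r, c, s with G' * (x, y) = (r, 0), where G = [c s; -s c], avoiding overflow.
static void givens(double x, double y, double& r, double& c, double& s)
{
    const double xabs = std::abs(x), yabs = std::abs(y);
    if (xabs == 0.0 && yabs == 0.0) { r = 0.0; c = 1.0; s = 0.0; return; }
    if (xabs > yabs)
    {
        const double ratio = yabs / xabs;                 // 0 <= ratio < 1
        const double common = std::sqrt(1.0 + ratio * ratio);
        c = (x > 0.0 ? 1.0 : -1.0) / common;
        r = xabs * common;
        s = -y / r;
    }
    else
    {
        const double ratio = xabs / yabs;                 // 0 <= ratio <= 1
        const double common = std::sqrt(1.0 + ratio * ratio);
        s = -(y > 0.0 ? 1.0 : -1.0) / common;
        r = yabs * common;
        c = x / r;
    }
}

int main()
{
    double r, c, s;
    givens(3.0, 4.0, r, c, s);
    // Applying G' = [c -s; s c] to (3, 4) should give (5, 0) up to rounding.
    std::printf("r = %g, rotated = (%g, %g)\n", r, c * 3.0 - s * 4.0, s * 3.0 + c * 4.0);
    return 0;
}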
+ /// + virtual void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) + { + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("UpperHessenbergQR: matrix must be square"); + + m_shift = shift; + m_mat_T.resize(m_n, m_n); + m_rot_cos.resize(m_n - 1); + m_rot_sin.resize(m_n - 1); + + // Make a copy of mat - s * I + std::copy(mat.data(), mat.data() + mat.size(), m_mat_T.data()); + m_mat_T.diagonal().array() -= m_shift; + + Scalar xi, xj, r, c, s; + Scalar *Tii, *ptr; + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + Tii = &m_mat_T.coeffRef(i, i); + + // Make sure mat_T is upper Hessenberg + // Zero the elements below mat_T(i + 1, i) + std::fill(Tii + 2, Tii + m_n - i, Scalar(0)); + + xi = Tii[0]; // mat_T(i, i) + xj = Tii[1]; // mat_T(i + 1, i) + compute_rotation(xi, xj, r, c, s); + m_rot_cos[i] = c; + m_rot_sin[i] = s; + + // For a complete QR decomposition, + // we first obtain the rotation matrix + // G = [ cos sin] + // [-sin cos] + // and then do T[i:(i + 1), i:(n - 1)] = G' * T[i:(i + 1), i:(n - 1)] + + // Gt << c, -s, s, c; + // m_mat_T.block(i, i, 2, m_n - i) = Gt * m_mat_T.block(i, i, 2, m_n - i); + Tii[0] = r; // m_mat_T(i, i) => r + Tii[1] = 0; // m_mat_T(i + 1, i) => 0 + ptr = Tii + m_n; // m_mat_T(i, k), k = i+1, i+2, ..., n-1 + for (Index j = i + 1; j < m_n; j++, ptr += m_n) + { + Scalar tmp = ptr[0]; + ptr[0] = c * tmp - s * ptr[1]; + ptr[1] = s * tmp + c * ptr[1]; + } + + // If we do not need to calculate the R matrix, then + // only the cos and sin sequences are required. + // In such case we only update T[i + 1, (i + 1):(n - 1)] + // m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) *= c; + // m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) += s * mat_T.block(i, i + 1, 1, m_n - i - 1); + } + + m_computed = true; + } + + /// + /// Return the \f$R\f$ matrix in the QR decomposition, which is an + /// upper triangular matrix. + /// + /// \return Returned matrix type will be `Eigen::Matrix`, depending on + /// the template parameter `Scalar` defined. + /// + virtual Matrix matrix_R() const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + return m_mat_T; + } + + /// + /// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`, + /// and \f$s\f$ is the shift. The result is an upper Hessenberg matrix. + /// + /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. 
+ /// + virtual void matrix_QtHQ(Matrix& dest) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + // Make a copy of the R matrix + dest.resize(m_n, m_n); + std::copy(m_mat_T.data(), m_mat_T.data() + m_mat_T.size(), dest.data()); + + // Compute the RQ matrix + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // RQ[, i:(i + 1)] = RQ[, i:(i + 1)] * Gi + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Scalar *Yi, *Yi1; + Yi = &dest.coeffRef(0, i); + Yi1 = Yi + m_n; // RQ(0, i + 1) + const Index i2 = i + 2; + for (Index j = 0; j < i2; j++) + { + const Scalar tmp = Yi[j]; + Yi[j] = c * tmp - s * Yi1[j]; + Yi1[j] = s * tmp + c * Yi1[j]; + } + + /* Vector dest = RQ.block(0, i, i + 2, 1); + dest.block(0, i, i + 2, 1) = c * Yi - s * dest.block(0, i + 1, i + 2, 1); + dest.block(0, i + 1, i + 2, 1) = s * Yi + c * dest.block(0, i + 1, i + 2, 1); */ + } + + // Add the shift to the diagonal + dest.diagonal().array() += m_shift; + } + + /// + /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. + /// + /// \param Y A vector that will be overwritten by the matrix product \f$Qy\f$. + /// + /// Vector type can be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + // Y -> QY = G1 * G2 * ... * Y + void apply_QY(Vector& Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1)] = Gi * Y[i:(i + 1)] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Scalar tmp = Y[i]; + Y[i] = c * tmp + s * Y[i + 1]; + Y[i + 1] = -s * tmp + c * Y[i + 1]; + } + } + + /// + /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. + /// + /// \param Y A vector that will be overwritten by the matrix product \f$Q'y\f$. + /// + /// Vector type can be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + // Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y + void apply_QtY(Vector& Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1)] = Gi' * Y[i:(i + 1)] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Scalar tmp = Y[i]; + Y[i] = c * tmp - s * Y[i + 1]; + Y[i + 1] = s * tmp + c * Y[i + 1]; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$QY\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> QY = G1 * G2 * ... * Y + void apply_QY(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + RowVector Yi(Y.cols()), Yi1(Y.cols()); + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1), ] = Gi * Y[i:(i + 1), ] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.row(i); + Yi1.noalias() = Y.row(i + 1); + Y.row(i) = c * Yi + s * Yi1; + Y.row(i + 1) = -s * Yi + c * Yi1; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. 
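A short sketch of one shifted QR step using the class above: factor H - sI = QR, then matrix_QtHQ() returns RQ + sI = Q'HQ, which is similar to H, so for example the trace is unchanged. The include path and Scalar = double are assumptions.

#include <iostream>
#include <Eigen/Dense>
#include <Spectra/LinAlg/UpperHessenbergQR.h>  // assumed include path for the vendored header

int main()
{
    const int n = 6;
    Eigen::MatrixXd H = Eigen::MatrixXd::Random(n, n);
    for (int j = 0; j < n; ++j)              // zero everything below the first subdiagonal
        for (int i = j + 2; i < n; ++i)
            H(i, j) = 0.0;

    const double shift = 0.5;
    Spectra::UpperHessenbergQR<double> qr(H, shift);  // H - shift * I = Q * R

    Eigen::MatrixXd next;
    qr.matrix_QtHQ(next);                    // next = R * Q + shift * I = Q' * H * Q

    // The similarity transform preserves the spectrum, hence also the trace.
    std::cout << "trace difference: " << H.trace() - next.trace() << "\n";
    return 0;
}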
+ /// + /// \param Y A matrix that will be overwritten by the matrix product \f$Q'Y\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y + void apply_QtY(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + RowVector Yi(Y.cols()), Yi1(Y.cols()); + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1), ] = Gi' * Y[i:(i + 1), ] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.row(i); + Yi1.noalias() = Y.row(i + 1); + Y.row(i) = c * Yi - s * Yi1; + Y.row(i + 1) = s * Yi + c * Yi1; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$YQ\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> YQ = Y * G1 * G2 * ... + void apply_YQ(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + /*Vector Yi(Y.rows()); + for(Index i = 0; i < m_n - 1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.col(i); + Y.col(i) = c * Yi - s * Y.col(i + 1); + Y.col(i + 1) = s * Yi + c * Y.col(i + 1); + }*/ + Scalar *Y_col_i, *Y_col_i1; + const Index n1 = m_n - 1; + const Index nrow = Y.rows(); + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + + Y_col_i = &Y.coeffRef(0, i); + Y_col_i1 = &Y.coeffRef(0, i + 1); + for (Index j = 0; j < nrow; j++) + { + Scalar tmp = Y_col_i[j]; + Y_col_i[j] = c * tmp - s * Y_col_i1[j]; + Y_col_i1[j] = s * tmp + c * Y_col_i1[j]; + } + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$YQ'\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> YQ' = Y * G_{n-1}' * ... * G2' * G1' + void apply_YQt(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + Vector Yi(Y.rows()); + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi' + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.col(i); + Y.col(i) = c * Yi + s * Y.col(i + 1); + Y.col(i + 1) = -s * Yi + c * Y.col(i + 1); + } + } +}; + +/// +/// \ingroup LinearAlgebra +/// +/// Perform the QR decomposition of a tridiagonal matrix, a special +/// case of upper Hessenberg matrices. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. 
+/// +template +class TridiagQR : public UpperHessenbergQR +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef typename Matrix::Index Index; + + Vector m_T_diag; // diagonal elements of T + Vector m_T_lsub; // lower subdiagonal of T + Vector m_T_usub; // upper subdiagonal of T + Vector m_T_usub2; // 2nd upper subdiagonal of T + +public: + /// + /// Constructor to preallocate memory. Computation can + /// be performed later by calling the compute() method. + /// + TridiagQR(Index size) : + UpperHessenbergQR(size) + {} + + /// + /// Constructor to create an object that performs and stores the + /// QR decomposition of an upper Hessenberg matrix `mat`, with an + /// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix + /// `mat`, and \f$s\f$ is the shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the major- and sub- diagonal parts of + /// the matrix are used. + /// + TridiagQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : + UpperHessenbergQR(mat.rows()) + { + this->compute(mat, shift); + } + + /// + /// Conduct the QR factorization of a tridiagonal matrix with an + /// optional shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the major- and sub- diagonal parts of + /// the matrix are used. + /// + void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) + { + this->m_n = mat.rows(); + if (this->m_n != mat.cols()) + throw std::invalid_argument("TridiagQR: matrix must be square"); + + this->m_shift = shift; + m_T_diag.resize(this->m_n); + m_T_lsub.resize(this->m_n - 1); + m_T_usub.resize(this->m_n - 1); + m_T_usub2.resize(this->m_n - 2); + this->m_rot_cos.resize(this->m_n - 1); + this->m_rot_sin.resize(this->m_n - 1); + + m_T_diag.array() = mat.diagonal().array() - this->m_shift; + m_T_lsub.noalias() = mat.diagonal(-1); + m_T_usub.noalias() = m_T_lsub; + + // A number of pointers to avoid repeated address calculation + Scalar *c = this->m_rot_cos.data(), // pointer to the cosine vector + *s = this->m_rot_sin.data(), // pointer to the sine vector + r; + const Index n1 = this->m_n - 1; + for (Index i = 0; i < n1; i++) + { + // diag[i] == T[i, i] + // lsub[i] == T[i + 1, i] + // r = sqrt(T[i, i]^2 + T[i + 1, i]^2) + // c = T[i, i] / r, s = -T[i + 1, i] / r + this->compute_rotation(m_T_diag.coeff(i), m_T_lsub.coeff(i), r, *c, *s); + + // For a complete QR decomposition, + // we first obtain the rotation matrix + // G = [ cos sin] + // [-sin cos] + // and then do T[i:(i + 1), i:(i + 2)] = G' * T[i:(i + 1), i:(i + 2)] + + // Update T[i, i] and T[i + 1, i] + // The updated value of T[i, i] is known to be r + // The updated value of T[i + 1, i] is known to be 0 + m_T_diag.coeffRef(i) = r; + m_T_lsub.coeffRef(i) = Scalar(0); + // Update T[i, i + 1] and T[i + 1, i + 1] + // usub[i] == T[i, i + 1] + // diag[i + 1] == T[i + 1, i + 1] + const Scalar tmp = m_T_usub.coeff(i); + m_T_usub.coeffRef(i) = (*c) * tmp - (*s) * m_T_diag.coeff(i + 1); + m_T_diag.coeffRef(i + 1) = (*s) * tmp + (*c) * m_T_diag.coeff(i + 1); + // Update T[i, i + 2] and T[i + 1, i + 2] + // usub2[i] == T[i, i + 2] + // usub[i + 1] == T[i + 1, i + 2] + if (i < n1 - 1) + { + m_T_usub2.coeffRef(i) = -(*s) * m_T_usub.coeff(i + 1); + m_T_usub.coeffRef(i + 1) *= 
(*c); + } + + c++; + s++; + + // If we do not need to calculate the R matrix, then + // only the cos and sin sequences are required. + // In such case we only update T[i + 1, (i + 1):(i + 2)] + // T[i + 1, i + 1] = c * T[i + 1, i + 1] + s * T[i, i + 1]; + // T[i + 1, i + 2] *= c; + } + + this->m_computed = true; + } + + /// + /// Return the \f$R\f$ matrix in the QR decomposition, which is an + /// upper triangular matrix. + /// + /// \return Returned matrix type will be `Eigen::Matrix`, depending on + /// the template parameter `Scalar` defined. + /// + Matrix matrix_R() const + { + if (!this->m_computed) + throw std::logic_error("TridiagQR: need to call compute() first"); + + Matrix R = Matrix::Zero(this->m_n, this->m_n); + R.diagonal().noalias() = m_T_diag; + R.diagonal(1).noalias() = m_T_usub; + R.diagonal(2).noalias() = m_T_usub2; + + return R; + } + + /// + /// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`, + /// and \f$s\f$ is the shift. The result is a tridiagonal matrix. + /// + /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. + /// + void matrix_QtHQ(Matrix& dest) const + { + if (!this->m_computed) + throw std::logic_error("TridiagQR: need to call compute() first"); + + // Make a copy of the R matrix + dest.resize(this->m_n, this->m_n); + dest.setZero(); + dest.diagonal().noalias() = m_T_diag; + // The upper diagonal refers to m_T_usub + // The 2nd upper subdiagonal will be zero in RQ + + // Compute the RQ matrix + // [m11 m12] points to RQ[i:(i+1), i:(i+1)] + // [0 m22] + // + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Index n1 = this->m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = this->m_rot_cos.coeff(i); + const Scalar s = this->m_rot_sin.coeff(i); + const Scalar m11 = dest.coeff(i, i), + m12 = m_T_usub.coeff(i), + m22 = m_T_diag.coeff(i + 1); + + // Update the diagonal and the lower subdiagonal of dest + dest.coeffRef(i, i) = c * m11 - s * m12; + dest.coeffRef(i + 1, i) = -s * m22; + dest.coeffRef(i + 1, i + 1) = c * m22; + } + + // Copy the lower subdiagonal to upper subdiagonal + dest.diagonal(1).noalias() = dest.diagonal(-1); + + // Add the shift to the diagonal + dest.diagonal().array() += this->m_shift; + } +}; + +/// +/// @} +/// + +} // namespace Spectra + +#endif // UPPER_HESSENBERG_QR_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h b/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h new file mode 100644 index 000000000..c41cae729 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h @@ -0,0 +1,108 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_CHOLESKY_H +#define DENSE_CHOLESKY_H + +#include +#include +#include +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the operations related to Cholesky decomposition on a +/// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular +/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver +/// in the Cholesky decomposition mode. 
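The Cholesky mode described above rests on reducing the generalized problem A x = lambda B x, with B = L L', to the ordinary symmetric problem (L^{-1} A L^{-T}) y = lambda y with y = L' x. An Eigen-only sketch (illustrative matrices, not part of the patch) verifying that the two spectra agree:

#include <iostream>
#include <Eigen/Dense>

int main()
{
    // Symmetric A and symmetric positive definite B.
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(5, 5);
    Eigen::MatrixXd A = M + M.transpose();
    Eigen::MatrixXd N = Eigen::MatrixXd::Random(5, 5);
    Eigen::MatrixXd B = N * N.transpose() + 5.0 * Eigen::MatrixXd::Identity(5, 5);

    // C = L^{-1} A L^{-T}, where B = L L'.
    Eigen::LLT<Eigen::MatrixXd> llt(B);
    Eigen::MatrixXd L = llt.matrixL();
    Eigen::MatrixXd X = L.triangularView<Eigen::Lower>().solve(A);             // X = L^{-1} A
    Eigen::MatrixXd C = L.triangularView<Eigen::Lower>().solve(X.transpose()); // C = L^{-1} A L^{-T}

    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(C);
    Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> ges(A, B);
    std::cout << (es.eigenvalues() - ges.eigenvalues()).norm() << "\n";        // ~0
    return 0;
}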
+/// +template +class DenseCholesky +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + const Index m_n; + Eigen::LLT m_decomp; + int m_info; // status of the decomposition + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseCholesky(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_info(NOT_COMPUTED) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseCholesky: matrix must be square"); + + m_decomp.compute(mat); + m_info = (m_decomp.info() == Eigen::Success) ? + SUCCESSFUL : + NUMERICAL_ISSUE; + } + + /// + /// Returns the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Returns the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L) * x_in + void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixL().solve(x); + } + + /// + /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L') * x_in + void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixU().solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_CHOLESKY_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h new file mode 100644 index 000000000..02ad32f8f --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h @@ -0,0 +1,102 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_COMPLEX_SHIFT_SOLVE_H +#define DENSE_GEN_COMPLEX_SHIFT_SOLVE_H + +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the complex shift-solve operation on a general real matrix \f$A\f$, +/// i.e., calculating \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$ for any complex-valued +/// \f$\sigma\f$ and real-valued vector \f$x\f$. It is mainly used in the +/// GenEigsComplexShiftSolver eigen solver. 
+/// +template +class DenseGenComplexShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef Eigen::PartialPivLU ComplexSolver; + + ConstGenericMatrix m_mat; + const Index m_n; + ComplexSolver m_solver; + ComplexVector m_x_cache; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenComplexShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseGenComplexShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the complex shift \f$\sigma\f$. + /// + /// \param sigmar Real part of \f$\sigma\f$. + /// \param sigmai Imaginary part of \f$\sigma\f$. + /// + void set_shift(Scalar sigmar, Scalar sigmai) + { + m_solver.compute(m_mat.template cast() - Complex(sigmar, sigmai) * ComplexMatrix::Identity(m_n, m_n)); + m_x_cache.resize(m_n); + m_x_cache.setZero(); + } + + /// + /// Perform the complex shift-solve operation + /// \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = Re( inv(A - sigma * I) * x_in ) + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_x_cache.real() = MapConstVec(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(m_x_cache).real(); + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_COMPLEX_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h new file mode 100644 index 000000000..c4ade80a3 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h @@ -0,0 +1,80 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_MAT_PROD_H +#define DENSE_GEN_MAT_PROD_H + +#include + +namespace Spectra { + +/// +/// \defgroup MatOp Matrix Operations +/// +/// Define matrix operations on existing matrix objects +/// + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the GenEigsSolver and +/// SymEigsSolver eigen solvers. +/// +template +class DenseGenMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. 
`Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenMatProd(ConstGenericMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat * x; + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h new file mode 100644 index 000000000..9c93ecc7a --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h @@ -0,0 +1,88 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_REAL_SHIFT_SOLVE_H +#define DENSE_GEN_REAL_SHIFT_SOLVE_H + +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a general real matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. +/// +template +class DenseGenRealShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const Index m_n; + Eigen::PartialPivLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenRealShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseGenRealShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + m_solver.compute(m_mat - sigma * Matrix::Identity(m_n, m_n)); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
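DenseGenMatProd, defined above, is a thin wrapper whose perform_op() writes A * x into a caller-provided buffer. A minimal sanity check (include path and Scalar = double assumed):

#include <iostream>
#include <Eigen/Dense>
#include <Spectra/MatOp/DenseGenMatProd.h>  // assumed include path for the vendored header

int main()
{
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
    Eigen::VectorXd x = Eigen::VectorXd::Random(4), y(4);

    // perform_op() computes y = A * x through raw pointers.
    Spectra::DenseGenMatProd<double> op(A);
    op.perform_op(x.data(), y.data());

    std::cout << (y - A * x).norm() << "\n";  // ~0
    return 0;
}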
+ /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_REAL_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h b/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h new file mode 100644 index 000000000..76c792686 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h @@ -0,0 +1,73 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_SYM_MAT_PROD_H +#define DENSE_SYM_MAT_PROD_H + +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. +/// +template +class DenseSymMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseSymMatProd(ConstGenericMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // DENSE_SYM_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h new file mode 100644 index 000000000..620aa9413 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h @@ -0,0 +1,92 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_SYM_SHIFT_SOLVE_H +#define DENSE_SYM_SHIFT_SOLVE_H + +#include +#include + +#include "../LinAlg/BKLDLT.h" +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. 
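These dense operation classes are consumed by the eigen solvers; DenseSymMatProd above, for instance, is the stock operation object for the SymEigsSolver referenced in these comments. The solver header is not part of this hunk, so the sketch below assumes the classic Spectra 0.8-style interface (operation passed by pointer, selection rule LARGEST_ALGE, CompInfo value SUCCESSFUL) and an include path of <Spectra/SymEigsSolver.h>:

#include <iostream>
#include <Eigen/Dense>
#include <Spectra/SymEigsSolver.h>           // assumed include path; solver header not shown here
#include <Spectra/MatOp/DenseSymMatProd.h>   // assumed include path for the vendored header

int main()
{
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
    Eigen::MatrixXd A = M + M.transpose();   // symmetric

    // Request the 3 largest eigenvalues with a Krylov subspace of dimension 6.
    Spectra::DenseSymMatProd<double> op(A);
    Spectra::SymEigsSolver<double, Spectra::LARGEST_ALGE, Spectra::DenseSymMatProd<double> > eigs(&op, 3, 6);
    eigs.init();
    eigs.compute();
    if (eigs.info() == Spectra::SUCCESSFUL)
        std::cout << eigs.eigenvalues().transpose() << "\n";
    return 0;
}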
+/// +template +class DenseSymShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_n; + BKLDLT m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseSymShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseSymShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + m_solver.compute(m_mat, Uplo, sigma); + if (m_solver.info() != SUCCESSFUL) + throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_SYM_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h b/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h new file mode 100644 index 000000000..a73efc389 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h @@ -0,0 +1,109 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_CHOLESKY_H +#define SPARSE_CHOLESKY_H + +#include +#include +#include +#include +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the operations related to Cholesky decomposition on a +/// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular +/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver +/// in the Cholesky decomposition mode. +/// +template +class SparseCholesky +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + const Index m_n; + Eigen::SimplicialLLT m_decomp; + int m_info; // status of the decomposition + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseCholesky(ConstGenericSparseMatrix& mat) : + m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseCholesky: matrix must be square"); + + m_decomp.compute(mat); + m_info = (m_decomp.info() == Eigen::Success) ? 
+ SUCCESSFUL : + NUMERICAL_ISSUE; + } + + /// + /// Returns the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Returns the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L) * x_in + void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.permutationP() * x; + m_decomp.matrixL().solveInPlace(y); + } + + /// + /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L') * x_in + void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixU().solve(x); + y = m_decomp.permutationPinv() * y; + } +}; + +} // namespace Spectra + +#endif // SPARSE_CHOLESKY_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h b/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h new file mode 100644 index 000000000..0cc1f6674 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h @@ -0,0 +1,74 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_GEN_MAT_PROD_H +#define SPARSE_GEN_MAT_PROD_H + +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver +/// eigen solvers. +/// +template +class SparseGenMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseGenMatProd(ConstGenericSparseMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
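SparseCholesky, completed above, exposes the factorization B = L L' (computed with a fill-reducing permutation P) through its two triangular solves. Composing them applies B^{-1}, which the following sketch checks (include path and default template arguments assumed):

#include <iostream>
#include <Eigen/Sparse>
#include <Spectra/MatOp/SparseCholesky.h>  // assumed include path for the vendored header

int main()
{
    // Symmetric positive definite tridiagonal matrix B.
    const int n = 8;
    Eigen::SparseMatrix<double> B(n, n);
    for (int i = 0; i < n; ++i)
    {
        B.insert(i, i) = 4.0;
        if (i > 0) B.insert(i, i - 1) = -1.0;
        if (i < n - 1) B.insert(i, i + 1) = -1.0;
    }
    B.makeCompressed();

    Eigen::VectorXd x = Eigen::VectorXd::Random(n), z(n), y(n);
    Spectra::SparseCholesky<double> op(B);

    // z = L^{-1} P x, then y = P' L^{-T} z, so y = B^{-1} x.
    op.lower_triangular_solve(x.data(), z.data());
    op.upper_triangular_solve(z.data(), y.data());

    Eigen::VectorXd r = B * y;
    std::cout << (r - x).norm() << "\n";  // ~0
    return 0;
}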
+ /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_GEN_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h new file mode 100644 index 000000000..fc6e3c508 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h @@ -0,0 +1,93 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_GEN_REAL_SHIFT_SOLVE_H +#define SPARSE_GEN_REAL_SHIFT_SOLVE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a sparse real matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. +/// +template +class SparseGenRealShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::SparseLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseGenRealShiftSolve(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseGenRealShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + SparseMatrix I(m_n, m_n); + I.setIdentity(); + + m_solver.compute(m_mat - sigma * I); + if (m_solver.info() != Eigen::Success) + throw std::invalid_argument("SparseGenRealShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // SPARSE_GEN_REAL_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h b/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h new file mode 100644 index 000000000..3abd0c177 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h @@ -0,0 +1,100 @@ +// Copyright (C) 2017-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
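SparseGenRealShiftSolve, completed above, factorizes A - sigma * I with SparseLU in set_shift() and then applies the inverse in perform_op(). A small tridiagonal example checking that (A - sigma I) y = x holds (include path and default template arguments assumed):

#include <iostream>
#include <Eigen/Sparse>
#include <Spectra/MatOp/SparseGenRealShiftSolve.h>  // assumed include path for the vendored header

int main()
{
    // Tridiagonal sparse matrix A.
    const int n = 10;
    Eigen::SparseMatrix<double> A(n, n);
    for (int i = 0; i < n; ++i)
    {
        A.insert(i, i) = 2.0;
        if (i > 0) A.insert(i, i - 1) = -1.0;
        if (i < n - 1) A.insert(i, i + 1) = -1.0;
    }
    A.makeCompressed();

    Eigen::VectorXd x = Eigen::VectorXd::Random(n), y(n);
    Spectra::SparseGenRealShiftSolve<double> op(A);
    op.set_shift(0.3);                  // factorizes A - 0.3 * I with SparseLU
    op.perform_op(x.data(), y.data());  // y = (A - 0.3 I)^{-1} x

    Eigen::VectorXd Ay = A * y;
    std::cout << (Ay - 0.3 * y - x).norm() << "\n";  // ~0
    return 0;
}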
+ +#ifndef SPARSE_REGULAR_INVERSE_H +#define SPARSE_REGULAR_INVERSE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines matrix operations required by the generalized eigen solver +/// in the regular inverse mode. For a sparse and positive definite matrix \f$B\f$, +/// it implements the matrix-vector product \f$y=Bx\f$ and the linear equation +/// solving operation \f$y=B^{-1}x\f$. +/// +/// This class is intended to be used with the SymGEigsSolver generalized eigen solver +/// in the regular inverse mode. +/// +template +class SparseRegularInverse +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::ConjugateGradient m_cg; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseRegularInverse(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseRegularInverse: matrix must be square"); + + m_cg.compute(mat); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Perform the solving operation \f$y=B^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(B) * x_in + void solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_cg.solve(x); + } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Bx\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = B * x_in + void mat_prod(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_REGULAR_INVERSE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h b/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h new file mode 100644 index 000000000..2dd8799eb --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h @@ -0,0 +1,73 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_SYM_MAT_PROD_H +#define SPARSE_SYM_MAT_PROD_H + +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. 
+/// +template +class SparseSymMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseSymMatProd(ConstGenericSparseMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_SYM_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h new file mode 100644 index 000000000..674c6ac52 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h @@ -0,0 +1,95 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_SYM_SHIFT_SOLVE_H +#define SPARSE_SYM_SHIFT_SOLVE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a sparse real symmetric matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. +/// +template +class SparseSymShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::SparseLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseSymShiftSolve(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseSymShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. 
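The set_shift() step documented above (its body follows) prepares the spectral transformation nu = 1 / (lambda - sigma): eigenvalues of A close to the shift sigma are mapped to eigenvalues of (A - sigma I)^{-1} of large magnitude, which is why shift-and-invert converges quickly to interior eigenvalues. A toy numeric illustration with made-up values:

#include <cstdio>

int main()
{
    // Eigenvalues of a hypothetical A and a shift near one of them.
    const double lambda[3] = {1.0, 2.0, 10.0};
    const double sigma = 2.1;
    for (double l : lambda)
        std::printf("lambda = %4.1f  ->  nu = %8.3f\n", l, 1.0 / (l - sigma));
    // nu = -0.909, -10.000, 0.127; |nu| is largest for lambda = 2,
    // the eigenvalue nearest the shift.
    return 0;
}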
+ /// + void set_shift(Scalar sigma) + { + SparseMatrix mat = m_mat.template selfadjointView(); + SparseMatrix identity(m_n, m_n); + identity.setIdentity(); + mat = mat - sigma * identity; + m_solver.isSymmetric(true); + m_solver.compute(mat); + if (m_solver.info() != Eigen::Success) + throw std::invalid_argument("SparseSymShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // SPARSE_SYM_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h new file mode 100644 index 000000000..68654aafd --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h @@ -0,0 +1,150 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef ARNOLDI_OP_H +#define ARNOLDI_OP_H + +#include +#include // std::sqrt + +namespace Spectra { + +/// +/// \ingroup Internals +/// @{ +/// + +/// +/// \defgroup Operators Operators +/// +/// Different types of operators. +/// + +/// +/// \ingroup Operators +/// +/// Operators used in the Arnoldi factorization. +/// +template +class ArnoldiOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; + +public: + ArnoldiOp(OpType* op, BOpType* Bop) : + m_op(*op), m_Bop(*Bop), m_cache(op->rows()) + {} + + inline Index rows() const { return m_op.rows(); } + + // In generalized eigenvalue problem Ax=lambda*Bx, define the inner product to be = x'By. + // For regular eigenvalue problems, it is the usual inner product = x'y + + // Compute = x'By + // x and y are two vectors + template + Scalar inner_product(const Arg1& x, const Arg2& y) + { + m_Bop.mat_prod(y.data(), m_cache.data()); + return x.dot(m_cache); + } + + // Compute res = = X'By + // X is a matrix, y is a vector, res is a vector + template + void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref res) + { + m_Bop.mat_prod(y.data(), m_cache.data()); + res.noalias() = x.transpose() * m_cache; + } + + // B-norm of a vector, ||x||_B = sqrt(x'Bx) + template + Scalar norm(const Arg& x) + { + using std::sqrt; + return sqrt(inner_product(x, x)); + } + + // The "A" operator to generate the Krylov subspace + inline void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, y_out); + } +}; + +/// +/// \ingroup Operators +/// +/// Placeholder for the B-operator when \f$B = I\f$. +/// +class IdentityBOp +{}; + +/// +/// \ingroup Operators +/// +/// Partial specialization for the case \f$B = I\f$. 
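For the generalized problem, the ArnoldiOp above orthogonalizes with respect to the B-inner product <x, y> = x' B y and the induced norm ||x||_B = sqrt(x' B x). A dense Eigen sketch of these quantities (illustrative matrices only, not part of the patch):

#include <cmath>
#include <iostream>
#include <Eigen/Dense>

int main()
{
    // Symmetric positive definite B defining the inner product, plus two test vectors.
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(4, 4);
    Eigen::MatrixXd B = M * M.transpose() + 4.0 * Eigen::MatrixXd::Identity(4, 4);
    Eigen::VectorXd x = Eigen::VectorXd::Random(4);
    Eigen::VectorXd y = Eigen::VectorXd::Random(4);

    const double xy_B = x.dot(B * y);                // <x, y>_B = x' B y
    const double xnorm_B = std::sqrt(x.dot(B * x));  // ||x||_B
    std::cout << xy_B << " " << xnorm_B << "\n";
    return 0;
}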
+/// +template +class ArnoldiOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + OpType& m_op; + +public: + ArnoldiOp(OpType* op, IdentityBOp* /*Bop*/) : + m_op(*op) + {} + + inline Index rows() const { return m_op.rows(); } + + // Compute = x'y + // x and y are two vectors + template + Scalar inner_product(const Arg1& x, const Arg2& y) const + { + return x.dot(y); + } + + // Compute res = = X'y + // X is a matrix, y is a vector, res is a vector + template + void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref res) const + { + res.noalias() = x.transpose() * y; + } + + // B-norm of a vector. For regular eigenvalue problems it is simply the L2 norm + template + Scalar norm(const Arg& x) + { + return x.norm(); + } + + // The "A" operator to generate the Krylov subspace + inline void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, y_out); + } +}; + +/// +/// @} +/// + +} // namespace Spectra + +#endif // ARNOLDI_OP_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h new file mode 100644 index 000000000..fa9958352 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h @@ -0,0 +1,75 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_CHOLESKY_OP_H +#define SYM_GEIGS_CHOLESKY_OP_H + +#include +#include "../DenseSymMatProd.h" +#include "../DenseCholesky.h" + +namespace Spectra { + +/// +/// \ingroup Operators +/// +/// This class defines the matrix operation for generalized eigen solver in the +/// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any +/// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$. +/// This class is intended for internal use. +/// +template , + typename BOpType = DenseCholesky > +class SymGEigsCholeskyOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; // temporary working space + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. + /// + SymGEigsCholeskyOp(OpType& op, BOpType& Bop) : + m_op(op), m_Bop(Bop), m_cache(op.rows()) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_Bop.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_Bop.rows(); } + + /// + /// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
+ /// + // y_out = inv(L) * A * inv(L') * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_Bop.upper_triangular_solve(x_in, y_out); + m_op.perform_op(y_out, m_cache.data()); + m_Bop.lower_triangular_solve(m_cache.data(), y_out); + } +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_CHOLESKY_OP_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h new file mode 100644 index 000000000..514269c7f --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h @@ -0,0 +1,72 @@ +// Copyright (C) 2017-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_REG_INV_OP_H +#define SYM_GEIGS_REG_INV_OP_H + +#include +#include "../SparseSymMatProd.h" +#include "../SparseRegularInverse.h" + +namespace Spectra { + +/// +/// \ingroup Operators +/// +/// This class defines the matrix operation for generalized eigen solver in the +/// regular inverse mode. This class is intended for internal use. +/// +template , + typename BOpType = SparseRegularInverse > +class SymGEigsRegInvOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; // temporary working space + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. + /// + SymGEigsRegInvOp(OpType& op, BOpType& Bop) : + m_op(op), m_Bop(Bop), m_cache(op.rows()) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_Bop.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_Bop.rows(); } + + /// + /// Perform the matrix operation \f$y=B^{-1}Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(B) * A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, m_cache.data()); + m_Bop.solve(m_cache.data(), y_out); + } +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_REG_INV_OP_H diff --git a/gtsam/3rdparty/Spectra/SymEigsBase.h b/gtsam/3rdparty/Spectra/SymEigsBase.h new file mode 100644 index 000000000..9601425d5 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsBase.h @@ -0,0 +1,448 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_BASE_H +#define SYM_EIGS_BASE_H + +#include +#include // std::vector +#include // std::abs, std::pow, std::sqrt +#include // std::min, std::copy +#include // std::invalid_argument + +#include "Util/TypeTraits.h" +#include "Util/SelectionRule.h" +#include "Util/CompInfo.h" +#include "Util/SimpleRandom.h" +#include "MatOp/internal/ArnoldiOp.h" +#include "LinAlg/UpperHessenbergQR.h" +#include "LinAlg/TridiagEigen.h" +#include "LinAlg/Lanczos.h" + +namespace Spectra { + +/// +/// \defgroup EigenSolver Eigen Solvers +/// +/// Eigen solvers for different types of problems. 
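SymGEigsRegInvOp above chains its A-operation and B-solve to apply y = B^{-1} A x once per Lanczos step. A dense Eigen-only sketch of that composite operator (illustrative values only, not part of the patch):

#include <iostream>
#include <Eigen/Dense>

int main()
{
    // Symmetric A, symmetric positive definite B, and a test vector.
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(5, 5);
    Eigen::MatrixXd A = M + M.transpose();
    Eigen::MatrixXd N = Eigen::MatrixXd::Random(5, 5);
    Eigen::MatrixXd B = N * N.transpose() + 5.0 * Eigen::MatrixXd::Identity(5, 5);
    Eigen::VectorXd x = Eigen::VectorXd::Random(5);

    // One application of the regular-inverse operator: y = B^{-1} (A x).
    Eigen::VectorXd y = B.llt().solve(A * x);
    std::cout << y.transpose() << "\n";
    return 0;
}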
+/// + +/// +/// \ingroup EigenSolver +/// +/// This is the base class for symmetric eigen solvers, mainly for internal use. +/// It is kept here to provide the documentation for member functions of concrete eigen solvers +/// such as SymEigsSolver and SymEigsShiftSolver. +/// +template +class SymEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Array Array; + typedef Eigen::Array BoolArray; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef ArnoldiOp ArnoldiOpType; + typedef Lanczos LanczosFac; + +protected: + // clang-format off + OpType* m_op; // object to conduct matrix operation, + // e.g. matrix-vector product + const Index m_n; // dimension of matrix A + const Index m_nev; // number of eigenvalues requested + const Index m_ncv; // dimension of Krylov subspace in the Lanczos method + Index m_nmatop; // number of matrix operations called + Index m_niter; // number of restarting iterations + + LanczosFac m_fac; // Lanczos factorization + Vector m_ritz_val; // Ritz values + +private: + Matrix m_ritz_vec; // Ritz vectors + Vector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates + BoolArray m_ritz_conv; // indicator of the convergence of Ritz values + int m_info; // status of the computation + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + const Scalar m_eps23; // m_eps^(2/3), used to test the convergence + // clang-format on + + // Implicitly restarted Lanczos factorization + void restart(Index k) + { + if (k >= m_ncv) + return; + + TridiagQR decomp(m_ncv); + Matrix Q = Matrix::Identity(m_ncv, m_ncv); + + for (Index i = k; i < m_ncv; i++) + { + // QR decomposition of H-mu*I, mu is the shift + decomp.compute(m_fac.matrix_H(), m_ritz_val[i]); + + // Q -> Q * Qi + decomp.apply_YQ(Q); + // H -> Q'HQ + // Since QR = H - mu * I, we have H = QR + mu * I + // and therefore Q'HQ = RQ + mu * I + m_fac.compress_H(decomp); + } + + m_fac.compress_V(Q); + m_fac.factorize_from(k, m_ncv, m_nmatop); + + retrieve_ritzpair(); + } + + // Calculates the number of converged Ritz values + Index num_converged(Scalar tol) + { + // thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value + Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23); + Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); + // Converged "wanted" Ritz values + m_ritz_conv = (resid < thresh); + + return m_ritz_conv.cast().sum(); + } + + // Returns the adjusted nev for restarting + Index nev_adjusted(Index nconv) + { + using std::abs; + + Index nev_new = m_nev; + for (Index i = m_nev; i < m_ncv; i++) + if (abs(m_ritz_est[i]) < m_near_0) + nev_new++; + + // Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK + nev_new += std::min(nconv, (m_ncv - nev_new) / 2); + if (nev_new == 1 && m_ncv >= 6) + nev_new = m_ncv / 2; + else if (nev_new == 1 && m_ncv > 2) + nev_new = 2; + + if (nev_new > m_ncv - 1) + nev_new = m_ncv - 1; + + return nev_new; + } + + // Retrieves and sorts Ritz values and Ritz vectors + void retrieve_ritzpair() + { + TridiagEigen decomp(m_fac.matrix_H()); + const Vector& evals = decomp.eigenvalues(); + const Matrix& evecs = decomp.eigenvectors(); + + SortEigenvalue sorting(evals.data(), evals.size()); + std::vector ind = sorting.index(); + + // For BOTH_ENDS, the eigenvalues 
are sorted according + // to the LARGEST_ALGE rule, so we need to move those smallest + // values to the left + // The order would be + // Largest => Smallest => 2nd largest => 2nd smallest => ... + // We keep this order since the first k values will always be + // the wanted collection, no matter k is nev_updated (used in restart()) + // or is nev (used in sort_ritzpair()) + if (SelectionRule == BOTH_ENDS) + { + std::vector ind_copy(ind); + for (Index i = 0; i < m_ncv; i++) + { + // If i is even, pick values from the left (large values) + // If i is odd, pick values from the right (small values) + if (i % 2 == 0) + ind[i] = ind_copy[i / 2]; + else + ind[i] = ind_copy[m_ncv - 1 - i / 2]; + } + } + + // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively + for (Index i = 0; i < m_ncv; i++) + { + m_ritz_val[i] = evals[ind[i]]; + m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); + } + for (Index i = 0; i < m_nev; i++) + { + m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); + } + } + +protected: + // Sorts the first nev Ritz pairs in the specified order + // This is used to return the final results + virtual void sort_ritzpair(int sort_rule) + { + // First make sure that we have a valid index vector + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + std::vector ind = sorting.index(); + + switch (sort_rule) + { + case LARGEST_ALGE: + break; + case LARGEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_ALGE: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + default: + throw std::invalid_argument("unsupported sorting rule"); + } + + Vector new_ritz_val(m_ncv); + Matrix new_ritz_vec(m_ncv, m_nev); + BoolArray new_ritz_conv(m_nev); + + for (Index i = 0; i < m_nev; i++) + { + new_ritz_val[i] = m_ritz_val[ind[i]]; + new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); + new_ritz_conv[i] = m_ritz_conv[ind[i]]; + } + + m_ritz_val.swap(new_ritz_val); + m_ritz_vec.swap(new_ritz_vec); + m_ritz_conv.swap(new_ritz_conv); + } + +public: + /// \cond + + SymEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) : + m_op(op), + m_n(m_op->rows()), + m_nev(nev), + m_ncv(ncv > m_n ? m_n : ncv), + m_nmatop(0), + m_niter(0), + m_fac(ArnoldiOpType(op, Bop), m_ncv), + m_info(NOT_COMPUTED), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3)) + { + if (nev < 1 || nev > m_n - 1) + throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix"); + + if (ncv <= nev || ncv > m_n) + throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix"); + } + + /// + /// Virtual destructor + /// + virtual ~SymEigsBase() {} + + /// \endcond + + /// + /// Initializes the solver by providing an initial residual vector. + /// + /// \param init_resid Pointer to the initial residual vector. + /// + /// **Spectra** (and also **ARPACK**) uses an iterative algorithm + /// to find eigenvalues. This function allows the user to provide the initial + /// residual vector. 
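///
/// A short sketch of supplying a custom starting vector (assuming `Scalar`
/// is `double`, a problem size `n`, and a solver object `eigs` constructed
/// as in the examples of the derived classes):
///
/// \code{.cpp}
/// Eigen::VectorXd v0 = Eigen::VectorXd::Ones(n);  // any nonzero starting vector
/// eigs.init(v0.data());
/// eigs.compute();
/// \endcode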
+ /// + void init(const Scalar* init_resid) + { + // Reset all matrices/vectors to zero + m_ritz_val.resize(m_ncv); + m_ritz_vec.resize(m_ncv, m_nev); + m_ritz_est.resize(m_ncv); + m_ritz_conv.resize(m_nev); + + m_ritz_val.setZero(); + m_ritz_vec.setZero(); + m_ritz_est.setZero(); + m_ritz_conv.setZero(); + + m_nmatop = 0; + m_niter = 0; + + // Initialize the Lanczos factorization + MapConstVec v0(init_resid, m_n); + m_fac.init(v0, m_nmatop); + } + + /// + /// Initializes the solver by providing a random initial residual vector. + /// + /// This overloaded function generates a random initial residual vector + /// (with a fixed random seed) for the algorithm. Elements in the vector + /// follow independent Uniform(-0.5, 0.5) distribution. + /// + void init() + { + SimpleRandom rng(0); + Vector init_resid = rng.random_vec(m_n); + init(init_resid.data()); + } + + /// + /// Conducts the major computation procedure. + /// + /// \param maxit Maximum number of iterations allowed in the algorithm. + /// \param tol Precision parameter for the calculated eigenvalues. + /// \param sort_rule Rule to sort the eigenvalues and eigenvectors. + /// Supported values are + /// `Spectra::LARGEST_ALGE`, `Spectra::LARGEST_MAGN`, + /// `Spectra::SMALLEST_ALGE` and `Spectra::SMALLEST_MAGN`, + /// for example `LARGEST_ALGE` indicates that largest eigenvalues + /// come first. Note that this argument is only used to + /// **sort** the final result, and the **selection** rule + /// (e.g. selecting the largest or smallest eigenvalues in the + /// full spectrum) is specified by the template parameter + /// `SelectionRule` of SymEigsSolver. + /// + /// \return Number of converged eigenvalues. + /// + Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_ALGE) + { + // The m-step Lanczos factorization + m_fac.factorize_from(1, m_ncv, m_nmatop); + retrieve_ritzpair(); + // Restarting + Index i, nconv = 0, nev_adj; + for (i = 0; i < maxit; i++) + { + nconv = num_converged(tol); + if (nconv >= m_nev) + break; + + nev_adj = nev_adjusted(nconv); + restart(nev_adj); + } + // Sorting results + sort_ritzpair(sort_rule); + + m_niter += i + 1; + m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING; + + return std::min(m_nev, nconv); + } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Returns the number of iterations used in the computation. + /// + Index num_iterations() const { return m_niter; } + + /// + /// Returns the number of matrix operations used in the computation. + /// + Index num_operations() const { return m_nmatop; } + + /// + /// Returns the converged eigenvalues. + /// + /// \return A vector containing the eigenvalues. + /// Returned vector type will be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + Vector eigenvalues() const + { + const Index nconv = m_ritz_conv.cast().sum(); + Vector res(nconv); + + if (!nconv) + return res; + + Index j = 0; + for (Index i = 0; i < m_nev; i++) + { + if (m_ritz_conv[i]) + { + res[j] = m_ritz_val[i]; + j++; + } + } + + return res; + } + + /// + /// Returns the eigenvectors associated with the converged eigenvalues. + /// + /// \param nvec The number of eigenvectors to return. + /// + /// \return A matrix containing the eigenvectors. + /// Returned matrix type will be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. 
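///
/// For example (a sketch, assuming a solver object `eigs` that has already
/// been computed as in the SymEigsSolver documentation):
///
/// \code{.cpp}
/// Eigen::MatrixXd evecs = eigs.eigenvectors(2);
/// // evecs has n rows and min(2, nconv) columns, where nconv is the
/// // number of converged eigenvalues.
/// \endcode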
+ /// + virtual Matrix eigenvectors(Index nvec) const + { + const Index nconv = m_ritz_conv.cast().sum(); + nvec = std::min(nvec, nconv); + Matrix res(m_n, nvec); + + if (!nvec) + return res; + + Matrix ritz_vec_conv(m_ncv, nvec); + Index j = 0; + for (Index i = 0; i < m_nev && j < nvec; i++) + { + if (m_ritz_conv[i]) + { + ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); + j++; + } + } + + res.noalias() = m_fac.matrix_V() * ritz_vec_conv; + + return res; + } + + /// + /// Returns all converged eigenvectors. + /// + virtual Matrix eigenvectors() const + { + return eigenvectors(m_nev); + } +}; + +} // namespace Spectra + +#endif // SYM_EIGS_BASE_H diff --git a/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h b/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h new file mode 100644 index 000000000..e2b410cb1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h @@ -0,0 +1,203 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_SHIFT_SOLVER_H +#define SYM_EIGS_SHIFT_SOLVER_H + +#include + +#include "SymEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseSymShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for real symmetric matrices using +/// the **shift-and-invert mode**. The background information of the symmetric +/// eigen solver is documented in the SymEigsSolver class. Here we focus on +/// explaining the shift-and-invert mode. +/// +/// The shift-and-invert mode is based on the following fact: +/// If \f$\lambda\f$ and \f$x\f$ are a pair of eigenvalue and eigenvector of +/// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$, +/// we have +/// \f[(A-\sigma I)^{-1}x=\nu x\f] +/// where +/// \f[\nu=\frac{1}{\lambda-\sigma}\f] +/// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix +/// \f$(A-\sigma I)^{-1}\f$. +/// +/// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$ +/// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired +/// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting +/// that \f$\lambda=\sigma+\nu^{-1}\f$. +/// +/// The reason why we need this type of manipulation is that +/// the algorithm of **Spectra** (and also **ARPACK**) +/// is good at finding eigenvalues with large magnitude, but may fail in looking +/// for eigenvalues that are close to zero. However, if we really need them, we +/// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then +/// transform back to \f$\lambda\f$, since in this case largest values of \f$\nu\f$ +/// implies smallest values of \f$\lambda\f$. +/// +/// To summarize, in the shift-and-invert mode, the selection rule will apply to +/// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. So a selection rule +/// of `LARGEST_MAGN` combined with shift \f$\sigma\f$ will find eigenvalues of +/// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues() +/// method will always return the eigenvalues in the original problem (i.e., +/// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the +/// same for both the original problem and the shifted-and-inverted problem. +/// +/// \tparam Scalar The element type of the matrix. 
+/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseSymShiftSolve and +/// SparseSymShiftSolve, or define their +/// own that implements all the public member functions as in +/// DenseSymShiftSolve. +/// +/// Below is an example that illustrates the use of the shift-and-invert mode: +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // A size-10 diagonal matrix with elements 1, 2, ..., 10 +/// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10); +/// for(int i = 0; i < M.rows(); i++) +/// M(i, i) = i + 1; +/// +/// // Construct matrix operation object using the wrapper class +/// DenseSymShiftSolve op(M); +/// +/// // Construct eigen solver object with shift 0 +/// // This will find eigenvalues that are closest to 0 +/// SymEigsShiftSolver< double, LARGEST_MAGN, +/// DenseSymShiftSolve > eigs(&op, 3, 6, 0.0); +/// +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (3.0, 2.0, 1.0) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +/// Also an example for user-supplied matrix shift-solve operation class: +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// // M = diag(1, 2, ..., 10) +/// class MyDiagonalTenShiftSolve +/// { +/// private: +/// double sigma_; +/// public: +/// int rows() { return 10; } +/// int cols() { return 10; } +/// void set_shift(double sigma) { sigma_ = sigma; } +/// // y_out = inv(A - sigma * I) * x_in +/// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...) +/// void perform_op(double *x_in, double *y_out) +/// { +/// for(int i = 0; i < rows(); i++) +/// { +/// y_out[i] = x_in[i] / (i + 1 - sigma_); +/// } +/// } +/// }; +/// +/// int main() +/// { +/// MyDiagonalTenShiftSolve op; +/// // Find three eigenvalues that are closest to 3.14 +/// SymEigsShiftSolver eigs(&op, 3, 6, 3.14); +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (4.0, 3.0, 2.0) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +template > +class SymEigsShiftSolver : public SymEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Array Array; + + const Scalar m_sigma; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + Array m_ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma; + this->m_ritz_val.head(this->m_nev) = m_ritz_val_org; + SymEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the shift-solve operation of \f$A\f$: calculating + /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. 
Users could either + /// create the object from the wrapper class such as DenseSymShiftSolve, or + /// define their own that implements all the public member functions + /// as in DenseSymShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv_` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// \param sigma The value of the shift. + /// + SymEigsShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) : + SymEigsBase(op, NULL, nev, ncv), + m_sigma(sigma) + { + this->m_op->set_shift(m_sigma); + } +}; + +} // namespace Spectra + +#endif // SYM_EIGS_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/SymEigsSolver.h b/gtsam/3rdparty/Spectra/SymEigsSolver.h new file mode 100644 index 000000000..404df1e52 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsSolver.h @@ -0,0 +1,171 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_SOLVER_H +#define SYM_EIGS_SOLVER_H + +#include + +#include "SymEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseSymMatProd.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for real symmetric matrices, i.e., +/// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric. +/// +/// **Spectra** is designed to calculate a specified number (\f$k\f$) +/// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much +/// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues +/// and eigenvectors are computed. +/// +/// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires +/// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of +/// this solver need to supply a class that computes the result of \f$Av\f$ +/// for any given vector \f$v\f$. The name of this class should be given to +/// the template parameter `OpType`, and instance of this class passed to +/// the constructor of SymEigsSolver. +/// +/// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**, +/// for example `Eigen::MatrixXd`, then there is an easy way to construct such +/// matrix operation class, by using the built-in wrapper class DenseSymMatProd +/// which wraps an existing matrix object in **Eigen**. This is also the +/// default template parameter for SymEigsSolver. For sparse matrices, the +/// wrapper class SparseSymMatProd can be used similarly. +/// +/// If the users need to define their own matrix-vector multiplication operation +/// class, it should implement all the public member functions as in DenseSymMatProd. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. 
+/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// +/// Below is an example that demonstrates the usage of this class. +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to calculate the eigenvalues of M +/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10); +/// Eigen::MatrixXd M = A + A.transpose(); +/// +/// // Construct matrix operation object using the wrapper class DenseSymMatProd +/// DenseSymMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest three eigenvalues +/// SymEigsSolver< double, LARGEST_ALGE, DenseSymMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +/// +/// And here is an example for user-supplied matrix operation class. +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// // M = diag(1, 2, ..., 10) +/// class MyDiagonalTen +/// { +/// public: +/// int rows() { return 10; } +/// int cols() { return 10; } +/// // y_out = M * x_in +/// void perform_op(double *x_in, double *y_out) +/// { +/// for(int i = 0; i < rows(); i++) +/// { +/// y_out[i] = x_in[i] * (i + 1); +/// } +/// } +/// }; +/// +/// int main() +/// { +/// MyDiagonalTen op; +/// SymEigsSolver eigs(&op, 3, 6); +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (10, 9, 8) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +template > +class SymEigsSolver : public SymEigsBase +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. 
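///
/// A compilable restatement of the class-level example above, with the
/// template arguments written out in full (the include path is an assumption
/// and should match wherever this header is installed):
///
/// \code{.cpp}
/// #include <Eigen/Core>
/// #include <Spectra/SymEigsSolver.h>  // also pulls in DenseSymMatProd
/// #include <iostream>
///
/// int main()
/// {
///     Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);
///     Eigen::MatrixXd M = A + A.transpose();
///
///     Spectra::DenseSymMatProd<double> op(M);
///     Spectra::SymEigsSolver<double, Spectra::LARGEST_ALGE,
///                            Spectra::DenseSymMatProd<double> > eigs(&op, 3, 6);
///
///     eigs.init();
///     eigs.compute();
///     if (eigs.info() == Spectra::SUCCESSFUL)
///         std::cout << eigs.eigenvalues().transpose() << std::endl;
///     return 0;
/// }
/// \endcode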
+ /// + SymEigsSolver(OpType* op, Index nev, Index ncv) : + SymEigsBase(op, NULL, nev, ncv) + {} +}; + +} // namespace Spectra + +#endif // SYM_EIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/SymGEigsSolver.h b/gtsam/3rdparty/Spectra/SymGEigsSolver.h new file mode 100644 index 000000000..68aa37cfc --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymGEigsSolver.h @@ -0,0 +1,326 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_SOLVER_H +#define SYM_GEIGS_SOLVER_H + +#include "SymEigsBase.h" +#include "Util/GEigsMode.h" +#include "MatOp/internal/SymGEigsCholeskyOp.h" +#include "MatOp/internal/SymGEigsRegInvOp.h" + +namespace Spectra { + +/// +/// \defgroup GEigenSolver Generalized Eigen Solvers +/// +/// Generalized eigen solvers for different types of problems. +/// + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and +/// \f$B\f$ is positive definite. +/// +/// There are two modes of this solver, specified by the template parameter +/// GEigsMode. See the pages for the specialized classes for details. +/// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky +/// decomposition, which is the preferred mode when the decomposition is +/// available. (This can be easily done in Eigen using the dense or sparse +/// Cholesky solver.) +/// See \ref SymGEigsSolver "SymGEigsSolver (Cholesky mode)" for more details. +/// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the +/// linear equation solving operation \f$B^{-1}v\f$. This mode should only be +/// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or +/// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition. +/// See \ref SymGEigsSolver "SymGEigsSolver (Regular inverse mode)" for more details. + +// Empty class template +template +class SymGEigsSolver +{}; + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$ +/// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky +/// decomposition \f$B=LL'\f$. +/// +/// This solver requires two matrix operation objects: one for \f$A\f$ that implements +/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower +/// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$. +/// +/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation +/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and +/// the second operation can be created using the DenseCholesky or SparseCholesky +/// classes. If the users need to define their own operation classes, then they +/// should implement all the public member functions as in those built-in classes. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. 
+/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either +/// use the wrapper classes such as DenseCholesky and +/// SparseCholesky, or define their +/// own that implements all the public member functions as in +/// DenseCholesky. +/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver +/// it is Spectra::GEIGS_CHOLESKY. +/// +/// Below is an example that demonstrates the usage of this class. +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// #include +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x +/// const int n = 100; +/// +/// // Define the A matrix +/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n); +/// Eigen::MatrixXd A = M + M.transpose(); +/// +/// // Define the B matrix, a band matrix with 2 on the diagonal and 1 on the subdiagonals +/// Eigen::SparseMatrix B(n, n); +/// B.reserve(Eigen::VectorXi::Constant(n, 3)); +/// for(int i = 0; i < n; i++) +/// { +/// B.insert(i, i) = 2.0; +/// if(i > 0) +/// B.insert(i - 1, i) = 1.0; +/// if(i < n - 1) +/// B.insert(i + 1, i) = 1.0; +/// } +/// +/// // Construct matrix operation object using the wrapper classes +/// DenseSymMatProd op(A); +/// SparseCholesky Bop(B); +/// +/// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues +/// SymGEigsSolver, SparseCholesky, GEIGS_CHOLESKY> +/// geigs(&op, &Bop, 3, 6); +/// +/// // Initialize and compute +/// geigs.init(); +/// int nconv = geigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXd evalues; +/// Eigen::MatrixXd evecs; +/// if(geigs.info() == SUCCESSFUL) +/// { +/// evalues = geigs.eigenvalues(); +/// evecs = geigs.eigenvectors(); +/// } +/// +/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl; +/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl; +/// +/// // Verify results using the generalized eigen solver in Eigen +/// Eigen::MatrixXd Bdense = B; +/// Eigen::GeneralizedSelfAdjointEigenSolver es(A, Bdense); +/// +/// std::cout << "Generalized eigenvalues:\n" << es.eigenvalues().tail(3) << std::endl; +/// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl; +/// +/// return 0; +/// } +/// \endcode + +// Partial specialization for GEigsMode = GEIGS_CHOLESKY +template +class SymGEigsSolver : + public SymEigsBase, IdentityBOp> +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + BOpType* m_Bop; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. It + /// should implement the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper classes such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. 
+ /// \param Bop Pointer to the \f$B\f$ matrix operation object. It + /// represents a Cholesky decomposition of \f$B\f$, and should + /// implement the lower and upper triangular solving operations: + /// calculating \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector + /// \f$v\f$, where \f$LL'=B\f$. Users could either + /// create the object from the wrapper classes such as DenseCholesky, or + /// define their own that implements all the public member functions + /// as in DenseCholesky. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// + SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) : + SymEigsBase, IdentityBOp>( + new SymGEigsCholeskyOp(*op, *Bop), NULL, nev, ncv), + m_Bop(Bop) + {} + + /// \cond + + ~SymGEigsSolver() + { + // m_op contains the constructed SymGEigsCholeskyOp object + delete this->m_op; + } + + Matrix eigenvectors(Index nvec) const + { + Matrix res = SymEigsBase, IdentityBOp>::eigenvectors(nvec); + Vector tmp(res.rows()); + const Index nconv = res.cols(); + for (Index i = 0; i < nconv; i++) + { + m_Bop->upper_triangular_solve(&res(0, i), tmp.data()); + res.col(i).noalias() = tmp; + } + + return res; + } + + Matrix eigenvectors() const + { + return SymGEigsSolver::eigenvectors(this->m_nev); + } + + /// \endcond +}; + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$ +/// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations +/// defined below. +/// +/// This solver requires two matrix operation objects: one for \f$A\f$ that implements +/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the +/// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$. +/// +/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation +/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and +/// the second operation can be created using the SparseRegularInverse class. There is no +/// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode +/// is always preferred. If the users need to define their own operation classes, then they +/// should implement all the public member functions as in those built-in classes. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// \tparam BOpType The name of the matrix operation class for \f$B\f$. 
Users could either +/// use the wrapper class SparseRegularInverse, or define their +/// own that implements all the public member functions as in +/// SparseRegularInverse. +/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver +/// it is Spectra::GEIGS_REGULAR_INVERSE. +/// + +// Partial specialization for GEigsMode = GEIGS_REGULAR_INVERSE +template +class SymGEigsSolver : + public SymEigsBase, BOpType> +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. It + /// should implement the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper classes such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. It should + /// implement the multiplication operation \f$Bv\f$ and the linear equation + /// solving operation \f$B^{-1}v\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class SparseRegularInverse, or + /// define their own that implements all the public member functions + /// as in SparseRegularInverse. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// + SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) : + SymEigsBase, BOpType>( + new SymGEigsRegInvOp(*op, *Bop), Bop, nev, ncv) + {} + + /// \cond + ~SymGEigsSolver() + { + // m_op contains the constructed SymGEigsRegInvOp object + delete this->m_op; + } + /// \endcond +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/Util/CompInfo.h b/gtsam/3rdparty/Spectra/Util/CompInfo.h new file mode 100644 index 000000000..07b8399a1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/CompInfo.h @@ -0,0 +1,35 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef COMP_INFO_H +#define COMP_INFO_H + +namespace Spectra { + +/// +/// \ingroup Enumerations +/// +/// The enumeration to report the status of computation. +/// +enum COMPUTATION_INFO +{ + SUCCESSFUL = 0, ///< Computation was successful. + + NOT_COMPUTED, ///< Used in eigen solvers, indicating that computation + ///< has not been conducted. Users should call + ///< the `compute()` member function of solvers. + + NOT_CONVERGING, ///< Used in eigen solvers, indicating that some eigenvalues + ///< did not converge. The `compute()` + ///< function returns the number of converged eigenvalues. + + NUMERICAL_ISSUE ///< Used in Cholesky decomposition, indicating that the + ///< matrix is not positive definite. 
+}; + +} // namespace Spectra + +#endif // COMP_INFO_H diff --git a/gtsam/3rdparty/Spectra/Util/GEigsMode.h b/gtsam/3rdparty/Spectra/Util/GEigsMode.h new file mode 100644 index 000000000..a547ac0bf --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/GEigsMode.h @@ -0,0 +1,32 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEIGS_MODE_H +#define GEIGS_MODE_H + +namespace Spectra { + +/// +/// \ingroup Enumerations +/// +/// The enumeration to specify the mode of generalized eigenvalue solver. +/// +enum GEIGS_MODE +{ + GEIGS_CHOLESKY = 0, ///< Using Cholesky decomposition to solve generalized eigenvalues. + + GEIGS_REGULAR_INVERSE, ///< Regular inverse mode for generalized eigenvalue solver. + + GEIGS_SHIFT_INVERT, ///< Shift-and-invert mode for generalized eigenvalue solver. + + GEIGS_BUCKLING, ///< Buckling mode for generalized eigenvalue solver. + + GEIGS_CAYLEY ///< Cayley transformation mode for generalized eigenvalue solver. +}; + +} // namespace Spectra + +#endif // GEIGS_MODE_H diff --git a/gtsam/3rdparty/Spectra/Util/SelectionRule.h b/gtsam/3rdparty/Spectra/Util/SelectionRule.h new file mode 100644 index 000000000..237950b4d --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/SelectionRule.h @@ -0,0 +1,275 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SELECTION_RULE_H +#define SELECTION_RULE_H + +#include // std::vector +#include // std::abs +#include // std::sort +#include // std::complex +#include // std::pair +#include // std::invalid_argument + +namespace Spectra { + +/// +/// \defgroup Enumerations +/// +/// Enumeration types for the selection rule of eigenvalues. +/// + +/// +/// \ingroup Enumerations +/// +/// The enumeration of selection rules of desired eigenvalues. +/// +enum SELECT_EIGENVALUE +{ + LARGEST_MAGN = 0, ///< Select eigenvalues with largest magnitude. Magnitude + ///< means the absolute value for real numbers and norm for + ///< complex numbers. Applies to both symmetric and general + ///< eigen solvers. + + LARGEST_REAL, ///< Select eigenvalues with largest real part. Only for general eigen solvers. + + LARGEST_IMAG, ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers. + + LARGEST_ALGE, ///< Select eigenvalues with largest algebraic value, considering + ///< any negative sign. Only for symmetric eigen solvers. + + SMALLEST_MAGN, ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general + ///< eigen solvers. + + SMALLEST_REAL, ///< Select eigenvalues with smallest real part. Only for general eigen solvers. + + SMALLEST_IMAG, ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers. + + SMALLEST_ALGE, ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers. + + BOTH_ENDS ///< Select eigenvalues half from each end of the spectrum. When + ///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers. +}; + +/// +/// \ingroup Enumerations +/// +/// The enumeration of selection rules of desired eigenvalues. Alias for `SELECT_EIGENVALUE`. 
+/// +enum SELECT_EIGENVALUE_ALIAS +{ + WHICH_LM = 0, ///< Alias for `LARGEST_MAGN` + WHICH_LR, ///< Alias for `LARGEST_REAL` + WHICH_LI, ///< Alias for `LARGEST_IMAG` + WHICH_LA, ///< Alias for `LARGEST_ALGE` + WHICH_SM, ///< Alias for `SMALLEST_MAGN` + WHICH_SR, ///< Alias for `SMALLEST_REAL` + WHICH_SI, ///< Alias for `SMALLEST_IMAG` + WHICH_SA, ///< Alias for `SMALLEST_ALGE` + WHICH_BE ///< Alias for `BOTH_ENDS` +}; + +/// \cond + +// Get the element type of a "scalar" +// ElemType => double +// ElemType< std::complex > => double +template +class ElemType +{ +public: + typedef T type; +}; + +template +class ElemType > +{ +public: + typedef T type; +}; + +// When comparing eigenvalues, we first calculate the "target" +// to sort. For example, if we want to choose the eigenvalues with +// largest magnitude, the target will be -abs(x). +// The minus sign is due to the fact that std::sort() sorts in ascending order. + +// Default target: throw an exception +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + throw std::invalid_argument("incompatible selection rule"); + return -abs(val); + } +}; + +// Specialization for LARGEST_MAGN +// This covers [float, double, complex] x [LARGEST_MAGN] +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + return -abs(val); + } +}; + +// Specialization for LARGEST_REAL +// This covers [complex] x [LARGEST_REAL] +template +class SortingTarget, LARGEST_REAL> +{ +public: + static RealType get(const std::complex& val) + { + return -val.real(); + } +}; + +// Specialization for LARGEST_IMAG +// This covers [complex] x [LARGEST_IMAG] +template +class SortingTarget, LARGEST_IMAG> +{ +public: + static RealType get(const std::complex& val) + { + using std::abs; + return -abs(val.imag()); + } +}; + +// Specialization for LARGEST_ALGE +// This covers [float, double] x [LARGEST_ALGE] +template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return -val; + } +}; + +// Here BOTH_ENDS is the same as LARGEST_ALGE, but +// we need some additional steps, which are done in +// SymEigsSolver.h => retrieve_ritzpair(). +// There we move the smallest values to the proper locations. 
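//
// For illustration (not part of the library): with ncv = 5 Ritz values
// already sorted as {5, 4, 3, 2, 1} by the LARGEST_ALGE rule, the
// reordering in retrieve_ritzpair() yields
//
//     {5, 1, 4, 2, 3}
//
// i.e. largest, smallest, 2nd largest, 2nd smallest, ..., so the first
// nev entries always hold the wanted values from both ends of the spectrum.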
+template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return -val; + } +}; + +// Specialization for SMALLEST_MAGN +// This covers [float, double, complex] x [SMALLEST_MAGN] +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + return abs(val); + } +}; + +// Specialization for SMALLEST_REAL +// This covers [complex] x [SMALLEST_REAL] +template +class SortingTarget, SMALLEST_REAL> +{ +public: + static RealType get(const std::complex& val) + { + return val.real(); + } +}; + +// Specialization for SMALLEST_IMAG +// This covers [complex] x [SMALLEST_IMAG] +template +class SortingTarget, SMALLEST_IMAG> +{ +public: + static RealType get(const std::complex& val) + { + using std::abs; + return abs(val.imag()); + } +}; + +// Specialization for SMALLEST_ALGE +// This covers [float, double] x [SMALLEST_ALGE] +template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return val; + } +}; + +// Sort eigenvalues and return the order index +template +class PairComparator +{ +public: + bool operator()(const PairType& v1, const PairType& v2) + { + return v1.first < v2.first; + } +}; + +template +class SortEigenvalue +{ +private: + typedef typename ElemType::type TargetType; // Type of the sorting target, will be + // a floating number type, e.g. "double" + typedef std::pair PairType; // Type of the sorting pair, including + // the sorting target and the index + + std::vector pair_sort; + +public: + SortEigenvalue(const T* start, int size) : + pair_sort(size) + { + for (int i = 0; i < size; i++) + { + pair_sort[i].first = SortingTarget::get(start[i]); + pair_sort[i].second = i; + } + PairComparator comp; + std::sort(pair_sort.begin(), pair_sort.end(), comp); + } + + std::vector index() + { + std::vector ind(pair_sort.size()); + for (unsigned int i = 0; i < ind.size(); i++) + ind[i] = pair_sort[i].second; + + return ind; + } +}; + +/// \endcond + +} // namespace Spectra + +#endif // SELECTION_RULE_H diff --git a/gtsam/3rdparty/Spectra/Util/SimpleRandom.h b/gtsam/3rdparty/Spectra/Util/SimpleRandom.h new file mode 100644 index 000000000..83fa7c86f --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/SimpleRandom.h @@ -0,0 +1,91 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SIMPLE_RANDOM_H +#define SIMPLE_RANDOM_H + +#include + +/// \cond + +namespace Spectra { + +// We need a simple pseudo random number generator here: +// 1. It is used to generate initial and restarted residual vector. +// 2. It is not necessary to be so "random" and advanced. All we hope +// is that the residual vector is not in the space spanned by the +// current Krylov space. This should be met almost surely. +// 3. We don't want to call RNG in C++, since we actually want the +// algorithm to be deterministic. Also, calling RNG in C/C++ is not +// allowed in R packages submitted to CRAN. +// 4. The method should be as simple as possible, so an LCG is enough. +// 5. 
Based on public domain code by Ray Gardner +// http://stjarnhimlen.se/snippets/rg_rand.c + +template +class SimpleRandom +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + const unsigned int m_a; // multiplier + const unsigned long m_max; // 2^31 - 1 + long m_rand; + + inline long next_long_rand(long seed) + { + unsigned long lo, hi; + + lo = m_a * (long) (seed & 0xFFFF); + hi = m_a * (long) ((unsigned long) seed >> 16); + lo += (hi & 0x7FFF) << 16; + if (lo > m_max) + { + lo &= m_max; + ++lo; + } + lo += hi >> 15; + if (lo > m_max) + { + lo &= m_max; + ++lo; + } + return (long) lo; + } + +public: + SimpleRandom(unsigned long init_seed) : + m_a(16807), + m_max(2147483647L), + m_rand(init_seed ? (init_seed & m_max) : 1) + {} + + Scalar random() + { + m_rand = next_long_rand(m_rand); + return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5); + } + + // Vector of random numbers of type Scalar + // Ranging from -0.5 to 0.5 + Vector random_vec(const Index len) + { + Vector res(len); + for (Index i = 0; i < len; i++) + { + m_rand = next_long_rand(m_rand); + res[i] = Scalar(m_rand) / Scalar(m_max) - Scalar(0.5); + } + return res; + } +}; + +} // namespace Spectra + +/// \endcond + +#endif // SIMPLE_RANDOM_H diff --git a/gtsam/3rdparty/Spectra/Util/TypeTraits.h b/gtsam/3rdparty/Spectra/Util/TypeTraits.h new file mode 100644 index 000000000..29288c5a6 --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/TypeTraits.h @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef TYPE_TRAITS_H +#define TYPE_TRAITS_H + +#include +#include + +/// \cond + +namespace Spectra { + +// For a real value type "Scalar", we want to know its smallest +// positive value, i.e., std::numeric_limits::min(). +// However, we must take non-standard value types into account, +// so we rely on Eigen::NumTraits. +// +// Eigen::NumTraits has defined epsilon() and lowest(), but +// lowest() means negative highest(), which is a very small +// negative value. +// +// Therefore, we manually define this limit, and use eplison()^3 +// to mimic it for non-standard types. 
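//
// For example (illustrative only, assuming <iostream> and <limits>):
//
//     std::cout << TypeTraits<double>::min();           // 2.22507e-308
//     std::cout << std::numeric_limits<double>::min();  // the same value
//
// whereas a scalar type without a specialization below falls back to
// Eigen::NumTraits<Scalar>::epsilon() raised to the third power.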
+ +// Generic definition +template +struct TypeTraits +{ + static inline Scalar min() + { + return Eigen::numext::pow(Eigen::NumTraits::epsilon(), Scalar(3)); + } +}; + +// Full specialization +template <> +struct TypeTraits +{ + static inline float min() + { + return std::numeric_limits::min(); + } +}; + +template <> +struct TypeTraits +{ + static inline double min() + { + return std::numeric_limits::min(); + } +}; + +template <> +struct TypeTraits +{ + static inline long double min() + { + return std::numeric_limits::min(); + } +}; + +} // namespace Spectra + +/// \endcond + +#endif // TYPE_TRAITS_H diff --git a/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h b/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h new file mode 100644 index 000000000..69c4d92c0 --- /dev/null +++ b/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h @@ -0,0 +1,552 @@ +// Written by Anna Araslanova +// Modified by Yixuan Qiu +// License: MIT + +#ifndef LOBPCG_SOLVER +#define LOBPCG_SOLVER + +#include +#include + +#include +#include +#include +#include +#include + +#include "../SymGEigsSolver.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// + +/// *** METHOD +/// The class represent the LOBPCG algorithm, which was invented by Andrew Knyazev +/// Theoretical background of the procedure can be found in the articles below +/// - Knyazev, A.V., 2001. Toward the optimal preconditioned eigensolver : Locally optimal block preconditioned conjugate gradient method.SIAM journal on scientific computing, 23(2), pp.517 - 541. +/// - Knyazev, A.V., Argentati, M.E., Lashuk, I. and Ovtchinnikov, E.E., 2007. Block locally optimal preconditioned eigenvalue xolvers(BLOPEX) in HYPRE and PETSc.SIAM Journal on Scientific Computing, 29(5), pp.2224 - 2239. +/// +/// *** CONDITIONS OF USE +/// Locally Optimal Block Preconditioned Conjugate Gradient(LOBPCG) is a method for finding the M smallest eigenvalues +/// and eigenvectors of a large symmetric positive definite generalized eigenvalue problem +/// \f$Ax=\lambda Bx,\f$ +/// where \f$A_{NxN}\f$ is a symmetric matrix, \f$B\f$ is symmetric and positive - definite. 
\f$A and B\f$ are also assumed large and sparse +/// \f$\textit{M}\f$ should be \f$\<< textit{N}\f$ (at least \f$\textit{5M} < \textit{N} \f$) +/// +/// *** ARGUMENTS +/// Eigen::SparseMatrix A; // N*N - Ax = lambda*Bx, lrage and sparse +/// Eigen::SparseMatrix X; // N*M - initial approximations to eigenvectors (random in general case) +/// Spectra::LOBPCGSolver solver(A, X); +/// *Eigen::SparseMatrix B; // N*N - Ax = lambda*Bx, sparse, positive definite +/// solver.setConstraints(B); +/// *Eigen::SparseMatrix Y; // N*K - constraints, already found eigenvectors +/// solver.setB(B); +/// *Eigen::SparseMatrix T; // N*N - preconditioner ~ A^-1 +/// solver.setPreconditioner(T); +/// +/// *** OUTCOMES +/// solver.solve(); // compute eigenpairs // void +/// solver.info(); // state of converjance // int +/// solver.residuals(); // get residuals to evaluate biases // Eigen::Matrix +/// solver.eigenvalues(); // get eigenvalues // Eigen::Matrix +/// solver.eigenvectors(); // get eigenvectors // Eigen::Matrix +/// +/// *** EXAMPLE +/// \code{.cpp} +/// #include +/// +/// // random A +/// Matrix a; +/// a = (Matrix::Random(10, 10).array() > 0.6).cast() * Matrix::Random(10, 10).array() * 5; +/// a = Matrix((a).triangularView()) + Matrix((a).triangularView()).transpose(); +/// for (int i = 0; i < 10; i++) +/// a(i, i) = i + 0.5; +/// std::cout << a << "\n"; +/// Eigen::SparseMatrix A(a.sparseView()); +/// // random X +/// Eigen::Matrix x; +/// x = Matrix::Random(10, 2).array(); +/// Eigen::SparseMatrix X(x.sparseView()); +/// // solve Ax = lambda*x +/// Spectra::LOBPCGSolver solver(A, X); +/// solver.compute(10, 1e-4); // 10 iterations, L2_tolerance = 1e-4*N +/// std::cout << "info\n" << solver.info() << std::endl; +/// std::cout << "eigenvalues\n" << solver.eigenvalues() << std::endl; +/// std::cout << "eigenvectors\n" << solver.eigenvectors() << std::endl; +/// std::cout << "residuals\n" << solver.residuals() << std::endl; +/// \endcode +/// + +template +class LOBPCGSolver +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef Eigen::SparseMatrix SparseMatrix; + typedef Eigen::SparseMatrix SparseComplexMatrix; + + const int m_n; // dimension of matrix A + const int m_nev; // number of eigenvalues requested + SparseMatrix A, X; + SparseMatrix m_Y, m_B, m_preconditioner; + bool flag_with_constraints, flag_with_B, flag_with_preconditioner; + +public: + SparseMatrix m_residuals; + Matrix m_evectors; + Vector m_evalues; + int m_info; + +private: + // B-orthonormalize matrix M + int orthogonalizeInPlace(SparseMatrix& M, SparseMatrix& B, + SparseMatrix& true_BM, bool has_true_BM = false) + { + SparseMatrix BM; + + if (has_true_BM == false) + { + if (flag_with_B) + { + BM = B * M; + } + else + { + BM = M; + } + } + else + { + BM = true_BM; + } + + Eigen::SimplicialLDLT chol_MBM(M.transpose() * BM); + + if (chol_MBM.info() != SUCCESSFUL) + { + // LDLT decomposition fail + m_info = chol_MBM.info(); + return chol_MBM.info(); + } + + SparseComplexMatrix Upper_MBM = chol_MBM.matrixU().template cast(); + ComplexVector D_MBM_vec = chol_MBM.vectorD().template cast(); + + D_MBM_vec = D_MBM_vec.cwiseSqrt(); + + for (int i = 0; i < D_MBM_vec.rows(); i++) + { + D_MBM_vec(i) = Complex(1.0, 0.0) / D_MBM_vec(i); + } + + SparseComplexMatrix D_MBM_mat(D_MBM_vec.asDiagonal()); + + SparseComplexMatrix U_inv(Upper_MBM.rows(), Upper_MBM.cols()); + U_inv.setIdentity(); + 
Upper_MBM.template triangularView().solveInPlace(U_inv); + + SparseComplexMatrix right_product = U_inv * D_MBM_mat; + M = M * right_product.real(); + if (flag_with_B) + { + true_BM = B * M; + } + else + { + true_BM = M; + } + + return SUCCESSFUL; + } + + void applyConstraintsInPlace(SparseMatrix& X, SparseMatrix& Y, + SparseMatrix& B) + { + SparseMatrix BY; + if (flag_with_B) + { + BY = B * Y; + } + else + { + BY = Y; + } + + SparseMatrix YBY = Y.transpose() * BY; + SparseMatrix BYX = BY.transpose() * X; + + SparseMatrix YBY_XYX = (Matrix(YBY).bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Matrix(BYX))).sparseView(); + X = X - Y * YBY_XYX; + } + + /* + return + 'AB + CD' + */ + Matrix stack_4_matricies(Matrix A, Matrix B, + Matrix C, Matrix D) + { + Matrix result(A.rows() + C.rows(), A.cols() + B.cols()); + result.topLeftCorner(A.rows(), A.cols()) = A; + result.topRightCorner(B.rows(), B.cols()) = B; + result.bottomLeftCorner(C.rows(), C.cols()) = C; + result.bottomRightCorner(D.rows(), D.cols()) = D; + return result; + } + + Matrix stack_9_matricies(Matrix A, Matrix B, Matrix C, + Matrix D, Matrix E, Matrix F, + Matrix G, Matrix H, Matrix I) + { + Matrix result(A.rows() + D.rows() + G.rows(), A.cols() + B.cols() + C.cols()); + result.block(0, 0, A.rows(), A.cols()) = A; + result.block(0, A.cols(), B.rows(), B.cols()) = B; + result.block(0, A.cols() + B.cols(), C.rows(), C.cols()) = C; + result.block(A.rows(), 0, D.rows(), D.cols()) = D; + result.block(A.rows(), A.cols(), E.rows(), E.cols()) = E; + result.block(A.rows(), A.cols() + B.cols(), F.rows(), F.cols()) = F; + result.block(A.rows() + D.rows(), 0, G.rows(), G.cols()) = G; + result.block(A.rows() + D.rows(), A.cols(), H.rows(), H.cols()) = H; + result.block(A.rows() + D.rows(), A.cols() + B.cols(), I.rows(), I.cols()) = I; + + return result; + } + + void sort_epairs(Vector& evalues, Matrix& evectors, int SelectionRule) + { + std::function cmp; + if (SelectionRule == SMALLEST_ALGE) + cmp = std::less{}; + else + cmp = std::greater{}; + + std::map epairs(cmp); + for (int i = 0; i < m_evectors.cols(); ++i) + epairs.insert(std::make_pair(evalues(i), evectors.col(i))); + + int i = 0; + for (auto& epair : epairs) + { + evectors.col(i) = epair.second; + evalues(i) = epair.first; + i++; + } + } + + void removeColumns(SparseMatrix& matrix, std::vector& colToRemove) + { + // remove columns through matrix multiplication + SparseMatrix new_matrix(matrix.cols(), matrix.cols() - int(colToRemove.size())); + int iCol = 0; + std::vector> tripletList; + tripletList.reserve(matrix.cols() - int(colToRemove.size())); + + for (int iRow = 0; iRow < matrix.cols(); iRow++) + { + if (std::find(colToRemove.begin(), colToRemove.end(), iRow) == colToRemove.end()) + { + tripletList.push_back(Eigen::Triplet(iRow, iCol, 1)); + iCol++; + } + } + + new_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + matrix = matrix * new_matrix; + } + + int checkConvergence_getBlocksize(SparseMatrix& m_residuals, Scalar tolerance_L2, std::vector& columnsToDelete) + { + // square roots from sum of squares by column + int BlockSize = m_nev; + Scalar sum, buffer; + + for (int iCol = 0; iCol < m_nev; iCol++) + { + sum = 0; + for (int iRow = 0; iRow < m_n; iRow++) + { + buffer = m_residuals.coeff(iRow, iCol); + sum += buffer * buffer; + } + + if (sqrt(sum) < tolerance_L2) + { + BlockSize--; + columnsToDelete.push_back(iCol); + } + } + return BlockSize; + } + +public: + LOBPCGSolver(const SparseMatrix& A, const SparseMatrix X) : + m_n(A.rows()), + m_nev(X.cols()), 
+ m_info(NOT_COMPUTED), + flag_with_constraints(false), + flag_with_B(false), + flag_with_preconditioner(false), + A(A), + X(X) + { + if (A.rows() != X.rows() || A.rows() != A.cols()) + throw std::invalid_argument("Wrong size"); + + //if (m_n < 5* m_nev) + // throw std::invalid_argument("The problem size is small compared to the block size. Use standard eigensolver"); + } + + void setConstraints(const SparseMatrix& Y) + { + m_Y = Y; + flag_with_constraints = true; + } + + void setB(const SparseMatrix& B) + { + if (B.rows() != A.rows() || B.cols() != A.cols()) + throw std::invalid_argument("Wrong size"); + m_B = B; + flag_with_B = true; + } + + void setPreconditioner(const SparseMatrix& preconditioner) + { + m_preconditioner = preconditioner; + flag_with_preconditioner = true; + } + + void compute(int maxit = 10, Scalar tol_div_n = 1e-7) + { + Scalar tolerance_L2 = tol_div_n * m_n; + int BlockSize; + int max_iter = std::min(m_n, maxit); + + SparseMatrix directions, AX, AR, BX, AD, ADD, DD, BDD, BD, XAD, RAD, DAD, XBD, RBD, BR, sparse_eVecX, sparse_eVecR, sparse_eVecD, inverse_matrix; + Matrix XAR, RAR, XBR, gramA, gramB, eVecX, eVecR, eVecD; + std::vector columnsToDelete; + + if (flag_with_constraints) + { + // Apply the constraints Y to X + applyConstraintsInPlace(X, m_Y, m_B); + } + + // Make initial vectors orthonormal + // implicit BX declaration + if (orthogonalizeInPlace(X, m_B, BX) != SUCCESSFUL) + { + max_iter = 0; + } + + AX = A * X; + // Solve the following NxN eigenvalue problem for all N eigenvalues and -vectors: + // first approximation via a dense problem + Eigen::EigenSolver eigs(Matrix(X.transpose() * AX)); + + if (eigs.info() != SUCCESSFUL) + { + m_info = eigs.info(); + max_iter = 0; + } + else + { + m_evalues = eigs.eigenvalues().real(); + m_evectors = eigs.eigenvectors().real(); + sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE); + sparse_eVecX = m_evectors.sparseView(); + + X = X * sparse_eVecX; + AX = AX * sparse_eVecX; + BX = BX * sparse_eVecX; + } + + for (int iter_num = 0; iter_num < max_iter; iter_num++) + { + m_residuals.resize(m_n, m_nev); + for (int i = 0; i < m_nev; i++) + { + m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); + } + BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); + + if (BlockSize == 0) + { + m_info = SUCCESSFUL; + break; + } + + // substitution of the original active mask + if (columnsToDelete.size() > 0) + { + removeColumns(m_residuals, columnsToDelete); + if (iter_num > 0) + { + removeColumns(directions, columnsToDelete); + removeColumns(AD, columnsToDelete); + removeColumns(BD, columnsToDelete); + } + columnsToDelete.clear(); // for next iteration + } + + if (flag_with_preconditioner) + { + // Apply the preconditioner to the residuals + m_residuals = m_preconditioner * m_residuals; + } + + if (flag_with_constraints) + { + // Apply the constraints Y to residuals + applyConstraintsInPlace(m_residuals, m_Y, m_B); + } + + if (orthogonalizeInPlace(m_residuals, m_B, BR) != SUCCESSFUL) + { + break; + } + AR = A * m_residuals; + + // Orthonormalize conjugate directions + if (iter_num > 0) + { + if (orthogonalizeInPlace(directions, m_B, BD, true) != SUCCESSFUL) + { + break; + } + AD = A * directions; + } + + // Perform the Rayleigh Ritz Procedure + XAR = Matrix(X.transpose() * AR); + RAR = Matrix(m_residuals.transpose() * AR); + XBR = Matrix(X.transpose() * BR); + + if (iter_num > 0) + { + XAD = X.transpose() * AD; + RAD = m_residuals.transpose() * AD; + DAD = directions.transpose() * AD; + XBD = 
X.transpose() * BD; + RBD = m_residuals.transpose() * BD; + + gramA = stack_9_matricies(m_evalues.asDiagonal(), XAR, XAD, XAR.transpose(), RAR, RAD, XAD.transpose(), RAD.transpose(), DAD.transpose()); + gramB = stack_9_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBD, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize), RBD, XBD.transpose(), RBD.transpose(), Matrix::Identity(BlockSize, BlockSize)); + } + else + { + gramA = stack_4_matricies(m_evalues.asDiagonal(), XAR, XAR.transpose(), RAR); + gramB = stack_4_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize)); + } + + //calculate the lowest/largest m eigenpairs; Solve the generalized eigenvalue problem. + DenseSymMatProd Aop(gramA); + DenseCholesky Bop(gramB); + + SymGEigsSolver, + DenseCholesky, GEIGS_CHOLESKY> + geigs(&Aop, &Bop, m_nev, std::min(10, int(gramA.rows()) - 1)); + + geigs.init(); + int nconv = geigs.compute(); + + //Mat evecs; + if (geigs.info() == SUCCESSFUL) + { + m_evalues = geigs.eigenvalues(); + m_evectors = geigs.eigenvectors(); + sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE); + } + else + { + // Problem With General EgenVec + m_info = geigs.info(); + break; + } + + // Compute Ritz vectors + if (iter_num > 0) + { + eVecX = m_evectors.block(0, 0, m_nev, m_nev); + eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); + eVecD = m_evectors.block(m_nev + BlockSize, 0, BlockSize, m_nev); + + sparse_eVecX = eVecX.sparseView(); + sparse_eVecR = eVecR.sparseView(); + sparse_eVecD = eVecD.sparseView(); + + DD = m_residuals * sparse_eVecR; // new conjugate directions + ADD = AR * sparse_eVecR; + BDD = BR * sparse_eVecR; + + DD = DD + directions * sparse_eVecD; + ADD = ADD + AD * sparse_eVecD; + BDD = BDD + BD * sparse_eVecD; + } + else + { + eVecX = m_evectors.block(0, 0, m_nev, m_nev); + eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); + + sparse_eVecX = eVecX.sparseView(); + sparse_eVecR = eVecR.sparseView(); + + DD = m_residuals * sparse_eVecR; + ADD = AR * sparse_eVecR; + BDD = BR * sparse_eVecR; + } + + X = X * sparse_eVecX + DD; + AX = AX * sparse_eVecX + ADD; + BX = BX * sparse_eVecX + BDD; + + directions = DD; + AD = ADD; + BD = BDD; + + } // iteration loop + + // calculate last residuals + m_residuals.resize(m_n, m_nev); + for (int i = 0; i < m_nev; i++) + { + m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); + } + BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); + + if (BlockSize == 0) + { + m_info = SUCCESSFUL; + } + } // compute + + Vector eigenvalues() + { + return m_evalues; + } + + Matrix eigenvectors() + { + return m_evectors; + } + + Matrix residuals() + { + return Matrix(m_residuals); + } + + int info() { return m_info; } +}; + +} // namespace Spectra + +#endif // LOBPCG_SOLVER diff --git a/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h b/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h new file mode 100644 index 000000000..2fab26b97 --- /dev/null +++ b/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h @@ -0,0 +1,202 @@ +// Copyright (C) 2018 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
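+// A minimal usage sketch (not part of the upstream Spectra header; the
+// template arguments <double, Eigen::MatrixXd> are assumed from the
+// Scalar/MatrixType parameters of PartialSVDSolver declared below):
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 50);
+//   Spectra::PartialSVDSolver<double, Eigen::MatrixXd> svds(A, 3, 10);
+//   svds.compute();                        // defaults: maxit = 1000, tol = 1e-10
+//   Eigen::VectorXd s = svds.singular_values();
+//   Eigen::MatrixXd U = svds.matrix_U(3);  // left singular vectors
+//   Eigen::MatrixXd V = svds.matrix_V(3);  // right singular vectors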
+ +#ifndef PARTIAL_SVD_SOLVER_H +#define PARTIAL_SVD_SOLVER_H + +#include +#include "../SymEigsSolver.h" + +namespace Spectra { + +// Abstract class for matrix operation +template +class SVDMatOp +{ +public: + virtual int rows() const = 0; + virtual int cols() const = 0; + + // y_out = A' * A * x_in or y_out = A * A' * x_in + virtual void perform_op(const Scalar* x_in, Scalar* y_out) = 0; + + virtual ~SVDMatOp() {} +}; + +// Operation of a tall matrix in SVD +// We compute the eigenvalues of A' * A +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template +class SVDTallMatOp : public SVDMatOp +{ +private: + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_dim; + Vector m_cache; + +public: + // Constructor + SVDTallMatOp(ConstGenericMatrix& mat) : + m_mat(mat), + m_dim(std::min(mat.rows(), mat.cols())), + m_cache(mat.rows()) + {} + + // These are the rows and columns of A' * A + int rows() const { return m_dim; } + int cols() const { return m_dim; } + + // y_out = A' * A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.cols()); + m_cache.noalias() = m_mat * x; + y.noalias() = m_mat.transpose() * m_cache; + } +}; + +// Operation of a wide matrix in SVD +// We compute the eigenvalues of A * A' +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template +class SVDWideMatOp : public SVDMatOp +{ +private: + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_dim; + Vector m_cache; + +public: + // Constructor + SVDWideMatOp(ConstGenericMatrix& mat) : + m_mat(mat), + m_dim(std::min(mat.rows(), mat.cols())), + m_cache(mat.cols()) + {} + + // These are the rows and columns of A * A' + int rows() const { return m_dim; } + int cols() const { return m_dim; } + + // y_out = A * A' * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + MapConstVec x(x_in, m_mat.rows()); + MapVec y(y_out, m_mat.rows()); + m_cache.noalias() = m_mat.transpose() * x; + y.noalias() = m_mat * m_cache; + } +}; + +// Partial SVD solver +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template > +class PartialSVDSolver +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_m; + const int m_n; + SVDMatOp* m_op; + SymEigsSolver >* m_eigs; + int m_nconv; + Matrix m_evecs; + +public: + // Constructor + PartialSVDSolver(ConstGenericMatrix& mat, int ncomp, int ncv) : + m_mat(mat), m_m(mat.rows()), m_n(mat.cols()), m_evecs(0, 0) + { + // Determine the matrix type, tall or wide + if (m_m > m_n) + { + m_op = new SVDTallMatOp(mat); + } + else + { + m_op = new SVDWideMatOp(mat); + } + + // Solver object + m_eigs = new SymEigsSolver >(m_op, ncomp, ncv); + } + + // Destructor + virtual ~PartialSVDSolver() + { + delete m_eigs; + delete m_op; + } + + // Computation + int compute(int maxit = 1000, Scalar tol = 1e-10) + { + m_eigs->init(); + m_nconv = m_eigs->compute(maxit, tol); + + return m_nconv; + } + + // The converged singular values + Vector singular_values() const + { + Vector svals = m_eigs->eigenvalues().cwiseSqrt(); + + return svals; + } + + // The converged left singular vectors + Matrix matrix_U(int nu) + { + if (m_evecs.cols() < 1) + { 
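+            // Eigenvectors of A' * A (tall case) or A * A' (wide case) are
+            // computed lazily on the first request and cached for later
+            // matrix_U() / matrix_V() calls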
+ m_evecs = m_eigs->eigenvectors(); + } + nu = std::min(nu, m_nconv); + if (m_m <= m_n) + { + return m_evecs.leftCols(nu); + } + + return m_mat * (m_evecs.leftCols(nu).array().rowwise() / m_eigs->eigenvalues().head(nu).transpose().array().sqrt()).matrix(); + } + + // The converged right singular vectors + Matrix matrix_V(int nv) + { + if (m_evecs.cols() < 1) + { + m_evecs = m_eigs->eigenvectors(); + } + nv = std::min(nv, m_nconv); + if (m_m > m_n) + { + return m_evecs.leftCols(nv); + } + + return m_mat.transpose() * (m_evecs.leftCols(nv).array().rowwise() / m_eigs->eigenvalues().head(nv).transpose().array().sqrt()).matrix(); + } +}; + +} // namespace Spectra + +#endif // PARTIAL_SVD_SOLVER_H diff --git a/gtsam/CMakeLists.txt b/gtsam/CMakeLists.txt index 49b8bc19b..8736a5954 100644 --- a/gtsam/CMakeLists.txt +++ b/gtsam/CMakeLists.txt @@ -136,6 +136,8 @@ target_include_directories(gtsam BEFORE PUBLIC # SuiteSparse_config $ $ + # Spectra + $ # CCOLAMD $ $ From 68a8320c6821e5d742d45411dcd0b69ee886e94e Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 13 Aug 2020 14:17:51 -0500 Subject: [PATCH 040/142] default string value for printing Imu factors --- gtsam/navigation/CombinedImuFactor.cpp | 9 +++++---- gtsam/navigation/CombinedImuFactor.h | 4 ++-- gtsam/navigation/ImuFactor.cpp | 14 +++++++------- gtsam/navigation/ImuFactor.h | 8 ++++---- gtsam/navigation/PreintegratedRotation.cpp | 5 ++--- gtsam/navigation/PreintegrationBase.cpp | 2 +- gtsam/navigation/PreintegrationBase.h | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/gtsam/navigation/CombinedImuFactor.cpp b/gtsam/navigation/CombinedImuFactor.cpp index 49c666030..e41a8de44 100644 --- a/gtsam/navigation/CombinedImuFactor.cpp +++ b/gtsam/navigation/CombinedImuFactor.cpp @@ -167,10 +167,11 @@ gtsam::NonlinearFactor::shared_ptr CombinedImuFactor::clone() const { //------------------------------------------------------------------------------ void CombinedImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "CombinedImuFactor(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << "," - << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) << "," - << keyFormatter(this->key6()) << ")\n"; + cout << (s == "" ? 
s : s + "\n") << "CombinedImuFactor(" + << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," + << keyFormatter(this->key3()) << "," << keyFormatter(this->key4()) << "," + << keyFormatter(this->key5()) << "," << keyFormatter(this->key6()) + << ")\n"; _PIM_.print(" preintegrated measurements:"); this->noiseModel_->print(" noise model: "); } diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index 55cc48eda..387353136 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -303,8 +303,8 @@ public: GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const CombinedImuFactor&); /// print - void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const override; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; /// equals bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; diff --git a/gtsam/navigation/ImuFactor.cpp b/gtsam/navigation/ImuFactor.cpp index ac7221cd9..cebddf05d 100644 --- a/gtsam/navigation/ImuFactor.cpp +++ b/gtsam/navigation/ImuFactor.cpp @@ -130,10 +130,10 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor& f) { //------------------------------------------------------------------------------ void ImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "ImuFactor(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << "," - << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) - << ")\n"; + cout << (s == "" ? s : s + "\n") << "ImuFactor(" << keyFormatter(this->key1()) + << "," << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) + << "," << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) + << ")\n"; cout << *this << endl; } @@ -226,9 +226,9 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor2& f) { //------------------------------------------------------------------------------ void ImuFactor2::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "ImuFactor2(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) - << ")\n"; + cout << (s == "" ? 
s : s + "\n") << "ImuFactor2(" + << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," + << keyFormatter(this->key3()) << ")\n"; cout << *this << endl; } diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index 957478e97..51df3f24a 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -205,8 +205,8 @@ public: /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const ImuFactor&); - void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const override; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /// @} @@ -283,8 +283,8 @@ public: /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const ImuFactor2&); - void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const override; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /// @} diff --git a/gtsam/navigation/PreintegratedRotation.cpp b/gtsam/navigation/PreintegratedRotation.cpp index c5d48b734..f827c7c59 100644 --- a/gtsam/navigation/PreintegratedRotation.cpp +++ b/gtsam/navigation/PreintegratedRotation.cpp @@ -26,12 +26,11 @@ using namespace std; namespace gtsam { void PreintegratedRotationParams::print(const string& s) const { - cout << s << endl; + cout << (s == "" ? s : s + "\n") << endl; cout << "gyroscopeCovariance:\n[\n" << gyroscopeCovariance << "\n]" << endl; if (omegaCoriolis) cout << "omegaCoriolis = (" << omegaCoriolis->transpose() << ")" << endl; - if (body_P_sensor) - body_P_sensor->print("body_P_sensor"); + if (body_P_sensor) body_P_sensor->print("body_P_sensor"); } bool PreintegratedRotationParams::equals( diff --git a/gtsam/navigation/PreintegrationBase.cpp b/gtsam/navigation/PreintegrationBase.cpp index 45560f34d..111594663 100644 --- a/gtsam/navigation/PreintegrationBase.cpp +++ b/gtsam/navigation/PreintegrationBase.cpp @@ -46,7 +46,7 @@ ostream& operator<<(ostream& os, const PreintegrationBase& pim) { //------------------------------------------------------------------------------ void PreintegrationBase::print(const string& s) const { - cout << s << *this << endl; + cout << (s == "" ? 
s : s + "\n") << *this << endl; } //------------------------------------------------------------------------------ diff --git a/gtsam/navigation/PreintegrationBase.h b/gtsam/navigation/PreintegrationBase.h index 2f02a95b8..e118a6232 100644 --- a/gtsam/navigation/PreintegrationBase.h +++ b/gtsam/navigation/PreintegrationBase.h @@ -116,7 +116,7 @@ class GTSAM_EXPORT PreintegrationBase { /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const PreintegrationBase& pim); - virtual void print(const std::string& s) const; + virtual void print(const std::string& s="") const; /// @} /// @name Main functionality From e8f91c663f36f4edede0d044e5069a37d5801084 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 13 Aug 2020 21:55:17 -0500 Subject: [PATCH 041/142] fix FrobeniusBetweenFactor declaration --- gtsam/slam/FrobeniusFactor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/slam/FrobeniusFactor.h b/gtsam/slam/FrobeniusFactor.h index f254b4b81..474cf6143 100644 --- a/gtsam/slam/FrobeniusFactor.h +++ b/gtsam/slam/FrobeniusFactor.h @@ -92,7 +92,7 @@ class FrobeniusFactor : public NoiseModelFactor2 { * and in fact only SO3 and SO4 really work, as we need SO::AdjointMap. */ template -GTSAM_EXPORT class FrobeniusBetweenFactor : public NoiseModelFactor2 { +class GTSAM_EXPORT FrobeniusBetweenFactor : public NoiseModelFactor2 { Rot R12_; ///< measured rotation between R1 and R2 Eigen::Matrix R2hat_H_R1_; ///< fixed derivative of R2hat wrpt R1 From 4194f97897819ace187c3d6955bd6a4aacfa77e4 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Fri, 14 Aug 2020 14:58:07 -0500 Subject: [PATCH 042/142] comment out timing-out CI stage --- .travis.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 2069f48e0..986e86cc2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -90,11 +90,11 @@ jobs: env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON script: bash .travis.sh -b # on Linux, with GTSAM_WITH_TBB on to make sure GTSAM still compiles/tests - - stage: special - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON - script: bash .travis.sh -t + # - stage: special + # os: linux + # compiler: gcc + # env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON + # script: bash .travis.sh -t # -------- STAGE 2: TESTS ----------- # on Mac, GCC - stage: test From a978c15d8e17eeac95b334300e9b4fdff6e4110d Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 12 Aug 2020 19:41:56 -0400 Subject: [PATCH 043/142] Templated some methods internally --- gtsam/slam/dataset.cpp | 360 ++++++++++++++++++------------- gtsam/slam/dataset.h | 45 ++-- gtsam/slam/tests/testDataset.cpp | 34 ++- 3 files changed, 267 insertions(+), 172 deletions(-) diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 3a2d04127..4f56d3afe 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -105,10 +105,10 @@ string createRewrittenFileName(const string& name) { } /* ************************************************************************* */ -GraphAndValues load2D(pair dataset, int maxID, +GraphAndValues load2D(pair dataset, Key maxNr, bool addNoise, bool smart, NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { - return load2D(dataset.first, dataset.second, maxID, addNoise, smart, + return load2D(dataset.first, dataset.second, maxNr, addNoise, smart, noiseFormat, 
kernelFunctionType); } @@ -226,6 +226,54 @@ boost::optional parseVertexLandmark(istream& is, const string& } } +/* ************************************************************************* */ +// Parse types T into a Key-indexed map +template +static map parseIntoMap(const string &filename, Key maxNr = 0) { + ifstream is(filename.c_str()); + if (!is) + throw invalid_argument("parse: can not find file " + filename); + + map result; + string tag; + while (!is.eof()) { + boost::optional> indexed_t; + is >> indexed_t; + if (indexed_t) { + // optional filter + if (maxNr && indexed_t->first >= maxNr) + continue; + result.insert(*indexed_t); + } + is.ignore(LINESIZE, '\n'); + } + return result; +} + +/* ************************************************************************* */ +istream &operator>>(istream &is, boost::optional &indexed) { + string tag; + is >> tag; + indexed = parseVertexPose(is, tag); + return is; +} + +map parse2DPoses(const string &filename, Key maxNr) { + return parseIntoMap(filename, maxNr); +} + +/* ************************************************************************* */ +istream &operator>>(istream &is, boost::optional &indexed) { + string tag; + is >> tag; + indexed = parseVertexLandmark(is, tag); + return is; +} + +map parse2DLandmarks(const string &filename, Key maxNr) { + return parseIntoMap(filename, maxNr); +} + /* ************************************************************************* */ boost::optional parseEdge(istream& is, const string& tag) { if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") @@ -241,49 +289,27 @@ boost::optional parseEdge(istream& is, const string& tag) { } /* ************************************************************************* */ -GraphAndValues load2D(const string& filename, SharedNoiseModel model, Key maxID, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { +GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, + bool addNoise, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { + + Values::shared_ptr initial(new Values); + + const auto poses = parse2DPoses(filename, maxNr); + for (const auto& key_pose : poses) { + initial->insert(key_pose.first, key_pose.second); + } + const auto landmarks = parse2DLandmarks(filename, maxNr); + for (const auto& key_landmark : landmarks) { + initial->insert(key_landmark.first, key_landmark.second); + } ifstream is(filename.c_str()); if (!is) throw invalid_argument("load2D: can not find file " + filename); - Values::shared_ptr initial(new Values); NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); - string tag; - - // load the poses and landmarks - while (!is.eof()) { - if (!(is >> tag)) - break; - - const auto indexed_pose = parseVertexPose(is, tag); - if (indexed_pose) { - Key id = indexed_pose->first; - - // optional filter - if (maxID && id >= maxID) - continue; - - initial->insert(id, indexed_pose->second); - } - const auto indexed_landmark = parseVertexLandmark(is, tag); - if (indexed_landmark) { - Key id = indexed_landmark->first; - - // optional filter - if (maxID && id >= maxID) - continue; - - initial->insert(id, indexed_landmark->second); - } - is.ignore(LINESIZE, '\n'); - } - is.clear(); /* clears the end-of-file and error flags */ - is.seekg(0, ios::beg); - // If asked, create a sampler with random number generator std::unique_ptr sampler; if (addNoise) { @@ -302,6 +328,7 @@ GraphAndValues load2D(const string& filename, SharedNoiseModel model, Key maxID, 
bool haveLandmark = false; const bool useModelInFile = !model; while (!is.eof()) { + string tag; if (!(is >> tag)) break; @@ -315,7 +342,7 @@ GraphAndValues load2D(const string& filename, SharedNoiseModel model, Key maxID, kernelFunctionType); // optional filter - if (maxID && (id1 >= maxID || id2 >= maxID)) + if (maxNr && (id1 >= maxNr || id2 >= maxNr)) continue; if (useModelInFile) @@ -375,7 +402,7 @@ GraphAndValues load2D(const string& filename, SharedNoiseModel model, Key maxID, if (tag == "LANDMARK" || tag == "BR") { // optional filter - if (maxID && id1 >= maxID) + if (maxNr && id1 >= maxNr) continue; // Create noise model @@ -404,8 +431,8 @@ GraphAndValues load2D(const string& filename, SharedNoiseModel model, Key maxID, /* ************************************************************************* */ GraphAndValues load2D_robust(const string& filename, - noiseModel::Base::shared_ptr& model, int maxID) { - return load2D(filename, model, maxID); + noiseModel::Base::shared_ptr& model, Key maxNr) { + return load2D(filename, model, maxNr); } /* ************************************************************************* */ @@ -448,10 +475,10 @@ GraphAndValues readG2o(const string& g2oFile, const bool is3D, return load3D(g2oFile); } else { // just call load2D - int maxID = 0; + Key maxNr = 0; bool addNoise = false; bool smart = true; - return load2D(g2oFile, SharedNoiseModel(), maxID, addNoise, smart, + return load2D(g2oFile, SharedNoiseModel(), maxNr, addNoise, smart, NoiseFormatG2O, kernelFunctionType); } } @@ -516,8 +543,8 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, Pose2 pose = factor->measured(); //.inverse(); stream << "EDGE_SE2 " << factor->key1() << " " << factor->key2() << " " << pose.x() << " " << pose.y() << " " << pose.theta(); - for (int i = 0; i < 3; i++){ - for (int j = i; j < 3; j++){ + for (size_t i = 0; i < 3; i++){ + for (size_t j = i; j < 3; j++){ stream << " " << Info(i, j); } } @@ -545,13 +572,13 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, << " " << q.y() << " " << q.z() << " " << q.w(); Matrix InfoG2o = I_6x6; - InfoG2o.block(0,0,3,3) = Info.block(3,3,3,3); // cov translation - InfoG2o.block(3,3,3,3) = Info.block(0,0,3,3); // cov rotation - InfoG2o.block(0,3,3,3) = Info.block(0,3,3,3); // off diagonal - InfoG2o.block(3,0,3,3) = Info.block(3,0,3,3); // off diagonal + InfoG2o.block<3, 3>(0, 0) = Info.block<3, 3>(3, 3); // cov translation + InfoG2o.block<3, 3>(3, 3) = Info.block<3, 3>(0, 0); // cov rotation + InfoG2o.block<3, 3>(0, 3) = Info.block<3, 3>(0, 3); // off diagonal + InfoG2o.block<3, 3>(3, 0) = Info.block<3, 3>(3, 0); // off diagonal - for (int i = 0; i < 6; i++){ - for (int j = i; j < 6; j++){ + for (size_t i = 0; i < 6; i++){ + for (size_t j = i; j < 6; j++){ stream << " " << InfoG2o(i, j); } } @@ -562,126 +589,157 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, } /* ************************************************************************* */ -static Rot3 NormalizedRot3(double w, double x, double y, double z) { +// parse quaternion in x,y,z,w order, and normalize to unit length +istream &operator>>(istream &is, Quaternion &q) { + double x, y, z, w; + is >> x >> y >> z >> w; const double norm = sqrt(w * w + x * x + y * y + z * z), f = 1.0 / norm; - return Rot3::Quaternion(f * w, f * x, f * y, f * z); + q = Quaternion(f * w, f * x, f * y, f * z); + return is; } /* ************************************************************************* */ -std::map parse3DPoses(const 
string& filename) { +// parse Rot3 from roll, pitch, yaw +istream &operator>>(istream &is, Rot3 &R) { + double yaw, pitch, roll; + is >> roll >> pitch >> yaw; // notice order ! + R = Rot3::Ypr(yaw, pitch, roll); + return is; +} + +/* ************************************************************************* */ +istream &operator>>(istream &is, boost::optional> &indexed) { + string tag; + is >> tag; + if (tag == "VERTEX3") { + Key id; + double x, y, z; + Rot3 R; + is >> id >> x >> y >> z >> R; + indexed.reset({id, Pose3(R, {x, y, z})}); + } else if (tag == "VERTEX_SE3:QUAT") { + Key id; + double x, y, z; + Quaternion q; + is >> id >> x >> y >> z >> q; + indexed.reset({id, Pose3(q, {x, y, z})}); + } + return is; +} + +map parse3DPoses(const string &filename, Key maxNr) { + return parseIntoMap(filename, maxNr); +} + +/* ************************************************************************* */ +istream &operator>>(istream &is, boost::optional> &indexed) { + string tag; + is >> tag; + if (tag == "VERTEX_TRACKXYZ") { + Key id; + double x, y, z; + is >> id >> x >> y >> z; + indexed.reset({id, Point3(x, y, z)}); + } + return is; +} + +map parse3DLandmarks(const string &filename, Key maxNr) { + return parseIntoMap(filename, maxNr); +} + +/* ************************************************************************* */ +// Parse BetweenFactor shared pointers into a vector +template +static vector>> +parseIntoVector(const string &filename, Key maxNr = 0) { ifstream is(filename.c_str()); if (!is) - throw invalid_argument("parse3DPoses: can not find file " + filename); + throw invalid_argument("parse: can not find file " + filename); - std::map poses; + vector>> result; + string tag; while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; - - if (tag == "VERTEX3") { - Key id; - double x, y, z, roll, pitch, yaw; - ls >> id >> x >> y >> z >> roll >> pitch >> yaw; - poses.emplace(id, Pose3(Rot3::Ypr(yaw, pitch, roll), {x, y, z})); - } - if (tag == "VERTEX_SE3:QUAT") { - Key id; - double x, y, z, qx, qy, qz, qw; - ls >> id >> x >> y >> z >> qx >> qy >> qz >> qw; - poses.emplace(id, Pose3(NormalizedRot3(qw, qx, qy, qz), {x, y, z})); + boost::shared_ptr> shared; + is >> shared; + if (shared) { + // optional filter + if (maxNr && (shared->key1() >= maxNr || shared->key1() >= maxNr)) + continue; + result.push_back(shared); } + is.ignore(LINESIZE, '\n'); } - return poses; + return result; } /* ************************************************************************* */ -std::map parse3DLandmarks(const string& filename) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse3DLandmarks: can not find file " + filename); - - std::map landmarks; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; - - if (tag == "VERTEX_TRACKXYZ") { - Key id; - double x, y, z; - ls >> id >> x >> y >> z; - landmarks.emplace(id, Point3(x, y, z)); - } +// Parse a symmetric covariance matrix (onlyupper-triangular part is stored) +istream &operator>>(istream &is, Matrix6 &m) { + for (size_t i = 0; i < 6; i++) + for (size_t j = i; j < 6; j++){ + is >> m(i, j); + m(j,i) = m(i,j); } - return landmarks; + return is; } /* ************************************************************************* */ -BetweenFactorPose3s parse3DFactors( - const string& filename, - const noiseModel::Diagonal::shared_ptr& corruptingNoise) { - ifstream is(filename.c_str()); - if (!is) throw 
invalid_argument("parse3DFactors: can not find file " + filename); +istream &operator>>(istream &is, + boost::shared_ptr> &shared) { + string tag; + is >> tag; + + Matrix6 m; + if (tag == "EDGE3") { + Key id1, id2; + double x, y, z; + Rot3 R; + is >> id1 >> id2 >> x >> y >> z >> R; + Pose3 pose(R, {x, y, z}); + + is >> m; + SharedNoiseModel model = noiseModel::Gaussian::Information(m); + shared.reset(new BetweenFactor(id1, id2, pose, model)); + } + if (tag == "EDGE_SE3:QUAT") { + Key id1, id2; + double x, y, z; + Quaternion q; + is >> id1 >> id2 >> x >> y >> z >> q; + Pose3 pose(q, {x, y, z}); + + // EDGE_SE3:QUAT stores covariance in t,R order, unlike GTSAM: + is >> m; + Matrix6 mgtsam; + mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation + mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation + mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal + mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal + SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); + + shared.reset(new BetweenFactor(id1, id2, pose, model)); + } + return is; +} + +/* ************************************************************************* */ +BetweenFactorPose3s +parse3DFactors(const string &filename, + const noiseModel::Diagonal::shared_ptr &corruptingNoise, + Key maxNr) { + auto factors = parseIntoVector(filename, maxNr); - boost::optional sampler; if (corruptingNoise) { - sampler = Sampler(corruptingNoise); - } - - std::vector::shared_ptr> factors; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; - - if (tag == "EDGE3") { - Key id1, id2; - double x, y, z, roll, pitch, yaw; - ls >> id1 >> id2 >> x >> y >> z >> roll >> pitch >> yaw; - Matrix m(6, 6); - for (size_t i = 0; i < 6; i++) - for (size_t j = i; j < 6; j++) ls >> m(i, j); - SharedNoiseModel model = noiseModel::Gaussian::Information(m); - factors.emplace_back(new BetweenFactor( - id1, id2, Pose3(Rot3::Ypr(yaw, pitch, roll), {x, y, z}), model)); - } - if (tag == "EDGE_SE3:QUAT") { - Key id1, id2; - double x, y, z, qx, qy, qz, qw; - ls >> id1 >> id2 >> x >> y >> z >> qx >> qy >> qz >> qw; - Matrix m(6, 6); - for (size_t i = 0; i < 6; i++) { - for (size_t j = i; j < 6; j++) { - double mij; - ls >> mij; - m(i, j) = mij; - m(j, i) = mij; - } - } - Matrix mgtsam(6, 6); - - mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation - mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation - mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal - mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal - - SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); - auto R12 = NormalizedRot3(qw, qx, qy, qz); - if (sampler) { - R12 = R12.retract(sampler->sample()); - } - - factors.emplace_back(new BetweenFactor( - id1, id2, Pose3(R12, {x, y, z}), model)); + Sampler sampler(corruptingNoise); + for (auto factor : factors) { + auto pose = factor->measured(); + factor.reset(new BetweenFactor(factor->key1(), factor->key2(), + pose.retract(sampler.sample()), + factor->noiseModel())); } } + return factors; } diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 30fa913ba..caef96cf4 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -23,10 +23,8 @@ #include #include #include -#include -#include +#include #include -#include #include #include #include @@ -114,18 +112,35 @@ GTSAM_EXPORT boost::optional parseVertexLandmark(std::istream& GTSAM_EXPORT boost::optional 
parseEdge(std::istream& is, const std::string& tag); +using BetweenFactorPose2s = + std::vector::shared_ptr>; + +/// Parse edges in 2D g2o graph file into a set of BetweenFactors. +GTSAM_EXPORT BetweenFactorPose2s parse2DFactors( + const std::string &filename, + const noiseModel::Diagonal::shared_ptr &corruptingNoise = nullptr); + +/// Parse vertices in 2D g2o graph file into a map of Pose2s. +GTSAM_EXPORT std::map parse2DPoses(const std::string &filename, + Key maxNr = 0); + +/// Parse landmarks in 2D g2o graph file into a map of Point2s. +GTSAM_EXPORT std::map parse2DLandmarks(const string &filename, + Key maxNr = 0); + /// Return type for load functions -typedef std::pair GraphAndValues; +using GraphAndValues = + std::pair; /** * Load TORO 2D Graph * @param dataset/model pair as constructed by [dataset] - * @param maxID if non-zero cut out vertices >= maxID + * @param maxNr if non-zero cut out vertices >= maxNr * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model */ GTSAM_EXPORT GraphAndValues load2D( - std::pair dataset, int maxID = 0, + std::pair dataset, Key maxNr = 0, bool addNoise = false, bool smart = true, // NoiseFormat noiseFormat = NoiseFormatAUTO, @@ -135,7 +150,7 @@ GTSAM_EXPORT GraphAndValues load2D( * Load TORO/G2O style graph files * @param filename * @param model optional noise model to use instead of one specified by file - * @param maxID if non-zero cut out vertices >= maxID + * @param maxNr if non-zero cut out vertices >= maxNr * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model * @param noiseFormat how noise parameters are stored @@ -143,13 +158,13 @@ GTSAM_EXPORT GraphAndValues load2D( * @return graph and initial values */ GTSAM_EXPORT GraphAndValues load2D(const std::string& filename, - SharedNoiseModel model = SharedNoiseModel(), Key maxID = 0, bool addNoise = + SharedNoiseModel model = SharedNoiseModel(), Key maxNr = 0, bool addNoise = false, bool smart = true, NoiseFormat noiseFormat = NoiseFormatAUTO, // KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE); /// @deprecated load2D now allows for arbitrary models and wrapping a robust kernel GTSAM_EXPORT GraphAndValues load2D_robust(const std::string& filename, - noiseModel::Base::shared_ptr& model, int maxID = 0); + noiseModel::Base::shared_ptr& model, Key maxNr = 0); /** save 2d graph */ GTSAM_EXPORT void save2D(const NonlinearFactorGraph& graph, @@ -179,14 +194,18 @@ GTSAM_EXPORT void writeG2o(const NonlinearFactorGraph& graph, /// Parse edges in 3D TORO graph file into a set of BetweenFactors. using BetweenFactorPose3s = std::vector::shared_ptr>; -GTSAM_EXPORT BetweenFactorPose3s parse3DFactors(const std::string& filename, - const noiseModel::Diagonal::shared_ptr& corruptingNoise=nullptr); +GTSAM_EXPORT BetweenFactorPose3s parse3DFactors( + const std::string &filename, + const noiseModel::Diagonal::shared_ptr &corruptingNoise = nullptr, + Key maxNr = 0); /// Parse vertices in 3D TORO/g2o graph file into a map of Pose3s. -GTSAM_EXPORT std::map parse3DPoses(const std::string& filename); +GTSAM_EXPORT std::map parse3DPoses(const std::string &filename, + Key maxNr = 0); /// Parse landmarks in 3D g2o graph file into a map of Point3s. 
-GTSAM_EXPORT std::map parse3DLandmarks(const string& filename); +GTSAM_EXPORT std::map parse3DLandmarks(const std::string &filename, + Key maxNr = 0); /// Load TORO 3D Graph GTSAM_EXPORT GraphAndValues load3D(const std::string& filename); diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index b6b1776d2..6ee44cfd6 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -89,20 +89,38 @@ TEST( dataSet, parseEdge) } /* ************************************************************************* */ -TEST( dataSet, load2D) -{ +TEST(dataSet, load2D) { ///< The structure where we will save the SfM data const string filename = findExampleDataFile("w100.graph"); NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; boost::tie(graph, initial) = load2D(filename); - EXPECT_LONGS_EQUAL(300,graph->size()); - EXPECT_LONGS_EQUAL(100,initial->size()); - noiseModel::Unit::shared_ptr model = noiseModel::Unit::Create(3); - BetweenFactor expected(1, 0, Pose2(-0.99879,0.0417574,-0.00818381), model); - BetweenFactor::shared_ptr actual = boost::dynamic_pointer_cast< - BetweenFactor >(graph->at(0)); + EXPECT_LONGS_EQUAL(300, graph->size()); + EXPECT_LONGS_EQUAL(100, initial->size()); + auto model = noiseModel::Unit::Create(3); + BetweenFactor expected(1, 0, Pose2(-0.99879, 0.0417574, -0.00818381), + model); + BetweenFactor::shared_ptr actual = + boost::dynamic_pointer_cast>(graph->at(0)); EXPECT(assert_equal(expected, *actual)); + + // // Check factor parsing + // const auto actualFactors = parse2DFactors(filename); + // for (size_t i : {0, 1, 2, 3, 4, 5}) { + // EXPECT(assert_equal( + // *boost::dynamic_pointer_cast>(graph->at(i)), + // *actualFactors[i], 1e-5)); + // } + + // Check pose parsing + const auto actualPoses = parse2DPoses(filename); + for (size_t j : {0, 1, 2, 3, 4}) { + EXPECT(assert_equal(initial->at(j), actualPoses.at(j), 1e-5)); + } + + // Check landmark parsing + const auto actualLandmarks = parse2DLandmarks(filename); + EXPECT_LONGS_EQUAL(0, actualLandmarks.size()); } /* ************************************************************************* */ From d67afa8a3d141cc1b913c6fb88ddac74b1c55c1a Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 12 Aug 2020 23:24:52 -0400 Subject: [PATCH 044/142] Very generic parseToVector --- gtsam/slam/dataset.cpp | 576 +++++++++++++++++++------------ gtsam/slam/dataset.h | 24 +- gtsam/slam/tests/testDataset.cpp | 14 +- 3 files changed, 370 insertions(+), 244 deletions(-) diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 4f56d3afe..29162dafb 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -21,21 +21,25 @@ #include #include #include + #include #include #include + +#include +#include + +#include + #include #include -#include -#include -#include -#include + #include #include #include -#include #include #include +#include #include #include @@ -56,7 +60,7 @@ using namespace gtsam::symbol_shorthand; namespace gtsam { /* ************************************************************************* */ -string findExampleDataFile(const string& name) { +string findExampleDataFile(const string &name) { // Search source tree and installed location vector rootsToSearch; @@ -73,88 +77,88 @@ string findExampleDataFile(const string& name) { namesToSearch.push_back(name + ".xml"); // Find first name that exists - for(const fs::path root: rootsToSearch) { - for(const fs::path name: namesToSearch) { + for (const fs::path root : rootsToSearch) { + for (const fs::path 
name : namesToSearch) { if (fs::is_regular_file(root / name)) return (root / name).string(); } } // If we did not return already, then we did not find the file - throw invalid_argument( - "gtsam::findExampleDataFile could not find a matching file in\n" - GTSAM_SOURCE_TREE_DATASET_DIR " or\n" - GTSAM_INSTALLED_DATASET_DIR " named\n" + name + ", " + name - + ".graph, or " + name + ".txt"); + throw invalid_argument("gtsam::findExampleDataFile could not find a matching " + "file in\n" GTSAM_SOURCE_TREE_DATASET_DIR + " or\n" GTSAM_INSTALLED_DATASET_DIR " named\n" + + name + ", " + name + ".graph, or " + name + ".txt"); } /* ************************************************************************* */ -string createRewrittenFileName(const string& name) { +string createRewrittenFileName(const string &name) { // Search source tree and installed location if (!exists(fs::path(name))) { throw invalid_argument( - "gtsam::createRewrittenFileName could not find a matching file in\n" - + name); + "gtsam::createRewrittenFileName could not find a matching file in\n" + + name); } fs::path p(name); - fs::path newpath = fs::path(p.parent_path().string()) - / fs::path(p.stem().string() + "-rewritten.txt"); + fs::path newpath = fs::path(p.parent_path().string()) / + fs::path(p.stem().string() + "-rewritten.txt"); return newpath.string(); } /* ************************************************************************* */ GraphAndValues load2D(pair dataset, Key maxNr, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { + bool addNoise, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { return load2D(dataset.first, dataset.second, maxNr, addNoise, smart, - noiseFormat, kernelFunctionType); + noiseFormat, kernelFunctionType); } /* ************************************************************************* */ -// Read noise parameters and interpret them according to flags -static SharedNoiseModel readNoiseModel(ifstream& is, bool smart, - NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { - double v1, v2, v3, v4, v5, v6; - is >> v1 >> v2 >> v3 >> v4 >> v5 >> v6; - +// Interpret noise parameters according to flags +static SharedNoiseModel +createNoiseModel(const Vector6 v, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { if (noiseFormat == NoiseFormatAUTO) { // Try to guess covariance matrix layout - if (v1 != 0.0 && v2 == 0.0 && v3 != 0.0 && v4 != 0.0 && v5 == 0.0 - && v6 == 0.0) { - // NoiseFormatGRAPH + if (v(0) != 0.0 && v(1) == 0.0 && v(2) != 0.0 && // + v(3) != 0.0 && v(4) == 0.0 && v(5) == 0.0) { noiseFormat = NoiseFormatGRAPH; - } else if (v1 != 0.0 && v2 == 0.0 && v3 == 0.0 && v4 != 0.0 && v5 == 0.0 - && v6 != 0.0) { - // NoiseFormatCOV + } else if (v(0) != 0.0 && v(1) == 0.0 && v(2) == 0.0 && // + v(3) != 0.0 && v(4) == 0.0 && v(5) != 0.0) { noiseFormat = NoiseFormatCOV; } else { throw std::invalid_argument( - "load2D: unrecognized covariance matrix format in dataset file. Please specify the noise format."); + "load2D: unrecognized covariance matrix format in dataset file. 
" + "Please specify the noise format."); } } // Read matrix and check that diagonal entries are non-zero - Matrix M(3, 3); + Matrix3 M; switch (noiseFormat) { case NoiseFormatG2O: case NoiseFormatCOV: - // i.e., [ v1 v2 v3; v2' v4 v5; v3' v5' v6 ] - if (v1 == 0.0 || v4 == 0.0 || v6 == 0.0) + // i.e., [v(0) v(1) v(2); + // v(1)' v(3) v(4); + // v(2)' v(4)' v(5) ] + if (v(0) == 0.0 || v(3) == 0.0 || v(5) == 0.0) throw runtime_error( "load2D::readNoiseModel looks like this is not G2O matrix order"); - M << v1, v2, v3, v2, v4, v5, v3, v5, v6; + M << v(0), v(1), v(2), v(1), v(3), v(4), v(2), v(4), v(5); break; case NoiseFormatTORO: case NoiseFormatGRAPH: // http://www.openslam.org/toro.html // inf_ff inf_fs inf_ss inf_rr inf_fr inf_sr - // i.e., [ v1 v2 v5; v2' v3 v6; v5' v6' v4 ] - if (v1 == 0.0 || v3 == 0.0 || v4 == 0.0) + // i.e., [v(0) v(1) v(4); + // v(1)' v(2) v(5); + // v(4)' v(5)' v(3) ] + if (v(0) == 0.0 || v(2) == 0.0 || v(3) == 0.0) throw invalid_argument( "load2D::readNoiseModel looks like this is not TORO matrix order"); - M << v1, v2, v5, v2, v3, v6, v5, v6, v4; + M << v(0), v(1), v(4), v(1), v(2), v(5), v(4), v(5), v(3); break; default: throw runtime_error("load2D: invalid noise format"); @@ -195,15 +199,18 @@ static SharedNoiseModel readNoiseModel(ifstream& is, bool smart, } } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 /* ************************************************************************* */ -boost::optional parseVertex(istream& is, const string& tag) { - return parseVertexPose(is, tag); +// Read noise parameters and interpret them according to flags +static SharedNoiseModel readNoiseModel(istream &is, bool smart, + NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { + Vector6 v; + is >> v(0) >> v(1) >> v(2) >> v(3) >> v(4) >> v(5); + return createNoiseModel(v, smart, noiseFormat, kernelFunctionType); } -#endif /* ************************************************************************* */ -boost::optional parseVertexPose(istream& is, const string& tag) { +boost::optional parseVertexPose(istream &is, const string &tag) { if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { Key id; double x, y, yaw; @@ -215,7 +222,8 @@ boost::optional parseVertexPose(istream& is, const string& tag) { } /* ************************************************************************* */ -boost::optional parseVertexLandmark(istream& is, const string& tag) { +boost::optional parseVertexLandmark(istream &is, + const string &tag) { if (tag == "VERTEX_XY") { Key id; double x, y; @@ -275,9 +283,37 @@ map parse2DLandmarks(const string &filename, Key maxNr) { } /* ************************************************************************* */ -boost::optional parseEdge(istream& is, const string& tag) { - if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") - || (tag == "ODOMETRY")) { +// Parse a file by first parsing the `Parsed` type and then processing it with +// the supplied `process` function. Result is put in a vector evaluates to true. 
+// Requires: a stream >> operator for boost::optional +template +static vector +parseToVector(const string &filename, + const std::function process) { + ifstream is(filename.c_str()); + if (!is) + throw invalid_argument("parse: can not find file " + filename); + + vector result; + string tag; + while (!is.eof()) { + boost::optional parsed; + is >> parsed; + if (parsed) { + Output factor = process(*parsed); // can return nullptr + if (factor) { + result.push_back(factor); + } + } + is.ignore(LINESIZE, '\n'); + } + return result; +} + +/* ************************************************************************* */ +boost::optional parseEdge(istream &is, const string &tag) { + if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") || + (tag == "ODOMETRY")) { Key id1, id2; double x, y, yaw; @@ -288,6 +324,86 @@ boost::optional parseEdge(istream& is, const string& tag) { } } +/* ************************************************************************* */ +struct Measurement2 { + Key id1, id2; + Pose2 pose; + Vector6 v; // 6 noise model parameters for edge +}; + +istream &operator>>(istream &is, boost::optional &edge) { + string tag; + is >> tag; + if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") || + (tag == "ODOMETRY")) { + Key id1, id2; + double x, y, yaw; + Vector6 v; + is >> id1 >> id2 >> x >> y >> yaw >> // + v(0) >> v(1) >> v(2) >> v(3) >> v(4) >> v(5); + edge.reset({id1, id2, Pose2(x, y, yaw), v}); + } + return is; +} + +/* ************************************************************************* */ +// Create a sampler +boost::shared_ptr createSampler(const SharedNoiseModel &model) { + auto noise = boost::dynamic_pointer_cast(model); + if (!noise) + throw invalid_argument("gtsam::load: invalid noise model for adding noise" + "(current version assumes diagonal noise model)!"); + return boost::shared_ptr(new Sampler(noise)); +} + +/* ************************************************************************* */ +// Small functor to process the edge into a BetweenFactor::shared_ptr +struct ProcessPose2 { + // The arguments + Key maxNr = 0; + SharedNoiseModel model; + boost::shared_ptr sampler; + + // Arguments for parsing noise model + bool smart = true; + NoiseFormat noiseFormat = NoiseFormatAUTO; + KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE; + + // The actual function + BetweenFactor::shared_ptr operator()(const Measurement2 &edge) { + // optional filter + if (maxNr && (edge.id1 >= maxNr || edge.id2 >= maxNr)) + return nullptr; + + // Get pose and optionally add noise + Pose2 T12 = edge.pose; + if (sampler) + T12 = T12.retract(sampler->sample()); + + // Create factor + // If model is nullptr we use the model from the file + return boost::make_shared>( + edge.id1, edge.id2, T12, + model + ? 
model + : createNoiseModel(edge.v, smart, noiseFormat, kernelFunctionType)); + } +}; + +/* ************************************************************************* */ +BetweenFactorPose2s +parse2DFactors(const string &filename, + const noiseModel::Diagonal::shared_ptr &corruptingNoise, + Key maxNr) { + ProcessPose2 process; + process.maxNr = maxNr; + if (corruptingNoise) { + process.sampler = createSampler(corruptingNoise); + } + return parseToVector::shared_ptr>(filename, + process); +} + /* ************************************************************************* */ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, bool addNoise, bool smart, NoiseFormat noiseFormat, @@ -296,11 +412,11 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, Values::shared_ptr initial(new Values); const auto poses = parse2DPoses(filename, maxNr); - for (const auto& key_pose : poses) { + for (const auto &key_pose : poses) { initial->insert(key_pose.first, key_pose.second); } const auto landmarks = parse2DLandmarks(filename, maxNr); - for (const auto& key_landmark : landmarks) { + for (const auto &key_landmark : landmarks) { initial->insert(key_landmark.first, key_landmark.second); } @@ -311,7 +427,7 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); // If asked, create a sampler with random number generator - std::unique_ptr sampler; + boost::shared_ptr sampler; if (addNoise) { noiseModel::Diagonal::shared_ptr noise; if (model) @@ -319,7 +435,7 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, if (!noise) throw invalid_argument( "gtsam::load2D: invalid noise model for adding noise" - "(current version assumes diagonal noise model)!"); + "(current version assumes diagonal noise model)!"); sampler.reset(new Sampler(noise)); } @@ -335,11 +451,11 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, auto between_pose = parseEdge(is, tag); if (between_pose) { std::tie(id1, id2) = between_pose->first; - Pose2& l1Xl2 = between_pose->second; + Pose2 l1Xl2 = between_pose->second; // read noise model - SharedNoiseModel modelInFile = readNoiseModel(is, smart, noiseFormat, - kernelFunctionType); + SharedNoiseModel modelInFile = + readNoiseModel(is, smart, noiseFormat, kernelFunctionType); // optional filter if (maxNr && (id1 >= maxNr || id2 >= maxNr)) @@ -369,7 +485,8 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, is >> id1 >> id2 >> bearing >> range >> bearing_std >> range_std; } - // A landmark measurement, TODO Frank says: don't know why is converted to bearing-range + // A landmark measurement, TODO Frank says: don't know why is converted to + // bearing-range if (tag == "LANDMARK") { double lmx, lmy; double v1, v2, v3; @@ -380,8 +497,9 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, bearing = atan2(lmy, lmx); range = sqrt(lmx * lmx + lmy * lmy); - // In our experience, the x-y covariance on landmark sightings is not very good, so assume - // it describes the uncertainty at a range of 10m, and convert that to bearing/range uncertainty. + // In our experience, the x-y covariance on landmark sightings is not + // very good, so assume it describes the uncertainty at a range of 10m, + // and convert that to bearing/range uncertainty. 
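+      // Concretely, for a near-isotropic covariance (v1 ~= v3) the code below
+      // uses bearing_std = sqrt(v1 / 10) and range_std = sqrt(v1); otherwise it
+      // falls back to unit sigmas and warns once.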
if (std::abs(v1 - v3) < 1e-4) { bearing_std = sqrt(v1 / 10.0); range_std = sqrt(v1); @@ -389,10 +507,11 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, bearing_std = 1; range_std = 1; if (!haveLandmark) { - cout - << "Warning: load2D is a very simple dataset loader and is ignoring the\n" - "non-uniform covariance on LANDMARK measurements in this file." - << endl; + cout << "Warning: load2D is a very simple dataset loader and is " + "ignoring the\n" + "non-uniform covariance on LANDMARK measurements in this " + "file." + << endl; haveLandmark = true; } } @@ -407,11 +526,12 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, // Create noise model noiseModel::Diagonal::shared_ptr measurementNoise = - noiseModel::Diagonal::Sigmas((Vector(2) << bearing_std, range_std).finished()); + noiseModel::Diagonal::Sigmas( + (Vector(2) << bearing_std, range_std).finished()); // Add to graph *graph += BearingRangeFactor(id1, L(id2), bearing, range, - measurementNoise); + measurementNoise); // Insert poses or points if they do not exist yet if (!initial->exists(id1)) @@ -430,46 +550,46 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, } /* ************************************************************************* */ -GraphAndValues load2D_robust(const string& filename, - noiseModel::Base::shared_ptr& model, Key maxNr) { +GraphAndValues load2D_robust(const string &filename, + noiseModel::Base::shared_ptr &model, Key maxNr) { return load2D(filename, model, maxNr); } /* ************************************************************************* */ -void save2D(const NonlinearFactorGraph& graph, const Values& config, - const noiseModel::Diagonal::shared_ptr model, const string& filename) { +void save2D(const NonlinearFactorGraph &graph, const Values &config, + const noiseModel::Diagonal::shared_ptr model, + const string &filename) { fstream stream(filename.c_str(), fstream::out); // save poses - - for(const Values::ConstKeyValuePair& key_value: config) { - const Pose2& pose = key_value.value.cast(); + for (const Values::ConstKeyValuePair &key_value : config) { + const Pose2 &pose = key_value.value.cast(); stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y() - << " " << pose.theta() << endl; + << " " << pose.theta() << endl; } // save edges - Matrix R = model->R(); - Matrix RR = trans(R) * R; //prod(trans(R),R); - for(boost::shared_ptr factor_: graph) { - boost::shared_ptr > factor = - boost::dynamic_pointer_cast >(factor_); + // TODO(frank): why don't we use model in factor? 
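+  // RR = R^T * R is the 3x3 information matrix of the supplied model; its six
+  // unique (upper-triangular) entries are written after each EDGE2 line below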
+ Matrix3 R = model->R(); + Matrix3 RR = R.transpose() * R; + for (auto f : graph) { + auto factor = boost::dynamic_pointer_cast>(f); if (!factor) continue; - Pose2 pose = factor->measured().inverse(); + const Pose2 pose = factor->measured().inverse(); stream << "EDGE2 " << factor->key2() << " " << factor->key1() << " " - << pose.x() << " " << pose.y() << " " << pose.theta() << " " << RR(0, 0) - << " " << RR(0, 1) << " " << RR(1, 1) << " " << RR(2, 2) << " " - << RR(0, 2) << " " << RR(1, 2) << endl; + << pose.x() << " " << pose.y() << " " << pose.theta() << " " + << RR(0, 0) << " " << RR(0, 1) << " " << RR(1, 1) << " " << RR(2, 2) + << " " << RR(0, 2) << " " << RR(1, 2) << endl; } stream.close(); } /* ************************************************************************* */ -GraphAndValues readG2o(const string& g2oFile, const bool is3D, +GraphAndValues readG2o(const string &g2oFile, const bool is3D, KernelFunctionType kernelFunctionType) { if (is3D) { return load3D(g2oFile); @@ -484,86 +604,90 @@ GraphAndValues readG2o(const string& g2oFile, const bool is3D, } /* ************************************************************************* */ -void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, - const string& filename) { +void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, + const string &filename) { fstream stream(filename.c_str(), fstream::out); // save 2D poses - for (const auto& key_value : estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Pose2& pose = p->value(); + for (const auto &key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Pose2 &pose = p->value(); stream << "VERTEX_SE2 " << key_value.key << " " << pose.x() << " " - << pose.y() << " " << pose.theta() << endl; + << pose.y() << " " << pose.theta() << endl; } // save 3D poses - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Pose3& pose = p->value(); + for (const auto &key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Pose3 &pose = p->value(); const Point3 t = pose.translation(); const auto q = pose.rotation().toQuaternion(); stream << "VERTEX_SE3:QUAT " << key_value.key << " " << t.x() << " " - << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " - << q.z() << " " << q.w() << endl; + << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " + << q.z() << " " << q.w() << endl; } // save 2D landmarks - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Point2& point = p->value(); + for (const auto &key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Point2 &point = p->value(); stream << "VERTEX_XY " << key_value.key << " " << point.x() << " " - << point.y() << endl; + << point.y() << endl; } // save 3D landmarks - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Point3& point = p->value(); + for (const auto &key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Point3 &point = p->value(); stream << "VERTEX_TRACKXYZ " << key_value.key << " " << point.x() << " " - << point.y() << " " << point.z() << endl; + << point.y() << " " << point.z() << endl; } // save edges (2D or 3D) - for(const auto& factor_: graph) { - boost::shared_ptr > factor = - 
boost::dynamic_pointer_cast >(factor_); - if (factor){ + for (const auto &factor_ : graph) { + boost::shared_ptr> factor = + boost::dynamic_pointer_cast>(factor_); + if (factor) { SharedNoiseModel model = factor->noiseModel(); boost::shared_ptr gaussianModel = boost::dynamic_pointer_cast(model); - if (!gaussianModel){ + if (!gaussianModel) { model->print("model\n"); throw invalid_argument("writeG2o: invalid noise model!"); } - Matrix Info = gaussianModel->R().transpose() * gaussianModel->R(); + Matrix3 Info = gaussianModel->R().transpose() * gaussianModel->R(); Pose2 pose = factor->measured(); //.inverse(); stream << "EDGE_SE2 " << factor->key1() << " " << factor->key2() << " " - << pose.x() << " " << pose.y() << " " << pose.theta(); - for (size_t i = 0; i < 3; i++){ - for (size_t j = i; j < 3; j++){ + << pose.x() << " " << pose.y() << " " << pose.theta(); + for (size_t i = 0; i < 3; i++) { + for (size_t j = i; j < 3; j++) { stream << " " << Info(i, j); } } stream << endl; } - boost::shared_ptr< BetweenFactor > factor3D = - boost::dynamic_pointer_cast< BetweenFactor >(factor_); + boost::shared_ptr> factor3D = + boost::dynamic_pointer_cast>(factor_); - if (factor3D){ + if (factor3D) { SharedNoiseModel model = factor3D->noiseModel(); boost::shared_ptr gaussianModel = boost::dynamic_pointer_cast(model); - if (!gaussianModel){ + if (!gaussianModel) { model->print("model\n"); throw invalid_argument("writeG2o: invalid noise model!"); } - Matrix Info = gaussianModel->R().transpose() * gaussianModel->R(); + Matrix6 Info = gaussianModel->R().transpose() * gaussianModel->R(); const Pose3 pose3D = factor3D->measured(); const Point3 p = pose3D.translation(); const auto q = pose3D.rotation().toQuaternion(); @@ -571,14 +695,14 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, << " " << p.x() << " " << p.y() << " " << p.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " << q.w(); - Matrix InfoG2o = I_6x6; + Matrix6 InfoG2o = I_6x6; InfoG2o.block<3, 3>(0, 0) = Info.block<3, 3>(3, 3); // cov translation InfoG2o.block<3, 3>(3, 3) = Info.block<3, 3>(0, 0); // cov rotation InfoG2o.block<3, 3>(0, 3) = Info.block<3, 3>(0, 3); // off diagonal InfoG2o.block<3, 3>(3, 0) = Info.block<3, 3>(3, 0); // off diagonal - for (size_t i = 0; i < 6; i++){ - for (size_t j = i; j < 6; j++){ + for (size_t i = 0; i < 6; i++) { + for (size_t j = i; j < 6; j++) { stream << " " << InfoG2o(i, j); } } @@ -602,7 +726,7 @@ istream &operator>>(istream &is, Quaternion &q) { // parse Rot3 from roll, pitch, yaw istream &operator>>(istream &is, Rot3 &R) { double yaw, pitch, roll; - is >> roll >> pitch >> yaw; // notice order ! + is >> roll >> pitch >> yaw; // notice order ! 
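// A minimal round-trip sketch for the readG2o / writeG2o pair shown above:
// load a 3D g2o file and re-serialize it. The file names are placeholders.
#include <gtsam/slam/dataset.h>

void roundTripG2oExample() {
  using namespace gtsam;
  const bool is3D = true;
  GraphAndValues input = readG2o("input3D.g2o", is3D);
  // writeG2o emits VERTEX_SE3:QUAT rows for Pose3 values and EDGE_SE3:QUAT
  // rows whose information matrix is re-ordered to g2o's (t, R) convention.
  writeG2o(*input.first, *input.second, "roundtrip3D.g2o");
}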
R = Rot3::Ypr(yaw, pitch, roll); return is; } @@ -648,69 +772,43 @@ map parse3DLandmarks(const string &filename, Key maxNr) { return parseIntoMap(filename, maxNr); } -/* ************************************************************************* */ -// Parse BetweenFactor shared pointers into a vector -template -static vector>> -parseIntoVector(const string &filename, Key maxNr = 0) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse: can not find file " + filename); - - vector>> result; - string tag; - while (!is.eof()) { - boost::shared_ptr> shared; - is >> shared; - if (shared) { - // optional filter - if (maxNr && (shared->key1() >= maxNr || shared->key1() >= maxNr)) - continue; - result.push_back(shared); - } - is.ignore(LINESIZE, '\n'); - } - return result; -} - /* ************************************************************************* */ // Parse a symmetric covariance matrix (onlyupper-triangular part is stored) istream &operator>>(istream &is, Matrix6 &m) { for (size_t i = 0; i < 6; i++) - for (size_t j = i; j < 6; j++){ + for (size_t j = i; j < 6; j++) { is >> m(i, j); - m(j,i) = m(i,j); - } + m(j, i) = m(i, j); + } return is; } /* ************************************************************************* */ -istream &operator>>(istream &is, - boost::shared_ptr> &shared) { +struct Measurement3 { + Key id1, id2; + Pose3 pose; + SharedNoiseModel model; +}; + +istream &operator>>(istream &is, boost::optional &edge) { string tag; is >> tag; - Matrix6 m; if (tag == "EDGE3") { Key id1, id2; double x, y, z; Rot3 R; - is >> id1 >> id2 >> x >> y >> z >> R; - Pose3 pose(R, {x, y, z}); - - is >> m; - SharedNoiseModel model = noiseModel::Gaussian::Information(m); - shared.reset(new BetweenFactor(id1, id2, pose, model)); + is >> id1 >> id2 >> x >> y >> z >> R >> m; + edge.reset( + {id1, id2, Pose3(R, {x, y, z}), noiseModel::Gaussian::Information(m)}); } if (tag == "EDGE_SE3:QUAT") { Key id1, id2; double x, y, z; Quaternion q; - is >> id1 >> id2 >> x >> y >> z >> q; - Pose3 pose(q, {x, y, z}); + is >> id1 >> id2 >> x >> y >> z >> q >> m; // EDGE_SE3:QUAT stores covariance in t,R order, unlike GTSAM: - is >> m; Matrix6 mgtsam; mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation @@ -718,47 +816,67 @@ istream &operator>>(istream &is, mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); - shared.reset(new BetweenFactor(id1, id2, pose, model)); + edge.reset({id1, id2, Pose3(q, {x, y, z}), + noiseModel::Gaussian::Information(mgtsam)}); } return is; } +/* ************************************************************************* */ +// Small functor to process the edge into a BetweenFactor::shared_ptr +struct ProcessPose3 { + // The arguments + Key maxNr = 0; + SharedNoiseModel model; + boost::shared_ptr sampler; + + // The actual function + BetweenFactor::shared_ptr operator()(const Measurement3 &edge) { + // optional filter + if (maxNr && (edge.id1 >= maxNr || edge.id2 >= maxNr)) + return nullptr; + + // Get pose and optionally add noise + Pose3 T12 = edge.pose; + if (sampler) + T12 = T12.retract(sampler->sample()); + + // Create factor + return boost::make_shared>(edge.id1, edge.id2, T12, + edge.model); + } +}; + /* ************************************************************************* */ BetweenFactorPose3s parse3DFactors(const string &filename, const noiseModel::Diagonal::shared_ptr &corruptingNoise, Key maxNr) { 
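// A minimal usage sketch for parse3DFactors as defined here: read all EDGE3 /
// EDGE_SE3:QUAT rows into BetweenFactor<Pose3> shared pointers, optionally
// corrupting the measurements with sampled noise. The path is a placeholder.
#include <gtsam/slam/dataset.h>

void parse3DFactorsExample() {
  using namespace gtsam;
  NonlinearFactorGraph graph;
  for (const auto& factor : parse3DFactors("pose3example.txt"))
    graph.push_back(factor);

  // Optionally perturb each relative pose while parsing:
  auto corruptingNoise = noiseModel::Diagonal::Sigmas(
      (Vector(6) << 0.05, 0.05, 0.05, 0.1, 0.1, 0.1).finished());
  auto noisy = parse3DFactors("pose3example.txt", corruptingNoise);
}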
- auto factors = parseIntoVector(filename, maxNr); - + ProcessPose3 process; + process.maxNr = maxNr; if (corruptingNoise) { - Sampler sampler(corruptingNoise); - for (auto factor : factors) { - auto pose = factor->measured(); - factor.reset(new BetweenFactor(factor->key1(), factor->key2(), - pose.retract(sampler.sample()), - factor->noiseModel())); - } + process.sampler = createSampler(corruptingNoise); } - - return factors; + return parseToVector::shared_ptr>(filename, + process); } /* ************************************************************************* */ -GraphAndValues load3D(const string& filename) { +GraphAndValues load3D(const string &filename) { const auto factors = parse3DFactors(filename); NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); - for (const auto& factor : factors) { + for (const auto &factor : factors) { graph->push_back(factor); } Values::shared_ptr initial(new Values); const auto poses = parse3DPoses(filename); - for (const auto& key_pose : poses) { + for (const auto &key_pose : poses) { initial->insert(key_pose.first, key_pose.second); } const auto landmarks = parse3DLandmarks(filename); - for (const auto& key_landmark : landmarks) { + for (const auto &key_landmark : landmarks) { initial->insert(key_landmark.first, key_landmark.second); } @@ -766,7 +884,8 @@ GraphAndValues load3D(const string& filename) { } /* ************************************************************************* */ -Rot3 openGLFixedRotation() { // this is due to different convention for cameras in gtsam and openGL +Rot3 openGLFixedRotation() { // this is due to different convention for + // cameras in gtsam and openGL /* R = [ 1 0 0 * 0 -1 0 * 0 0 -1] @@ -779,7 +898,7 @@ Rot3 openGLFixedRotation() { // this is due to different convention for cameras } /* ************************************************************************* */ -Pose3 openGL2gtsam(const Rot3& R, double tx, double ty, double tz) { +Pose3 openGL2gtsam(const Rot3 &R, double tx, double ty, double tz) { Rot3 R90 = openGLFixedRotation(); Rot3 wRc = (R.inverse()).compose(R90); @@ -788,7 +907,7 @@ Pose3 openGL2gtsam(const Rot3& R, double tx, double ty, double tz) { } /* ************************************************************************* */ -Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz) { +Pose3 gtsam2openGL(const Rot3 &R, double tx, double ty, double tz) { Rot3 R90 = openGLFixedRotation(); Rot3 cRw_openGL = R90.compose(R.inverse()); Point3 t_openGL = cRw_openGL.rotate(Point3(-tx, -ty, -tz)); @@ -796,13 +915,13 @@ Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz) { } /* ************************************************************************* */ -Pose3 gtsam2openGL(const Pose3& PoseGTSAM) { +Pose3 gtsam2openGL(const Pose3 &PoseGTSAM) { return gtsam2openGL(PoseGTSAM.rotation(), PoseGTSAM.x(), PoseGTSAM.y(), - PoseGTSAM.z()); + PoseGTSAM.z()); } /* ************************************************************************* */ -bool readBundler(const string& filename, SfmData &data) { +bool readBundler(const string &filename, SfmData &data) { // Load the data file ifstream is(filename.c_str(), ifstream::in); if (!is) { @@ -837,7 +956,7 @@ bool readBundler(const string& filename, SfmData &data) { // Check for all-zero R, in which case quit if (r11 == 0 && r12 == 0 && r13 == 0) { cout << "Error in readBundler: zero rotation matrix for pose " << i - << endl; + << endl; return false; } @@ -889,7 +1008,7 @@ bool readBundler(const string& filename, SfmData &data) { } /* 
************************************************************************* */ -bool readBAL(const string& filename, SfmData &data) { +bool readBAL(const string &filename, SfmData &data) { // Load the data file ifstream is(filename.c_str(), ifstream::in); if (!is) { @@ -937,7 +1056,7 @@ bool readBAL(const string& filename, SfmData &data) { // Get the 3D position float x, y, z; is >> x >> y >> z; - SfmTrack& track = data.tracks[j]; + SfmTrack &track = data.tracks[j]; track.p = Point3(x, y, z); track.r = 0.4f; track.g = 0.4f; @@ -949,7 +1068,7 @@ bool readBAL(const string& filename, SfmData &data) { } /* ************************************************************************* */ -bool writeBAL(const string& filename, SfmData &data) { +bool writeBAL(const string &filename, SfmData &data) { // Open the output file ofstream os; os.open(filename.c_str()); @@ -967,29 +1086,32 @@ bool writeBAL(const string& filename, SfmData &data) { // Write observations os << data.number_cameras() << " " << data.number_tracks() << " " - << nrObservations << endl; + << nrObservations << endl; os << endl; for (size_t j = 0; j < data.number_tracks(); j++) { // for each 3D point j - const SfmTrack& track = data.tracks[j]; + const SfmTrack &track = data.tracks[j]; - for (size_t k = 0; k < track.number_measurements(); k++) { // for each observation of the 3D point j + for (size_t k = 0; k < track.number_measurements(); + k++) { // for each observation of the 3D point j size_t i = track.measurements[k].first; // camera id double u0 = data.cameras[i].calibration().u0(); double v0 = data.cameras[i].calibration().v0(); if (u0 != 0 || v0 != 0) { - cout - << "writeBAL has not been tested for calibration with nonzero (u0,v0)" - << endl; + cout << "writeBAL has not been tested for calibration with nonzero " + "(u0,v0)" + << endl; } - double pixelBALx = track.measurements[k].second.x() - u0; // center of image is the origin - double pixelBALy = -(track.measurements[k].second.y() - v0); // center of image is the origin + double pixelBALx = track.measurements[k].second.x() - + u0; // center of image is the origin + double pixelBALy = -(track.measurements[k].second.y() - + v0); // center of image is the origin Point2 pixelMeasurement(pixelBALx, pixelBALy); - os << i /*camera id*/<< " " << j /*point id*/<< " " - << pixelMeasurement.x() /*u of the pixel*/<< " " - << pixelMeasurement.y() /*v of the pixel*/<< endl; + os << i /*camera id*/ << " " << j /*point id*/ << " " + << pixelMeasurement.x() /*u of the pixel*/ << " " + << pixelMeasurement.y() /*v of the pixel*/ << endl; } } os << endl; @@ -1022,15 +1144,17 @@ bool writeBAL(const string& filename, SfmData &data) { return true; } -bool writeBALfromValues(const string& filename, const SfmData &data, - Values& values) { +bool writeBALfromValues(const string &filename, const SfmData &data, + Values &values) { using Camera = PinholeCamera; SfmData dataValues = data; // Store poses or cameras in SfmData size_t nrPoses = values.count(); - if (nrPoses == dataValues.number_cameras()) { // we only estimated camera poses - for (size_t i = 0; i < dataValues.number_cameras(); i++) { // for each camera + if (nrPoses == + dataValues.number_cameras()) { // we only estimated camera poses + for (size_t i = 0; i < dataValues.number_cameras(); + i++) { // for each camera Key poseKey = symbol('x', i); Pose3 pose = values.at(poseKey); Cal3Bundler K = dataValues.cameras[i].calibration(); @@ -1039,29 +1163,29 @@ bool writeBALfromValues(const string& filename, const SfmData &data, } } else { size_t 
nrCameras = values.count(); - if (nrCameras == dataValues.number_cameras()) { // we only estimated camera poses and calibration - for (size_t i = 0; i < nrCameras; i++) { // for each camera - Key cameraKey = i; // symbol('c',i); + if (nrCameras == dataValues.number_cameras()) { // we only estimated camera + // poses and calibration + for (size_t i = 0; i < nrCameras; i++) { // for each camera + Key cameraKey = i; // symbol('c',i); Camera camera = values.at(cameraKey); dataValues.cameras[i] = camera; } } else { - cout - << "writeBALfromValues: different number of cameras in SfM_dataValues (#cameras " - << dataValues.number_cameras() << ") and values (#cameras " - << nrPoses << ", #poses " << nrCameras << ")!!" - << endl; + cout << "writeBALfromValues: different number of cameras in " + "SfM_dataValues (#cameras " + << dataValues.number_cameras() << ") and values (#cameras " + << nrPoses << ", #poses " << nrCameras << ")!!" << endl; return false; } } // Store 3D points in SfmData - size_t nrPoints = values.count(), nrTracks = dataValues.number_tracks(); + size_t nrPoints = values.count(), + nrTracks = dataValues.number_tracks(); if (nrPoints != nrTracks) { - cout - << "writeBALfromValues: different number of points in SfM_dataValues (#points= " - << nrTracks << ") and values (#points " - << nrPoints << ")!!" << endl; + cout << "writeBALfromValues: different number of points in " + "SfM_dataValues (#points= " + << nrTracks << ") and values (#points " << nrPoints << ")!!" << endl; } for (size_t j = 0; j < nrTracks; j++) { // for each point @@ -1073,7 +1197,7 @@ bool writeBALfromValues(const string& filename, const SfmData &data, dataValues.tracks[j].r = 1.0; dataValues.tracks[j].g = 0.0; dataValues.tracks[j].b = 0.0; - dataValues.tracks[j].p = Point3(0,0,0); + dataValues.tracks[j].p = Point3(0, 0, 0); } } @@ -1081,22 +1205,22 @@ bool writeBALfromValues(const string& filename, const SfmData &data, return writeBAL(filename, dataValues); } -Values initialCamerasEstimate(const SfmData& db) { +Values initialCamerasEstimate(const SfmData &db) { Values initial; size_t i = 0; // NO POINTS: j = 0; - for(const SfmCamera& camera: db.cameras) + for (const SfmCamera &camera : db.cameras) initial.insert(i++, camera); return initial; } -Values initialCamerasAndPointsEstimate(const SfmData& db) { +Values initialCamerasAndPointsEstimate(const SfmData &db) { Values initial; size_t i = 0, j = 0; - for(const SfmCamera& camera: db.cameras) + for (const SfmCamera &camera : db.cameras) initial.insert((i++), camera); - for(const SfmTrack& track: db.tracks) + for (const SfmTrack &track : db.tracks) initial.insert(P(j++), track.p); return initial; } -} // \namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index caef96cf4..5b92800af 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -77,16 +77,6 @@ typedef std::pair IndexedPose; typedef std::pair IndexedLandmark; typedef std::pair, Pose2> IndexedEdge; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 -/** - * Parse TORO/G2O vertex "id x y yaw" - * @param is input stream - * @param tag string parsed from input stream, will only parse if vertex type - */ -GTSAM_EXPORT boost::optional parseVertex(std::istream& is, - const std::string& tag); -#endif - /** * Parse TORO/G2O vertex "id x y yaw" * @param is input stream @@ -118,7 +108,8 @@ using BetweenFactorPose2s = /// Parse edges in 2D g2o graph file into a set of BetweenFactors. 
GTSAM_EXPORT BetweenFactorPose2s parse2DFactors( const std::string &filename, - const noiseModel::Diagonal::shared_ptr &corruptingNoise = nullptr); + const noiseModel::Diagonal::shared_ptr &corruptingNoise = nullptr, + Key maxNr = 0); /// Parse vertices in 2D g2o graph file into a map of Pose2s. GTSAM_EXPORT std::map parse2DPoses(const std::string &filename, @@ -343,4 +334,15 @@ GTSAM_EXPORT Values initialCamerasEstimate(const SfmData& db); */ GTSAM_EXPORT Values initialCamerasAndPointsEstimate(const SfmData& db); +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 +/** + * Parse TORO/G2O vertex "id x y yaw" + * @param is input stream + * @param tag string parsed from input stream, will only parse if vertex type + */ +GTSAM_EXPORT boost::optional parseVertex(std::istream &is, + const std::string &tag) { + return parseVertexPose(is, tag); +} +#endif } // namespace gtsam diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index 6ee44cfd6..bc65dc351 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -104,13 +104,13 @@ TEST(dataSet, load2D) { boost::dynamic_pointer_cast>(graph->at(0)); EXPECT(assert_equal(expected, *actual)); - // // Check factor parsing - // const auto actualFactors = parse2DFactors(filename); - // for (size_t i : {0, 1, 2, 3, 4, 5}) { - // EXPECT(assert_equal( - // *boost::dynamic_pointer_cast>(graph->at(i)), - // *actualFactors[i], 1e-5)); - // } + // Check factor parsing + const auto actualFactors = parse2DFactors(filename); + for (size_t i : {0, 1, 2, 3, 4, 5}) { + EXPECT(assert_equal( + *boost::dynamic_pointer_cast>(graph->at(i)), + *actualFactors[i], 1e-5)); + } // Check pose parsing const auto actualPoses = parse2DPoses(filename); From aa3c0f8c5d679e1c5fb7fd16ae8078243e810672 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 12 Aug 2020 23:59:57 -0400 Subject: [PATCH 045/142] refactored load2d --- gtsam/slam/dataset.cpp | 189 ++++++++++++++++++----------------------- gtsam/slam/dataset.h | 26 +++--- 2 files changed, 96 insertions(+), 119 deletions(-) diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 29162dafb..af8904e49 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -108,10 +108,10 @@ string createRewrittenFileName(const string &name) { } /* ************************************************************************* */ -GraphAndValues load2D(pair dataset, Key maxNr, +GraphAndValues load2D(pair dataset, Key maxKey, bool addNoise, bool smart, NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { - return load2D(dataset.first, dataset.second, maxNr, addNoise, smart, + return load2D(dataset.first, dataset.second, maxKey, addNoise, smart, noiseFormat, kernelFunctionType); } @@ -199,16 +199,6 @@ createNoiseModel(const Vector6 v, bool smart, NoiseFormat noiseFormat, } } -/* ************************************************************************* */ -// Read noise parameters and interpret them according to flags -static SharedNoiseModel readNoiseModel(istream &is, bool smart, - NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { - Vector6 v; - is >> v(0) >> v(1) >> v(2) >> v(3) >> v(4) >> v(5); - return createNoiseModel(v, smart, noiseFormat, kernelFunctionType); -} - /* ************************************************************************* */ boost::optional parseVertexPose(istream &is, const string &tag) { if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { @@ -237,7 +227,7 @@ boost::optional parseVertexLandmark(istream &is, /* 
************************************************************************* */ // Parse types T into a Key-indexed map template -static map parseIntoMap(const string &filename, Key maxNr = 0) { +static map parseIntoMap(const string &filename, Key maxKey = 0) { ifstream is(filename.c_str()); if (!is) throw invalid_argument("parse: can not find file " + filename); @@ -249,7 +239,7 @@ static map parseIntoMap(const string &filename, Key maxNr = 0) { is >> indexed_t; if (indexed_t) { // optional filter - if (maxNr && indexed_t->first >= maxNr) + if (maxKey && indexed_t->first >= maxKey) continue; result.insert(*indexed_t); } @@ -266,8 +256,8 @@ istream &operator>>(istream &is, boost::optional &indexed) { return is; } -map parse2DPoses(const string &filename, Key maxNr) { - return parseIntoMap(filename, maxNr); +map parse2DPoses(const string &filename, Key maxKey) { + return parseIntoMap(filename, maxKey); } /* ************************************************************************* */ @@ -278,8 +268,8 @@ istream &operator>>(istream &is, boost::optional &indexed) { return is; } -map parse2DLandmarks(const string &filename, Key maxNr) { - return parseIntoMap(filename, maxNr); +map parse2DLandmarks(const string &filename, Key maxKey) { + return parseIntoMap(filename, maxKey); } /* ************************************************************************* */ @@ -360,19 +350,26 @@ boost::shared_ptr createSampler(const SharedNoiseModel &model) { // Small functor to process the edge into a BetweenFactor::shared_ptr struct ProcessPose2 { // The arguments - Key maxNr = 0; + Key maxKey; SharedNoiseModel model; boost::shared_ptr sampler; // Arguments for parsing noise model - bool smart = true; - NoiseFormat noiseFormat = NoiseFormatAUTO; - KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE; + bool smart; + NoiseFormat noiseFormat; + KernelFunctionType kernelFunctionType; + + ProcessPose2(Key maxKey = 0, SharedNoiseModel model = nullptr, + boost::shared_ptr sampler = nullptr, bool smart = true, + NoiseFormat noiseFormat = NoiseFormatAUTO, + KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE) + : maxKey(maxKey), model(model), sampler(sampler), smart(smart), + noiseFormat(noiseFormat), kernelFunctionType(kernelFunctionType) {} // The actual function BetweenFactor::shared_ptr operator()(const Measurement2 &edge) { // optional filter - if (maxNr && (edge.id1 >= maxNr || edge.id2 >= maxNr)) + if (maxKey && (edge.id1 >= maxKey || edge.id2 >= maxKey)) return nullptr; // Get pose and optionally add noise @@ -394,90 +391,32 @@ struct ProcessPose2 { BetweenFactorPose2s parse2DFactors(const string &filename, const noiseModel::Diagonal::shared_ptr &corruptingNoise, - Key maxNr) { - ProcessPose2 process; - process.maxNr = maxNr; - if (corruptingNoise) { - process.sampler = createSampler(corruptingNoise); - } + Key maxKey) { + ProcessPose2 process(maxKey, nullptr, + corruptingNoise ? 
createSampler(corruptingNoise) + : nullptr); + return parseToVector::shared_ptr>(filename, process); } /* ************************************************************************* */ -GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { - - Values::shared_ptr initial(new Values); - - const auto poses = parse2DPoses(filename, maxNr); - for (const auto &key_pose : poses) { - initial->insert(key_pose.first, key_pose.second); - } - const auto landmarks = parse2DLandmarks(filename, maxNr); - for (const auto &key_landmark : landmarks) { - initial->insert(key_landmark.first, key_landmark.second); - } - +static void parseOthers(const string &filename, Key maxKey, + NonlinearFactorGraph::shared_ptr graph, + Values::shared_ptr initial) { + // Do a pass to get other types of measurements ifstream is(filename.c_str()); if (!is) throw invalid_argument("load2D: can not find file " + filename); - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); - - // If asked, create a sampler with random number generator - boost::shared_ptr sampler; - if (addNoise) { - noiseModel::Diagonal::shared_ptr noise; - if (model) - noise = boost::dynamic_pointer_cast(model); - if (!noise) - throw invalid_argument( - "gtsam::load2D: invalid noise model for adding noise" - "(current version assumes diagonal noise model)!"); - sampler.reset(new Sampler(noise)); - } - - // Parse the pose constraints Key id1, id2; - bool haveLandmark = false; - const bool useModelInFile = !model; while (!is.eof()) { string tag; if (!(is >> tag)) break; - auto between_pose = parseEdge(is, tag); - if (between_pose) { - std::tie(id1, id2) = between_pose->first; - Pose2 l1Xl2 = between_pose->second; - - // read noise model - SharedNoiseModel modelInFile = - readNoiseModel(is, smart, noiseFormat, kernelFunctionType); - - // optional filter - if (maxNr && (id1 >= maxNr || id2 >= maxNr)) - continue; - - if (useModelInFile) - model = modelInFile; - - if (addNoise) - l1Xl2 = l1Xl2.retract(sampler->sample()); - - // Insert vertices if pure odometry file - if (!initial->exists(id1)) - initial->insert(id1, Pose2()); - if (!initial->exists(id2)) - initial->insert(id2, initial->at(id1) * l1Xl2); - - NonlinearFactor::shared_ptr factor( - new BetweenFactor(id1, id2, l1Xl2, model)); - graph->push_back(factor); - } // Parse measurements + bool haveLandmark = false; double bearing, range, bearing_std, range_std; // A bearing-range measurement @@ -485,8 +424,7 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, is >> id1 >> id2 >> bearing >> range >> bearing_std >> range_std; } - // A landmark measurement, TODO Frank says: don't know why is converted to - // bearing-range + // A landmark measurement, converted to bearing-range if (tag == "LANDMARK") { double lmx, lmy; double v1, v2, v3; @@ -521,7 +459,7 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, if (tag == "LANDMARK" || tag == "BR") { // optional filter - if (maxNr && id1 >= maxNr) + if (maxKey && id1 >= maxKey) continue; // Create noise model @@ -545,14 +483,53 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxNr, } is.ignore(LINESIZE, '\n'); } +} + +/* ************************************************************************* */ +GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxKey, + bool addNoise, bool smart, NoiseFormat noiseFormat, + 
KernelFunctionType kernelFunctionType) { + + Values::shared_ptr initial(new Values); + + const auto poses = parse2DPoses(filename, maxKey); + for (const auto &key_pose : poses) { + initial->insert(key_pose.first, key_pose.second); + } + const auto landmarks = parse2DLandmarks(filename, maxKey); + for (const auto &key_landmark : landmarks) { + initial->insert(key_landmark.first, key_landmark.second); + } + + // Parse the pose constraints + ProcessPose2 process(maxKey, model, addNoise ? createSampler(model) : nullptr, + smart, noiseFormat, kernelFunctionType); + auto factors = parseToVector::shared_ptr>( + filename, process); + + // Add factors into the graph and add poses if necessary + auto graph = boost::make_shared(); + for (const auto &f : factors) { + graph->push_back(f); + + // Insert vertices if pure odometry file + Key id1 = f->key1(), id2 = f->key2(); + if (!initial->exists(id1)) + initial->insert(id1, Pose2()); + if (!initial->exists(id2)) + initial->insert(id2, initial->at(id1) * f->measured()); + } + + // Do a pass to parse landmarks and bearing-range measurements + parseOthers(filename, maxKey, graph, initial); return make_pair(graph, initial); } /* ************************************************************************* */ GraphAndValues load2D_robust(const string &filename, - noiseModel::Base::shared_ptr &model, Key maxNr) { - return load2D(filename, model, maxNr); + noiseModel::Base::shared_ptr &model, Key maxKey) { + return load2D(filename, model, maxKey); } /* ************************************************************************* */ @@ -595,10 +572,10 @@ GraphAndValues readG2o(const string &g2oFile, const bool is3D, return load3D(g2oFile); } else { // just call load2D - Key maxNr = 0; + Key maxKey = 0; bool addNoise = false; bool smart = true; - return load2D(g2oFile, SharedNoiseModel(), maxNr, addNoise, smart, + return load2D(g2oFile, SharedNoiseModel(), maxKey, addNoise, smart, NoiseFormatG2O, kernelFunctionType); } } @@ -751,8 +728,8 @@ istream &operator>>(istream &is, boost::optional> &indexed) { return is; } -map parse3DPoses(const string &filename, Key maxNr) { - return parseIntoMap(filename, maxNr); +map parse3DPoses(const string &filename, Key maxKey) { + return parseIntoMap(filename, maxKey); } /* ************************************************************************* */ @@ -768,8 +745,8 @@ istream &operator>>(istream &is, boost::optional> &indexed) { return is; } -map parse3DLandmarks(const string &filename, Key maxNr) { - return parseIntoMap(filename, maxNr); +map parse3DLandmarks(const string &filename, Key maxKey) { + return parseIntoMap(filename, maxKey); } /* ************************************************************************* */ @@ -826,14 +803,14 @@ istream &operator>>(istream &is, boost::optional &edge) { // Small functor to process the edge into a BetweenFactor::shared_ptr struct ProcessPose3 { // The arguments - Key maxNr = 0; + Key maxKey = 0; SharedNoiseModel model; boost::shared_ptr sampler; // The actual function BetweenFactor::shared_ptr operator()(const Measurement3 &edge) { // optional filter - if (maxNr && (edge.id1 >= maxNr || edge.id2 >= maxNr)) + if (maxKey && (edge.id1 >= maxKey || edge.id2 >= maxKey)) return nullptr; // Get pose and optionally add noise @@ -851,9 +828,9 @@ struct ProcessPose3 { BetweenFactorPose3s parse3DFactors(const string &filename, const noiseModel::Diagonal::shared_ptr &corruptingNoise, - Key maxNr) { + Key maxKey) { ProcessPose3 process; - process.maxNr = maxNr; + process.maxKey = maxKey; if 
(corruptingNoise) { process.sampler = createSampler(corruptingNoise); } diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 5b92800af..61eb9256e 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -109,15 +109,15 @@ using BetweenFactorPose2s = GTSAM_EXPORT BetweenFactorPose2s parse2DFactors( const std::string &filename, const noiseModel::Diagonal::shared_ptr &corruptingNoise = nullptr, - Key maxNr = 0); + Key maxKey = 0); /// Parse vertices in 2D g2o graph file into a map of Pose2s. GTSAM_EXPORT std::map parse2DPoses(const std::string &filename, - Key maxNr = 0); + Key maxKey = 0); /// Parse landmarks in 2D g2o graph file into a map of Point2s. GTSAM_EXPORT std::map parse2DLandmarks(const string &filename, - Key maxNr = 0); + Key maxKey = 0); /// Return type for load functions using GraphAndValues = @@ -126,12 +126,12 @@ using GraphAndValues = /** * Load TORO 2D Graph * @param dataset/model pair as constructed by [dataset] - * @param maxNr if non-zero cut out vertices >= maxNr + * @param maxKey if non-zero cut out vertices >= maxKey * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model */ GTSAM_EXPORT GraphAndValues load2D( - std::pair dataset, Key maxNr = 0, + std::pair dataset, Key maxKey = 0, bool addNoise = false, bool smart = true, // NoiseFormat noiseFormat = NoiseFormatAUTO, @@ -141,7 +141,7 @@ GTSAM_EXPORT GraphAndValues load2D( * Load TORO/G2O style graph files * @param filename * @param model optional noise model to use instead of one specified by file - * @param maxNr if non-zero cut out vertices >= maxNr + * @param maxKey if non-zero cut out vertices >= maxKey * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model * @param noiseFormat how noise parameters are stored @@ -149,13 +149,13 @@ GTSAM_EXPORT GraphAndValues load2D( * @return graph and initial values */ GTSAM_EXPORT GraphAndValues load2D(const std::string& filename, - SharedNoiseModel model = SharedNoiseModel(), Key maxNr = 0, bool addNoise = + SharedNoiseModel model = SharedNoiseModel(), Key maxKey = 0, bool addNoise = false, bool smart = true, NoiseFormat noiseFormat = NoiseFormatAUTO, // KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE); /// @deprecated load2D now allows for arbitrary models and wrapping a robust kernel GTSAM_EXPORT GraphAndValues load2D_robust(const std::string& filename, - noiseModel::Base::shared_ptr& model, Key maxNr = 0); + noiseModel::Base::shared_ptr& model, Key maxKey = 0); /** save 2d graph */ GTSAM_EXPORT void save2D(const NonlinearFactorGraph& graph, @@ -188,15 +188,15 @@ using BetweenFactorPose3s = std::vector::shared_ptr> GTSAM_EXPORT BetweenFactorPose3s parse3DFactors( const std::string &filename, const noiseModel::Diagonal::shared_ptr &corruptingNoise = nullptr, - Key maxNr = 0); + Key maxKey = 0); /// Parse vertices in 3D TORO/g2o graph file into a map of Pose3s. GTSAM_EXPORT std::map parse3DPoses(const std::string &filename, - Key maxNr = 0); + Key maxKey = 0); /// Parse landmarks in 3D g2o graph file into a map of Point3s. 
GTSAM_EXPORT std::map parse3DLandmarks(const std::string &filename, - Key maxNr = 0); + Key maxKey = 0); /// Load TORO 3D Graph GTSAM_EXPORT GraphAndValues load3D(const std::string& filename); @@ -340,8 +340,8 @@ GTSAM_EXPORT Values initialCamerasAndPointsEstimate(const SfmData& db); * @param is input stream * @param tag string parsed from input stream, will only parse if vertex type */ -GTSAM_EXPORT boost::optional parseVertex(std::istream &is, - const std::string &tag) { +inline boost::optional parseVertex(std::istream &is, + const std::string &tag) { return parseVertexPose(is, tag); } #endif From e62d04ce2715f9fa913ce0979447334c3eb27855 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 13 Aug 2020 20:38:58 -0400 Subject: [PATCH 046/142] Templated parse methods --- gtsam/nonlinear/Values.h | 2 +- gtsam/sam/BearingRangeFactor.h | 15 +- gtsam/sfm/BinaryMeasurement.h | 93 ++-- gtsam/slam/dataset.cpp | 763 +++++++++++++++++-------------- gtsam/slam/dataset.h | 79 ++-- gtsam/slam/tests/testDataset.cpp | 26 +- timing/timeFrobeniusFactor.cpp | 2 +- 7 files changed, 530 insertions(+), 450 deletions(-) diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index aadbcf394..b49188b68 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -397,7 +397,7 @@ namespace gtsam { template size_t count() const { size_t i = 0; - for (const auto& key_value : *this) { + for (const auto key_value : *this) { if (dynamic_cast*>(&key_value.value)) ++i; } diff --git a/gtsam/sam/BearingRangeFactor.h b/gtsam/sam/BearingRangeFactor.h index af7b47446..b8e231915 100644 --- a/gtsam/sam/BearingRangeFactor.h +++ b/gtsam/sam/BearingRangeFactor.h @@ -42,12 +42,19 @@ class BearingRangeFactor public: typedef boost::shared_ptr shared_ptr; - /// default constructor + /// Default constructor BearingRangeFactor() {} - /// primary constructor - BearingRangeFactor(Key key1, Key key2, const B& measuredBearing, - const R& measuredRange, const SharedNoiseModel& model) + /// Construct from BearingRange instance + BearingRangeFactor(Key key1, Key key2, const T &bearingRange, + const SharedNoiseModel &model) + : Base({key1, key2}, model, T(bearingRange)) { + this->initialize(expression({key1, key2})); + } + + /// Construct from separate bearing and range + BearingRangeFactor(Key key1, Key key2, const B &measuredBearing, + const R &measuredRange, const SharedNoiseModel &model) : Base({key1, key2}, model, T(measuredBearing, measuredRange)) { this->initialize(expression({key1, key2})); } diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h index c4b4a9804..c525c1b9e 100644 --- a/gtsam/sfm/BinaryMeasurement.h +++ b/gtsam/sfm/BinaryMeasurement.h @@ -15,78 +15,69 @@ * @file BinaryMeasurement.h * @author Akshay Krishnan * @date July 2020 - * @brief Binary measurement represents a measurement between two keys in a graph. - * A binary measurement is similar to a BetweenFactor, except that it does not contain - * an error function. It is a measurement (along with a noise model) from one key to - * another. Note that the direction is important. A measurement from key1 to key2 is not - * the same as the same measurement from key2 to key1. + * @brief Binary measurement represents a measurement between two keys in a + * graph. A binary measurement is similar to a BetweenFactor, except that it + * does not contain an error function. It is a measurement (along with a noise + * model) from one key to another. Note that the direction is important. 
A + * measurement from key1 to key2 is not the same as the same measurement from + * key2 to key1. */ -#include - #include -#include -#include - +#include #include #include +#include +#include + namespace gtsam { -template -class BinaryMeasurement { - // Check that VALUE type is testable - BOOST_CONCEPT_ASSERT((IsTestable)); - - public: - typedef VALUE T; +template class BinaryMeasurement : public Factor { + // Check that T type is testable + BOOST_CONCEPT_ASSERT((IsTestable)); +public: // shorthand for a smart pointer to a measurement - typedef typename boost::shared_ptr shared_ptr; + using shared_ptr = typename boost::shared_ptr; - private: - Key key1_, key2_; /** Keys */ +private: + T measured_; ///< The measurement + SharedNoiseModel noiseModel_; ///< Noise model - VALUE measured_; /** The measurement */ +public: + BinaryMeasurement(Key key1, Key key2, const T &measured, + const SharedNoiseModel &model = nullptr) + : Factor(std::vector({key1, key2})), measured_(measured), + noiseModel_(model) {} - SharedNoiseModel noiseModel_; /** Noise model */ - - public: - /** Constructor */ - BinaryMeasurement(Key key1, Key key2, const VALUE &measured, - const SharedNoiseModel &model = nullptr) : - key1_(key1), key2_(key2), measured_(measured), noiseModel_(model) { - } - - Key key1() const { return key1_; } - - Key key2() const { return key2_; } + /// @name Standard Interface + /// @{ + Key key1() const { return keys_[0]; } + Key key2() const { return keys_[1]; } + const T &measured() const { return measured_; } const SharedNoiseModel &noiseModel() const { return noiseModel_; } - /** implement functions needed for Testable */ + /// @} + /// @name Testable + /// @{ - /** print */ - void print(const std::string &s, const KeyFormatter &keyFormatter = DefaultKeyFormatter) const { - std::cout << s << "BinaryMeasurement(" - << keyFormatter(this->key1()) << "," + void print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const { + std::cout << s << "BinaryMeasurement(" << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ")\n"; traits::Print(measured_, " measured: "); this->noiseModel_->print(" noise model: "); } - /** equals */ bool equals(const BinaryMeasurement &expected, double tol = 1e-9) const { - const BinaryMeasurement *e = dynamic_cast *> (&expected); - return e != nullptr && key1_ == e->key1_ && - key2_ == e->key2_ - && traits::Equals(this->measured_, e->measured_, tol) && - noiseModel_->equals(*expected.noiseModel()); + const BinaryMeasurement *e = + dynamic_cast *>(&expected); + return e != nullptr && Factor::equals(*e) && + traits::Equals(this->measured_, e->measured_, tol) && + noiseModel_->equals(*expected.noiseModel()); } - - /** return the measured value */ - VALUE measured() const { - return measured_; - } -}; // \class BetweenMeasurement -} \ No newline at end of file + /// @} +}; +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index af8904e49..11473d084 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -108,11 +108,94 @@ string createRewrittenFileName(const string &name) { } /* ************************************************************************* */ -GraphAndValues load2D(pair dataset, Key maxKey, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { - return load2D(dataset.first, dataset.second, maxKey, addNoise, smart, - noiseFormat, kernelFunctionType); +// Parse a file by calling the parse(is, tag) function 
for every line. +template +using Parser = + std::function(istream &is, const string &tag)>; + +template +static void parseLines(const string &filename, Parser parse) { + ifstream is(filename.c_str()); + if (!is) + throw invalid_argument("parse: can not find file " + filename); + while (!is.eof()) { + string tag; + is >> tag; + parse(is, tag); // ignore return value + is.ignore(LINESIZE, '\n'); + } +} + +/* ************************************************************************* */ +// Parse types T into a Key-indexed map +template +map parseToMap(const string &filename, Parser> parse, + Key maxKey) { + ifstream is(filename.c_str()); + if (!is) + throw invalid_argument("parse: can not find file " + filename); + + map result; + Parser> emplace = [&result, parse](istream &is, + const string &tag) { + if (auto t = parse(is, tag)) + result.emplace(*t); + return boost::none; + }; + parseLines(filename, emplace); + return result; +} + +/* ************************************************************************* */ +// Parse a file and push results on a vector +// TODO(frank): do we need this *and* parseMeasurements? +template +static vector parseToVector(const string &filename, Parser parse) { + vector result; + Parser add = [&result, parse](istream &is, const string &tag) { + if (auto t = parse(is, tag)) + result.push_back(*t); + return boost::none; + }; + parseLines(filename, add); + return result; +} + +/* ************************************************************************* */ +boost::optional parseVertexPose(istream &is, const string &tag) { + if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { + Key id; + double x, y, yaw; + is >> id >> x >> y >> yaw; + return IndexedPose(id, Pose2(x, y, yaw)); + } else { + return boost::none; + } +} + +template <> +std::map parseVariables(const std::string &filename, + Key maxKey) { + return parseToMap(filename, parseVertexPose, maxKey); +} + +/* ************************************************************************* */ +boost::optional parseVertexLandmark(istream &is, + const string &tag) { + if (tag == "VERTEX_XY") { + Key id; + double x, y; + is >> id >> x >> y; + return IndexedLandmark(id, Point2(x, y)); + } else { + return boost::none; + } +} + +template <> +std::map parseVariables(const std::string &filename, + Key maxKey) { + return parseToMap(filename, parseVertexLandmark, maxKey); } /* ************************************************************************* */ @@ -199,107 +282,6 @@ createNoiseModel(const Vector6 v, bool smart, NoiseFormat noiseFormat, } } -/* ************************************************************************* */ -boost::optional parseVertexPose(istream &is, const string &tag) { - if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { - Key id; - double x, y, yaw; - is >> id >> x >> y >> yaw; - return IndexedPose(id, Pose2(x, y, yaw)); - } else { - return boost::none; - } -} - -/* ************************************************************************* */ -boost::optional parseVertexLandmark(istream &is, - const string &tag) { - if (tag == "VERTEX_XY") { - Key id; - double x, y; - is >> id >> x >> y; - return IndexedLandmark(id, Point2(x, y)); - } else { - return boost::none; - } -} - -/* ************************************************************************* */ -// Parse types T into a Key-indexed map -template -static map parseIntoMap(const string &filename, Key maxKey = 0) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse: can not find file " + 
filename); - - map result; - string tag; - while (!is.eof()) { - boost::optional> indexed_t; - is >> indexed_t; - if (indexed_t) { - // optional filter - if (maxKey && indexed_t->first >= maxKey) - continue; - result.insert(*indexed_t); - } - is.ignore(LINESIZE, '\n'); - } - return result; -} - -/* ************************************************************************* */ -istream &operator>>(istream &is, boost::optional &indexed) { - string tag; - is >> tag; - indexed = parseVertexPose(is, tag); - return is; -} - -map parse2DPoses(const string &filename, Key maxKey) { - return parseIntoMap(filename, maxKey); -} - -/* ************************************************************************* */ -istream &operator>>(istream &is, boost::optional &indexed) { - string tag; - is >> tag; - indexed = parseVertexLandmark(is, tag); - return is; -} - -map parse2DLandmarks(const string &filename, Key maxKey) { - return parseIntoMap(filename, maxKey); -} - -/* ************************************************************************* */ -// Parse a file by first parsing the `Parsed` type and then processing it with -// the supplied `process` function. Result is put in a vector evaluates to true. -// Requires: a stream >> operator for boost::optional -template -static vector -parseToVector(const string &filename, - const std::function process) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse: can not find file " + filename); - - vector result; - string tag; - while (!is.eof()) { - boost::optional parsed; - is >> parsed; - if (parsed) { - Output factor = process(*parsed); // can return nullptr - if (factor) { - result.push_back(factor); - } - } - is.ignore(LINESIZE, '\n'); - } - return result; -} - /* ************************************************************************* */ boost::optional parseEdge(istream &is, const string &tag) { if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") || @@ -315,29 +297,74 @@ boost::optional parseEdge(istream &is, const string &tag) { } /* ************************************************************************* */ -struct Measurement2 { - Key id1, id2; - Pose2 pose; - Vector6 v; // 6 noise model parameters for edge -}; - -istream &operator>>(istream &is, boost::optional &edge) { - string tag; - is >> tag; - if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") || - (tag == "ODOMETRY")) { - Key id1, id2; - double x, y, yaw; - Vector6 v; - is >> id1 >> id2 >> x >> y >> yaw >> // - v(0) >> v(1) >> v(2) >> v(3) >> v(4) >> v(5); - edge.reset({id1, id2, Pose2(x, y, yaw), v}); - } - return is; -} +// Measurement parsers are implemented as a functor, as they has several options +// to filter and create the measurement model. 
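// A usage sketch for the templated parse entry points introduced by this
// patch (parseVariables, parseMeasurements, parseFactors). It assumes the
// matching template declarations, with default arguments, are exposed in
// dataset.h as the diffstat suggests; the file name is a placeholder.
#include <gtsam/slam/dataset.h>
#include <iostream>

void templatedParseExample() {
  using namespace gtsam;
  const std::string file = "noisyToyGraph.txt";

  // Key-indexed Pose2 initial guesses from VERTEX2 / VERTEX_SE2 rows.
  std::map<Key, Pose2> poses = parseVariables<Pose2>(file);

  // Raw relative-pose measurements, keeping the noise model from the file.
  std::vector<BinaryMeasurement<Pose2>> measurements =
      parseMeasurements<Pose2>(file);
  for (const auto& m : measurements)
    std::cout << m.key1() << " -> " << m.key2() << std::endl;

  // The same measurements, wrapped as BetweenFactor<Pose2> shared pointers.
  NonlinearFactorGraph graph;
  for (const auto& f : parseFactors<Pose2>(file))
    graph.push_back(f);
}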
+template struct ParseMeasurement; /* ************************************************************************* */ -// Create a sampler +// Converting from Measurement to BetweenFactor is generic +template struct ParseFactor : ParseMeasurement { + ParseFactor(const ParseMeasurement &parent) + : ParseMeasurement(parent) {} + + // We parse a measurement then convert + typename boost::optional::shared_ptr> + operator()(istream &is, const string &tag) { + if (auto m = ParseMeasurement::operator()(is, tag)) + return boost::make_shared>( + m->key1(), m->key2(), m->measured(), m->noiseModel()); + else + return boost::none; + } +}; + +/* ************************************************************************* */ +// Pose2 measurement parser +template <> struct ParseMeasurement { + // The arguments + boost::shared_ptr sampler; + Key maxKey; + + // For noise model creation + bool smart; + NoiseFormat noiseFormat; + KernelFunctionType kernelFunctionType; + + // If this is not null, will use instead of parsed model: + SharedNoiseModel model; + + // The actual parser + boost::optional> operator()(istream &is, + const string &tag) { + auto edge = parseEdge(is, tag); + if (!edge) + return boost::none; + + // parse noise model + Vector6 v; + is >> v(0) >> v(1) >> v(2) >> v(3) >> v(4) >> v(5); + + // optional filter + Key id1, id2; + tie(id1, id2) = edge->first; + if (maxKey && (id1 >= maxKey || id2 >= maxKey)) + return boost::none; + + // Get pose and optionally add noise + Pose2 &pose = edge->second; + if (sampler) + pose = pose.retract(sampler->sample()); + + // emplace measurement + auto modelFromFile = + createNoiseModel(v, smart, noiseFormat, kernelFunctionType); + return BinaryMeasurement(id1, id2, pose, + model ? model : modelFromFile); + } +}; + +/* ************************************************************************* */ +// Create a sampler to corrupt a measurement boost::shared_ptr createSampler(const SharedNoiseModel &model) { auto noise = boost::dynamic_pointer_cast(model); if (!noise) @@ -347,81 +374,75 @@ boost::shared_ptr createSampler(const SharedNoiseModel &model) { } /* ************************************************************************* */ -// Small functor to process the edge into a BetweenFactor::shared_ptr -struct ProcessPose2 { - // The arguments - Key maxKey; - SharedNoiseModel model; - boost::shared_ptr sampler; - - // Arguments for parsing noise model - bool smart; - NoiseFormat noiseFormat; - KernelFunctionType kernelFunctionType; - - ProcessPose2(Key maxKey = 0, SharedNoiseModel model = nullptr, - boost::shared_ptr sampler = nullptr, bool smart = true, - NoiseFormat noiseFormat = NoiseFormatAUTO, - KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE) - : maxKey(maxKey), model(model), sampler(sampler), smart(smart), - noiseFormat(noiseFormat), kernelFunctionType(kernelFunctionType) {} - - // The actual function - BetweenFactor::shared_ptr operator()(const Measurement2 &edge) { - // optional filter - if (maxKey && (edge.id1 >= maxKey || edge.id2 >= maxKey)) - return nullptr; - - // Get pose and optionally add noise - Pose2 T12 = edge.pose; - if (sampler) - T12 = T12.retract(sampler->sample()); - - // Create factor - // If model is nullptr we use the model from the file - return boost::make_shared>( - edge.id1, edge.id2, T12, - model - ? 
model - : createNoiseModel(edge.v, smart, noiseFormat, kernelFunctionType)); - } -}; - -/* ************************************************************************* */ -BetweenFactorPose2s -parse2DFactors(const string &filename, - const noiseModel::Diagonal::shared_ptr &corruptingNoise, - Key maxKey) { - ProcessPose2 process(maxKey, nullptr, - corruptingNoise ? createSampler(corruptingNoise) - : nullptr); - - return parseToVector::shared_ptr>(filename, - process); +// Implementation of parseMeasurements for Pose2 +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { + ParseMeasurement parse{model ? createSampler(model) : nullptr, maxKey, + true, NoiseFormatAUTO, KernelFunctionTypeNONE}; + return parseToVector>(filename, parse); } /* ************************************************************************* */ -static void parseOthers(const string &filename, Key maxKey, - NonlinearFactorGraph::shared_ptr graph, - Values::shared_ptr initial) { - // Do a pass to get other types of measurements - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("load2D: can not find file " + filename); +// Implementation of parseMeasurements for Rot2, which converts from Pose2 - Key id1, id2; - while (!is.eof()) { - string tag; - if (!(is >> tag)) - break; +// Extract Rot2 measurement from Pose2 measurement +static BinaryMeasurement convert(const BinaryMeasurement &p) { + auto gaussian = + boost::dynamic_pointer_cast(p.noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose2 measurements " + "with Gaussian noise models."); + const Matrix3 M = gaussian->covariance(); + return BinaryMeasurement(p.key1(), p.key2(), p.measured().rotation(), + noiseModel::Isotropic::Variance(1, M(2, 2))); +} - // Parse measurements - bool haveLandmark = false; +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { + auto poses = parseMeasurements(filename, model, maxKey); + std::vector> result; + result.reserve(poses.size()); + for (const auto &p : poses) + result.push_back(convert(p)); + return result; +} + +/* ************************************************************************* */ +// Implementation of parseFactors for Pose2 +template <> +std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { + ParseFactor parse({model ? 
createSampler(model) : nullptr, maxKey, + true, NoiseFormatAUTO, KernelFunctionTypeNONE, + nullptr}); + return parseToVector::shared_ptr>(filename, parse); +} + +/* ************************************************************************* */ +using BearingRange2D = BearingRange; + +template <> struct ParseMeasurement { + // arguments + Key maxKey; + + // The actual parser + boost::optional> + operator()(istream &is, const string &tag) { + if (tag != "BR" && tag != "LANDMARK") + return boost::none; + + Key id1, id2; + is >> id1 >> id2; double bearing, range, bearing_std, range_std; - // A bearing-range measurement if (tag == "BR") { - is >> id1 >> id2 >> bearing >> range >> bearing_std >> range_std; + is >> bearing >> range >> bearing_std >> range_std; } // A landmark measurement, converted to bearing-range @@ -429,7 +450,7 @@ static void parseOthers(const string &filename, Key maxKey, double lmx, lmy; double v1, v2, v3; - is >> id1 >> id2 >> lmx >> lmy >> v1 >> v2 >> v3; + is >> lmx >> lmy >> v1 >> v2 >> v3; // Convert x,y to bearing,range bearing = atan2(lmy, lmx); @@ -442,90 +463,93 @@ static void parseOthers(const string &filename, Key maxKey, bearing_std = sqrt(v1 / 10.0); range_std = sqrt(v1); } else { + // TODO(frank): we are ignoring the non-uniform covariance bearing_std = 1; range_std = 1; - if (!haveLandmark) { - cout << "Warning: load2D is a very simple dataset loader and is " - "ignoring the\n" - "non-uniform covariance on LANDMARK measurements in this " - "file." - << endl; - haveLandmark = true; - } } } - // Do some common stuff for bearing-range measurements - if (tag == "LANDMARK" || tag == "BR") { + // optional filter + if (maxKey && id1 >= maxKey) + return boost::none; - // optional filter - if (maxKey && id1 >= maxKey) - continue; + // Create noise model + auto measurementNoise = noiseModel::Diagonal::Sigmas( + (Vector(2) << bearing_std, range_std).finished()); - // Create noise model - noiseModel::Diagonal::shared_ptr measurementNoise = - noiseModel::Diagonal::Sigmas( - (Vector(2) << bearing_std, range_std).finished()); - - // Add to graph - *graph += BearingRangeFactor(id1, L(id2), bearing, range, - measurementNoise); - - // Insert poses or points if they do not exist yet - if (!initial->exists(id1)) - initial->insert(id1, Pose2()); - if (!initial->exists(L(id2))) { - Pose2 pose = initial->at(id1); - Point2 local(cos(bearing) * range, sin(bearing) * range); - Point2 global = pose.transformFrom(local); - initial->insert(L(id2), global); - } - } - is.ignore(LINESIZE, '\n'); + return BinaryMeasurement( + id1, L(id2), BearingRange2D(bearing, range), measurementNoise); } -} +}; /* ************************************************************************* */ -GraphAndValues load2D(const string &filename, SharedNoiseModel model, Key maxKey, - bool addNoise, bool smart, NoiseFormat noiseFormat, +GraphAndValues load2D(const string &filename, SharedNoiseModel model, + Key maxKey, bool addNoise, bool smart, + NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { - Values::shared_ptr initial(new Values); + auto graph = boost::make_shared(); + auto initial = boost::make_shared(); - const auto poses = parse2DPoses(filename, maxKey); + // TODO(frank): do a single pass + const auto poses = parseVariables(filename, maxKey); for (const auto &key_pose : poses) { initial->insert(key_pose.first, key_pose.second); } - const auto landmarks = parse2DLandmarks(filename, maxKey); + + const auto landmarks = parseVariables(filename, maxKey); for (const auto &key_landmark : landmarks) { 
- initial->insert(key_landmark.first, key_landmark.second); + initial->insert(L(key_landmark.first), key_landmark.second); } - // Parse the pose constraints - ProcessPose2 process(maxKey, model, addNoise ? createSampler(model) : nullptr, - smart, noiseFormat, kernelFunctionType); - auto factors = parseToVector::shared_ptr>( - filename, process); + // Create parser for Pose2 and bearing/range + ParseFactor parseBetweenFactor( + {addNoise ? createSampler(model) : nullptr, maxKey, smart, noiseFormat, + kernelFunctionType, model}); + ParseMeasurement parseBearingRange{maxKey}; - // Add factors into the graph and add poses if necessary - auto graph = boost::make_shared(); - for (const auto &f : factors) { - graph->push_back(f); + Parser parse = [&](istream &is, const string &tag) { + if (auto f = parseBetweenFactor(is, tag)) { + graph->push_back(*f); - // Insert vertices if pure odometry file - Key id1 = f->key1(), id2 = f->key2(); - if (!initial->exists(id1)) - initial->insert(id1, Pose2()); - if (!initial->exists(id2)) - initial->insert(id2, initial->at(id1) * f->measured()); - } + // Insert vertices if pure odometry file + Key key1 = (*f)->key1(), key2 = (*f)->key2(); + if (!initial->exists(key1)) + initial->insert(key1, Pose2()); + if (!initial->exists(key2)) + initial->insert(key2, initial->at(key1) * (*f)->measured()); + } else if (auto m = parseBearingRange(is, tag)) { + Key key1 = m->key1(), key2 = m->key2(); + BearingRange2D br = m->measured(); + graph->emplace_shared>(key1, key2, br, + m->noiseModel()); - // Do a pass to parse landmarks and bearing-range measurements - parseOthers(filename, maxKey, graph, initial); + // Insert poses or points if they do not exist yet + if (!initial->exists(key1)) + initial->insert(key1, Pose2()); + if (!initial->exists(key2)) { + Pose2 pose = initial->at(key1); + Point2 local = br.bearing() * Point2(br.range(), 0); + Point2 global = pose.transformFrom(local); + initial->insert(key2, global); + } + } + return 0; + }; + + parseLines(filename, parse); return make_pair(graph, initial); } +/* ************************************************************************* */ +GraphAndValues load2D(pair dataset, Key maxKey, + bool addNoise, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { + return load2D(dataset.first, dataset.second, maxKey, addNoise, smart, + noiseFormat, kernelFunctionType); +} + /* ************************************************************************* */ GraphAndValues load2D_robust(const string &filename, noiseModel::Base::shared_ptr &model, Key maxKey) { @@ -540,7 +564,7 @@ void save2D(const NonlinearFactorGraph &graph, const Values &config, fstream stream(filename.c_str(), fstream::out); // save poses - for (const Values::ConstKeyValuePair &key_value : config) { + for (const Values::ConstKeyValuePair key_value : config) { const Pose2 &pose = key_value.value.cast(); stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y() << " " << pose.theta() << endl; @@ -586,7 +610,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, fstream stream(filename.c_str(), fstream::out); // save 2D poses - for (const auto &key_value : estimate) { + for (const auto key_value : estimate) { auto p = dynamic_cast *>(&key_value.value); if (!p) continue; @@ -596,7 +620,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, } // save 3D poses - for (const auto &key_value : estimate) { + for (const auto key_value : estimate) { auto p = dynamic_cast *>(&key_value.value); 
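As a usage sketch of the single-pass load2D above (the dataset name is a hypothetical choice, and the call relies on the default arguments declared in dataset.h):

    // Sketch assumes #include <gtsam/slam/dataset.h> and using namespace gtsam.
    void load2DSketch() {
      const std::string filename = findExampleDataFile("w100.graph");  // hypothetical dataset
      GraphAndValues data = load2D(filename);  // one pass: factors plus an initial estimate
      NonlinearFactorGraph::shared_ptr graph = data.first;
      Values::shared_ptr initial = data.second;
      graph->print("factors:\n");
      initial->print("initial estimate:\n");
    }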
if (!p) continue; @@ -609,7 +633,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, } // save 2D landmarks - for (const auto &key_value : estimate) { + for (const auto key_value : estimate) { auto p = dynamic_cast *>(&key_value.value); if (!p) continue; @@ -619,7 +643,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, } // save 3D landmarks - for (const auto &key_value : estimate) { + for (const auto key_value : estimate) { auto p = dynamic_cast *>(&key_value.value); if (!p) continue; @@ -709,44 +733,46 @@ istream &operator>>(istream &is, Rot3 &R) { } /* ************************************************************************* */ -istream &operator>>(istream &is, boost::optional> &indexed) { - string tag; - is >> tag; +boost::optional> parseVertexPose3(istream &is, + const string &tag) { if (tag == "VERTEX3") { Key id; double x, y, z; Rot3 R; is >> id >> x >> y >> z >> R; - indexed.reset({id, Pose3(R, {x, y, z})}); + return make_pair(id, Pose3(R, {x, y, z})); } else if (tag == "VERTEX_SE3:QUAT") { Key id; double x, y, z; Quaternion q; is >> id >> x >> y >> z >> q; - indexed.reset({id, Pose3(q, {x, y, z})}); - } - return is; + return make_pair(id, Pose3(q, {x, y, z})); + } else + return boost::none; } -map parse3DPoses(const string &filename, Key maxKey) { - return parseIntoMap(filename, maxKey); +template <> +std::map parseVariables(const std::string &filename, + Key maxKey) { + return parseToMap(filename, parseVertexPose3, maxKey); } /* ************************************************************************* */ -istream &operator>>(istream &is, boost::optional> &indexed) { - string tag; - is >> tag; +boost::optional> parseVertexPoint3(istream &is, + const string &tag) { if (tag == "VERTEX_TRACKXYZ") { Key id; double x, y, z; is >> id >> x >> y >> z; - indexed.reset({id, Point3(x, y, z)}); - } - return is; + return make_pair(id, Point3(x, y, z)); + } else + return boost::none; } -map parse3DLandmarks(const string &filename, Key maxKey) { - return parseIntoMap(filename, maxKey); +template <> +std::map parseVariables(const std::string &filename, + Key maxKey) { + return parseToMap(filename, parseVertexPoint3, maxKey); } /* ************************************************************************* */ @@ -761,97 +787,126 @@ istream &operator>>(istream &is, Matrix6 &m) { } /* ************************************************************************* */ -struct Measurement3 { - Key id1, id2; - Pose3 pose; - SharedNoiseModel model; +// Pose3 measurement parser +template <> struct ParseMeasurement { + // The arguments + boost::shared_ptr sampler; + Key maxKey; + + // The actual parser + boost::optional> operator()(istream &is, + const string &tag) { + if (tag != "EDGE3" && tag != "EDGE_SE3:QUAT") + return boost::none; + + // parse ids and optionally filter + Key id1, id2; + is >> id1 >> id2; + if (maxKey && (id1 >= maxKey || id2 >= maxKey)) + return boost::none; + + Matrix6 m; + if (tag == "EDGE3") { + double x, y, z; + Rot3 R; + is >> x >> y >> z >> R >> m; + Pose3 T12(R, {x, y, z}); + // optionally add noise + if (sampler) + T12 = T12.retract(sampler->sample()); + + return BinaryMeasurement(id1, id2, T12, + noiseModel::Gaussian::Information(m)); + } else if (tag == "EDGE_SE3:QUAT") { + double x, y, z; + Quaternion q; + is >> x >> y >> z >> q >> m; + + Pose3 T12(q, {x, y, z}); + // optionally add noise + if (sampler) + T12 = T12.retract(sampler->sample()); + + // EDGE_SE3:QUAT stores covariance in t,R order, unlike GTSAM: + Matrix6 mgtsam; + 
mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation + mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation + mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal + mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal + SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); + + return BinaryMeasurement( + id1, id2, T12, noiseModel::Gaussian::Information(mgtsam)); + } else + return boost::none; + } }; -istream &operator>>(istream &is, boost::optional &edge) { - string tag; - is >> tag; - Matrix6 m; - if (tag == "EDGE3") { - Key id1, id2; - double x, y, z; - Rot3 R; - is >> id1 >> id2 >> x >> y >> z >> R >> m; - edge.reset( - {id1, id2, Pose3(R, {x, y, z}), noiseModel::Gaussian::Information(m)}); - } - if (tag == "EDGE_SE3:QUAT") { - Key id1, id2; - double x, y, z; - Quaternion q; - is >> id1 >> id2 >> x >> y >> z >> q >> m; - - // EDGE_SE3:QUAT stores covariance in t,R order, unlike GTSAM: - Matrix6 mgtsam; - mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation - mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation - mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal - mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal - SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); - - edge.reset({id1, id2, Pose3(q, {x, y, z}), - noiseModel::Gaussian::Information(mgtsam)}); - } - return is; +/* ************************************************************************* */ +// Implementation of parseMeasurements for Pose3 +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { + ParseMeasurement parse{model ? createSampler(model) : nullptr, maxKey}; + return parseToVector>(filename, parse); } /* ************************************************************************* */ -// Small functor to process the edge into a BetweenFactor::shared_ptr -struct ProcessPose3 { - // The arguments - Key maxKey = 0; - SharedNoiseModel model; - boost::shared_ptr sampler; +// Implementation of parseMeasurements for Rot3, which converts from Pose3 - // The actual function - BetweenFactor::shared_ptr operator()(const Measurement3 &edge) { - // optional filter - if (maxKey && (edge.id1 >= maxKey || edge.id2 >= maxKey)) - return nullptr; +// Extract Rot3 measurement from Pose3 measurement +static BinaryMeasurement convert(const BinaryMeasurement &p) { + auto gaussian = + boost::dynamic_pointer_cast(p.noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose3 measurements " + "with Gaussian noise models."); + const Matrix6 M = gaussian->covariance(); + return BinaryMeasurement( + p.key1(), p.key2(), p.measured().rotation(), + noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3), true)); +} - // Get pose and optionally add noise - Pose3 T12 = edge.pose; - if (sampler) - T12 = T12.retract(sampler->sample()); - - // Create factor - return boost::make_shared>(edge.id1, edge.id2, T12, - edge.model); - } -}; +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { + auto poses = parseMeasurements(filename, model, maxKey); + std::vector> result; + result.reserve(poses.size()); + for (const auto &p : poses) + result.push_back(convert(p)); + return result; +} /* ************************************************************************* */ -BetweenFactorPose3s -parse3DFactors(const string &filename, - 
const noiseModel::Diagonal::shared_ptr &corruptingNoise, - Key maxKey) { - ProcessPose3 process; - process.maxKey = maxKey; - if (corruptingNoise) { - process.sampler = createSampler(corruptingNoise); - } - return parseToVector::shared_ptr>(filename, - process); +// Implementation of parseFactors for Pose3 +template <> +std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { + ParseFactor parse({model ? createSampler(model) : nullptr, maxKey}); + return parseToVector::shared_ptr>(filename, parse); } /* ************************************************************************* */ GraphAndValues load3D(const string &filename) { - const auto factors = parse3DFactors(filename); - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); + auto graph = boost::make_shared(); + auto initial = boost::make_shared(); + + // TODO(frank): do a single pass + const auto factors = parseFactors(filename); for (const auto &factor : factors) { graph->push_back(factor); } - Values::shared_ptr initial(new Values); - - const auto poses = parse3DPoses(filename); + const auto poses = parseVariables(filename); for (const auto &key_pose : poses) { initial->insert(key_pose.first, key_pose.second); } + const auto landmarks = parse3DLandmarks(filename); for (const auto &key_landmark : landmarks) { initial->insert(key_landmark.first, key_landmark.second); @@ -1200,4 +1255,20 @@ Values initialCamerasAndPointsEstimate(const SfmData &db) { return initial; } +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 +std::map parse3DPoses(const std::string &filename, Key maxKey) { + return parseVariables(filename, maxKey); +} + +std::map parse3DLandmarks(const std::string &filename, + Key maxKey) { + return parseVariables(filename, maxKey); +} + +BetweenFactorPose3s +parse3DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { + return parseFactors(filename, model, maxKey); +} +#endif } // namespace gtsam diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 61eb9256e..38ddfd1dc 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -20,6 +20,7 @@ #pragma once +#include #include #include #include @@ -72,6 +73,34 @@ enum KernelFunctionType { KernelFunctionTypeNONE, KernelFunctionTypeHUBER, KernelFunctionTypeTUKEY }; +/** + * Parse variables in 2D g2o graph file into a map. + * Instantiated in .cpp T equal to Pose2, Pose3 + */ +template +GTSAM_EXPORT std::map parseVariables(const std::string &filename, + Key maxKey = 0); + +/** + * Parse edges in 2D g2o graph file into a set of binary measuremnts. + * Instantiated in .cpp for T equal to Pose2, Pose3 + */ +template +GTSAM_EXPORT std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + Key maxKey = 0); + +/** + * Parse edges in 2D g2o graph file into a set of BetweenFactors. 
+ * Instantiated in .cpp T equal to Pose2, Pose3 + */ +template +GTSAM_EXPORT std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + Key maxKey = 0); + /// Return type for auxiliary functions typedef std::pair IndexedPose; typedef std::pair IndexedLandmark; @@ -90,7 +119,6 @@ GTSAM_EXPORT boost::optional parseVertexPose(std::istream& is, * @param is input stream * @param tag string parsed from input stream, will only parse if vertex type */ - GTSAM_EXPORT boost::optional parseVertexLandmark(std::istream& is, const std::string& tag); @@ -102,23 +130,6 @@ GTSAM_EXPORT boost::optional parseVertexLandmark(std::istream& GTSAM_EXPORT boost::optional parseEdge(std::istream& is, const std::string& tag); -using BetweenFactorPose2s = - std::vector::shared_ptr>; - -/// Parse edges in 2D g2o graph file into a set of BetweenFactors. -GTSAM_EXPORT BetweenFactorPose2s parse2DFactors( - const std::string &filename, - const noiseModel::Diagonal::shared_ptr &corruptingNoise = nullptr, - Key maxKey = 0); - -/// Parse vertices in 2D g2o graph file into a map of Pose2s. -GTSAM_EXPORT std::map parse2DPoses(const std::string &filename, - Key maxKey = 0); - -/// Parse landmarks in 2D g2o graph file into a map of Point2s. -GTSAM_EXPORT std::map parse2DLandmarks(const string &filename, - Key maxKey = 0); - /// Return type for load functions using GraphAndValues = std::pair; @@ -183,21 +194,6 @@ GTSAM_EXPORT GraphAndValues readG2o(const std::string& g2oFile, const bool is3D GTSAM_EXPORT void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, const std::string& filename); -/// Parse edges in 3D TORO graph file into a set of BetweenFactors. -using BetweenFactorPose3s = std::vector::shared_ptr>; -GTSAM_EXPORT BetweenFactorPose3s parse3DFactors( - const std::string &filename, - const noiseModel::Diagonal::shared_ptr &corruptingNoise = nullptr, - Key maxKey = 0); - -/// Parse vertices in 3D TORO/g2o graph file into a map of Pose3s. -GTSAM_EXPORT std::map parse3DPoses(const std::string &filename, - Key maxKey = 0); - -/// Parse landmarks in 3D g2o graph file into a map of Point3s. 
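Since the old non-templated parsers leave the public header here (surviving only behind GTSAM_ALLOW_DEPRECATED_SINCE_V41), a quick sketch of the old-to-new mapping; `filename` is a placeholder path:

    // Sketch assumes #include <gtsam/slam/dataset.h> and using namespace gtsam.
    void migrationSketch(const std::string &filename) {
      auto poses2D     = parseVariables<Pose2>(filename);   // was parse2DPoses
      auto landmarks2D = parseVariables<Point2>(filename);  // was parse2DLandmarks
      auto factors2D   = parseFactors<Pose2>(filename);     // was parse2DFactors
      auto poses3D     = parseVariables<Pose3>(filename);   // was parse3DPoses
      auto landmarks3D = parseVariables<Point3>(filename);  // was parse3DLandmarks
      auto factors3D   = parseFactors<Pose3>(filename);     // was parse3DFactors
    }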
-GTSAM_EXPORT std::map parse3DLandmarks(const std::string &filename, - Key maxKey = 0); - /// Load TORO 3D Graph GTSAM_EXPORT GraphAndValues load3D(const std::string& filename); @@ -335,14 +331,21 @@ GTSAM_EXPORT Values initialCamerasEstimate(const SfmData& db); GTSAM_EXPORT Values initialCamerasAndPointsEstimate(const SfmData& db); #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 -/** - * Parse TORO/G2O vertex "id x y yaw" - * @param is input stream - * @param tag string parsed from input stream, will only parse if vertex type - */ inline boost::optional parseVertex(std::istream &is, const std::string &tag) { return parseVertexPose(is, tag); } + +GTSAM_EXPORT std::map parse3DPoses(const std::string &filename, + Key maxKey = 0); + +GTSAM_EXPORT std::map parse3DLandmarks(const std::string &filename, + Key maxKey = 0); + +using BetweenFactorPose3s = std::vector::shared_ptr>; +GTSAM_EXPORT BetweenFactorPose3s parse3DFactors( + const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, Key maxKey = 0); + #endif } // namespace gtsam diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index bc65dc351..864f42162 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -104,8 +104,16 @@ TEST(dataSet, load2D) { boost::dynamic_pointer_cast>(graph->at(0)); EXPECT(assert_equal(expected, *actual)); - // Check factor parsing - const auto actualFactors = parse2DFactors(filename); + // Check binary measurements, Pose2 + auto measurements = parseMeasurements(filename, nullptr, 5); + EXPECT_LONGS_EQUAL(4, measurements.size()); + + // Check binary measurements, Rot2 + auto measurements2 = parseMeasurements(filename); + EXPECT_LONGS_EQUAL(300, measurements2.size()); + + // // Check factor parsing + const auto actualFactors = parseFactors(filename); for (size_t i : {0, 1, 2, 3, 4, 5}) { EXPECT(assert_equal( *boost::dynamic_pointer_cast>(graph->at(i)), @@ -113,13 +121,13 @@ TEST(dataSet, load2D) { } // Check pose parsing - const auto actualPoses = parse2DPoses(filename); + const auto actualPoses = parseVariables(filename); for (size_t j : {0, 1, 2, 3, 4}) { EXPECT(assert_equal(initial->at(j), actualPoses.at(j), 1e-5)); } // Check landmark parsing - const auto actualLandmarks = parse2DLandmarks(filename); + const auto actualLandmarks = parseVariables(filename); EXPECT_LONGS_EQUAL(0, actualLandmarks.size()); } @@ -202,7 +210,7 @@ TEST(dataSet, readG2o3D) { } // Check factor parsing - const auto actualFactors = parse3DFactors(g2oFile); + const auto actualFactors = parseFactors(g2oFile); for (size_t i : {0, 1, 2, 3, 4, 5}) { EXPECT(assert_equal( *boost::dynamic_pointer_cast>(expectedGraph[i]), @@ -210,7 +218,7 @@ TEST(dataSet, readG2o3D) { } // Check pose parsing - const auto actualPoses = parse3DPoses(g2oFile); + const auto actualPoses = parseVariables(g2oFile); for (size_t j : {0, 1, 2, 3, 4}) { EXPECT(assert_equal(poses[j], actualPoses.at(j), 1e-5)); } @@ -277,7 +285,7 @@ TEST(dataSet, readG2oCheckDeterminants) { const string g2oFile = findExampleDataFile("toyExample.g2o"); // Check determinants in factors - auto factors = parse3DFactors(g2oFile); + auto factors = parseFactors(g2oFile); EXPECT_LONGS_EQUAL(6, factors.size()); for (const auto& factor : factors) { const Rot3 R = factor->measured().rotation(); @@ -285,7 +293,7 @@ TEST(dataSet, readG2oCheckDeterminants) { } // Check determinants in initial values - const map poses = parse3DPoses(g2oFile); + const map poses = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(5, poses.size()); 
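The rotation-only measurement overloads (Rot2 checked above; Rot3 added earlier in dataset.cpp) reuse the pose parsers and keep only the rotation block of each covariance, as in convert(). A minimal consumer-side sketch for the 3D case, with a hypothetical file name:

    // Sketch assumes #include <gtsam/slam/dataset.h> and using namespace gtsam.
    void rotationMeasurementSketch() {
      const std::string g2oFile = findExampleDataFile("pose3example.txt");  // hypothetical choice
      auto rotations = parseMeasurements<Rot3>(g2oFile);  // Pose3 edges reduced to Rot3
      for (const auto &m : rotations) {
        const Rot3 &Rij = m.measured();                  // relative rotation between m.key1() and m.key2()
        const SharedNoiseModel &model = m.noiseModel();  // rotation sub-block of the original 6x6 covariance
        (void)Rij; (void)model;
      }
    }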
for (const auto& key_value : poses) { const Rot3 R = key_value.second.rotation(); @@ -300,7 +308,7 @@ TEST(dataSet, readG2oLandmarks) { const string g2oFile = findExampleDataFile("example_with_vertices.g2o"); // Check number of poses and landmarks. Should be 8 each. - const map poses = parse3DPoses(g2oFile); + const map poses = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(8, poses.size()); const map landmarks = parse3DLandmarks(g2oFile); EXPECT_LONGS_EQUAL(8, landmarks.size()); diff --git a/timing/timeFrobeniusFactor.cpp b/timing/timeFrobeniusFactor.cpp index 8bdda968f..44bb084cc 100644 --- a/timing/timeFrobeniusFactor.cpp +++ b/timing/timeFrobeniusFactor.cpp @@ -58,7 +58,7 @@ int main(int argc, char* argv[]) { // Read G2O file const auto factors = parse3DFactors(g2oFile); - const auto poses = parse3DPoses(g2oFile); + const auto poses = parseVariables(g2oFile); // Build graph NonlinearFactorGraph graph; From de7b56a491fde5ec3ed1536b4b8270787e97b2a5 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Fri, 14 Aug 2020 22:02:17 -0400 Subject: [PATCH 047/142] Turn off gcc build as it times out every time --- .travis.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 986e86cc2..d6c6cb156 100644 --- a/.travis.yml +++ b/.travis.yml @@ -112,11 +112,11 @@ jobs: compiler: gcc env: CMAKE_BUILD_TYPE=Release script: bash .travis.sh -t - - stage: test - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -t + # - stage: test + # os: linux + # compiler: gcc + # env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF + # script: bash .travis.sh -t - stage: test os: linux compiler: clang From 97537f2a36b85f62c46dc04bcd4ed23ec25c8b98 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 15 Aug 2020 08:06:15 -0400 Subject: [PATCH 048/142] Avoid clang warnings about double-brace initialization --- gtsam/sam/BearingRangeFactor.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gtsam/sam/BearingRangeFactor.h b/gtsam/sam/BearingRangeFactor.h index b8e231915..482dbb974 100644 --- a/gtsam/sam/BearingRangeFactor.h +++ b/gtsam/sam/BearingRangeFactor.h @@ -48,15 +48,15 @@ class BearingRangeFactor /// Construct from BearingRange instance BearingRangeFactor(Key key1, Key key2, const T &bearingRange, const SharedNoiseModel &model) - : Base({key1, key2}, model, T(bearingRange)) { - this->initialize(expression({key1, key2})); + : Base({{key1, key2}}, model, T(bearingRange)) { + this->initialize(expression({{key1, key2}})); } /// Construct from separate bearing and range BearingRangeFactor(Key key1, Key key2, const B &measuredBearing, const R &measuredRange, const SharedNoiseModel &model) - : Base({key1, key2}, model, T(measuredBearing, measuredRange)) { - this->initialize(expression({key1, key2})); + : Base({{key1, key2}}, model, T(measuredBearing, measuredRange)) { + this->initialize(expression({{key1, key2}})); } virtual ~BearingRangeFactor() {} From fc085626416b34c94379b9d76669a04746c87f56 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 15 Aug 2020 08:06:37 -0400 Subject: [PATCH 049/142] Hunted down deprecated use of parse3DLandmarks --- gtsam/slam/dataset.cpp | 2 +- gtsam/slam/tests/testDataset.cpp | 6 +++--- timing/timeFrobeniusFactor.cpp | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 11473d084..2aac939d9 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -907,7 
+907,7 @@ GraphAndValues load3D(const string &filename) { initial->insert(key_pose.first, key_pose.second); } - const auto landmarks = parse3DLandmarks(filename); + const auto landmarks = parseVariables(filename); for (const auto &key_landmark : landmarks) { initial->insert(key_landmark.first, key_landmark.second); } diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index 864f42162..3e804b6b0 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -224,7 +224,7 @@ TEST(dataSet, readG2o3D) { } // Check landmark parsing - const auto actualLandmarks = parse3DLandmarks(g2oFile); + const auto actualLandmarks = parseVariables(g2oFile); for (size_t j : {0, 1, 2, 3, 4}) { EXPECT(assert_equal(poses[j], actualPoses.at(j), 1e-5)); } @@ -299,7 +299,7 @@ TEST(dataSet, readG2oCheckDeterminants) { const Rot3 R = key_value.second.rotation(); EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9); } - const map landmarks = parse3DLandmarks(g2oFile); + const map landmarks = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(0, landmarks.size()); } @@ -310,7 +310,7 @@ TEST(dataSet, readG2oLandmarks) { // Check number of poses and landmarks. Should be 8 each. const map poses = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(8, poses.size()); - const map landmarks = parse3DLandmarks(g2oFile); + const map landmarks = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(8, landmarks.size()); } diff --git a/timing/timeFrobeniusFactor.cpp b/timing/timeFrobeniusFactor.cpp index 44bb084cc..c0ac28ed9 100644 --- a/timing/timeFrobeniusFactor.cpp +++ b/timing/timeFrobeniusFactor.cpp @@ -57,7 +57,7 @@ int main(int argc, char* argv[]) { } // Read G2O file - const auto factors = parse3DFactors(g2oFile); + const auto measurements = parseMeasurements(g2oFile); const auto poses = parseVariables(g2oFile); // Build graph @@ -66,12 +66,12 @@ int main(int argc, char* argv[]) { auto priorModel = noiseModel::Isotropic::Sigma(6, 10000); graph.add(PriorFactor(0, SOn::identity(4), priorModel)); auto G = boost::make_shared(SOn::VectorizedGenerators(4)); - for (const auto& factor : factors) { - const auto& keys = factor->keys(); - const auto& Tij = factor->measured(); - const auto& model = factor->noiseModel(); + for (const auto &m : measurements) { + const auto &keys = m.keys(); + const Rot3 &Tij = m.measured(); + const auto &model = m.noiseModel(); graph.emplace_shared( - keys[0], keys[1], Rot3(Tij.rotation().matrix()), 4, model, G); + keys[0], keys[1], Tij, 4, model, G); } std::mt19937 rng(42); From d0724a77bb30f3c4092a33b4a36b65a2d4da46f0 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Wed, 29 Apr 2020 13:49:52 -0400 Subject: [PATCH 050/142] Backport GitHub Actions CI (#259) * Add GitHub Actions * Add Windows Check * Fix wrong os selection * Add XCode version * Upgrade GCC Version * Make GCC match Ubuntu name * Do not fail everyone when one failed * More checks on ccache * Do not expose GenericValue on Windows * Fix BetweenFactor for Windows * Update * Add Python CI * Typo * Add note about GENERICVALUE_VISIBILITY * Fix Windows Boost * Change substitution scheme to PowerShell * Fix the spurious error of the Point3 default constructor * Separate builds to allow easier restarts * Fix uninitialized variable usage * Change of policy, only build python stuff, mac and win on PRs * Further separate the Python tests --- .github/workflows/build-cython.yml | 96 +++++++++++++++++++++ .github/workflows/build-linux.yml | 71 +++++++++++++++ .github/workflows/build-macos.yml | 51 +++++++++++ 
.github/workflows/build-python.yml | 96 +++++++++++++++++++++ .github/workflows/build-windows.yml | 75 ++++++++++++++++ .travis.sh | 2 +- cython/gtsam/utils/visual_data_generator.py | 14 +-- gtsam/base/GenericValue.h | 8 ++ gtsam/slam/BetweenFactor.h | 8 ++ 9 files changed, 413 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/build-cython.yml create mode 100644 .github/workflows/build-linux.yml create mode 100644 .github/workflows/build-macos.yml create mode 100644 .github/workflows/build-python.yml create mode 100644 .github/workflows/build-windows.yml diff --git a/.github/workflows/build-cython.yml b/.github/workflows/build-cython.yml new file mode 100644 index 000000000..c159a60fc --- /dev/null +++ b/.github/workflows/build-cython.yml @@ -0,0 +1,96 @@ +name: Cython CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.python_version }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + PYTHON_VERSION: ${{ matrix.python_version }} + WRAPPER: ${{ matrix.wrapper }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + ubuntu-18.04-gcc-5, + ubuntu-18.04-gcc-9, + ubuntu-18.04-clang-9, + macOS-10.15-xcode-11.3.1, + ] + + build_type: [Debug, Release] + python_version: [3] + wrapper: [cython] + include: + - name: ubuntu-18.04-gcc-5 + os: ubuntu-18.04 + compiler: gcc + version: "5" + + - name: ubuntu-18.04-gcc-9 + os: ubuntu-18.04 + compiler: gcc + version: "9" + + - name: ubuntu-18.04-clang-9 + os: ubuntu-18.04 + compiler: clang + version: "9" + + - name: macOS-10.15-xcode-11.3.1 + os: macOS-10.15 + compiler: xcode + version: "11.3.1" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. 
+ if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + sudo apt-get -y upgrade + + sudo apt install cmake g++-9 clang-9 build-essential pkg-config libpython-dev python-numpy libboost-all-dev + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "::set-env name=CC::clang-${{ matrix.version }}" + echo "::set-env name=CXX::clang++-${{ matrix.version }}" + fi + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + fi + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + bash .travis.python.sh + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + bash .travis.python.sh \ No newline at end of file diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml new file mode 100644 index 000000000..f58fe5f37 --- /dev/null +++ b/.github/workflows/build-linux.yml @@ -0,0 +1,71 @@ +name: CI Linux + +on: [push, pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.build_unstable }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + ubuntu-18.04-gcc-5, + ubuntu-18.04-gcc-9, + ubuntu-18.04-clang-9, + ] + + build_type: [Debug, Release] + build_unstable: [ON, OFF] + include: + - name: ubuntu-18.04-gcc-5 + os: ubuntu-18.04 + compiler: gcc + version: "5" + + - name: ubuntu-18.04-gcc-9 + os: ubuntu-18.04 + compiler: gcc + version: "9" + + - name: ubuntu-18.04-clang-9 + os: ubuntu-18.04 + compiler: clang + version: "9" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. 
+ if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + sudo apt-get -y upgrade + + sudo apt install cmake g++-9 clang-9 build-essential pkg-config libpython-dev python-numpy libboost-all-dev + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "::set-env name=CC::clang-${{ matrix.version }}" + echo "::set-env name=CXX::clang++-${{ matrix.version }}" + fi + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + bash .travis.sh -t \ No newline at end of file diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml new file mode 100644 index 000000000..2ab2e63c5 --- /dev/null +++ b/.github/workflows/build-macos.yml @@ -0,0 +1,51 @@ +name: CI macOS + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.build_unstable }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + macOS-10.15-xcode-11.3.1, + ] + + build_type: [Debug, Release] + build_unstable: [ON, OFF] + include: + - name: macOS-10.15-xcode-11.3.1 + os: macOS-10.15 + compiler: xcode + version: "11.3.1" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + fi + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + bash .travis.sh -t \ No newline at end of file diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml new file mode 100644 index 000000000..2dc98f05d --- /dev/null +++ b/.github/workflows/build-python.yml @@ -0,0 +1,96 @@ +name: Python CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.python_version }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + PYTHON_VERSION: ${{ matrix.python_version }} + WRAPPER: ${{ matrix.wrapper }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
+ name: [ + ubuntu-18.04-gcc-5, + ubuntu-18.04-gcc-9, + ubuntu-18.04-clang-9, + macOS-10.15-xcode-11.3.1, + ] + + build_type: [Debug, Release] + python_version: [3] + wrapper: [pybind] + include: + - name: ubuntu-18.04-gcc-5 + os: ubuntu-18.04 + compiler: gcc + version: "5" + + - name: ubuntu-18.04-gcc-9 + os: ubuntu-18.04 + compiler: gcc + version: "9" + + - name: ubuntu-18.04-clang-9 + os: ubuntu-18.04 + compiler: clang + version: "9" + + - name: macOS-10.15-xcode-11.3.1 + os: macOS-10.15 + compiler: xcode + version: "11.3.1" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. + if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + sudo apt-get -y upgrade + + sudo apt install cmake g++-9 clang-9 build-essential pkg-config libpython-dev python-numpy libboost-all-dev + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "::set-env name=CC::clang-${{ matrix.version }}" + echo "::set-env name=CXX::clang++-${{ matrix.version }}" + fi + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "::set-env name=CC::gcc-${{ matrix.version }}" + echo "::set-env name=CXX::g++-${{ matrix.version }}" + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + fi + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + bash .travis.python.sh + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + bash .travis.python.sh \ No newline at end of file diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml new file mode 100644 index 000000000..516e67e1d --- /dev/null +++ b/.github/workflows/build-windows.yml @@ -0,0 +1,75 @@ +name: CI Windows + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.build_unstable }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
+ name: [ + windows-2016-cl, + windows-2019-cl, + ] + + build_type: [Debug, Release] + build_unstable: [ON, OFF] + include: + - name: windows-2016-cl + os: windows-2016 + compiler: cl + + - name: windows-2019-cl + os: windows-2019 + compiler: cl + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Windows) + if: runner.os == 'Windows' + shell: powershell + run: | + Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh') + scoop install ninja --global + if ("${{ matrix.compiler }}".StartsWith("clang")) { + scoop install llvm --global + } + if ("${{ matrix.compiler }}" -eq "gcc") { + # Chocolatey GCC is broken on the windows-2019 image. + # See: https://github.com/DaanDeMeyer/doctest/runs/231595515 + # See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413 + scoop install gcc --global + echo "::set-env name=CC::gcc" + echo "::set-env name=CXX::g++" + } elseif ("${{ matrix.compiler }}" -eq "clang") { + echo "::set-env name=CC::clang" + echo "::set-env name=CXX::clang++" + } else { + echo "::set-env name=CC::${{ matrix.compiler }}" + echo "::set-env name=CXX::${{ matrix.compiler }}" + } + # Scoop modifies the PATH so we make the modified PATH global. + echo "::set-env name=PATH::$env:PATH" + - name: Build (Windows) + if: runner.os == 'Windows' + run: | + cmake -E remove_directory build + echo "BOOST_ROOT_1_72_0: ${env:BOOST_ROOT_1_72_0}" + cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT_1_72_0}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT_1_72_0}\lib" + cmake --build build --config ${{ matrix.build_type }} --target gtsam + cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable + cmake --build build --config ${{ matrix.build_type }} --target wrap + cmake --build build --config ${{ matrix.build_type }} --target check.base + cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable + cmake --build build --config ${{ matrix.build_type }} --target check.linear \ No newline at end of file diff --git a/.travis.sh b/.travis.sh index 7777e9919..35b964222 100755 --- a/.travis.sh +++ b/.travis.sh @@ -71,7 +71,7 @@ function configure() function finish () { # Print ccache stats - ccache -s + [ -x "$(command -v ccache)" ] && ccache -s cd $SOURCE_DIR } diff --git a/cython/gtsam/utils/visual_data_generator.py b/cython/gtsam/utils/visual_data_generator.py index f04588e70..5ce72fe68 100644 --- a/cython/gtsam/utils/visual_data_generator.py +++ b/cython/gtsam/utils/visual_data_generator.py @@ -30,8 +30,8 @@ class GroundTruth: def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K - self.cameras = [Pose3()] * nrCameras - self.points = [Point3()] * nrPoints + self.cameras = [gtsam.Pose3()] * nrCameras + self.points = [gtsam.Point3(0, 0, 0)] * nrPoints def print_(self, s=""): print(s) @@ -99,11 +99,11 @@ def generate_data(options): r = 40 for i in range(options.nrCameras): theta = i * 2 * np.pi / options.nrCameras - t = Point3(r * np.cos(theta), r * np.sin(theta), height) - truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, - Point3(), - Point3(0, 0, 1), - truth.K) + t = gtsam.Point3(r * np.cos(theta), r * np.sin(theta), height) + truth.cameras[i] = gtsam.SimpleCamera.Lookat(t, + gtsam.Point3(0, 0, 0), + gtsam.Point3(0, 0, 1), + truth.K) # Create measurements for j in range(nrPoints): # All landmarks seen in every frame diff --git 
a/gtsam/base/GenericValue.h b/gtsam/base/GenericValue.h index dc205b47f..0e928765f 100644 --- a/gtsam/base/GenericValue.h +++ b/gtsam/base/GenericValue.h @@ -30,6 +30,14 @@ #include #include // operator typeid +#ifdef _WIN32 +#define GENERICVALUE_VISIBILITY +#else +// This will trigger a LNKxxxx on MSVC, so disable for MSVC build +// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md +#define GENERICVALUE_VISIBILITY GTSAM_EXPORT +#endif + namespace gtsam { /** diff --git a/gtsam/slam/BetweenFactor.h b/gtsam/slam/BetweenFactor.h index 9afc2f72b..a594e95d5 100644 --- a/gtsam/slam/BetweenFactor.h +++ b/gtsam/slam/BetweenFactor.h @@ -21,6 +21,14 @@ #include #include +#ifdef _WIN32 +#define BETWEENFACTOR_VISIBILITY +#else +// This will trigger a LNKxxxx on MSVC, so disable for MSVC build +// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md +#define BETWEENFACTOR_VISIBILITY GTSAM_EXPORT +#endif + namespace gtsam { /** From 10754080fc34220503ae66f23bf3268a84c35322 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 14 Aug 2020 10:03:40 -0400 Subject: [PATCH 051/142] Remove travis and appveyor --- .travis.yml | 140 --------------------------------------------------- appveyor.yml | 33 ------------ 2 files changed, 173 deletions(-) delete mode 100644 .travis.yml delete mode 100644 appveyor.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 986e86cc2..000000000 --- a/.travis.yml +++ /dev/null @@ -1,140 +0,0 @@ -language: cpp -cache: ccache -dist: xenial - -addons: - apt: - sources: - - ubuntu-toolchain-r-test - - sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main' - key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' - packages: - - g++-9 - - clang-9 - - build-essential pkg-config - - cmake - - python3-dev libpython-dev - - python3-numpy - - libboost-all-dev - -# before_install: - # - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi - -install: - - if [ "$TRAVIS_OS_NAME" == "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install ccache ; fi - - if [ "$TRAVIS_OS_NAME" == "osx" ]; then export PATH="/usr/local/opt/ccache/libexec:$PATH" ; fi - -# We first do the compile stage specified below, then the matrix expansion specified after. -stages: - - compile - - test - - special - -env: - global: - - MAKEFLAGS="-j3" - - CCACHE_SLOPPINESS=pch_defines,time_macros - -# Compile stage without building examples/tests to populate the caches. 
-jobs: -# -------- STAGE 1: COMPILE ----------- - include: -# on Mac, GCC - - stage: compile - os: osx - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: osx - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Mac, CLANG - - stage: compile - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, GCC - - stage: compile - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, CLANG - - stage: compile - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, with deprecated ON to make sure that path still compiles/tests - - stage: special - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON - script: bash .travis.sh -b -# on Linux, with GTSAM_WITH_TBB on to make sure GTSAM still compiles/tests - # - stage: special - # os: linux - # compiler: gcc - # env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON - # script: bash .travis.sh -t -# -------- STAGE 2: TESTS ----------- -# on Mac, GCC - - stage: test - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t - - stage: test - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -t - - stage: test - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t - - stage: test - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -t - - stage: test - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t -# on Linux, with quaternions ON to make sure that path still compiles/tests - - stage: special - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON - script: bash .travis.sh -t - - stage: special - os: linux - compiler: gcc - env: PYTHON_VERSION=3 - script: bash .travis.python.sh - - stage: special - os: osx - compiler: clang - env: PYTHON_VERSION=3 - script: bash .travis.python.sh diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 3747354cf..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,33 +0,0 @@ -# version format -version: 4.0.3-{branch}-build{build} - -os: Visual Studio 2019 - -clone_folder: c:\projects\gtsam - -platform: x64 -configuration: Release - -environment: - CTEST_OUTPUT_ON_FAILURE: 1 - BOOST_ROOT: C:/Libraries/boost_1_71_0 - -build_script: - - cd c:\projects\gtsam\build - # As of Dec 2019, not all unit tests build cleanly for MSVC, so we'll just - # check that parts of GTSAM build correctly: - #- cmake --build . - - cmake --build . --config Release --target gtsam - - cmake --build . --config Release --target gtsam_unstable - - cmake --build . --config Release --target wrap - #- cmake --build . 
--target check - - cmake --build . --config Release --target check.base - - cmake --build . --config Release --target check.base_unstable - - cmake --build . --config Release --target check.linear - -before_build: - - cd c:\projects\gtsam - - mkdir build - - cd build - # Disable examples to avoid AppVeyor timeout - - cmake -G "Visual Studio 16 2019" .. -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF From d43d8b7c6959ddd3e4232110a7beb4acbaedfc9e Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 14 Aug 2020 10:03:56 -0400 Subject: [PATCH 052/142] Limit python triggering --- .github/workflows/trigger-python.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/trigger-python.yml b/.github/workflows/trigger-python.yml index 8fad9e7ca..94527e732 100644 --- a/.github/workflows/trigger-python.yml +++ b/.github/workflows/trigger-python.yml @@ -1,6 +1,9 @@ # This triggers Cython builds on `gtsam-manylinux-build` name: Trigger Python Builds -on: push +on: + push: + branches: + - develop jobs: triggerCython: runs-on: ubuntu-latest From 96169665b0644c64bd62e88cb3f35bdb74709934 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 14 Aug 2020 10:06:00 -0400 Subject: [PATCH 053/142] Remove pybind build --- .github/workflows/build-python.yml | 96 ------------------------------ 1 file changed, 96 deletions(-) delete mode 100644 .github/workflows/build-python.yml diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml deleted file mode 100644 index 2dc98f05d..000000000 --- a/.github/workflows/build-python.yml +++ /dev/null @@ -1,96 +0,0 @@ -name: Python CI - -on: [pull_request] - -jobs: - build: - name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.python_version }} - runs-on: ${{ matrix.os }} - - env: - CTEST_OUTPUT_ON_FAILURE: ON - CTEST_PARALLEL_LEVEL: 2 - CMAKE_BUILD_TYPE: ${{ matrix.build_type }} - PYTHON_VERSION: ${{ matrix.python_version }} - WRAPPER: ${{ matrix.wrapper }} - strategy: - fail-fast: false - matrix: - # Github Actions requires a single row to be added to the build matrix. - # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. - name: [ - ubuntu-18.04-gcc-5, - ubuntu-18.04-gcc-9, - ubuntu-18.04-clang-9, - macOS-10.15-xcode-11.3.1, - ] - - build_type: [Debug, Release] - python_version: [3] - wrapper: [pybind] - include: - - name: ubuntu-18.04-gcc-5 - os: ubuntu-18.04 - compiler: gcc - version: "5" - - - name: ubuntu-18.04-gcc-9 - os: ubuntu-18.04 - compiler: gcc - version: "9" - - - name: ubuntu-18.04-clang-9 - os: ubuntu-18.04 - compiler: clang - version: "9" - - - name: macOS-10.15-xcode-11.3.1 - os: macOS-10.15 - compiler: xcode - version: "11.3.1" - - steps: - - name: Checkout - uses: actions/checkout@master - - name: Install (Linux) - if: runner.os == 'Linux' - run: | - # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. 
- if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" - fi - sudo apt-get -y update - sudo apt-get -y upgrade - - sudo apt install cmake g++-9 clang-9 build-essential pkg-config libpython-dev python-numpy libboost-all-dev - - if [ "${{ matrix.compiler }}" = "gcc" ]; then - sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib - echo "::set-env name=CC::gcc-${{ matrix.version }}" - echo "::set-env name=CXX::g++-${{ matrix.version }}" - else - sudo apt-get install -y clang-${{ matrix.version }} g++-multilib - echo "::set-env name=CC::clang-${{ matrix.version }}" - echo "::set-env name=CXX::clang++-${{ matrix.version }}" - fi - - name: Install (macOS) - if: runner.os == 'macOS' - run: | - brew install cmake ninja boost - if [ "${{ matrix.compiler }}" = "gcc" ]; then - brew install gcc@${{ matrix.version }} - echo "::set-env name=CC::gcc-${{ matrix.version }}" - echo "::set-env name=CXX::g++-${{ matrix.version }}" - else - sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app - echo "::set-env name=CC::clang" - echo "::set-env name=CXX::clang++" - fi - - name: Build (Linux) - if: runner.os == 'Linux' - run: | - bash .travis.python.sh - - name: Build (macOS) - if: runner.os == 'macOS' - run: | - bash .travis.python.sh \ No newline at end of file From a5b722d237c58c2354c081de169b7345b2a48b09 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Fri, 14 Aug 2020 10:48:17 -0400 Subject: [PATCH 054/142] Correct .travis.sh --- .travis.python.sh | 60 ++++++++++++++++++++++++++++++++++++++--------- .travis.sh | 16 ++++++------- 2 files changed, 57 insertions(+), 19 deletions(-) diff --git a/.travis.python.sh b/.travis.python.sh index 99506e749..7ccf56dd1 100644 --- a/.travis.python.sh +++ b/.travis.python.sh @@ -6,6 +6,11 @@ if [ -z ${PYTHON_VERSION+x} ]; then exit 127 fi +if [ -z ${WRAPPER+x} ]; then + echo "Please provide the wrapper to build!" 
+ exit 126 +fi + PYTHON="python${PYTHON_VERSION}" if [[ $(uname) == "Darwin" ]]; then @@ -15,9 +20,29 @@ else sudo apt-get install wget libicu-dev python3-pip python3-setuptools fi -CURRDIR=$(pwd) +PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin -sudo $PYTHON -m pip install -r ./cython/requirements.txt +case $WRAPPER in +"cython") + BUILD_CYTHON="ON" + BUILD_PYBIND="OFF" + TYPEDEF_POINTS_TO_VECTORS="OFF" + + $PYTHON -m pip install --user -r ./cython/requirements.txt + ;; +"pybind") + BUILD_CYTHON="OFF" + BUILD_PYBIND="ON" + TYPEDEF_POINTS_TO_VECTORS="ON" + + $PYTHON -m pip install --user -r ./wrap/python/requirements.txt + ;; +*) + exit 126 + ;; +esac + +CURRDIR=$(pwd) mkdir $CURRDIR/build cd $CURRDIR/build @@ -27,17 +52,30 @@ cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \ -DGTSAM_USE_QUATERNIONS=OFF \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \ + -DGTSAM_INSTALL_CYTHON_TOOLBOX=${BUILD_CYTHON} \ + -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \ + -DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \ -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \ + -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF \ + -DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \ -DCMAKE_INSTALL_PREFIX=$CURRDIR/../gtsam_install make -j$(nproc) install -cd cython - -sudo $PYTHON setup.py install - -cd $CURRDIR/cython/gtsam/tests - -$PYTHON -m unittest discover \ No newline at end of file +case $WRAPPER in +"cython") + cd $CURRDIR/../gtsam_install/cython + $PYTHON setup.py install --user --prefix= + cd $CURRDIR/cython/gtsam/tests + $PYTHON -m unittest discover + ;; +"pybind") + $PYTHON setup.py install --user --prefix= + cd $CURRDIR/wrap/python/gtsam_py/tests + $PYTHON -m unittest discover + ;; +*) + echo "THIS SHOULD NEVER HAPPEN!" + exit 125 + ;; +esac diff --git a/.travis.sh b/.travis.sh index 35b964222..422f68065 100755 --- a/.travis.sh +++ b/.travis.sh @@ -4,17 +4,17 @@ function install_tbb() { TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download - TBB_VERSION=4.4.2 - TBB_DIR=tbb44_20151115oss + TBB_VERSION=4.4.5 + TBB_DIR=tbb44_20160526oss TBB_SAVEPATH="/tmp/tbb.tgz" - if [ "$TRAVIS_OS_NAME" == "linux" ]; then + if [ "$(uname)" == "Linux" ]; then OS_SHORT="lin" TBB_LIB_DIR="intel64/gcc4.4" SUDO="sudo" - elif [ "$TRAVIS_OS_NAME" == "osx" ]; then - OS_SHORT="lin" + elif [ "$(uname)" == "Darwin" ]; then + OS_SHORT="osx" TBB_LIB_DIR="" SUDO="" @@ -46,7 +46,7 @@ function configure() rm -fr $BUILD_DIR || true mkdir $BUILD_DIR && cd $BUILD_DIR - install_tbb + [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb if [ ! 
-z "$GCC_VERSION" ]; then export CC=gcc-$GCC_VERSION @@ -61,9 +61,9 @@ function configure() -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \ -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \ + -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=OFF + -DCMAKE_VERBOSE_MAKEFILE=ON } From bb0de4f63b3aac40dd419f0c39c1b48cbf9ecc10 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 15 Aug 2020 11:51:56 -0400 Subject: [PATCH 055/142] switch to boost 1.69.0 --- .github/workflows/build-linux.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index f58fe5f37..eab879e2e 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -54,7 +54,7 @@ jobs: sudo apt-get -y update sudo apt-get -y upgrade - sudo apt install cmake g++-9 clang-9 build-essential pkg-config libpython-dev python-numpy libboost-all-dev + sudo apt install cmake g++-9 clang-9 build-essential pkg-config libpython-dev python-numpy if [ "${{ matrix.compiler }}" = "gcc" ]; then sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib @@ -64,6 +64,8 @@ jobs: sudo apt-get install -y clang-${{ matrix.version }} g++-multilib echo "::set-env name=CC::clang-${{ matrix.version }}" echo "::set-env name=CXX::clang++-${{ matrix.version }}" + echo "::set-env name=BOOST_ROOT:$BOOST_ROOT_1_69_0" + echo "BOOST_ROOT env set to $BOOST_ROOT" fi - name: Build (Linux) if: runner.os == 'Linux' From b3caa6de8363605dee4caf34dc7b54a392161b12 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 15 Aug 2020 12:06:07 -0400 Subject: [PATCH 056/142] typo --- .github/workflows/build-linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index eab879e2e..fbf7d7cd1 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -64,7 +64,7 @@ jobs: sudo apt-get install -y clang-${{ matrix.version }} g++-multilib echo "::set-env name=CC::clang-${{ matrix.version }}" echo "::set-env name=CXX::clang++-${{ matrix.version }}" - echo "::set-env name=BOOST_ROOT:$BOOST_ROOT_1_69_0" + echo "::set-env name=BOOST_ROOT::$BOOST_ROOT_1_69_0" echo "BOOST_ROOT env set to $BOOST_ROOT" fi - name: Build (Linux) From 288195ba169456d9f5e89525f2554475b3cde231 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 15 Aug 2020 12:54:31 -0400 Subject: [PATCH 057/142] Add a display for current boost version --- .github/workflows/build-linux.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index fbf7d7cd1..837d0b174 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -56,6 +56,8 @@ jobs: sudo apt install cmake g++-9 clang-9 build-essential pkg-config libpython-dev python-numpy + echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)" + if [ "${{ matrix.compiler }}" = "gcc" ]; then sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib echo "::set-env name=CC::gcc-${{ matrix.version }}" @@ -64,9 +66,11 @@ jobs: sudo apt-get install -y clang-${{ matrix.version }} g++-multilib echo "::set-env name=CC::clang-${{ matrix.version }}" echo 
"::set-env name=CXX::clang++-${{ matrix.version }}" - echo "::set-env name=BOOST_ROOT::$BOOST_ROOT_1_69_0" - echo "BOOST_ROOT env set to $BOOST_ROOT" fi + - name: Check Boost version + if: runner.os == 'Linux' + run: | + echo "BOOST_ROOT = $BOOST_ROOT" - name: Build (Linux) if: runner.os == 'Linux' run: | From b9f4f28e1aec4b4e1659785def4cbb7d7e931f86 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 15 Aug 2020 13:05:53 -0400 Subject: [PATCH 058/142] Manually specify BOOST_ROOT --- .travis.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.sh b/.travis.sh index 422f68065..c2b4e0590 100755 --- a/.travis.sh +++ b/.travis.sh @@ -64,6 +64,7 @@ function configure() -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ -DCMAKE_VERBOSE_MAKEFILE=ON + -DBOOST_ROOT=$BOOST_ROOT } From b1e3b5495ce5c969d76f9363b6bb0828b9f250c1 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 15 Aug 2020 13:05:58 -0400 Subject: [PATCH 059/142] Some behavior changes. - maxIndex now does what it says - id = size_t, Key is only for Values and Graph) - re-admitted methods needed for wrapper. --- examples/Data/example_with_vertices.g2o | 32 +-- gtsam/slam/dataset.cpp | 268 +++++++++++++----------- gtsam/slam/dataset.h | 71 ++++--- gtsam/slam/tests/testDataset.cpp | 46 ++-- tests/testNonlinearISAM.cpp | 2 +- 5 files changed, 229 insertions(+), 190 deletions(-) diff --git a/examples/Data/example_with_vertices.g2o b/examples/Data/example_with_vertices.g2o index 6c2f1a530..ca7cd86df 100644 --- a/examples/Data/example_with_vertices.g2o +++ b/examples/Data/example_with_vertices.g2o @@ -1,16 +1,16 @@ -VERTEX_SE3:QUAT 8646911284551352320 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162 -VERTEX_SE3:QUAT 8646911284551352321 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508 -VERTEX_SE3:QUAT 8646911284551352322 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10 -VERTEX_SE3:QUAT 8646911284551352323 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508 -VERTEX_SE3:QUAT 8646911284551352324 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162 -VERTEX_SE3:QUAT 8646911284551352325 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567 -VERTEX_SE3:QUAT 8646911284551352326 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412 -VERTEX_SE3:QUAT 8646911284551352327 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567 -VERTEX_TRACKXYZ 7782220156096217088 10 10 10 -VERTEX_TRACKXYZ 7782220156096217089 -10 10 10 -VERTEX_TRACKXYZ 7782220156096217090 -10 -10 10 -VERTEX_TRACKXYZ 7782220156096217091 10 -10 10 -VERTEX_TRACKXYZ 7782220156096217092 10 10 -10 -VERTEX_TRACKXYZ 7782220156096217093 -10 10 -10 -VERTEX_TRACKXYZ 7782220156096217094 -10 -10 -10 -VERTEX_TRACKXYZ 7782220156096217095 10 -10 -10 +VERTEX_SE3:QUAT 0 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162 +VERTEX_SE3:QUAT 1 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508 +VERTEX_SE3:QUAT 2 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10 +VERTEX_SE3:QUAT 3 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508 +VERTEX_SE3:QUAT 4 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162 +VERTEX_SE3:QUAT 5 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567 +VERTEX_SE3:QUAT 6 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412 +VERTEX_SE3:QUAT 7 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567 +VERTEX_TRACKXYZ 0 10 10 10 +VERTEX_TRACKXYZ 1 -10 10 10 +VERTEX_TRACKXYZ 2 -10 -10 10 +VERTEX_TRACKXYZ 3 10 
-10 10 +VERTEX_TRACKXYZ 4 10 10 -10 +VERTEX_TRACKXYZ 5 -10 10 -10 +VERTEX_TRACKXYZ 6 -10 -10 -10 +VERTEX_TRACKXYZ 7 10 -10 -10 diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 2aac939d9..9cc814245 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -53,7 +53,9 @@ using namespace std; namespace fs = boost::filesystem; -using namespace gtsam::symbol_shorthand; +using gtsam::symbol_shorthand::L; +using gtsam::symbol_shorthand::P; +using gtsam::symbol_shorthand::X; #define LINESIZE 81920 @@ -127,19 +129,16 @@ static void parseLines(const string &filename, Parser parse) { } /* ************************************************************************* */ -// Parse types T into a Key-indexed map +// Parse types T into a size_t-indexed map template -map parseToMap(const string &filename, Parser> parse, - Key maxKey) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse: can not find file " + filename); - - map result; - Parser> emplace = [&result, parse](istream &is, - const string &tag) { - if (auto t = parse(is, tag)) - result.emplace(*t); +map parseToMap(const string &filename, Parser> parse, + size_t maxIndex) { + map result; + Parser> emplace = [&](istream &is, const string &tag) { + if (auto t = parse(is, tag)) { + if (!maxIndex || t->first <= maxIndex) + result.emplace(*t); + } return boost::none; }; parseLines(filename, emplace); @@ -148,7 +147,6 @@ map parseToMap(const string &filename, Parser> parse, /* ************************************************************************* */ // Parse a file and push results on a vector -// TODO(frank): do we need this *and* parseMeasurements? template static vector parseToVector(const string &filename, Parser parse) { vector result; @@ -164,7 +162,7 @@ static vector parseToVector(const string &filename, Parser parse) { /* ************************************************************************* */ boost::optional parseVertexPose(istream &is, const string &tag) { if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { - Key id; + size_t id; double x, y, yaw; is >> id >> x >> y >> yaw; return IndexedPose(id, Pose2(x, y, yaw)); @@ -174,16 +172,16 @@ boost::optional parseVertexPose(istream &is, const string &tag) { } template <> -std::map parseVariables(const std::string &filename, - Key maxKey) { - return parseToMap(filename, parseVertexPose, maxKey); +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPose, maxIndex); } /* ************************************************************************* */ boost::optional parseVertexLandmark(istream &is, const string &tag) { if (tag == "VERTEX_XY") { - Key id; + size_t id; double x, y; is >> id >> x >> y; return IndexedLandmark(id, Point2(x, y)); @@ -193,9 +191,9 @@ boost::optional parseVertexLandmark(istream &is, } template <> -std::map parseVariables(const std::string &filename, - Key maxKey) { - return parseToMap(filename, parseVertexLandmark, maxKey); +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexLandmark, maxIndex); } /* ************************************************************************* */ @@ -287,10 +285,10 @@ boost::optional parseEdge(istream &is, const string &tag) { if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") || (tag == "ODOMETRY")) { - Key id1, id2; + size_t id1, id2; double x, y, yaw; is >> id1 >> id2 >> x >> y >> yaw; - return IndexedEdge(pair(id1, id2), Pose2(x, y, 
yaw)); + return IndexedEdge({id1, id2}, Pose2(x, y, yaw)); } else { return boost::none; } @@ -323,7 +321,7 @@ template struct ParseFactor : ParseMeasurement { template <> struct ParseMeasurement { // The arguments boost::shared_ptr sampler; - Key maxKey; + size_t maxIndex; // For noise model creation bool smart; @@ -345,9 +343,9 @@ template <> struct ParseMeasurement { is >> v(0) >> v(1) >> v(2) >> v(3) >> v(4) >> v(5); // optional filter - Key id1, id2; + size_t id1, id2; tie(id1, id2) = edge->first; - if (maxKey && (id1 >= maxKey || id2 >= maxKey)) + if (maxIndex && (id1 > maxIndex || id2 > maxIndex)) return boost::none; // Get pose and optionally add noise @@ -378,9 +376,11 @@ boost::shared_ptr createSampler(const SharedNoiseModel &model) { template <> std::vector> parseMeasurements(const std::string &filename, - const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { - ParseMeasurement parse{model ? createSampler(model) : nullptr, maxKey, - true, NoiseFormatAUTO, KernelFunctionTypeNONE}; + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseMeasurement parse{model ? createSampler(model) : nullptr, + maxIndex, true, NoiseFormatAUTO, + KernelFunctionTypeNONE}; return parseToVector>(filename, parse); } @@ -403,8 +403,9 @@ static BinaryMeasurement convert(const BinaryMeasurement &p) { template <> std::vector> parseMeasurements(const std::string &filename, - const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { - auto poses = parseMeasurements(filename, model, maxKey); + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + auto poses = parseMeasurements(filename, model, maxIndex); std::vector> result; result.reserve(poses.size()); for (const auto &p : poses) @@ -417,8 +418,9 @@ parseMeasurements(const std::string &filename, template <> std::vector::shared_ptr> parseFactors(const std::string &filename, - const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { - ParseFactor parse({model ? createSampler(model) : nullptr, maxKey, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseFactor parse({model ? createSampler(model) : nullptr, maxIndex, true, NoiseFormatAUTO, KernelFunctionTypeNONE, nullptr}); return parseToVector::shared_ptr>(filename, parse); @@ -429,7 +431,7 @@ using BearingRange2D = BearingRange; template <> struct ParseMeasurement { // arguments - Key maxKey; + size_t maxIndex; // The actual parser boost::optional> @@ -437,7 +439,7 @@ template <> struct ParseMeasurement { if (tag != "BR" && tag != "LANDMARK") return boost::none; - Key id1, id2; + size_t id1, id2; is >> id1 >> id2; double bearing, range, bearing_std, range_std; @@ -470,7 +472,7 @@ template <> struct ParseMeasurement { } // optional filter - if (maxKey && id1 >= maxKey) + if (maxIndex && id1 > maxIndex) return boost::none; // Create noise model @@ -484,30 +486,37 @@ template <> struct ParseMeasurement { /* ************************************************************************* */ GraphAndValues load2D(const string &filename, SharedNoiseModel model, - Key maxKey, bool addNoise, bool smart, + size_t maxIndex, bool addNoise, bool smart, NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { - auto graph = boost::make_shared(); + // Single pass for poses and landmarks. 
auto initial = boost::make_shared(); + Parser insert = [maxIndex, &initial](istream &is, const string &tag) { + if (auto indexedPose = parseVertexPose(is, tag)) { + if (!maxIndex || indexedPose->first <= maxIndex) + initial->insert(indexedPose->first, indexedPose->second); + } else if (auto indexedLandmark = parseVertexLandmark(is, tag)) { + if (!maxIndex || indexedLandmark->first <= maxIndex) + initial->insert(L(indexedLandmark->first), indexedLandmark->second); + } + return 0; + }; + parseLines(filename, insert); - // TODO(frank): do a single pass - const auto poses = parseVariables(filename, maxKey); - for (const auto &key_pose : poses) { - initial->insert(key_pose.first, key_pose.second); - } + // Single pass for Pose2 and bearing-range factors. + auto graph = boost::make_shared(); - const auto landmarks = parseVariables(filename, maxKey); - for (const auto &key_landmark : landmarks) { - initial->insert(L(key_landmark.first), key_landmark.second); - } - - // Create parser for Pose2 and bearing/range + // Instantiate factor parser ParseFactor parseBetweenFactor( - {addNoise ? createSampler(model) : nullptr, maxKey, smart, noiseFormat, + {addNoise ? createSampler(model) : nullptr, maxIndex, smart, noiseFormat, kernelFunctionType, model}); - ParseMeasurement parseBearingRange{maxKey}; + // Instantiate bearing-range parser + ParseMeasurement parseBearingRange{maxIndex}; + + // Combine in a single parser that adds factors to `graph`, but also inserts + // new variables into `initial` when needed. Parser parse = [&](istream &is, const string &tag) { if (auto f = parseBetweenFactor(is, tag)) { graph->push_back(*f); @@ -537,23 +546,24 @@ GraphAndValues load2D(const string &filename, SharedNoiseModel model, return 0; }; - parseLines(filename, parse); + parseLines(filename, parse); return make_pair(graph, initial); } /* ************************************************************************* */ -GraphAndValues load2D(pair dataset, Key maxKey, +GraphAndValues load2D(pair dataset, size_t maxIndex, bool addNoise, bool smart, NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { - return load2D(dataset.first, dataset.second, maxKey, addNoise, smart, + return load2D(dataset.first, dataset.second, maxIndex, addNoise, smart, noiseFormat, kernelFunctionType); } /* ************************************************************************* */ GraphAndValues load2D_robust(const string &filename, - noiseModel::Base::shared_ptr &model, Key maxKey) { - return load2D(filename, model, maxKey); + noiseModel::Base::shared_ptr &model, + size_t maxIndex) { + return load2D(filename, model, maxIndex); } /* ************************************************************************* */ @@ -596,10 +606,10 @@ GraphAndValues readG2o(const string &g2oFile, const bool is3D, return load3D(g2oFile); } else { // just call load2D - Key maxKey = 0; + size_t maxIndex = 0; bool addNoise = false; bool smart = true; - return load2D(g2oFile, SharedNoiseModel(), maxKey, addNoise, smart, + return load2D(g2oFile, SharedNoiseModel(), maxIndex, addNoise, smart, NoiseFormatG2O, kernelFunctionType); } } @@ -609,13 +619,16 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, const string &filename) { fstream stream(filename.c_str(), fstream::out); + // Use a lambda here to more easily modify behavior in future. 
+ auto index = [](gtsam::Key key) { return Symbol(key).index(); }; + // save 2D poses for (const auto key_value : estimate) { auto p = dynamic_cast *>(&key_value.value); if (!p) continue; const Pose2 &pose = p->value(); - stream << "VERTEX_SE2 " << key_value.key << " " << pose.x() << " " + stream << "VERTEX_SE2 " << index(key_value.key) << " " << pose.x() << " " << pose.y() << " " << pose.theta() << endl; } @@ -627,7 +640,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, const Pose3 &pose = p->value(); const Point3 t = pose.translation(); const auto q = pose.rotation().toQuaternion(); - stream << "VERTEX_SE3:QUAT " << key_value.key << " " << t.x() << " " + stream << "VERTEX_SE3:QUAT " << index(key_value.key) << " " << t.x() << " " << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " << q.w() << endl; } @@ -638,7 +651,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, if (!p) continue; const Point2 &point = p->value(); - stream << "VERTEX_XY " << key_value.key << " " << point.x() << " " + stream << "VERTEX_XY " << index(key_value.key) << " " << point.x() << " " << point.y() << endl; } @@ -648,26 +661,25 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, if (!p) continue; const Point3 &point = p->value(); - stream << "VERTEX_TRACKXYZ " << key_value.key << " " << point.x() << " " - << point.y() << " " << point.z() << endl; + stream << "VERTEX_TRACKXYZ " << index(key_value.key) << " " << point.x() + << " " << point.y() << " " << point.z() << endl; } // save edges (2D or 3D) for (const auto &factor_ : graph) { - boost::shared_ptr> factor = - boost::dynamic_pointer_cast>(factor_); + auto factor = boost::dynamic_pointer_cast>(factor_); if (factor) { SharedNoiseModel model = factor->noiseModel(); - boost::shared_ptr gaussianModel = - boost::dynamic_pointer_cast(model); + auto gaussianModel = boost::dynamic_pointer_cast(model); if (!gaussianModel) { model->print("model\n"); throw invalid_argument("writeG2o: invalid noise model!"); } Matrix3 Info = gaussianModel->R().transpose() * gaussianModel->R(); Pose2 pose = factor->measured(); //.inverse(); - stream << "EDGE_SE2 " << factor->key1() << " " << factor->key2() << " " - << pose.x() << " " << pose.y() << " " << pose.theta(); + stream << "EDGE_SE2 " << index(factor->key1()) << " " + << index(factor->key2()) << " " << pose.x() << " " << pose.y() + << " " << pose.theta(); for (size_t i = 0; i < 3; i++) { for (size_t j = i; j < 3; j++) { stream << " " << Info(i, j); @@ -676,8 +688,7 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, stream << endl; } - boost::shared_ptr> factor3D = - boost::dynamic_pointer_cast>(factor_); + auto factor3D = boost::dynamic_pointer_cast>(factor_); if (factor3D) { SharedNoiseModel model = factor3D->noiseModel(); @@ -692,9 +703,10 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, const Pose3 pose3D = factor3D->measured(); const Point3 p = pose3D.translation(); const auto q = pose3D.rotation().toQuaternion(); - stream << "EDGE_SE3:QUAT " << factor3D->key1() << " " << factor3D->key2() - << " " << p.x() << " " << p.y() << " " << p.z() << " " << q.x() - << " " << q.y() << " " << q.z() << " " << q.w(); + stream << "EDGE_SE3:QUAT " << index(factor3D->key1()) << " " + << index(factor3D->key2()) << " " << p.x() << " " << p.y() << " " + << p.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " + << q.w(); Matrix6 InfoG2o = I_6x6; InfoG2o.block<3, 3>(0, 0) = Info.block<3, 
3>(3, 3); // cov translation @@ -733,16 +745,16 @@ istream &operator>>(istream &is, Rot3 &R) { } /* ************************************************************************* */ -boost::optional> parseVertexPose3(istream &is, - const string &tag) { +boost::optional> parseVertexPose3(istream &is, + const string &tag) { if (tag == "VERTEX3") { - Key id; + size_t id; double x, y, z; Rot3 R; is >> id >> x >> y >> z >> R; return make_pair(id, Pose3(R, {x, y, z})); } else if (tag == "VERTEX_SE3:QUAT") { - Key id; + size_t id; double x, y, z; Quaternion q; is >> id >> x >> y >> z >> q; @@ -752,16 +764,16 @@ boost::optional> parseVertexPose3(istream &is, } template <> -std::map parseVariables(const std::string &filename, - Key maxKey) { - return parseToMap(filename, parseVertexPose3, maxKey); +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPose3, maxIndex); } /* ************************************************************************* */ -boost::optional> parseVertexPoint3(istream &is, - const string &tag) { +boost::optional> parseVertexPoint3(istream &is, + const string &tag) { if (tag == "VERTEX_TRACKXYZ") { - Key id; + size_t id; double x, y, z; is >> id >> x >> y >> z; return make_pair(id, Point3(x, y, z)); @@ -770,9 +782,9 @@ boost::optional> parseVertexPoint3(istream &is, } template <> -std::map parseVariables(const std::string &filename, - Key maxKey) { - return parseToMap(filename, parseVertexPoint3, maxKey); +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPoint3, maxIndex); } /* ************************************************************************* */ @@ -791,7 +803,7 @@ istream &operator>>(istream &is, Matrix6 &m) { template <> struct ParseMeasurement { // The arguments boost::shared_ptr sampler; - Key maxKey; + size_t maxIndex; // The actual parser boost::optional> operator()(istream &is, @@ -800,9 +812,9 @@ template <> struct ParseMeasurement { return boost::none; // parse ids and optionally filter - Key id1, id2; + size_t id1, id2; is >> id1 >> id2; - if (maxKey && (id1 >= maxKey || id2 >= maxKey)) + if (maxIndex && (id1 > maxIndex || id2 > maxIndex)) return boost::none; Matrix6 m; @@ -847,8 +859,10 @@ template <> struct ParseMeasurement { template <> std::vector> parseMeasurements(const std::string &filename, - const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { - ParseMeasurement parse{model ? createSampler(model) : nullptr, maxKey}; + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseMeasurement parse{model ? createSampler(model) : nullptr, + maxIndex}; return parseToVector>(filename, parse); } @@ -872,8 +886,9 @@ static BinaryMeasurement convert(const BinaryMeasurement &p) { template <> std::vector> parseMeasurements(const std::string &filename, - const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { - auto poses = parseMeasurements(filename, model, maxKey); + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + auto poses = parseMeasurements(filename, model, maxIndex); std::vector> result; result.reserve(poses.size()); for (const auto &p : poses) @@ -886,8 +901,9 @@ parseMeasurements(const std::string &filename, template <> std::vector::shared_ptr> parseFactors(const std::string &filename, - const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { - ParseFactor parse({model ? 
createSampler(model) : nullptr, maxKey}); + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseFactor parse({model ? createSampler(model) : nullptr, maxIndex}); return parseToVector::shared_ptr>(filename, parse); } @@ -896,21 +912,22 @@ GraphAndValues load3D(const string &filename) { auto graph = boost::make_shared(); auto initial = boost::make_shared(); - // TODO(frank): do a single pass - const auto factors = parseFactors(filename); - for (const auto &factor : factors) { - graph->push_back(factor); - } + // Instantiate factor parser. maxIndex is always zero for load3D. + ParseFactor parseFactor({nullptr, 0}); - const auto poses = parseVariables(filename); - for (const auto &key_pose : poses) { - initial->insert(key_pose.first, key_pose.second); - } - - const auto landmarks = parseVariables(filename); - for (const auto &key_landmark : landmarks) { - initial->insert(key_landmark.first, key_landmark.second); - } + // Single pass for variables and factors. Unlike 2D version, does *not* insert + // variables into `initial` if referenced but not present. + Parser parse = [&](istream &is, const string &tag) { + if (auto indexedPose = parseVertexPose3(is, tag)) { + initial->insert(indexedPose->first, indexedPose->second); + } else if (auto indexedLandmark = parseVertexPoint3(is, tag)) { + initial->insert(L(indexedLandmark->first), indexedLandmark->second); + } else if (auto factor = parseFactor(is, tag)) { + graph->push_back(*factor); + } + return 0; + }; + parseLines(filename, parse); return make_pair(graph, initial); } @@ -1187,8 +1204,7 @@ bool writeBALfromValues(const string &filename, const SfmData &data, dataValues.number_cameras()) { // we only estimated camera poses for (size_t i = 0; i < dataValues.number_cameras(); i++) { // for each camera - Key poseKey = symbol('x', i); - Pose3 pose = values.at(poseKey); + Pose3 pose = values.at(X(i)); Cal3Bundler K = dataValues.cameras[i].calibration(); Camera camera(pose, K); dataValues.cameras[i] = camera; @@ -1255,20 +1271,22 @@ Values initialCamerasAndPointsEstimate(const SfmData &db) { return initial; } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 -std::map parse3DPoses(const std::string &filename, Key maxKey) { - return parseVariables(filename, maxKey); -} - -std::map parse3DLandmarks(const std::string &filename, - Key maxKey) { - return parseVariables(filename, maxKey); -} - +// To be deprecated when pybind wrapper is merged: BetweenFactorPose3s parse3DFactors(const std::string &filename, - const noiseModel::Diagonal::shared_ptr &model, Key maxKey) { - return parseFactors(filename, model, maxKey); + const noiseModel::Diagonal::shared_ptr &model, size_t maxIndex) { + return parseFactors(filename, model, maxIndex); +} + +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 +std::map parse3DPoses(const std::string &filename, + size_t maxIndex) { + return parseVariables(filename, maxIndex); +} + +std::map parse3DLandmarks(const std::string &filename, + size_t maxIndex) { + return parseVariables(filename, maxIndex); } #endif } // namespace gtsam diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 38ddfd1dc..7bdf6728d 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -74,37 +74,41 @@ enum KernelFunctionType { }; /** - * Parse variables in 2D g2o graph file into a map. - * Instantiated in .cpp T equal to Pose2, Pose3 + * Parse variables in a line-based text format (like g2o) into a map. + * Instantiated in .cpp Pose2, Point2, Pose3, and Point3. + * Note the map keys are integer indices, *not* gtsam::Keys. 
This is is + * different below where landmarks will use be L(index) symbols. */ template -GTSAM_EXPORT std::map parseVariables(const std::string &filename, - Key maxKey = 0); +GTSAM_EXPORT std::map parseVariables(const std::string &filename, + size_t maxIndex = 0); /** - * Parse edges in 2D g2o graph file into a set of binary measuremnts. - * Instantiated in .cpp for T equal to Pose2, Pose3 + * Parse binary measurements in a line-based text format (like g2o) into a + * vector. Instantiated in .cpp for Pose2, Rot2, Pose3, and Rot3. The rotation + * versions parse poses and extract only the rotation part, using the marginal + * covariance as noise model. */ template GTSAM_EXPORT std::vector> parseMeasurements(const std::string &filename, - const noiseModel::Diagonal::shared_ptr &model = nullptr, - Key maxKey = 0); + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); /** - * Parse edges in 2D g2o graph file into a set of BetweenFactors. - * Instantiated in .cpp T equal to Pose2, Pose3 + * Parse BetweenFactors in a line-based text format (like g2o) into a vector of + * shared pointers. Instantiated in .cpp T equal to Pose2 and Pose3. */ template GTSAM_EXPORT std::vector::shared_ptr> parseFactors(const std::string &filename, const noiseModel::Diagonal::shared_ptr &model = nullptr, - Key maxKey = 0); + size_t maxIndex = 0); /// Return type for auxiliary functions -typedef std::pair IndexedPose; -typedef std::pair IndexedLandmark; -typedef std::pair, Pose2> IndexedEdge; +typedef std::pair IndexedPose; +typedef std::pair IndexedLandmark; +typedef std::pair, Pose2> IndexedEdge; /** * Parse TORO/G2O vertex "id x y yaw" @@ -130,19 +134,21 @@ GTSAM_EXPORT boost::optional parseVertexLandmark(std::istream& GTSAM_EXPORT boost::optional parseEdge(std::istream& is, const std::string& tag); -/// Return type for load functions +/// Return type for load functions, which return a graph and initial values. For +/// landmarks, the gtsam::Symbol L(index) is used to insert into the Values. +/// Bearing-range measurements also refer tio landmarks with L(index). 
using GraphAndValues = std::pair; /** * Load TORO 2D Graph * @param dataset/model pair as constructed by [dataset] - * @param maxKey if non-zero cut out vertices >= maxKey + * @param maxIndex if non-zero cut out vertices >= maxIndex * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model */ GTSAM_EXPORT GraphAndValues load2D( - std::pair dataset, Key maxKey = 0, + std::pair dataset, size_t maxIndex = 0, bool addNoise = false, bool smart = true, // NoiseFormat noiseFormat = NoiseFormatAUTO, @@ -152,7 +158,7 @@ GTSAM_EXPORT GraphAndValues load2D( * Load TORO/G2O style graph files * @param filename * @param model optional noise model to use instead of one specified by file - * @param maxKey if non-zero cut out vertices >= maxKey + * @param maxIndex if non-zero cut out vertices >= maxIndex * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model * @param noiseFormat how noise parameters are stored @@ -160,13 +166,13 @@ GTSAM_EXPORT GraphAndValues load2D( * @return graph and initial values */ GTSAM_EXPORT GraphAndValues load2D(const std::string& filename, - SharedNoiseModel model = SharedNoiseModel(), Key maxKey = 0, bool addNoise = + SharedNoiseModel model = SharedNoiseModel(), size_t maxIndex = 0, bool addNoise = false, bool smart = true, NoiseFormat noiseFormat = NoiseFormatAUTO, // KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE); /// @deprecated load2D now allows for arbitrary models and wrapping a robust kernel GTSAM_EXPORT GraphAndValues load2D_robust(const std::string& filename, - noiseModel::Base::shared_ptr& model, Key maxKey = 0); + noiseModel::Base::shared_ptr& model, size_t maxIndex = 0); /** save 2d graph */ GTSAM_EXPORT void save2D(const NonlinearFactorGraph& graph, @@ -190,6 +196,11 @@ GTSAM_EXPORT GraphAndValues readG2o(const std::string& g2oFile, const bool is3D * @param filename The name of the g2o file to write * @param graph NonlinearFactor graph storing the measurements * @param estimate Values + * + * Note:behavior change in PR #471: to be consistent with load2D and load3D, we + * write the *indices* to file and not the full Keys. This change really only + * affects landmarks, which get read as indices but stored in values with the + * symbol L(index). 
*/ GTSAM_EXPORT void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, const std::string& filename); @@ -330,22 +341,24 @@ GTSAM_EXPORT Values initialCamerasEstimate(const SfmData& db); */ GTSAM_EXPORT Values initialCamerasAndPointsEstimate(const SfmData& db); +// To be deprecated when pybind wrapper is merged: +using BetweenFactorPose3s = std::vector::shared_ptr>; +GTSAM_EXPORT BetweenFactorPose3s +parse3DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); + #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 inline boost::optional parseVertex(std::istream &is, const std::string &tag) { return parseVertexPose(is, tag); } -GTSAM_EXPORT std::map parse3DPoses(const std::string &filename, - Key maxKey = 0); +GTSAM_EXPORT std::map parse3DPoses(const std::string &filename, + size_t maxIndex = 0); -GTSAM_EXPORT std::map parse3DLandmarks(const std::string &filename, - Key maxKey = 0); - -using BetweenFactorPose3s = std::vector::shared_ptr>; -GTSAM_EXPORT BetweenFactorPose3s parse3DFactors( - const std::string &filename, - const noiseModel::Diagonal::shared_ptr &model = nullptr, Key maxKey = 0); +GTSAM_EXPORT std::map +parse3DLandmarks(const std::string &filename, size_t maxIndex = 0); #endif } // namespace gtsam diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index 3e804b6b0..aad9124c5 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -82,7 +82,7 @@ TEST( dataSet, parseEdge) const auto actual = parseEdge(is, tag); EXPECT(actual); if (actual) { - pair expected(0, 1); + pair expected(0, 1); EXPECT(expected == actual->first); EXPECT(assert_equal(Pose2(2, 3, 4), actual->second)); } @@ -105,8 +105,9 @@ TEST(dataSet, load2D) { EXPECT(assert_equal(expected, *actual)); // Check binary measurements, Pose2 - auto measurements = parseMeasurements(filename, nullptr, 5); - EXPECT_LONGS_EQUAL(4, measurements.size()); + size_t maxIndex = 5; + auto measurements = parseMeasurements(filename, nullptr, maxIndex); + EXPECT_LONGS_EQUAL(5, measurements.size()); // Check binary measurements, Rot2 auto measurements2 = parseMeasurements(filename); @@ -132,14 +133,22 @@ TEST(dataSet, load2D) { } /* ************************************************************************* */ -TEST( dataSet, load2DVictoriaPark) -{ +TEST(dataSet, load2DVictoriaPark) { const string filename = findExampleDataFile("victoria_park.txt"); NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; + + // Load all boost::tie(graph, initial) = load2D(filename); - EXPECT_LONGS_EQUAL(10608,graph->size()); - EXPECT_LONGS_EQUAL(7120,initial->size()); + EXPECT_LONGS_EQUAL(10608, graph->size()); + EXPECT_LONGS_EQUAL(7120, initial->size()); + + // Restrict keys + size_t maxIndex = 5; + boost::tie(graph, initial) = load2D(filename, nullptr, maxIndex); + EXPECT_LONGS_EQUAL(5, graph->size()); + EXPECT_LONGS_EQUAL(6, initial->size()); // file has 0 as well + EXPECT_LONGS_EQUAL(L(5), graph->at(4)->keys()[1]); } /* ************************************************************************* */ @@ -293,13 +302,13 @@ TEST(dataSet, readG2oCheckDeterminants) { } // Check determinants in initial values - const map poses = parseVariables(g2oFile); + const map poses = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(5, poses.size()); for (const auto& key_value : poses) { const Rot3 R = key_value.second.rotation(); EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9); } - const map landmarks = parseVariables(g2oFile); + 
const map landmarks = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(0, landmarks.size()); } @@ -308,10 +317,13 @@ TEST(dataSet, readG2oLandmarks) { const string g2oFile = findExampleDataFile("example_with_vertices.g2o"); // Check number of poses and landmarks. Should be 8 each. - const map poses = parseVariables(g2oFile); + const map poses = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(8, poses.size()); - const map landmarks = parseVariables(g2oFile); + const map landmarks = parseVariables(g2oFile); EXPECT_LONGS_EQUAL(8, landmarks.size()); + + auto graphAndValues = load3D(g2oFile); + EXPECT(graphAndValues.second->exists(L(0))); } /* ************************************************************************* */ @@ -564,14 +576,12 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ Values value; for(size_t i=0; i < readData.number_cameras(); i++){ // for each camera - Key poseKey = symbol('x',i); Pose3 pose = poseChange.compose(readData.cameras[i].pose()); - value.insert(poseKey, pose); + value.insert(X(i), pose); } for(size_t j=0; j < readData.number_tracks(); j++){ // for each point - Key pointKey = P(j); Point3 point = poseChange.transformFrom( readData.tracks[j].p ); - value.insert(pointKey, point); + value.insert(P(j), point); } // Write values and readData to a file @@ -596,13 +606,11 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ EXPECT(assert_equal(expected,actual,12)); Pose3 expectedPose = camera0.pose(); - Key poseKey = symbol('x',0); - Pose3 actualPose = value.at(poseKey); + Pose3 actualPose = value.at(X(0)); EXPECT(assert_equal(expectedPose,actualPose, 1e-7)); Point3 expectedPoint = track0.p; - Key pointKey = P(0); - Point3 actualPoint = value.at(pointKey); + Point3 actualPoint = value.at(P(0)); EXPECT(assert_equal(expectedPoint,actualPoint, 1e-6)); } diff --git a/tests/testNonlinearISAM.cpp b/tests/testNonlinearISAM.cpp index 974806612..4249f5bc4 100644 --- a/tests/testNonlinearISAM.cpp +++ b/tests/testNonlinearISAM.cpp @@ -308,7 +308,7 @@ TEST(testNonlinearISAM, loop_closures ) { // check if edge const auto betweenPose = parseEdge(is, tag); if (betweenPose) { - Key id1, id2; + size_t id1, id2; tie(id1, id2) = betweenPose->first; graph.emplace_shared >(Symbol('x', id2), Symbol('x', id1), betweenPose->second, model); From bfae5561684de0a8c63ff56499f61e5fa868b237 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 15 Aug 2020 13:33:00 -0400 Subject: [PATCH 060/142] typo --- .travis.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.sh b/.travis.sh index c2b4e0590..7f5a58d0e 100755 --- a/.travis.sh +++ b/.travis.sh @@ -63,7 +63,7 @@ function configure() -DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \ -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=ON + -DCMAKE_VERBOSE_MAKEFILE=ON \ -DBOOST_ROOT=$BOOST_ROOT } From 2d7fb05f6cf2bd944aee8afb8a067fcfb3af7caf Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 15 Aug 2020 13:54:02 -0400 Subject: [PATCH 061/142] Ignore system boost --- .travis.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.sh b/.travis.sh index 7f5a58d0e..c4ac675bd 100755 --- a/.travis.sh +++ b/.travis.sh @@ -64,7 +64,10 @@ function configure() -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DBOOST_ROOT=$BOOST_ROOT + -DBOOST_ROOT=$BOOST_ROOT \ + -DBoost_NO_SYSTEM_PATHS=ON \ + 
-DBOOST_INCLUDEDIR="$BOOST_ROOT/boost/include" \ + -DBOOST_LIBRARYDIR="$BOOST_ROOT/lib" } From ae341c83a46508c92f24f0dcba6a798713df2d7b Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 15 Aug 2020 14:13:07 -0400 Subject: [PATCH 062/142] Fix wrong include path --- .travis.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.sh b/.travis.sh index c4ac675bd..6aa4c6da6 100755 --- a/.travis.sh +++ b/.travis.sh @@ -66,7 +66,7 @@ function configure() -DCMAKE_VERBOSE_MAKEFILE=ON \ -DBOOST_ROOT=$BOOST_ROOT \ -DBoost_NO_SYSTEM_PATHS=ON \ - -DBOOST_INCLUDEDIR="$BOOST_ROOT/boost/include" \ + -DBOOST_INCLUDEDIR="$BOOST_ROOT/include" \ -DBOOST_LIBRARYDIR="$BOOST_ROOT/lib" } From 799486b2f3d761e2cb991d377de5cb622f728f0d Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 15 Aug 2020 14:45:58 -0400 Subject: [PATCH 063/142] Do not search for the cmake boost file --- .travis.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.sh b/.travis.sh index 6aa4c6da6..eae460b2d 100755 --- a/.travis.sh +++ b/.travis.sh @@ -67,7 +67,8 @@ function configure() -DBOOST_ROOT=$BOOST_ROOT \ -DBoost_NO_SYSTEM_PATHS=ON \ -DBOOST_INCLUDEDIR="$BOOST_ROOT/include" \ - -DBOOST_LIBRARYDIR="$BOOST_ROOT/lib" + -DBOOST_LIBRARYDIR="$BOOST_ROOT/lib" \ + -DBoost_NO_BOOST_CMAKE=ON } From 7f7bf563f693f082a0d19ef7b6116ffd1084f97d Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 15 Aug 2020 15:28:56 -0400 Subject: [PATCH 064/142] Real reason why cannot find boost --- .travis.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.travis.sh b/.travis.sh index eae460b2d..30713cac2 100755 --- a/.travis.sh +++ b/.travis.sh @@ -66,9 +66,7 @@ function configure() -DCMAKE_VERBOSE_MAKEFILE=ON \ -DBOOST_ROOT=$BOOST_ROOT \ -DBoost_NO_SYSTEM_PATHS=ON \ - -DBOOST_INCLUDEDIR="$BOOST_ROOT/include" \ - -DBOOST_LIBRARYDIR="$BOOST_ROOT/lib" \ - -DBoost_NO_BOOST_CMAKE=ON + -DBoost_ARCHITECTURE=-x64 } From 635c6f89b05105a1160d81bb736e39218fbb3ee4 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 15:44:22 -0500 Subject: [PATCH 065/142] renamed from build-cython to build-python --- .github/workflows/{build-cython.yml => build-python.yml} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename .github/workflows/{build-cython.yml => build-python.yml} (98%) diff --git a/.github/workflows/build-cython.yml b/.github/workflows/build-python.yml similarity index 98% rename from .github/workflows/build-cython.yml rename to .github/workflows/build-python.yml index c159a60fc..497272411 100644 --- a/.github/workflows/build-cython.yml +++ b/.github/workflows/build-python.yml @@ -1,4 +1,4 @@ -name: Cython CI +name: Python CI on: [pull_request] From d9f5c1ebb7556da6b6ef8d0c40952a4b47207504 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 15:50:10 -0500 Subject: [PATCH 066/142] remove travis.python.sh and instead add script in .github folder --- .../scripts/python.sh | 36 +++++++++++-------- 1 file changed, 22 insertions(+), 14 deletions(-) rename .travis.python.sh => .github/scripts/python.sh (63%) diff --git a/.travis.python.sh b/.github/scripts/python.sh similarity index 63% rename from .travis.python.sh rename to .github/scripts/python.sh index 7ccf56dd1..70a5f46e4 100644 --- a/.travis.python.sh +++ b/.github/scripts/python.sh @@ -17,7 +17,7 @@ if [[ $(uname) == "Darwin" ]]; then brew install wget else # Install a system package required by our library - sudo apt-get install wget libicu-dev python3-pip python3-setuptools + sudo apt-get install -y wget 
libicu-dev python3-pip python3-setuptools fi PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin @@ -28,26 +28,26 @@ case $WRAPPER in BUILD_PYBIND="OFF" TYPEDEF_POINTS_TO_VECTORS="OFF" - $PYTHON -m pip install --user -r ./cython/requirements.txt + sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/cython/requirements.txt ;; "pybind") BUILD_CYTHON="OFF" BUILD_PYBIND="ON" TYPEDEF_POINTS_TO_VECTORS="ON" - $PYTHON -m pip install --user -r ./wrap/python/requirements.txt + sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt ;; *) exit 126 ;; esac -CURRDIR=$(pwd) +git submodule update --init --recursive -mkdir $CURRDIR/build -cd $CURRDIR/build +mkdir $GITHUB_WORKSPACE/build +cd $GITHUB_WORKSPACE/build -cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \ +cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \ -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \ -DGTSAM_USE_QUATERNIONS=OFF \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ @@ -56,26 +56,34 @@ cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \ -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \ -DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \ -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF \ -DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \ - -DCMAKE_INSTALL_PREFIX=$CURRDIR/../gtsam_install + -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \ + -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install -make -j$(nproc) install +make -j$(nproc) install & + +while ps -p $! > /dev/null +do + sleep 60 + now=$(date +%s) + printf "%d seconds have elapsed\n" $(( (now - start) )) +done case $WRAPPER in "cython") - cd $CURRDIR/../gtsam_install/cython + cd $GITHUB_WORKSPACE/gtsam_install/cython $PYTHON setup.py install --user --prefix= - cd $CURRDIR/cython/gtsam/tests + cd $GITHUB_WORKSPACE/gtsam_install/cython/gtsam/tests $PYTHON -m unittest discover ;; "pybind") + cd python $PYTHON setup.py install --user --prefix= - cd $CURRDIR/wrap/python/gtsam_py/tests + cd $GITHUB_WORKSPACE/wrap/python/gtsam_py/tests $PYTHON -m unittest discover ;; *) echo "THIS SHOULD NEVER HAPPEN!" 
exit 125 ;; -esac +esac \ No newline at end of file From 0172cd3c8c7348abca9cc42b5d77da3da38adf34 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 15:50:59 -0500 Subject: [PATCH 067/142] updates to python CI yml file --- .github/workflows/build-python.yml | 7 +++---- .github/workflows/build-windows.yml | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index 497272411..9edd1c24d 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -60,9 +60,8 @@ jobs: sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" fi sudo apt-get -y update - sudo apt-get -y upgrade - sudo apt install cmake g++-9 clang-9 build-essential pkg-config libpython-dev python-numpy libboost-all-dev + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev if [ "${{ matrix.compiler }}" = "gcc" ]; then sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib @@ -89,8 +88,8 @@ jobs: - name: Build (Linux) if: runner.os == 'Linux' run: | - bash .travis.python.sh + bash .github/scripts/python.sh - name: Build (macOS) if: runner.os == 'macOS' run: | - bash .travis.python.sh \ No newline at end of file + bash .github/scripts/python.sh \ No newline at end of file diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index 516e67e1d..f7c5dbcce 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -1,4 +1,4 @@ -name: CI Windows +name: Windows CI on: [pull_request] From 808c043e733f1691bf0b09336434649497868cdd Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 16:01:24 -0500 Subject: [PATCH 068/142] moved CI script for unix based systems --- .travis.sh => .github/scripts/unix.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) rename .travis.sh => .github/scripts/unix.sh (92%) mode change 100755 => 100644 diff --git a/.travis.sh b/.github/scripts/unix.sh old mode 100755 new mode 100644 similarity index 92% rename from .travis.sh rename to .github/scripts/unix.sh index 30713cac2..4f6d926fb --- a/.travis.sh +++ b/.github/scripts/unix.sh @@ -25,7 +25,7 @@ function install_tbb() TBBROOT=/tmp/$TBB_DIR # Copy the needed files to the correct places. - # This works correctly for travis builds, instead of setting path variables. + # This works correctly for CI builds, instead of setting path variables. 
# This is what Homebrew does to install TBB on Macs $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/ $SUDO cp -R $TBBROOT/include/ /usr/local/include/ @@ -38,11 +38,11 @@ function configure() set -e # Make sure any error makes the script to return an error code set -x # echo - SOURCE_DIR=`pwd` - BUILD_DIR=build + SOURCE_DIR=$GITHUB_WORKSPACE + BUILD_DIR=$GITHUB_WORKSPACE/build #env - git clean -fd || true + git submodule update --init --recursive rm -fr $BUILD_DIR || true mkdir $BUILD_DIR && cd $BUILD_DIR @@ -61,7 +61,7 @@ function configure() -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \ -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \ + -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ -DCMAKE_VERBOSE_MAKEFILE=ON \ -DBOOST_ROOT=$BOOST_ROOT \ @@ -114,4 +114,4 @@ case $1 in -t) test ;; -esac +esac \ No newline at end of file From 1e3ca015f0e7822e8c3c417a71651f04c204df93 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 16:01:40 -0500 Subject: [PATCH 069/142] updates to macOS and Linux CIs --- .github/workflows/build-linux.yml | 5 ++--- .github/workflows/build-macos.yml | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 837d0b174..0ab87cbeb 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -52,9 +52,8 @@ jobs: sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" fi sudo apt-get -y update - sudo apt-get -y upgrade - sudo apt install cmake g++-9 clang-9 build-essential pkg-config libpython-dev python-numpy + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)" @@ -74,4 +73,4 @@ jobs: - name: Build (Linux) if: runner.os == 'Linux' run: | - bash .travis.sh -t \ No newline at end of file + bash .github/scripts/nix.sh -t \ No newline at end of file diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml index 2ab2e63c5..2feb02eb5 100644 --- a/.github/workflows/build-macos.yml +++ b/.github/workflows/build-macos.yml @@ -1,4 +1,4 @@ -name: CI macOS +name: macOS CI on: [pull_request] @@ -48,4 +48,4 @@ jobs: - name: Build (macOS) if: runner.os == 'macOS' run: | - bash .travis.sh -t \ No newline at end of file + bash .github/scripts/nix.sh -t \ No newline at end of file From de2344fc2e97d87808365bdf3091953f33cf5f69 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 16:03:44 -0500 Subject: [PATCH 070/142] fix script name --- .github/workflows/build-linux.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 0ab87cbeb..5b02cb254 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -1,4 +1,4 @@ -name: CI Linux +name: Linux CI on: [push, pull_request] @@ -73,4 +73,4 @@ jobs: - name: Build (Linux) if: runner.os == 'Linux' run: | - bash .github/scripts/nix.sh -t \ No newline at end of file + bash .github/scripts/unix.sh -t \ No newline at end of file From 7f0767e85fb149f14dc7f42d2b3e9cc391737838 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 16:22:01 -0500 Subject: [PATCH 071/142] fix cython path --- .github/scripts/python.sh | 4 
++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh index 70a5f46e4..4321f4c64 100644 --- a/.github/scripts/python.sh +++ b/.github/scripts/python.sh @@ -71,9 +71,9 @@ done case $WRAPPER in "cython") - cd $GITHUB_WORKSPACE/gtsam_install/cython + cd $GITHUB_WORKSPACE/build/cython $PYTHON setup.py install --user --prefix= - cd $GITHUB_WORKSPACE/gtsam_install/cython/gtsam/tests + cd $GITHUB_WORKSPACE/build/cython/gtsam/tests $PYTHON -m unittest discover ;; "pybind") From 4f28a0b88d39e53c8e1ed64e719dcc75a6b35f3c Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 15 Aug 2020 17:54:45 -0400 Subject: [PATCH 072/142] Fixed covariance bug --- gtsam/slam/dataset.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 9cc814245..12147adf2 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -880,7 +880,7 @@ static BinaryMeasurement convert(const BinaryMeasurement &p) { const Matrix6 M = gaussian->covariance(); return BinaryMeasurement( p.key1(), p.key2(), p.measured().rotation(), - noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3), true)); + noiseModel::Gaussian::Covariance(M.block<3, 3>(0, 0), true)); } template <> From ba6b18947dfeda49071b7489424a62424f1c7b9d Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 18:45:18 -0500 Subject: [PATCH 073/142] fix build script --- .github/workflows/build-macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml index 2feb02eb5..1304d863e 100644 --- a/.github/workflows/build-macos.yml +++ b/.github/workflows/build-macos.yml @@ -48,4 +48,4 @@ jobs: - name: Build (macOS) if: runner.os == 'macOS' run: | - bash .github/scripts/nix.sh -t \ No newline at end of file + bash .github/scripts/unix.sh -t \ No newline at end of file From e04ef8494863143cce6d0d9f90ff5b70802b88ab Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 18:45:31 -0500 Subject: [PATCH 074/142] set LD_LIBRARY_PATH --- .github/workflows/build-linux.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 5b02cb254..267d995f8 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -12,6 +12,8 @@ jobs: CTEST_PARALLEL_LEVEL: 2 CMAKE_BUILD_TYPE: ${{ matrix.build_type }} GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + LD_LIBRARY_PATH: /usr/local/lib + strategy: fail-fast: false matrix: From 5315e3ed35b819c30c8367d5fbba40f8f569a5ae Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 19:26:36 -0500 Subject: [PATCH 075/142] test LD_LIBRARY_PATH --- .github/workflows/build-linux.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 267d995f8..bbdc22202 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -72,6 +72,8 @@ jobs: if: runner.os == 'Linux' run: | echo "BOOST_ROOT = $BOOST_ROOT" + echo "LD_LIBRARY_PATH = $LD_LIBRARY_PATH" + ls $LD_LIBRARY_PATH - name: Build (Linux) if: runner.os == 'Linux' run: | From 51b201988f77ac68ee943dbc0f187471779ea3b5 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 20:20:31 -0500 Subject: [PATCH 076/142] correctly add LD_LIBRARY_PATH boost lib directory --- .github/workflows/build-linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index bbdc22202..24a1a2c96 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -12,7 +12,6 @@ jobs: CTEST_PARALLEL_LEVEL: 2 CMAKE_BUILD_TYPE: ${{ matrix.build_type }} GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} - LD_LIBRARY_PATH: /usr/local/lib strategy: fail-fast: false @@ -58,6 +57,7 @@ jobs: sudo apt install cmake build-essential pkg-config libpython-dev python-numpy echo "::set-env name=BOOST_ROOT::$(echo $BOOST_ROOT_1_69_0)" + echo "::set-env name=LD_LIBRARY_PATH::$(echo $BOOST_ROOT_1_69_0/lib)" if [ "${{ matrix.compiler }}" = "gcc" ]; then sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib From b23dd711b077529f969a10145e7c573b21566e67 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 15 Aug 2020 23:40:43 -0500 Subject: [PATCH 077/142] remove unnecessary prints --- .github/workflows/build-linux.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 24a1a2c96..4fac0f7cc 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -72,8 +72,6 @@ jobs: if: runner.os == 'Linux' run: | echo "BOOST_ROOT = $BOOST_ROOT" - echo "LD_LIBRARY_PATH = $LD_LIBRARY_PATH" - ls $LD_LIBRARY_PATH - name: Build (Linux) if: runner.os == 'Linux' run: | From a2e748130085afa30f51c1e4c00ee9f216402dc3 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 16 Aug 2020 08:31:44 -0500 Subject: [PATCH 078/142] add description for CI scripts --- .github/scripts/python.sh | 7 ++++++- .github/scripts/unix.sh | 5 +++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh index 4321f4c64..5cc6ada24 100644 --- a/.github/scripts/python.sh +++ b/.github/scripts/python.sh @@ -1,4 +1,9 @@ #!/bin/bash + +########################################################## +# Build and test the GTSAM Python wrapper. +########################################################## + set -x -e if [ -z ${PYTHON_VERSION+x} ]; then @@ -77,7 +82,7 @@ case $WRAPPER in $PYTHON -m unittest discover ;; "pybind") - cd python + cd $GITHUB_WORKSPACE/python $PYTHON setup.py install --user --prefix= cd $GITHUB_WORKSPACE/wrap/python/gtsam_py/tests $PYTHON -m unittest discover diff --git a/.github/scripts/unix.sh b/.github/scripts/unix.sh index 4f6d926fb..aa6e49650 100644 --- a/.github/scripts/unix.sh +++ b/.github/scripts/unix.sh @@ -1,5 +1,10 @@ #!/bin/bash +########################################################## +# Build and test GTSAM for *nix based systems. +# Specifically Linux and macOS. 
+########################################################## + # install TBB with _debug.so files function install_tbb() { From 8288b55d4e10181e3c2eaf2328815a69167b5671 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 16 Aug 2020 17:30:52 -0400 Subject: [PATCH 079/142] Addressed review comments --- gtsam/slam/dataset.cpp | 30 ++++++++++++++++++++---------- gtsam/slam/dataset.h | 4 ++-- timing/timeFrobeniusFactor.cpp | 4 ++-- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 12147adf2..d7b067d70 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -110,19 +110,21 @@ string createRewrittenFileName(const string &name) { } /* ************************************************************************* */ -// Parse a file by calling the parse(is, tag) function for every line. +// Type for parser functions used in parseLines below. template using Parser = std::function(istream &is, const string &tag)>; +// Parse a file by calling the parse(is, tag) function for every line. +// The result of calling the function is ignored, so typically parse function +// works via a side effect, like adding a factor into a graph etc. template static void parseLines(const string &filename, Parser parse) { ifstream is(filename.c_str()); if (!is) throw invalid_argument("parse: can not find file " + filename); - while (!is.eof()) { - string tag; - is >> tag; + string tag; + while (is >> tag) { parse(is, tag); // ignore return value is.ignore(LINESIZE, '\n'); } @@ -164,7 +166,9 @@ boost::optional parseVertexPose(istream &is, const string &tag) { if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { size_t id; double x, y, yaw; - is >> id >> x >> y >> yaw; + if (!(is >> id >> x >> y >> yaw)) { + throw std::runtime_error("parseVertexPose encountered malformed line"); + } return IndexedPose(id, Pose2(x, y, yaw)); } else { return boost::none; @@ -183,7 +187,10 @@ boost::optional parseVertexLandmark(istream &is, if (tag == "VERTEX_XY") { size_t id; double x, y; - is >> id >> x >> y; + if (!(is >> id >> x >> y)) { + throw std::runtime_error( + "parseVertexLandmark encountered malformed line"); + } return IndexedLandmark(id, Point2(x, y)); } else { return boost::none; @@ -287,7 +294,9 @@ boost::optional parseEdge(istream &is, const string &tag) { size_t id1, id2; double x, y, yaw; - is >> id1 >> id2 >> x >> y >> yaw; + if (!(is >> id1 >> id2 >> x >> y >> yaw)) { + throw std::runtime_error("parseEdge encountered malformed line"); + } return IndexedEdge({id1, id2}, Pose2(x, y, yaw)); } else { return boost::none; @@ -295,8 +304,8 @@ boost::optional parseEdge(istream &is, const string &tag) { } /* ************************************************************************* */ -// Measurement parsers are implemented as a functor, as they has several options -// to filter and create the measurement model. +// Measurement parsers are implemented as a functor, as they have several +// options to filter and create the measurement model. 
template struct ParseMeasurement; /* ************************************************************************* */ @@ -670,7 +679,8 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, auto factor = boost::dynamic_pointer_cast>(factor_); if (factor) { SharedNoiseModel model = factor->noiseModel(); - auto gaussianModel = boost::dynamic_pointer_cast(model); + auto gaussianModel = + boost::dynamic_pointer_cast(model); if (!gaussianModel) { model->print("model\n"); throw invalid_argument("writeG2o: invalid noise model!"); diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 7bdf6728d..53abe55ba 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -77,7 +77,7 @@ enum KernelFunctionType { * Parse variables in a line-based text format (like g2o) into a map. * Instantiated in .cpp Pose2, Point2, Pose3, and Point3. * Note the map keys are integer indices, *not* gtsam::Keys. This is is - * different below where landmarks will use be L(index) symbols. + * different below where landmarks will use L(index) symbols. */ template GTSAM_EXPORT std::map parseVariables(const std::string &filename, @@ -136,7 +136,7 @@ GTSAM_EXPORT boost::optional parseEdge(std::istream& is, /// Return type for load functions, which return a graph and initial values. For /// landmarks, the gtsam::Symbol L(index) is used to insert into the Values. -/// Bearing-range measurements also refer tio landmarks with L(index). +/// Bearing-range measurements also refer to landmarks with L(index). using GraphAndValues = std::pair; diff --git a/timing/timeFrobeniusFactor.cpp b/timing/timeFrobeniusFactor.cpp index c0ac28ed9..924213a33 100644 --- a/timing/timeFrobeniusFactor.cpp +++ b/timing/timeFrobeniusFactor.cpp @@ -68,10 +68,10 @@ int main(int argc, char* argv[]) { auto G = boost::make_shared(SOn::VectorizedGenerators(4)); for (const auto &m : measurements) { const auto &keys = m.keys(); - const Rot3 &Tij = m.measured(); + const Rot3 &Rij = m.measured(); const auto &model = m.noiseModel(); graph.emplace_shared( - keys[0], keys[1], Tij, 4, model, G); + keys[0], keys[1], Rij, 4, model, G); } std::mt19937 rng(42); From 0d5ee3fbc6d6848884aaa83a9339973aca8b8adc Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sun, 16 Aug 2020 19:50:01 -0400 Subject: [PATCH 080/142] Always build unstable --- .github/workflows/build-linux.yml | 4 ++-- .github/workflows/build-macos.yml | 4 ++-- .github/workflows/build-windows.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 4fac0f7cc..911bec59c 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -4,7 +4,7 @@ on: [push, pull_request] jobs: build: - name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.build_unstable }} + name: ${{ matrix.name }} ${{ matrix.build_type }} runs-on: ${{ matrix.os }} env: @@ -25,7 +25,7 @@ jobs: ] build_type: [Debug, Release] - build_unstable: [ON, OFF] + build_unstable: [ON] include: - name: ubuntu-18.04-gcc-5 os: ubuntu-18.04 diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml index 1304d863e..55d9071ef 100644 --- a/.github/workflows/build-macos.yml +++ b/.github/workflows/build-macos.yml @@ -4,7 +4,7 @@ on: [pull_request] jobs: build: - name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.build_unstable }} + name: ${{ matrix.name }} ${{ matrix.build_type }} runs-on: ${{ matrix.os }} env: @@ -22,7 +22,7 @@ jobs: ] build_type: [Debug, Release] - 
build_unstable: [ON, OFF] + build_unstable: [ON] include: - name: macOS-10.15-xcode-11.3.1 os: macOS-10.15 diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index f7c5dbcce..b3150a751 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -4,7 +4,7 @@ on: [pull_request] jobs: build: - name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.build_unstable }} + name: ${{ matrix.name }} ${{ matrix.build_type }} runs-on: ${{ matrix.os }} env: @@ -23,7 +23,7 @@ jobs: ] build_type: [Debug, Release] - build_unstable: [ON, OFF] + build_unstable: [ON] include: - name: windows-2016-cl os: windows-2016 From 84049ebecffcd334a97cedf9479fe8afc2f50f46 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sun, 16 Aug 2020 20:02:35 -0400 Subject: [PATCH 081/142] Add Python to the name of CI --- .github/workflows/build-python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index 9edd1c24d..0b4a7f12f 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -4,7 +4,7 @@ on: [pull_request] jobs: build: - name: ${{ matrix.name }} ${{ matrix.build_type }} ${{ matrix.python_version }} + name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }} runs-on: ${{ matrix.os }} env: From 84afc944582e1ee518908169f815ce05ea4768cc Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 17 Aug 2020 07:43:10 -0400 Subject: [PATCH 082/142] Feature/shonan averaging (#473) Shonan Rotation Averaging. 199 commit messages below, many are obsolete as design has changed quite a bit over time, especially from the earlier period where I thought we only needed SO(4). * prototyping weighted sampler * Moved WeightedSampler into its own header * Random now uses std header . * Removed boost/random usage from linear and discrete directories * Made into class * Now using new WeightedSampler class * Inlined random direction generation * eradicated last vestiges of boost/random in gtsam_unstable * Added 3D example g2o file * Added Frobenius norm factors * Shonan averaging algorithm, using SOn class * Wrapping Frobenius and Shonan * Fixed issues with << * Use Builder parameters * Refactored Shonan interface * Fixed << issues as well as MATLAB segfault, using eval(), as discussed in issue #451 * ShonanAveragingParameters * New factor FrobeniusWormholeFactorP computes |Rj*P - Ri*P*Rij| * Fixed broken GetDimension for Lie groups with variable dimension. * Removed all but Shonan averaging factor and made everything work with new SOn * Just a single WormholeFactor, wrapped noise model * Use std * comments/todos * added timing script * add script to process ShonanAveraging timing results * Now producing a CSV file * Parse csv file and make combined plot * Fixed range * change p value and set two flags on * input file path, all the csv files proceeses at the same time * add check convergence rate part * csv file have name according to input data name * correct one mistake in initialization * generate the convergence rate for each p value * add yticks for the bar plot * add noises to the measurements * test add noise * Basic structure for checkOptimalityAt * change optimizer method to cholesky * buildQ now working. Tests should be better but visually inspected. 
* multiple test with cholesky * back * computeLambda now works * make combined plots while make bar plot * Calculate minimum eigenvalue - the very expensive version * Exposed computeMinEigenValue * make plots and bar togenter * method change to jacobi * add time for check optimality, min_eigen_value, sub_bound * updated plot min_eigen value and subounds * Adding Spectra headers * David's min eigenvalue code inserted and made to compile. * Made it work * Made "run" method work. * add rim.g2o name * Fixed bug in shifting eigenvalues * roundSolution which replaces projectFrom * removed extra arguments * Added to wrapper * Add SOn to template lists * roundSolution delete the extra arguement p * only calculate p=5 and change to the correct way computing f_R * Fixed conflict and made Google-style name changes * prototype descent code and unit test for initializeWithDescent * add averaging cost/time part in processing data * initializewithDescent success in test * Formatting and find example rather than hardcode * Removed accidentally checked in cmake files * give value to xi by block * correct gradient descent * correct xi * } * Fix wrapper * Make Hat/Vee have alternating signs * MakeATangentVector helpder function * Fixed cmake files * changed sign * add line search * unit test for line search * test real data with line search * correct comment * Fix boost::uniform_real * add save .dat file * correct test case * add explanation * delete redundant cout * add name to .dat output file * correct checkR * add get poses_ in shonan * add Vector Point type for savig data * Remove cmake file which magically re-appeared?? * Switched to std random library. * Prepare Klaus test * Add klaus3.g2o data. * fix comment * Fix derivatives * Fixed broken GetDimension for Lie groups with variable dimension. * Fix SOn tests to report correct dimension * Added tests for Klaus3 data * Add runWithRandomKlaus test for shonan. * Finish runWithRandomKlaus unittest. * Correct datafile. * Correct the format. * Added measured and keys methods * Shonan works on Klaus data * Create dense versions for wrappers, for testing * Now store D, Q, and L * Remove another cmake file incorrectly checked in. * Found and fixed the bug in ComputeLambda ! * Now using Q in Lambdas calculation, so Lambdas agree with Eriksson18cvpr. * Make FrobeniusFactor not use deprecated methods * FrobeniusWormholeFactor takes Rot3 as argument * Wrapped some more methods. 
* Wrapped more methods * Allow creating and populating BetweenFactorPose3s in python * New constructors for ShonanAveraging * add function of get measurements number * Remove option not to use noise model * wrap Use nrMeasurements * Made Logmap a bit more tolerant of slightly degenerate rotations (with trace < -1) * Allow for Anchor index * Fix anchor bug * Change outside view to Rot3 rather than SO3 * Add Lift in SOn class * Make comet working * Small fixes * Delete extra function * Add SOn::Lift * Removed hardcoded flag * Moved Frobenius factor to gtsam from unstable * Added new tests and made an old regression pass again * Cleaned up formatting and some comments, added EXPORT directives * Throw exception if wrongly dimensioned values are given * static_cast and other throw * Fixed run-time dimension * Added gauge-constraining factor * LM parameters now passed in, added Gauge fixing * 2D test scaffold * Comments * Pre-allocated generators * Document API * Add optional weight * New prior weeights infrastructure * Made d a template parameter * Recursive Hat and RetractJacobian test * Added Spectra 0.9.0 to 3rdparty * Enabling 2D averaging * Templatized Wormhole factor * ignore xcode folder * Fixed vec and VectorizedGenerators templates for fixed N!=3 or 4 * Simplifying constructors Moved file loading to tests (for now) All unit tests pass for d==3! * Templated some methods internally * Very generic parseToVector * refactored load2d * Very much improved FrobeniusWormholeFactor (Shonan) Jacobians * SO(2) averaging works ! * Templated parse methods * Switched to new Dataset paradigm * Moved Shonan to gtsam * Checked noise model is correctly gotten from file * Fixed covariance bug * Making Shonan wrapper work * Renamed FrobeniusWormholeFactor to ShonanFactor and moved into its own compilation unit in gtsam/sfm * Fixed wrong include * Simplified interface (removed irrelevant random inits) and fixed eigenvector test * Removed stray boost::none * Added citation as suggested by Jose * Made descent test deterministic * Fixed some comments, commented out flaky test Co-authored-by: Jing Wu Co-authored-by: jingwuOUO Co-authored-by: swang Co-authored-by: ss Co-authored-by: Fan Jiang --- .gitignore | 1 + cython/gtsam/tests/test_FrobeniusFactor.py | 4 +- examples/Data/Klaus3.g2o | 12 +- gtsam.h | 131 ++- gtsam/geometry/Rot2.cpp | 7 + gtsam/geometry/Rot2.h | 10 + gtsam/geometry/SOn-inl.h | 19 +- gtsam/geometry/SOn.cpp | 43 +- gtsam/geometry/SOn.h | 22 +- gtsam/geometry/tests/testSOn.cpp | 88 +- gtsam/nonlinear/NonlinearOptimizer.cpp | 2 +- gtsam/sfm/ShonanAveraging.cpp | 841 ++++++++++++++++++ gtsam/sfm/ShonanAveraging.h | 356 ++++++++ gtsam/sfm/ShonanFactor.cpp | 141 +++ gtsam/sfm/ShonanFactor.h | 91 ++ gtsam/sfm/ShonanGaugeFactor.h | 108 +++ gtsam/sfm/tests/testShonanAveraging.cpp | 360 ++++++++ gtsam/sfm/tests/testShonanFactor.cpp | 121 +++ gtsam/sfm/tests/testShonanGaugeFactor.cpp | 106 +++ gtsam/slam/FrobeniusFactor.cpp | 118 +-- gtsam/slam/FrobeniusFactor.h | 74 +- gtsam/slam/KarcherMeanFactor-inl.h | 18 +- gtsam/slam/KarcherMeanFactor.h | 43 +- gtsam/slam/tests/testFrobeniusFactor.cpp | 48 - gtsam_unstable/gtsam_unstable.h | 6 +- .../timing/process_shonan_timing_results.py | 215 +++++ gtsam_unstable/timing/timeShonanAveraging.cpp | 182 ++++ ...obeniusFactor.cpp => timeShonanFactor.cpp} | 10 +- 28 files changed, 2880 insertions(+), 297 deletions(-) create mode 100644 gtsam/sfm/ShonanAveraging.cpp create mode 100644 gtsam/sfm/ShonanAveraging.h create mode 100644 gtsam/sfm/ShonanFactor.cpp create mode 
100644 gtsam/sfm/ShonanFactor.h create mode 100644 gtsam/sfm/ShonanGaugeFactor.h create mode 100644 gtsam/sfm/tests/testShonanAveraging.cpp create mode 100644 gtsam/sfm/tests/testShonanFactor.cpp create mode 100644 gtsam/sfm/tests/testShonanGaugeFactor.cpp create mode 100644 gtsam_unstable/timing/process_shonan_timing_results.py create mode 100644 gtsam_unstable/timing/timeShonanAveraging.cpp rename timing/{timeFrobeniusFactor.cpp => timeShonanFactor.cpp} (92%) diff --git a/.gitignore b/.gitignore index 1d89cac25..c2d6ce60f 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ cython/gtsam_wrapper.pxd /CMakeSettings.json # for QtCreator: CMakeLists.txt.user* +xcode/ diff --git a/cython/gtsam/tests/test_FrobeniusFactor.py b/cython/gtsam/tests/test_FrobeniusFactor.py index f3f5354bb..e808627f5 100644 --- a/cython/gtsam/tests/test_FrobeniusFactor.py +++ b/cython/gtsam/tests/test_FrobeniusFactor.py @@ -13,7 +13,7 @@ import unittest import numpy as np from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4, - FrobeniusWormholeFactor, SOn) + ShonanFactor3, SOn) id = SO4() v1 = np.array([0, 0, 0, 0.1, 0, 0]) @@ -43,7 +43,7 @@ class TestFrobeniusFactorSO4(unittest.TestCase): """Test creation of a factor that calculates Shonan error.""" R1 = SO3.Expmap(v1[3:]) R2 = SO3.Expmap(v2[3:]) - factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4) + factor = ShonanFactor3(1, 2, Rot3(R1.between(R2).matrix()), p=4) I4 = SOn(4) Q1 = I4.retract(v1) Q2 = I4.retract(v2) diff --git a/examples/Data/Klaus3.g2o b/examples/Data/Klaus3.g2o index 4c7233fa7..83a6e6fd2 100644 --- a/examples/Data/Klaus3.g2o +++ b/examples/Data/Klaus3.g2o @@ -1,6 +1,6 @@ -VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109 -VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809 -VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403 -EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 -EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 -EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +VERTEX_SE3:QUAT 0 -1.6618596980158338 -0.5736497760548741 -3.3319774096611026 -0.02676080288219576 -0.024497002638379624 -0.015064701622500615 0.9992281076190063 +VERTEX_SE3:QUAT 1 -1.431820463019384 -0.549139761976065 -3.160677992237872 -0.049543805396343954 -0.03232420352077356 -0.004386230477751116 0.998239108728862 +VERTEX_SE3:QUAT 2 -1.0394840214436651 -0.5268841046291037 -2.972143862665523 -0.07993768981394891 0.0825062894866454 -0.04088089479075661 0.9925378735259738 +EDGE_SE3:QUAT 0 1 0.23003923499644974 0.02451001407880915 0.17129941742323052 -0.022048798853273946 -0.01796327847857683 
0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +EDGE_SE3:QUAT 0 2 0.6223756765721686 0.04676567142577037 0.35983354699557957 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +EDGE_SE3:QUAT 1 2 0.3923364415757189 0.022255657346961222 0.18853412957234905 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 diff --git a/gtsam.h b/gtsam.h index 86bfa05b7..c5d25b0e9 100644 --- a/gtsam.h +++ b/gtsam.h @@ -2855,7 +2855,7 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor { }; #include -gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel( +gtsam::noiseModel::Isotropic* ConvertNoiseModel( gtsam::noiseModel::Base* model, size_t d); template @@ -2874,12 +2874,14 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor { Vector evaluateError(const T& R1, const T& R2); }; -virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor { - FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12, +#include + +virtual class ShonanFactor3 : gtsam::NoiseModelFactor { + ShonanFactor(size_t key1, size_t key2, const gtsam::Rot3 &R12, size_t p); - FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12, - size_t p, gtsam::noiseModel::Base* model); - Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2); + ShonanFactor(size_t key1, size_t key2, const gtsam::Rot3 &R12, + size_t p, gtsam::noiseModel::Base *model); + Vector evaluateError(const gtsam::SOn &Q1, const gtsam::SOn &Q2); }; #include @@ -2895,6 +2897,123 @@ class BinaryMeasurement { typedef gtsam::BinaryMeasurement BinaryMeasurementUnit3; typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; +#include + +// TODO(frank): copy/pasta below until we have integer template paremeters in wrap! 
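For orientation, a minimal C++ sketch of how the ShonanAveraging3 class exposed by the wrapper entries below is typically driven; the dataset path and the staircase bounds (3, 10) are illustrative placeholders, not values taken from this patch:

    #include <gtsam/nonlinear/Values.h>
    #include <gtsam/sfm/ShonanAveraging.h>

    #include <iostream>
    #include <tuple>

    int main() {
      using namespace gtsam;
      // Placeholder path: any g2o file with 3D pose constraints would do here.
      ShonanAveraging3 shonan("example_pose3.g2o");
      Values initial = shonan.initializeRandomly();
      // Run the Riemannian staircase from p=3 up to p=10 (illustrative bounds).
      Values rotations;
      double minEigenValue;
      std::tie(rotations, minEigenValue) = shonan.run(initial, 3, 10);
      std::cout << "certified minimum eigenvalue: " << minEigenValue << "\n"
                << "final cost: " << shonan.cost(rotations) << std::endl;
      return 0;
    }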
+ +class ShonanAveragingParameters2 { + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot2& value); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveragingParameters3 { + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot3& value); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveraging2 { + ShonanAveraging2(string g2oFile); + ShonanAveraging2(string g2oFile, + const gtsam::ShonanAveragingParameters2 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot2 measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; +}; + +class ShonanAveraging3 { + ShonanAveraging3(string g2oFile); + ShonanAveraging3(string g2oFile, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // TODO(frank): deprecate once we land pybind wrapper + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors); + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot3 measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix 
computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; +}; + //************************************************************************* // Navigation //************************************************************************* diff --git a/gtsam/geometry/Rot2.cpp b/gtsam/geometry/Rot2.cpp index 04ed16774..7502c4ccf 100644 --- a/gtsam/geometry/Rot2.cpp +++ b/gtsam/geometry/Rot2.cpp @@ -39,6 +39,13 @@ Rot2 Rot2::atan2(double y, double x) { return R.normalize(); } +/* ************************************************************************* */ +Rot2 Rot2::Random(std::mt19937& rng) { + uniform_real_distribution randomAngle(-M_PI, M_PI); + double angle = randomAngle(rng); + return fromAngle(angle); +} + /* ************************************************************************* */ void Rot2::print(const string& s) const { cout << s << ": " << theta() << endl; diff --git a/gtsam/geometry/Rot2.h b/gtsam/geometry/Rot2.h index f46f12540..8a361f558 100644 --- a/gtsam/geometry/Rot2.h +++ b/gtsam/geometry/Rot2.h @@ -22,6 +22,8 @@ #include #include +#include + namespace gtsam { /** @@ -79,6 +81,14 @@ namespace gtsam { /** Named constructor that behaves as atan2, i.e., y,x order (!) 
and normalizes */ static Rot2 atan2(double y, double x); + /** + * Random, generates random angle \in [-p,pi] + * Example: + * std::mt19937 engine(42); + * Unit3 unit = Unit3::Random(engine); + */ + static Rot2 Random(std::mt19937 & rng); + /// @} /// @name Testable /// @{ diff --git a/gtsam/geometry/SOn-inl.h b/gtsam/geometry/SOn-inl.h index 9edce8336..6180f4cc7 100644 --- a/gtsam/geometry/SOn-inl.h +++ b/gtsam/geometry/SOn-inl.h @@ -60,8 +60,9 @@ typename SO::TangentVector SO::ChartAtOrigin::Local(const SO& R, template typename SO::MatrixDD SO::AdjointMap() const { + if (N==2) return I_1x1; // SO(2) case throw std::runtime_error( - "SO::AdjointMap only implemented for SO3 and SO4."); + "SO::AdjointMap only implemented for SO2, SO3 and SO4."); } template @@ -84,26 +85,22 @@ typename SO::MatrixDD SO::LogmapDerivative(const TangentVector& omega) { throw std::runtime_error("O::LogmapDerivative only implemented for SO3."); } +// Default fixed size version (but specialized elsewehere for N=2,3,4) template typename SO::VectorN2 SO::vec( OptionalJacobian H) const { - const size_t n = rows(); - const size_t n2 = n * n; - // Vectorize - VectorN2 X(n2); - X << Eigen::Map(matrix_.data(), n2, 1); + VectorN2 X = Eigen::Map(matrix_.data()); // If requested, calculate H as (I \oplus Q) * P, // where Q is the N*N rotation matrix, and P is calculated below. if (H) { // Calculate P matrix of vectorized generators // TODO(duy): Should we refactor this as the jacobian of Hat? - Matrix P = VectorizedGenerators(n); - const size_t d = dim(); - H->resize(n2, d); - for (size_t i = 0; i < n; i++) { - H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + Matrix P = SO::VectorizedGenerators(); + for (size_t i = 0; i < N; i++) { + H->block(i * N, 0, N, dimension) = + matrix_ * P.block(i * N, 0, N, dimension); } } return X; diff --git a/gtsam/geometry/SOn.cpp b/gtsam/geometry/SOn.cpp index 37b6c1784..c6cff4214 100644 --- a/gtsam/geometry/SOn.cpp +++ b/gtsam/geometry/SOn.cpp @@ -22,21 +22,18 @@ namespace gtsam { template <> -GTSAM_EXPORT -Matrix SOn::Hat(const Vector& xi) { +GTSAM_EXPORT void SOn::Hat(const Vector &xi, Eigen::Ref X) { size_t n = AmbientDim(xi.size()); - if (n < 2) throw std::invalid_argument("SO::Hat: n<2 not supported"); - - Matrix X(n, n); // allocate space for n*n skew-symmetric matrix - X.setZero(); - if (n == 2) { + if (n < 2) + throw std::invalid_argument("SO::Hat: n<2 not supported"); + else if (n == 2) { // Handle SO(2) case as recursion bottom assert(xi.size() == 1); X << 0, -xi(0), xi(0), 0; } else { // Recursively call SO(n-1) call for top-left block const size_t dmin = (n - 1) * (n - 2) / 2; - X.topLeftCorner(n - 1, n - 1) = Hat(xi.tail(dmin)); + Hat(xi.tail(dmin), X.topLeftCorner(n - 1, n - 1)); // determine sign of last element (signs alternate) double sign = pow(-1.0, xi.size()); @@ -47,7 +44,14 @@ Matrix SOn::Hat(const Vector& xi) { X(j, n - 1) = -X(n - 1, j); sign = -sign; } + X(n - 1, n - 1) = 0; // bottom-right } +} + +template <> GTSAM_EXPORT Matrix SOn::Hat(const Vector &xi) { + size_t n = AmbientDim(xi.size()); + Matrix X(n, n); // allocate space for n*n skew-symmetric matrix + SOn::Hat(xi, X); return X; } @@ -99,4 +103,27 @@ SOn LieGroup::between(const SOn& g, DynamicJacobian H1, return result; } +// Dynamic version of vec +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const { + const size_t n = rows(), n2 = n * n; + + // Vectorize + VectorN2 X(n2); + X << Eigen::Map(matrix_.data(), n2, 1); + + // If requested, calculate H as (I \oplus Q) * P, + // 
where Q is the N*N rotation matrix, and P is calculated below. + if (H) { + // Calculate P matrix of vectorized generators + // TODO(duy): Should we refactor this as the jacobian of Hat? + Matrix P = SOn::VectorizedGenerators(n); + const size_t d = dim(); + H->resize(n2, d); + for (size_t i = 0; i < n; i++) { + H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + } + } + return X; +} + } // namespace gtsam diff --git a/gtsam/geometry/SOn.h b/gtsam/geometry/SOn.h index 004569416..86b6019e1 100644 --- a/gtsam/geometry/SOn.h +++ b/gtsam/geometry/SOn.h @@ -98,8 +98,8 @@ class SO : public LieGroup, internal::DimensionSO(N)> { template > static SO Lift(size_t n, const Eigen::MatrixBase &R) { Matrix Q = Matrix::Identity(n, n); - size_t p = R.rows(); - assert(p < n && R.cols() == p); + const int p = R.rows(); + assert(p >= 0 && p <= static_cast(n) && R.cols() == p); Q.topLeftCorner(p, p) = R; return SO(Q); } @@ -208,7 +208,7 @@ class SO : public LieGroup, internal::DimensionSO(N)> { // Calculate run-time dimensionality of manifold. // Available as dimension or Dim() for fixed N. - size_t dim() const { return Dimension(matrix_.rows()); } + size_t dim() const { return Dimension(static_cast(matrix_.rows())); } /** * Hat operator creates Lie algebra element corresponding to d-vector, where d @@ -227,9 +227,10 @@ class SO : public LieGroup, internal::DimensionSO(N)> { */ static MatrixNN Hat(const TangentVector& xi); - /** - * Inverse of Hat. See note about xi element order in Hat. - */ + /// In-place version of Hat (see details there), implements recursion. + static void Hat(const Vector &xi, Eigen::Ref X); + + /// Inverse of Hat. See note about xi element order in Hat. static TangentVector Vee(const MatrixNN& X); // Chart at origin @@ -295,10 +296,10 @@ class SO : public LieGroup, internal::DimensionSO(N)> { template > static Matrix VectorizedGenerators() { constexpr size_t N2 = static_cast(N * N); - Matrix G(N2, dimension); + Eigen::Matrix G; for (size_t j = 0; j < dimension; j++) { const auto X = Hat(Vector::Unit(dimension, j)); - G.col(j) = Eigen::Map(X.data(), N2, 1); + G.col(j) = Eigen::Map(X.data()); } return G; } @@ -362,6 +363,11 @@ template <> SOn LieGroup::between(const SOn& g, DynamicJacobian H1, DynamicJacobian H2) const; +/* + * Specialize dynamic vec. 
+ */ +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const; + /** Serialization function */ template void serialize( diff --git a/gtsam/geometry/tests/testSOn.cpp b/gtsam/geometry/tests/testSOn.cpp index 1cf8caed2..4d0ed98b3 100644 --- a/gtsam/geometry/tests/testSOn.cpp +++ b/gtsam/geometry/tests/testSOn.cpp @@ -39,8 +39,8 @@ using namespace std; using namespace gtsam; //****************************************************************************** -// Test dhynamic with n=0 -TEST(SOn, SO0) { +// Test dynamic with n=0 +TEST(SOn, SOn0) { const auto R = SOn(0); EXPECT_LONGS_EQUAL(0, R.rows()); EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -50,7 +50,8 @@ TEST(SOn, SO0) { } //****************************************************************************** -TEST(SOn, SO5) { +// Test dynamic with n=5 +TEST(SOn, SOn5) { const auto R = SOn(5); EXPECT_LONGS_EQUAL(5, R.rows()); EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -59,6 +60,28 @@ TEST(SOn, SO5) { EXPECT_LONGS_EQUAL(10, traits::GetDimension(R)); } +//****************************************************************************** +// Test fixed with n=2 +TEST(SOn, SO0) { + const auto R = SO<2>(); + EXPECT_LONGS_EQUAL(2, R.rows()); + EXPECT_LONGS_EQUAL(1, SO<2>::dimension); + EXPECT_LONGS_EQUAL(1, SO<2>::Dim()); + EXPECT_LONGS_EQUAL(1, R.dim()); + EXPECT_LONGS_EQUAL(1, traits>::GetDimension(R)); +} + +//****************************************************************************** +// Test fixed with n=5 +TEST(SOn, SO5) { + const auto R = SO<5>(); + EXPECT_LONGS_EQUAL(5, R.rows()); + EXPECT_LONGS_EQUAL(10, SO<5>::dimension); + EXPECT_LONGS_EQUAL(10, SO<5>::Dim()); + EXPECT_LONGS_EQUAL(10, R.dim()); + EXPECT_LONGS_EQUAL(10, traits>::GetDimension(R)); +} + //****************************************************************************** TEST(SOn, Concept) { BOOST_CONCEPT_ASSERT((IsGroup)); @@ -105,29 +128,29 @@ TEST(SOn, HatVee) { EXPECT(assert_equal((Vector)v.head<1>(), SOn::Vee(actual2))); Matrix expected3(3, 3); - expected3 << 0, -3, 2, // - 3, 0, -1, // - -2, 1, 0; + expected3 << 0, -3, 2, // + 3, 0, -1, // + -2, 1, 0; const auto actual3 = SOn::Hat(v.head<3>()); EXPECT(assert_equal(expected3, actual3)); EXPECT(assert_equal(skewSymmetric(1, 2, 3), actual3)); EXPECT(assert_equal((Vector)v.head<3>(), SOn::Vee(actual3))); Matrix expected4(4, 4); - expected4 << 0, -6, 5, 3, // - 6, 0, -4, -2, // - -5, 4, 0, 1, // - -3, 2, -1, 0; + expected4 << 0, -6, 5, 3, // + 6, 0, -4, -2, // + -5, 4, 0, 1, // + -3, 2, -1, 0; const auto actual4 = SOn::Hat(v.head<6>()); EXPECT(assert_equal(expected4, actual4)); EXPECT(assert_equal((Vector)v.head<6>(), SOn::Vee(actual4))); Matrix expected5(5, 5); - expected5 << 0,-10, 9, 7, -4, // - 10, 0, -8, -6, 3, // - -9, 8, 0, 5, -2, // - -7, 6, -5, 0, 1, // - 4, -3, 2, -1, 0; + expected5 << 0, -10, 9, 7, -4, // + 10, 0, -8, -6, 3, // + -9, 8, 0, 5, -2, // + -7, 6, -5, 0, 1, // + 4, -3, 2, -1, 0; const auto actual5 = SOn::Hat(v); EXPECT(assert_equal(expected5, actual5)); EXPECT(assert_equal((Vector)v, SOn::Vee(actual5))); @@ -159,6 +182,22 @@ TEST(SOn, RetractLocal) { CHECK(assert_equal(v1, SOn::ChartAtOrigin::Local(Q1), 1e-7)); } +//****************************************************************************** + +Matrix RetractJacobian(size_t n) { return SOn::VectorizedGenerators(n); } + +/// Test Jacobian of Retract at origin +TEST(SOn, RetractJacobian) { + Matrix actualH = RetractJacobian(3); + boost::function h = [](const Vector &v) { + return SOn::ChartAtOrigin::Retract(v).matrix(); + }; + 
Vector3 v; + v.setZero(); + const Matrix expectedH = numericalDerivative11(h, v, 1e-5); + CHECK(assert_equal(expectedH, actualH)); +} + //****************************************************************************** TEST(SOn, vec) { Vector10 v; @@ -166,11 +205,28 @@ TEST(SOn, vec) { SOn Q = SOn::ChartAtOrigin::Retract(v); Matrix actualH; const Vector actual = Q.vec(actualH); - boost::function h = [](const SOn& Q) { return Q.vec(); }; + boost::function h = [](const SOn &Q) { return Q.vec(); }; const Matrix H = numericalDerivative11(h, Q, 1e-5); CHECK(assert_equal(H, actualH)); } +//****************************************************************************** +TEST(SOn, VectorizedGenerators) { + // Default fixed + auto actual2 = SO<2>::VectorizedGenerators(); + CHECK(actual2.rows()==4 && actual2.cols()==1) + + // Specialized + auto actual3 = SO<3>::VectorizedGenerators(); + CHECK(actual3.rows()==9 && actual3.cols()==3) + auto actual4 = SO<4>::VectorizedGenerators(); + CHECK(actual4.rows()==16 && actual4.cols()==6) + + // Dynamic + auto actual5 = SOn::VectorizedGenerators(5); + CHECK(actual5.rows()==25 && actual5.cols()==10) +} + //****************************************************************************** int main() { TestResult tr; diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 328a3facf..9a9c487b6 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -90,7 +90,7 @@ void NonlinearOptimizer::defaultOptimize() { // Iterative loop do { // Do next iteration - currentError = error(); + currentError = error(); // TODO(frank): don't do this twice at first !? Computed above! iterate(); tictoc_finishedIteration(); diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp new file mode 100644 index 000000000..d4f189b0b --- /dev/null +++ b/gtsam/sfm/ShonanAveraging.cpp @@ -0,0 +1,841 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.cpp + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +// In Wrappers we have no access to this so have a default ready +static std::mt19937 kRandomNumberGenerator(42); + +using Sparse = Eigen::SparseMatrix; + +/* ************************************************************************* */ +template +ShonanAveragingParameters::ShonanAveragingParameters( + const LevenbergMarquardtParams &_lm, const std::string &method, + double optimalityThreshold, double alpha, double beta, double gamma) + : lm(_lm), optimalityThreshold(optimalityThreshold), alpha(alpha), + beta(beta), gamma(gamma) { + // By default, we will do conjugate gradient + lm.linearSolverType = LevenbergMarquardtParams::Iterative; + + // Create subgraph builder parameters + SubgraphBuilderParameters builderParameters; + builderParameters.skeletonType = SubgraphBuilderParameters::KRUSKAL; + builderParameters.skeletonWeight = SubgraphBuilderParameters::EQUAL; + builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON; + builderParameters.augmentationFactor = 0.0; + + auto pcg = boost::make_shared(); + + // Choose optimization method + if (method == "SUBGRAPH") { + lm.iterativeParams = + boost::make_shared(builderParameters); + } else if (method == "SGPC") { + pcg->preconditioner_ = + boost::make_shared(builderParameters); + lm.iterativeParams = pcg; + } else if (method == "JACOBI") { + pcg->preconditioner_ = + boost::make_shared(); + lm.iterativeParams = pcg; + } else if (method == "QR") { + lm.setLinearSolverType("MULTIFRONTAL_QR"); + } else if (method == "CHOLESKY") { + lm.setLinearSolverType("MULTIFRONTAL_CHOLESKY"); + } else { + throw std::invalid_argument("ShonanAveragingParameters: unknown method"); + } +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 and d=3 +template struct ShonanAveragingParameters<2>; +template struct ShonanAveragingParameters<3>; + +/* ************************************************************************* */ +// Calculate number of unknown rotations referenced by measurements +template +static size_t +NrUnknowns(const typename ShonanAveraging::Measurements &measurements) { + std::set keys; + for (const auto &measurement : measurements) { + keys.insert(measurement.key1()); + keys.insert(measurement.key2()); + } + return keys.size(); +} + +/* ************************************************************************* */ +template +ShonanAveraging::ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters) + : parameters_(parameters), measurements_(measurements), + nrUnknowns_(NrUnknowns(measurements)) { + for (const auto &measurement : measurements_) { + const auto &model = measurement.noiseModel(); + if (model && model->dim() != SO::dimension) { + measurement.print("Factor with incorrect noise model:\n"); + throw std::invalid_argument("ShonanAveraging: measurements passed to " + "constructor have incorrect dimension."); + } + } + Q_ = buildQ(); + D_ = buildD(); + L_ = D_ - Q_; +} + +/* 
************************************************************************* */ +template +NonlinearFactorGraph ShonanAveraging::buildGraphAt(size_t p) const { + NonlinearFactorGraph graph; + auto G = boost::make_shared(SO<-1>::VectorizedGenerators(p)); + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + graph.emplace_shared>(keys[0], keys[1], Rij, p, model, G); + } + + // Possibly add Karcher prior + if (parameters_.beta > 0) { + const size_t dim = SOn::Dimension(p); + graph.emplace_shared>(graph.keys(), dim); + } + + // Possibly add gauge factors - they are probably useless as gradient is zero + if (parameters_.gamma > 0 && p > d + 1) { + for (auto key : graph.keys()) + graph.emplace_shared(key, p, d, parameters_.gamma); + } + + return graph; +} + +/* ************************************************************************* */ +template +double ShonanAveraging::costAt(size_t p, const Values &values) const { + const NonlinearFactorGraph graph = buildGraphAt(p); + return graph.error(values); +} + +/* ************************************************************************* */ +template +boost::shared_ptr +ShonanAveraging::createOptimizerAt(size_t p, const Values &initial) const { + // Build graph + NonlinearFactorGraph graph = buildGraphAt(p); + + // Anchor prior is added here as depends on initial value (and cost is zero) + if (parameters_.alpha > 0) { + size_t i; + Rot value; + const size_t dim = SOn::Dimension(p); + std::tie(i, value) = parameters_.anchor; + auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha); + graph.emplace_shared>(i, SOn::Lift(p, value.matrix()), + model); + } + + // Optimize + return boost::make_shared(graph, initial, + parameters_.lm); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::tryOptimizingAt(size_t p, + const Values &initial) const { + auto lm = createOptimizerAt(p, initial); + return lm->optimize(); +} + +/* ************************************************************************* */ +// Project to pxdN Stiefel manifold +template +Matrix ShonanAveraging::StiefelElementMatrix(const Values &values) { + const size_t N = values.size(); + const size_t p = values.at(0).rows(); + Matrix S(p, N * d); + for (const auto it : values.filter()) { + S.middleCols(it.key * d) = + it.value.matrix().leftCols(); // project Qj to Stiefel + } + return S; +} + +/* ************************************************************************* */ +template <> +Values ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot2 R = Rot2::atan2(M(1, 0), M(0, 0)); + result.insert(it.key, R); + } + return result; +} + +template <> +Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>()); + result.insert(it.key, R); + } + return result; +} + +/* ************************************************************************* */ +template static Matrix RoundSolutionS(const Matrix &S) { + const size_t N = S.cols() / d; + // First, compute a thin SVD of S + Eigen::JacobiSVD svd(S, Eigen::ComputeThinV); + const Vector sigmas = 
svd.singularValues(); + + // Construct a diagonal matrix comprised of the first d singular values + using DiagonalMatrix = Eigen::DiagonalMatrix; + DiagonalMatrix Sigma_d; + Sigma_d.diagonal() = sigmas.head(); + + // Now, construct a rank-d truncated singular value decomposition for S + Matrix R = Sigma_d * svd.matrixV().leftCols().transpose(); + + // Count the number of blocks whose determinants have positive sign + size_t numPositiveBlocks = 0; + for (size_t i = 0; i < N; ++i) { + // Compute the determinant of the ith dxd block of R + double determinant = R.middleCols(d * i).determinant(); + if (determinant > 0) + ++numPositiveBlocks; + } + + if (numPositiveBlocks < N / 2) { + // Less than half of the total number of blocks have the correct sign. + // To reverse their orientations, multiply with a reflection matrix. + DiagonalMatrix reflector; + reflector.setIdentity(); + reflector.diagonal()(d - 1) = -1; + R = reflector * R; + } + + return R; +} + +/* ************************************************************************* */ +template <> Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const { + // Round to a 2*2N matrix + Matrix R = RoundSolutionS<2>(S); + + // Finally, project each dxd rotation block to SO(2) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot2 Ri = Rot2::atan2(R(1, 2 * j), R(0, 2 * j)); + values.insert(j, Ri); + } + return values; +} + +template <> Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const { + // Round to a 3*3N matrix + Matrix R = RoundSolutionS<3>(S); + + // Finally, project each dxd rotation block to SO(3) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot3 Ri = Rot3::ClosestTo(R.middleCols<3>(3 * j)); + values.insert(j, Ri); + } + return values; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::roundSolution(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. + return roundSolutionS(S); +} + +/* ************************************************************************* */ +template +double ShonanAveraging::cost(const Values &values) const { + NonlinearFactorGraph graph; + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + graph.emplace_shared>>( + keys[0], keys[1], SO(Rij.matrix()), model); + } + // Finally, project each dxd rotation block to SO(d) + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SO(it.value.matrix())); + } + return graph.error(result); +} + +/* ************************************************************************* */ +// Get kappa from noise model +template +static double Kappa(const BinaryMeasurement &measurement) { + const auto &isotropic = boost::dynamic_pointer_cast( + measurement.noiseModel()); + if (!isotropic) { + throw std::invalid_argument( + "Shonan averaging noise models must be isotropic."); + } + const double sigma = isotropic->sigma(); + return 1.0 / (sigma * sigma); +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::buildD() const { + // Each measurement contributes 2*d elements along the diagonal of the + // degree matrix. 
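// Worked example with assumed numbers, for illustration only: with d = 3 and a
// single measurement between keys 0 and 2 whose isotropic sigma is 0.1, we get
// kappa = 1/sigma^2 = 100, di = 0, and dj = 6, so the loop below adds the six
// diagonal triplets (0,0), (1,1), (2,2), (6,6), (7,7), (8,8), each with value 100.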
+ static constexpr size_t stride = 2 * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Get kappa from noise model + double kappa = Kappa(measurement); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t k = 0; k < d; k++) { + // Elements of ith block-diagonal + triplets.emplace_back(di + k, di + k, kappa); + // Elements of jth block-diagonal + triplets.emplace_back(dj + k, dj + k, kappa); + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse D(dN, dN); + D.setFromTriplets(triplets.begin(), triplets.end()); + + return D; +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::buildQ() const { + // Each measurement contributes 2*d^2 elements on a pair of symmetric + // off-diagonal blocks + static constexpr size_t stride = 2 * d * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Extract rotation measurement + const auto Rij = measurement.measured().matrix(); + + // Get kappa from noise model + double kappa = Kappa(measurement); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t r = 0; r < d; r++) { + for (size_t c = 0; c < d; c++) { + // Elements of ij block + triplets.emplace_back(di + r, dj + c, kappa * Rij(r, c)); + // Elements of ji block + triplets.emplace_back(dj + r, di + c, kappa * Rij(c, r)); + } + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse Q(dN, dN); + Q.setFromTriplets(triplets.begin(), triplets.end()); + + return Q; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Matrix &S) const { + // Each pose contributes 2*d elements along the diagonal of Lambda + static constexpr size_t stride = d * d; + + // Reserve space for triplets + const size_t N = nrUnknowns(); + std::vector> triplets; + triplets.reserve(stride * N); + + // Do sparse-dense multiply to get Q*S' + auto QSt = Q_ * S.transpose(); + + for (size_t j = 0; j < N; j++) { + // Compute B, the building block for the j^th diagonal block of Lambda + const size_t dj = d * j; + Matrix B = QSt.middleRows(dj, d) * S.middleCols(dj); + + // Elements of jth block-diagonal + for (size_t r = 0; r < d; r++) + for (size_t c = 0; c < d; c++) + triplets.emplace_back(dj + r, dj + c, 0.5 * (B(r, c) + B(c, r))); + } + + // Construct and return a sparse matrix from these triplets + Sparse Lambda(d * N, d * N); + Lambda.setFromTriplets(triplets.begin(), triplets.end()); + return Lambda; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. 
+ return computeLambda(S); +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeA(const Values &values) const { + assert(values.size() == nrUnknowns()); + const Matrix S = StiefelElementMatrix(values); + auto Lambda = computeLambda(S); + return Lambda - Q_; +} + +/* ************************************************************************* */ +/// MINIMUM EIGENVALUE COMPUTATIONS + +/** This is a lightweight struct used in conjunction with Spectra to compute + * the minimum eigenvalue and eigenvector of a sparse matrix A; it has a single + * nontrivial function, perform_op(x,y), that computes and returns the product + * y = (A + sigma*I) x */ +struct MatrixProdFunctor { + // Const reference to an externally-held matrix whose minimum-eigenvalue we + // want to compute + const Sparse &A_; + + // Spectral shift + double sigma_; + + // Constructor + explicit MatrixProdFunctor(const Sparse &A, double sigma = 0) + : A_(A), sigma_(sigma) {} + + int rows() const { return A_.rows(); } + int cols() const { return A_.cols(); } + + // Matrix-vector multiplication operation + void perform_op(const double *x, double *y) const { + // Wrap the raw arrays as Eigen Vector types + Eigen::Map X(x, rows()); + Eigen::Map Y(y, rows()); + + // Do the multiplication using wrapped Eigen vectors + Y = A_ * X + sigma_ * X; + } +}; + +/// Function to compute the minimum eigenvalue of A using Lanczos in Spectra. +/// This does 2 things: +/// +/// (1) Quick (coarse) eigenvalue computation to estimate the largest-magnitude +/// eigenvalue (2) A second eigenvalue computation applied to A-sigma*I, where +/// sigma is chosen to make the minimum eigenvalue of A the extremal eigenvalue +/// of A-sigma*I +/// +/// Upon completion, this returns a boolean value indicating whether the minimum +/// eigenvalue was computed to the required precision -- if so, its sets the +/// values of minEigenValue and minEigenVector appropriately + +/// Note that in the following function signature, S is supposed to be the +/// block-row-matrix that is a critical point for the optimization algorithm; +/// either S (Stiefel manifold) or R (block rotations). We use this to +/// construct a starting vector v for the Lanczos process that will be close to +/// the minimum eigenvector we're looking for whenever the relaxation is exact +/// -- this is a key feature that helps to make this method fast. Note that +/// instead of passing in all of S, it would be enough to pass in one of S's +/// *rows*, if that's more convenient. + +// For the defaults, David Rosen says: +// - maxIterations refers to the max number of Lanczos iterations to run; +// ~1000 should be sufficiently large +// - We've been using 10^-4 for the nonnegativity tolerance +// - for numLanczosVectors, 20 is a good default value + +static bool +SparseMinimumEigenValue(const Sparse &A, const Matrix &S, double *minEigenValue, + Vector *minEigenVector = 0, size_t *numIterations = 0, + size_t maxIterations = 1000, + double minEigenvalueNonnegativityTolerance = 10e-4, + Eigen::Index numLanczosVectors = 20) { + // a. 
Estimate the largest-magnitude eigenvalue of this matrix using Lanczos + MatrixProdFunctor lmOperator(A); + Spectra::SymEigsSolver + lmEigenValueSolver(&lmOperator, 1, std::min(numLanczosVectors, A.rows())); + lmEigenValueSolver.init(); + + const int lmConverged = lmEigenValueSolver.compute( + maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN); + + // Check convergence and bail out if necessary + if (lmConverged != 1) + return false; + + const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0); + + if (lmEigenValue < 0) { + // The largest-magnitude eigenvalue is negative, and therefore also the + // minimum eigenvalue, so just return this solution + *minEigenValue = lmEigenValue; + if (minEigenVector) { + *minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0); + minEigenVector->normalize(); // Ensure that this is a unit vector + } + return true; + } + + // The largest-magnitude eigenvalue is positive, and is therefore the + // maximum eigenvalue. Therefore, after shifting the spectrum of A + // by -2*lmEigenValue (by forming A - 2*lambda_max*I), the shifted + // spectrum will lie in the interval [minEigenValue(A) - 2* lambda_max(A), + // -lambda_max*A]; in particular, the largest-magnitude eigenvalue of + // A - 2*lambda_max*I is minEigenValue - 2*lambda_max, with corresponding + // eigenvector v_min + + MatrixProdFunctor minShiftedOperator(A, -2 * lmEigenValue); + + Spectra::SymEigsSolver + minEigenValueSolver(&minShiftedOperator, 1, + std::min(numLanczosVectors, A.rows())); + + // If S is a critical point of F, then S^T is also in the null space of S - + // Lambda(S) (cf. Lemma 6 of the tech report), and therefore its rows are + // eigenvectors corresponding to the eigenvalue 0. In the case that the + // relaxation is exact, this is the *minimum* eigenvalue, and therefore the + // rows of S are exactly the eigenvectors that we're looking for. On the + // other hand, if the relaxation is *not* exact, then S - Lambda(S) has at + // least one strictly negative eigenvalue, and the rows of S are *unstable + // fixed points* for the Lanczos iterations. Thus, we will take a slightly + // "fuzzed" version of the first row of S as an initialization for the + // Lanczos iterations; this allows for rapid convergence in the case that + // the relaxation is exact (since are starting close to a solution), while + // simultaneously allowing the iterations to escape from this fixed point in + // the case that the relaxation is not exact. 
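// Numeric illustration with assumed values: if lambda_max(A) = 10 and
// lambda_min(A) = -0.3, the shifted matrix A - 2*10*I has its spectrum in
// [-20.3, -10], so its largest-magnitude eigenvalue is -20.3; adding back
// 2*lambda_max recovers lambda_min = -20.3 + 20 = -0.3, which is exactly the
// correction applied below when the final eigenvalue is reported.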
+ Vector v0 = S.row(0).transpose(); + Vector perturbation(v0.size()); + perturbation.setRandom(); + perturbation.normalize(); + Vector xinit = v0 + (.03 * v0.norm()) * perturbation; // Perturb v0 by ~3% + + // Use this to initialize the eigensolver + minEigenValueSolver.init(xinit.data()); + + // Now determine the relative precision required in the Lanczos method in + // order to be able to estimate the smallest eigenvalue within an *absolute* + // tolerance of 'minEigenvalueNonnegativityTolerance' + const int minConverged = minEigenValueSolver.compute( + maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue, + Spectra::SELECT_EIGENVALUE::LARGEST_MAGN); + + if (minConverged != 1) + return false; + + *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue; + if (minEigenVector) { + *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0); + minEigenVector->normalize(); // Ensure that this is a unit vector + } + if (numIterations) + *numIterations = minEigenValueSolver.num_iterations(); + return true; +} + +/* ************************************************************************* */ +template Sparse ShonanAveraging::computeA(const Matrix &S) const { + auto Lambda = computeLambda(S); + return Lambda - Q_; +} + +/* ************************************************************************* */ +template +double ShonanAveraging::computeMinEigenValue(const Values &values, + Vector *minEigenVector) const { + assert(values.size() == nrUnknowns()); + const Matrix S = StiefelElementMatrix(values); + auto A = computeA(S); + + double minEigenValue; + bool success = SparseMinimumEigenValue(A, S, &minEigenValue, minEigenVector); + if (!success) { + throw std::runtime_error( + "SparseMinimumEigenValue failed to compute minimum eigenvalue."); + } + return minEigenValue; +} + +/* ************************************************************************* */ +template +std::pair +ShonanAveraging::computeMinEigenVector(const Values &values) const { + Vector minEigenVector; + double minEigenValue = computeMinEigenValue(values, &minEigenVector); + return std::make_pair(minEigenValue, minEigenVector); +} + +/* ************************************************************************* */ +template +bool ShonanAveraging::checkOptimality(const Values &values) const { + double minEigenValue = computeMinEigenValue(values); + return minEigenValue > parameters_.optimalityThreshold; +} + +/* ************************************************************************* */ +/// Create a tangent direction xi with eigenvector segment v_i +template +Vector ShonanAveraging::MakeATangentVector(size_t p, const Vector &v, + size_t i) { + // Create a tangent direction xi with eigenvector segment v_i + const size_t dimension = SOn::Dimension(p); + const auto v_i = v.segment(d * i); + Vector xi = Vector::Zero(dimension); + double sign = pow(-1.0, round((p + 1) / 2) + 1); + for (size_t j = 0; j < d; j++) { + xi(j + p - d - 1) = sign * v_i(d - j - 1); + sign = -sign; + } + return xi; +} + +/* ************************************************************************* */ +template +Matrix ShonanAveraging::riemannianGradient(size_t p, + const Values &values) const { + Matrix S_dot = StiefelElementMatrix(values); + // calculate the gradient of F(Q_dot) at Q_dot + Matrix euclideanGradient = 2 * (L_ * (S_dot.transpose())).transpose(); + // cout << "euclidean gradient rows and cols" << euclideanGradient.rows() << + // "\t" << euclideanGradient.cols() << endl; + + // project the gradient onto the entire euclidean 
space + Matrix symBlockDiagProduct(p, d * nrUnknowns()); + for (size_t i = 0; i < nrUnknowns(); i++) { + // Compute block product + const size_t di = d * i; + const Matrix P = S_dot.middleCols(di).transpose() * + euclideanGradient.middleCols(di); + // Symmetrize this block + Matrix S = .5 * (P + P.transpose()); + // Compute S_dot * S and set corresponding block + symBlockDiagProduct.middleCols(di) = S_dot.middleCols(di) * S; + } + Matrix riemannianGradient = euclideanGradient - symBlockDiagProduct; + return riemannianGradient; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::LiftwithDescent(size_t p, const Values &values, + const Vector &minEigenVector) { + Values lifted = LiftTo(p, values); + for (auto it : lifted.filter()) { + // Create a tangent direction xi with eigenvector segment v_i + // Assumes key is 0-based integer + const Vector xi = MakeATangentVector(p, minEigenVector, it.key); + // Move the old value in the descent direction + it.value = it.value.retract(xi); + } + return lifted; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeWithDescent( + size_t p, const Values &values, const Vector &minEigenVector, + double minEigenValue, double gradienTolerance, + double preconditionedGradNormTolerance) const { + double funcVal = costAt(p - 1, values); + double alphaMin = 1e-2; + double alpha = + std::max(1024 * alphaMin, 10 * gradienTolerance / fabs(minEigenValue)); + vector alphas; + vector fvals; + // line search + while ((alpha >= alphaMin)) { + Values Qplus = LiftwithDescent(p, values, alpha * minEigenVector); + double funcValTest = costAt(p, Qplus); + Matrix gradTest = riemannianGradient(p, Qplus); + double gradTestNorm = gradTest.norm(); + // Record alpha and funcVal + alphas.push_back(alpha); + fvals.push_back(funcValTest); + if ((funcVal > funcValTest) && (gradTestNorm > gradienTolerance)) { + return Qplus; + } + alpha /= 2; + } + + auto fminIter = min_element(fvals.begin(), fvals.end()); + auto minIdx = distance(fvals.begin(), fminIter); + double fMin = fvals[minIdx]; + double aMin = alphas[minIdx]; + if (fMin < funcVal) { + Values Qplus = LiftwithDescent(p, values, aMin * minEigenVector); + return Qplus; + } + + return LiftwithDescent(p, values, alpha * minEigenVector); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomly(std::mt19937 &rng) const { + Values initial; + for (size_t j = 0; j < nrUnknowns(); j++) { + initial.insert(j, Rot::Random(rng)); + } + return initial; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomly() const { + return ShonanAveraging::initializeRandomly(kRandomNumberGenerator); +} + +/* ************************************************************************* */ +template +std::pair ShonanAveraging::run(const Values &initialEstimate, + size_t pMin, + size_t pMax) const { + Values Qstar; + Values initialSOp = LiftTo(pMin, initialEstimate); // lift to pMin! 
+ for (size_t p = pMin; p <= pMax; p++) { + // Optimize until convergence at this level + Qstar = tryOptimizingAt(p, initialSOp); + + // Check certificate of global optimzality + Vector minEigenVector; + double minEigenValue = computeMinEigenValue(Qstar, &minEigenVector); + if (minEigenValue > parameters_.optimalityThreshold) { + // If at global optimum, round and return solution + const Values SO3Values = roundSolution(Qstar); + return std::make_pair(SO3Values, minEigenValue); + } + + // Not at global optimimum yet, so check whether we will go to next level + if (p != pMax) { + // Calculate initial estimate for next level by following minEigenVector + initialSOp = + initializeWithDescent(p + 1, Qstar, minEigenVector, minEigenValue); + } + } + throw std::runtime_error("Shonan::run did not converge for given pMax"); +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 +template class ShonanAveraging<2>; + +ShonanAveraging2::ShonanAveraging2(const Measurements &measurements, + const Parameters ¶meters) + : ShonanAveraging<2>(measurements, parameters) {} +ShonanAveraging2::ShonanAveraging2(string g2oFile, const Parameters ¶meters) + : ShonanAveraging<2>(parseMeasurements(g2oFile), parameters) {} + +/* ************************************************************************* */ +// Explicit instantiation for d=3 +template class ShonanAveraging<3>; + +ShonanAveraging3::ShonanAveraging3(const Measurements &measurements, + const Parameters ¶meters) + : ShonanAveraging<3>(measurements, parameters) {} + +ShonanAveraging3::ShonanAveraging3(string g2oFile, const Parameters ¶meters) + : ShonanAveraging<3>(parseMeasurements(g2oFile), parameters) {} + +// TODO(frank): Deprecate after we land pybind wrapper + +// Extract Rot3 measurement from Pose3 betweenfactors +// Modeled after similar function in dataset.cpp +static BinaryMeasurement +convert(const BetweenFactor::shared_ptr &f) { + auto gaussian = + boost::dynamic_pointer_cast(f->noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose3 measurements " + "with Gaussian noise models."); + const Matrix6 M = gaussian->covariance(); + return BinaryMeasurement( + f->key1(), f->key2(), f->measured().rotation(), + noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3), true)); +} + +static ShonanAveraging3::Measurements +extractRot3Measurements(const BetweenFactorPose3s &factors) { + ShonanAveraging3::Measurements result; + result.reserve(factors.size()); + for (auto f : factors) + result.push_back(convert(f)); + return result; +} + +ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors, + const Parameters ¶meters) + : ShonanAveraging<3>(extractRot3Measurements(factors), parameters) {} + +/* ************************************************************************* */ +} // namespace gtsam diff --git a/gtsam/sfm/ShonanAveraging.h b/gtsam/sfm/ShonanAveraging.h new file mode 100644 index 000000000..90c0bda63 --- /dev/null +++ b/gtsam/sfm/ShonanAveraging.h @@ -0,0 +1,356 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.h + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace gtsam { +class NonlinearFactorGraph; +class LevenbergMarquardtOptimizer; + +/// Parameters governing optimization etc. +template struct GTSAM_EXPORT ShonanAveragingParameters { + // Select Rot2 or Rot3 interface based template parameter d + using Rot = typename std::conditional::type; + using Anchor = std::pair; + + // Paremeters themselves: + LevenbergMarquardtParams lm; // LM parameters + double optimalityThreshold; // threshold used in checkOptimality + Anchor anchor; // pose to use as anchor if not Karcher + double alpha; // weight of anchor-based prior (default 0) + double beta; // weight of Karcher-based prior (default 1) + double gamma; // weight of gauge-fixing factors (default 0) + + ShonanAveragingParameters(const LevenbergMarquardtParams &lm = + LevenbergMarquardtParams::CeresDefaults(), + const std::string &method = "JACOBI", + double optimalityThreshold = -1e-4, + double alpha = 0.0, double beta = 1.0, + double gamma = 0.0); + + LevenbergMarquardtParams getLMParams() const { return lm; } + + void setOptimalityThreshold(double value) { optimalityThreshold = value; } + double getOptimalityThreshold() const { return optimalityThreshold; } + + void setAnchor(size_t index, const Rot &value) { anchor = {index, value}; } + + void setAnchorWeight(double value) { alpha = value; } + double getAnchorWeight() { return alpha; } + + void setKarcherWeight(double value) { beta = value; } + double getKarcherWeight() { return beta; } + + void setGaugesWeight(double value) { gamma = value; } + double getGaugesWeight() { return gamma; } +}; + +using ShonanAveragingParameters2 = ShonanAveragingParameters<2>; +using ShonanAveragingParameters3 = ShonanAveragingParameters<3>; + +/** + * Class that implements Shonan Averaging from our ECCV'20 paper. + * Note: The "basic" API uses all Rot values (Rot2 or Rot3, depending on value + * of d), whereas the different levels and "advanced" API at SO(p) needs Values + * of type SOn. + * + * The template parameter d can be 2 or 3. + * Both are specialized in the .cpp file. + * + * If you use this code in your work, please consider citing our paper: + * Shonan Rotation Averaging, Global Optimality by Surfing SO(p)^n + * Frank Dellaert, David M. Rosen, Jing Wu, Robert Mahony, and Luca Carlone, + * European Computer Vision Conference, 2020. + * You can view our ECCV spotlight video at https://youtu.be/5ppaqMyHtE0 + */ +template class GTSAM_EXPORT ShonanAveraging { +public: + using Sparse = Eigen::SparseMatrix; + + // Define the Parameters type and use its typedef of the rotation type: + using Parameters = ShonanAveragingParameters; + using Rot = typename Parameters::Rot; + + // We store SO(d) BetweenFactors to get noise model + // TODO(frank): use BinaryMeasurement? 
+ using Measurements = std::vector>; + +private: + Parameters parameters_; + Measurements measurements_; + size_t nrUnknowns_; + Sparse D_; // Sparse (diagonal) degree matrix + Sparse Q_; // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr + Sparse L_; // connection Laplacian L = D - Q, needed for optimality check + + /** + * Build 3Nx3N sparse matrix consisting of rotation measurements, arranged as + * (i,j) and (j,i) blocks within a sparse matrix. + */ + Sparse buildQ() const; + + /// Build 3Nx3N sparse degree matrix D + Sparse buildD() const; + +public: + /// @name Standard Constructors + /// @{ + + /// Construct from set of relative measurements (given as BetweenFactor + /// for now) NoiseModel *must* be isotropic. + ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + + /// @} + /// @name Query properties + /// @{ + + /// Return number of unknowns + size_t nrUnknowns() const { return nrUnknowns_; } + + /// Return number of measurements + size_t nrMeasurements() const { return measurements_.size(); } + + /// k^th binary measurement + const BinaryMeasurement &measurement(size_t k) const { + return measurements_[k]; + } + + /// k^th measurement, as a Rot. + const Rot &measured(size_t k) const { return measurements_[k].measured(); } + + /// Keys for k^th measurement, as a vector of Key values. + const KeyVector &keys(size_t k) const { return measurements_[k].keys(); } + + /// @} + /// @name Matrix API (advanced use, debugging) + /// @{ + + Sparse D() const { return D_; } ///< Sparse version of D + Matrix denseD() const { return Matrix(D_); } ///< Dense version of D + Sparse Q() const { return Q_; } ///< Sparse version of Q + Matrix denseQ() const { return Matrix(Q_); } ///< Dense version of Q + Sparse L() const { return L_; } ///< Sparse version of L + Matrix denseL() const { return Matrix(L_); } ///< Dense version of L + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeLambda(const Matrix &S) const; + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Values &values) const { + return Matrix(computeLambda(values)); + } + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Matrix &S) const { + return Matrix(computeLambda(S)); + } + + /// Compute A matrix whose Eigenvalues we will examine + Sparse computeA(const Values &values) const; + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeA(const Matrix &S) const; + + /// Dense version of computeA for wrapper/testing + Matrix computeA_(const Values &values) const { + return Matrix(computeA(values)); + } + + /// Project to pxdN Stiefel manifold + static Matrix StiefelElementMatrix(const Values &values); + + /** + * Compute minimum eigenvalue for optimality check. 
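+ * Internally this runs a sparse Spectra eigen-solver on A = Lambda - Q (see
+ * computeA). A minimal usage sketch, with illustrative names borrowed from
+ * the unit tests in this patch:
+ *   Vector minEigenVector;
+ *   double lambdaMin = shonan.computeMinEigenValue(Qstar, &minEigenVector);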
+ * @param values: should be of type SOn + */ + double computeMinEigenValue(const Values &values, + Vector *minEigenVector = nullptr) const; + + /// Project pxdN Stiefel manifold matrix S to Rot3^N + Values roundSolutionS(const Matrix &S) const; + + /// Create a tangent direction xi with eigenvector segment v_i + static Vector MakeATangentVector(size_t p, const Vector &v, size_t i); + + /// Calculate the riemannian gradient of F(values) at values + Matrix riemannianGradient(size_t p, const Values &values) const; + + /** + * Lift up the dimension of values in type SO(p-1) with descent direction + * provided by minEigenVector and return new values in type SO(p) + */ + static Values LiftwithDescent(size_t p, const Values &values, + const Vector &minEigenVector); + + /** + * Given some values at p-1, return new values at p, by doing a line search + * along the descent direction, computed from the minimum eigenvector at p-1. + * @param values should be of type SO(p-1) + * @param minEigenVector corresponding to minEigenValue at level p-1 + * @return values of type SO(p) + */ + Values + initializeWithDescent(size_t p, const Values &values, + const Vector &minEigenVector, double minEigenValue, + double gradienTolerance = 1e-2, + double preconditionedGradNormTolerance = 1e-4) const; + /// @} + /// @name Advanced API + /// @{ + + /** + * Build graph for SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + */ + NonlinearFactorGraph buildGraphAt(size_t p) const; + + /** + * Calculate cost for SO(p) + * Values should be of type SO(p) + */ + double costAt(size_t p, const Values &values) const; + + /** + * Given an estimated local minimum Yopt for the (possibly lifted) + * relaxation, this function computes and returns the block-diagonal elements + * of the corresponding Lagrange multiplier. + */ + Sparse computeLambda(const Values &values) const; + + /** + * Compute minimum eigenvalue for optimality check. 
+ * @param values: should be of type SOn + * @return minEigenVector and minEigenValue + */ + std::pair computeMinEigenVector(const Values &values) const; + + /** + * Check optimality + * @param values: should be of type SOn + */ + bool checkOptimality(const Values &values) const; + + /** + * Try to create optimizer at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return lm optimizer + */ + boost::shared_ptr createOptimizerAt( + size_t p, const Values &initial) const; + + /** + * Try to optimize at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return SO(p) values + */ + Values tryOptimizingAt(size_t p, const Values &initial) const; + + /** + * Project from SO(p) to Rot2 or Rot3 + * Values should be of type SO(p) + */ + Values projectFrom(size_t p, const Values &values) const; + + /** + * Project from SO(p)^N to Rot2^N or Rot3^N + * Values should be of type SO(p) + */ + Values roundSolution(const Values &values) const; + + /// Lift Values of type T to SO(p) + template static Values LiftTo(size_t p, const Values &values) { + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SOn::Lift(p, it.value.matrix())); + } + return result; + } + + /// @} + /// @name Basic API + /// @{ + + /** + * Calculate cost for SO(3) + * Values should be of type Rot3 + */ + double cost(const Values &values) const; + + /** + * Initialize randomly at SO(d) + * @param rng random number generator + * Example: + * std::mt19937 rng(42); + * Values initial = initializeRandomly(rng, p); + */ + Values initializeRandomly(std::mt19937 &rng) const; + + /// Random initialization for wrapper, fixed random seed. + Values initializeRandomly() const; + + /** + * Optimize at different values of p until convergence. + * @param initial initial Rot3 values + * @param pMin value of p to start Riemanian staircase at (default: d). + * @param pMax maximum value of p to try (default: 10) + * @return (Rot3 values, minimum eigenvalue) + */ + std::pair run(const Values &initialEstimate, size_t pMin = d, + size_t pMax = 10) const; + /// @} +}; + +// Subclasses for d=2 and d=3 that explicitly instantiate, as well as provide a +// convenience interface with file access. + +class ShonanAveraging2 : public ShonanAveraging<2> { +public: + ShonanAveraging2(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + ShonanAveraging2(string g2oFile, const Parameters ¶meters = Parameters()); +}; + +class ShonanAveraging3 : public ShonanAveraging<3> { +public: + ShonanAveraging3(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + ShonanAveraging3(string g2oFile, const Parameters ¶meters = Parameters()); + + // TODO(frank): Deprecate after we land pybind wrapper + ShonanAveraging3(const BetweenFactorPose3s &factors, + const Parameters ¶meters = Parameters()); +}; +} // namespace gtsam diff --git a/gtsam/sfm/ShonanFactor.cpp b/gtsam/sfm/ShonanFactor.cpp new file mode 100644 index 000000000..b911fb5a4 --- /dev/null +++ b/gtsam/sfm/ShonanFactor.cpp @@ -0,0 +1,141 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file FrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Various factors that minimize some Frobenius norm + */ + +#include + +#include +#include +#include + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +//****************************************************************************** +template +ShonanFactor::ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model, + const boost::shared_ptr &G) + : NoiseModelFactor2(ConvertNoiseModel(model, p * d), j1, j2), + M_(R12.matrix()), // d*d in all cases + p_(p), // 4 for SO(4) + pp_(p * p), // 16 for SO(4) + G_(G) { + if (noiseModel()->dim() != d * p_) + throw std::invalid_argument( + "ShonanFactor: model with incorrect dimension."); + if (!G) { + G_ = boost::make_shared(); + *G_ = SOn::VectorizedGenerators(p); // expensive! + } + if (static_cast(G_->rows()) != pp_ || + static_cast(G_->cols()) != SOn::Dimension(p)) + throw std::invalid_argument("ShonanFactor: passed in generators " + "of incorrect dimension."); +} + +//****************************************************************************** +template +void ShonanFactor::print(const std::string &s, + const KeyFormatter &keyFormatter) const { + std::cout << s << "ShonanFactor<" << p_ << ">(" << keyFormatter(key1()) << "," + << keyFormatter(key2()) << ")\n"; + traits::Print(M_, " M: "); + noiseModel_->print(" noise model: "); +} + +//****************************************************************************** +template +bool ShonanFactor::equals(const NonlinearFactor &expected, + double tol) const { + auto e = dynamic_cast(&expected); + return e != nullptr && NoiseModelFactor2::equals(*e, tol) && + p_ == e->p_ && M_ == e->M_; +} + +//****************************************************************************** +template +void ShonanFactor::fillJacobians(const Matrix &M1, const Matrix &M2, + boost::optional H1, + boost::optional H2) const { + gttic(ShonanFactor_Jacobians); + const size_t dim = p_ * d; // Stiefel manifold dimension + + if (H1) { + // If asked, calculate Jacobian H1 as as (M' \otimes M1) * G + // M' = dxd, M1 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p) + // (M' \otimes M1) is dim*dim, but last pp-dim columns are zero + *H1 = Matrix::Zero(dim, G_->cols()); + for (size_t j = 0; j < d; j++) { + auto MG_j = M1 * G_->middleRows(j * p_, p_); // p_ * Dim(p) + for (size_t i = 0; i < d; i++) { + H1->middleRows(i * p_, p_) -= M_(j, i) * MG_j; + } + } + } + if (H2) { + // If asked, calculate Jacobian H2 as as (I_d \otimes M2) * G + // I_d = dxd, M2 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p) + // (I_d \otimes M2) is dim*dim, but again last pp-dim columns are zero + H2->resize(dim, G_->cols()); + for (size_t i = 0; i < d; i++) { + H2->middleRows(i * p_, p_) = M2 * G_->middleRows(i * p_, p_); + } + } +} + +//****************************************************************************** +template +Vector ShonanFactor::evaluateError(const SOn &Q1, const SOn &Q2, + boost::optional H1, + boost::optional H2) const { + gttic(ShonanFactor_evaluateError); + + const Matrix &M1 = Q1.matrix(); + const Matrix &M2 = Q2.matrix(); + if (M1.rows() != static_cast(p_) || M2.rows() != static_cast(p_)) + throw std::invalid_argument("Invalid dimension SOn values passed to " + "ShonanFactor::evaluateError"); + + const size_t 
dim = p_ * d; // Stiefel manifold dimension + Vector fQ2(dim), hQ1(dim); + + // Vectorize and extract only d leftmost columns, i.e. vec(M2*P) + fQ2 << Eigen::Map(M2.data(), dim, 1); + + // Vectorize M1*P*R12 + const Matrix Q1PR12 = M1.leftCols() * M_; + hQ1 << Eigen::Map(Q1PR12.data(), dim, 1); + + this->fillJacobians(M1, M2, H1, H2); + + return fQ2 - hQ1; +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 and d=3 +template class ShonanFactor<2>; +template class ShonanFactor<3>; + +//****************************************************************************** + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanFactor.h b/gtsam/sfm/ShonanFactor.h new file mode 100644 index 000000000..3c43c2c52 --- /dev/null +++ b/gtsam/sfm/ShonanFactor.h @@ -0,0 +1,91 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Main factor type in Shonan averaging, on SO(n) pairs + */ + +#pragma once + +#include +#include +#include +#include + +#include + +namespace gtsam { + +/** + * ShonanFactor is a BetweenFactor that moves in SO(p), but will + * land on the SO(d) sub-manifold of SO(p) at the global minimum. It projects + * the SO(p) matrices down to a Stiefel manifold of p*d matrices. + */ +template +class GTSAM_EXPORT ShonanFactor : public NoiseModelFactor2 { + Matrix M_; ///< measured rotation between R1 and R2 + size_t p_, pp_; ///< dimensionality constants + boost::shared_ptr G_; ///< matrix of vectorized generators + + // Select Rot2 or Rot3 interface based template parameter d + using Rot = typename std::conditional::type; + +public: + /// @name Constructor + /// @{ + + /// Constructor. Note we convert to d*p-dimensional noise model. + /// To save memory and mallocs, pass in the vectorized Lie algebra generators: + /// G = boost::make_shared(SOn::VectorizedGenerators(p)); + ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model = nullptr, + const boost::shared_ptr &G = nullptr); + + /// @} + /// @name Testable + /// @{ + + /// print with optional string + void + print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override; + + /// assert equality up to a tolerance + bool equals(const NonlinearFactor &expected, + double tol = 1e-9) const override; + + /// @} + /// @name NoiseModelFactor2 methods + /// @{ + + /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] + /// projects down from SO(p) to the Stiefel manifold of px3 matrices. 
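+ /// Concretely, error = vec(Q2*P) - vec(Q1*P*R12), whitened by the isotropic
+ /// noise model produced by ConvertNoiseModel; Jacobians are assembled in
+ /// fillJacobians from the vectorized generators G_.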
+ Vector + evaluateError(const SOn &Q1, const SOn &Q2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const override; + /// @} + +private: + /// Calculate Jacobians if asked, Only implemented for d=2 and 3 in .cpp + void fillJacobians(const Matrix &M1, const Matrix &M2, + boost::optional H1, + boost::optional H2) const; +}; + +// Explicit instantiation for d=2 and d=3 in .cpp file: +using ShonanFactor2 = ShonanFactor<2>; +using ShonanFactor3 = ShonanFactor<3>; + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanGaugeFactor.h b/gtsam/sfm/ShonanGaugeFactor.h new file mode 100644 index 000000000..4847c5d58 --- /dev/null +++ b/gtsam/sfm/ShonanGaugeFactor.h @@ -0,0 +1,108 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanGaugeFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Factor used in Shonan Averaging to clamp down gauge freedom + */ + +#pragma once + +#include +#include +#include + +#include + +namespace gtsam { +/** + * The ShonanGaugeFactor creates a constraint on a single SO(n) to avoid moving + * in the stabilizer. + * + * Details: SO(p) contains the p*3 Stiefel matrices of orthogonal frames: we + * take those to be the 3 columns on the left. + * The P*P skew-symmetric matrices associated with so(p) can be partitioned as + * (Appendix B in the ECCV paper): + * | [w] -K' | + * | K [g] | + * where w is the SO(3) space, K are the extra Stiefel diemnsions (wormhole!) + * and (g)amma are extra dimensions in SO(p) that do not modify the cost + * function. The latter corresponds to rotations SO(p-d), and so the first few + * values of p-d for d==3 with their corresponding dimensionality are {0:0, 1:0, + * 2:1, 3:3, ...} + * + * The ShonanGaugeFactor adds a unit Jacobian to these extra dimensions, + * essentially restricting the optimization to the Stiefel manifold. + */ +class GTSAM_EXPORT ShonanGaugeFactor : public NonlinearFactor { + // Row dimension, equal to the dimensionality of SO(p-d) + size_t rows_; + + /// Constant Jacobian + boost::shared_ptr whitenedJacobian_; + +public: + /** + * Construct from key for an SO(p) matrix, for base dimension d (2 or 3) + * If parameter gamma is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(gamma). + */ + ShonanGaugeFactor(Key key, size_t p, size_t d = 3, + boost::optional gamma = boost::none) + : NonlinearFactor(boost::assign::cref_list_of<1>(key)) { + if (p < d) { + throw std::invalid_argument("ShonanGaugeFactor must have p>=d."); + } + // Calculate dimensions + size_t q = p - d; + size_t P = SOn::Dimension(p); // dimensionality of SO(p) + rows_ = SOn::Dimension(q); // dimensionality of SO(q), the gauge + + // Create constant Jacobian as a rows_*P matrix: there are rows_ penalized + // dimensions, but it is a bit tricky to find them among the P columns. + // The key is to look at how skew-symmetric matrices are laid out in SOn.h: + // the first tangent dimension will always be included, but beyond that we + // have to be careful. We always need to skip the d top-rows of the skew- + // symmetric matrix as they below to K, part of the Stiefel manifold. 
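+ // Worked example (matching the GaugeFactorSO6 unit test): for p=6, d=3 we
+ // get q=3, rows_ = dim SO(3) = 3 and P = dim SO(6) = 15, and the loop below
+ // writes identity blocks at (0,0),(1,1) and at (2,5), i.e. the first two
+ // entries of the 6th skew column and the first entry of the 5th.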
+ Matrix A(rows_, P); + A.setZero(); + double invSigma = gamma ? std::sqrt(*gamma) : 1.0; + size_t i = 0, j = 0, n = p - 1 - d; + while (i < rows_) { + A.block(i, j, n, n) = invSigma * Matrix::Identity(n, n); + i += n; + j += n + d; // skip d columns + n -= 1; + } + // TODO(frank): assign the right one in the right columns + whitenedJacobian_ = + boost::make_shared(key, A, Vector::Zero(rows_)); + } + + /// Destructor + virtual ~ShonanGaugeFactor() {} + + /// Calculate the error of the factor: always zero + double error(const Values &c) const override { return 0; } + + /// get the dimension of the factor (number of rows on linearization) + size_t dim() const override { return rows_; } + + /// linearize to a GaussianFactor + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; + } +}; +// \ShonanGaugeFactor + +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sfm/tests/testShonanAveraging.cpp b/gtsam/sfm/tests/testShonanAveraging.cpp new file mode 100644 index 000000000..cc4319e15 --- /dev/null +++ b/gtsam/sfm/tests/testShonanAveraging.cpp @@ -0,0 +1,360 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for Shonan Averaging algorithm + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +template +using Rot = typename std::conditional::type; + +template +using Pose = typename std::conditional::type; + +ShonanAveraging3 fromExampleName( + const std::string &name, + ShonanAveraging3::Parameters parameters = ShonanAveraging3::Parameters()) { + string g2oFile = findExampleDataFile(name); + return ShonanAveraging3(g2oFile, parameters); +} + +static const ShonanAveraging3 kShonan = fromExampleName("toyExample.g2o"); + +static std::mt19937 kRandomNumberGenerator(42); + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkConstructor) { + EXPECT_LONGS_EQUAL(5, kShonan.nrUnknowns()); + + EXPECT_LONGS_EQUAL(15, kShonan.D().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.D().cols()); + auto D = kShonan.denseD(); + EXPECT_LONGS_EQUAL(15, D.rows()); + EXPECT_LONGS_EQUAL(15, D.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.Q().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.Q().cols()); + auto Q = kShonan.denseQ(); + EXPECT_LONGS_EQUAL(15, Q.rows()); + EXPECT_LONGS_EQUAL(15, Q.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.L().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.L().cols()); + auto L = kShonan.denseL(); + EXPECT_LONGS_EQUAL(15, L.rows()); + EXPECT_LONGS_EQUAL(15, L.cols()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, buildGraphAt) { + auto graph = kShonan.buildGraphAt(5); + EXPECT_LONGS_EQUAL(7, graph.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkOptimality) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(4, randomRotations); // lift to 
4! + auto Lambda = kShonan.computeLambda(random); + EXPECT_LONGS_EQUAL(15, Lambda.rows()); + EXPECT_LONGS_EQUAL(15, Lambda.cols()); + EXPECT_LONGS_EQUAL(45, Lambda.nonZeros()); + auto lambdaMin = kShonan.computeMinEigenValue(random); + // EXPECT_DOUBLES_EQUAL(-5.2964625490657866, lambdaMin, + // 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(-414.87376657555996, lambdaMin, + 1e-4); // Regression test + EXPECT(!kShonan.checkOptimality(random)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt3) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values initial = ShonanAveraging3::LiftTo(3, randomRotations); // convert to SOn + EXPECT(!kShonan.checkOptimality(initial)); + const Values result = kShonan.tryOptimizingAt(3, initial); + EXPECT(kShonan.checkOptimality(result)); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(3, result), 1e-4); + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt4) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(4, randomRotations); // lift to 4! + const Values result = kShonan.tryOptimizingAt(4, random); + EXPECT(kShonan.checkOptimality(result)); + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(4, result), 1e-3); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, MakeATangentVector) { + Vector9 v; + v << 1, 2, 3, 4, 5, 6, 7, 8, 9; + Matrix expected(5, 5); + expected << 0, 0, 0, 0, -4, // + 0, 0, 0, 0, -5, // + 0, 0, 0, 0, -6, // + 0, 0, 0, 0, 0, // + 4, 5, 6, 0, 0; + const Vector xi_1 = ShonanAveraging3::MakeATangentVector(5, v, 1); + const auto actual = SOn::Hat(xi_1); + CHECK(assert_equal(expected, actual)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, LiftTo) { + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + Values lifted = ShonanAveraging3::LiftTo(5, initial); + EXPECT(assert_equal(SOn(5), lifted.at(0))); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, CheckWithEigen) { + // control randomness + static std::mt19937 rng(0); + Vector descentDirection = Vector::Random(15); // for use below + const Values randomRotations = kShonan.initializeRandomly(rng); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + + // Optimize + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + + // Compute Eigenvalue with Spectra solver + double lambda = kShonan.computeMinEigenValue(Qstar3); + + // Check Eigenvalue with slow Eigen version, converts matrix A to dense matrix! 
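+ // (Eigen::EigenSolver returns all eigenvalues of the dense A; we take the
+ // smallest real part and expect it to agree with the sparse Spectra result.)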
+ const Matrix S = ShonanAveraging3::StiefelElementMatrix(Qstar3); + auto A = kShonan.computeA(S); + bool computeEigenvectors = false; + Eigen::EigenSolver eigenSolver(Matrix(A), computeEigenvectors); + auto lambdas = eigenSolver.eigenvalues().real(); + double minEigenValue = lambdas(0); + for (int i = 1; i < lambdas.size(); i++) + minEigenValue = min(lambdas(i), minEigenValue); + + // Actual check + EXPECT_DOUBLES_EQUAL(minEigenValue, lambda, 1e-12); + + // Construct test descent direction (as minEigenVector is not predictable + // across platforms, being one from a basically flat 3d- subspace) + + // Check descent + Values initialQ4 = + ShonanAveraging3::LiftwithDescent(4, Qstar3, descentDirection); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); + + // TODO(frank): uncomment this regression test: currently not repeatable + // across platforms. + // Matrix expected(4, 4); + // expected << 0.0459224, -0.688689, -0.216922, 0.690321, // + // 0.92381, 0.191931, 0.255854, 0.21042, // + // -0.376669, 0.301589, 0.687953, 0.542111, // + // -0.0508588, 0.630804, -0.643587, 0.43046; + // EXPECT(assert_equal(SOn(expected), initialQ4.at(0), 1e-5)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, initializeWithDescent) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + Vector minEigenVector; + double lambdaMin = kShonan.computeMinEigenValue(Qstar3, &minEigenVector); + Values initialQ4 = + kShonan.initializeWithDescent(4, Qstar3, minEigenVector, lambdaMin); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, run) { + auto initial = kShonan.initializeRandomly(kRandomNumberGenerator); + auto result = kShonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(result.first), 1e-3); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, result.second, + 1e-4); // Regression test +} + +/* ************************************************************************* */ +namespace klaus { +// The data in the file is the Colmap solution +const Rot3 wR0(0.9992281076190063, -0.02676080288219576, -0.024497002638379624, + -0.015064701622500615); +const Rot3 wR1(0.998239108728862, -0.049543805396343954, -0.03232420352077356, + -0.004386230477751116); +const Rot3 wR2(0.9925378735259738, -0.07993768981394891, 0.0825062894866454, + -0.04088089479075661); +} // namespace klaus + +TEST(ShonanAveraging3, runKlaus) { + using namespace klaus; + + // Initialize a Shonan instance without the Karcher mean + ShonanAveraging3::Parameters parameters; + parameters.setKarcherWeight(0); + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = + fromExampleName("Klaus3.g2o", parameters); + + // Check nr poses + EXPECT_LONGS_EQUAL(3, shonan.nrUnknowns()); + + // Colmap uses the Y-down vision frame, and the first 3 rotations are close to + // identity. We check that below. Note tolerance is quite high. + static const Rot3 identity; + EXPECT(assert_equal(identity, wR0, 0.2)); + EXPECT(assert_equal(identity, wR1, 0.2)); + EXPECT(assert_equal(identity, wR2, 0.2)); + + // Get measurements + const Rot3 R01 = shonan.measured(0); + const Rot3 R12 = shonan.measured(1); + const Rot3 R02 = shonan.measured(2); + + // Regression test to make sure data did not change. 
+ EXPECT(assert_equal(Rot3(0.9995433591728293, -0.022048798853273946, + -0.01796327847857683, 0.010210006313668573), + R01)); + + // Check Colmap solution agrees OK with relative rotation measurements. + EXPECT(assert_equal(R01, wR0.between(wR1), 0.1)); + EXPECT(assert_equal(R12, wR1.between(wR2), 0.1)); + EXPECT(assert_equal(R02, wR0.between(wR2), 0.1)); + + // Run Shonan (with prior on first rotation) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-9.2259161494467889e-05, result.second, + 1e-4); // Regression + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + // rR0 = rRw * wR0 => rRw = rR0 * wR0.inverse() + // rR1 = rRw * wR1 + // rR2 = rRw * wR2 + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, runKlausKarcher) { + using namespace klaus; + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o"); + + // Run Shonan (with Karcher mean prior) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-1.361402670507772e-05, result.second, + 1e-4); // Regression test + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging2, runKlausKarcher) { + // Load 2D toy example + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + // lmParams.setVerbosityLM("SUMMARY"); + string g2oFile = findExampleDataFile("noisyToyGraph.txt"); + ShonanAveraging2::Parameters parameters(lmParams); + auto measurements = parseMeasurements(g2oFile); + ShonanAveraging2 shonan(measurements, parameters); + EXPECT_LONGS_EQUAL(4, shonan.nrUnknowns()); + + // Check graph building + NonlinearFactorGraph graph = shonan.buildGraphAt(2); + EXPECT_LONGS_EQUAL(6, graph.size()); + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 2); + EXPECT_DOUBLES_EQUAL(0.0008211, shonan.cost(result.first), 1e-6); + EXPECT_DOUBLES_EQUAL(0, result.second, 1e-10); // certificate! +} + +/* ************************************************************************* */ +// Test alpha/beta/gamma prior weighting. 
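+// (Recall alpha weights the anchor-based prior, beta the Karcher-mean prior,
+// and gamma the gauge-fixing factors; see ShonanAveragingParameters.)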
+TEST(ShonanAveraging3, PriorWeights) { + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + ShonanAveraging3::Parameters params(lmParams); + EXPECT_DOUBLES_EQUAL(0, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(1, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(0, params.gamma, 1e-9); + double alpha = 100.0, beta = 200.0, gamma = 300.0; + params.setAnchorWeight(alpha); + params.setKarcherWeight(beta); + params.setGaugesWeight(gamma); + EXPECT_DOUBLES_EQUAL(alpha, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(beta, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(gamma, params.gamma, 1e-9); + params.setKarcherWeight(0); + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o", params); + for (auto i : {0,1,2}) { + const auto& m = shonan.measurement(i); + auto isotropic = + boost::static_pointer_cast(m.noiseModel()); + CHECK(isotropic != nullptr); + EXPECT_LONGS_EQUAL(3, isotropic->dim()); + EXPECT_DOUBLES_EQUAL(0.2, isotropic->sigma(), 1e-9); + } + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + EXPECT_DOUBLES_EQUAL(3.0756, shonan.cost(initial), 1e-4); + auto result = shonan.run(initial, 3, 3); + EXPECT_DOUBLES_EQUAL(0.0015, shonan.cost(result.first), 1e-4); +} +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testShonanFactor.cpp b/gtsam/sfm/tests/testShonanFactor.cpp new file mode 100644 index 000000000..ef94c5cf4 --- /dev/null +++ b/gtsam/sfm/tests/testShonanFactor.cpp @@ -0,0 +1,121 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * testFrobeniusFactor.cpp + * + * @file testFrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Check evaluateError for various Frobenius norm + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +//****************************************************************************** +namespace so3 { +SO3 id; +Vector3 v1 = (Vector(3) << 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1); +Vector3 v2 = (Vector(3) << 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace so3 + +//****************************************************************************** +namespace submanifold { +SO4 id; +Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1.tail<3>()); +SO4 Q1 = SO4::Expmap(v1); +Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2.tail<3>()); +SO4 Q2 = SO4::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace submanifold + +/* ************************************************************************* */ +TEST(ShonanFactor3, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(3, 1.2); + for (const size_t p : {5, 4, 3}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(3, 3) = submanifold::R1.matrix(); + SOn Q1(M); + M.topLeftCorner(3, 3) = submanifold::R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor3(1, 2, Rot3(::so3::R12.matrix()), p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +TEST(ShonanFactor3, equivalenceToSO3) { + using namespace ::submanifold; + auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1)); + auto model = noiseModel::Isotropic::Sigma(6, 1.2); // wrong dimension + auto factor3 = FrobeniusBetweenFactor(1, 2, R12, model); + auto factor4 = ShonanFactor3(1, 2, Rot3(R12.matrix()), 4, model); + const Matrix3 E3(factor3.evaluateError(R1, R2).data()); + const Matrix43 E4( + factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data()); + EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9)); + EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9)); +} + +/* ************************************************************************* */ +TEST(ShonanFactor2, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(1, 1.2); + const Rot2 R1(0.3), R2(0.5), R12(0.2); + for (const size_t p : {5, 4, 3, 2}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(2, 2) = R1.matrix(); + SOn Q1(M); + M.topLeftCorner(2, 2) = R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor2(1, 2, R12, p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git 
a/gtsam/sfm/tests/testShonanGaugeFactor.cpp b/gtsam/sfm/tests/testShonanGaugeFactor.cpp new file mode 100644 index 000000000..344394b9c --- /dev/null +++ b/gtsam/sfm/tests/testShonanGaugeFactor.cpp @@ -0,0 +1,106 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanGaugeFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for ShonanGaugeFactor class + */ + +#include + +#include + +#include +#include + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +// Check dimensions of all low-dim GaugeFactors +TEST(ShonanAveraging, GaugeFactorLows) { + constexpr Key key(123); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 2, 2).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 2).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 4, 2).dim()); // SO(4-2) -> 1 + EXPECT_LONGS_EQUAL(3, ShonanGaugeFactor(key, 5, 2).dim()); // SO(5-2) -> 3 + + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 3).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 4, 3).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 5, 3).dim()); // SO(5-3) -> 1 +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6) +TEST(ShonanAveraging, GaugeFactorSO6) { + constexpr Key key(666); + ShonanGaugeFactor factor(key, 6); // For SO(6) + Matrix A = Matrix::Zero(3, 15); // SO(6-3) = SO(3) == 3-dimensional gauge + A(0, 0) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 1; // then we skip 3 tangent dimensions + A(2, 5) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(3)); + Values values; + EXPECT_LONGS_EQUAL(3, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(7) +TEST(ShonanAveraging, GaugeFactorSO7) { + constexpr Key key(777); + ShonanGaugeFactor factor(key, 7); // For SO(7) + Matrix A = Matrix::Zero(6, 21); // SO(7-3) = SO(4) == 6-dimensional gauge + A(0, 0) = 1; // first 3 of 7^th skew column, which has 6 non-zero entries + A(1, 1) = 1; + A(2, 2) = 1; // then we skip 3 tangent dimensions + A(3, 6) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(4, 7) = 1; // then we skip 3 tangent dimensions + A(5, 11) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. 
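+ // Altogether the SO(4) gauge occupies rows 0..5 and columns {0,1,2,6,7,11}
+ // of the 21 tangent dimensions of SO(7).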
+ JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6), with base SO(2) +TEST(ShonanAveraging, GaugeFactorSO6over2) { + constexpr Key key(602); + double gamma = 4; + ShonanGaugeFactor factor(key, 6, 2, gamma); // For SO(6), base SO(2) + Matrix A = Matrix::Zero(6, 15); // SO(6-2) = SO(4) == 6-dimensional gauge + A(0, 0) = 2; // first 3 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 2; + A(2, 2) = 2; // then we skip only 2 tangent dimensions + A(3, 5) = 2; // first 2 of 5^th skew column, which has 4 non-zero entries + A(4, 6) = 2; // then we skip only 2 tangent dimensions + A(5, 9) = 2; // first of 4th skew colum, which has 3 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/slam/FrobeniusFactor.cpp b/gtsam/slam/FrobeniusFactor.cpp index 80aeea947..5697a0cd6 100644 --- a/gtsam/slam/FrobeniusFactor.cpp +++ b/gtsam/slam/FrobeniusFactor.cpp @@ -18,118 +18,38 @@ #include -#include -#include -#include - -#include -#include -#include - using namespace std; namespace gtsam { //****************************************************************************** -boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel& model, size_t d, bool defaultToUnit) { +boost::shared_ptr +ConvertNoiseModel(const SharedNoiseModel &model, size_t d, bool defaultToUnit) { double sigma = 1.0; if (model != nullptr) { - if (model->dim() != 6) { - if (!defaultToUnit) - throw std::runtime_error("Can only convert Pose3 noise models"); - } else { - auto sigmas = model->sigmas().head(3).eval(); - if (sigmas(1) != sigmas(0) || sigmas(2) != sigmas(0)) { - if (!defaultToUnit) + auto sigmas = model->sigmas(); + size_t n = sigmas.size(); + if (n == 1) { + sigma = sigmas(0); // Rot2 + goto exit; + } + if (n == 3 || n == 6) { + sigma = sigmas(2); // Pose2, Rot3, or Pose3 + if (sigmas(0) != sigma || sigmas(1) != sigma) { + if (!defaultToUnit) { throw std::runtime_error("Can only convert isotropic rotation noise"); - } else { - sigma = sigmas(0); + } } + goto exit; + } + if (!defaultToUnit) { + throw std::runtime_error("Can only convert Pose2/Pose3 noise models"); } } +exit: return noiseModel::Isotropic::Sigma(d, sigma); } //****************************************************************************** -FrobeniusWormholeFactor::FrobeniusWormholeFactor( - Key j1, Key j2, const Rot3 &R12, size_t p, const SharedNoiseModel &model, - const boost::shared_ptr &G) - : NoiseModelFactor2(ConvertPose3NoiseModel(model, p * 3), j1, j2), - M_(R12.matrix()), // 3*3 in all cases - p_(p), // 4 for SO(4) - pp_(p * p), // 16 for SO(4) - G_(G) { - if (noiseModel()->dim() != 3 * p_) - throw std::invalid_argument( - "FrobeniusWormholeFactor: model with incorrect dimension."); - if (!G) { - G_ = boost::make_shared(); - *G_ = SOn::VectorizedGenerators(p); // expensive! 
- } - if (static_cast(G_->rows()) != pp_ || - static_cast(G_->cols()) != SOn::Dimension(p)) - throw std::invalid_argument("FrobeniusWormholeFactor: passed in generators " - "of incorrect dimension."); -} -//****************************************************************************** -void FrobeniusWormholeFactor::print(const std::string &s, const KeyFormatter &keyFormatter) const { - std::cout << s << "FrobeniusWormholeFactor<" << p_ << ">(" << keyFormatter(key1()) << "," - << keyFormatter(key2()) << ")\n"; - traits::Print(M_, " M: "); - noiseModel_->print(" noise model: "); -} - -//****************************************************************************** -bool FrobeniusWormholeFactor::equals(const NonlinearFactor &expected, - double tol) const { - auto e = dynamic_cast(&expected); - return e != nullptr && NoiseModelFactor2::equals(*e, tol) && - p_ == e->p_ && M_ == e->M_; -} - -//****************************************************************************** -Vector FrobeniusWormholeFactor::evaluateError( - const SOn& Q1, const SOn& Q2, boost::optional H1, - boost::optional H2) const { - gttic(FrobeniusWormholeFactorP_evaluateError); - - const Matrix& M1 = Q1.matrix(); - const Matrix& M2 = Q2.matrix(); - assert(M1.rows() == p_ && M2.rows() == p_); - - const size_t dim = 3 * p_; // Stiefel manifold dimension - Vector fQ2(dim), hQ1(dim); - - // Vectorize and extract only d leftmost columns, i.e. vec(M2*P) - fQ2 << Eigen::Map(M2.data(), dim, 1); - - // Vectorize M1*P*R12 - const Matrix Q1PR12 = M1.leftCols<3>() * M_; - hQ1 << Eigen::Map(Q1PR12.data(), dim, 1); - - // If asked, calculate Jacobian as (M \otimes M1) * G - if (H1) { - const size_t p2 = 2 * p_; - Matrix RPxQ = Matrix::Zero(dim, pp_); - RPxQ.block(0, 0, p_, dim) << M1 * M_(0, 0), M1 * M_(1, 0), M1 * M_(2, 0); - RPxQ.block(p_, 0, p_, dim) << M1 * M_(0, 1), M1 * M_(1, 1), M1 * M_(2, 1); - RPxQ.block(p2, 0, p_, dim) << M1 * M_(0, 2), M1 * M_(1, 2), M1 * M_(2, 2); - *H1 = -RPxQ * (*G_); - } - if (H2) { - const size_t p2 = 2 * p_; - Matrix PxQ = Matrix::Zero(dim, pp_); - PxQ.block(0, 0, p_, p_) = M2; - PxQ.block(p_, p_, p_, p_) = M2; - PxQ.block(p2, p2, p_, p_) = M2; - *H2 = PxQ * (*G_); - } - - return fQ2 - hQ1; -} - -//****************************************************************************** - -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/FrobeniusFactor.h b/gtsam/slam/FrobeniusFactor.h index 474cf6143..1fc37c785 100644 --- a/gtsam/slam/FrobeniusFactor.h +++ b/gtsam/slam/FrobeniusFactor.h @@ -18,6 +18,7 @@ #pragma once +#include #include #include #include @@ -25,23 +26,24 @@ namespace gtsam { /** - * When creating (any) FrobeniusFactor we convert a 6-dimensional Pose3 - * BetweenFactor noise model into an 9 or 16-dimensional isotropic noise + * When creating (any) FrobeniusFactor we can convert a Rot/Pose + * BetweenFactor noise model into a n-dimensional isotropic noise * model used to weight the Frobenius norm. If the noise model passed is - * null we return a Dim-dimensional isotropic noise model with sigma=1.0. If - * not, we we check if the 3-dimensional noise model on rotations is - * isotropic. If it is, we extend to 'Dim' dimensions, otherwise we throw an + * null we return a n-dimensional isotropic noise model with sigma=1.0. If + * not, we we check if the d-dimensional noise model on rotations is + * isotropic. If it is, we extend to 'n' dimensions, otherwise we throw an * error. If defaultToUnit == false throws an exception on unexepcted input. 
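+ * For example, a 6-dimensional Pose3 model whose rotation sigmas are all
+ * equal to 0.1 is converted to an n-dimensional isotropic model with
+ * sigma = 0.1.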
*/ - GTSAM_EXPORT boost::shared_ptr ConvertPose3NoiseModel( - const SharedNoiseModel& model, size_t d, bool defaultToUnit = true); +GTSAM_EXPORT boost::shared_ptr +ConvertNoiseModel(const SharedNoiseModel &model, size_t n, + bool defaultToUnit = true); /** * FrobeniusPrior calculates the Frobenius norm between a given matrix and an * element of SO(3) or SO(4). */ template -class FrobeniusPrior : public NoiseModelFactor1 { +class GTSAM_EXPORT FrobeniusPrior : public NoiseModelFactor1 { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; using MatrixNN = typename Rot::MatrixNN; Eigen::Matrix vecM_; ///< vectorized matrix to approximate @@ -50,7 +52,7 @@ class FrobeniusPrior : public NoiseModelFactor1 { /// Constructor FrobeniusPrior(Key j, const MatrixNN& M, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor1(ConvertPose3NoiseModel(model, Dim), j) { + : NoiseModelFactor1(ConvertNoiseModel(model, Dim), j) { vecM_ << Eigen::Map(M.data(), Dim, 1); } @@ -66,13 +68,13 @@ class FrobeniusPrior : public NoiseModelFactor1 { * The template argument can be any fixed-size SO. */ template -class FrobeniusFactor : public NoiseModelFactor2 { +class GTSAM_EXPORT FrobeniusFactor : public NoiseModelFactor2 { enum { Dim = Rot::VectorN2::RowsAtCompileTime }; public: /// Constructor FrobeniusFactor(Key j1, Key j2, const SharedNoiseModel& model = nullptr) - : NoiseModelFactor2(ConvertPose3NoiseModel(model, Dim), j1, + : NoiseModelFactor2(ConvertNoiseModel(model, Dim), j1, j2) {} /// Error is just Frobenius norm between rotation matrices. @@ -106,7 +108,7 @@ class GTSAM_EXPORT FrobeniusBetweenFactor : public NoiseModelFactor2 { FrobeniusBetweenFactor(Key j1, Key j2, const Rot& R12, const SharedNoiseModel& model = nullptr) : NoiseModelFactor2( - ConvertPose3NoiseModel(model, Dim), j1, j2), + ConvertNoiseModel(model, Dim), j1, j2), R12_(R12), R2hat_H_R1_(R12.inverse().AdjointMap()) {} @@ -150,52 +152,4 @@ class GTSAM_EXPORT FrobeniusBetweenFactor : public NoiseModelFactor2 { /// @} }; -/** - * FrobeniusWormholeFactor is a BetweenFactor that moves in SO(p), but will - * land on the SO(3) sub-manifold of SO(p) at the global minimum. It projects - * the SO(p) matrices down to a Stiefel manifold of p*d matrices. - * TODO(frank): template on D=2 or 3 - */ -class GTSAM_EXPORT FrobeniusWormholeFactor - : public NoiseModelFactor2 { - Matrix M_; ///< measured rotation between R1 and R2 - size_t p_, pp_; ///< dimensionality constants - boost::shared_ptr G_; ///< matrix of vectorized generators - -public: - /// @name Constructor - /// @{ - - /// Constructor. Note we convert to 3*p-dimensional noise model. - /// To save memory and mallocs, pass in the vectorized Lie algebra generators: - /// G = boost::make_shared(SOn::VectorizedGenerators(p)); - FrobeniusWormholeFactor(Key j1, Key j2, const Rot3 &R12, size_t p = 4, - const SharedNoiseModel &model = nullptr, - const boost::shared_ptr &G = nullptr); - - /// @} - /// @name Testable - /// @{ - - /// print with optional string - void - print(const std::string &s, - const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override; - - /// assert equality up to a tolerance - bool equals(const NonlinearFactor &expected, - double tol = 1e-9) const override; - - /// @} - /// @name NoiseModelFactor2 methods - /// @{ - - /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] - /// projects down from SO(p) to the Stiefel manifold of px3 matrices. 
- Vector evaluateError(const SOn& Q1, const SOn& Q2, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const override; - /// @} -}; - } // namespace gtsam diff --git a/gtsam/slam/KarcherMeanFactor-inl.h b/gtsam/slam/KarcherMeanFactor-inl.h index f10cc7e42..c81a9adc5 100644 --- a/gtsam/slam/KarcherMeanFactor-inl.h +++ b/gtsam/slam/KarcherMeanFactor-inl.h @@ -58,20 +58,22 @@ T FindKarcherMean(std::initializer_list&& rotations) { template template -KarcherMeanFactor::KarcherMeanFactor(const CONTAINER& keys, int d) - : NonlinearFactor(keys) { +KarcherMeanFactor::KarcherMeanFactor(const CONTAINER &keys, int d, + boost::optional beta) + : NonlinearFactor(keys), d_(static_cast(d)) { if (d <= 0) { throw std::invalid_argument( "KarcherMeanFactor needs dimension for dynamic types."); } - // Create the constant Jacobian made of D*D identity matrices, - // where D is the dimensionality of the manifold. - const auto I = Eigen::MatrixXd::Identity(d, d); + // Create the constant Jacobian made of d*d identity matrices, + // where d is the dimensionality of the manifold. + Matrix A = Matrix::Identity(d, d); + if (beta) A *= std::sqrt(*beta); std::map terms; for (Key j : keys) { - terms[j] = I; + terms[j] = A; } - jacobian_ = - boost::make_shared(terms, Eigen::VectorXd::Zero(d)); + whitenedJacobian_ = + boost::make_shared(terms, Vector::Zero(d)); } } // namespace gtsam \ No newline at end of file diff --git a/gtsam/slam/KarcherMeanFactor.h b/gtsam/slam/KarcherMeanFactor.h index 54b3930d4..b7cd3b11a 100644 --- a/gtsam/slam/KarcherMeanFactor.h +++ b/gtsam/slam/KarcherMeanFactor.h @@ -30,44 +30,51 @@ namespace gtsam { * PriorFactors. */ template -T FindKarcherMean(const std::vector>& rotations); +T FindKarcherMean(const std::vector> &rotations); -template -T FindKarcherMean(std::initializer_list&& rotations); +template T FindKarcherMean(std::initializer_list &&rotations); /** * The KarcherMeanFactor creates a constraint on all SO(n) variables with * given keys that the Karcher mean (see above) will stay the same. Note the * mean itself is irrelevant to the constraint and is not a parameter: the * constraint is implemented as enforcing that the sum of local updates is - * equal to zero, hence creating a rank-dim constraint. Note it is implemented as - * a soft constraint, as typically it is used to fix a gauge freedom. + * equal to zero, hence creating a rank-dim constraint. Note it is implemented + * as a soft constraint, as typically it is used to fix a gauge freedom. * */ -template -class KarcherMeanFactor : public NonlinearFactor { +template class KarcherMeanFactor : public NonlinearFactor { + // Compile time dimension: can be -1 + enum { D = traits::dimension }; + + // Runtime dimension: always >=0 + size_t d_; + /// Constant Jacobian made of d*d identity matrices - boost::shared_ptr jacobian_; + boost::shared_ptr whitenedJacobian_; - enum {D = traits::dimension}; - - public: - /// Construct from given keys. +public: + /** + * Construct from given keys. + * If parameter beta is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(beta). 
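A quick sketch of the Jacobian this constructor assembles (NumPy only, no GTSAM types, helper name invented for illustration): one d-by-d identity block per key, each scaled by sqrt(beta) when a precision beta is supplied, against a zero right-hand side, so the linearized constraint is that the weighted sum of local updates vanishes.

import numpy as np

def karcher_jacobian_sketch(keys, d, beta=None):
    # Mirrors the whitened JacobianFactor built above (sketch only):
    # A = [sqrt(beta)*I_d, sqrt(beta)*I_d, ...], b = 0.
    block = np.eye(d) if beta is None else np.sqrt(beta) * np.eye(d)
    A = np.hstack([block for _ in keys])
    return A, np.zeros(d)

# Three SO(3) variables (d = 3) with precision beta = 4 -> identity blocks scaled by 2
A, b = karcher_jacobian_sketch(keys=[1, 2, 3], d=3, beta=4.0)
print(A.shape)  # (3, 9)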
+ */ template - KarcherMeanFactor(const CONTAINER& keys, int d=D); + KarcherMeanFactor(const CONTAINER &keys, int d = D, + boost::optional beta = boost::none); /// Destructor virtual ~KarcherMeanFactor() {} /// Calculate the error of the factor: always zero - double error(const Values& c) const override { return 0; } + double error(const Values &c) const override { return 0; } /// get the dimension of the factor (number of rows on linearization) - size_t dim() const override { return D; } + size_t dim() const override { return d_; } /// linearize to a GaussianFactor - boost::shared_ptr linearize(const Values& c) const override { - return jacobian_; + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; } }; // \KarcherMeanFactor -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/tests/testFrobeniusFactor.cpp b/gtsam/slam/tests/testFrobeniusFactor.cpp index 9cb0c19fa..321b54c86 100644 --- a/gtsam/slam/tests/testFrobeniusFactor.cpp +++ b/gtsam/slam/tests/testFrobeniusFactor.cpp @@ -188,54 +188,6 @@ TEST(FrobeniusBetweenFactorSO4, evaluateError) { EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); } -//****************************************************************************** -namespace submanifold { -SO4 id; -Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished(); -SO3 R1 = SO3::Expmap(v1.tail<3>()); -SO4 Q1 = SO4::Expmap(v1); -Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished(); -SO3 R2 = SO3::Expmap(v2.tail<3>()); -SO4 Q2 = SO4::Expmap(v2); -SO3 R12 = R1.between(R2); -} // namespace submanifold - -/* ************************************************************************* */ -TEST(FrobeniusWormholeFactor, evaluateError) { - auto model = noiseModel::Isotropic::Sigma(6, 1.2); // dimension = 6 not 16 - for (const size_t p : {5, 4, 3}) { - Matrix M = Matrix::Identity(p, p); - M.topLeftCorner(3, 3) = submanifold::R1.matrix(); - SOn Q1(M); - M.topLeftCorner(3, 3) = submanifold::R2.matrix(); - SOn Q2(M); - auto factor = - FrobeniusWormholeFactor(1, 2, Rot3(::so3::R12.matrix()), p, model); - Matrix H1, H2; - factor.evaluateError(Q1, Q2, H1, H2); - - // Test derivatives - Values values; - values.insert(1, Q1); - values.insert(2, Q2); - EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); - } -} - -/* ************************************************************************* */ -TEST(FrobeniusWormholeFactor, equivalenceToSO3) { - using namespace ::submanifold; - auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1)); - auto model = noiseModel::Isotropic::Sigma(6, 1.2); // wrong dimension - auto factor3 = FrobeniusBetweenFactor(1, 2, R12, model); - auto factor4 = FrobeniusWormholeFactor(1, 2, Rot3(R12.matrix()), 4, model); - const Matrix3 E3(factor3.evaluateError(R1, R2).data()); - const Matrix43 E4( - factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data()); - EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9)); - EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9)); -} - /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam_unstable/gtsam_unstable.h b/gtsam_unstable/gtsam_unstable.h index ef2d16bf0..05b30bb0b 100644 --- a/gtsam_unstable/gtsam_unstable.h +++ b/gtsam_unstable/gtsam_unstable.h @@ -12,10 +12,14 @@ class gtsam::Point2Vector; class gtsam::Rot2; class gtsam::Pose2; class gtsam::Point3; +class gtsam::SO3; +class gtsam::SO4; +class gtsam::SOn; class gtsam::Rot3; class gtsam::Pose3; virtual class 
gtsam::noiseModel::Base; virtual class gtsam::noiseModel::Gaussian; +virtual class gtsam::noiseModel::Isotropic; virtual class gtsam::imuBias::ConstantBias; virtual class gtsam::NonlinearFactor; virtual class gtsam::NoiseModelFactor; @@ -39,6 +43,7 @@ class gtsam::KeyVector; class gtsam::LevenbergMarquardtParams; class gtsam::ISAM2Params; class gtsam::GaussianDensity; +class gtsam::LevenbergMarquardtOptimizer; namespace gtsam { @@ -282,7 +287,6 @@ virtual class PriorFactor : gtsam::NoiseModelFactor { void serializable() const; // enabling serialization functionality }; - #include template virtual class BetweenFactor : gtsam::NoiseModelFactor { diff --git a/gtsam_unstable/timing/process_shonan_timing_results.py b/gtsam_unstable/timing/process_shonan_timing_results.py new file mode 100644 index 000000000..9cf934dba --- /dev/null +++ b/gtsam_unstable/timing/process_shonan_timing_results.py @@ -0,0 +1,215 @@ +""" +Process timing results from timeShonanAveraging +""" + +import xlrd +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.ticker import FuncFormatter +import heapq +from collections import Counter + +def make_combined_plot(name, p_values, times, costs, min_cost_range=10): + """ Make a plot that combines timing and SO(3) cost. + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + Will calculate the range of the costs, default minimum range = 10.0 + """ + min_cost = min(costs) + cost_range = max(max(costs)-min_cost,min_cost_range) + fig = plt.figure() + ax1 = fig.add_subplot(111) + ax1.plot(p_values, times, label="time") + ax1.set_ylabel('Time used to optimize \ seconds') + ax1.set_xlabel('p_value') + ax2 = ax1.twinx() + ax2.plot(p_values, costs, 'r', label="cost") + ax2.set_ylabel('Cost at SO(3) form') + ax2.set_xlabel('p_value') + ax2.set_xticks(p_values) + ax2.set_ylim(min_cost, min_cost + cost_range) + plt.title(name, fontsize=12) + ax1.legend(loc="upper left") + ax2.legend(loc="upper right") + plt.interactive(False) + plt.show() + +def make_convergence_plot(name, p_values, times, costs, iter=10): + """ Make a bar that show the success rate for each p_value according to whether the SO(3) cost converges + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + iter: int of iteration number for each p_value + """ + + max_cost = np.mean(np.array(heapq.nlargest(iter, costs))) + # calculate mean costs for each p value + p_values = list(dict(Counter(p_values)).keys()) + # make sure the iter number + iter = int(len(times)/len(p_values)) + p_mean_cost = [np.mean(np.array(costs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_max = p_values[p_mean_cost.index(max(p_mean_cost))] + # print(p_mean_cost) + # print(p_max) + + #take mean and make the combined plot + mean_times = [] + mean_costs = [] + for p in p_values: + costs_tmp = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_cost = sum(costs_tmp)/len(costs_tmp) + mean_costs.append(mean_cost) + times_tmp = times[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_time = sum(times_tmp)/len(times_tmp) + mean_times.append(mean_time) + make_combined_plot(name, p_values,mean_times, mean_costs) + + # calculate the convergence rate for each p_value + p_success_rates = [] + if p_mean_cost[0] >= 0.95*np.mean(np.array(costs)) and p_mean_cost[0] <= 1.05*np.mean(np.array(costs)): + p_success_rates = [ 1.0 for p in p_values] + 
else: + for p in p_values: + if p > p_max: + p_costs = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + # print(p_costs) + converged = [ int(p_cost < 0.3*max_cost) for p_cost in p_costs] + success_rate = sum(converged)/len(converged) + p_success_rates.append(success_rate) + else: + p_success_rates.append(0) + + plt.bar(p_values, p_success_rates, align='center', alpha=0.5) + plt.xticks(p_values) + plt.yticks(np.arange(0, 1.2, 0.2), ['0%', '20%', '40%', '60%', '80%', '100%']) + plt.xlabel("p_value") + plt.ylabel("success rate") + plt.title(name, fontsize=12) + plt.show() + +def make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds): + """ Make a plot that combines time for optimizing, time for optimizing and compute min_eigen, + min_eigen, subound (subound = (f_R - f_SDP) / f_SDP). + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times1: list of timings (seconds) + costPs: f_SDP + cost3s: f_R + times2: list of timings (seconds) + min_eigens: list of min_eigen (double) + subounds: list of subound (double) + """ + + if dict(Counter(p_values))[5] != 1: + p_values = list(dict(Counter(p_values)).keys()) + iter = int(len(times1)/len(p_values)) + p_mean_times1 = [np.mean(np.array(times1[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_times2 = [np.mean(np.array(times2[i*iter:(i+1)*iter])) for i in range(len(p_values))] + print("p_values \n", p_values) + print("p_mean_times_opti \n", p_mean_times1) + print("p_mean_times_eig \n", p_mean_times2) + + p_mean_costPs = [np.mean(np.array(costPs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_cost3s = [np.mean(np.array(cost3s[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_lambdas = [np.mean(np.array(min_eigens[i*iter:(i+1)*iter])) for i in range(len(p_values))] + + print("p_mean_costPs \n", p_mean_costPs) + print("p_mean_cost3s \n", p_mean_cost3s) + print("p_mean_lambdas \n", p_mean_lambdas) + print("*******************************************************************************************************************") + + + else: + plt.figure(1) + plt.ylabel('Time used (seconds)') + plt.xlabel('p_value') + plt.plot(p_values, times1, 'r', label="time for optimizing") + plt.plot(p_values, times2, 'blue', label="time for optimizing and check") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(2) + plt.ylabel('Min eigen_value') + plt.xlabel('p_value') + plt.plot(p_values, min_eigens, 'r', label="min_eigen values") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(3) + plt.ylabel('sub_bounds') + plt.xlabel('p_value') + plt.plot(p_values, subounds, 'blue', label="sub_bounds") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.show() + +# Process arguments and call plot function +import argparse +import csv +import os + +parser = argparse.ArgumentParser() +parser.add_argument("path") +args = parser.parse_args() + + +file_path = [] +domain = os.path.abspath(args.path) +for info in os.listdir(args.path): + file_path.append(os.path.join(domain, info)) +file_path.sort() +print(file_path) + + +# name of all the plots +names = {} +names[0] = 'tinyGrid3D vertex = 9, edge = 11' +names[1] = 'smallGrid3D vertex = 125, edge = 297' +names[2] = 'parking-garage vertex = 1661, edge = 6275' +names[3] = 'sphere2500 vertex = 2500, edge = 4949' +# names[4] = 'sphere_bignoise vertex = 2200, edge = 8647' +names[5] = 'torus3D vertex = 
5000, edge = 9048' +names[6] = 'cubicle vertex = 5750, edge = 16869' +names[7] = 'rim vertex = 10195, edge = 29743' + +# Parse CSV file +for key, name in names.items(): + print(key, name) + + # find according file to process + name_file = None + for path in file_path: + if name[0:3] in path: + name_file = path + if name_file == None: + print("The file %s is not in the path" % name) + continue + + p_values, times1, costPs, cost3s, times2, min_eigens, subounds = [],[],[],[],[],[],[] + with open(name_file) as csvfile: + reader = csv.reader(csvfile, delimiter='\t') + for row in reader: + print(row) + p_values.append(int(row[0])) + times1.append(float(row[1])) + costPs.append(float(row[2])) + cost3s.append(float(row[3])) + if len(row) > 4: + times2.append(float(row[4])) + min_eigens.append(float(row[5])) + subounds.append(float(row[6])) + + #plot + # make_combined_plot(name, p_values, times1, cost3s) + # make_convergence_plot(name, p_values, times1, cost3s) + make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds) diff --git a/gtsam_unstable/timing/timeShonanAveraging.cpp b/gtsam_unstable/timing/timeShonanAveraging.cpp new file mode 100644 index 000000000..795961aef --- /dev/null +++ b/gtsam_unstable/timing/timeShonanAveraging.cpp @@ -0,0 +1,182 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date September 2019 + * @author Jing Wu + * @author Frank Dellaert + * @brief Timing script for Shonan Averaging algorithm + */ + +#include "gtsam/base/Matrix.h" +#include "gtsam/base/Vector.h" +#include "gtsam/geometry/Point3.h" +#include "gtsam/geometry/Rot3.h" +#include +#include + +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +// string g2oFile = findExampleDataFile("toyExample.g2o"); + +// save a single line of timing info to an output stream +void saveData(size_t p, double time1, double costP, double cost3, double time2, + double min_eigenvalue, double suBound, std::ostream* os) { + *os << (int)p << "\t" << time1 << "\t" << costP << "\t" << cost3 << "\t" + << time2 << "\t" << min_eigenvalue << "\t" << suBound << endl; +} + +void checkR(const Matrix& R) { + Matrix R2 = R.transpose(); + Matrix actual_R = R2 * R; + assert_equal(Rot3(),Rot3(actual_R)); +} + +void saveResult(string name, const Values& values) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".dat"); + size_t nrSO3 = values.count(); + myfile << "#Type SO3 Number " << nrSO3 << "\n"; + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R satisfy orthogonal constraint + checkR(R); + myfile << i; + for (int m = 0; m < 3; ++m) { + for (int n = 0; n < 3; ++n) { + myfile << " " << R(m, n); + } + } + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.dat file" << endl; +} + +void saveG2oResult(string name, const Values& values, std::map poses) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".g2o"); + size_t nrSO3 = values.count(); + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R 
satisfy orthogonal constraint + checkR(R); + myfile << "VERTEX_SE3:QUAT" << " "; + myfile << i << " "; + myfile << poses[i].x() << " " << poses[i].y() << " " << poses[i].z() << " "; + Vector quaternion = Rot3(R).quaternion(); + myfile << quaternion(3) << " " << quaternion(2) << " " << quaternion(1) << " " << quaternion(0); + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.g2o file" << endl; +} + +void saveResultQuat(const Values& values) { + ofstream myfile; + myfile.open("shonan_result.dat"); + size_t nrSOn = values.count(); + for (int i = 0; i < nrSOn; ++i) { + GTSAM_PRINT(values.at(i)); + Rot3 R = Rot3(values.at(i).matrix()); + float x = R.toQuaternion().x(); + float y = R.toQuaternion().y(); + float z = R.toQuaternion().z(); + float w = R.toQuaternion().w(); + myfile << "QuatSO3 " << i; + myfile << "QuatSO3 " << i << " " << w << " " << x << " " << y << " " << z << "\n"; + myfile.close(); + } +} + +int main(int argc, char* argv[]) { + // primitive argument parsing: + if (argc > 3) { + throw runtime_error("Usage: timeShonanAveraging [g2oFile]"); + } + + string g2oFile; + try { + if (argc > 1) + g2oFile = argv[argc - 1]; + else + g2oFile = string( + "/home/jingwu/git/SESync/data/sphere2500.g2o"); + + } catch (const exception& e) { + cerr << e.what() << '\n'; + exit(1); + } + + // Create a csv output file + size_t pos1 = g2oFile.find("data/"); + size_t pos2 = g2oFile.find(".g2o"); + string name = g2oFile.substr(pos1 + 5, pos2 - pos1 - 5); + cout << name << endl; + ofstream csvFile("shonan_timing_of_" + name + ".csv"); + + // Create Shonan averaging instance from the file. + // ShonanAveragingParameters parameters; + // double sigmaNoiseInRadians = 0 * M_PI / 180; + // parameters.setNoiseSigma(sigmaNoiseInRadians); + static const ShonanAveraging3 kShonan(g2oFile); + + // increase p value and try optimize using Shonan Algorithm. use chrono for + // timing + size_t pMin = 3; + Values Qstar; + Vector minEigenVector; + double CostP = 0, Cost3 = 0, lambdaMin = 0, suBound = 0; + cout << "(int)p" << "\t" << "time1" << "\t" << "costP" << "\t" << "cost3" << "\t" + << "time2" << "\t" << "MinEigenvalue" << "\t" << "SuBound" << endl; + + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + + for (size_t p = pMin; p < 6; p++) { + // Randomly initialize at lowest level, initialize by line search after that + const Values initial = + (p > pMin) ? 
kShonan.initializeWithDescent(p, Qstar, minEigenVector, + lambdaMin) + : ShonanAveraging::LiftTo(pMin, randomRotations); + chrono::steady_clock::time_point t1 = chrono::steady_clock::now(); + // optimizing + const Values result = kShonan.tryOptimizingAt(p, initial); + chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); + chrono::duration timeUsed1 = + chrono::duration_cast>(t2 - t1); + lambdaMin = kShonan.computeMinEigenValue(result, &minEigenVector); + chrono::steady_clock::time_point t3 = chrono::steady_clock::now(); + chrono::duration timeUsed2 = + chrono::duration_cast>(t3 - t1); + Qstar = result; + CostP = kShonan.costAt(p, result); + const Values SO3Values = kShonan.roundSolution(result); + Cost3 = kShonan.cost(SO3Values); + suBound = (Cost3 - CostP) / CostP; + + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &cout); + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &csvFile); + } + saveResult(name, kShonan.roundSolution(Qstar)); + // saveG2oResult(name, kShonan.roundSolution(Qstar), kShonan.Poses()); + return 0; +} diff --git a/timing/timeFrobeniusFactor.cpp b/timing/timeShonanFactor.cpp similarity index 92% rename from timing/timeFrobeniusFactor.cpp rename to timing/timeShonanFactor.cpp index 924213a33..207d54a4d 100644 --- a/timing/timeFrobeniusFactor.cpp +++ b/timing/timeShonanFactor.cpp @@ -10,8 +10,8 @@ * -------------------------------------------------------------------------- */ /** - * @file timeFrobeniusFactor.cpp - * @brief time FrobeniusFactor with BAL file + * @file timeShonanFactor.cpp + * @brief time ShonanFactor with BAL file * @author Frank Dellaert * @date 2019 */ @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include @@ -42,7 +42,7 @@ static SharedNoiseModel gNoiseModel = noiseModel::Unit::Create(2); int main(int argc, char* argv[]) { // primitive argument parsing: if (argc > 3) { - throw runtime_error("Usage: timeFrobeniusFactor [g2oFile]"); + throw runtime_error("Usage: timeShonanFactor [g2oFile]"); } string g2oFile; @@ -70,7 +70,7 @@ int main(int argc, char* argv[]) { const auto &keys = m.keys(); const Rot3 &Rij = m.measured(); const auto &model = m.noiseModel(); - graph.emplace_shared( + graph.emplace_shared( keys[0], keys[1], Rij, 4, model, G); } From 840831b62bda4248b41615fc74974f59e50e95cc Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 17 Aug 2020 14:37:12 -0400 Subject: [PATCH 083/142] Ported MATLAB Examples --- matlab/+gtsam/Point2.m | 12 +++++ matlab/+gtsam/Point3.m | 12 +++++ matlab/+gtsam/cylinderSampleProjection.m | 6 +-- .../+gtsam/cylinderSampleProjectionStereo.m | 20 ++++---- matlab/+gtsam/cylinderSampling.m | 6 +-- matlab/+gtsam/plotCamera.m | 2 +- matlab/+gtsam/plotFlyingResults.m | 16 +++---- matlab/+gtsam/plotPoint2.m | 6 +-- matlab/+gtsam/plotPoint3.m | 6 +-- matlab/+gtsam/plotPose3.m | 2 +- matlab/+gtsam/points2DTrackStereo.m | 2 +- matlab/gtsam_examples/CameraFlyingExample.m | 24 +++++----- matlab/gtsam_examples/MonocularVOExample.m | 4 +- matlab/gtsam_examples/PlanarSLAMExample.m | 9 ++-- .../PlanarSLAMExample_sampling.m | 11 +++-- matlab/gtsam_examples/Pose2SLAMwSPCG.m | 2 +- matlab/gtsam_examples/SBAExample.m | 4 +- matlab/gtsam_examples/SFMExample.m | 2 +- matlab/gtsam_examples/StereoVOExample.m | 8 ++-- matlab/gtsam_examples/VisualizeMEstimators.m | 2 +- matlab/gtsam_tests/testPlanarSLAMExample.m | 2 +- matlab/gtsam_tests/testSFMExample.m | 2 +- matlab/gtsam_tests/testStereoVOExample.m | 2 +- 
matlab/gtsam_tests/testValues.m | 8 ++-- matlab/gtsam_tests/testVisualISAMExample.m | 2 +- .../+imuSimulator/IMUComparison.m | 12 ++--- .../+imuSimulator/IMUComparison_with_cov.m | 4 +- .../+imuSimulator/calculateIMUMeas_coriolis.m | 2 +- .../+imuSimulator/calculateIMUMeasurement.m | 6 +-- .../+imuSimulator/coriolisExample.m | 30 ++++++------ .../+imuSimulator/covarianceAnalysisBetween.m | 10 ++-- .../covarianceAnalysisCreateFactorGraph.m | 8 ++-- .../covarianceAnalysisCreateTrajectory.m | 6 +-- .../+imuSimulator/integrateIMUTrajectory.m | 4 +- .../integrateIMUTrajectory_bodyFrame.m | 6 +-- .../integrateIMUTrajectory_navFrame.m | 4 +- .../+imuSimulator/integrateTrajectory.m | 8 ++-- .../+imuSimulator/test1onestep.m | 4 +- .../+imuSimulator/test2constglobal.m | 6 +-- .../+imuSimulator/test3constbody.m | 16 +++---- .../+imuSimulator/test4circle.m | 12 ++--- .../FlightCameraTransformIMU.m | 25 +++++----- ...ansformCalProjectionFactorIMUExampleISAM.m | 46 ++++++++++--------- .../plot_projected_landmarks.m | 6 +-- matlab/unstable_examples/project_landmarks.m | 4 +- 45 files changed, 213 insertions(+), 178 deletions(-) create mode 100644 matlab/+gtsam/Point2.m create mode 100644 matlab/+gtsam/Point3.m diff --git a/matlab/+gtsam/Point2.m b/matlab/+gtsam/Point2.m new file mode 100644 index 000000000..3ea65c33e --- /dev/null +++ b/matlab/+gtsam/Point2.m @@ -0,0 +1,12 @@ +function pt = Point2(varargin) + % Point2 shim + if nargin == 2 && isa(varargin{1}, 'double') + pt = [varargin{1} varargin{2}]'; + elseif nargin == 1 + pt = varargin{1}; + elseif nargin == 0 + pt = [0 0]'; + else + error('Arguments do not match any overload of Point2 shim'); + end +end \ No newline at end of file diff --git a/matlab/+gtsam/Point3.m b/matlab/+gtsam/Point3.m new file mode 100644 index 000000000..5f66b4517 --- /dev/null +++ b/matlab/+gtsam/Point3.m @@ -0,0 +1,12 @@ +function pt = Point3(varargin) + % Point3 shim + if nargin == 3 && isa(varargin{1}, 'double') + pt = [varargin{1} varargin{2} varargin{3}]'; + elseif nargin == 1 + pt = varargin{1}; + elseif nargin == 0 + pt = [0 0 0]'; + else + error('Arguments do not match any overload of Point3 shim'); + end +end \ No newline at end of file diff --git a/matlab/+gtsam/cylinderSampleProjection.m b/matlab/+gtsam/cylinderSampleProjection.m index 2b913b52d..697a57faa 100644 --- a/matlab/+gtsam/cylinderSampleProjection.m +++ b/matlab/+gtsam/cylinderSampleProjection.m @@ -50,9 +50,9 @@ for i = 1:cylinderNum visible = true; for k = 1:cylinderNum - rayCameraToPoint = pose.translation().between(sampledPoint3).vector(); - rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid).vector(); - rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3).vector(); + rayCameraToPoint = pose.translation().between(sampledPoint3); + rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid); + rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3); % Condition 1: all points in front of the cylinders' % surfaces are visible diff --git a/matlab/+gtsam/cylinderSampleProjectionStereo.m b/matlab/+gtsam/cylinderSampleProjectionStereo.m index 10409ac6d..58b4140fd 100644 --- a/matlab/+gtsam/cylinderSampleProjectionStereo.m +++ b/matlab/+gtsam/cylinderSampleProjectionStereo.m @@ -25,20 +25,20 @@ for i = 1:cylinderNum % For Cheirality Exception sampledPoint3 = cylinders{i}.Points{j}; sampledPoint3local = pose.transformTo(sampledPoint3); - if sampledPoint3local.z < 0 + if sampledPoint3local(3) < 0 continue; end % measurements - Z.du = K.fx() * K.baseline() 
/ sampledPoint3local.z; - Z.uL = K.fx() * sampledPoint3local.x / sampledPoint3local.z + K.px(); + Z.du = K.fx() * K.baseline() / sampledPoint3local(3); + Z.uL = K.fx() * sampledPoint3local(1) / sampledPoint3local(3) + K.px(); Z.uR = Z.uL + Z.du; - Z.v = K.fy() / sampledPoint3local.z + K.py(); + Z.v = K.fy() / sampledPoint3local(3) + K.py(); % ignore points not visible in the scene - if Z.uL < 0 || Z.uL >= imageSize.x || ... - Z.uR < 0 || Z.uR >= imageSize.x || ... - Z.v < 0 || Z.v >= imageSize.y + if Z.uL < 0 || Z.uL >= imageSize(1) || ... + Z.uR < 0 || Z.uR >= imageSize(1) || ... + Z.v < 0 || Z.v >= imageSize(2) continue; end @@ -54,9 +54,9 @@ for i = 1:cylinderNum visible = true; for k = 1:cylinderNum - rayCameraToPoint = pose.translation().between(sampledPoint3).vector(); - rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid).vector(); - rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3).vector(); + rayCameraToPoint = sampledPoint3 - pose.translation(); + rayCameraToCylinder = cylinders{k}.centroid - pose.translation(); + rayCylinderToPoint = sampledPoint3 - cylinders{k}.centroid; % Condition 1: all points in front of the cylinders' % surfaces are visible diff --git a/matlab/+gtsam/cylinderSampling.m b/matlab/+gtsam/cylinderSampling.m index dcaa9c721..dc76295fa 100644 --- a/matlab/+gtsam/cylinderSampling.m +++ b/matlab/+gtsam/cylinderSampling.m @@ -12,8 +12,8 @@ function [cylinder] = cylinderSampling(baseCentroid, radius, height, density) % sample the points for i = 1:pointsNum theta = 2 * pi * rand; - x = radius * cos(theta) + baseCentroid.x; - y = radius * sin(theta) + baseCentroid.y; + x = radius * cos(theta) + baseCentroid(1); + y = radius * sin(theta) + baseCentroid(2); z = height * rand; points3{i,1} = Point3([x,y,z]'); end @@ -22,5 +22,5 @@ function [cylinder] = cylinderSampling(baseCentroid, radius, height, density) cylinder.radius = radius; cylinder.height = height; cylinder.Points = points3; - cylinder.centroid = Point3(baseCentroid.x, baseCentroid.y, height/2); + cylinder.centroid = Point3(baseCentroid(1), baseCentroid(2), height/2); end \ No newline at end of file diff --git a/matlab/+gtsam/plotCamera.m b/matlab/+gtsam/plotCamera.m index d0d1f45b7..986cd9a68 100644 --- a/matlab/+gtsam/plotCamera.m +++ b/matlab/+gtsam/plotCamera.m @@ -1,7 +1,7 @@ function plotCamera(pose, axisLength) hold on - C = pose.translation().vector(); + C = pose.translation(); R = pose.rotation().matrix(); xAxis = C+R(:,1)*axisLength; diff --git a/matlab/+gtsam/plotFlyingResults.m b/matlab/+gtsam/plotFlyingResults.m index 5d4a287c4..202f2409b 100644 --- a/matlab/+gtsam/plotFlyingResults.m +++ b/matlab/+gtsam/plotFlyingResults.m @@ -13,7 +13,7 @@ set(gcf, 'Position', [80,1,1800,1000]); %% plot all the cylinders and sampled points axis equal -axis([0, options.fieldSize.x, 0, options.fieldSize.y, 0, options.height + 30]); +axis([0, options.fieldSize(1), 0, options.fieldSize(2), 0, options.height + 30]); xlabel('X (m)'); ylabel('Y (m)'); zlabel('Height (m)'); @@ -50,8 +50,8 @@ for i = 1:cylinderNum [X,Y,Z] = cylinder(cylinders{i}.radius, sampleDensity * cylinders{i}.radius * cylinders{i}.height); - X = X + cylinders{i}.centroid.x; - Y = Y + cylinders{i}.centroid.y; + X = X + cylinders{i}.centroid(1); + Y = Y + cylinders{i}.centroid(2); Z = Z * cylinders{i}.height; h_cylinder{i} = surf(X,Y,Z); @@ -76,7 +76,7 @@ for i = 1:posesSize %plotCamera(poses{i}, 3); gRp = poses{i}.rotation().matrix(); % rotation from pose to global - C = poses{i}.translation().vector(); + C = 
poses{i}.translation(); axisLength = 3; xAxis = C+gRp(:,1)*axisLength; @@ -111,14 +111,14 @@ for i = 1:posesSize for j = 1:pointSize if ~isempty(pts3d{j}.cov{i}) hold on - h_point{j} = plot3(pts3d{j}.data.x, pts3d{j}.data.y, pts3d{j}.data.z); - h_point_cov{j} = gtsam.covarianceEllipse3D([pts3d{j}.data.x; pts3d{j}.data.y; pts3d{j}.data.z], ... + h_point{j} = plot3(pts3d{j}.data(1), pts3d{j}.data(2), pts3d{j}.data(3)); + h_point_cov{j} = gtsam.covarianceEllipse3D([pts3d{j}.data(1); pts3d{j}.data(2); pts3d{j}.data(3)], ... pts3d{j}.cov{i}, options.plot.covarianceScale); end end axis equal - axis([0, options.fieldSize.x, 0, options.fieldSize.y, 0, options.height + 30]); + axis([0, options.fieldSize(1), 0, options.fieldSize(2), 0, options.height + 30]); drawnow @@ -158,7 +158,7 @@ for i = 1 : posesSize hold on campos([poses{i}.x, poses{i}.y, poses{i}.z]); - camtarget([options.fieldSize.x/2, options.fieldSize.y/2, 0]); + camtarget([options.fieldSize(1)/2, options.fieldSize(2)/2, 0]); camlight(hlight, 'headlight'); if options.writeVideo diff --git a/matlab/+gtsam/plotPoint2.m b/matlab/+gtsam/plotPoint2.m index cd066951d..8d10ecab6 100644 --- a/matlab/+gtsam/plotPoint2.m +++ b/matlab/+gtsam/plotPoint2.m @@ -1,10 +1,10 @@ function plotPoint2(p,color,P) % plotPoint2 shows a Point2, possibly with covariance matrix if size(color,2)==1 - plot(p.x,p.y,[color '*']); + plot(p(1),p(2),[color '*']); else - plot(p.x,p.y,color); + plot(p(1),p(2),color); end if exist('P', 'var') && (~isempty(P)) - gtsam.covarianceEllipse([p.x;p.y],P,color(1)); + gtsam.covarianceEllipse([p(1);p(2)],P,color(1)); end \ No newline at end of file diff --git a/matlab/+gtsam/plotPoint3.m b/matlab/+gtsam/plotPoint3.m index 390b44939..85c84a210 100644 --- a/matlab/+gtsam/plotPoint3.m +++ b/matlab/+gtsam/plotPoint3.m @@ -1,12 +1,12 @@ function plotPoint3(p, color, P) %PLOTPOINT3 Plot a Point3 with an optional covariance matrix if size(color,2)==1 - plot3(p.x,p.y,p.z,[color '*']); + plot3(p(1),p(2),p(3),[color '*']); else - plot3(p.x,p.y,p.z,color); + plot3(p(1),p(2),p(3),color); end if exist('P', 'var') - gtsam.covarianceEllipse3D([p.x;p.y;p.z],P); + gtsam.covarianceEllipse3D([p(1);p(2);p(3)],P); end end diff --git a/matlab/+gtsam/plotPose3.m b/matlab/+gtsam/plotPose3.m index 8c3c7dd76..258e3f776 100644 --- a/matlab/+gtsam/plotPose3.m +++ b/matlab/+gtsam/plotPose3.m @@ -4,7 +4,7 @@ if nargin<3,axisLength=0.1;end % get rotation and translation (center) gRp = pose.rotation().matrix(); % rotation from pose to global -C = pose.translation().vector(); +C = pose.translation(); if ~isempty(axisLength) % draw the camera axes diff --git a/matlab/+gtsam/points2DTrackStereo.m b/matlab/+gtsam/points2DTrackStereo.m index 60c9f1295..04cddb7f7 100644 --- a/matlab/+gtsam/points2DTrackStereo.m +++ b/matlab/+gtsam/points2DTrackStereo.m @@ -38,7 +38,7 @@ graph.add(PriorFactorPose3(symbol('x', 1), cameraPoses{1}, posePriorNoise)); %% initialize graph and values initialEstimate = Values; for i = 1:pointsNum - point_j = points3d{i}.data.retract(0.05*randn(3,1)); + point_j = points3d{i}.data + (0.05*randn(3,1)); initialEstimate.insert(symbol('p', i), point_j); end diff --git a/matlab/gtsam_examples/CameraFlyingExample.m b/matlab/gtsam_examples/CameraFlyingExample.m index add2bc75a..a0dfef22a 100644 --- a/matlab/gtsam_examples/CameraFlyingExample.m +++ b/matlab/gtsam_examples/CameraFlyingExample.m @@ -46,7 +46,7 @@ options.camera.horizon = 60; % camera baseline options.camera.baseline = 0.05; % camera focal length -options.camera.f = 
round(options.camera.resolution.x * options.camera.horizon / ... +options.camera.f = round(options.camera.resolution(1) * options.camera.horizon / ... options.camera.fov); % camera focal baseline options.camera.fB = options.camera.f * options.camera.baseline; @@ -54,15 +54,15 @@ options.camera.fB = options.camera.f * options.camera.baseline; options.camera.disparity = options.camera.fB / options.camera.horizon; % Monocular Camera Calibration options.camera.monoK = Cal3_S2(options.camera.f, options.camera.f, 0, ... - options.camera.resolution.x/2, options.camera.resolution.y/2); + options.camera.resolution(1)/2, options.camera.resolution(2)/2); % Stereo Camera Calibration options.camera.stereoK = Cal3_S2Stereo(options.camera.f, options.camera.f, 0, ... - options.camera.resolution.x/2, options.camera.resolution.y/2, options.camera.disparity); + options.camera.resolution(1)/2, options.camera.resolution(2)/2, options.camera.disparity); % write video output options.writeVideo = true; % the testing field size (unit: meter) -options.fieldSize = Point2([100, 100]'); +options.fieldSize = Point2(100, 100); % camera flying speed (unit: meter / second) options.speed = 20; % camera flying height @@ -113,14 +113,14 @@ theta = 0; i = 1; while i <= cylinderNum theta = theta + 2*pi/10; - x = 40 * rand * cos(theta) + options.fieldSize.x/2; - y = 20 * rand * sin(theta) + options.fieldSize.y/2; - baseCentroid{i} = Point2([x, y]'); + x = 40 * rand * cos(theta) + options.fieldSize(1)/2; + y = 20 * rand * sin(theta) + options.fieldSize(2)/2; + baseCentroid{i} = Point2(x, y); % prevent two cylinders interact with each other regenerate = false; for j = 1:i-1 - if i > 1 && baseCentroid{i}.dist(baseCentroid{j}) < options.cylinder.radius * 2 + if i > 1 && norm(baseCentroid{i} - baseCentroid{j}) < options.cylinder.radius * 2 regenerate = true; break; end @@ -146,17 +146,17 @@ while 1 angle = 0.1*pi*(rand-0.5); inc_t = Point3(distance * cos(angle), ... distance * sin(angle), 0); - t = t.compose(inc_t); + t = t + inc_t; - if t.x > options.fieldSize.x - 10 || t.y > options.fieldSize.y - 10; + if t(1) > options.fieldSize(1) - 10 || t(2) > options.fieldSize(2) - 10; break; end %t = Point3([(i-1)*(options.fieldSize.x - 10)/options.poseNum + 10, ... % 15, 10]'); camera = PinholeCameraCal3_S2.Lookat(t, ... - Point3(options.fieldSize.x/2, options.fieldSize.y/2, 0), ... - Point3([0,0,1]'), options.camera.monoK); + Point3(options.fieldSize(1)/2, options.fieldSize(2)/2, 0), ... 
+ Point3(0,0,1), options.camera.monoK); cameraPoses{end+1} = camera.pose; end diff --git a/matlab/gtsam_examples/MonocularVOExample.m b/matlab/gtsam_examples/MonocularVOExample.m index 11c4253de..0d09a1487 100644 --- a/matlab/gtsam_examples/MonocularVOExample.m +++ b/matlab/gtsam_examples/MonocularVOExample.m @@ -15,14 +15,14 @@ import gtsam.* %% Create two cameras and corresponding essential matrix E aRb = Rot3.Yaw(pi/2); -aTb = Point3 (0.1, 0, 0); +aTb = [.1, 0, 0]'; identity=Pose3; aPb = Pose3(aRb, aTb); cameraA = CalibratedCamera(identity); cameraB = CalibratedCamera(aPb); %% Create 5 points -P = { Point3(0, 0, 1), Point3(-0.1, 0, 1), Point3(0.1, 0, 1), Point3(0, 0.5, 0.5), Point3(0, -0.5, 0.5) }; +P = { [0, 0, 1]', [-0.1, 0, 1]', [0.1, 0, 1]', [0, 0.5, 0.5]', [0, -0.5, 0.5]' }; %% Project points in both cameras for i=1:5 diff --git a/matlab/gtsam_examples/PlanarSLAMExample.m b/matlab/gtsam_examples/PlanarSLAMExample.m index aec933d74..3febc23e6 100644 --- a/matlab/gtsam_examples/PlanarSLAMExample.m +++ b/matlab/gtsam_examples/PlanarSLAMExample.m @@ -71,9 +71,12 @@ marginals = Marginals(graph, result); plot2DTrajectory(result, [], marginals); plot2DPoints(result, 'b', marginals); -plot([result.atPose2(i1).x; result.atPoint2(j1).x],[result.atPose2(i1).y; result.atPoint2(j1).y], 'c-'); -plot([result.atPose2(i2).x; result.atPoint2(j1).x],[result.atPose2(i2).y; result.atPoint2(j1).y], 'c-'); -plot([result.atPose2(i3).x; result.atPoint2(j2).x],[result.atPose2(i3).y; result.atPoint2(j2).y], 'c-'); +p_j1 = result.atPoint2(j1); +p_j2 = result.atPoint2(j2); + +plot([result.atPose2(i1).x; p_j1(1)],[result.atPose2(i1).y; p_j1(2)], 'c-'); +plot([result.atPose2(i2).x; p_j1(1)],[result.atPose2(i2).y; p_j1(2)], 'c-'); +plot([result.atPose2(i3).x; p_j2(1)],[result.atPose2(i3).y; p_j2(2)], 'c-'); axis([-0.6 4.8 -1 1]) axis equal view(2) diff --git a/matlab/gtsam_examples/PlanarSLAMExample_sampling.m b/matlab/gtsam_examples/PlanarSLAMExample_sampling.m index 375ed645c..93979a68a 100644 --- a/matlab/gtsam_examples/PlanarSLAMExample_sampling.m +++ b/matlab/gtsam_examples/PlanarSLAMExample_sampling.m @@ -60,15 +60,18 @@ for j=1:2 S{j}=chol(Q{j}); % for sampling end -plot([sample.atPose2(i1).x; sample.atPoint2(j1).x],[sample.atPose2(i1).y; sample.atPoint2(j1).y], 'c-'); -plot([sample.atPose2(i2).x; sample.atPoint2(j1).x],[sample.atPose2(i2).y; sample.atPoint2(j1).y], 'c-'); -plot([sample.atPose2(i3).x; sample.atPoint2(j2).x],[sample.atPose2(i3).y; sample.atPoint2(j2).y], 'c-'); +p_j1 = sample.atPoint2(j1); +p_j2 = sample.atPoint2(j2); + +plot([sample.atPose2(i1).x; p_j1(1)],[sample.atPose2(i1).y; p_j1(2)], 'c-'); +plot([sample.atPose2(i2).x; p_j1(1)],[sample.atPose2(i2).y; p_j1(2)], 'c-'); +plot([sample.atPose2(i3).x; p_j2(1)],[sample.atPose2(i3).y; p_j2(2)], 'c-'); view(2); axis auto; axis equal %% Do Sampling on point 2 N=1000; for s=1:N delta = S{2}*randn(2,1); - proposedPoint = Point2(point{2}.x+delta(1),point{2}.y+delta(2)); + proposedPoint = Point2(point{2} + delta); plotPoint2(proposedPoint,'k.') end \ No newline at end of file diff --git a/matlab/gtsam_examples/Pose2SLAMwSPCG.m b/matlab/gtsam_examples/Pose2SLAMwSPCG.m index 67f22fe1d..2df7efe2f 100644 --- a/matlab/gtsam_examples/Pose2SLAMwSPCG.m +++ b/matlab/gtsam_examples/Pose2SLAMwSPCG.m @@ -54,7 +54,7 @@ initialEstimate.print(sprintf('\nInitial estimate:\n')); %% Optimize using Levenberg-Marquardt optimization with SubgraphSolver params = gtsam.LevenbergMarquardtParams; subgraphParams = gtsam.SubgraphSolverParameters; 
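For reference, the solver-type change in the hunk below ('CONJUGATE_GRADIENT' replaced by 'ITERATIVE' plus explicit iterative parameters) expressed through the Python wrapper on a minimal Pose2 problem. This is a sketch that assumes setLinearSolverType, setIterativeParams, and SubgraphSolverParameters are exposed to Python the same way they are to MATLAB here; the toy graph and keys are invented for illustration.

import numpy as np
import gtsam

# Tiny Pose2 problem: a prior on pose 1 and one odometry factor to pose 2.
graph = gtsam.NonlinearFactorGraph()
prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
odom_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), prior_noise))
graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), odom_noise))

initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.1, 0.1, 0.05))
initial.insert(2, gtsam.Pose2(2.2, -0.1, -0.05))

# Request the iterative linear solver and hand it subgraph-preconditioner parameters.
params = gtsam.LevenbergMarquardtParams()
params.setLinearSolverType("ITERATIVE")              # replaces "CONJUGATE_GRADIENT"
params.setIterativeParams(gtsam.SubgraphSolverParameters())

result = gtsam.LevenbergMarquardtOptimizer(graph, initial, params).optimize()
print(result)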
-params.setLinearSolverType('CONJUGATE_GRADIENT'); +params.setLinearSolverType('ITERATIVE'); params.setIterativeParams(subgraphParams); optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate); result = optimizer.optimize(); diff --git a/matlab/gtsam_examples/SBAExample.m b/matlab/gtsam_examples/SBAExample.m index 584ace09a..f94a2ee4e 100644 --- a/matlab/gtsam_examples/SBAExample.m +++ b/matlab/gtsam_examples/SBAExample.m @@ -47,7 +47,7 @@ end %% Add Gaussian priors for a pose and a landmark to constrain the system cameraPriorNoise = noiseModel.Diagonal.Sigmas(cameraNoiseSigmas); firstCamera = PinholeCameraCal3_S2(truth.cameras{1}.pose, truth.K); -graph.add(PriorFactorSimpleCamera(symbol('c',1), firstCamera, cameraPriorNoise)); +graph.add(PriorFactorPinholeCameraCal3_S2(symbol('c',1), firstCamera, cameraPriorNoise)); pointPriorNoise = noiseModel.Isotropic.Sigma(3,pointNoiseSigma); graph.add(PriorFactorPoint3(symbol('p',1), truth.points{1}, pointPriorNoise)); @@ -64,7 +64,7 @@ for i=1:size(truth.cameras,2) initialEstimate.insert(symbol('c',i), camera_i); end for j=1:size(truth.points,2) - point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1)); + point_j = Point3(truth.points{j} + 0.1*randn(3,1)); initialEstimate.insert(symbol('p',j), point_j); end initialEstimate.print(sprintf('\nInitial estimate:\n ')); diff --git a/matlab/gtsam_examples/SFMExample.m b/matlab/gtsam_examples/SFMExample.m index 6700e90d2..a57da929c 100644 --- a/matlab/gtsam_examples/SFMExample.m +++ b/matlab/gtsam_examples/SFMExample.m @@ -58,7 +58,7 @@ for i=1:size(truth.cameras,2) initialEstimate.insert(symbol('x',i), pose_i); end for j=1:size(truth.points,2) - point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1)); + point_j = Point3(truth.points{j} + 0.1*randn(3,1)); initialEstimate.insert(symbol('p',j), point_j); end initialEstimate.print(sprintf('\nInitial estimate:\n ')); diff --git a/matlab/gtsam_examples/StereoVOExample.m b/matlab/gtsam_examples/StereoVOExample.m index b437ca80c..e7345fcf2 100644 --- a/matlab/gtsam_examples/StereoVOExample.m +++ b/matlab/gtsam_examples/StereoVOExample.m @@ -51,10 +51,10 @@ graph.add(GenericStereoFactor3D(StereoPoint2(320, 270, 115), stereo_model, x2, l initialEstimate = Values; initialEstimate.insert(x1, first_pose); % noisy estimate for pose 2 -initialEstimate.insert(x2, Pose3(Rot3(), Point3(0.1,-.1,1.1))); -initialEstimate.insert(l1, Point3( 1, 1, 5)); -initialEstimate.insert(l2, Point3(-1, 1, 5)); -initialEstimate.insert(l3, Point3( 0,-.5, 5)); +initialEstimate.insert(x2, Pose3(Rot3(), [0.1, -.1, 1.1]')); +initialEstimate.insert(l1, [ 1, 1, 5]'); +initialEstimate.insert(l2, [-1, 1, 5]'); +initialEstimate.insert(l3, [ 0,-.5, 5]'); %% optimize fprintf(1,'Optimizing\n'); tic diff --git a/matlab/gtsam_examples/VisualizeMEstimators.m b/matlab/gtsam_examples/VisualizeMEstimators.m index 8a0485334..ce505df5d 100644 --- a/matlab/gtsam_examples/VisualizeMEstimators.m +++ b/matlab/gtsam_examples/VisualizeMEstimators.m @@ -48,7 +48,7 @@ function plot_m_estimator(x, model, plot_title, fig_id, filename) rho = zeros(size(x)); for i = 1:size(x, 2) w(i) = model.weight(x(i)); - rho(i) = model.residual(x(i)); + rho(i) = model.loss(x(i)); end psi = w .* x; diff --git a/matlab/gtsam_tests/testPlanarSLAMExample.m b/matlab/gtsam_tests/testPlanarSLAMExample.m index d3cab5d1f..e0b4dbfc8 100644 --- a/matlab/gtsam_tests/testPlanarSLAMExample.m +++ b/matlab/gtsam_tests/testPlanarSLAMExample.m @@ -66,4 +66,4 @@ CHECK('pose_1.equals(Pose2,1e-4)',pose_1.equals(Pose2,1e-4)); point_1 = 
result.atPoint2(symbol('l',1)); marginals.marginalCovariance(symbol('l',1)); -CHECK('point_1.equals(Point2(2,2),1e-4)',point_1.equals(Point2(2,2),1e-4)); +CHECK('point_1.equals(Point2(2,2),1e-4)',norm(point_1 - Point2(2,2)) < 1e-4); diff --git a/matlab/gtsam_tests/testSFMExample.m b/matlab/gtsam_tests/testSFMExample.m index 985cbdb2c..a1f63c3a7 100644 --- a/matlab/gtsam_tests/testSFMExample.m +++ b/matlab/gtsam_tests/testSFMExample.m @@ -69,5 +69,5 @@ end for j=1:size(truth.points,2) point_j = result.atPoint3(symbol('p',j)); - CHECK('point_j.equals(truth.points{j},1e-5)',point_j.equals(truth.points{j},1e-5)) + CHECK('point_j.equals(truth.points{j},1e-5)',norm(point_j - truth.points{j}) < 1e-5) end diff --git a/matlab/gtsam_tests/testStereoVOExample.m b/matlab/gtsam_tests/testStereoVOExample.m index 218d0ace1..c721244ba 100644 --- a/matlab/gtsam_tests/testStereoVOExample.m +++ b/matlab/gtsam_tests/testStereoVOExample.m @@ -65,4 +65,4 @@ pose_x1 = result.atPose3(x1); CHECK('pose_x1.equals(first_pose,1e-4)',pose_x1.equals(first_pose,1e-4)); point_l1 = result.atPoint3(l1); -CHECK('point_1.equals(expected_l1,1e-4)',point_l1.equals(expected_l1,1e-4)); \ No newline at end of file +CHECK('point_1.equals(expected_l1,1e-4)',norm(point_l1 - expected_l1) < 1e-4); \ No newline at end of file diff --git a/matlab/gtsam_tests/testValues.m b/matlab/gtsam_tests/testValues.m index fe2cd30fe..48bc83f2c 100644 --- a/matlab/gtsam_tests/testValues.m +++ b/matlab/gtsam_tests/testValues.m @@ -5,8 +5,8 @@ values = Values; E = EssentialMatrix(Rot3,Unit3); tol = 1e-9; -values.insert(0, Point2); -values.insert(1, Point3); +values.insert(0, Point2(0, 0)); +values.insert(1, Point3(0, 0, 0)); values.insert(2, Rot2); values.insert(3, Pose2); values.insert(4, Rot3); @@ -21,8 +21,8 @@ values.insert(10, imuBias.ConstantBias); values.insert(11, [1;2;3]); values.insert(12, [1 2;3 4]); -EXPECT('at',values.atPoint2(0).equals(Point2,tol)); -EXPECT('at',values.atPoint3(1).equals(Point3,tol)); +EXPECT('at',values.atPoint2(0) == Point2(0, 0)); +EXPECT('at',values.atPoint3(1) == Point3(0, 0, 0)); EXPECT('at',values.atRot2(2).equals(Rot2,tol)); EXPECT('at',values.atPose2(3).equals(Pose2,tol)); EXPECT('at',values.atRot3(4).equals(Rot3,tol)); diff --git a/matlab/gtsam_tests/testVisualISAMExample.m b/matlab/gtsam_tests/testVisualISAMExample.m index 223e823a6..f75942ea7 100644 --- a/matlab/gtsam_tests/testVisualISAMExample.m +++ b/matlab/gtsam_tests/testVisualISAMExample.m @@ -51,5 +51,5 @@ end for j=1:size(truth.points,2) point_j = result.atPoint3(symbol('l',j)); - CHECK('point_j.equals(truth.points{j},1e-5)',point_j.equals(truth.points{j},1e-5)) + CHECK('point_j.equals(truth.points{j},1e-5)',norm(point_j - truth.points{j}) < 1e-5) end diff --git a/matlab/unstable_examples/+imuSimulator/IMUComparison.m b/matlab/unstable_examples/+imuSimulator/IMUComparison.m index 68e20bb25..871f023ef 100644 --- a/matlab/unstable_examples/+imuSimulator/IMUComparison.m +++ b/matlab/unstable_examples/+imuSimulator/IMUComparison.m @@ -28,19 +28,19 @@ currentVelocityGlobalIMUbody = currentVelocityGlobal; %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Trajectory estimate (integrated in the navigation frame) positionsIMUnav = zeros(3, length(times)+1); -positionsIMUnav(:,1) = 
currentPoseGlobalIMUbody.translation.vector; +positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation; posesIMUnav(1).p = positionsIMUnav(:,1); posesIMUnav(1).R = poses(1).R; % Trajectory estimate (integrated in the body frame) positionsIMUbody = zeros(3, length(times)+1); -positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation; posesIMUbody(1).p = positionsIMUbody(:,1); posesIMUbody(1).R = poses(1).R; @@ -120,9 +120,9 @@ for t = times currentPoseGlobalIMUnav, currentVelocityGlobalIMUnav, acc_omega, deltaT); %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation.vector; - positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation; + positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation; % - poses(i).p = positions(:,i); posesIMUbody(i).p = positionsIMUbody(:,i); diff --git a/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m b/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m index c589bea32..450697de0 100644 --- a/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m +++ b/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m @@ -28,7 +28,7 @@ currentVelocityGlobal = velocity; %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; @@ -112,7 +112,7 @@ for t = times end %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; + positions(:,i) = currentPoseGlobal.translation; i = i + 1; end diff --git a/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m b/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m index c86e40a21..0d8abad2c 100644 --- a/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m +++ b/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m @@ -7,7 +7,7 @@ measuredOmega = omega1Body; % Acceleration measurement (in this simple toy example no other forces % act on the body and the only acceleration is the centripetal Coriolis acceleration) -measuredAcc = Point3(cross(omega1Body, velocity1Body)).vector; +measuredAcc = Point3(cross(omega1Body, velocity1Body)); acc_omega = [ measuredAcc; measuredOmega ]; end diff --git a/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m b/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m index 534b9365e..5ed1fc516 100644 --- a/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m +++ b/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m @@ -6,16 +6,16 @@ import gtsam.*; % Calculate gyro measured rotation rate by transforming body rotation rate % into the IMU frame. -measuredOmega = imuFrame.rotation.unrotate(Point3(omega1Body)).vector; +measuredOmega = imuFrame.rotation.unrotate(Point3(omega1Body)); % Transform both velocities into IMU frame, accounting for the velocity % induced by rigid body rotation on a lever arm (Coriolis effect). velocity1inertial = imuFrame.rotation.unrotate( ... 
- Point3(velocity1Body + cross(omega1Body, imuFrame.translation.vector))).vector; + Point3(velocity1Body + cross(omega1Body, imuFrame.translation))); imu2in1 = Rot3.Expmap(measuredOmega * deltaT); velocity2inertial = imu2in1.rotate(imuFrame.rotation.unrotate( ... - Point3(velocity2Body + cross(omega2Body, imuFrame.translation.vector)))).vector; + Point3(velocity2Body + cross(omega2Body, imuFrame.translation)))); % Acceleration in IMU frame measuredAcc = (velocity2inertial - velocity1inertial) / deltaT; diff --git a/matlab/unstable_examples/+imuSimulator/coriolisExample.m b/matlab/unstable_examples/+imuSimulator/coriolisExample.m index 35d27aa73..ee4deb433 100644 --- a/matlab/unstable_examples/+imuSimulator/coriolisExample.m +++ b/matlab/unstable_examples/+imuSimulator/coriolisExample.m @@ -190,13 +190,13 @@ for i = 1:length(times) newFactors.add(PriorFactorConstantBias(currentBiasKey, zeroBias, sigma_init_b)); % Store data - positionsInFixedGT(:,1) = currentPoseFixedGT.translation.vector; + positionsInFixedGT(:,1) = currentPoseFixedGT.translation; velocityInFixedGT(:,1) = currentVelocityFixedGT; - positionsInRotatingGT(:,1) = currentPoseRotatingGT.translation.vector; - %velocityInRotatingGT(:,1) = currentPoseRotatingGT.velocity.vector; - positionsEstimates(:,i) = currentPoseEstimate.translation.vector; - velocitiesEstimates(:,i) = currentVelocityEstimate.vector; - currentRotatingFrameForPlot(1).p = currentRotatingFrame.translation.vector; + positionsInRotatingGT(:,1) = currentPoseRotatingGT.translation; + %velocityInRotatingGT(:,1) = currentPoseRotatingGT.velocity; + positionsEstimates(:,i) = currentPoseEstimate.translation; + velocitiesEstimates(:,i) = currentVelocityEstimate; + currentRotatingFrameForPlot(1).p = currentRotatingFrame.translation; currentRotatingFrameForPlot(1).R = currentRotatingFrame.rotation.matrix; else @@ -204,18 +204,18 @@ for i = 1:length(times) % Update the position and velocity % x_t = x_0 + v_0*dt + 1/2*a_0*dt^2 % v_t = v_0 + a_0*dt - currentPositionFixedGT = Point3(currentPoseFixedGT.translation.vector ... + currentPositionFixedGT = Point3(currentPoseFixedGT.translation ... + currentVelocityFixedGT * deltaT + 0.5 * accelFixed * deltaT * deltaT); currentVelocityFixedGT = currentVelocityFixedGT + accelFixed * deltaT; currentPoseFixedGT = Pose3(Rot3, currentPositionFixedGT); % constant orientation % Rotate pose in fixed frame to get pose in rotating frame - previousPositionRotatingGT = currentPoseRotatingGT.translation.vector; + previousPositionRotatingGT = currentPoseRotatingGT.translation; currentRotatingFrame = currentRotatingFrame.compose(changePoseRotatingFrame); inverseCurrentRotatingFrame = (currentRotatingFrame.inverse); currentPoseRotatingGT = inverseCurrentRotatingFrame.compose(currentPoseFixedGT); - currentPositionRotatingGT = currentPoseRotatingGT.translation.vector; + currentPositionRotatingGT = currentPoseRotatingGT.translation; % Get velocity in rotating frame by treating it like a position and using compose % Maybe Luca knows a better way to do this within GTSAM. 
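The edits in this hunk all follow the same pattern: Pose3.translation() now returns a plain vector, so the ground-truth propagation spelled out in the comments above (x_t = x_0 + v_0*dt + 1/2*a_0*dt^2, v_t = v_0 + a_0*dt) maps directly onto ordinary vector arithmetic. A NumPy-only sketch of that constant-acceleration update, independent of the GTSAM types:

import numpy as np

def propagate_constant_accel(p0, v0, a, dt):
    # Constant-acceleration position/velocity update used for the fixed-frame ground truth.
    p1 = p0 + v0 * dt + 0.5 * a * dt ** 2
    v1 = v0 + a * dt
    return p1, v1

p, v = np.zeros(3), np.array([1.0, 0.0, 0.0])
a = np.array([0.0, 0.1, 0.0])
for _ in range(10):                 # 10 steps of dt = 0.1 s
    p, v = propagate_constant_accel(p, v, a, dt=0.1)
print(p, v)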
@@ -230,11 +230,11 @@ for i = 1:length(times) % - 0.5 * accelFixed * deltaT * deltaT) / deltaT + accelFixed * deltaT; % Store GT (ground truth) poses - positionsInFixedGT(:,i) = currentPoseFixedGT.translation.vector; + positionsInFixedGT(:,i) = currentPoseFixedGT.translation; velocityInFixedGT(:,i) = currentVelocityFixedGT; - positionsInRotatingGT(:,i) = currentPoseRotatingGT.translation.vector; + positionsInRotatingGT(:,i) = currentPoseRotatingGT.translation; velocityInRotatingGT(:,i) = currentVelocityRotatingGT; - currentRotatingFrameForPlot(i).p = currentRotatingFrame.translation.vector; + currentRotatingFrameForPlot(i).p = currentRotatingFrame.translation; currentRotatingFrameForPlot(i).R = currentRotatingFrame.rotation.matrix; %% Estimate trajectory in rotating frame using GTSAM (ground truth measurements) @@ -303,9 +303,9 @@ for i = 1:length(times) currentVelocityEstimate = isam.calculateEstimate(currentVelKey); currentBias = isam.calculateEstimate(currentBiasKey); - positionsEstimates(:,i) = currentPoseEstimate.translation.vector; - velocitiesEstimates(:,i) = currentVelocityEstimate.vector; - biasEstimates(:,i) = currentBias.vector; + positionsEstimates(:,i) = currentPoseEstimate.translation; + velocitiesEstimates(:,i) = currentVelocityEstimate; + biasEstimates(:,i) = currentBias; % In matrix form: R_error = R_gt'*R_estimate % Perform Logmap on the rotation matrix to get a vector diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m index 2eddf75ee..64ba36d3b 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m @@ -151,14 +151,14 @@ hold on; if options.includeCameraFactors b = [-1000 2000 -2000 2000 -30 30]; for i = 1:size(metadata.camera.gtLandmarkPoints,2) - p = metadata.camera.gtLandmarkPoints(i).vector; + p = metadata.camera.gtLandmarkPoints(i); if(p(1) > b(1) && p(1) < b(2) && p(2) > b(3) && p(2) < b(4) && p(3) > b(5) && p(3) < b(6)) plot3(p(1), p(2), p(3), 'k+'); end end pointsToPlot = metadata.camera.gtLandmarkPoints(find(projectionFactorSeenBy > 0)); for i = 1:length(pointsToPlot) - p = pointsToPlot(i).vector; + p = pointsToPlot(i); plot3(p(1), p(2), p(3), 'gs', 'MarkerSize', 10); end end @@ -233,9 +233,9 @@ for k=1:numMonteCarloRuns for i=0:options.trajectoryLength % compute estimation errors currentPoseKey = symbol('x', i); - gtPosition = gtValues.at(currentPoseKey).translation.vector; - estPosition = estimate.at(currentPoseKey).translation.vector; - estR = estimate.at(currentPoseKey).rotation.matrix; + gtPosition = gtValues.atPose3(currentPoseKey).translation; + estPosition = estimate.atPose3(currentPoseKey).translation; + estR = estimate.atPose3(currentPoseKey).rotation.matrix; errPosition = estPosition - gtPosition; % compute covariances: diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m index 00ae4b9c2..07f146dcb 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m @@ -14,7 +14,7 @@ graph = NonlinearFactorGraph; for i=0:length(measurements) % Get the current pose currentPoseKey = symbol('x', i); - currentPose = values.at(currentPoseKey); + currentPose = values.atPose3(currentPoseKey); if i==0 %% first time step, add priors @@ -26,11 +26,11 @@ 
for i=0:length(measurements) % IMU velocity and bias priors if options.includeIMUFactors == 1 currentVelKey = symbol('v', 0); - currentVel = values.at(currentVelKey).vector; + currentVel = values.atPoint3(currentVelKey); graph.add(PriorFactorLieVector(currentVelKey, LieVector(currentVel), noiseModels.noiseVel)); currentBiasKey = symbol('b', 0); - currentBias = values.at(currentBiasKey); + currentBias = values.atPoint3(currentBiasKey); graph.add(PriorFactorConstantBias(currentBiasKey, currentBias, noiseModels.noisePriorBias)); end @@ -155,7 +155,7 @@ for i=0:length(measurements) if options.includeCameraFactors == 1 for j = 1:length(measurements(i).landmarks) cameraMeasurmentNoise = measurementNoise.cameraNoiseVector .* randn(2,1); - cameraPixelMeasurement = measurements(i).landmarks(j).vector; + cameraPixelMeasurement = measurements(i).landmarks(j); % Only add the measurement if it is in the image frame (based on calibration) if(cameraPixelMeasurement(1) > 0 && cameraPixelMeasurement(2) > 0 ... && cameraPixelMeasurement(1) < 2*metadata.camera.calibration.px ... diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m index 195b7ff69..3d8a9b5d2 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m @@ -40,7 +40,7 @@ if options.useRealData == 1 %% gt Between measurements if options.includeBetweenFactors == 1 && i > 0 - prevPose = values.at(currentPoseKey - 1); + prevPose = values.atPose3(currentPoseKey - 1); deltaPose = prevPose.between(currentPose); measurements(i).deltaVector = Pose3.Logmap(deltaPose); end @@ -65,7 +65,7 @@ if options.useRealData == 1 IMUdeltaPose = IMUPose1.between(IMUPose2); IMUdeltaPoseVector = Pose3.Logmap(IMUdeltaPose); IMUdeltaRotVector = IMUdeltaPoseVector(1:3); - IMUdeltaPositionVector = IMUPose2.translation.vector - IMUPose1.translation.vector; % translation in absolute frame + IMUdeltaPositionVector = IMUPose2.translation - IMUPose1.translation; % translation in absolute frame measurements(i).imu(j).deltaT = deltaT; @@ -88,7 +88,7 @@ if options.useRealData == 1 %% gt GPS measurements if options.includeGPSFactors == 1 && i > 0 - gpsPositionVector = imuSimulator.getPoseFromGtScenario(gtScenario,scenarioInd).translation.vector; + gpsPositionVector = imuSimulator.getPoseFromGtScenario(gtScenario,scenarioInd).translation; measurements(i).gpsPositionVector = gpsPositionVector; end diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m index 3f72f1215..2de1e1103 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m @@ -5,9 +5,9 @@ function [ finalPose, finalVelocityGlobal ] = integrateIMUTrajectory( ... import gtsam.*; imu2in1 = Rot3.Expmap(acc_omegaIMU(4:6) * deltaT); -accelGlobal = initialPoseGlobal.rotation().rotate(Point3(acc_omegaIMU(1:3))).vector; +accelGlobal = initialPoseGlobal.rotation().rotate(Point3(acc_omegaIMU(1:3))); -finalPosition = Point3(initialPoseGlobal.translation.vector ... +finalPosition = Point3(initialPoseGlobal.translation ... 
+ initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; finalRotation = initialPoseGlobal.rotation.compose(imu2in1); diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m index 50b223060..bec2d760d 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m @@ -3,7 +3,7 @@ function [ finalPose, finalVelocityGlobal ] = integrateIMUTrajectory_bodyFrame( % Before integrating in the body frame we need to compensate for the Coriolis % effect -acc_body = acc_omegaIMU(1:3) - Point3(cross(acc_omegaIMU(4:6), velocity1Body)).vector; +acc_body = acc_omegaIMU(1:3) - Point3(cross(acc_omegaIMU(4:6), velocity1Body)); % after compensating for coriolis this will be essentially zero % since we are moving at constant body velocity @@ -16,8 +16,8 @@ finalPositionBody = velocity1Body * deltaT + 0.5 * acc_body * deltaT * deltaT; finalVelocityBody = velocity1Body + acc_body * deltaT; %% Express the integrated quantities in the global frame -finalVelocityGlobal = initialVelocityGlobal + (initialPoseGlobal.rotation().rotate(Point3(finalVelocityBody)).vector() ); -finalPosition = initialPoseGlobal.translation().vector() + initialPoseGlobal.rotation().rotate( Point3(finalPositionBody)).vector() ; +finalVelocityGlobal = initialVelocityGlobal + (initialPoseGlobal.rotation().rotate(Point3(finalVelocityBody)) ); +finalPosition = initialPoseGlobal.translation() + initialPoseGlobal.rotation().rotate( Point3(finalPositionBody)) ; finalRotation = initialPoseGlobal.rotation.compose(imu2in1); % Include position and rotation in a pose finalPose = Pose3(finalRotation, Point3(finalPosition) ); diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m index b919520ac..ea851315f 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m @@ -9,8 +9,8 @@ finalRotation = initialPoseGlobal.rotation.compose(imu2in1); intermediateRotation = initialPoseGlobal.rotation.compose( Rot3.Expmap(acc_omegaIMU(4:6) * deltaT/2 )); % Integrate positions (equation (1) in Lupton) -accelGlobal = intermediateRotation.rotate(Point3(acc_omegaIMU(1:3))).vector; -finalPosition = Point3(initialPoseGlobal.translation.vector ... +accelGlobal = intermediateRotation.rotate(Point3(acc_omegaIMU(1:3))); +finalPosition = Point3(initialPoseGlobal.translation ... 
+ initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; diff --git a/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m b/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m index e51b622b0..a704342ae 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m @@ -6,16 +6,16 @@ import gtsam.*; % Rotation: R^1_2 body2in1 = Rot3.Expmap(omega1Body * deltaT); % Velocity 2 in frame 1: v^1_2 = R^1_2 v^2_2 -velocity2inertial = body2in1.rotate(Point3(velocity2Body)).vector; +velocity2inertial = body2in1.rotate(Point3(velocity2Body)); % Acceleration: a^1 = (v^1_2 - v^1_1)/dt accelBody1 = (velocity2inertial - velocity1Body) / deltaT; % Velocity 1 in frame W: v^W_1 = R^W_1 v^1_1 -initialVelocityGlobal = initialPose.rotation().rotate(Point3(velocity1Body)).vector; +initialVelocityGlobal = initialPose.rotation().rotate(Point3(velocity1Body)); % Acceleration in frame W: a^W = R^W_1 a^1 -accelGlobal = initialPose.rotation().rotate(Point3(accelBody1)).vector; +accelGlobal = initialPose.rotation().rotate(Point3(accelBody1)); -finalPosition = Point3(initialPose.translation.vector + initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); +finalPosition = Point3(initialPose.translation + initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; finalRotation = initialPose.rotation.compose(body2in1); finalPose = Pose3(finalRotation, finalPosition); diff --git a/matlab/unstable_examples/+imuSimulator/test1onestep.m b/matlab/unstable_examples/+imuSimulator/test1onestep.m index 883569849..cb66d23d6 100644 --- a/matlab/unstable_examples/+imuSimulator/test1onestep.m +++ b/matlab/unstable_examples/+imuSimulator/test1onestep.m @@ -10,7 +10,7 @@ omega = [0;0;0.1]; velocity = [1;0;0]; R = Rot3.Expmap(omega * deltaT); -velocity2body = R.unrotate(Point3(velocity)).vector; +velocity2body = R.unrotate(Point3(velocity)); acc_omegaExpected = [-0.01; 0; 0; 0; 0; 0.1]; acc_omegaActual = imuSimulator.calculateIMUMeasurement(omega, omega, velocity, velocity2body, deltaT, Pose3(Rot3, Point3([1;0;0]))); disp('IMU measurement discrepancy:'); @@ -40,7 +40,7 @@ disp(acc_omegaActual - acc_omegaExpected); initialPose = Pose3; initialVelocity = velocity; finalPoseExpected = Pose3.Expmap([ omega; velocity ] * deltaT); -finalVelocityExpected = finalPoseExpected.rotation.rotate(Point3(velocity)).vector; +finalVelocityExpected = finalPoseExpected.rotation.rotate(Point3(velocity)); [ finalPoseActual, finalVelocityActual ] = imuSimulator.integrateTrajectory(initialPose, omega, velocity, velocity, deltaT); disp('Final pose discrepancy:'); disp(finalPoseExpected.between(finalPoseActual).matrix); diff --git a/matlab/unstable_examples/+imuSimulator/test2constglobal.m b/matlab/unstable_examples/+imuSimulator/test2constglobal.m index 19956d08a..6ab35d50b 100644 --- a/matlab/unstable_examples/+imuSimulator/test2constglobal.m +++ b/matlab/unstable_examples/+imuSimulator/test2constglobal.m @@ -21,12 +21,12 @@ positions = zeros(3, length(times)+1); i = 2; for t = times - velocity1body = currentPoseGlobal.rotation.unrotate(Point3(currentVelocityGlobal)).vector; + velocity1body = currentPoseGlobal.rotation.unrotate(Point3(currentVelocityGlobal)); R = Rot3.Expmap(omega * deltaT); - velocity2body = 
currentPoseGlobal.rotation.compose(R).unrotate(Point3(currentVelocityGlobal)).vector; + velocity2body = currentPoseGlobal.rotation.compose(R).unrotate(Point3(currentVelocityGlobal)); [ currentPoseGlobal, currentVelocityGlobal ] = imuSimulator.integrateTrajectory(currentPoseGlobal, omega, velocity1body, velocity2body, deltaT); - positions(:,i) = currentPoseGlobal.translation.vector; + positions(:,i) = currentPoseGlobal.translation; i = i + 1; end diff --git a/matlab/unstable_examples/+imuSimulator/test3constbody.m b/matlab/unstable_examples/+imuSimulator/test3constbody.m index b3ee2edfc..8ee14ab78 100644 --- a/matlab/unstable_examples/+imuSimulator/test3constbody.m +++ b/matlab/unstable_examples/+imuSimulator/test3constbody.m @@ -26,27 +26,27 @@ currentPoseGlobal = Pose3; currentVelocityGlobal = velocity; % Initial state (IMU) currentPoseGlobalIMU = Pose3; %currentPoseGlobal.compose(IMUinBody); -%currentVelocityGlobalIMU = IMUinBody.rotation.unrotate(Point3(velocity)).vector; % no Coriolis here? +%currentVelocityGlobalIMU = IMUinBody.rotation.unrotate(Point3(velocity)); % no Coriolis here? currentVelocityGlobalIMU = IMUinBody.rotation.unrotate( ... - Point3(velocity + cross(omega, IMUinBody.translation.vector))).vector; + Point3(velocity + cross(omega, IMUinBody.translation))); % Positions % body trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Expected IMU trajectory (from body trajectory and lever arm) positionsIMUe = zeros(3, length(times)+1); -positionsIMUe(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; +positionsIMUe(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation; posesIMUe(1).p = positionsIMUe(:,1); posesIMUe(1).R = poses(1).R * IMUinBody.rotation.matrix; % Integrated IMU trajectory (from IMU measurements) positionsIMU = zeros(3, length(times)+1); -positionsIMU(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; +positionsIMU(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation; posesIMU(1).p = positionsIMU(:,1); posesIMU(1).R = IMUinBody.compose(currentPoseGlobalIMU).rotation.matrix; @@ -62,9 +62,9 @@ for t = times currentPoseGlobalIMU, currentVelocityGlobalIMU, acc_omega, deltaT); % Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUe(:,i) = currentPoseGlobal.translation.vector + currentPoseGlobal.rotation.matrix * IMUinBody.translation.vector; - positionsIMU(:,i) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUe(:,i) = currentPoseGlobal.translation + currentPoseGlobal.rotation.matrix * IMUinBody.translation; + positionsIMU(:,i) = IMUinBody.compose(currentPoseGlobalIMU).translation; poses(i).p = positions(:,i); posesIMUe(i).p = positionsIMUe(:,i); diff --git a/matlab/unstable_examples/+imuSimulator/test4circle.m b/matlab/unstable_examples/+imuSimulator/test4circle.m index 22ee175dd..ab2c546db 100644 --- a/matlab/unstable_examples/+imuSimulator/test4circle.m +++ b/matlab/unstable_examples/+imuSimulator/test4circle.m @@ -34,19 +34,19 @@ currentVelocityGlobalIMUbody = currentVelocityGlobal; %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = 
currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Trajectory estimate (integrated in the navigation frame) positionsIMUnav = zeros(3, length(times)+1); -positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation; posesIMUnav(1).p = positionsIMUnav(:,1); posesIMUnav(1).R = poses(1).R; % Trajectory estimate (integrated in the body frame) positionsIMUbody = zeros(3, length(times)+1); -positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation; posesIMUbody(1).p = positionsIMUbody(:,1); posesIMUbody(1).R = poses(1).R; @@ -72,9 +72,9 @@ for t = times currentPoseGlobalIMUnav, currentVelocityGlobalIMUnav, acc_omega, deltaT); %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation.vector; - positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation; + positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation; % - poses(i).p = positions(:,i); posesIMUbody(i).p = positionsIMUbody(:,i); diff --git a/matlab/unstable_examples/FlightCameraTransformIMU.m b/matlab/unstable_examples/FlightCameraTransformIMU.m index 9a8a27344..d2f2bc34d 100644 --- a/matlab/unstable_examples/FlightCameraTransformIMU.m +++ b/matlab/unstable_examples/FlightCameraTransformIMU.m @@ -120,7 +120,7 @@ for i=1:size(trajectory)-1 end % current ground-truth position indicator - h_cursor = plot3(a1, pose_t.x,pose_t.y,pose_t.z,'*'); + h_cursor = plot3(a1, pose_t(1),pose_t(2),pose_t(3),'*'); camera_pose = pose.compose(camera_transform); @@ -198,7 +198,7 @@ for i=1:size(trajectory)-1 if ~result.exists(lKey) p = landmarks.atPoint3(lKey); n = normrnd(0,landmark_noise,3,1); - noisy_landmark = Point3(p.x()+n(1),p.y()+n(2),p.z()+n(3)); + noisy_landmark = p + n; initial.insert(lKey, noisy_landmark); % and add a prior since its position is known @@ -245,32 +245,33 @@ for i=1:size(trajectory)-1 initial = Values; fg = NonlinearFactorGraph; - currentVelocityGlobal = result.at(currentVelKey); - currentBias = result.at(currentBiasKey); + currentVelocityGlobal = result.atPoint3(currentVelKey); + currentBias = result.atConstantBias(currentBiasKey); %% plot current pose result - isam_pose = result.at(xKey); + isam_pose = result.atPose3(xKey); pose_t = isam_pose.translation(); if exist('h_result','var') delete(h_result); end - h_result = plot3(a1, pose_t.x,pose_t.y,pose_t.z,'^b', 'MarkerSize', 10); + h_result = plot3(a1, pose_t(1),pose_t(2),pose_t(3),'^b', 'MarkerSize', 10); title(a1, sprintf('Step %d', i)); if exist('h_text1(1)', 'var') delete(h_text1(1)); % delete(h_text2(1)); end - ty = result.at(transformKey).translation().y(); - K_estimate = result.at(calibrationKey); + t = result.atPose3(transformKey).translation(); + ty = t(2); + K_estimate = result.atCal3_S2(calibrationKey); K_errors = K.localCoordinates(K_estimate); - camera_transform_estimate = result.at(transformKey); + camera_transform_estimate = result.atPose3(transformKey); - fx = result.at(calibrationKey).fx(); - fy = result.at(calibrationKey).fy(); + fx = result.atCal3_S2(calibrationKey).fx(); + fy = result.atCal3_S2(calibrationKey).fy(); % h_text1 = text(-600,0,0,sprintf('Y-Transform(0.0): %0.2f',ty)); text(0,1300,0,sprintf('Calibration and IMU-cam 
transform errors:')); @@ -304,7 +305,7 @@ for i=1:size(trajectory)-1 end %% print out final camera transform and write video -result.at(transformKey); +result.atPose3(transformKey); if(write_video) close(videoObj); end \ No newline at end of file diff --git a/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m b/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m index 4557d711f..9796a9737 100644 --- a/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m +++ b/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m @@ -53,7 +53,7 @@ y_shift = Point3(0,0.5,0); % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); @@ -134,7 +134,7 @@ for i=1:steps end if i == 2 fg.add(PriorFactorPose3(2, Pose3(Rot3(),Point3(1,0,0)),pose_cov)); - fg.add(PriorFactorLieVector(currentVelKey, currentVelocityGlobal, sigma_init_v)); + fg.add(PriorFactorVector(currentVelKey, currentVelocityGlobal, sigma_init_v)); fg.add(PriorFactorConstantBias(currentBiasKey, currentBias, sigma_init_b)); end if i > 1 @@ -144,7 +144,7 @@ for i=1:steps step = move_circle; end - initial.insert(i,result.at(i-1).compose(step)); + initial.insert(i,result.atPose3(i-1).compose(step)); fg.add(BetweenFactorPose3(i-1,i, step, covariance)); deltaT = 1; @@ -158,10 +158,13 @@ for i=1:steps [ currentIMUPoseGlobal, currentVelocityGlobal ] = imuSimulator.integrateTrajectory( ... currentIMUPoseGlobal, omega, velocity, velocity, deltaT); - - currentSummarizedMeasurement = gtsam.ImuFactorPreintegratedMeasurements( ... - currentBias, IMU_metadata.AccelerometerSigma.^2 * eye(3), ... - IMU_metadata.GyroscopeSigma.^2 * eye(3), IMU_metadata.IntegrationSigma.^2 * eye(3)); + + params = gtsam.PreintegrationParams.MakeSharedD(9.81); + params.setAccelerometerCovariance(IMU_metadata.AccelerometerSigma.^2 * eye(3)); + params.setGyroscopeCovariance(IMU_metadata.GyroscopeSigma.^2 * eye(3)); + params.setIntegrationCovariance(IMU_metadata.IntegrationSigma.^2 * eye(3)); + currentSummarizedMeasurement = gtsam.PreintegratedImuMeasurements( ... + params, currentBias); accMeas = acc_omega(1:3)-g; omegaMeas = acc_omega(4:6); @@ -171,7 +174,7 @@ for i=1:steps fg.add(ImuFactor( ... i-1, currentVelKey-1, ... i, currentVelKey, ... - currentBiasKey, currentSummarizedMeasurement, g, w_coriolis)); + currentBiasKey, currentSummarizedMeasurement)); % Bias evolution as given in the IMU metadata fg.add(BetweenFactorConstantBias(currentBiasKey-1, currentBiasKey, imuBias.ConstantBias(zeros(3,1), zeros(3,1)), ... 
@@ -204,8 +207,8 @@ for i=1:steps initial = Values; fg = NonlinearFactorGraph; - currentVelocityGlobal = isam.calculateEstimate(currentVelKey); - currentBias = isam.calculateEstimate(currentBiasKey); + currentVelocityGlobal = isam.calculateEstimate().atVector(currentVelKey); + currentBias = isam.calculateEstimate().atConstantBias(currentBiasKey); %% Compute some marginals marginal = isam.marginalCovariance(calibrationKey); @@ -249,10 +252,10 @@ for i=1:steps gtsam.plotPose3(currentIMUPoseGlobal, [], 2); %% plot results - result_camera_transform = result.at(transformKey); + result_camera_transform = result.atPose3(transformKey); for j=1:i - gtsam.plotPose3(result.at(j),[],0.5); - gtsam.plotPose3(result.at(j).compose(result_camera_transform),[],0.5); + gtsam.plotPose3(result.atPose3(j),[],0.5); + gtsam.plotPose3(result.atPose3(j).compose(result_camera_transform),[],0.5); end xlabel('x (m)'); @@ -265,14 +268,15 @@ for i=1:steps % axis equal for l=101:100+nrPoints - plotPoint3(result.at(l),'g'); + plotPoint3(result.atPoint3(l),'g'); end - ty = result.at(transformKey).translation().y(); - fx = result.at(calibrationKey).fx(); - fy = result.at(calibrationKey).fy(); - px = result.at(calibrationKey).px(); - py = result.at(calibrationKey).py(); + t = result.atPose3(transformKey).translation(); + ty = t(2); + fx = result.atCal3_S2(calibrationKey).fx(); + fy = result.atCal3_S2(calibrationKey).fy(); + px = result.atCal3_S2(calibrationKey).px(); + py = result.atCal3_S2(calibrationKey).py(); text(1,5,5,sprintf('Y-Transform(0.0): %0.2f',ty)); text(1,5,3,sprintf('fx(900): %.0f',fx)); text(1,5,1,sprintf('fy(900): %.0f',fy)); @@ -342,10 +346,10 @@ end fprintf('Cheirality Exception count: %d\n', cheirality_exception_count); disp('Transform after optimization'); -result.at(transformKey) +result.atPose3(transformKey) disp('Calibration after optimization'); -result.at(calibrationKey) +result.atCal3_S2(calibrationKey) disp('Bias after optimization'); currentBias diff --git a/matlab/unstable_examples/plot_projected_landmarks.m b/matlab/unstable_examples/plot_projected_landmarks.m index 6b8101844..30e222016 100644 --- a/matlab/unstable_examples/plot_projected_landmarks.m +++ b/matlab/unstable_examples/plot_projected_landmarks.m @@ -25,9 +25,9 @@ for i = 0:measurement_keys.size-1 key_index = gtsam.symbolIndex(key); p = landmarks.atPoint3(gtsam.symbol('l',key_index)); - x(i+1) = p.x; - y(i+1) = p.y; - z(i+1) = p.z; + x(i+1) = p(1); + y(i+1) = p(2); + z(i+1) = p(3); end diff --git a/matlab/unstable_examples/project_landmarks.m b/matlab/unstable_examples/project_landmarks.m index aaccc9248..3bccef94b 100644 --- a/matlab/unstable_examples/project_landmarks.m +++ b/matlab/unstable_examples/project_landmarks.m @@ -11,9 +11,9 @@ function [ measurements ] = project_landmarks( pose, landmarks, K ) z = camera.project(landmarks.atPoint3(symbol('l',i))); % check bounding box - if z.x < 0 || z.x > 1280 + if z(1) < 0 || z(1) > 1280 continue - elseif z.y < 0 || z.y > 960 + elseif z(2) < 0 || z(2) > 960 continue end From b0d100b8fb735f09c994305c64961850725d86f6 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 17 Aug 2020 14:38:00 -0400 Subject: [PATCH 084/142] Python supplementary files --- python/CMakeLists.txt | 104 +++++++++++++++++++++++ python/README.md | 87 +++++++++++++++++++ python/gtsam/gtsam.tpl | 30 +++++++ python/gtsam/preamble.h | 4 + python/gtsam/specializations.h | 4 + python/gtsam_unstable/gtsam_unstable.tpl | 32 +++++++ python/gtsam_unstable/preamble.h | 0 python/gtsam_unstable/specializations.h | 0 
python/requirements.txt | 2 + python/setup.py.in | 49 +++++++++++ 10 files changed, 312 insertions(+) create mode 100644 python/CMakeLists.txt create mode 100644 python/README.md create mode 100644 python/gtsam/gtsam.tpl create mode 100644 python/gtsam/preamble.h create mode 100644 python/gtsam/specializations.h create mode 100644 python/gtsam_unstable/gtsam_unstable.tpl create mode 100644 python/gtsam_unstable/preamble.h create mode 100644 python/gtsam_unstable/specializations.h create mode 100644 python/requirements.txt create mode 100644 python/setup.py.in diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt new file mode 100644 index 000000000..81a6c6c4d --- /dev/null +++ b/python/CMakeLists.txt @@ -0,0 +1,104 @@ +set(GTSAM_PYTHON_BUILD_DIRECTORY ${PROJECT_BINARY_DIR}/python) + +if(GTSAM_BUILD_PYTHON) + # Generate setup.py. + file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS) + configure_file(${PROJECT_SOURCE_DIR}/python/setup.py.in + ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py) + + set(WRAP_USE_CUSTOM_PYTHON_LIBRARY ${GTSAM_USE_CUSTOM_PYTHON_LIBRARY}) + set(WRAP_PYTHON_VERSION ${GTSAM_PYTHON_VERSION}) + + include(PybindWrap) + + add_custom_target(gtsam_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam/gtsam.i") + add_custom_target(gtsam_unstable_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i") + + # ignoring the non-concrete types (type aliases) + set(ignore + gtsam::Point2 + gtsam::Point3 + gtsam::LieVector + gtsam::LieMatrix + gtsam::ISAM2ThresholdMapValue + gtsam::FactorIndices + gtsam::FactorIndexSet + gtsam::BetweenFactorPose3s + gtsam::Point2Vector + gtsam::Pose3Vector + gtsam::KeyVector) + + pybind_wrap(gtsam_py # target + ${PROJECT_SOURCE_DIR}/gtsam/gtsam.i # interface_header + "gtsam.cpp" # generated_cpp + "gtsam" # module_name + "gtsam" # top_namespace + "${ignore}" # ignore_classes + ${PROJECT_SOURCE_DIR}/python/gtsam/gtsam.tpl + gtsam # libs + "gtsam;gtsam_header" # dependencies + ON # use_boost + ) + + set_target_properties(gtsam_py PROPERTIES + INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" + INSTALL_RPATH_USE_LINK_PATH TRUE + OUTPUT_NAME "gtsam" + LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam" + DEBUG_POSTFIX "" # Otherwise you will have a wrong name + RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name + ) + + set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam) + + # Symlink all tests .py files to build folder. 
+ create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam" + "${GTSAM_MODULE_PATH}") + + if(GTSAM_UNSTABLE_BUILD_PYTHON) + set(ignore + gtsam::Point2 + gtsam::Point3 + gtsam::LieVector + gtsam::LieMatrix + gtsam::ISAM2ThresholdMapValue + gtsam::FactorIndices + gtsam::FactorIndexSet + gtsam::BetweenFactorPose3s + gtsam::Point2Vector + gtsam::Pose3Vector + gtsam::KeyVector + gtsam::FixedLagSmootherKeyTimestampMapValue) + pybind_wrap(gtsam_unstable_py # target + ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header + "gtsam_unstable.cpp" # generated_cpp + "gtsam_unstable" # module_name + "gtsam" # top_namespace + "${ignore}" # ignore_classes + ${PROJECT_SOURCE_DIR}/python/gtsam_unstable/gtsam_unstable.tpl + gtsam_unstable # libs + "gtsam_unstable;gtsam_unstable_header" # dependencies + ON # use_boost + ) + + set_target_properties(gtsam_unstable_py PROPERTIES + INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" + INSTALL_RPATH_USE_LINK_PATH TRUE + OUTPUT_NAME "gtsam_unstable" + LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable" + DEBUG_POSTFIX "" # Otherwise you will have a wrong name + RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name + ) + + set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable) + + # Symlink all tests .py files to build folder. + create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable" + "${GTSAM_UNSTABLE_MODULE_PATH}") + endif() + + set(GTSAM_PYTHON_INSTALL_TARGET python-install) + add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} + COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py install + WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}) +endif() diff --git a/python/README.md b/python/README.md new file mode 100644 index 000000000..e418cbede --- /dev/null +++ b/python/README.md @@ -0,0 +1,87 @@ +# README + +# Python Wrapper + +This is the Python wrapper around the GTSAM C++ library. We use Cython to generate the bindings to the underlying C++ code. + +## Requirements + +- If you want to build the GTSAM python library for a specific python version (eg 3.6), + use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used. +- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment), + then the environment should be active while building GTSAM. +- This wrapper needs `Cython(>=0.25.2)`, `backports_abc(>=0.5)`, and `numpy(>=1.11.0)`. These can be installed as follows: + + ```bash + pip install -r /cython/requirements.txt + ``` + +- For compatibility with GTSAM's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git), + named `gtsam_eigency`, to interface between C++'s Eigen and Python's numpy. + +## Install + +- Run cmake with the `GTSAM_INSTALL_CYTHON_TOOLBOX` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory defined by `GTSAM_CYTHON_INSTALL_PATH`, which is by default `/cython` in Release mode and `/cython` for other modes. + +- Build GTSAM and the wrapper with `make`. + +- To install, simply run `make python-install`. + - The same command can be used to install into a virtual environment if it is active. + - **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory. 
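The configure/build/install sequence described in the bullets above can be summarized in one shell sketch. The flag and target names (`GTSAM_BUILD_PYTHON`, `GTSAM_PYTHON_VERSION`, `python-install`) are taken from the CMake code and README in this patch; the build directory name, Python version, and job count are illustrative assumptions only.

```bash
# Illustrative sketch: configure, build, and install the pybind11-based Python wrapper.
mkdir build && cd build
cmake .. -DGTSAM_BUILD_PYTHON=ON -DGTSAM_PYTHON_VERSION=3.6   # version flag is optional
make -j4              # builds GTSAM plus the wrapped Python module
make python-install   # runs the generated setup.py from the build tree
```

If the module should go into a virtualenv or conda environment, that environment has to be active both when `cmake` is run and when `make python-install` is invoked, as noted in the requirements above.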
+
+- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly.
+
+## Unit Tests
+
+The Cython toolbox also has a small set of unit tests located in the
+test directory. To run them:
+
+  ```bash
+  cd 
+  python -m unittest discover
+  ```
+
+## Utils
+
+TODO
+
+## Examples
+
+TODO
+
+## Writing Your Own Scripts
+
+See the tests for examples.
+
+### Some Important Notes:
+
+- Vector/Matrix:
+
+  - GTSAM expects double-precision floating point vectors and matrices.
+    Hence, you should pass numpy matrices with `dtype=float`, or `float64`.
+  - Also, GTSAM expects _column-major_ matrices, unlike the default storage
+    scheme in numpy. Hence, you should pass column-major matrices to GTSAM using
+    the flag order='F'. And you always get column-major matrices back.
+    For more details, see [this link](https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed).
+  - Passing row-major matrices of a different dtype, e.g. `int`, will also work,
+    as the wrapper converts them to column-major and dtype float for you,
+    using numpy.array.astype(float, order='F', copy=False).
+    However, this will result in a copy if your matrix is not in the expected type
+    and storage order.
+
+- Inner namespace: Classes in an inner namespace will be prefixed by \_ in Python.
+
+  Examples: `noiseModel_Gaussian`, `noiseModel_mEstimator_Tukey`
+
+- Casting from a base class to a derived class must be done explicitly.
+
+  Examples:
+
+  ```python
+  noiseBase = factor.noiseModel()
+  noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
+  ```
+
+## Wrapping Custom GTSAM-based Project
+
+Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python).
diff --git a/python/gtsam/gtsam.tpl b/python/gtsam/gtsam.tpl
new file mode 100644
index 000000000..de3efbee0
--- /dev/null
+++ b/python/gtsam/gtsam.tpl
@@ -0,0 +1,30 @@
+{include_boost}
+
+#include <pybind11/eigen.h>
+#include <pybind11/stl_bind.h>
+#include <pybind11/pybind11.h>
+#include "gtsam/base/serialization.h"
+#include "gtsam/nonlinear/utilities.h"  // for RedirectCout.
+
+{includes}
+#include <boost/serialization/export.hpp>
+
+{boost_class_export}
+
+{hoder_type}
+
+#include "python/gtsam/preamble.h"
+
+using namespace std;
+
+namespace py = pybind11;
+
+PYBIND11_MODULE({module_name}, m_) {{
+    m_.doc() = "pybind11 wrapper of {module_name}";
+
+{wrapped_namespace}
+
+#include "python/gtsam/specializations.h"
+
+}}
+
diff --git a/python/gtsam/preamble.h b/python/gtsam/preamble.h
new file mode 100644
index 000000000..f8e5804d0
--- /dev/null
+++ b/python/gtsam/preamble.h
@@ -0,0 +1,4 @@
+PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Key>);
+PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Point2, Eigen::aligned_allocator<gtsam::Point2> >);
+PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Pose3>);
+PYBIND11_MAKE_OPAQUE(std::vector<boost::shared_ptr<gtsam::BetweenFactor<gtsam::Pose3> > >);
diff --git a/python/gtsam/specializations.h b/python/gtsam/specializations.h
new file mode 100644
index 000000000..c1114059b
--- /dev/null
+++ b/python/gtsam/specializations.h
@@ -0,0 +1,4 @@
+py::bind_vector<std::vector<gtsam::Key> >(m_, "KeyVector");
+py::bind_vector<std::vector<gtsam::Point2, Eigen::aligned_allocator<gtsam::Point2> > >(m_, "Point2Vector");
+py::bind_vector<std::vector<gtsam::Pose3> >(m_, "Pose3Vector");
+py::bind_vector<std::vector<boost::shared_ptr<gtsam::BetweenFactor<gtsam::Pose3> > > >(m_, "BetweenFactorPose3s");
\ No newline at end of file
diff --git a/python/gtsam_unstable/gtsam_unstable.tpl b/python/gtsam_unstable/gtsam_unstable.tpl
new file mode 100644
index 000000000..f8d2f231e
--- /dev/null
+++ b/python/gtsam_unstable/gtsam_unstable.tpl
@@ -0,0 +1,32 @@
+{include_boost}
+
+#include <pybind11/eigen.h>
+#include <pybind11/stl_bind.h>
+#include <pybind11/pybind11.h>
+#include "gtsam/base/serialization.h"
+#include "gtsam/nonlinear/utilities.h"  // for RedirectCout.
+ +{includes} +#include + +{boost_class_export} + +{hoder_type} + +#include "python/gtsam_unstable/preamble.h" + +using namespace std; + +namespace py = pybind11; + +PYBIND11_MODULE({module_name}, m_) {{ + m_.doc() = "pybind11 wrapper of {module_name}"; + + py::module::import("gtsam"); + +{wrapped_namespace} + +#include "python/gtsam_unstable/specializations.h" + +}} + diff --git a/python/gtsam_unstable/preamble.h b/python/gtsam_unstable/preamble.h new file mode 100644 index 000000000..e69de29bb diff --git a/python/gtsam_unstable/specializations.h b/python/gtsam_unstable/specializations.h new file mode 100644 index 000000000..e69de29bb diff --git a/python/requirements.txt b/python/requirements.txt new file mode 100644 index 000000000..481d27d8e --- /dev/null +++ b/python/requirements.txt @@ -0,0 +1,2 @@ +numpy>=1.11.0 +pyparsing>=2.4.2 diff --git a/python/setup.py.in b/python/setup.py.in new file mode 100644 index 000000000..55431a9ad --- /dev/null +++ b/python/setup.py.in @@ -0,0 +1,49 @@ +import os +import sys + +try: + from setuptools import setup, find_packages +except ImportError: + from distutils.core import setup, find_packages + +packages = find_packages(where=".") +print("PACKAGES: ", packages) +package_data = { + '': [ + './*.so', + './*.dll', + ] +} + +# Cleaner to read in the contents rather than copy them over. +readme_contents = open("${PROJECT_SOURCE_DIR}/README.md").read() + +setup( + name='gtsam', + description='Georgia Tech Smoothing And Mapping library', + url='https://gtsam.org/', + version='${GTSAM_VERSION_STRING}', # https://www.python.org/dev/peps/pep-0440/ + author='Frank Dellaert et. al.', + author_email='frank.dellaert@gtsam.org', + license='Simplified BSD license', + keywords='slam sam robotics localization mapping optimization', + long_description=readme_contents, + # https://pypi.org/pypi?%3Aaction=list_classifiers + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Education', + 'Intended Audience :: Developers', + 'Intended Audience :: Science/Research', + 'Operating System :: MacOS', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: POSIX', + 'License :: OSI Approved :: BSD License', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 3', + ], + packages=packages, + package_data=package_data, + test_suite="gtsam.tests", + install_requires=["numpy"], + zip_safe=False, +) From c7eb02969abc42696db93efc37988519d5a7f7a0 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 17 Aug 2020 14:44:14 -0400 Subject: [PATCH 085/142] Remove old wrap --- wrap/Argument.cpp | 316 --- wrap/Argument.h | 242 -- wrap/CMakeLists.txt | 45 - wrap/Class.cpp | 897 ------- wrap/Class.h | 315 --- wrap/Constructor.cpp | 160 -- wrap/Constructor.h | 99 - wrap/Deconstructor.cpp | 73 - wrap/Deconstructor.h | 61 - wrap/FileWriter.cpp | 52 - wrap/FileWriter.h | 35 - wrap/ForwardDeclaration.h | 36 - wrap/FullyOverloadedFunction.cpp | 34 - wrap/FullyOverloadedFunction.h | 147 -- wrap/Function.cpp | 78 - wrap/Function.h | 73 - wrap/GlobalFunction.cpp | 227 -- wrap/GlobalFunction.h | 148 -- wrap/Method.cpp | 205 -- wrap/Method.h | 74 - wrap/MethodBase.cpp | 135 - wrap/MethodBase.h | 70 - wrap/Module.cpp | 649 ----- wrap/Module.h | 95 - wrap/OverloadedFunction.h | 140 -- wrap/Qualified.cpp | 5 - wrap/Qualified.h | 370 --- wrap/README.md | 27 - wrap/ReturnType.cpp | 101 - wrap/ReturnType.h | 89 - wrap/ReturnValue.cpp | 102 - wrap/ReturnValue.h | 126 - wrap/StaticMethod.cpp | 151 -- wrap/StaticMethod.h | 51 - 
wrap/Template.h | 146 -- wrap/TemplateInstantiationTypedef.cpp | 69 - wrap/TemplateInstantiationTypedef.h | 36 - wrap/TemplateMethod.cpp | 53 - wrap/TemplateMethod.h | 42 - wrap/TemplateSubstitution.h | 76 - wrap/TypeAttributesTable.cpp | 93 - wrap/TypeAttributesTable.h | 71 - wrap/TypedefPair.h | 28 - wrap/matlab.h | 443 ---- wrap/python/pybind11/.appveyor.yml | 70 - wrap/python/pybind11/.gitignore | 38 - wrap/python/pybind11/.gitmodules | 3 - wrap/python/pybind11/.readthedocs.yml | 3 - wrap/python/pybind11/.travis.yml | 280 --- wrap/python/pybind11/CMakeLists.txt | 157 -- wrap/python/pybind11/CONTRIBUTING.md | 49 - wrap/python/pybind11/ISSUE_TEMPLATE.md | 17 - wrap/python/pybind11/LICENSE | 29 - wrap/python/pybind11/MANIFEST.in | 2 - wrap/python/pybind11/README.md | 129 - wrap/python/pybind11/docs/Doxyfile | 20 - .../pybind11/docs/_static/theme_overrides.css | 11 - .../pybind11/docs/advanced/cast/chrono.rst | 81 - .../pybind11/docs/advanced/cast/custom.rst | 91 - .../pybind11/docs/advanced/cast/eigen.rst | 310 --- .../docs/advanced/cast/functional.rst | 109 - .../pybind11/docs/advanced/cast/index.rst | 42 - .../pybind11/docs/advanced/cast/overview.rst | 165 -- .../pybind11/docs/advanced/cast/stl.rst | 240 -- .../pybind11/docs/advanced/cast/strings.rst | 305 --- .../python/pybind11/docs/advanced/classes.rst | 1125 --------- .../pybind11/docs/advanced/embedding.rst | 261 -- .../pybind11/docs/advanced/exceptions.rst | 142 -- .../pybind11/docs/advanced/functions.rst | 507 ---- wrap/python/pybind11/docs/advanced/misc.rst | 306 --- .../pybind11/docs/advanced/pycpp/index.rst | 13 - .../pybind11/docs/advanced/pycpp/numpy.rst | 386 --- .../pybind11/docs/advanced/pycpp/object.rst | 170 -- .../docs/advanced/pycpp/utilities.rst | 144 -- .../pybind11/docs/advanced/smart_ptrs.rst | 173 -- wrap/python/pybind11/docs/basics.rst | 293 --- wrap/python/pybind11/docs/benchmark.py | 88 - wrap/python/pybind11/docs/benchmark.rst | 97 - wrap/python/pybind11/docs/changelog.rst | 1158 --------- wrap/python/pybind11/docs/classes.rst | 521 ---- wrap/python/pybind11/docs/compiling.rst | 289 --- wrap/python/pybind11/docs/conf.py | 332 --- wrap/python/pybind11/docs/faq.rst | 297 --- wrap/python/pybind11/docs/index.rst | 47 - wrap/python/pybind11/docs/intro.rst | 93 - wrap/python/pybind11/docs/limitations.rst | 20 - wrap/python/pybind11/docs/pybind11-logo.png | Bin 58510 -> 0 bytes .../docs/pybind11_vs_boost_python1.png | Bin 44653 -> 0 bytes .../docs/pybind11_vs_boost_python1.svg | 427 ---- .../docs/pybind11_vs_boost_python2.png | Bin 41121 -> 0 bytes .../docs/pybind11_vs_boost_python2.svg | 427 ---- wrap/python/pybind11/docs/reference.rst | 117 - wrap/python/pybind11/docs/release.rst | 25 - wrap/python/pybind11/docs/requirements.txt | 1 - wrap/python/pybind11/docs/upgrade.rst | 404 --- wrap/python/pybind11/include/pybind11/attr.h | 493 ---- .../pybind11/include/pybind11/buffer_info.h | 108 - wrap/python/pybind11/include/pybind11/cast.h | 2128 ---------------- .../python/pybind11/include/pybind11/chrono.h | 162 -- .../python/pybind11/include/pybind11/common.h | 2 - .../pybind11/include/pybind11/complex.h | 65 - .../pybind11/include/pybind11/detail/class.h | 623 ----- .../pybind11/include/pybind11/detail/common.h | 807 ------ .../pybind11/include/pybind11/detail/descr.h | 100 - .../pybind11/include/pybind11/detail/init.h | 335 --- .../include/pybind11/detail/internals.h | 291 --- .../pybind11/include/pybind11/detail/typeid.h | 55 - wrap/python/pybind11/include/pybind11/eigen.h | 607 ----- 
wrap/python/pybind11/include/pybind11/embed.h | 200 -- wrap/python/pybind11/include/pybind11/eval.h | 117 - .../pybind11/include/pybind11/functional.h | 94 - .../pybind11/include/pybind11/iostream.h | 207 -- wrap/python/pybind11/include/pybind11/numpy.h | 1610 ------------ .../pybind11/include/pybind11/operators.h | 168 -- .../pybind11/include/pybind11/options.h | 65 - .../pybind11/include/pybind11/pybind11.h | 2162 ----------------- .../pybind11/include/pybind11/pytypes.h | 1471 ----------- wrap/python/pybind11/include/pybind11/stl.h | 386 --- .../pybind11/include/pybind11/stl_bind.h | 630 ----- wrap/python/pybind11/pybind11/__init__.py | 28 - wrap/python/pybind11/pybind11/__main__.py | 37 - wrap/python/pybind11/pybind11/_version.py | 2 - wrap/python/pybind11/setup.cfg | 12 - wrap/python/pybind11/setup.py | 108 - wrap/python/pybind11/tests/CMakeLists.txt | 239 -- wrap/python/pybind11/tests/conftest.py | 239 -- .../python/pybind11/tests/constructor_stats.h | 276 --- wrap/python/pybind11/tests/local_bindings.h | 64 - wrap/python/pybind11/tests/object.h | 175 -- .../tests/pybind11_cross_module_tests.cpp | 123 - wrap/python/pybind11/tests/pybind11_tests.cpp | 93 - wrap/python/pybind11/tests/pybind11_tests.h | 65 - wrap/python/pybind11/tests/pytest.ini | 16 - wrap/python/pybind11/tests/test_buffers.cpp | 169 -- wrap/python/pybind11/tests/test_buffers.py | 87 - .../pybind11/tests/test_builtin_casters.cpp | 170 -- .../pybind11/tests/test_builtin_casters.py | 342 --- .../pybind11/tests/test_call_policies.cpp | 100 - .../pybind11/tests/test_call_policies.py | 187 -- wrap/python/pybind11/tests/test_callbacks.cpp | 168 -- wrap/python/pybind11/tests/test_callbacks.py | 136 -- wrap/python/pybind11/tests/test_chrono.cpp | 55 - wrap/python/pybind11/tests/test_chrono.py | 107 - wrap/python/pybind11/tests/test_class.cpp | 422 ---- wrap/python/pybind11/tests/test_class.py | 281 --- .../tests/test_cmake_build/CMakeLists.txt | 58 - .../pybind11/tests/test_cmake_build/embed.cpp | 21 - .../installed_embed/CMakeLists.txt | 15 - .../installed_function/CMakeLists.txt | 12 - .../installed_target/CMakeLists.txt | 22 - .../pybind11/tests/test_cmake_build/main.cpp | 6 - .../subdirectory_embed/CMakeLists.txt | 25 - .../subdirectory_function/CMakeLists.txt | 8 - .../subdirectory_target/CMakeLists.txt | 15 - .../pybind11/tests/test_cmake_build/test.py | 5 - .../tests/test_constants_and_functions.cpp | 127 - .../tests/test_constants_and_functions.py | 39 - wrap/python/pybind11/tests/test_copy_move.cpp | 213 -- wrap/python/pybind11/tests/test_copy_move.py | 112 - .../pybind11/tests/test_docstring_options.cpp | 61 - .../pybind11/tests/test_docstring_options.py | 38 - wrap/python/pybind11/tests/test_eigen.cpp | 329 --- wrap/python/pybind11/tests/test_eigen.py | 694 ------ .../pybind11/tests/test_embed/CMakeLists.txt | 41 - .../pybind11/tests/test_embed/catch.cpp | 22 - .../tests/test_embed/external_module.cpp | 23 - .../tests/test_embed/test_interpreter.cpp | 284 --- .../tests/test_embed/test_interpreter.py | 9 - wrap/python/pybind11/tests/test_enum.cpp | 85 - wrap/python/pybind11/tests/test_enum.py | 167 -- wrap/python/pybind11/tests/test_eval.cpp | 91 - wrap/python/pybind11/tests/test_eval.py | 17 - wrap/python/pybind11/tests/test_eval_call.py | 4 - .../python/pybind11/tests/test_exceptions.cpp | 196 -- wrap/python/pybind11/tests/test_exceptions.py | 146 -- .../tests/test_factory_constructors.cpp | 338 --- .../tests/test_factory_constructors.py | 459 ---- .../python/pybind11/tests/test_gil_scoped.cpp | 44 - 
wrap/python/pybind11/tests/test_gil_scoped.py | 80 - wrap/python/pybind11/tests/test_iostream.cpp | 73 - wrap/python/pybind11/tests/test_iostream.py | 214 -- .../tests/test_kwargs_and_defaults.cpp | 102 - .../tests/test_kwargs_and_defaults.py | 147 -- .../pybind11/tests/test_local_bindings.cpp | 101 - .../pybind11/tests/test_local_bindings.py | 226 -- .../tests/test_methods_and_attributes.cpp | 454 ---- .../tests/test_methods_and_attributes.py | 512 ---- wrap/python/pybind11/tests/test_modules.cpp | 98 - wrap/python/pybind11/tests/test_modules.py | 72 - .../tests/test_multiple_inheritance.cpp | 220 -- .../tests/test_multiple_inheritance.py | 349 --- .../pybind11/tests/test_numpy_array.cpp | 309 --- .../python/pybind11/tests/test_numpy_array.py | 421 ---- .../pybind11/tests/test_numpy_dtypes.cpp | 466 ---- .../pybind11/tests/test_numpy_dtypes.py | 310 --- .../pybind11/tests/test_numpy_vectorize.cpp | 89 - .../pybind11/tests/test_numpy_vectorize.py | 196 -- .../pybind11/tests/test_opaque_types.cpp | 67 - .../pybind11/tests/test_opaque_types.py | 46 - .../tests/test_operator_overloading.cpp | 169 -- .../tests/test_operator_overloading.py | 106 - wrap/python/pybind11/tests/test_pickling.cpp | 130 - wrap/python/pybind11/tests/test_pickling.py | 42 - wrap/python/pybind11/tests/test_pytypes.cpp | 296 --- wrap/python/pybind11/tests/test_pytypes.py | 253 -- .../tests/test_sequences_and_iterators.cpp | 353 --- .../tests/test_sequences_and_iterators.py | 171 -- wrap/python/pybind11/tests/test_smart_ptr.cpp | 366 --- wrap/python/pybind11/tests/test_smart_ptr.py | 285 --- wrap/python/pybind11/tests/test_stl.cpp | 284 --- wrap/python/pybind11/tests/test_stl.py | 241 -- .../pybind11/tests/test_stl_binders.cpp | 107 - .../python/pybind11/tests/test_stl_binders.py | 225 -- .../tests/test_tagbased_polymorphic.cpp | 136 -- .../tests/test_tagbased_polymorphic.py | 20 - wrap/python/pybind11/tests/test_union.cpp | 22 - wrap/python/pybind11/tests/test_union.py | 8 - .../pybind11/tests/test_virtual_functions.cpp | 479 ---- .../pybind11/tests/test_virtual_functions.py | 377 --- wrap/python/pybind11/tools/FindCatch.cmake | 57 - wrap/python/pybind11/tools/FindEigen3.cmake | 81 - .../pybind11/tools/FindPythonLibsNew.cmake | 202 -- wrap/python/pybind11/tools/check-style.sh | 70 - wrap/python/pybind11/tools/libsize.py | 38 - wrap/python/pybind11/tools/mkdoc.py | 379 --- .../pybind11/tools/pybind11Config.cmake.in | 104 - .../python/pybind11/tools/pybind11Tools.cmake | 227 -- wrap/spirit.h | 172 -- wrap/tests/CMakeLists.txt | 1 - wrap/tests/expected-cython/geometry.pxd | 153 -- wrap/tests/expected-cython/geometry.pyx | 483 ---- .../tests/expected-python/geometry_python.cpp | 96 - wrap/tests/expected/+gtsam/Point2.m | 101 - wrap/tests/expected/+gtsam/Point3.m | 93 - wrap/tests/expected/.gitignore | 1 - wrap/tests/expected/MyBase.m | 35 - wrap/tests/expected/MyFactorPosePoint2.m | 36 - wrap/tests/expected/MyTemplateMatrix.m | 156 -- wrap/tests/expected/MyTemplatePoint2.m | 156 -- wrap/tests/expected/MyVector12.m | 36 - wrap/tests/expected/MyVector3.m | 36 - wrap/tests/expected/Point2.m | 90 - wrap/tests/expected/Point3.m | 107 - wrap/tests/expected/Test.m | 218 -- wrap/tests/expected/aGlobalFunction.m | 6 - wrap/tests/expected/geometry_wrapper.cpp | 1264 ---------- .../tests/expected/overloadedGlobalFunction.m | 8 - wrap/tests/expected2/+gtsam/Point2.m | 101 - wrap/tests/expected2/+gtsam/Point3.m | 61 - wrap/tests/expected2/MyBase.m | 35 - wrap/tests/expected2/MyFactorPosePoint2.m | 36 - 
wrap/tests/expected2/MyTemplateMatrix.m | 156 -- wrap/tests/expected2/MyTemplatePoint2.m | 156 -- wrap/tests/expected2/MyVector12.m | 36 - wrap/tests/expected2/MyVector3.m | 36 - wrap/tests/expected2/Test.m | 218 -- wrap/tests/expected2/aGlobalFunction.m | 6 - wrap/tests/expected2/geometry_wrapper.cpp | 1230 ---------- .../expected2/overloadedGlobalFunction.m | 8 - wrap/tests/expected_namespaces/+ns1/ClassA.m | 36 - wrap/tests/expected_namespaces/+ns1/ClassB.m | 36 - .../+ns1/aGlobalFunction.m | 6 - .../expected_namespaces/+ns2/+ns3/ClassB.m | 36 - wrap/tests/expected_namespaces/+ns2/ClassA.m | 72 - wrap/tests/expected_namespaces/+ns2/ClassC.m | 36 - .../+ns2/aGlobalFunction.m | 6 - .../+ns2/overloadedGlobalFunction.m | 8 - wrap/tests/expected_namespaces/ClassD.m | 36 - .../testNamespaces_wrapper.cpp | 448 ---- wrap/tests/geometry.h | 140 -- wrap/tests/testArgument.cpp | 131 - wrap/tests/testClass.cpp | 285 --- wrap/tests/testDependencies.h | 8 - wrap/tests/testGlobalFunction.cpp | 55 - wrap/tests/testMemory.m | 7 - wrap/tests/testMethod.cpp | 63 - wrap/tests/testNamespaces.h | 60 - wrap/tests/testReturnValue.cpp | 119 - wrap/tests/testSpirit.cpp | 151 -- wrap/tests/testTemplate.cpp | 78 - wrap/tests/testType.cpp | 137 -- wrap/tests/testWrap.cpp | 505 ---- wrap/utilities.cpp | 148 -- wrap/utilities.h | 136 -- wrap/wrap.cpp | 89 - 285 files changed, 54751 deletions(-) delete mode 100644 wrap/Argument.cpp delete mode 100644 wrap/Argument.h delete mode 100644 wrap/CMakeLists.txt delete mode 100644 wrap/Class.cpp delete mode 100644 wrap/Class.h delete mode 100644 wrap/Constructor.cpp delete mode 100644 wrap/Constructor.h delete mode 100644 wrap/Deconstructor.cpp delete mode 100644 wrap/Deconstructor.h delete mode 100644 wrap/FileWriter.cpp delete mode 100644 wrap/FileWriter.h delete mode 100644 wrap/ForwardDeclaration.h delete mode 100644 wrap/FullyOverloadedFunction.cpp delete mode 100644 wrap/FullyOverloadedFunction.h delete mode 100644 wrap/Function.cpp delete mode 100644 wrap/Function.h delete mode 100644 wrap/GlobalFunction.cpp delete mode 100644 wrap/GlobalFunction.h delete mode 100644 wrap/Method.cpp delete mode 100644 wrap/Method.h delete mode 100644 wrap/MethodBase.cpp delete mode 100644 wrap/MethodBase.h delete mode 100644 wrap/Module.cpp delete mode 100644 wrap/Module.h delete mode 100644 wrap/OverloadedFunction.h delete mode 100644 wrap/Qualified.cpp delete mode 100644 wrap/Qualified.h delete mode 100644 wrap/README.md delete mode 100644 wrap/ReturnType.cpp delete mode 100644 wrap/ReturnType.h delete mode 100644 wrap/ReturnValue.cpp delete mode 100644 wrap/ReturnValue.h delete mode 100644 wrap/StaticMethod.cpp delete mode 100644 wrap/StaticMethod.h delete mode 100644 wrap/Template.h delete mode 100644 wrap/TemplateInstantiationTypedef.cpp delete mode 100644 wrap/TemplateInstantiationTypedef.h delete mode 100644 wrap/TemplateMethod.cpp delete mode 100644 wrap/TemplateMethod.h delete mode 100644 wrap/TemplateSubstitution.h delete mode 100644 wrap/TypeAttributesTable.cpp delete mode 100644 wrap/TypeAttributesTable.h delete mode 100644 wrap/TypedefPair.h delete mode 100644 wrap/matlab.h delete mode 100644 wrap/python/pybind11/.appveyor.yml delete mode 100644 wrap/python/pybind11/.gitignore delete mode 100644 wrap/python/pybind11/.gitmodules delete mode 100644 wrap/python/pybind11/.readthedocs.yml delete mode 100644 wrap/python/pybind11/.travis.yml delete mode 100644 wrap/python/pybind11/CMakeLists.txt delete mode 100644 wrap/python/pybind11/CONTRIBUTING.md delete mode 100644 
wrap/python/pybind11/ISSUE_TEMPLATE.md
delete mode 100644 wrap/python/pybind11/LICENSE
delete mode 100644 wrap/python/pybind11/MANIFEST.in
delete mode 100644 wrap/python/pybind11/README.md
delete mode 100644 wrap/python/pybind11/docs/Doxyfile
delete mode 100644 wrap/python/pybind11/docs/_static/theme_overrides.css
delete mode 100644 wrap/python/pybind11/docs/advanced/cast/chrono.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/cast/custom.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/cast/eigen.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/cast/functional.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/cast/index.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/cast/overview.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/cast/stl.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/cast/strings.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/classes.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/embedding.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/exceptions.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/functions.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/misc.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/pycpp/index.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/pycpp/numpy.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/pycpp/object.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/pycpp/utilities.rst
delete mode 100644 wrap/python/pybind11/docs/advanced/smart_ptrs.rst
delete mode 100644 wrap/python/pybind11/docs/basics.rst
delete mode 100644 wrap/python/pybind11/docs/benchmark.py
delete mode 100644 wrap/python/pybind11/docs/benchmark.rst
delete mode 100644 wrap/python/pybind11/docs/changelog.rst
delete mode 100644 wrap/python/pybind11/docs/classes.rst
delete mode 100644 wrap/python/pybind11/docs/compiling.rst
delete mode 100644 wrap/python/pybind11/docs/conf.py
delete mode 100644 wrap/python/pybind11/docs/faq.rst
delete mode 100644 wrap/python/pybind11/docs/index.rst
delete mode 100644 wrap/python/pybind11/docs/intro.rst
delete mode 100644 wrap/python/pybind11/docs/limitations.rst
delete mode 100644 wrap/python/pybind11/docs/pybind11-logo.png
delete mode 100644 wrap/python/pybind11/docs/pybind11_vs_boost_python1.png
delete mode 100644 wrap/python/pybind11/docs/pybind11_vs_boost_python1.svg
delete mode 100644 wrap/python/pybind11/docs/pybind11_vs_boost_python2.png
delete mode 100644 wrap/python/pybind11/docs/pybind11_vs_boost_python2.svg
delete mode 100644 wrap/python/pybind11/docs/reference.rst
delete mode 100644 wrap/python/pybind11/docs/release.rst
delete mode 100644 wrap/python/pybind11/docs/requirements.txt
delete mode 100644 wrap/python/pybind11/docs/upgrade.rst
delete mode 100644 wrap/python/pybind11/include/pybind11/attr.h
delete mode 100644 wrap/python/pybind11/include/pybind11/buffer_info.h
delete mode 100644 wrap/python/pybind11/include/pybind11/cast.h
delete mode 100644 wrap/python/pybind11/include/pybind11/chrono.h
delete mode 100644 wrap/python/pybind11/include/pybind11/common.h
delete mode 100644 wrap/python/pybind11/include/pybind11/complex.h
delete mode 100644 wrap/python/pybind11/include/pybind11/detail/class.h
delete mode 100644 wrap/python/pybind11/include/pybind11/detail/common.h
delete mode 100644 wrap/python/pybind11/include/pybind11/detail/descr.h
delete mode 100644 wrap/python/pybind11/include/pybind11/detail/init.h
delete mode 100644 wrap/python/pybind11/include/pybind11/detail/internals.h
delete mode 100644 wrap/python/pybind11/include/pybind11/detail/typeid.h
delete mode 100644 wrap/python/pybind11/include/pybind11/eigen.h
delete mode 100644 wrap/python/pybind11/include/pybind11/embed.h
delete mode 100644 wrap/python/pybind11/include/pybind11/eval.h
delete mode 100644 wrap/python/pybind11/include/pybind11/functional.h
delete mode 100644 wrap/python/pybind11/include/pybind11/iostream.h
delete mode 100644 wrap/python/pybind11/include/pybind11/numpy.h
delete mode 100644 wrap/python/pybind11/include/pybind11/operators.h
delete mode 100644 wrap/python/pybind11/include/pybind11/options.h
delete mode 100644 wrap/python/pybind11/include/pybind11/pybind11.h
delete mode 100644 wrap/python/pybind11/include/pybind11/pytypes.h
delete mode 100644 wrap/python/pybind11/include/pybind11/stl.h
delete mode 100644 wrap/python/pybind11/include/pybind11/stl_bind.h
delete mode 100644 wrap/python/pybind11/pybind11/__init__.py
delete mode 100644 wrap/python/pybind11/pybind11/__main__.py
delete mode 100644 wrap/python/pybind11/pybind11/_version.py
delete mode 100644 wrap/python/pybind11/setup.cfg
delete mode 100644 wrap/python/pybind11/setup.py
delete mode 100644 wrap/python/pybind11/tests/CMakeLists.txt
delete mode 100644 wrap/python/pybind11/tests/conftest.py
delete mode 100644 wrap/python/pybind11/tests/constructor_stats.h
delete mode 100644 wrap/python/pybind11/tests/local_bindings.h
delete mode 100644 wrap/python/pybind11/tests/object.h
delete mode 100644 wrap/python/pybind11/tests/pybind11_cross_module_tests.cpp
delete mode 100644 wrap/python/pybind11/tests/pybind11_tests.cpp
delete mode 100644 wrap/python/pybind11/tests/pybind11_tests.h
delete mode 100644 wrap/python/pybind11/tests/pytest.ini
delete mode 100644 wrap/python/pybind11/tests/test_buffers.cpp
delete mode 100644 wrap/python/pybind11/tests/test_buffers.py
delete mode 100644 wrap/python/pybind11/tests/test_builtin_casters.cpp
delete mode 100644 wrap/python/pybind11/tests/test_builtin_casters.py
delete mode 100644 wrap/python/pybind11/tests/test_call_policies.cpp
delete mode 100644 wrap/python/pybind11/tests/test_call_policies.py
delete mode 100644 wrap/python/pybind11/tests/test_callbacks.cpp
delete mode 100644 wrap/python/pybind11/tests/test_callbacks.py
delete mode 100644 wrap/python/pybind11/tests/test_chrono.cpp
delete mode 100644 wrap/python/pybind11/tests/test_chrono.py
delete mode 100644 wrap/python/pybind11/tests/test_class.cpp
delete mode 100644 wrap/python/pybind11/tests/test_class.py
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/CMakeLists.txt
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/embed.cpp
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/installed_embed/CMakeLists.txt
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/installed_function/CMakeLists.txt
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/installed_target/CMakeLists.txt
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/main.cpp
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/subdirectory_embed/CMakeLists.txt
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/subdirectory_function/CMakeLists.txt
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/subdirectory_target/CMakeLists.txt
delete mode 100644 wrap/python/pybind11/tests/test_cmake_build/test.py
delete mode 100644 wrap/python/pybind11/tests/test_constants_and_functions.cpp
delete mode 100644 wrap/python/pybind11/tests/test_constants_and_functions.py
delete mode 100644 wrap/python/pybind11/tests/test_copy_move.cpp
delete mode 100644 wrap/python/pybind11/tests/test_copy_move.py
delete mode 100644 wrap/python/pybind11/tests/test_docstring_options.cpp
delete mode 100644 wrap/python/pybind11/tests/test_docstring_options.py
delete mode 100644 wrap/python/pybind11/tests/test_eigen.cpp
delete mode 100644 wrap/python/pybind11/tests/test_eigen.py
delete mode 100644 wrap/python/pybind11/tests/test_embed/CMakeLists.txt
delete mode 100644 wrap/python/pybind11/tests/test_embed/catch.cpp
delete mode 100644 wrap/python/pybind11/tests/test_embed/external_module.cpp
delete mode 100644 wrap/python/pybind11/tests/test_embed/test_interpreter.cpp
delete mode 100644 wrap/python/pybind11/tests/test_embed/test_interpreter.py
delete mode 100644 wrap/python/pybind11/tests/test_enum.cpp
delete mode 100644 wrap/python/pybind11/tests/test_enum.py
delete mode 100644 wrap/python/pybind11/tests/test_eval.cpp
delete mode 100644 wrap/python/pybind11/tests/test_eval.py
delete mode 100644 wrap/python/pybind11/tests/test_eval_call.py
delete mode 100644 wrap/python/pybind11/tests/test_exceptions.cpp
delete mode 100644 wrap/python/pybind11/tests/test_exceptions.py
delete mode 100644 wrap/python/pybind11/tests/test_factory_constructors.cpp
delete mode 100644 wrap/python/pybind11/tests/test_factory_constructors.py
delete mode 100644 wrap/python/pybind11/tests/test_gil_scoped.cpp
delete mode 100644 wrap/python/pybind11/tests/test_gil_scoped.py
delete mode 100644 wrap/python/pybind11/tests/test_iostream.cpp
delete mode 100644 wrap/python/pybind11/tests/test_iostream.py
delete mode 100644 wrap/python/pybind11/tests/test_kwargs_and_defaults.cpp
delete mode 100644 wrap/python/pybind11/tests/test_kwargs_and_defaults.py
delete mode 100644 wrap/python/pybind11/tests/test_local_bindings.cpp
delete mode 100644 wrap/python/pybind11/tests/test_local_bindings.py
delete mode 100644 wrap/python/pybind11/tests/test_methods_and_attributes.cpp
delete mode 100644 wrap/python/pybind11/tests/test_methods_and_attributes.py
delete mode 100644 wrap/python/pybind11/tests/test_modules.cpp
delete mode 100644 wrap/python/pybind11/tests/test_modules.py
delete mode 100644 wrap/python/pybind11/tests/test_multiple_inheritance.cpp
delete mode 100644 wrap/python/pybind11/tests/test_multiple_inheritance.py
delete mode 100644 wrap/python/pybind11/tests/test_numpy_array.cpp
delete mode 100644 wrap/python/pybind11/tests/test_numpy_array.py
delete mode 100644 wrap/python/pybind11/tests/test_numpy_dtypes.cpp
delete mode 100644 wrap/python/pybind11/tests/test_numpy_dtypes.py
delete mode 100644 wrap/python/pybind11/tests/test_numpy_vectorize.cpp
delete mode 100644 wrap/python/pybind11/tests/test_numpy_vectorize.py
delete mode 100644 wrap/python/pybind11/tests/test_opaque_types.cpp
delete mode 100644 wrap/python/pybind11/tests/test_opaque_types.py
delete mode 100644 wrap/python/pybind11/tests/test_operator_overloading.cpp
delete mode 100644 wrap/python/pybind11/tests/test_operator_overloading.py
delete mode 100644 wrap/python/pybind11/tests/test_pickling.cpp
delete mode 100644 wrap/python/pybind11/tests/test_pickling.py
delete mode 100644 wrap/python/pybind11/tests/test_pytypes.cpp
delete mode 100644 wrap/python/pybind11/tests/test_pytypes.py
delete mode 100644 wrap/python/pybind11/tests/test_sequences_and_iterators.cpp
delete mode 100644 wrap/python/pybind11/tests/test_sequences_and_iterators.py
delete mode 100644 wrap/python/pybind11/tests/test_smart_ptr.cpp
delete mode 100644 wrap/python/pybind11/tests/test_smart_ptr.py
delete mode 100644 wrap/python/pybind11/tests/test_stl.cpp
delete mode 100644 wrap/python/pybind11/tests/test_stl.py
delete mode 100644 wrap/python/pybind11/tests/test_stl_binders.cpp
delete mode 100644 wrap/python/pybind11/tests/test_stl_binders.py
delete mode 100644 wrap/python/pybind11/tests/test_tagbased_polymorphic.cpp
delete mode 100644 wrap/python/pybind11/tests/test_tagbased_polymorphic.py
delete mode 100644 wrap/python/pybind11/tests/test_union.cpp
delete mode 100644 wrap/python/pybind11/tests/test_union.py
delete mode 100644 wrap/python/pybind11/tests/test_virtual_functions.cpp
delete mode 100644 wrap/python/pybind11/tests/test_virtual_functions.py
delete mode 100644 wrap/python/pybind11/tools/FindCatch.cmake
delete mode 100644 wrap/python/pybind11/tools/FindEigen3.cmake
delete mode 100644 wrap/python/pybind11/tools/FindPythonLibsNew.cmake
delete mode 100755 wrap/python/pybind11/tools/check-style.sh
delete mode 100644 wrap/python/pybind11/tools/libsize.py
delete mode 100755 wrap/python/pybind11/tools/mkdoc.py
delete mode 100644 wrap/python/pybind11/tools/pybind11Config.cmake.in
delete mode 100644 wrap/python/pybind11/tools/pybind11Tools.cmake
delete mode 100644 wrap/spirit.h
delete mode 100644 wrap/tests/CMakeLists.txt
delete mode 100644 wrap/tests/expected-cython/geometry.pxd
delete mode 100644 wrap/tests/expected-cython/geometry.pyx
delete mode 100644 wrap/tests/expected-python/geometry_python.cpp
delete mode 100644 wrap/tests/expected/+gtsam/Point2.m
delete mode 100644 wrap/tests/expected/+gtsam/Point3.m
delete mode 100644 wrap/tests/expected/.gitignore
delete mode 100644 wrap/tests/expected/MyBase.m
delete mode 100644 wrap/tests/expected/MyFactorPosePoint2.m
delete mode 100644 wrap/tests/expected/MyTemplateMatrix.m
delete mode 100644 wrap/tests/expected/MyTemplatePoint2.m
delete mode 100644 wrap/tests/expected/MyVector12.m
delete mode 100644 wrap/tests/expected/MyVector3.m
delete mode 100644 wrap/tests/expected/Point2.m
delete mode 100644 wrap/tests/expected/Point3.m
delete mode 100644 wrap/tests/expected/Test.m
delete mode 100644 wrap/tests/expected/aGlobalFunction.m
delete mode 100644 wrap/tests/expected/geometry_wrapper.cpp
delete mode 100644 wrap/tests/expected/overloadedGlobalFunction.m
delete mode 100644 wrap/tests/expected2/+gtsam/Point2.m
delete mode 100644 wrap/tests/expected2/+gtsam/Point3.m
delete mode 100644 wrap/tests/expected2/MyBase.m
delete mode 100644 wrap/tests/expected2/MyFactorPosePoint2.m
delete mode 100644 wrap/tests/expected2/MyTemplateMatrix.m
delete mode 100644 wrap/tests/expected2/MyTemplatePoint2.m
delete mode 100644 wrap/tests/expected2/MyVector12.m
delete mode 100644 wrap/tests/expected2/MyVector3.m
delete mode 100644 wrap/tests/expected2/Test.m
delete mode 100644 wrap/tests/expected2/aGlobalFunction.m
delete mode 100644 wrap/tests/expected2/geometry_wrapper.cpp
delete mode 100644 wrap/tests/expected2/overloadedGlobalFunction.m
delete mode 100644 wrap/tests/expected_namespaces/+ns1/ClassA.m
delete mode 100644 wrap/tests/expected_namespaces/+ns1/ClassB.m
delete mode 100644 wrap/tests/expected_namespaces/+ns1/aGlobalFunction.m
delete mode 100644 wrap/tests/expected_namespaces/+ns2/+ns3/ClassB.m
delete mode 100644 wrap/tests/expected_namespaces/+ns2/ClassA.m
delete mode 100644 wrap/tests/expected_namespaces/+ns2/ClassC.m
delete mode 100644 wrap/tests/expected_namespaces/+ns2/aGlobalFunction.m
delete mode 100644 wrap/tests/expected_namespaces/+ns2/overloadedGlobalFunction.m
delete mode 100644 wrap/tests/expected_namespaces/ClassD.m
delete mode 100644
wrap/tests/expected_namespaces/testNamespaces_wrapper.cpp delete mode 100644 wrap/tests/geometry.h delete mode 100644 wrap/tests/testArgument.cpp delete mode 100644 wrap/tests/testClass.cpp delete mode 100644 wrap/tests/testDependencies.h delete mode 100644 wrap/tests/testGlobalFunction.cpp delete mode 100644 wrap/tests/testMemory.m delete mode 100644 wrap/tests/testMethod.cpp delete mode 100644 wrap/tests/testNamespaces.h delete mode 100644 wrap/tests/testReturnValue.cpp delete mode 100644 wrap/tests/testSpirit.cpp delete mode 100644 wrap/tests/testTemplate.cpp delete mode 100644 wrap/tests/testType.cpp delete mode 100644 wrap/tests/testWrap.cpp delete mode 100644 wrap/utilities.cpp delete mode 100644 wrap/utilities.h delete mode 100644 wrap/wrap.cpp diff --git a/wrap/Argument.cpp b/wrap/Argument.cpp deleted file mode 100644 index f85aed72e..000000000 --- a/wrap/Argument.cpp +++ /dev/null @@ -1,316 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Argument.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Argument.h" -#include "Class.h" - -#include - -#include -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -Argument Argument::expandTemplate(const TemplateSubstitution& ts) const { - Argument instArg = *this; - instArg.type = ts.tryToSubstitite(type); - return instArg; -} - -/* ************************************************************************* */ -ArgumentList ArgumentList::expandTemplate( - const TemplateSubstitution& ts) const { - ArgumentList instArgList; - for(const Argument& arg: *this) { - Argument instArg = arg.expandTemplate(ts); - instArgList.push_back(instArg); - } - return instArgList; -} - -/* ************************************************************************* */ -string Argument::matlabClass(const string& delim) const { - string result; - for(const string& ns: type.namespaces()) - result += ns + delim; - if (type.name() == "string" || type.name() == "unsigned char" - || type.name() == "char") - return result + "char"; - if (type.name() == "Vector" || type.name() == "Matrix") - return result + "double"; - if (type.name() == "int" || type.name() == "size_t") - return result + "numeric"; - if (type.name() == "bool") - return result + "logical"; - return result + type.name(); -} - -/* ************************************************************************* */ -void Argument::matlab_unwrap(FileWriter& file, const string& matlabName) const { - file.oss << " "; - - string cppType = type.qualifiedName("::"); - string matlabUniqueType = type.qualifiedName(); - bool isNotScalar = !type.isScalar(); - - // We cannot handle scalar non const references - if (!isNotScalar && is_ref && !is_const) { - throw std::runtime_error("Cannot unwrap a scalar non-const reference"); - } - - if (is_ptr && type.category != Qualified::EIGEN) - // A pointer: emit an "unwrap_shared_ptr" call which returns a pointer - file.oss << "boost::shared_ptr<" << cppType << "> " << name - << " = unwrap_shared_ptr< "; - else if (is_ref && isNotScalar && type.category != 
Qualified::EIGEN) - // A reference: emit an "unwrap_shared_ptr" call and de-reference the pointer - file.oss << cppType << "& " << name << " = *unwrap_shared_ptr< "; - else - // Not a pointer, or a reference to a scalar type. Therefore, emit an "unwrap" call - // unwrap is specified in matlab.h as a series of template specializations - // that know how to unpack the expected MATLAB object - // example: double tol = unwrap< double >(in[2]); - // example: Vector v = unwrap< Vector >(in[1]); - file.oss << cppType << " " << name << " = unwrap< "; - - file.oss << cppType << " >(" << matlabName; - if( (is_ptr || is_ref) && isNotScalar && type.category != Qualified::EIGEN) - file.oss << ", \"ptr_" << matlabUniqueType << "\""; - file.oss << ");" << endl; -} - -/* ************************************************************************* */ -void Argument::proxy_check(FileWriter& proxyFile, const string& s) const { - proxyFile.oss << "isa(" << s << ",'" << matlabClass(".") << "')"; - if (type.name() == "Vector") - proxyFile.oss << " && size(" << s << ",2)==1"; -} - -/* ************************************************************************* */ -void Argument::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - string cythonType = type.pxdClassName(); - if (cythonType == "This") cythonType = className; - else if (type.isEigen()) - cythonType = "const " + cythonType + "&"; - else if (type.match(templateArgs)) - cythonType = type.name(); - - // add modifier - if (!type.isEigen()) { - if (is_ptr) cythonType = "shared_ptr[" + cythonType + "]&"; - if (is_ref) cythonType = cythonType + "&"; - if (is_const) cythonType = "const " + cythonType; - } - - file.oss << cythonType << " " << name; -} - -/* ************************************************************************* */ -void Argument::emit_cython_pyx(FileWriter& file) const { - file.oss << type.pyxArgumentType() << " " << name; -} - -/* ************************************************************************* */ -std::string Argument::pyx_convertEigenTypeAndStorageOrder() const { - if (!type.isEigen()) - return ""; - return name + " = " + name + ".astype(float, order=\'F\', copy=False)"; -} - -/* ************************************************************************* */ -std::string Argument::pyx_asParam() const { - string cythonType = type.pxdClassName(); - string cythonVar; - if (type.isNonBasicType()) { - cythonVar = name + "." 
+ type.shared_pxd_obj_in_pyx(); - if (!is_ptr) cythonVar = "deref(" + cythonVar + ")"; - } else if (type.isEigen()) { - cythonVar = "<" + cythonType + ">" + "(Map[" + cythonType + "](" + name + "))"; - } else { - cythonVar = name; - } - return cythonVar; -} - -/* ************************************************************************* */ -string ArgumentList::types() const { - string str; - bool first = true; - for(Argument arg: *this) { - if (!first) - str += ","; - str += arg.type.name(); - first = false; - } - return str; -} - -/* ************************************************************************* */ -string ArgumentList::signature() const { - string sig; - bool cap = false; - - for(Argument arg: *this) { - for(char ch: arg.type.name()) - if (isupper(ch)) { - sig += ch; - //If there is a capital letter, we don't want to read it below - cap = true; - } - if (!cap) - sig += arg.type.name()[0]; - //Reset to default - cap = false; - } - - return sig; -} - -/* ************************************************************************* */ -string ArgumentList::names() const { - string str; - bool first = true; - for(Argument arg: *this) { - if (!first) - str += ","; - str += arg.name; - first = false; - } - return str; -} - -/* ************************************************************************* */ -bool ArgumentList::allScalar() const { - for(Argument arg: *this) - if (!arg.type.isScalar()) - return false; - return true; -} - -/* ************************************************************************* */ -void ArgumentList::matlab_unwrap(FileWriter& file, int start) const { - int index = start; - for(Argument arg: *this) { - stringstream buf; - buf << "in[" << index << "]"; - arg.matlab_unwrap(file, buf.str()); - index++; - } -} - -/* ************************************************************************* */ -void ArgumentList::emit_prototype(FileWriter& file, const string& name) const { - file.oss << name << "("; - bool first = true; - for(Argument arg: *this) { - if (!first) - file.oss << ", "; - file.oss << arg.type.name() << " " << arg.name; - first = false; - } - file.oss << ")"; -} - -/* ************************************************************************* */ -void ArgumentList::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - for (size_t j = 0; j(__params[" + std::to_string(j) + "])\n"; - return s; -} - -/* ************************************************************************* */ -void ArgumentList::proxy_check(FileWriter& proxyFile) const { - // Check nr of arguments - proxyFile.oss << "if length(varargin) == " << size(); - if (size() > 0) - proxyFile.oss << " && "; - // ...and their type.names - bool first = true; - for (size_t i = 0; i < size(); i++) { - if (!first) - proxyFile.oss << " && "; - string s = "varargin{" + boost::lexical_cast(i + 1) + "}"; - (*this)[i].proxy_check(proxyFile, s); - first = false; - } - proxyFile.oss << "\n"; -} - -/* ************************************************************************* */ - diff --git a/wrap/Argument.h b/wrap/Argument.h deleted file mode 100644 index c08eb0be9..000000000 --- a/wrap/Argument.h +++ /dev/null @@ -1,242 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Argument.h - * @brief arguments to constructors and methods - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include "TemplateSubstitution.h" -#include "FileWriter.h" -#include "ReturnValue.h" - -namespace wrap { - -/// Argument class -struct Argument { - Qualified type; - std::string name; - bool is_const, is_ref, is_ptr; - - Argument() : - is_const(false), is_ref(false), is_ptr(false) { - } - - Argument(const Qualified& t, const std::string& n) : - type(t), name(n), is_const(false), is_ref(false), is_ptr(false) { - } - - bool isSameSignature(const Argument& other) const { - return type == other.type - && is_const == other.is_const && is_ref == other.is_ref - && is_ptr == other.is_ptr; - } - - bool operator==(const Argument& other) const { - return type == other.type && name == other.name - && is_const == other.is_const && is_ref == other.is_ref - && is_ptr == other.is_ptr; - } - - Argument expandTemplate(const TemplateSubstitution& ts) const; - - /// return MATLAB class for use in isa(x,class) - std::string matlabClass(const std::string& delim = "") const; - - /// MATLAB code generation, MATLAB to C++ - void matlab_unwrap(FileWriter& file, const std::string& matlabName) const; - - /** - * emit checking argument to MATLAB proxy - * @param proxyFile output stream - */ - void proxy_check(FileWriter& proxyFile, const std::string& s) const; - - /** - * emit arguments for cython pxd - * @param file output stream - */ - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - void emit_cython_pyx(FileWriter& file) const; - std::string pyx_asParam() const; - std::string pyx_convertEigenTypeAndStorageOrder() const; - - friend std::ostream& operator<<(std::ostream& os, const Argument& arg) { - os << (arg.is_const ? "const " : "") << arg.type << (arg.is_ptr ? "*" : "") - << (arg.is_ref ? 
"&" : ""); - return os; - } - -}; - -/// Argument list is just a container with Arguments -struct ArgumentList: public std::vector { - - /// create a comma-separated string listing all argument types (not used) - std::string types() const; - - /// create a short "signature" string - std::string signature() const; - - /// create a comma-separated string listing all argument names, used in m-files - std::string names() const; - - /// Check if all arguments scalar - bool allScalar() const; - - ArgumentList expandTemplate(const TemplateSubstitution& ts) const; - - bool isSameSignature(const ArgumentList& other) const { - for(size_t i = 0; i& templateArgs) const; - void emit_cython_pyx(FileWriter& file) const; - std::string pyx_asParams() const; - std::string pyx_paramsList() const; - std::string pyx_castParamsToPythonType(const std::string& indent) const; - std::string pyx_convertEigenTypeAndStorageOrder(const std::string& indent) const; - - /** - * emit checking arguments to MATLAB proxy - * @param proxyFile output stream - */ - void proxy_check(FileWriter& proxyFile) const; - - /// Output stream operator - friend std::ostream& operator<<(std::ostream& os, - const ArgumentList& argList) { - os << "("; - if (argList.size() > 0) - os << argList.front(); - if (argList.size() > 1) - for (size_t i = 1; i < argList.size(); i++) - os << ", " << argList[i]; - os << ")"; - return os; - } - -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ArgumentGrammar: public classic::grammar { - - wrap::Argument& result_; ///< successful parse will be placed in here - TypeGrammar argument_type_g; ///< Type parser for Argument::type - - /// Construct type grammar and specify where result is placed - ArgumentGrammar(wrap::Argument& result) : - result_(result), argument_type_g(result.type) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - typedef classic::rule Rule; - - Rule argument_p; - - definition(ArgumentGrammar const& self) { - using namespace classic; - - // NOTE: allows for pointers to all types - // Slightly more permissive than before on basis/eigen type qualification - // Also, currently parses Point2*&, can't make it work otherwise :-( - argument_p = !str_p("const")[assign_a(self.result_.is_const, T)] // - >> self.argument_type_g // - >> !ch_p('*')[assign_a(self.result_.is_ptr, T)] - >> !ch_p('&')[assign_a(self.result_.is_ref, T)] - >> BasicRules::name_p[assign_a(self.result_.name)]; - } - - Rule const& start() const { - return argument_p; - } - - }; -}; -// ArgumentGrammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ArgumentListGrammar: public classic::grammar { - - wrap::ArgumentList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - ArgumentListGrammar(wrap::ArgumentList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - const Argument arg0; ///< used to reset arg - Argument arg; ///< temporary argument for use during parsing - ArgumentGrammar argument_g; ///< single Argument parser - - classic::rule argument_p, argumentList_p; - - definition(ArgumentListGrammar const& self) : - argument_g(arg) { - using namespace classic; - - argument_p = argument_g // - [classic::push_back_a(self.result_, 
arg)] // - [assign_a(arg, arg0)]; - - argumentList_p = '(' >> !argument_p >> *(',' >> argument_p) >> ')'; - } - - classic::rule const& start() const { - return argumentList_p; - } - - }; -}; -// ArgumentListGrammar - -/* ************************************************************************* */ - -}// \namespace wrap - diff --git a/wrap/CMakeLists.txt b/wrap/CMakeLists.txt deleted file mode 100644 index c04a44edb..000000000 --- a/wrap/CMakeLists.txt +++ /dev/null @@ -1,45 +0,0 @@ -# Build/install Wrap - -set(WRAP_BOOST_LIBRARIES - Boost::system - Boost::filesystem - Boost::thread -) - -# Allow for disabling serialization to handle errors related to Clang's linker -option(GTSAM_WRAP_SERIALIZATION "If enabled, allows for wrapped objects to be saved via boost.serialization" ON) - -# Build the executable itself -file(GLOB wrap_srcs "*.cpp") -file(GLOB wrap_headers "*.h") -list(REMOVE_ITEM wrap_srcs ${CMAKE_CURRENT_SOURCE_DIR}/wrap.cpp) -add_library(wrap_lib STATIC ${wrap_srcs} ${wrap_headers}) -target_include_directories(wrap_lib PUBLIC - $ -) -if (NOT GTSAM_WRAP_SERIALIZATION) - target_compile_definitions(wrap_lib PUBLIC -DWRAP_DISABLE_SERIALIZE) -endif() - -# Apply build flags: -gtsam_apply_build_flags(wrap_lib) - -target_link_libraries(wrap_lib ${WRAP_BOOST_LIBRARIES}) -gtsam_assign_source_folders(${wrap_srcs} ${wrap_headers}) -add_executable(wrap wrap.cpp) -target_link_libraries(wrap PRIVATE wrap_lib) - -# Set folder in Visual Studio -file(RELATIVE_PATH relative_path "${PROJECT_SOURCE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}") -set_target_properties(wrap_lib wrap PROPERTIES FOLDER "${relative_path}") - -# Install wrap binary and export target -install(TARGETS wrap EXPORT GTSAM-exports DESTINATION ${CMAKE_INSTALL_BINDIR}) -list(APPEND GTSAM_EXPORTED_TARGETS wrap) -set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) - -# Install matlab header -install(FILES matlab.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/wrap) - -# Build tests -add_subdirectory(tests) diff --git a/wrap/Class.cpp b/wrap/Class.cpp deleted file mode 100644 index 65ce9eab7..000000000 --- a/wrap/Class.cpp +++ /dev/null @@ -1,897 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.cpp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Class.h" -#include "utilities.h" -#include "Argument.h" -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include // std::ostream_iterator -//#include // on Linux GCC: fails with error regarding needing C++0x std flags -//#include // same failure as above -#include // works on Linux GCC -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void Class::assignParent(const Qualified& parent) { - parentClass.reset(parent); -} - -/* ************************************************************************* */ -boost::optional Class::qualifiedParent() const { - boost::optional result = boost::none; - if (parentClass) - result = parentClass->qualifiedName("::"); - return result; -} - -/* ************************************************************************* */ -static void handleException(const out_of_range& oor, - const Class::Methods& methods) { - cerr << "Class::method: key not found: " << oor.what() << ", methods are:\n"; - using boost::adaptors::map_keys; - ostream_iterator out_it(cerr, "\n"); - boost::copy(methods | map_keys, out_it); -} - -/* ************************************************************************* */ -// Method& Class::mutableMethod(Str key) { -// try { -// return methods_.at(key); -// } catch (const out_of_range& oor) { -// handleException(oor, methods_); -// throw runtime_error("Internal error in wrap"); -// } -// } - -/* ************************************************************************* */ -const Method& Class::method(Str key) const { - try { - return methods_.at(key); - } catch (const out_of_range& oor) { - handleException(oor, methods_); - throw runtime_error("Internal error in wrap"); - } -} - -/* ************************************************************************* */ -void Class::matlab_proxy(Str toolboxPath, Str wrapperName, - const TypeAttributesTable& typeAttributes, FileWriter& wrapperFile, - vector& functionNames) const { - - // Create namespace folders - createNamespaceStructure(namespaces(), toolboxPath); - - // open destination classFile - string classFile = matlabName(toolboxPath); - FileWriter proxyFile(classFile, verbose_, "%"); - - // get the name of actual matlab object - const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppName = qualifiedName("::"); - - // emit class proxy code - // we want our class to inherit the handle class for memory purposes - const string parent = - parentClass ? parentClass->qualifiedName(".") : "handle"; - comment_fragment(proxyFile); - proxyFile.oss << "classdef " << name() << " < " << parent << endl; - proxyFile.oss << " properties\n"; - proxyFile.oss << " ptr_" << matlabUniqueName << " = 0\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " methods\n"; - - // Constructor - proxyFile.oss << " function obj = " << name() << "(varargin)\n"; - // Special pointer constructors - one in MATLAB to create an object and - // assign a pointer returned from a C++ function. In turn this MATLAB - // constructor calls a special C++ function that just adds the object to - // its collector. 
This allows wrapped functions to return objects in - // other wrap modules - to add these to their collectors the pointer is - // passed from one C++ module into matlab then back into the other C++ - // module. - pointer_constructor_fragments(proxyFile, wrapperFile, wrapperName, - functionNames); - wrapperFile.oss << "\n"; - - // Regular constructors - boost::optional cppBaseName = qualifiedParent(); - for (size_t i = 0; i < constructor.nrOverloads(); i++) { - ArgumentList args = constructor.argumentList(i); - const int id = (int) functionNames.size(); - constructor.proxy_fragment(proxyFile, wrapperName, (bool) parentClass, id, - args); - const string wrapFunctionName = constructor.wrapper_fragment(wrapperFile, - cppName, matlabUniqueName, cppBaseName, id, args); - wrapperFile.oss << "\n"; - functionNames.push_back(wrapFunctionName); - } - proxyFile.oss << " else\n"; - proxyFile.oss << " error('Arguments do not match any overload of " - << matlabQualName << " constructor');\n"; - proxyFile.oss << " end\n"; - if (parentClass) - proxyFile.oss << " obj = obj@" << parentClass->qualifiedName(".") - << "(uint64(" << ptr_constructor_key << "), base_ptr);\n"; - proxyFile.oss << " obj.ptr_" << matlabUniqueName << " = my_ptr;\n"; - proxyFile.oss << " end\n\n"; - - // Deconstructor - { - const int id = (int) functionNames.size(); - deconstructor.proxy_fragment(proxyFile, wrapperName, matlabUniqueName, id); - proxyFile.oss << "\n"; - const string functionName = deconstructor.wrapper_fragment(wrapperFile, - cppName, matlabUniqueName, id); - wrapperFile.oss << "\n"; - functionNames.push_back(functionName); - } - proxyFile.oss - << " function display(obj), obj.print(''); end\n %DISPLAY Calls print on the object\n"; - proxyFile.oss - << " function disp(obj), obj.display; end\n %DISP Calls print on the object\n"; - - // Methods - for(const Methods::value_type& name_m: methods_) { - const Method& m = name_m.second; - m.proxy_wrapper_fragments(proxyFile, wrapperFile, cppName, matlabQualName, - matlabUniqueName, wrapperName, typeAttributes, functionNames); - proxyFile.oss << "\n"; - wrapperFile.oss << "\n"; - } - if (hasSerialization) - serialization_fragments(proxyFile, wrapperFile, wrapperName, functionNames); - - proxyFile.oss << " end\n"; - proxyFile.oss << "\n"; - proxyFile.oss << " methods(Static = true)\n"; - - // Static methods - for(const StaticMethods::value_type& name_m: static_methods) { - const StaticMethod& m = name_m.second; - m.proxy_wrapper_fragments(proxyFile, wrapperFile, cppName, matlabQualName, - matlabUniqueName, wrapperName, typeAttributes, functionNames); - proxyFile.oss << "\n"; - wrapperFile.oss << "\n"; - } - if (hasSerialization) - deserialization_fragments(proxyFile, wrapperFile, wrapperName, - functionNames); - - proxyFile.oss << " end\n"; - proxyFile.oss << "end\n"; - - // Close file - proxyFile.emit(true); -} - -/* ************************************************************************* */ -void Class::pointer_constructor_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - - const string matlabUniqueName = qualifiedName(); - const string cppName = qualifiedName("::"); - - const int collectorInsertId = (int) functionNames.size(); - const string collectorInsertFunctionName = matlabUniqueName - + "_collectorInsertAndMakeBase_" - + boost::lexical_cast(collectorInsertId); - functionNames.push_back(collectorInsertFunctionName); - - int upcastFromVoidId; - string upcastFromVoidFunctionName; - if (isVirtual) { - 
upcastFromVoidId = (int) functionNames.size(); - upcastFromVoidFunctionName = matlabUniqueName + "_upcastFromVoid_" - + boost::lexical_cast(upcastFromVoidId); - functionNames.push_back(upcastFromVoidFunctionName); - } - - // MATLAB constructor that assigns pointer to matlab object then calls c++ - // function to add the object to the collector. - if (isVirtual) { - proxyFile.oss - << " if (nargin == 2 || (nargin == 3 && strcmp(varargin{3}, 'void')))"; - } else { - proxyFile.oss << " if nargin == 2"; - } - proxyFile.oss << " && isa(varargin{1}, 'uint64') && varargin{1} == uint64(" - << ptr_constructor_key << ")\n"; - if (isVirtual) { - proxyFile.oss << " if nargin == 2\n"; - proxyFile.oss << " my_ptr = varargin{2};\n"; - proxyFile.oss << " else\n"; - proxyFile.oss << " my_ptr = " << wrapperName << "(" - << upcastFromVoidId << ", varargin{2});\n"; - proxyFile.oss << " end\n"; - } else { - proxyFile.oss << " my_ptr = varargin{2};\n"; - } - if (!parentClass) // If this class has a base class, we'll get a base class pointer back - proxyFile.oss << " "; - else - proxyFile.oss << " base_ptr = "; - proxyFile.oss << wrapperName << "(" << collectorInsertId << ", my_ptr);\n"; // Call collector insert and get base class ptr - - // C++ function to add pointer from MATLAB to collector. The pointer always - // comes from a C++ return value; this mechanism allows the object to be added - // to a collector in a different wrap module. If this class has a base class, - // a new pointer to the base class is allocated and returned. - wrapperFile.oss << "void " << collectorInsertFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " mexAtExit(&_deleteAllObjects);\n"; - // Typedef boost::shared_ptr - wrapperFile.oss << " typedef boost::shared_ptr<" << cppName << "> Shared;\n"; - wrapperFile.oss << "\n"; - // Get self pointer passed in - wrapperFile.oss - << " Shared *self = *reinterpret_cast (mxGetData(in[0]));\n"; - // Add to collector - wrapperFile.oss << " collector_" << matlabUniqueName << ".insert(self);\n"; - // If we have a base class, return the base class pointer (MATLAB will call the base class collectorInsertAndMakeBase to add this to the collector and recurse the heirarchy) - boost::optional cppBaseName = qualifiedParent(); - if (cppBaseName) { - wrapperFile.oss << "\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << *cppBaseName - << "> SharedBase;\n"; - wrapperFile.oss - << " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);\n"; - wrapperFile.oss - << " *reinterpret_cast(mxGetData(out[0])) = new SharedBase(*self);\n"; - } - wrapperFile.oss << "}\n"; - - // If this is a virtual function, C++ function to dynamic upcast it from a - // shared_ptr. This mechanism allows automatic dynamic creation of the - // real underlying derived-most class when a C++ method returns a virtual - // base class. 
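
// Illustrative sketch -- not from the deleted sources. A minimal standalone model of
// the two mechanisms described above: the per-class collector that keeps wrapped
// objects alive until mexAtExit fires, and the "upcastFromVoid" re-typing of a
// shared_ptr<void> handle passed back from another wrap module. std:: smart pointers
// stand in for boost::, and Point3 is a hypothetical wrapped class.
#include <cassert>
#include <memory>
#include <set>

struct Point3 { double x = 0, y = 0, z = 0; };   // hypothetical wrapped class
typedef std::shared_ptr<Point3> Shared;

static std::set<Shared*> collector_Point3;       // stands in for collector_<matlabUniqueName>

Shared* collectorInsert(Shared* self) {
  collector_Point3.insert(self);                 // keep the object alive for MATLAB
  return self;                                   // a base-class handle would be returned here
}

Shared* upcastFromVoid(std::shared_ptr<void>* asVoid) {
  // mirrors: Shared *self = new Shared(boost::static_pointer_cast<Point3>(*asVoid));
  return new Shared(std::static_pointer_cast<Point3>(*asVoid));
}

void deleteAllObjects() {                        // what mexAtExit(&_deleteAllObjects) triggers
  for (Shared* handle : collector_Point3) delete handle;
  collector_Point3.clear();
}

int main() {
  std::shared_ptr<void> fromOtherModule = std::make_shared<Point3>();
  Shared* self = collectorInsert(upcastFromVoid(&fromOtherModule));
  assert((*self)->x == 0);
  deleteAllObjects();
  return 0;
}
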
- if (isVirtual) - wrapperFile.oss << "\n" - "void " << upcastFromVoidFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[]) {\n" - " mexAtExit(&_deleteAllObjects);\n" - " typedef boost::shared_ptr<" << cppName - << "> Shared;\n" - " boost::shared_ptr *asVoid = *reinterpret_cast**> (mxGetData(in[0]));\n" - " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);\n" - " Shared *self = new Shared(boost::static_pointer_cast<" << cppName - << ">(*asVoid));\n" - " *reinterpret_cast(mxGetData(out[0])) = self;\n" - "}\n"; -} - -/* ************************************************************************* */ -Class Class::expandTemplate(const TemplateSubstitution& ts) const { - Class inst = *this; - inst.methods_ = expandMethodTemplate(methods_, ts); - inst.static_methods = expandMethodTemplate(static_methods, ts); - inst.constructor = constructor.expandTemplate(ts); - inst.deconstructor.name = inst.name(); - return inst; -} - -/* ************************************************************************* */ -vector Class::expandTemplate(Str templateArg, - const vector& instantiations) const { - vector result; - for(const Qualified& instName: instantiations) { - Qualified expandedClass = (Qualified) (*this); - expandedClass.expand(instName.name()); - const TemplateSubstitution ts(templateArg, instName, expandedClass); - Class inst = expandTemplate(ts); - inst.name_ = expandedClass.name(); - inst.templateArgs.clear(); - inst.typedefName = qualifiedName("::") + "<" + instName.qualifiedName("::") - + ">"; - inst.templateInstTypeList.push_back(instName); - inst.templateClass = *this; - result.push_back(inst); - } - return result; -} - -/* ************************************************************************* */ -vector Class::expandTemplate(Str templateArg, - const vector& integers) const { - vector result; - for(int i: integers) { - Qualified expandedClass = (Qualified) (*this); - stringstream ss; ss << i; - string instName = ss.str(); - expandedClass.expand(instName); - const TemplateSubstitution ts(templateArg, instName, expandedClass); - Class inst = expandTemplate(ts); - inst.name_ = expandedClass.name(); - inst.templateArgs.clear(); - inst.typedefName = qualifiedName("::") + "<" + instName + ">"; - result.push_back(inst); - } - return result; -} - -/* ************************************************************************* */ -void Class::addMethod(bool verbose, bool is_const, Str methodName, - const ArgumentList& argumentList, - const ReturnValue& returnValue, const Template& tmplate) { - // Check if templated - if (tmplate.valid()) { - try { - templateMethods_[methodName].addOverload(methodName, argumentList, - returnValue, is_const, - tmplate.argName(), verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + methodName + "\n" + e.what()); - } - // Create method to expand - // For all values of the template argument, create a new method - for (const Qualified& instName : tmplate.argValues()) { - const TemplateSubstitution ts(tmplate.argName(), instName, *this); - // substitute template in arguments - ArgumentList expandedArgs = argumentList.expandTemplate(ts); - // do the same for return type - ReturnValue expandedRetVal = returnValue.expandTemplate(ts); - // Now stick in new overload stack with expandedMethodName key - // but note we use the same, unexpanded methodName in overload - string expandedMethodName = methodName + instName.name(); - try { - 
methods_[expandedMethodName].addOverload(methodName, expandedArgs, - expandedRetVal, is_const, - instName, verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + expandedMethodName + "\n" + e.what()); - } - } - } else { - try { - // just add overload - methods_[methodName].addOverload(methodName, argumentList, returnValue, - is_const, boost::none, verbose); - nontemplateMethods_[methodName].addOverload(methodName, argumentList, - returnValue, is_const, - boost::none, verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + methodName + "\n" + e.what()); - } - } -} - -/* ************************************************************************* */ -void Class::erase_serialization(Methods& methods) { - Methods::iterator it = methods.find("serializable"); - if (it != methods.end()) { -#ifndef WRAP_DISABLE_SERIALIZE - isSerializable = true; -#else - // cout << "Ignoring serializable() flag in class " << name << endl; -#endif - methods.erase(it); - } - - it = methods.find("serialize"); - if (it != methods.end()) { -#ifndef WRAP_DISABLE_SERIALIZE - isSerializable = true; - hasSerialization = true; -#else - // cout << "Ignoring serialize() flag in class " << name << endl; -#endif - methods.erase(it); - } -} - -void Class::erase_serialization() { - erase_serialization(methods_); - erase_serialization(nontemplateMethods_); -} - -/* ************************************************************************* */ -void Class::verifyAll(vector& validTypes, bool& hasSerialiable) const { - - hasSerialiable |= isSerializable; - - // verify all of the function arguments - //TODO:verifyArguments(validTypes, constructor.args_list); - verifyArguments(validTypes, static_methods); - verifyArguments(validTypes, methods_); - - // verify function return types - verifyReturnTypes(validTypes, static_methods); - verifyReturnTypes(validTypes, methods_); - - // verify parents - boost::optional parent = qualifiedParent(); - if (parent - && find(validTypes.begin(), validTypes.end(), *parent) - == validTypes.end()) - throw DependencyMissing(*parent, qualifiedName("::")); -} - -/* ************************************************************************* */ -void Class::appendInheritedMethods(const Class& cls, - const vector& classes) { - - if (cls.parentClass) { - - // Find parent - for(const Class& parent: classes) { - // We found a parent class for our parent, TODO improve ! - if (parent.name() == cls.parentClass->name()) { - methods_.insert(parent.methods_.begin(), parent.methods_.end()); - appendInheritedMethods(parent, classes); - } - } - } -} - -/* ************************************************************************* */ -void Class::removeInheritedNontemplateMethods(vector& classes) { - if (!parentClass) return; - // Find parent - auto parentIt = std::find_if(classes.begin(), classes.end(), - [&](const Class& cls) { return cls.name() == parentClass->name(); }); - if (parentIt == classes.end()) return; // ignore if parent not found - Class& parent = *parentIt; - - // Only check nontemplateMethods_ - for(const string& methodName: nontemplateMethods_ | boost::adaptors::map_keys) { - // check if the method exists in its parent - // Check against parent's methods_ because all the methods of grand - // parent and grand-grand-parent, etc. are already included there - // This is to avoid looking into higher level grand parents... 
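
// Illustrative sketch -- not from the deleted sources. The duplicate-overload filter
// that removeInheritedNontemplateMethods() implements with boost::combine/remove_if,
// reduced to plain std:: containers: a child overload is dropped when the parent
// already declares one with the same return type and argument signature. The return
// types and signatures below are made-up examples.
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct Overload { std::string returnType, signature; };  // simplified stand-in

static bool sameOverload(const Overload& a, const Overload& b) {
  return a.returnType == b.returnType && a.signature == b.signature;
}

int main() {
  std::vector<Overload> child  = {{"void", "(int)"}, {"double", "(Pose3)"}};
  std::vector<Overload> parent = {{"void", "(int)"}};
  child.erase(std::remove_if(child.begin(), child.end(),
                             [&](const Overload& o) {
                               return std::any_of(parent.begin(), parent.end(),
                                                  [&](const Overload& p) { return sameOverload(o, p); });
                             }),
              child.end());
  std::cout << child.size() << " overload(s) kept\n";    // prints: 1 overload(s) kept
  return 0;
}
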
- auto it = parent.methods_.find(methodName); - if (it == parent.methods_.end()) continue; // if not: ignore! - - Method& parentMethod = it->second; - Method& method = nontemplateMethods_[methodName]; - // check if they have the same modifiers (const/static/templateArgs) - if (!method.isSameModifiers(parentMethod)) continue; // if not: ignore - - // check and remove duplicate overloads - auto methodOverloads = boost::combine(method.returnVals_, method.argLists_); - auto parentMethodOverloads = boost::combine(parentMethod.returnVals_, parentMethod.argLists_); - auto result = boost::remove_if( - methodOverloads, - [&](boost::tuple const& overload) { - bool found = std::find_if( - parentMethodOverloads.begin(), - parentMethodOverloads.end(), - [&](boost::tuple const& - parentOverload) { - return overload.get<0>() == parentOverload.get<0>() && - overload.get<1>().isSameSignature(parentOverload.get<1>()); - }) != parentMethodOverloads.end(); - return found; - }); - // remove all duplicate overloads - method.returnVals_.erase(boost::get<0>(result.get_iterator_tuple()), - method.returnVals_.end()); - method.argLists_.erase(boost::get<1>(result.get_iterator_tuple()), - method.argLists_.end()); - } - // [Optional] remove the entire method if it has no overload - for (auto it = nontemplateMethods_.begin(), ite = nontemplateMethods_.end(); it != ite;) - if (it->second.nrOverloads() == 0) it = nontemplateMethods_.erase(it); else ++it; -} - -/* ************************************************************************* */ -string Class::getTypedef() const { - string result; - for(Str namesp: namespaces()) { - result += ("namespace " + namesp + " { "); - } - result += ("typedef " + typedefName + " " + name() + ";"); - for (size_t i = 0; i < namespaces().size(); ++i) { - result += " }"; - } - return result; -} - -/* ************************************************************************* */ -void Class::comment_fragment(FileWriter& proxyFile) const { - proxyFile.oss << "%class " << name() << ", see Doxygen page for details\n"; - proxyFile.oss - << "%at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n"; - - constructor.comment_fragment(proxyFile); - - if (!methods_.empty()) - proxyFile.oss << "%\n%-------Methods-------\n"; - for(const Methods::value_type& name_m: methods_) - name_m.second.comment_fragment(proxyFile); - - if (!static_methods.empty()) - proxyFile.oss << "%\n%-------Static Methods-------\n"; - for(const StaticMethods::value_type& name_m: static_methods) - name_m.second.comment_fragment(proxyFile); - - if (hasSerialization) { - proxyFile.oss << "%\n%-------Serialization Interface-------\n"; - proxyFile.oss << "%string_serialize() : returns string\n"; - proxyFile.oss << "%string_deserialize(string serialized) : returns " - << name() << "\n"; - } - - proxyFile.oss << "%\n"; -} - -/* ************************************************************************* */ -void Class::serialization_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - -//void Point3_string_serialize_17(int nargout, mxArray *out[], int nargin, const mxArray *in[]) -//{ -// typedef boost::shared_ptr Shared; -// checkArguments("string_serialize",nargout,nargin-1,0); -// Shared obj = unwrap_shared_ptr(in[0], "ptr_Point3"); -// ostringstream out_archive_stream; -// boost::archive::text_oarchive out_archive(out_archive_stream); -// out_archive << *obj; -// out[0] = wrap< string >(out_archive_stream.str()); -//} - - int serialize_id = functionNames.size(); 
- const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppClassName = qualifiedName("::"); - const string wrapFunctionNameSerialize = matlabUniqueName - + "_string_serialize_" + boost::lexical_cast(serialize_id); - functionNames.push_back(wrapFunctionNameSerialize); - - // call - //void Point3_string_serialize_17(int nargout, mxArray *out[], int nargin, const mxArray *in[]) - wrapperFile.oss << "void " << wrapFunctionNameSerialize - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << cppClassName - << "> Shared;" << endl; - - // check arguments - for serialize, no arguments - // example: checkArguments("string_serialize",nargout,nargin-1,0); - wrapperFile.oss - << " checkArguments(\"string_serialize\",nargout,nargin-1,0);\n"; - - // get class pointer - // example: Shared obj = unwrap_shared_ptr(in[0], "ptr_Point3"); - wrapperFile.oss << " Shared obj = unwrap_shared_ptr<" << cppClassName - << ">(in[0], \"ptr_" << matlabUniqueName << "\");" << endl; - - // Serialization boilerplate - wrapperFile.oss << " ostringstream out_archive_stream;\n"; - wrapperFile.oss - << " boost::archive::text_oarchive out_archive(out_archive_stream);\n"; - wrapperFile.oss << " out_archive << *obj;\n"; - wrapperFile.oss << " out[0] = wrap< string >(out_archive_stream.str());\n"; - - // finish - wrapperFile.oss << "}\n"; - - // Generate code for matlab function -// function varargout string_serialize(this, varargin) -// % STRING_SERIALIZE usage: string_serialize() : returns string -// % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html -// if length(varargin) == 0 -// varargout{1} = geometry_wrapper(15, this, varargin{:}); -// else -// error('Arguments do not match any overload of function Point3.string_serialize'); -// end -// end - - proxyFile.oss - << " function varargout = string_serialize(this, varargin)\n"; - proxyFile.oss - << " % STRING_SERIALIZE usage: string_serialize() : returns string\n"; - proxyFile.oss - << " % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n"; - proxyFile.oss << " if length(varargin) == 0\n"; - proxyFile.oss << " varargout{1} = " << wrapperName << "(" - << boost::lexical_cast(serialize_id) << ", this, varargin{:});\n"; - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << ".string_serialize');\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " end\n\n"; - - // Generate code for matlab save function -// function sobj = saveobj(obj) -// % SAVEOBJ Saves the object to a matlab-readable format -// sobj = obj.string_serialize(); -// end - - proxyFile.oss << " function sobj = saveobj(obj)\n"; - proxyFile.oss - << " % SAVEOBJ Saves the object to a matlab-readable format\n"; - proxyFile.oss << " sobj = obj.string_serialize();\n"; - proxyFile.oss << " end\n"; -} - -/* ************************************************************************* */ -void Class::deserialization_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - //void Point3_string_deserialize_18(int nargout, mxArray *out[], int nargin, const mxArray *in[]) - //{ - // typedef boost::shared_ptr Shared; - // checkArguments("Point3.string_deserialize",nargout,nargin,1); - // string serialized = unwrap< string >(in[0]); - // istringstream 
in_archive_stream(serialized); - // boost::archive::text_iarchive in_archive(in_archive_stream); - // Shared output(new Point3(0,0,0)); - // in_archive >> *output; - // out[0] = wrap_shared_ptr(output,"Point3", false); - //} - int deserialize_id = functionNames.size(); - const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppClassName = qualifiedName("::"); - const string wrapFunctionNameDeserialize = matlabUniqueName - + "_string_deserialize_" + boost::lexical_cast(deserialize_id); - functionNames.push_back(wrapFunctionNameDeserialize); - - // call - wrapperFile.oss << "void " << wrapFunctionNameDeserialize - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << cppClassName - << "> Shared;" << endl; - - // check arguments - for deserialize, 1 string argument - wrapperFile.oss << " checkArguments(\"" << matlabUniqueName - << ".string_deserialize\",nargout,nargin,1);\n"; - - // string argument with deserialization boilerplate - wrapperFile.oss << " string serialized = unwrap< string >(in[0]);\n"; - wrapperFile.oss << " istringstream in_archive_stream(serialized);\n"; - wrapperFile.oss - << " boost::archive::text_iarchive in_archive(in_archive_stream);\n"; - wrapperFile.oss << " Shared output(new " << cppClassName << "());\n"; - wrapperFile.oss << " in_archive >> *output;\n"; - wrapperFile.oss << " out[0] = wrap_shared_ptr(output,\"" << matlabQualName - << "\", false);\n"; - wrapperFile.oss << "}\n"; - - // Generate matlab function -// function varargout = string_deserialize(varargin) -// % STRING_DESERIALIZE usage: string_deserialize() : returns Point3 -// % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html -// if length(varargin) == 1 -// varargout{1} = geometry_wrapper(18, varargin{:}); -// else -// error('Arguments do not match any overload of function Point3.string_deserialize'); -// end -// end - - proxyFile.oss << " function varargout = string_deserialize(varargin)\n"; - proxyFile.oss - << " % STRING_DESERIALIZE usage: string_deserialize() : returns " - << matlabQualName << "\n"; - proxyFile.oss - << " % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n"; - proxyFile.oss << " if length(varargin) == 1\n"; - proxyFile.oss << " varargout{1} = " << wrapperName << "(" - << boost::lexical_cast(deserialize_id) << ", varargin{:});\n"; - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << ".string_deserialize');\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " end\n\n"; - - // Generate matlab load function -// function obj = loadobj(sobj) -// % LOADOBJ Saves the object to a matlab-readable format -// obj = Point3.string_deserialize(sobj); -// end - - proxyFile.oss << " function obj = loadobj(sobj)\n"; - proxyFile.oss - << " % LOADOBJ Saves the object to a matlab-readable format\n"; - proxyFile.oss << " obj = " << matlabQualName - << ".string_deserialize(sobj);\n"; - proxyFile.oss << " end" << endl; -} - -/* ************************************************************************* */ -string Class::getSerializationExport() const { - //BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsamSharedDiagonal"); - return "BOOST_CLASS_EXPORT_GUID(" + qualifiedName("::") + ", \"" - + qualifiedName() + "\");"; -} - -/* 
************************************************************************* */ -void Class::python_wrapper(FileWriter& wrapperFile) const { - wrapperFile.oss << "class_<" << name() << ">(\"" << name() << "\")\n"; - constructor.python_wrapper(wrapperFile, name()); - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.python_wrapper(wrapperFile, name()); - for(const Method& m: methods_ | boost::adaptors::map_values) - m.python_wrapper(wrapperFile, name()); - wrapperFile.oss << ";\n\n"; -} - -/* ************************************************************************* */ -void Class::emit_cython_pxd(FileWriter& pxdFile) const { - pxdFile.oss << "cdef extern from \"" << includeFile << "\""; - string ns = qualifiedNamespaces("::"); - if (!ns.empty()) - pxdFile.oss << " namespace \"" << ns << "\""; - pxdFile.oss << ":" << endl; - pxdFile.oss << " cdef cppclass " << pxdClassName() << " \"" << qualifiedName("::") << "\""; - if (templateArgs.size()>0) { - pxdFile.oss << "["; - for(size_t i = 0; ipxdClassName() << ")"; - pxdFile.oss << ":\n"; - - constructor.emit_cython_pxd(pxdFile, *this); - if (constructor.nrOverloads()>0) pxdFile.oss << "\n"; - - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.emit_cython_pxd(pxdFile, *this); - if (static_methods.size()>0) pxdFile.oss << "\n"; - - for(const Method& m: nontemplateMethods_ | boost::adaptors::map_values) - m.emit_cython_pxd(pxdFile, *this); - - for(const TemplateMethod& m: templateMethods_ | boost::adaptors::map_values) - m.emit_cython_pxd(pxdFile, *this); - size_t numMethods = constructor.nrOverloads() + static_methods.size() + - methods_.size() + templateMethods_.size(); - if (numMethods == 0) - pxdFile.oss << " pass\n"; -} -/* ************************************************************************* */ -void Class::emit_cython_wrapper_pxd(FileWriter& pxdFile) const { - pxdFile.oss << "\ncdef class " << pyxClassName(); - if (getParent()) - pxdFile.oss << "(" << getParent()->pyxClassName() << ")"; - pxdFile.oss << ":\n"; - pxdFile.oss << " cdef " << shared_pxd_class_in_pyx() << " " - << shared_pxd_obj_in_pyx() << "\n"; - // cyCreateFromShared - pxdFile.oss << " @staticmethod\n"; - pxdFile.oss << " cdef " << pyxClassName() << " cyCreateFromShared(const " - << shared_pxd_class_in_pyx() << "& other)\n"; - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.emit_cython_wrapper_pxd(pxdFile, *this); - if (static_methods.size()>0) pxdFile.oss << "\n"; -} - -/* ************************************************************************* */ -void Class::pyxInitParentObj(FileWriter& pyxFile, const std::string& pyObj, - const std::string& cySharedObj, - const std::vector& allClasses) const { - if (parentClass) { - pyxFile.oss << pyObj << "." 
<< parentClass->shared_pxd_obj_in_pyx() << " = " - << "<" << parentClass->shared_pxd_class_in_pyx() << ">(" - << cySharedObj << ")\n"; - // Find the parent class with name "parentClass" and point its cython obj - // to the same pointer - auto parent_it = find_if(allClasses.begin(), allClasses.end(), - [this](const Class& cls) { - return cls.pxdClassName() == - this->parentClass->pxdClassName(); - }); - if (parent_it == allClasses.end()) { - cerr << "Can't find parent class: " << parentClass->pxdClassName(); - throw std::runtime_error("Parent class not found!"); - } - parent_it->pyxInitParentObj(pyxFile, pyObj, cySharedObj, allClasses); - } -} - -/* ************************************************************************* */ -void Class::pyxDynamicCast(FileWriter& pyxFile, const Class& curLevel, - const std::vector& allClasses) const { - std::string me = this->pyxClassName(), sharedMe = this->shared_pxd_class_in_pyx(); - if (curLevel.parentClass) { - std::string parent = curLevel.parentClass->pyxClassName(), - parentObj = curLevel.parentClass->shared_pxd_obj_in_pyx(), - parentCythonClass = curLevel.parentClass->pxd_class_in_pyx(); - pyxFile.oss << "def dynamic_cast_" << me << "_" << parent << "(" << parent - << " parent):\n"; - pyxFile.oss << " try:\n"; - pyxFile.oss << " return " << me << ".cyCreateFromShared(<" << sharedMe - << ">dynamic_pointer_cast[" << pxd_class_in_pyx() << "," - << parentCythonClass << "](parent." << parentObj - << "))\n"; - pyxFile.oss << " except:\n"; - pyxFile.oss << " raise TypeError('dynamic cast failed!')\n"; - // Move up higher to one level: Find the parent class with name "parentClass" - auto parent_it = find_if(allClasses.begin(), allClasses.end(), - [&curLevel](const Class& cls) { - return cls.pxdClassName() == - curLevel.parentClass->pxdClassName(); - }); - if (parent_it == allClasses.end()) { - cerr << "Can't find parent class: " << parentClass->pxdClassName(); - throw std::runtime_error("Parent class not found!"); - } - pyxDynamicCast(pyxFile, *parent_it, allClasses); - } -} - -/* ************************************************************************* */ -void Class::emit_cython_pyx(FileWriter& pyxFile, const std::vector& allClasses) const { - pyxFile.oss << "cdef class " << pyxClassName(); - if (parentClass) pyxFile.oss << "(" << parentClass->pyxClassName() << ")"; - pyxFile.oss << ":\n"; - - // __init___ - pyxFile.oss << " def __init__(self, *args, **kwargs):\n"; - pyxFile.oss << " cdef list __params\n"; - pyxFile.oss << " self." << shared_pxd_obj_in_pyx() << " = " << shared_pxd_class_in_pyx() << "()\n"; - pyxFile.oss << " if len(args)==0 and len(kwargs)==1 and kwargs.has_key('cyCreateFromShared'):\n return\n"; - - // Constructors - constructor.emit_cython_pyx(pyxFile, *this); - pyxFile.oss << " if (self." << shared_pxd_obj_in_pyx() << ".use_count()==0):\n"; - pyxFile.oss << " raise TypeError('" << pyxClassName() - << " construction failed!')\n"; - pyxInitParentObj(pyxFile, " self", "self." + shared_pxd_obj_in_pyx(), allClasses); - pyxFile.oss << "\n"; - - // cyCreateFromShared - pyxFile.oss << " @staticmethod\n"; - pyxFile.oss << " cdef " << pyxClassName() << " cyCreateFromShared(const " - << shared_pxd_class_in_pyx() << "& other):\n" - << " if other.get() == NULL:\n" - << " raise RuntimeError('Cannot create object from a nullptr!')\n" - << " cdef " << pyxClassName() << " return_value = " << pyxClassName() << "(cyCreateFromShared=True)\n" - << " return_value." 
<< shared_pxd_obj_in_pyx() << " = other\n"; - pyxInitParentObj(pyxFile, " return_value", "other", allClasses); - pyxFile.oss << " return return_value" << "\n\n"; - - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.emit_cython_pyx(pyxFile, *this); - if (static_methods.size()>0) pyxFile.oss << "\n"; - - for(const Method& m: methods_ | boost::adaptors::map_values) - m.emit_cython_pyx(pyxFile, *this); - - pyxDynamicCast(pyxFile, *this, allClasses); - - pyxFile.oss << "\n\n"; -} - -/* ************************************************************************* */ diff --git a/wrap/Class.h b/wrap/Class.h deleted file mode 100644 index 3df37fe67..000000000 --- a/wrap/Class.h +++ /dev/null @@ -1,315 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.h - * @brief describe the C++ class that is being wrapped - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include "spirit.h" -#include "Template.h" -#include "Constructor.h" -#include "Deconstructor.h" -#include "Method.h" -#include "StaticMethod.h" -#include "TemplateMethod.h" -#include "TypeAttributesTable.h" - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-variable" -#endif -#include -#include -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif - -namespace bl = boost::lambda; - -#include -#include - -#include -#include - -namespace wrap { - -/// Class has name, constructors, methods -class Class: public Qualified { - -public: - typedef const std::string& Str; - typedef std::map Methods; - typedef std::map StaticMethods; - typedef std::map TemplateMethods; - -private: - - boost::optional parentClass; ///< The *single* parent - Methods methods_; ///< Class methods, including all expanded/instantiated template methods -- to be serialized to matlab and Python classes in Cython pyx - Methods nontemplateMethods_; ///< only nontemplate methods -- to be serialized into Cython pxd - TemplateMethods templateMethods_; ///< only template methods -- to be serialized into Cython pxd - // Method& mutableMethod(Str key); - -public: - - StaticMethods static_methods; ///< Static methods - - // Then the instance variables are set directly by the Module constructor - std::vector templateArgs; ///< Template arguments - std::string typedefName; ///< The name to typedef *from*, if this class is actually a typedef, i.e. typedef [typedefName] [name] - std::vector templateInstTypeList; ///< the original typelist used to instantiate this class from a template. - ///< Empty if it's not an instantiation. Needed for template classes in Cython pxd. - boost::optional templateClass = boost::none; ///< qualified name of the original template class from which this class was instantiated. - ///< boost::none if not an instantiation. Needed for template classes in Cython pxd. 
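For orientation, the emitters removed above (Class::emit_cython_pxd, Class::emit_cython_pyx, pyxInitParentObj, pyxDynamicCast) write text of roughly the following shape. This is a sketch assembled from the string literals visible in the deleted code, shown for a hypothetical ExampleDerived class deriving from ExampleBase; the C-prefixed pxd names, the shared_ptr[...] spellings, and the trailing-underscore member names stand in for whatever pxdClassName(), shared_pxd_class_in_pyx(), and shared_pxd_obj_in_pyx() actually return, and the snippet is illustrative only, not runnable on its own.

    # --- text emitted into the .pxd (Class::emit_cython_pxd) ---
    cdef extern from "gtsam/example.h" namespace "gtsam":            # includeFile / namespaces are hypothetical
        cdef cppclass CExampleDerived "gtsam::ExampleDerived"(CExampleBase):
            CExampleDerived() except +                               # one line per constructor overload
            # ... methods and static methods follow; "pass" is emitted if the class has none

    # --- text emitted into the .pyx (Class::emit_cython_pyx) ---
    cdef class ExampleDerived(ExampleBase):
        def __init__(self, *args, **kwargs):
            cdef list __params
            self.CExampleDerived_ = shared_ptr[CExampleDerived]()
            if len(args)==0 and len(kwargs)==1 and kwargs.has_key('cyCreateFromShared'):  # Python-2 idiom, as emitted
                return
            # ... one try/except block per constructor overload (Constructor::emit_cython_pyx) ...
            if (self.CExampleDerived_.use_count()==0):
                raise TypeError('ExampleDerived construction failed!')
            # pyxInitParentObj: point the parent's shared pointer at the same object, recursively up the hierarchy
            self.CExampleBase_ = <shared_ptr[CExampleBase]>(self.CExampleDerived_)

        @staticmethod
        cdef ExampleDerived cyCreateFromShared(const shared_ptr[CExampleDerived]& other):
            if other.get() == NULL:
                raise RuntimeError('Cannot create object from a nullptr!')
            cdef ExampleDerived return_value = ExampleDerived(cyCreateFromShared=True)
            return_value.CExampleDerived_ = other
            return_value.CExampleBase_ = <shared_ptr[CExampleBase]>(other)
            return return_value

    # pyxDynamicCast: one module-level helper per (class, ancestor) pair
    def dynamic_cast_ExampleDerived_ExampleBase(ExampleBase parent):
        try:
            return ExampleDerived.cyCreateFromShared(
                <shared_ptr[CExampleDerived]>dynamic_pointer_cast[CExampleDerived, CExampleBase](parent.CExampleBase_))
        except:
            raise TypeError('dynamic cast failed!')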
- bool isVirtual; ///< Whether the class is part of a virtual inheritance chain - bool isSerializable; ///< Whether we can use boost.serialization to serialize the class - creates exports - bool hasSerialization; ///< Whether we should create the serialization functions - Constructor constructor; ///< Class constructors - Deconstructor deconstructor; ///< Deconstructor to deallocate C++ object - bool verbose_; ///< verbose flag - std::string includeFile; - - /// Constructor creates an empty class - Class(bool verbose = true) : - parentClass(boost::none), isVirtual(false), isSerializable(false), hasSerialization( - false), deconstructor(verbose), verbose_(verbose) { - } - - Class(const std::string& name, bool verbose = true) - : Qualified(name, Qualified::Category::CLASS), - parentClass(boost::none), - isVirtual(false), - isSerializable(false), - hasSerialization(false), - deconstructor(verbose), - verbose_(verbose) {} - - void assignParent(const Qualified& parent); - - boost::optional qualifiedParent() const; - boost::optional getParent() const { return parentClass; } - - size_t nrMethods() const { - return methods_.size(); - } - - const Method& method(Str key) const; - - bool exists(Str name) const { - return methods_.find(name) != methods_.end(); - } - - // And finally MATLAB code is emitted, methods below called by Module::matlab_code - void matlab_proxy(Str toolboxPath, Str wrapperName, - const TypeAttributesTable& typeAttributes, FileWriter& wrapperFile, - std::vector& functionNames) const; ///< emit proxy class - - Class expandTemplate(const TemplateSubstitution& ts) const; - - std::vector expandTemplate(Str templateArg, - const std::vector& instantiations) const; - - // Create new classes with integer template arguments - std::vector expandTemplate(Str templateArg, - const std::vector& integers) const; - - /// Add potentially overloaded, potentially templated method - void addMethod(bool verbose, bool is_const, Str methodName, - const ArgumentList& argumentList, const ReturnValue& returnValue, - const Template& tmplate); - - /// Post-process classes for serialization markers - void erase_serialization(); // non-const ! - void erase_serialization(Methods& methods); // non-const ! - - /// verify all of the function arguments - void verifyAll(std::vector& functionNames, - bool& hasSerialiable) const; - - void appendInheritedMethods(const Class& cls, - const std::vector& classes); - - void removeInheritedNontemplateMethods(std::vector& classes); - - /// The typedef line for this class, if this class is a typedef, otherwise returns an empty string. 
- std::string getTypedef() const; - - /// Returns the string for an export flag - std::string getSerializationExport() const; - - /// Creates a member function that performs serialization - void serialization_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str wrapperName, std::vector& functionNames) const; - - /// Creates a static member function that performs deserialization - void deserialization_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str wrapperName, std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile) const; - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile) const; - void emit_cython_wrapper_pxd(FileWriter& pxdFile) const; - void emit_cython_pyx(FileWriter& pyxFile, - const std::vector& allClasses) const; - void pyxInitParentObj(FileWriter& pyxFile, const std::string& pyObj, - const std::string& cySharedObj, - const std::vector& allClasses) const; - void pyxDynamicCast(FileWriter& pyxFile, const Class& curLevel, - const std::vector& allClasses) const; - - friend std::ostream& operator<<(std::ostream& os, const Class& cls) { - os << "class " << cls.name() << "{\n"; - os << cls.constructor << ";\n"; - for(const StaticMethod& m: cls.static_methods | boost::adaptors::map_values) - os << m << ";\n"; - for(const Method& m: cls.methods_ | boost::adaptors::map_values) - os << m << ";\n"; - os << "};" << std::endl; - return os; - } - -private: - - void pointer_constructor_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - std::vector& functionNames) const; - - void comment_fragment(FileWriter& proxyFile) const; -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ClassGrammar: public classic::grammar { - - Class& cls_; ///< successful parse will be placed in here - Template& template_; ///< result needs to be visible outside - - /// Construct type grammar and specify where result is placed - ClassGrammar(Class& cls, Template& t) : - cls_(cls), template_(t) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - using BasicRules::name_p; - using BasicRules::className_p; - using BasicRules::comments_p; - - // NOTE: allows for pointers to all types - ArgumentList args; - ArgumentListGrammar argumentList_g; - - Constructor constructor0, constructor; - - ReturnValue retVal0, retVal; - ReturnValueGrammar returnValue_g; - - Template methodTemplate; - TemplateGrammar methodTemplate_g, classTemplate_g; - - std::string methodName; - bool isConst, T, F; - - // Parent class - Qualified possibleParent; - TypeGrammar classParent_g; - - classic::rule constructor_p, methodName_p, method_p, - staticMethodName_p, static_method_p, templateList_p, classParent_p, - functions_p, class_p; - - definition(ClassGrammar const& self) : - argumentList_g(args), returnValue_g(retVal), // - methodTemplate_g(methodTemplate), classTemplate_g(self.template_), // - T(true), F(false), classParent_g(possibleParent) { - - using namespace classic; - bool verbose = false; // TODO - - // ConstructorGrammar - constructor_p = (className_p >> argumentList_g >> ';' >> !comments_p) // - [bl::bind(&Constructor::push_back, bl::var(constructor), - bl::var(args))] // - [clear_a(args)]; - - // MethodGrammar - methodName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - // gtsam::Values retract(const gtsam::VectorValues& delta) const; - method_p = 
!methodTemplate_g - >> (returnValue_g >> methodName_p[assign_a(methodName)] - >> argumentList_g >> !str_p("const")[assign_a(isConst, T)] >> ';' - >> *comments_p) // - [bl::bind(&Class::addMethod, bl::var(self.cls_), verbose, - bl::var(isConst), bl::var(methodName), bl::var(args), - bl::var(retVal), bl::var(methodTemplate))] // - [assign_a(retVal, retVal0)][clear_a(args)] // - [clear_a(methodTemplate)][assign_a(isConst, F)]; - - // StaticMethodGrammar - staticMethodName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - static_method_p = (str_p("static") >> returnValue_g - >> staticMethodName_p[assign_a(methodName)] >> argumentList_g >> ';' - >> *comments_p) // - [bl::bind(&StaticMethod::addOverload, - bl::var(self.cls_.static_methods)[bl::var(methodName)], - bl::var(methodName), bl::var(args), bl::var(retVal), boost::none, - verbose)] // - [assign_a(retVal, retVal0)][clear_a(args)]; - - // template - templateList_p = (str_p("template") >> '<' - >> name_p[push_back_a(self.cls_.templateArgs)] - >> *(',' >> name_p[push_back_a(self.cls_.templateArgs)]) >> '>'); - - // parse a full class - classParent_p = (':' >> classParent_g >> '{') // - [bl::bind(&Class::assignParent, bl::var(self.cls_), - bl::var(possibleParent))][clear_a(possibleParent)]; - - functions_p = constructor_p | method_p | static_method_p; - - // parse a full class - class_p = (!(classTemplate_g[push_back_a(self.cls_.templateArgs, - self.template_.argName())] | templateList_p) - >> !(str_p("virtual")[assign_a(self.cls_.isVirtual, T)]) - >> str_p("class") >> className_p[assign_a(self.cls_.name_)] - >> (classParent_p | '{') >> // - *(functions_p | comments_p) >> str_p("};")) // - [bl::bind(&Constructor::initializeOrCheck, bl::var(constructor), - bl::var(self.cls_.name_), boost::none, verbose)][assign_a( - self.cls_.constructor, constructor)] // - [assign_a(self.cls_.deconstructor.name, self.cls_.name_)] // - [assign_a(constructor, constructor0)]; - } - - classic::rule const& start() const { - return class_p; - } - - }; -}; -// ClassGrammar - -}// \namespace wrap - diff --git a/wrap/Constructor.cpp b/wrap/Constructor.cpp deleted file mode 100644 index 74719b289..000000000 --- a/wrap/Constructor.cpp +++ /dev/null @@ -1,160 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Constructor.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include -#include -#include - -#include - -#include "utilities.h" -#include "Constructor.h" -#include "Class.h" - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -string Constructor::matlab_wrapper_name(Str className) const { - string str = "new_" + className; - return str; -} - -/* ************************************************************************* */ -void Constructor::proxy_fragment(FileWriter& file, - const std::string& wrapperName, bool hasParent, - const int id, const ArgumentList args) const { - size_t nrArgs = args.size(); - // check for number of arguments... 
- file.oss << " elseif nargin == " << nrArgs; - if (nrArgs > 0) file.oss << " && "; - // ...and their types - bool first = true; - for (size_t i = 0; i < nrArgs; i++) { - if (!first) file.oss << " && "; - file.oss << "isa(varargin{" << i + 1 << "},'" << args[i].matlabClass(".") - << "')"; - first = false; - } - // emit code for calling constructor - if (hasParent) - file.oss << "\n [ my_ptr, base_ptr ] = "; - else - file.oss << "\n my_ptr = "; - file.oss << wrapperName << "(" << id; - // emit constructor arguments - for (size_t i = 0; i < nrArgs; i++) { - file.oss << ", "; - file.oss << "varargin{" << i + 1 << "}"; - } - file.oss << ");\n"; -} - -/* ************************************************************************* */ -string Constructor::wrapper_fragment(FileWriter& file, Str cppClassName, - Str matlabUniqueName, - boost::optional cppBaseClassName, - int id, const ArgumentList& al) const { - const string wrapFunctionName = - matlabUniqueName + "_constructor_" + boost::lexical_cast(id); - - file.oss << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])" - << endl; - file.oss << "{\n"; - file.oss << " mexAtExit(&_deleteAllObjects);\n"; - // Typedef boost::shared_ptr - file.oss << " typedef boost::shared_ptr<" << cppClassName << "> Shared;\n"; - file.oss << "\n"; - - // Check to see if there will be any arguments and remove {} for consiseness - if (al.size() > 0) al.matlab_unwrap(file); // unwrap arguments - file.oss << " Shared *self = new Shared(new " << cppClassName << "(" - << al.names() << "));" << endl; - file.oss << " collector_" << matlabUniqueName << ".insert(self);\n"; - - if (verbose_) - file.oss << " std::cout << \"constructed \" << self << std::endl;" << endl; - file.oss - << " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);" - << endl; - file.oss << " *reinterpret_cast (mxGetData(out[0])) = self;" - << endl; - - // If we have a base class, return the base class pointer (MATLAB will call - // the base class collectorInsertAndMakeBase to add this to the collector and - // recurse the heirarchy) - if (cppBaseClassName) { - file.oss << "\n"; - file.oss << " typedef boost::shared_ptr<" << *cppBaseClassName - << "> SharedBase;\n"; - file.oss << " out[1] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, " - "mxREAL);\n"; - file.oss << " *reinterpret_cast(mxGetData(out[1])) = new " - "SharedBase(*self);\n"; - } - - file.oss << "}" << endl; - - return wrapFunctionName; -} - -/* ************************************************************************* */ -void Constructor::python_wrapper(FileWriter& wrapperFile, Str className) const { - wrapperFile.oss << " .def(\"" << name_ << "\", &" << className - << "::" << name_ << ");\n"; -} - -/* ************************************************************************* */ -bool Constructor::hasDefaultConstructor() const { - for (size_t i = 0; i < nrOverloads(); i++) { - if (argumentList(i).size() == 0) return true; - } - return false; -} - -/* ************************************************************************* */ -void Constructor::emit_cython_pxd(FileWriter& pxdFile, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); i++) { - ArgumentList args = argumentList(i); - - // generate the constructor - pxdFile.oss << " " << cls.pxdClassName() << "("; - args.emit_cython_pxd(pxdFile, cls.pxdClassName(), cls.templateArgs); - pxdFile.oss << ") " << "except +\n"; - } -} - -/* ************************************************************************* */ -void 
Constructor::emit_cython_pyx(FileWriter& pyxFile, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); i++) { - ArgumentList args = argumentList(i); - pyxFile.oss << " try:\n"; - pyxFile.oss << pyx_resolveOverloadParams(args, true, 3); - pyxFile.oss - << argumentList(i).pyx_convertEigenTypeAndStorageOrder(" "); - - pyxFile.oss << " self." << cls.shared_pxd_obj_in_pyx() << " = " - << cls.shared_pxd_class_in_pyx() << "(new " << cls.pxd_class_in_pyx() - << "(" << args.pyx_asParams() << "))\n"; - pyxFile.oss << " except (AssertionError, ValueError):\n"; - pyxFile.oss << " pass\n"; - } -} - -/* ************************************************************************* */ diff --git a/wrap/Constructor.h b/wrap/Constructor.h deleted file mode 100644 index 172cd24a4..000000000 --- a/wrap/Constructor.h +++ /dev/null @@ -1,99 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Constructor.h - * @brief class describing a constructor + code generation - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "OverloadedFunction.h" -#include -#include -#include - -namespace wrap { - -// Forward declaration -class Class; - -// Constructor class -struct Constructor: public OverloadedFunction { - - typedef const std::string& Str; - - /// Constructor creates an empty class - Constructor(bool verbose = false) { - verbose_ = verbose; - } - - Constructor expandTemplate(const TemplateSubstitution& ts) const { - Constructor inst = *this; - inst.argLists_ = expandArgumentListsTemplate(ts); - inst.name_ = ts.expandedClassName(); - return inst; - } - - /// return true if the default constructor exists - bool hasDefaultConstructor() const; - - // MATLAB code generation - // toolboxPath is main toolbox directory, e.g., ../matlab - // classFile is class proxy file, e.g., ../matlab/@Point2/Point2.m - - /// wrapper name - std::string matlab_wrapper_name(Str className) const; - - void comment_fragment(FileWriter& proxyFile) const { - if (nrOverloads() > 0) - proxyFile.oss << "%\n%-------Constructors-------\n"; - for (size_t i = 0; i < nrOverloads(); i++) { - proxyFile.oss << "%"; - argumentList(i).emit_prototype(proxyFile, name_); - proxyFile.oss << "\n"; - } - } - - /** - * Create fragment to select constructor in proxy class, e.g., - * if nargin == 2, obj.self = new_Pose3_RP(varargin{1},varargin{2}); end - */ - void proxy_fragment(FileWriter& file, Str wrapperName, bool hasParent, - const int id, const ArgumentList args) const; - - /// cpp wrapper - std::string wrapper_fragment(FileWriter& file, Str cppClassName, - Str matlabUniqueName, boost::optional cppBaseClassName, int id, - const ArgumentList& al) const; - - /// constructor function - void generate_construct(FileWriter& file, Str cppClassName, - std::vector& args_list) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile, Str className) const; - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile, const Class& cls) const; - void emit_cython_pyx(FileWriter& pyxFile, const Class& cls) const; - - friend std::ostream& operator<<(std::ostream& os, const Constructor& m) { - for (size_t i = 0; i < m.nrOverloads(); 
i++) - os << m.name_ << m.argLists_[i]; - return os; - } - -}; - -} // \namespace wrap diff --git a/wrap/Deconstructor.cpp b/wrap/Deconstructor.cpp deleted file mode 100644 index 7bb366e3f..000000000 --- a/wrap/Deconstructor.cpp +++ /dev/null @@ -1,73 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Deconstructor.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include -#include - -#include - -#include "utilities.h" -#include "Deconstructor.h" - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -string Deconstructor::matlab_wrapper_name(const string& className) const { - string str = "delete_" + className; - return str; -} - -/* ************************************************************************* */ -void Deconstructor::proxy_fragment(FileWriter& file, - const std::string& wrapperName, - const std::string& matlabUniqueName, int id) const { - - file.oss << " function delete(obj)\n"; - file.oss << " " << wrapperName << "(" << id << ", obj.ptr_" << matlabUniqueName << ");\n"; - file.oss << " end\n"; -} - -/* ************************************************************************* */ -string Deconstructor::wrapper_fragment(FileWriter& file, - const string& cppClassName, - const string& matlabUniqueName, - int id) const { - - const string matlabName = matlab_wrapper_name(matlabUniqueName); - - const string wrapFunctionName = matlabUniqueName + "_deconstructor_" + boost::lexical_cast(id); - - file.oss << "void " << wrapFunctionName << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])" << endl; - file.oss << "{" << endl; - file.oss << " typedef boost::shared_ptr<" << cppClassName << "> Shared;" << endl; - //Deconstructor takes 1 arg, the mxArray obj - file.oss << " checkArguments(\"" << matlabName << "\",nargout,nargin," << "1" << ");" << endl; - file.oss << " Shared *self = *reinterpret_cast(mxGetData(in[0]));\n"; - file.oss << " Collector_" << matlabUniqueName << "::iterator item;\n"; - file.oss << " item = collector_" << matlabUniqueName << ".find(self);\n"; - file.oss << " if(item != collector_" << matlabUniqueName << ".end()) {\n"; - file.oss << " delete self;\n"; - file.oss << " collector_" << matlabUniqueName << ".erase(item);\n"; - file.oss << " }\n"; - file.oss << "}" << endl; - - return wrapFunctionName; -} - -/* ************************************************************************* */ diff --git a/wrap/Deconstructor.h b/wrap/Deconstructor.h deleted file mode 100644 index ee2f4ea19..000000000 --- a/wrap/Deconstructor.h +++ /dev/null @@ -1,61 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Deconstructor.h - * @brief class describing a constructor + code generation - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include -#include - -#include "Argument.h" - -namespace wrap { - -// Deconstructor class -struct Deconstructor { - - /// Deconstructor creates an empty class - Deconstructor(bool verbose = true) : - verbose_(verbose) { - } - - // Then the instance variables are set directly by the Module deconstructor - std::string name; - bool verbose_; - - // MATLAB code generation - // toolboxPath is main toolbox directory, e.g., ../matlab - // classFile is class proxy file, e.g., ../matlab/@Point2/Point2.m - - /// wrapper name - std::string matlab_wrapper_name(const std::string& className) const; - - /// m-file - void proxy_fragment(FileWriter& file, - const std::string& wrapperName, - const std::string& matlabUniqueName, int id) const; - - /// cpp wrapper - std::string wrapper_fragment(FileWriter& file, - const std::string& cppClassName, - const std::string& matlabUniqueName, - int id) const; -}; - -} // \namespace wrap - diff --git a/wrap/FileWriter.cpp b/wrap/FileWriter.cpp deleted file mode 100644 index c07de0eb0..000000000 --- a/wrap/FileWriter.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/** - * @file FileWriter.cpp - * - * @date Jan 15, 2012 - * @author Alex Cunningham - */ - -#include "FileWriter.h" -#include "utilities.h" - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -FileWriter::FileWriter(const string& filename, bool verbose, - const string& comment_str) : - verbose_(verbose), filename_(filename), comment_str_(comment_str) { -} - -/* ************************************************************************* */ -void FileWriter::emit(bool add_header, bool force_overwrite) const { - if (verbose_) - cerr << "generating " << filename_ << " "; - // read in file if it exists - string existing_contents; - bool file_exists = true; - try { - existing_contents = file_contents(filename_.c_str(), add_header); - } catch (const CantOpenFile& ) { - file_exists = false; - } - - // Only write a file if it is new, an update, or overwrite is forced - string new_contents = oss.str(); - if (force_overwrite || !file_exists || existing_contents != new_contents) { - // Binary to use LF line endings instead of CRLF - ofstream ofs(filename_.c_str(), ios::binary); - if (!ofs) - throw CantOpenFile(filename_); - - // dump in stringstream - ofs << new_contents; - ofs.close(); - } - if (verbose_) - cerr << " ...no update" << endl; -} -/* ************************************************************************* */ - diff --git a/wrap/FileWriter.h b/wrap/FileWriter.h deleted file mode 100644 index 12e033fdf..000000000 --- a/wrap/FileWriter.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * @file FileWriter.h - * - * @brief Wrapper for writing files and avoiding overwriting existing files - * This class wraps a stream object and will check that the file is - * actually different to write the new generated file. 
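The FileWriter contract described here (buffer all generated text in oss, then emit() writes the file only when the content is new or actually changed, in binary mode so LF line endings are preserved) is the reason repeated runs of the generator do not touch up-to-date outputs. The following is a minimal, standalone Python sketch of that write-only-if-changed idea, offered purely as an illustration and not a port of the deleted C++ class; the function name and parameters are invented for the example.

    import io, os

    def emit(filename, new_contents, force_overwrite=False):
        """Write new_contents to filename only if the file is missing, changed, or overwrite is forced."""
        existing = None
        if os.path.exists(filename):
            with io.open(filename, 'r', newline='') as f:      # read without translating line endings
                existing = f.read()
        if force_overwrite or existing is None or existing != new_contents:
            with io.open(filename, 'w', newline='\n') as f:    # force LF, mirroring the binary-mode write above
                f.write(new_contents)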
- * - * @date Jan 15, 2012 - * @author Alex Cunningham - */ - -#pragma once - -#include - -namespace wrap { - -class FileWriter { -protected: - bool verbose_; - std::string filename_; - std::string comment_str_; - -public: - std::ostringstream oss; ///< Primary stream for operating on the file - - /** Create a writer with a filename and delimiter for the header comment */ - FileWriter(const std::string& filename, bool verbose, const std::string& comment_str); - - /** Writes the contents of the stringstream to the file, checking if actually new */ - void emit(bool add_header, bool force=false) const; - -}; - -} // \namespace wrap diff --git a/wrap/ForwardDeclaration.h b/wrap/ForwardDeclaration.h deleted file mode 100644 index 190387ecc..000000000 --- a/wrap/ForwardDeclaration.h +++ /dev/null @@ -1,36 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.h - * @brief describe the C++ class that is being wrapped - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include - -namespace wrap { - - class Class; - - struct ForwardDeclaration { - Class cls; - bool isVirtual; - ForwardDeclaration() : isVirtual(false) {} - explicit ForwardDeclaration(const std::string& s) : cls(s), isVirtual(false) {} - std::string name() const { return cls.qualifiedName("::"); } - }; - -} diff --git a/wrap/FullyOverloadedFunction.cpp b/wrap/FullyOverloadedFunction.cpp deleted file mode 100644 index 4db4c8713..000000000 --- a/wrap/FullyOverloadedFunction.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include "FullyOverloadedFunction.h" - -using namespace std; - -namespace wrap { -const std::array FullyOverloadedFunction::pythonKeywords{ - {"print", "lambda"}}; - -/* ************************************************************************* */ -std::string FullyOverloadedFunction::pyx_functionCall( - const std::string& caller, - const std::string& funcName, size_t iOverload) const { - - string ret; - if (!returnVals_[iOverload].isPair && !returnVals_[iOverload].type1.isPtr && - returnVals_[iOverload].type1.isNonBasicType()) { - ret = returnVals_[iOverload].type1.make_shared_pxd_class_in_pyx() + "("; - } - - // actual function call ... - if (!caller.empty()) ret += caller + "."; - ret += funcName; - if (templateArgValue_) ret += "[" + templateArgValue_->pxd_class_in_pyx() + "]"; - //... with argument list - ret += "(" + argumentList(iOverload).pyx_asParams() + ")"; - - if (!returnVals_[iOverload].isPair && !returnVals_[iOverload].type1.isPtr && - returnVals_[iOverload].type1.isNonBasicType()) - ret += ")"; - - return ret; -} - -} diff --git a/wrap/FullyOverloadedFunction.h b/wrap/FullyOverloadedFunction.h deleted file mode 100644 index 6b40f6a70..000000000 --- a/wrap/FullyOverloadedFunction.h +++ /dev/null @@ -1,147 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file FullyOverloadedFunction.h - * @brief Function that can be fully overloaded: arguments and return values - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "OverloadedFunction.h" -#include - -namespace wrap { - -/** - * Signature Overload (including return value) - */ -class SignatureOverloads: public ArgumentOverloads { - -public: - - std::vector returnVals_; - -public: - - const ReturnValue& returnValue(size_t i) const { - return returnVals_.at(i); - } - - void push_back(const ArgumentList& args, const ReturnValue& retVal) { - argLists_.push_back(args); - returnVals_.push_back(retVal); - } - - void verifyReturnTypes(const std::vector& validtypes, - const std::string& s) const { - for(const ReturnValue& retval: returnVals_) { - retval.type1.verify(validtypes, s); - if (retval.isPair) - retval.type2.verify(validtypes, s); - } - } - - // TODO use transform ? - std::vector expandReturnValuesTemplate( - const TemplateSubstitution& ts) const { - std::vector result; - for(const ReturnValue& retVal: returnVals_) { - ReturnValue instRetVal = retVal.expandTemplate(ts); - result.push_back(instRetVal); - } - return result; - } - - /// Expand templates, imperative ! - void expandTemplate(const TemplateSubstitution& ts) { - // substitute template in arguments - argLists_ = expandArgumentListsTemplate(ts); - // do the same for return types - returnVals_ = expandReturnValuesTemplate(ts); - } - - // emit a list of comments, one for each overload - void usage_fragment(FileWriter& proxyFile, const std::string& name) const { - unsigned int argLCount = 0; - for(ArgumentList argList: argLists_) { - argList.emit_prototype(proxyFile, name); - if (argLCount != nrOverloads() - 1) - proxyFile.oss << ", "; - else - proxyFile.oss << " : returns " << returnValue(0).returnType() - << std::endl; - argLCount++; - } - } - - // emit a list of comments, one for each overload - void comment_fragment(FileWriter& proxyFile, const std::string& name) const { - size_t i = 0; - for(ArgumentList argList: argLists_) { - proxyFile.oss << "%"; - argList.emit_prototype(proxyFile, name); - proxyFile.oss << " : returns " << returnVals_[i++].returnType() - << std::endl; - } - } - - friend std::ostream& operator<<(std::ostream& os, - const SignatureOverloads& overloads) { - for (size_t i = 0; i < overloads.nrOverloads(); i++) - os << overloads.returnVals_[i] << overloads.argLists_[i] << std::endl; - return os; - } - -}; - -class FullyOverloadedFunction: public Function, public SignatureOverloads { - -public: - - bool addOverload(const std::string& name, const ArgumentList& args, - const ReturnValue& retVal, boost::optional instName = - boost::none, bool verbose = false) { - bool first = initializeOrCheck(name, instName, verbose); - SignatureOverloads::push_back(args, retVal); - return first; - } - - // emit cython pyx function call - std::string pyx_functionCall(const std::string& caller, const std::string& funcName, - size_t iOverload) const; - - /// Cython: Rename functions which names are python keywords - static const std::array pythonKeywords; - static std::string pyRename(const std::string& name) { - if (std::find(pythonKeywords.begin(), pythonKeywords.end(), name) == - pythonKeywords.end()) - return name; - else - return name + "_"; - } -}; - -// Templated checking functions -// TODO: do this via polymorphism, use 
transform ? - -template -inline void verifyReturnTypes(const std::vector& validTypes, - const std::map& vt) { - typedef typename std::map::value_type NamedMethod; - for(const NamedMethod& namedMethod: vt) - namedMethod.second.verifyReturnTypes(validTypes); -} - -} // \namespace wrap - diff --git a/wrap/Function.cpp b/wrap/Function.cpp deleted file mode 100644 index 80b0adbbe..000000000 --- a/wrap/Function.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Function.ccp - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#include "Function.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -bool Function::initializeOrCheck(const string& name, - boost::optional instName, bool verbose) { - - if (name.empty()) - throw runtime_error("Function::initializeOrCheck called with empty name"); - - // Check if this overload is give to the correct method - if (name_.empty()) { - name_ = name; - templateArgValue_ = instName; - verbose_ = verbose; - return true; - } else { - if (name_ != name || verbose_ != verbose - || ((bool) templateArgValue_ != (bool) instName) - || ((bool) templateArgValue_ && (bool) instName - && !(*templateArgValue_ == *instName))) - throw runtime_error( - "Function::initializeOrCheck called with different arguments"); - - return false; - } -} - -/* ************************************************************************* */ -void Function::emit_call(FileWriter& proxyFile, const ReturnValue& returnVal, - const string& wrapperName, int id) const { - returnVal.emit_matlab(proxyFile); - proxyFile.oss << wrapperName << "(" << id; - if (!isStatic()) - proxyFile.oss << ", this"; - proxyFile.oss << ", varargin{:});\n"; -} - -/* ************************************************************************* */ -void Function::emit_conditional_call(FileWriter& proxyFile, - const ReturnValue& returnVal, const ArgumentList& args, - const string& wrapperName, int id) const { - - // Check all arguments - args.proxy_check(proxyFile); - - // output call to C++ wrapper - proxyFile.oss << " "; - emit_call(proxyFile, returnVal, wrapperName, id); -} - -/* ************************************************************************* */ diff --git a/wrap/Function.h b/wrap/Function.h deleted file mode 100644 index c39b3231c..000000000 --- a/wrap/Function.h +++ /dev/null @@ -1,73 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Function.h - * @brief Base class for global functions and methods - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "Argument.h" -#include - -namespace wrap { - -/// Function class -class Function { - -protected: - - std::string name_; ///< name of method - boost::optional templateArgValue_; ///< value of template argument if applicable - bool verbose_; - -public: - - /** - * @brief first time, fill in instance variables, otherwise check if same - * @return true if first time, false thereafter - */ - bool initializeOrCheck(const std::string& name, - boost::optional instName = boost::none, bool verbose = - false); - - std::string name() const { - return name_; - } - - /// Only Methods are non-static - virtual bool isStatic() const { - return true; - } - - std::string matlabName() const { - if (templateArgValue_) - return name_ + templateArgValue_->name(); - else - return name_; - } - - /// Emit function call to MATLAB (no argument checking) - void emit_call(FileWriter& proxyFile, const ReturnValue& returnVal, - const std::string& wrapperName, int id) const; - - /// Emit checking arguments and function call to MATLAB - void emit_conditional_call(FileWriter& proxyFile, - const ReturnValue& returnVal, const ArgumentList& args, - const std::string& wrapperName, int id) const; - -}; - -} // \namespace wrap - diff --git a/wrap/GlobalFunction.cpp b/wrap/GlobalFunction.cpp deleted file mode 100644 index 02ab19657..000000000 --- a/wrap/GlobalFunction.cpp +++ /dev/null @@ -1,227 +0,0 @@ -/** - * @file GlobalFunction.cpp - * - * @date Jul 22, 2012 - * @author Alex Cunningham - */ - -#include "GlobalFunction.h" -#include "Class.h" -#include "utilities.h" - -#include - -namespace wrap { - -using namespace std; - -/* ************************************************************************* */ -void GlobalFunction::addOverload(const Qualified& overload, - const ArgumentList& args, const ReturnValue& retVal, const std::string& _includeFile, - boost::optional instName, bool verbose) { - FullyOverloadedFunction::addOverload(overload.name(), args, retVal, instName, - verbose); - overloads.push_back(overload); - includeFile = _includeFile; -} - -/* ************************************************************************* */ -void GlobalFunction::matlab_proxy(const string& toolboxPath, - const string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, vector& functionNames) const { - - // cluster overloads with same namespace - // create new GlobalFunction structures around namespaces - same namespaces and names are overloads - // map of namespace to global function - typedef map GlobalFunctionMap; - GlobalFunctionMap grouped_functions; - for (size_t i = 0; i < overloads.size(); ++i) { - Qualified overload = overloads.at(i); - // use concatenated namespaces as key - string str_ns = qualifiedName("", overload.namespaces()); - const ReturnValue& ret = returnValue(i); - const ArgumentList& args = argumentList(i); - grouped_functions[str_ns].addOverload(overload, args, ret); - } - - size_t lastcheck = grouped_functions.size(); - for(const GlobalFunctionMap::value_type& p: grouped_functions) { - p.second.generateSingleFunction(toolboxPath, wrapperName, typeAttributes, - file, functionNames); - if (--lastcheck != 0) - file.oss << endl; - } -} - -/* 
************************************************************************* */ -void GlobalFunction::generateSingleFunction(const string& toolboxPath, - const string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, vector& functionNames) const { - - // create the folder for the namespace - const Qualified& overload1 = overloads.front(); - createNamespaceStructure(overload1.namespaces(), toolboxPath); - - // open destination mfunctionFileName - string mfunctionFileName = overload1.matlabName(toolboxPath); - FileWriter mfunctionFile(mfunctionFileName, verbose_, "%"); - - // get the name of actual matlab object - const string matlabQualName = overload1.qualifiedName("."); - const string matlabUniqueName = overload1.qualifiedName(""); - const string cppName = overload1.qualifiedName("::"); - - mfunctionFile.oss << "function varargout = " << name_ << "(varargin)\n"; - - for (size_t i = 0; i < nrOverloads(); ++i) { - const ArgumentList& args = argumentList(i); - const ReturnValue& returnVal = returnValue(i); - - const int id = functionNames.size(); - - // Output proxy matlab code - mfunctionFile.oss << " " << (i == 0 ? "" : "else"); - emit_conditional_call(mfunctionFile, returnVal, args, wrapperName, id); - - // Output C++ wrapper code - - const string wrapFunctionName = matlabUniqueName + "_" - + boost::lexical_cast(id); - - // call - file.oss << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - // start - file.oss << "{\n"; - - // check arguments - // NOTE: for static functions, there is no object passed - file.oss << " checkArguments(\"" << matlabUniqueName - << "\",nargout,nargin," << args.size() << ");\n"; - - // unwrap arguments, see Argument.cpp - args.matlab_unwrap(file, 0); // We start at 0 because there is no self object - - // call method with default type and wrap result - if (returnVal.type1.name() != "void") - returnVal.wrap_result(cppName + "(" + args.names() + ")", file, - typeAttributes); - else - file.oss << cppName + "(" + args.names() + ");\n"; - - // finish - file.oss << "}\n"; - - // Add to function list - functionNames.push_back(wrapFunctionName); - } - - mfunctionFile.oss << " else\n"; - mfunctionFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << "');" << endl; - mfunctionFile.oss << " end" << endl; - - // Close file - mfunctionFile.emit(true); -} - -/* ************************************************************************* */ -void GlobalFunction::python_wrapper(FileWriter& wrapperFile) const { - wrapperFile.oss << "def(\"" << name_ << "\", " << name_ << ");\n"; -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pxd(FileWriter& file) const { - file.oss << "cdef extern from \"" << includeFile << "\" namespace \"" - << overloads[0].qualifiedNamespaces("::") - << "\":" << endl; - for (size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, "", vector()); - file.oss << pxdName() + " \"" + overloads[0].qualifiedName("::") + - "\"("; - argumentList(i).emit_cython_pxd(file, "", vector()); - file.oss << ")"; - file.oss << " except +"; - file.oss << "\n"; - } -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pyx_no_overload(FileWriter& file) const { - string funcName = pyxName(); - - // Function definition - file.oss << "def " << funcName; - - // modify name of function 
instantiation as python doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - if (templateArgValue_) file.oss << templateArgValue_->pyxClassName(); - - // funtion arguments - file.oss << "("; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string ret = pyx_functionCall("", pxdName(), 0); - if (!returnVals_[0].isVoid()) { - file.oss << " cdef " << returnVals_[0].pyx_returnType() - << " ret = " << ret << "\n"; - file.oss << " return " << returnVals_[0].pyx_casting("ret") << "\n"; - } else { - file.oss << " " << ret << "\n"; - } -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pyx(FileWriter& file) const { - string funcName = pyxName(); - - size_t N = nrOverloads(); - if (N == 1) { - emit_cython_pyx_no_overload(file); - return; - } - - // Dealing with overloads.. - file.oss << "def " << funcName << "(*args, **kwargs):\n"; - for (size_t i = 0; i < N; ++i) { - file.oss << " success, results = " << funcName << "_" << i - << "(args, kwargs)\n"; - file.oss << " if success:\n return results\n"; - } - file.oss << " raise TypeError('Could not find the correct overload')\n"; - - for (size_t i = 0; i < N; ++i) { - ArgumentList args = argumentList(i); - file.oss << "def " + funcName + "_" + to_string(i) + "(args, kwargs):\n"; - file.oss << " cdef list __params\n"; - if (!returnVals_[i].isVoid()) { - file.oss << " cdef " << returnVals_[i].pyx_returnType() << " return_value\n"; - } - file.oss << " try:\n"; - file.oss << pyx_resolveOverloadParams(args, false, 2); // lazy: always return None even if it's a void function - - /// Call corresponding cython function - file.oss << argumentList(i).pyx_convertEigenTypeAndStorageOrder(" "); - // catch exception which indicates the parameters passed are incorrect. 
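To make the control flow GlobalFunction::emit_cython_pyx is writing here concrete: for an overloaded free function the generator emits a thin dispatcher plus one numbered helper per overload, each helper returning a (success, result) pair. The shape of the emitted text is roughly as follows; the function name is a placeholder, the pxd_ prefix comes from pxdName() as declared in this diff, and the argument checks written by pyx_resolveOverloadParams are elided because that helper is outside this diff (a single positional argument stands in for them).

    def exampleFunction(*args, **kwargs):
        success, results = exampleFunction_0(args, kwargs)
        if success:
            return results
        success, results = exampleFunction_1(args, kwargs)
        if success:
            return results
        raise TypeError('Could not find the correct overload')

    def exampleFunction_0(args, kwargs):
        cdef list __params
        cdef double return_value                 # only written when this overload returns a value
        try:
            # parameter resolution from pyx_resolveOverloadParams and the Eigen
            # storage-order conversion are emitted here (elided in this sketch)
            x, = args
        except:
            return False, None

        return_value = pxd_exampleFunction(x)    # pxdName() is "pxd_" + pyRename(name)
        return True, return_value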
- file.oss << " except:\n"; - file.oss << " return False, None\n\n"; - - string call = pyx_functionCall("", pxdName(), i); - if (!returnVals_[i].isVoid()) { - file.oss << " return_value = " << call << "\n"; - file.oss << " return True, " << returnVals_[i].pyx_casting("return_value") << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return True, None\n"; - } - } -} -/* ************************************************************************* */ - -} // \namespace wrap - diff --git a/wrap/GlobalFunction.h b/wrap/GlobalFunction.h deleted file mode 100644 index 099cefa70..000000000 --- a/wrap/GlobalFunction.h +++ /dev/null @@ -1,148 +0,0 @@ -/** - * @file GlobalFunction.h - * - * @brief Implements codegen for a global function wrapped in matlab - * - * @date Jul 22, 2012 - * @author Alex Cunningham - */ - -#pragma once - -#include "FullyOverloadedFunction.h" - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-variable" -#endif -#include -#include -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif - -namespace bl = boost::lambda; - -namespace wrap { - -struct GlobalFunction: public FullyOverloadedFunction { - - std::vector overloads; ///< Stack of qualified names - std::string includeFile; - - // adds an overloaded version of this function, - void addOverload(const Qualified& overload, const ArgumentList& args, - const ReturnValue& retVal, const std::string& _includeFile = "", boost::optional instName = - boost::none, bool verbose = false); - - void verifyArguments(const std::vector& validArgs) const { - SignatureOverloads::verifyArguments(validArgs, name_); - } - - void verifyReturnTypes(const std::vector& validtypes) const { - SignatureOverloads::verifyReturnTypes(validtypes, name_); - } - - // codegen function called from Module to build the cpp and matlab versions of the function - void matlab_proxy(const std::string& toolboxPath, - const std::string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile) const; - - // function name in Cython pxd - std::string pxdName() const { return "pxd_" + pyRename(name_); } - // function name in Python pyx - std::string pyxName() const { - std::string result = ""; - for(size_t i=0; i= 1) { - result += (overloads[0].namespaces_[i] + "_"); - } - } - result += pyRename(name_); - return result; - } - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile) const; - void emit_cython_pyx(FileWriter& pyxFile) const; - void emit_cython_pyx_no_overload(FileWriter& pyxFile) const; - -private: - - // Creates a single global function - all in same namespace - void generateSingleFunction(const std::string& toolboxPath, - const std::string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, std::vector& functionNames) const; - -}; - -typedef std::map GlobalFunctions; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct GlobalFunctionGrammar: public classic::grammar { - - GlobalFunctions& global_functions_; ///< successful parse will be placed in here - std::vector& namespaces_; - std::string& includeFile; - - /// Construct type grammar and specify where result is placed - GlobalFunctionGrammar(GlobalFunctions& global_functions, - std::vector& namespaces, - std::string& includeFile) - : global_functions_(global_functions), - 
namespaces_(namespaces), - includeFile(includeFile) {} - - /// Definition of type grammar - template - struct definition: BasicRules { - -// using BasicRules::name_p; -// using BasicRules::className_p; - using BasicRules::comments_p; - - ArgumentList args; - ArgumentListGrammar argumentList_g; - - ReturnValue retVal0, retVal; - ReturnValueGrammar returnValue_g; - - Qualified globalFunction; - - classic::rule globalFunctionName_p, global_function_p; - - definition(GlobalFunctionGrammar const& self) : - argumentList_g(args), returnValue_g(retVal) { - - using namespace classic; - bool verbose = false; // TODO - - globalFunctionName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - // parse a global function - global_function_p = (returnValue_g >> globalFunctionName_p[assign_a( - globalFunction.name_)] >> - argumentList_g >> ';' >> *comments_p) // - [assign_a(globalFunction.namespaces_, self.namespaces_)] // - [bl::bind( - &GlobalFunction::addOverload, - bl::var(self.global_functions_)[bl::var(globalFunction.name_)], - bl::var(globalFunction), bl::var(args), bl::var(retVal), bl::var(self.includeFile), - boost::none, verbose)] // - [assign_a(retVal, retVal0)][clear_a(globalFunction)][clear_a(args)]; - } - - classic::rule const& start() const { - return global_function_p; - } - - }; -}; -// GlobalFunctionGrammar - -}// \namespace wrap - diff --git a/wrap/Method.cpp b/wrap/Method.cpp deleted file mode 100644 index 2a4b0b3af..000000000 --- a/wrap/Method.cpp +++ /dev/null @@ -1,205 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Method.ccp - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#include "Method.h" -#include "Class.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -bool Method::addOverload(Str name, const ArgumentList& args, - const ReturnValue& retVal, bool is_const, - boost::optional instName, - bool verbose) { - bool first = MethodBase::addOverload(name, args, retVal, instName, verbose); - if (first) - is_const_ = is_const; - else if (is_const && !is_const_) - throw std::runtime_error( - "Method::addOverload: " + name + - " now designated as const whereas before it was not"); - else if (!is_const && is_const_) - throw std::runtime_error( - "Method::addOverload: " + name + - " now designated as non-const whereas before it was"); - return first; -} - -/* ************************************************************************* */ -void Method::proxy_header(FileWriter& proxyFile) const { - proxyFile.oss << " function varargout = " << matlabName() - << "(this, varargin)\n"; -} - -/* ************************************************************************* */ -string Method::wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, - const ArgumentList& args) const { - // check arguments - // extra argument obj -> nargin-1 is passed ! 
- // example: checkArguments("equals",nargout,nargin-1,2); - wrapperFile.oss << " checkArguments(\"" << matlabName() - << "\",nargout,nargin-1," << args.size() << ");\n"; - - // get class pointer - // example: auto obj = unwrap_shared_ptr< Test >(in[0], "Test"); - wrapperFile.oss << " auto obj = unwrap_shared_ptr<" << cppClassName - << ">(in[0], \"ptr_" << matlabUniqueName << "\");" << endl; - - // unwrap arguments, see Argument.cpp, we start at 1 as first is obj - args.matlab_unwrap(wrapperFile, 1); - - // call method and wrap result - // example: out[0]=wrap(obj->return_field(t)); - string expanded = "obj->" + name_; - if (templateArgValue_) - expanded += ("<" + templateArgValue_->qualifiedName("::") + ">"); - - return expanded; -} - -/* ************************************************************************* */ -void Method::emit_cython_pxd(FileWriter& file, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - const string renamed = pyRename(name_); - if (renamed != name_) { - file.oss << pyRename(name_) + " \"" + name_ + "\"" << "("; - } else { - file.oss << name_ << "("; - } - argumentList(i).emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << ")"; - // if (is_const_) file.oss << " const"; - file.oss << " except +"; - file.oss << "\n"; - } -} - -/* ************************************************************************* */ -void Method::emit_cython_pyx_no_overload(FileWriter& file, - const Class& cls) const { - string funcName = pyRename(name_); - - // leverage python's special treatment for print - if (funcName == "print_") { - file.oss << " def __repr__(self):\n"; - file.oss << " strBuf = RedirectCout()\n"; - file.oss << " self.print_('')\n"; - file.oss << " return strBuf.str()\n"; - } - - // Function definition - file.oss << " def " << funcName; - - // modify name of function instantiation as python doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - if (templateArgValue_) file.oss << templateArgValue_->pyxClassName(); - - // function arguments - file.oss << "(self"; - if (argumentList(0).size() > 0) file.oss << ", "; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string caller = "self." + cls.shared_pxd_obj_in_pyx() + ".get()"; - string ret = pyx_functionCall(caller, funcName, 0); - if (!returnVals_[0].isVoid()) { - file.oss << " cdef " << returnVals_[0].pyx_returnType() - << " ret = " << ret << "\n"; - file.oss << " return " << returnVals_[0].pyx_casting("ret") << "\n"; - } else { - file.oss << " " << ret << "\n"; - } -} - -/* ************************************************************************* */ -void Method::emit_cython_pyx(FileWriter& file, const Class& cls) const { - string funcName = pyRename(name_); - // For template function: modify name of function instantiation as python - // doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - string instantiatedName = - (templateArgValue_) ? funcName + templateArgValue_->pyxClassName() : - funcName; - - size_t N = nrOverloads(); - // It's easy if there's no overload - if (N == 1) { - emit_cython_pyx_no_overload(file, cls); - return; - } - - // Dealing with overloads.. 
- file.oss << " def " << instantiatedName << "(self, *args, **kwargs):\n"; - file.oss << " cdef list __params\n"; - - // Define return values for all possible overloads - vector return_type; // every overload has a return type, possibly void - map return_value; // we only define one return value for every distinct type - size_t j = 1; - for (size_t i = 0; i < nrOverloads(); ++i) { - if (returnVals_[i].isVoid()) { - return_type.push_back("void"); - } else { - const string type = returnVals_[i].pyx_returnType(); - return_type.push_back(type); - if (return_value.count(type) == 0) { - const string value = "return_value_" + to_string(j++); - return_value[type] = value; - file.oss << " cdef " << type << " " << value << "\n"; - } - } - } - - for (size_t i = 0; i < nrOverloads(); ++i) { - ArgumentList args = argumentList(i); - file.oss << " try:\n"; - file.oss << pyx_resolveOverloadParams(args, false, 3); // lazy: always return None even if it's a void function - - /// Call corresponding cython function - file.oss << args.pyx_convertEigenTypeAndStorageOrder(" "); - string caller = "self." + cls.shared_pxd_obj_in_pyx() + ".get()"; - string call = pyx_functionCall(caller, funcName, i); - if (!returnVals_[i].isVoid()) { - const string type = return_type[i]; - const string value = return_value[type]; - file.oss << " " << value << " = " << call << "\n"; - file.oss << " return " << returnVals_[i].pyx_casting(value) - << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return\n"; - } - file.oss << " except (AssertionError, ValueError):\n"; - file.oss << " pass\n"; - } - file.oss - << " raise TypeError('Incorrect arguments or types for method call.')\n\n"; -} -/* ************************************************************************* */ diff --git a/wrap/Method.h b/wrap/Method.h deleted file mode 100644 index 4d3c8d909..000000000 --- a/wrap/Method.h +++ /dev/null @@ -1,74 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Method.h - * @brief describes and generates code for methods - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "MethodBase.h" - -namespace wrap { - -/// Method class -class Method: public MethodBase { - -protected: - bool is_const_; - -public: - - typedef const std::string& Str; - - bool addOverload(Str name, const ArgumentList& args, - const ReturnValue& retVal, bool is_const, - boost::optional instName = boost::none, bool verbose = - false); - - bool isStatic() const override { - return false; - } - - virtual bool isConst() const { - return is_const_; - } - - bool isSameModifiers(const Method& other) const { - return is_const_ == other.is_const_ && - ((templateArgValue_ && other.templateArgValue_) || - (!templateArgValue_ && !other.templateArgValue_)); - } - - friend std::ostream& operator<<(std::ostream& os, const Method& m) { - for (size_t i = 0; i < m.nrOverloads(); i++) - os << m.returnVals_[i] << " " << m.name_ << m.argLists_[i]; - return os; - } - - void emit_cython_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_pyx(FileWriter& file, const Class& cls) const; - void emit_cython_pyx_no_overload(FileWriter& file, const Class& cls) const; - -private: - - // Emit method header - void proxy_header(FileWriter& proxyFile) const override; - - std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const override; -}; - -} // \namespace wrap - diff --git a/wrap/MethodBase.cpp b/wrap/MethodBase.cpp deleted file mode 100644 index a2ed68780..000000000 --- a/wrap/MethodBase.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file MethodBase.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Method.h" -#include "Class.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void MethodBase::proxy_wrapper_fragments( - FileWriter& proxyFile, FileWriter& wrapperFile, Str cppClassName, - Str matlabQualName, Str matlabUniqueName, Str wrapperName, - const TypeAttributesTable& typeAttributes, - vector& functionNames) const { - // emit header, e.g., function varargout = templatedMethod(this, varargin) - proxy_header(proxyFile); - - // Emit comments for documentation - string up_name = boost::to_upper_copy(matlabName()); - proxyFile.oss << " % " << up_name << " usage: "; - usage_fragment(proxyFile, matlabName()); - - // Emit URL to Doxygen page - proxyFile.oss << " % " - << "Doxygen can be found at " - "http://research.cc.gatech.edu/borg/sites/edu.borg/html/" - "index.html" << endl; - - // Handle special case of single overload with all numeric arguments - if (nrOverloads() == 1 && argumentList(0).allScalar()) { - // Output proxy matlab code - // TODO: document why is it OK to not check arguments in this case - proxyFile.oss << " "; - const int id = (int)functionNames.size(); - emit_call(proxyFile, returnValue(0), wrapperName, id); - - // Output C++ wrapper code - const string wrapFunctionName = wrapper_fragment( - wrapperFile, cppClassName, matlabUniqueName, 0, id, typeAttributes); - - // Add to function list - functionNames.push_back(wrapFunctionName); - } else { - // Check arguments for all overloads - for (size_t i = 0; i < nrOverloads(); ++i) { - // Output proxy matlab code - proxyFile.oss << " " << (i == 0 ? "" : "else"); - const int id = (int)functionNames.size(); - emit_conditional_call(proxyFile, returnValue(i), argumentList(i), - wrapperName, id); - - // Output C++ wrapper code - const string wrapFunctionName = wrapper_fragment( - wrapperFile, cppClassName, matlabUniqueName, i, id, typeAttributes); - - // Add to function list - functionNames.push_back(wrapFunctionName); - } - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << "." 
<< name_ << "');" << endl; - proxyFile.oss << " end\n"; - } - - proxyFile.oss << " end\n"; -} - -/* ************************************************************************* */ -string MethodBase::wrapper_fragment( - FileWriter& wrapperFile, Str cppClassName, Str matlabUniqueName, - int overload, int id, const TypeAttributesTable& typeAttributes) const { - // generate code - - const string wrapFunctionName = - matlabUniqueName + "_" + name_ + "_" + boost::lexical_cast(id); - - const ArgumentList& args = argumentList(overload); - const ReturnValue& returnVal = returnValue(overload); - - // call - wrapperFile.oss - << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - // start - wrapperFile.oss << "{\n"; - - // get call - // for static methods: cppClassName::staticMethod - // for instance methods: obj->instanceMethod - string expanded = - wrapper_call(wrapperFile, cppClassName, matlabUniqueName, args); - - expanded += ("(" + args.names() + ")"); - if (returnVal.type1.name() != "void") - returnVal.wrap_result(expanded, wrapperFile, typeAttributes); - else - wrapperFile.oss << " " + expanded + ";\n"; - - // finish - wrapperFile.oss << "}\n"; - - return wrapFunctionName; -} - -/* ************************************************************************* */ -void MethodBase::python_wrapper(FileWriter& wrapperFile, Str className) const { - wrapperFile.oss << " .def(\"" << name_ << "\", &" << className - << "::" << name_ << ");\n"; -} - -/* ************************************************************************* */ diff --git a/wrap/MethodBase.h b/wrap/MethodBase.h deleted file mode 100644 index ee72a6a53..000000000 --- a/wrap/MethodBase.h +++ /dev/null @@ -1,70 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file MethodBase.h - * @brief describes and generates code for static methods - * @author Frank Dellaert - * @author Alex Cunningham - * @author Richard Roberts - **/ - -#pragma once - -#include "FullyOverloadedFunction.h" - -namespace wrap { - -// Forward declaration -class Class; - -/// MethodBase class -struct MethodBase : public FullyOverloadedFunction { - typedef const std::string& Str; - - // emit a list of comments, one for each overload - void comment_fragment(FileWriter& proxyFile) const { - SignatureOverloads::comment_fragment(proxyFile, matlabName()); - } - - void verifyArguments(const std::vector& validArgs) const { - SignatureOverloads::verifyArguments(validArgs, name_); - } - - void verifyReturnTypes(const std::vector& validtypes) const { - SignatureOverloads::verifyReturnTypes(validtypes, name_); - } - - // MATLAB code generation - // classPath is class directory, e.g., ../matlab/@Point2 - void proxy_wrapper_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str cppClassName, Str matlabQualName, - Str matlabUniqueName, Str wrapperName, - const TypeAttributesTable& typeAttributes, - std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile, Str className) const; - -protected: - virtual void proxy_header(FileWriter& proxyFile) const = 0; - - std::string wrapper_fragment( - FileWriter& wrapperFile, Str cppClassName, Str matlabUniqueName, - int overload, int id, - const TypeAttributesTable& typeAttributes) const; ///< cpp wrapper - - virtual std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, - const ArgumentList& args) const = 0; -}; - -} // \namespace wrap diff --git a/wrap/Module.cpp b/wrap/Module.cpp deleted file mode 100644 index 780c6f8da..000000000 --- a/wrap/Module.cpp +++ /dev/null @@ -1,649 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Module.ccp - * @author Frank Dellaert - * @author Alex Cunningham - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Module.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; -using namespace BOOST_SPIRIT_CLASSIC_NS; -namespace bl = boost::lambda; -namespace fs = boost::filesystem; - -/* ************************************************************************* */ -// We parse an interface file into a Module object. -// The grammar is defined using the boost/spirit combinatorial parser. -// For example, str_p("const") parses the string "const", and the >> -// operator creates a sequence parser. The grammar below, composed of rules -// and with start rule [class_p], doubles as the specs for our interface files. 
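The comment block above explains that the interface grammar is built from Boost.Spirit Classic combinators (str_p matches a literal, >> sequences parsers, space_p is the skipper, lexeme_d suppresses skipping). As a rough standalone illustration only, not part of the deleted file or the patch, the toy program below exercises those same primitives; the sample input string and the use of alnum_p are assumptions made purely for demonstration.

```cpp
// Minimal sketch of the Boost.Spirit Classic primitives used by Module::parseMarkup.
#include <boost/spirit/include/classic_core.hpp>
#include <iostream>

int main() {
  using namespace BOOST_SPIRIT_CLASSIC_NS;

  // Matches e.g. "const Matrix": the literal "const", then an identifier.
  // lexeme_d turns off whitespace skipping inside the identifier,
  // while space_p skips whitespace between the two tokens.
  parse_info<> info =
      parse("const Matrix", str_p("const") >> lexeme_d[+alnum_p], space_p);

  std::cout << (info.full ? "full match" : "partial or no match") << std::endl;
  return 0;
}
```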
-/* ************************************************************************* */ - -/* ************************************************************************* */ -// If a number of template arguments were given, generate a number of expanded -// class names, e.g., PriorFactor -> PriorFactorPose2, and add those classes -static void handle_possible_template(vector& classes, - vector& uninstantiatedClasses, - const Class& cls, const Template& t) { - uninstantiatedClasses.push_back(cls); - if (cls.templateArgs.empty() || t.empty()) { - classes.push_back(cls); - } else { - if (cls.templateArgs.size() != 1) - throw std::runtime_error( - "In-line template instantiations only handle a single template argument"); - string arg = cls.templateArgs.front(); - vector classInstantiations = - (t.nrValues() > 0) ? cls.expandTemplate(arg, t.argValues()) : - cls.expandTemplate(arg, t.intList()); - for(const Class& c: classInstantiations) - classes.push_back(c); - } -} - -static void push_typedef_pair(vector& typedefs, - const Qualified& oldType, - const Qualified& newType, - const string& includeFile) { - typedefs.push_back(TypedefPair(oldType, newType, includeFile)); -} - -/* ************************************************************************* */ -Module::Module(const std::string& moduleName, bool enable_verbose) -: name(moduleName), verbose(enable_verbose) -{ -} - -/* ************************************************************************* */ -Module::Module(const string& interfacePath, - const string& moduleName, bool enable_verbose) -: name(moduleName), verbose(enable_verbose) -{ - // read interface file - string interfaceFile = interfacePath + "/" + moduleName + ".h"; - string contents = file_contents(interfaceFile); - - // execute parsing - parseMarkup(contents); -} - -/* ************************************************************************* */ -void Module::parseMarkup(const std::string& data) { - // The parse imperatively :-( updates variables gradually during parse - // The one with postfix 0 are used to reset the variables after parse. - - //---------------------------------------------------------------------------- - // Grammar with actions that build the Class object. Actions are - // defined within the square brackets [] and are executed whenever a - // rule is successfully parsed. Define BOOST_SPIRIT_DEBUG to debug. 
- // The grammar is allows a very restricted C++ header - // lexeme_d turns off white space skipping - // http://www.boost.org/doc/libs/1_37_0/libs/spirit/classic/doc/directives.html - // ---------------------------------------------------------------------------- - - // Define Rule and instantiate basic rules - typedef rule Rule; - BasicRules basic; - - vector namespaces; // current namespace tag - string currentInclude; - - // parse a full class - Class cls0(verbose),cls(verbose); - Template classTemplate; - ClassGrammar class_g(cls,classTemplate); - Rule class_p = class_g // - [assign_a(cls.namespaces_, namespaces)] - [assign_a(cls.includeFile, currentInclude)][bl::bind( - &handle_possible_template, bl::var(classes), - bl::var(uninstantiatedClasses), bl::var(cls), - bl::var(classTemplate))][clear_a(classTemplate)] // - [assign_a(cls, cls0)]; - - // parse "gtsam::Pose2" and add to singleInstantiation.typeList - TemplateInstantiationTypedef singleInstantiation, singleInstantiation0; - TypeListGrammar<'<','>'> typelist_g(singleInstantiation.typeList); - - // typedef gtsam::RangeFactor RangeFactor2D; - TypeGrammar instantiationClass_g(singleInstantiation.class_); - Rule templateSingleInstantiation_p = - (str_p("typedef") >> instantiationClass_g >> - typelist_g >> - basic.className_p[assign_a(singleInstantiation.name_)] >> - ';') - [assign_a(singleInstantiation.namespaces_, namespaces)] - [push_back_a(templateInstantiationTypedefs, singleInstantiation)] - [assign_a(singleInstantiation, singleInstantiation0)]; - - Qualified oldType, newType; - TypeGrammar typedefOldClass_g(oldType), typedefNewClass_g(newType); - Rule typedef_p = - (str_p("typedef") >> typedefOldClass_g >> typedefNewClass_g >> - ';') - [assign_a(oldType.namespaces_, namespaces)] - [assign_a(newType.namespaces_, namespaces)] - [bl::bind(&push_typedef_pair, bl::var(typedefs), bl::var(oldType), - bl::var(newType), bl::var(currentInclude))]; - - // Create grammar for global functions - GlobalFunctionGrammar global_function_g(global_functions, namespaces, - currentInclude); - - Rule include_p = str_p("#include") >> ch_p('<') >> - (*(anychar_p - '>'))[push_back_a(includes)] - [assign_a(currentInclude)] >> - ch_p('>'); - -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif - - Rule namespace_def_p = - (str_p("namespace") - >> basic.namespace_p[push_back_a(namespaces)] - >> ch_p('{') - >> *(include_p | class_p | templateSingleInstantiation_p | typedef_p | global_function_g | namespace_def_p | basic.comments_p) - >> ch_p('}')) - [pop_a(namespaces)]; - -#ifdef __clang__ -#pragma clang diagnostic pop -#endif - - // parse forward declaration - ForwardDeclaration fwDec0, fwDec; - Class fwParentClass; - TypeGrammar className_g(fwDec.cls); - TypeGrammar classParent_g(fwParentClass); - Rule classParent_p = (':' >> classParent_g >> ';') // - [bl::bind(&Class::assignParent, bl::var(fwDec.cls), - bl::var(fwParentClass))][clear_a(fwParentClass)]; - - Rule forward_declaration_p = - !(str_p("virtual")[assign_a(fwDec.isVirtual, T)]) - >> str_p("class") >> className_g - >> (classParent_p | ';') - [push_back_a(forward_declarations, fwDec)] - [assign_a(cls,cls0)] // also clear class to avoid partial parse - [assign_a(fwDec, fwDec0)]; - - Rule module_content_p = basic.comments_p | include_p | class_p - | templateSingleInstantiation_p | forward_declaration_p - | global_function_g | namespace_def_p; - - Rule module_p = *module_content_p >> !end_p; - - // and parse contents - parse_info info = 
parse(data.c_str(), module_p, space_p); - if(!info.full) { - printf("parsing stopped at \n%.20s\n",info.stop); - cout << "Stopped in:\n" - "class '" << cls.name_ << "'" << endl; - throw ParseFailed((int)info.length); - } - - // Post-process classes for serialization markers - for(Class& cls: classes) - cls.erase_serialization(); - - for(Class& cls: uninstantiatedClasses) - cls.erase_serialization(); - - // Explicitly add methods to the classes from parents so it shows in documentation - for(Class& cls: classes) - cls.appendInheritedMethods(cls, classes); - - // - Remove inherited methods for Cython classes in the pxd, otherwise Cython can't decide which one to call. - // - Only inherited nontemplateMethods_ in uninstantiatedClasses need to be removed - // because that what we serialized to the pxd. - // - However, we check against the class parent's *methods_* to avoid looking into - // its grand parent and grand-grand parent, etc., because all those are already - // added in its direct parent. - // - So this must be called *after* the above code appendInheritedMethods!! - for(Class& cls: uninstantiatedClasses) - cls.removeInheritedNontemplateMethods(uninstantiatedClasses); - - // Expand templates - This is done first so that template instantiations are - // counted in the list of valid types, have their attributes and dependencies - // checked, etc. - expandedClasses = ExpandTypedefInstantiations(classes, - templateInstantiationTypedefs); - - // Dependency check list - vector validTypes = GenerateValidTypes(expandedClasses, - forward_declarations, typedefs); - - // Check that all classes have been defined somewhere - verifyArguments(validTypes, global_functions); - verifyReturnTypes(validTypes, global_functions); - - hasSerialiable = false; - for(const Class& cls: expandedClasses) - cls.verifyAll(validTypes,hasSerialiable); - - // Create type attributes table and check validity - typeAttributes.addClasses(expandedClasses); - typeAttributes.addForwardDeclarations(forward_declarations); - for (const TypedefPair& p: typedefs) - typeAttributes.addType(p.newType); - // add Eigen types as template arguments are also checked ? 
- vector eigen; - eigen.push_back(ForwardDeclaration("Vector")); - eigen.push_back(ForwardDeclaration("Matrix")); - typeAttributes.addForwardDeclarations(eigen); - typeAttributes.checkValidity(expandedClasses); -} - -/* ************************************************************************* */ -void Module::generate_matlab_wrapper(const string& toolboxPath) const { - - fs::create_directories(toolboxPath); - - // create the unified .cpp switch file - const string wrapperName = name + "_wrapper"; - string wrapperFileName = toolboxPath + "/" + wrapperName + ".cpp"; - FileWriter wrapperFile(wrapperFileName, verbose, "//"); - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "\n"; - - // Include boost.serialization archive headers before other class headers - if (hasSerialiable) { - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n\n"; - } - - // Generate includes while avoiding redundant includes - generateIncludes(wrapperFile); - - // create typedef classes - we put this at the top of the wrap file so that - // collectors and method arguments can use these typedefs - for(const Class& cls: expandedClasses) - if(!cls.typedefName.empty()) - wrapperFile.oss << cls.getTypedef() << "\n"; - wrapperFile.oss << "\n"; - - // Generate boost.serialization export flags (needs typedefs from above) - if (hasSerialiable) { - for(const Class& cls: expandedClasses) - if(cls.isSerializable) - wrapperFile.oss << cls.getSerializationExport() << "\n"; - wrapperFile.oss << "\n"; - } - - // Generate collectors and cleanup function to be called from mexAtExit - WriteCollectorsAndCleanupFcn(wrapperFile, name, expandedClasses); - - // generate RTTI registry (for returning derived-most types) - WriteRTTIRegistry(wrapperFile, name, expandedClasses); - - vector functionNames; // Function names stored by index for switch - - // create proxy class and wrapper code - for(const Class& cls: expandedClasses) - cls.matlab_proxy(toolboxPath, wrapperName, typeAttributes, wrapperFile, functionNames); - - // create matlab files and wrapper code for global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.matlab_proxy(toolboxPath, wrapperName, typeAttributes, wrapperFile, functionNames); - - // finish wrapper file - wrapperFile.oss << "\n"; - finish_wrapper(wrapperFile, functionNames); - - wrapperFile.emit(true); -} - -/* ************************************************************************* */ -void Module::generate_cython_wrapper(const string& toolboxPath, const std::string& pxdImports) const { - fs::create_directories(toolboxPath); - string pxdFileName = toolboxPath + "/" + name + ".pxd"; - FileWriter pxdFile(pxdFileName, verbose, "#"); - pxdFile.oss << pxdImports << "\n"; - emit_cython_pxd(pxdFile); - string pyxFileName = toolboxPath + "/" + name + ".pyx"; - FileWriter pyxFile(pyxFileName, verbose, "#"); - emit_cython_pyx(pyxFile); -} - -/* ************************************************************************* */ -void Module::emit_cython_pxd(FileWriter& pxdFile) const { - // headers - pxdFile.oss << "from gtsam_eigency.core cimport *\n" - "from libcpp.string cimport string\n" - "from libcpp.vector cimport vector\n" - "from libcpp.pair cimport pair\n" - "from libcpp.set cimport set\n" - "from libcpp.map cimport map\n" - "from libcpp cimport bool\n\n"; - - // boost shared_ptr - pxdFile.oss << "cdef extern from \"boost/shared_ptr.hpp\" namespace \"boost\":\n" - " cppclass shared_ptr[T]:\n" - " 
shared_ptr()\n" - " shared_ptr(T*)\n" - " T* get()\n" - " long use_count() const\n" - " T& operator*()\n\n" - " cdef shared_ptr[T] dynamic_pointer_cast[T,U](const shared_ptr[U]& r)\n\n"; - - // gtsam alignment-friendly shared_ptr - pxdFile.oss << "cdef extern from \"gtsam/base/make_shared.h\" namespace \"gtsam\":\n" - " cdef shared_ptr[T] make_shared[T](const T& r)\n\n"; - - for(const TypedefPair& types: typedefs) - types.emit_cython_pxd(pxdFile); - - //... wrap all classes - for (const Class& cls : uninstantiatedClasses) { - cls.emit_cython_pxd(pxdFile); - - for (const Class& expCls : expandedClasses) { - bool matchingNonTemplated = !expCls.templateClass - && expCls.pxdClassName() == cls.pxdClassName(); - bool isTemplatedFromCls = expCls.templateClass - && expCls.templateClass->pxdClassName() == cls.pxdClassName(); - - // ctypedef for template instantiations - if (isTemplatedFromCls) { - pxdFile.oss << "\n"; - pxdFile.oss << "ctypedef " << expCls.templateClass->pxdClassName() - << "["; - for (size_t i = 0; i < expCls.templateInstTypeList.size(); ++i) - pxdFile.oss << expCls.templateInstTypeList[i].pxdClassName() - << ((i == expCls.templateInstTypeList.size() - 1) ? "" : ", "); - pxdFile.oss << "] " << expCls.pxdClassName() << "\n"; - } - - // Python wrapper class - if (isTemplatedFromCls || matchingNonTemplated) { - expCls.emit_cython_wrapper_pxd(pxdFile); - } - } - pxdFile.oss << "\n\n"; - } - - //... wrap global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.emit_cython_pxd(pxdFile); - - pxdFile.emit(true); -} - -/* ************************************************************************* */ -void Module::emit_cython_pyx(FileWriter& pyxFile) const { - // directives... - // allow str to automatically coerce to std::string and back (for python3) - pyxFile.oss << "# cython: c_string_type=str, c_string_encoding=ascii\n\n"; - - // headers... - string pxdHeader = name; - pyxFile.oss << "cimport numpy as np\n" - "import numpy as npp\n" - "cimport " << pxdHeader << "\n" - "from ."<< pxdHeader << " cimport shared_ptr\n" - "from ."<< pxdHeader << " cimport dynamic_pointer_cast\n" - "from ."<< pxdHeader << " cimport make_shared\n"; - - pyxFile.oss << "# C helper function that copies all arguments into a positional list.\n" - "cdef list process_args(list keywords, tuple args, dict kwargs):\n" - " cdef str keyword\n" - " cdef int n = len(args), m = len(keywords)\n" - " cdef list params = list(args)\n" - " assert len(args)+len(kwargs) == m, 'Expected {} arguments'.format(m)\n" - " try:\n" - " return params + [kwargs[keyword] for keyword in keywords[n:]]\n" - " except:\n" - " raise ValueError('Epected arguments ' + str(keywords))\n"; - - // import all typedefs, e.g. from gtsam_wrapper cimport Key, so we don't need to say gtsam.Key - for(const Qualified& q: Qualified::BasicTypedefs) { - pyxFile.oss << "from " << pxdHeader << " cimport " << q.pxdClassName() << "\n"; - } - pyxFile.oss << "from gtsam_eigency.core cimport *\n" - "from libcpp cimport bool\n\n" - "from libcpp.pair cimport pair\n" - "from libcpp.string cimport string\n" - "from cython.operator cimport dereference as deref\n\n\n"; - - // all classes include all forward declarations - std::vector allClasses = expandedClasses; - for(const ForwardDeclaration& fd: forward_declarations) - allClasses.push_back(fd.cls); - - for(const Class& cls: expandedClasses) - cls.emit_cython_pyx(pyxFile, allClasses); - pyxFile.oss << "\n"; - - //... 
wrap global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.emit_cython_pyx(pyxFile); - pyxFile.emit(true); -} - -/* ************************************************************************* */ -void Module::generateIncludes(FileWriter& file) const { - - // collect includes - vector all_includes(includes); - - // sort and remove duplicates - sort(all_includes.begin(), all_includes.end()); - vector::const_iterator last_include = unique(all_includes.begin(), all_includes.end()); - vector::const_iterator it = all_includes.begin(); - // add includes to file - for (; it != last_include; ++it) - file.oss << "#include <" << *it << ">" << endl; - file.oss << "\n"; -} - - -/* ************************************************************************* */ - void Module::finish_wrapper(FileWriter& file, const std::vector& functionNames) const { - file.oss << "void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - file.oss << "{\n"; - file.oss << " mstream mout;\n"; // Send stdout to MATLAB console - file.oss << " std::streambuf *outbuf = std::cout.rdbuf(&mout);\n\n"; - file.oss << " _" << name << "_RTTIRegister();\n\n"; - file.oss << " int id = unwrap(in[0]);\n\n"; - file.oss << " try {\n"; - file.oss << " switch(id) {\n"; - for(size_t id = 0; id < functionNames.size(); ++id) { - file.oss << " case " << id << ":\n"; - file.oss << " " << functionNames[id] << "(nargout, out, nargin-1, in+1);\n"; - file.oss << " break;\n"; - } - file.oss << " }\n"; - file.oss << " } catch(const std::exception& e) {\n"; - file.oss << " mexErrMsgTxt((\"Exception from gtsam:\\n\" + std::string(e.what()) + \"\\n\").c_str());\n"; - file.oss << " }\n"; - file.oss << "\n"; - file.oss << " std::cout.rdbuf(outbuf);\n"; // Restore cout - file.oss << "}\n"; - } - -/* ************************************************************************* */ -vector Module::ExpandTypedefInstantiations(const vector& classes, const vector instantiations) { - - vector expandedClasses = classes; - - for(const TemplateInstantiationTypedef& inst: instantiations) { - // Add the new class to the list - expandedClasses.push_back(inst.findAndExpand(classes)); - } - - // Remove all template classes - for(size_t i = 0; i < expandedClasses.size(); ++i) - if(!expandedClasses[i].templateArgs.empty()) { - expandedClasses.erase(expandedClasses.begin() + size_t(i)); - -- i; - } - - return expandedClasses; -} - -/* ************************************************************************* */ -vector Module::GenerateValidTypes(const vector& classes, const vector& forwardDeclarations, const vector& typedefs) { - vector validTypes; - for(const ForwardDeclaration& fwDec: forwardDeclarations) { - validTypes.push_back(fwDec.name()); - } - validTypes.push_back("void"); - validTypes.push_back("string"); - validTypes.push_back("int"); - validTypes.push_back("bool"); - validTypes.push_back("char"); - validTypes.push_back("unsigned char"); - validTypes.push_back("size_t"); - validTypes.push_back("double"); - validTypes.push_back("Vector"); - validTypes.push_back("Matrix"); - //Create a list of parsed classes for dependency checking - for(const Class& cls: classes) { - validTypes.push_back(cls.qualifiedName("::")); - } - for(const TypedefPair& p: typedefs) { - validTypes.push_back(p.newType.qualifiedName("::")); - } - - return validTypes; -} - -/* ************************************************************************* */ -void Module::WriteCollectorsAndCleanupFcn(FileWriter& wrapperFile, const std::string& 
moduleName, const std::vector& classes) { - // Generate all collectors - for(const Class& cls: classes) { - const string matlabUniqueName = cls.qualifiedName(), - cppName = cls.qualifiedName("::"); - wrapperFile.oss << "typedef std::set*> " - << "Collector_" << matlabUniqueName << ";\n"; - wrapperFile.oss << "static Collector_" << matlabUniqueName << - " collector_" << matlabUniqueName << ";\n"; - } - - // generate mexAtExit cleanup function - wrapperFile.oss << - "\nvoid _deleteAllObjects()\n" - "{\n" - " mstream mout;\n" // Send stdout to MATLAB console - " std::streambuf *outbuf = std::cout.rdbuf(&mout);\n\n" - " bool anyDeleted = false;\n"; - for(const Class& cls: classes) { - const string matlabUniqueName = cls.qualifiedName(); - const string cppName = cls.qualifiedName("::"); - const string collectorType = "Collector_" + matlabUniqueName; - const string collectorName = "collector_" + matlabUniqueName; - // The extra curly-braces around the for loops work around a limitation in MSVC (existing - // since 2005!) preventing more than 248 blocks. - wrapperFile.oss << - " { for(" << collectorType << "::iterator iter = " << collectorName << ".begin();\n" - " iter != " << collectorName << ".end(); ) {\n" - " delete *iter;\n" - " " << collectorName << ".erase(iter++);\n" - " anyDeleted = true;\n" - " } }\n"; - } - wrapperFile.oss << - " if(anyDeleted)\n" - " cout <<\n" - " \"WARNING: Wrap modules with variables in the workspace have been reloaded due to\\n\"\n" - " \"calling destructors, call 'clear all' again if you plan to now recompile a wrap\\n\"\n" - " \"module, so that your recompiled module is used instead of the old one.\" << endl;\n" - " std::cout.rdbuf(outbuf);\n" // Restore cout - "}\n\n"; -} - -/* ************************************************************************* */ -void Module::WriteRTTIRegistry(FileWriter& wrapperFile, const std::string& moduleName, const std::vector& classes) { - wrapperFile.oss << - "void _" << moduleName << "_RTTIRegister() {\n" - " const mxArray *alreadyCreated = mexGetVariablePtr(\"global\", \"gtsam_" + moduleName + "_rttiRegistry_created\");\n" - " if(!alreadyCreated) {\n" - " std::map types;\n"; - for(const Class& cls: classes) { - if(cls.isVirtual) - wrapperFile.oss << - " types.insert(std::make_pair(typeid(" << cls.qualifiedName("::") << ").name(), \"" << cls.qualifiedName(".") << "\"));\n"; - } - wrapperFile.oss << "\n"; - - wrapperFile.oss << - " mxArray *registry = mexGetVariable(\"global\", \"gtsamwrap_rttiRegistry\");\n" - " if(!registry)\n" - " registry = mxCreateStructMatrix(1, 1, 0, NULL);\n" - " typedef std::pair StringPair;\n" - " for(const StringPair& rtti_matlab: types) {\n" - " int fieldId = mxAddField(registry, rtti_matlab.first.c_str());\n" - " if(fieldId < 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " mxArray *matlabName = mxCreateString(rtti_matlab.second.c_str());\n" - " mxSetFieldByNumber(registry, 0, fieldId, matlabName);\n" - " }\n" - " if(mexPutVariable(\"global\", \"gtsamwrap_rttiRegistry\", registry) != 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " mxDestroyArray(registry);\n" - " \n" - " mxArray *newAlreadyCreated = mxCreateNumericMatrix(0, 0, mxINT8_CLASS, mxREAL);\n" - " if(mexPutVariable(\"global\", \"gtsam_" + moduleName + "_rttiRegistry_created\", newAlreadyCreated) != 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " 
mxDestroyArray(newAlreadyCreated);\n" - " }\n" - "}\n" - "\n"; -} - -/* ************************************************************************* */ -void Module::generate_python_wrapper(const string& toolboxPath) const { - - fs::create_directories(toolboxPath); - - // create the unified .cpp switch file - const string wrapperName = name + "_python"; - string wrapperFileName = toolboxPath + "/" + wrapperName + ".cpp"; - FileWriter wrapperFile(wrapperFileName, verbose, "//"); - wrapperFile.oss << "#include \n\n"; - wrapperFile.oss << "using namespace boost::python;\n"; - wrapperFile.oss << "BOOST_PYTHON_MODULE(" + name + ")\n"; - wrapperFile.oss << "{\n"; - - // write out classes - for(const Class& cls: expandedClasses) { - cls.python_wrapper(wrapperFile); - } - - // write out global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.python_wrapper(wrapperFile); - - // finish wrapper file - wrapperFile.oss << "}\n"; - - wrapperFile.emit(true); -} - -/* ************************************************************************* */ diff --git a/wrap/Module.h b/wrap/Module.h deleted file mode 100644 index 2a8344551..000000000 --- a/wrap/Module.h +++ /dev/null @@ -1,95 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Module.h - * @brief describes module to be wrapped - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "Class.h" -#include "GlobalFunction.h" -#include "TemplateInstantiationTypedef.h" -#include "ForwardDeclaration.h" -#include "TypedefPair.h" - -#include -#include -#include - -namespace wrap { - -/** - * A module just has a name and a list of classes - */ -struct Module { - - // Filled during parsing: - std::string name; ///< module name - bool verbose; ///< verbose flag - std::vector classes; ///< list of classes - std::vector uninstantiatedClasses; ///< list of template classes after instantiated - std::vector templateInstantiationTypedefs; ///< list of template instantiations - std::vector forward_declarations; - std::vector includes; ///< Include statements - GlobalFunctions global_functions; - std::vector typedefs; - - // After parsing: - std::vector expandedClasses; - bool hasSerialiable; - TypeAttributesTable typeAttributes; - - /// constructor that parses interface file - Module(const std::string& interfacePath, const std::string& moduleName, - bool enable_verbose = true); - - /// Dummy constructor that does no parsing - use only for testing - Module(const std::string& moduleName, bool enable_verbose = true); - - /// non-const function that performs parsing - typically called by constructor - /// Throws exception on failure - void parseMarkup(const std::string& data); - - /// MATLAB code generation: - void generate_matlab_wrapper(const std::string& path) const; - - /// Cython code generation: - void generate_cython_wrapper(const std::string& path, const std::string& pxdImports = "") const; - void emit_cython_pxd(FileWriter& file) const; - void emit_cython_pyx(FileWriter& file) const; - - void generateIncludes(FileWriter& file) const; - - void finish_wrapper(FileWriter& file, - const std::vector& functionNames) const; - - /// Python 
code generation: - void generate_python_wrapper(const std::string& path) const; - -private: - static std::vector ExpandTypedefInstantiations( - const std::vector& classes, - const std::vector instantiations); - static std::vector GenerateValidTypes( - const std::vector& classes, - const std::vector& forwardDeclarations, - const std::vector& typedefs); - static void WriteCollectorsAndCleanupFcn(FileWriter& wrapperFile, - const std::string& moduleName, const std::vector& classes); - static void WriteRTTIRegistry(FileWriter& wrapperFile, - const std::string& moduleName, const std::vector& classes); -}; - -} // \namespace wrap diff --git a/wrap/OverloadedFunction.h b/wrap/OverloadedFunction.h deleted file mode 100644 index 6bcb72d94..000000000 --- a/wrap/OverloadedFunction.h +++ /dev/null @@ -1,140 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file OverloadedFunction.h - * @brief Function that can overload its arguments only - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "Function.h" -#include "Argument.h" -#include -namespace wrap { - -/** - * ArgumentList Overloads - */ -class ArgumentOverloads { -public: - std::vector argLists_; - -public: - size_t nrOverloads() const { return argLists_.size(); } - - const ArgumentList& argumentList(size_t i) const { return argLists_.at(i); } - - void push_back(const ArgumentList& args) { argLists_.push_back(args); } - - std::vector expandArgumentListsTemplate( - const TemplateSubstitution& ts) const { - std::vector result; - for (const ArgumentList& argList : argLists_) { - ArgumentList instArgList = argList.expandTemplate(ts); - result.push_back(instArgList); - } - return result; - } - - /// Expand templates, imperative ! 
- virtual void ExpandTemplate(const TemplateSubstitution& ts) { - argLists_ = expandArgumentListsTemplate(ts); - } - - void verifyArguments(const std::vector& validArgs, - const std::string s) const { - for (const ArgumentList& argList : argLists_) { - for (Argument arg : argList) { - std::string fullType = arg.type.qualifiedName("::"); - if (find(validArgs.begin(), validArgs.end(), fullType) == - validArgs.end()) - throw DependencyMissing(fullType, "checking argument of " + s); - } - } - } - - friend std::ostream& operator<<(std::ostream& os, - const ArgumentOverloads& overloads) { - for (const ArgumentList& argList : overloads.argLists_) - os << argList << std::endl; - return os; - } - - std::string pyx_resolveOverloadParams(const ArgumentList& args, bool isVoid, - size_t indentLevel = 2) const { - std::string indent; - for (size_t i = 0; i < indentLevel; ++i) - indent += " "; - std::string s; - s += indent + "__params = process_args([" + args.pyx_paramsList() - + "], args, kwargs)\n"; - s += args.pyx_castParamsToPythonType(indent); - if (args.size() > 0) { - for (size_t i = 0; i < args.size(); ++i) { - // For python types we can do the assert after the assignment and save list accesses - if (args[i].type.isNonBasicType() || args[i].type.isEigen()) { - std::string param = args[i].name; - s += indent + "assert isinstance(" + param + ", " - + args[i].type.pyxArgumentType() + ")"; - if (args[i].type.isEigen()) { - s += " and " + param + ".ndim == " - + ((args[i].type.pyxClassName() == "Vector") ? "1" : "2"); - } - s += "\n"; - } - } - } - return s; - } -}; - -class OverloadedFunction : public Function, public ArgumentOverloads { -public: - bool addOverload(const std::string& name, const ArgumentList& args, - boost::optional instName = boost::none, - bool verbose = false) { - bool first = initializeOrCheck(name, instName, verbose); - ArgumentOverloads::push_back(args); - return first; - } - -private: -}; - -// Templated checking functions -// TODO: do this via polymorphism, use transform ? - -template -static std::map expandMethodTemplate( - const std::map& methods, const TemplateSubstitution& ts) { - std::map result; - typedef std::pair NamedMethod; - for (NamedMethod namedMethod : methods) { - F instMethod = namedMethod.second; - instMethod.expandTemplate(ts); - namedMethod.second = instMethod; - result.insert(namedMethod); - } - return result; -} - -template -inline void verifyArguments(const std::vector& validArgs, - const std::map& vt) { - typedef typename std::map::value_type NamedMethod; - for (const NamedMethod& namedMethod : vt) - namedMethod.second.verifyArguments(validArgs); -} - -} // \namespace wrap diff --git a/wrap/Qualified.cpp b/wrap/Qualified.cpp deleted file mode 100644 index 947e51d54..000000000 --- a/wrap/Qualified.cpp +++ /dev/null @@ -1,5 +0,0 @@ -#include - -namespace wrap { - std::vector Qualified::BasicTypedefs; -} diff --git a/wrap/Qualified.h b/wrap/Qualified.h deleted file mode 100644 index 416db239d..000000000 --- a/wrap/Qualified.h +++ /dev/null @@ -1,370 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Qualified.h - * @brief Qualified name - * @author Frank Dellaert - * @date Nov 11, 2014 - **/ - -#pragma once - -#include -#include -#include -#include - -namespace wrap { - -/** - * Class to encapuslate a qualified name, i.e., with (nested) namespaces - */ -class Qualified { - -//protected: -public: - - std::vector namespaces_; ///< Stack of namespaces - std::string name_; ///< type name - static std::vector BasicTypedefs; - - friend struct TypeGrammar; - friend class TemplateSubstitution; - -public: - - /// the different categories - typedef enum { - CLASS = 1, EIGEN = 2, BASIS = 3, VOID = 4 - } Category; - Category category; - - /// Default constructor - Qualified() : - category(VOID) { - } - - /// Construct from name and optional category - Qualified(const std::string& n, Category c = CLASS) : - name_(n), category(c) { - } - - /// Construct from scoped name and optional category - Qualified(const std::string& ns1, const std::string& n, Category c = CLASS) : - name_(n), category(c) { - namespaces_.push_back(ns1); - } - - /// Construct from doubly scoped name and optional category - Qualified(const std::string& ns1, const std::string& ns2, - const std::string& n, Category c = CLASS) : - name_(n), category(c) { - namespaces_.push_back(ns1); - namespaces_.push_back(ns2); - } - - /// Construct from arbitrarily scoped name - Qualified(std::vector ns, const std::string& name) : - namespaces_(ns), name_(name), category(CLASS) { - } - - // Destructor - virtual ~Qualified() {} - - std::string name() const { - return name_; - } - - std::vector namespaces() const { - return namespaces_; - } - - // Qualified is 'abused' as template argument name as well - // this function checks whether *this matches with templateArg - bool match(const std::string& templateArg) const { - return (name_ == templateArg && namespaces_.empty()); //TODO && category == CLASS); - } - - bool match(const std::vector& templateArgs) const { - for(const std::string& s: templateArgs) - if (match(s)) return true; - return false; - } - - void rename(const Qualified& q) { - namespaces_ = q.namespaces_; - name_ = q.name_; - category = q.category; - } - - void expand(const std::string& expansion) { - name_ += expansion; - } - - bool operator==(const Qualified& other) const { - return namespaces_ == other.namespaces_ && name_ == other.name_ - && category == other.category; - } - - bool empty() const { - return namespaces_.empty() && name_.empty(); - } - - virtual void clear() { - namespaces_.clear(); - name_.clear(); - category = VOID; - } - - bool isScalar() const { - return (name() == "bool" || name() == "char" - || name() == "unsigned char" || name() == "int" - || name() == "size_t" || name() == "double"); - } - - bool isVoid() const { - return name() == "void"; - } - - bool isString() const { - return name() == "string"; - } - - bool isEigen() const { - return name() == "Vector" || name() == "Matrix"; - } - - bool isBasicTypedef() const { - return std::find(Qualified::BasicTypedefs.begin(), - Qualified::BasicTypedefs.end(), - *this) != Qualified::BasicTypedefs.end(); - } - - bool isNonBasicType() const { - return name() != "This" && !isString() && !isScalar() && !isEigen() && - !isVoid() && !isBasicTypedef(); - } - -public: - - static Qualified MakeClass(std::vector namespaces, - const std::string& name) { - return Qualified(namespaces, name); - } - 
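As a hedged usage sketch of the Qualified helper declared above (again standalone, not part of the patch): it assumes the deleted Qualified.h header is still available and relies only on behavior visible in the code, namely that qualifiedName joins the namespace stack and the type name with the given delimiter, that pxdClassName appends "Xd" for Eigen types, and that pyxArgumentType maps Eigen types to "np.ndarray".

```cpp
// Illustrative only: exercises the Qualified constructors and queries shown above.
#include <cassert>
#include "Qualified.h"   // the header being removed in this patch

int main() {
  using wrap::Qualified;

  Qualified pose("gtsam", "Pose2");                // a class type gtsam::Pose2
  assert(pose.qualifiedName("::") == "gtsam::Pose2");
  assert(pose.isNonBasicType() && !pose.isEigen());

  Qualified vec = Qualified::MakeEigen("Vector");  // Eigen category
  assert(vec.pxdClassName() == "VectorXd");        // Vector --> VectorXd in the pxd
  assert(vec.pyxArgumentType() == "np.ndarray");   // passed as a numpy array in pyx
  return 0;
}
```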
- static Qualified MakeEigen(const std::string& name) { - return Qualified(name, EIGEN); - } - - static Qualified MakeBasis(const std::string& name) { - return Qualified(name, BASIS); - } - - static Qualified MakeVoid() { - return Qualified("void", VOID); - } - - /// Return a qualified namespace using given delimiter - std::string qualifiedNamespaces(const std::string& delimiter = "") const { - std::string result; - for (std::size_t i = 0; i < namespaces_.size(); ++i) - result += (namespaces_[i] + ((i VectorXd, Matrix --> MatrixXd - std::string pxdClassName() const { - if (isEigen()) - return name_ + "Xd"; - else if (isNonBasicType()) - return "C" + qualifiedName("_", 1); - else return name_; - } - - /// name of Python classes in pyx - /// They have the same name with the corresponding Cython classes in pxd - /// But note that they are different: These are Python classes in the pyx file - /// To refer to a Cython class in pyx, we need to add "pxd.", e.g. pxd.noiseModel_Gaussian - /// see the other function pxd_class_in_pyx for that purpose. - std::string pyxClassName() const { - if (isEigen()) - return name_; - else - return qualifiedName("_", 1); - } - - /// Python type of function arguments in pyx to interface with normal python scripts - /// Eigen types become np.ndarray (There's no Eigen types, e.g. VectorXd, in - /// Python. We have to pass in numpy array in the arguments, which will then be - /// converted to Eigen types in Cython) - std::string pyxArgumentType() const { - if (isEigen()) - return "np.ndarray"; - else - return qualifiedName("_", 1); - } - - /// return the Cython class in pxd corresponding to a Python class in pyx - std::string pxd_class_in_pyx() const { - if (isNonBasicType()) { - return pxdClassName(); - } else if (isEigen()) { - return name_ + "Xd"; - } else // basic types and not Eigen - return name_; - } - - /// the internal Cython shared obj in a Python class wrappper - std::string shared_pxd_obj_in_pyx() const { - return pxdClassName() + "_"; - } - - std::string make_shared_pxd_class_in_pyx() const { - return "make_shared[" + pxd_class_in_pyx() + "]"; - } - - std::string shared_pxd_class_in_pyx() const { - return "shared_ptr[" + pxd_class_in_pyx() + "]"; - } - - friend std::ostream& operator<<(std::ostream& os, const Qualified& q) { - os << q.qualifiedName("::"); - return os; - } -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct TypeGrammar: classic::grammar { - - wrap::Qualified& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - TypeGrammar(wrap::Qualified& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - typedef classic::rule Rule; - - Rule void_p, basisType_p, eigenType_p, namespace_del_p, class_p, type_p; - - definition(TypeGrammar const& self) { - - using namespace wrap; - using namespace classic; - typedef BasicRules Basic; - - // HACK: use const values instead of using enums themselves - somehow this doesn't result in values getting assigned to gibberish - static const Qualified::Category EIGEN = Qualified::EIGEN; - static const Qualified::Category BASIS = Qualified::BASIS; - static const Qualified::Category CLASS = Qualified::CLASS; - static const Qualified::Category VOID = Qualified::VOID; - - void_p = str_p("void") // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, 
VOID)]; - - basisType_p = Basic::basisType_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, BASIS)]; - - eigenType_p = Basic::eigenType_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, EIGEN)]; - - namespace_del_p = Basic::namespace_p // - [push_back_a(self.result_.namespaces_)] >> str_p("::"); - - class_p = *namespace_del_p >> Basic::className_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, CLASS)]; - - type_p = void_p | basisType_p | class_p | eigenType_p; - } - - Rule const& start() const { - return type_p; - } - - }; -}; -// type_grammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -template -struct TypeListGrammar: public classic::grammar > { - - typedef std::vector TypeList; - TypeList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - TypeListGrammar(TypeList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - wrap::Qualified type; ///< temporary for use during parsing - TypeGrammar type_g; ///< Individual Type grammars - - classic::rule type_p, typeList_p; - - definition(TypeListGrammar const& self) : - type_g(type) { - using namespace classic; - - type_p = type_g[push_back_a(self.result_, type)][clear_a(type)]; - - typeList_p = OPEN >> !type_p >> *(',' >> type_p) >> CLOSE; - } - - classic::rule const& start() const { - return typeList_p; - } - - }; -}; -// TypeListGrammar - -/* ************************************************************************* */ -// Needed for other parsers in Argument.h and ReturnType.h -static const bool T = true; - -} // \namespace wrap - diff --git a/wrap/README.md b/wrap/README.md deleted file mode 100644 index 014577b5a..000000000 --- a/wrap/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# WRAP README - -The wrap library wraps the GTSAM library into a MATLAB toolbox. - -It was designed to be more general than just wrapping GTSAM, but a small amount of GTSAM specific code exists in matlab.h, the include file that is included by the mex files. The GTSAM-specific functionality consists primarily of handling of Eigen Matrix and Vector classes. - -For notes on creating a wrap interface, see gtsam.h for what features can be wrapped into a toolbox, as well as the current state of the toolbox for gtsam. For more technical details on the interface, please read comments in matlab.h - -Some good things to know: - -OBJECT CREATION - -- Classes are created by special constructors, e.g., new_GaussianFactorGraph_.cpp. - These constructors are called from the MATLAB class @GaussianFactorGraph. - new_GaussianFactorGraph_ calls wrap_constructed in matlab.h, see documentation there - -METHOD (AND CONSTRUCTOR) ARGUMENTS - -- Simple argument types of methods, such as "double", will be converted in the - mex wrappers by calling unwrap, defined in matlab.h -- Vector and Matrix arguments are normally passed by reference in GTSAM, but - in gtsam.h you need to pretend they are passed by value, to trigger the - generation of the correct conversion routines unwrap and unwrap -- passing classes as arguments works, provided they are passed by reference. 
- This triggers a call to unwrap_shared_ptr - - \ No newline at end of file diff --git a/wrap/ReturnType.cpp b/wrap/ReturnType.cpp deleted file mode 100644 index fdf86d975..000000000 --- a/wrap/ReturnType.cpp +++ /dev/null @@ -1,101 +0,0 @@ -/** - * @file ReturnType.cpp - * @date Nov 13, 2014 - * @author Frank Dellaert - */ - -#include "ReturnType.h" -#include "Class.h" -#include "utilities.h" -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void ReturnType::wrap_result(const string& out, const string& result, - FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const { - string cppType = qualifiedName("::"), matlabType = qualifiedName("."); - - if (category == CLASS) { - // Handle Classes - string objCopy, ptrType; - const bool isVirtual = typeAttributes.attributes(cppType).isVirtual; - if (isPtr) - objCopy = result; // a shared pointer can always be passed as is - else { - // but if we want an actual new object, things get more complex - if (isVirtual) - // A virtual class needs to be cloned, so the whole hierarchy is - // returned - objCopy = result + ".clone()"; - else { - // ...but a non-virtual class can just be copied - objCopy = "boost::make_shared<" + cppType + ">(" + result + ")"; - } - } - // e.g. out[1] = wrap_shared_ptr(pairResult.second,"gtsam.Point3", false); - wrapperFile.oss << out << " = wrap_shared_ptr(" << objCopy << ",\"" - << matlabType << "\", " << (isVirtual ? "true" : "false") - << ");\n"; - - } else if (isPtr) { - // Handle shared pointer case for BASIS/EIGEN/VOID - // This case does not actually occur in GTSAM wrappers, so untested! - wrapperFile.oss << " {\n boost::shared_ptr<" << qualifiedName("::") - << "> shared(" << result << ");" << endl; - wrapperFile.oss << out << " = wrap_shared_ptr(shared,\"" << matlabType - << "\");\n }\n"; - - } else if (matlabType != "void") - // Handle normal case case for BASIS/EIGEN - wrapperFile.oss << out << " = wrap< " << qualifiedName("::") << " >(" << result - << ");\n"; -} - -/* ************************************************************************* */ -void ReturnType::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - string cythonType; - if (name() == "This") - cythonType = className; - else if (match(templateArgs)) - cythonType = name(); - else - cythonType = pxdClassName(); - if (isPtr) cythonType = "shared_ptr[" + cythonType + "]"; - file.oss << cythonType; -} - -/* ************************************************************************* */ -std::string ReturnType::pyx_returnType(bool addShared) const { - string retType = pxd_class_in_pyx(); - if (isPtr || (isNonBasicType() && addShared)) - retType = "shared_ptr[" + retType + "]"; - return retType; -} - -/* ************************************************************************* */ -std::string ReturnType::pyx_casting(const std::string& var, - bool isSharedVar) const { - if (isEigen()) { - string s = "ndarray_copy(" + var + ")"; - if (pyxClassName() == "Vector") - return s + ".squeeze()"; - else return s; - } - else if (isNonBasicType()) { - if (isPtr || isSharedVar) - return pyxClassName() + ".cyCreateFromShared(" + var + ")"; - else { - // construct a shared_ptr if var is not a shared ptr - return pyxClassName() + ".cyCreateFromShared(" + make_shared_pxd_class_in_pyx() + - + "(" + var + "))"; - } - } else - return var; -} - -/* 
************************************************************************* */ diff --git a/wrap/ReturnType.h b/wrap/ReturnType.h deleted file mode 100644 index 8d78bb48f..000000000 --- a/wrap/ReturnType.h +++ /dev/null @@ -1,89 +0,0 @@ -/** - * @file ReturnValue.h - * @brief Encapsulates a return type of a method - * @date Nov 13, 2014 - * @author Frank Dellaert - */ - -#include "Qualified.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" -#include - -#pragma once - -namespace wrap { - -/** - * Encapsulates return value of a method or function - */ -struct ReturnType : public Qualified { - bool isPtr; - - friend struct ReturnValueGrammar; - - /// Makes a void type - ReturnType() : isPtr(false) {} - - /// Constructor, no namespaces - ReturnType(const std::string& name, Category c = CLASS, bool ptr = false) - : Qualified(name, c), isPtr(ptr) {} - - void clear() override { - Qualified::clear(); - isPtr = false; - } - - /// Check if this type is in a set of valid types - template - void verify(TYPES validtypes, const std::string& s) const { - std::string key = qualifiedName("::"); - if (find(validtypes.begin(), validtypes.end(), key) == validtypes.end()) - throw DependencyMissing(key, "checking return type of " + s); - } - - /// @param className the actual class name to use when "This" is specified - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - - std::string pyx_returnType(bool addShared = true) const; - std::string pyx_casting(const std::string& var, - bool isSharedVar = true) const; - -private: - friend struct ReturnValue; - - /// Example: out[1] = wrap_shared_ptr(pairResult.second,"Test", false); - void wrap_result(const std::string& out, const std::string& result, - FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const; -}; - -//****************************************************************************** -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ReturnTypeGrammar : public classic::grammar { - wrap::ReturnType& result_; ///< successful parse will be placed in here - - TypeGrammar type_g; - - /// Construct ReturnType grammar and specify where result is placed - ReturnTypeGrammar(wrap::ReturnType& result) - : result_(result), type_g(result_) {} - - /// Definition of type grammar - template - struct definition { - classic::rule type_p; - - definition(ReturnTypeGrammar const& self) { - using namespace classic; - type_p = self.type_g >> !ch_p('*')[assign_a(self.result_.isPtr, T)]; - } - - classic::rule const& start() const { return type_p; } - }; -}; -// ReturnTypeGrammar - -} // \namespace wrap diff --git a/wrap/ReturnValue.cpp b/wrap/ReturnValue.cpp deleted file mode 100644 index e58e85602..000000000 --- a/wrap/ReturnValue.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/** - * @file ReturnValue.cpp - * @date Dec 1, 2011 - * @author Alex Cunningham - * @author Andrew Melim - * @author Richard Roberts - */ - -#include "ReturnValue.h" -#include "utilities.h" -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -ReturnValue ReturnValue::expandTemplate(const TemplateSubstitution& ts) const { - ReturnValue instRetVal = *this; - instRetVal.type1 = ts.tryToSubstitite(type1); - if (isPair) instRetVal.type2 = ts.tryToSubstitite(type2); - return instRetVal; -} - -/* ************************************************************************* */ -string 
ReturnValue::returnType() const { - if (isPair) - return "pair< " + type1.qualifiedName("::") + ", " + - type2.qualifiedName("::") + " >"; - else - return type1.qualifiedName("::"); -} - -/* ************************************************************************* */ -string ReturnValue::matlab_returnType() const { - return isPair ? "[first,second]" : "result"; -} - -/* ************************************************************************* */ -void ReturnValue::wrap_result(const string& result, FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const { - if (isPair) { - // For a pair, store the returned pair so we do not evaluate the function - // twice - wrapperFile.oss << " auto pairResult = " << result - << ";\n"; - type1.wrap_result(" out[0]", "pairResult.first", wrapperFile, - typeAttributes); - type2.wrap_result(" out[1]", "pairResult.second", wrapperFile, - typeAttributes); - } else { // Not a pair - type1.wrap_result(" out[0]", result, wrapperFile, typeAttributes); - } -} - -/* ************************************************************************* */ -void ReturnValue::emit_matlab(FileWriter& proxyFile) const { - string output; - if (isPair) - proxyFile.oss << "[ varargout{1} varargout{2} ] = "; - else if (type1.category != ReturnType::VOID) - proxyFile.oss << "varargout{1} = "; -} - -/* ************************************************************************* */ -void ReturnValue::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - if (isPair) { - file.oss << "pair["; - type1.emit_cython_pxd(file, className, templateArgs); - file.oss << ","; - type2.emit_cython_pxd(file, className, templateArgs); - file.oss << "] "; - } else { - type1.emit_cython_pxd(file, className, templateArgs); - file.oss << " "; - } -} - -/* ************************************************************************* */ -std::string ReturnValue::pyx_returnType() const { - if (isVoid()) return ""; - if (isPair) { - return "pair [" + type1.pyx_returnType(false) + "," + - type2.pyx_returnType(false) + "]"; - } else { - return type1.pyx_returnType(true); - } -} - -/* ************************************************************************* */ -std::string ReturnValue::pyx_casting(const std::string& var) const { - if (isVoid()) return ""; - if (isPair) { - return "(" + type1.pyx_casting(var + ".first", false) + "," + - type2.pyx_casting(var + ".second", false) + ")"; - } else { - return type1.pyx_casting(var); - } -} - -/* ************************************************************************* */ diff --git a/wrap/ReturnValue.h b/wrap/ReturnValue.h deleted file mode 100644 index 721132797..000000000 --- a/wrap/ReturnValue.h +++ /dev/null @@ -1,126 +0,0 @@ -/** - * @file ReturnValue.h - * - * @brief Encapsulates a return value from a method - * @date Dec 1, 2011 - * @author Alex Cunningham - * @author Richard Roberts - */ - -#include "ReturnType.h" -#include "TemplateSubstitution.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" - -#pragma once - -namespace wrap { - -/** - * Encapsulates return type of a method or function, possibly a pair - */ -struct ReturnValue { - - bool isPair; - ReturnType type1, type2; - - friend struct ReturnValueGrammar; - - /// Default constructor - ReturnValue() : - isPair(false) { - } - - /// Construct from type - ReturnValue(const ReturnType& type) : - isPair(false), type1(type) { - } - - /// Construct from pair type arguments - ReturnValue(const ReturnType& t1, const 
ReturnType& t2) : - isPair(true), type1(t1), type2(t2) { - } - - /// Destructor - virtual ~ReturnValue() {} - - virtual void clear() { - type1.clear(); - type2.clear(); - isPair = false; - } - - bool isVoid() const { - return !isPair && !type1.isPtr && (type1.name() == "void"); - } - - bool operator==(const ReturnValue& other) const { - return isPair == other.isPair && type1 == other.type1 - && type2 == other.type2; - } - - /// Substitute template argument - ReturnValue expandTemplate(const TemplateSubstitution& ts) const; - - std::string returnType() const; - - std::string matlab_returnType() const; - - void wrap_result(const std::string& result, FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const; - - void emit_matlab(FileWriter& proxyFile) const; - - /// @param className the actual class name to use when "This" is specified - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector<std::string>& templateArgs) const; - std::string pyx_returnType() const; - std::string pyx_casting(const std::string& var) const; - - friend std::ostream& operator<<(std::ostream& os, const ReturnValue& r) { - if (!r.isPair && r.type1.category == ReturnType::VOID) - os << "void"; - else - os << r.returnType(); - return os; - } - -}; - -//****************************************************************************** -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ReturnValueGrammar: public classic::grammar<ReturnValueGrammar> { - - wrap::ReturnValue& result_; ///< successful parse will be placed in here - ReturnTypeGrammar returnType1_g, returnType2_g; ///< Type parsers - - /// Construct type grammar and specify where result is placed - ReturnValueGrammar(wrap::ReturnValue& result) : - result_(result), returnType1_g(result.type1), returnType2_g(result.type2) { - } - - /// Definition of type grammar - template<typename ScannerT> - struct definition { - - classic::rule<ScannerT> pair_p, returnValue_p; - - definition(ReturnValueGrammar const& self) { - using namespace classic; - - pair_p = (str_p("pair") >> '<' >> self.returnType1_g >> ',' - >> self.returnType2_g >> '>')[assign_a(self.result_.isPair, T)]; - - returnValue_p = pair_p | self.returnType1_g; - } - - classic::rule<ScannerT> const& start() const { - return returnValue_p; - } - - }; -}; -// ReturnValueGrammar - -}// \namespace wrap diff --git a/wrap/StaticMethod.cpp b/wrap/StaticMethod.cpp deleted file mode 100644 index 0f812ea61..000000000 --- a/wrap/StaticMethod.cpp +++ /dev/null @@ -1,151 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al.
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file StaticMethod.cpp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "StaticMethod.h" -#include "utilities.h" -#include "Class.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void StaticMethod::proxy_header(FileWriter& proxyFile) const { - string upperName = matlabName(); - upperName[0] = toupper(upperName[0], locale()); - proxyFile.oss << " function varargout = " << upperName << "(varargin)\n"; -} - -/* ************************************************************************* */ -string StaticMethod::wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const { - // check arguments - // NOTE: for static functions, there is no object passed - wrapperFile.oss << " checkArguments(\"" << matlabUniqueName << "." << name_ - << "\",nargout,nargin," << args.size() << ");\n"; - - // unwrap arguments, see Argument.cpp - args.matlab_unwrap(wrapperFile, 0); // We start at 0 because there is no self object - - // call method and wrap result - // example: out[0]=wrap(staticMethod(t)); - string expanded = cppClassName + "::" + name_; - if (templateArgValue_) - expanded += ("<" + templateArgValue_->qualifiedName("::") + ">"); - - return expanded; -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pxd(FileWriter& file, const Class& cls) const { - for(size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " @staticmethod\n"; - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << name_ + ((i>0)?"_" + to_string(i):"") << " \"" << name_ << "\"" << "("; - argumentList(i).emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << ") except +\n"; - } -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_wrapper_pxd(FileWriter& file, - const Class& cls) const { - if (nrOverloads() > 1) { - for (size_t i = 0; i < nrOverloads(); ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " @staticmethod\n"; - file.oss << " cdef tuple " + funcName + "(tuple args, dict kwargs)\n"; - } - } -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pyx_no_overload(FileWriter& file, - const Class& cls) const { - assert(nrOverloads() == 1); - file.oss << " @staticmethod\n"; - file.oss << " def " << name_ << "("; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string call = pyx_functionCall(cls.pxd_class_in_pyx(), name_, 0); - file.oss << " "; - if (!returnVals_[0].isVoid()) { - file.oss << "return " << returnVals_[0].pyx_casting(call) << "\n"; - } else - file.oss << call << "\n"; - file.oss << "\n"; -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pyx(FileWriter& file, const Class& cls) const { - size_t N = nrOverloads(); - if (N == 1) { - emit_cython_pyx_no_overload(file, cls); - return; - } - - // Dealing with overloads..
- file.oss << " @staticmethod # overloaded\n"; - file.oss << " def " << name_ << "(*args, **kwargs):\n"; - for (size_t i = 0; i < N; ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " success, results = " << cls.pyxClassName() << "." - << funcName << "(args, kwargs)\n"; - file.oss << " if success:\n return results\n"; - } - file.oss << " raise TypeError('Could not find the correct overload')\n\n"; - - // Create cdef methods for all overloaded methods - for(size_t i = 0; i < N; ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " @staticmethod\n"; - file.oss << " cdef tuple " + funcName + "(tuple args, dict kwargs):\n"; - file.oss << " cdef list __params\n"; - if (!returnVals_[i].isVoid()) { - file.oss << " cdef " << returnVals_[i].pyx_returnType() << " return_value\n"; - } - file.oss << " try:\n"; - ArgumentList args = argumentList(i); - file.oss << pyx_resolveOverloadParams(args, false, 3); - - /// Call cython corresponding function and return - file.oss << args.pyx_convertEigenTypeAndStorageOrder(" "); - string pxdFuncName = name_ + ((i>0)?"_" + to_string(i):""); - string call = pyx_functionCall(cls.pxd_class_in_pyx(), pxdFuncName, i); - if (!returnVals_[i].isVoid()) { - file.oss << " return_value = " << call << "\n"; - file.oss << " return True, " << returnVals_[i].pyx_casting("return_value") << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return True, None\n"; - } - file.oss << " except:\n"; - file.oss << " return False, None\n\n"; - } -} - -/* ************************************************************************* */ diff --git a/wrap/StaticMethod.h b/wrap/StaticMethod.h deleted file mode 100644 index dbb918596..000000000 --- a/wrap/StaticMethod.h +++ /dev/null @@ -1,51 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Template.h - * @brief Template name - * @author Frank Dellaert - * @date Nov 11, 2014 - **/ - -#pragma once - -#include - -namespace wrap { - -/// The template specification that goes before a method or a class -class Template { - std::string argName_; - std::vector<Qualified> argValues_; - std::vector<int> intList_; - friend struct TemplateGrammar; -public: - /// The only way to get values into a Template is via our friendly Grammar - Template() { - } - void clear() { - argName_.clear(); - argValues_.clear(); - intList_.clear(); - } - const std::string& argName() const { - return argName_; - } - const std::vector<int>& intList() const { - return intList_; - } - const std::vector<Qualified>& argValues() const { - return argValues_; - } - bool empty() const { - return argValues_.empty() && intList_.empty(); - } - size_t nrValues() const { - return argValues_.size(); - } - const Qualified& operator[](size_t i) const { - return argValues_[i]; - } - bool valid() const { - return !argName_.empty() && argValues_.size() > 0; - } - -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct IntListGrammar: public classic::grammar<IntListGrammar> { - - typedef std::vector<int> IntList; - IntList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - IntListGrammar(IntList& result) : - result_(result) { - } - - /// Definition of type grammar - template<typename ScannerT> - struct definition { - - classic::rule<ScannerT> integer_p, intList_p; - - definition(IntListGrammar const& self) { - using namespace classic; - - integer_p = int_p[push_back_a(self.result_)]; - - intList_p = '{' >> !integer_p >>
*(',' >> integer_p) >> '}'; - } - - classic::rule<ScannerT> const& start() const { - return intList_p; - } - - }; -}; -// IntListGrammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct TemplateGrammar: public classic::grammar<TemplateGrammar> { - - Template& result_; ///< successful parse will be placed in here - TypeListGrammar<'{', '}'> argValues_g; ///< TypeList parser - IntListGrammar intList_g; ///< IntList parser - - /// Construct type grammar and specify where result is placed - TemplateGrammar(Template& result) : - result_(result), argValues_g(result.argValues_), // - intList_g(result.intList_) { - } - - /// Definition of type grammar - template<typename ScannerT> - struct definition: BasicRules<ScannerT> { - - classic::rule<ScannerT> templateArgValues_p; - - definition(TemplateGrammar const& self) { - using classic::str_p; - using classic::assign_a; - templateArgValues_p = (str_p("template") >> '<' - >> (BasicRules<ScannerT>::name_p)[assign_a(self.result_.argName_)] - >> '=' >> (self.argValues_g | self.intList_g) >> '>'); - } - - classic::rule<ScannerT> const& start() const { - return templateArgValues_p; - } - - }; -}; -// TemplateGrammar - -/// Cool initializer for tests -static inline boost::optional