naming and other changes - review1
parent 4b06616dfe
commit fbb26eea07
@@ -14,22 +14,28 @@ Author: Akshay Krishnan
 Date: September 2020
 """
 
+from collections import defaultdict
+from typing import Tuple, List
+
 import numpy as np
 
 import gtsam
 from gtsam.examples import SFMdata
 
 
-def get_data():
+def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]:
     """Returns data from SFMdata.createPoses(). This contains global rotations and unit translation directions."""
     # Using toy dataset in SfMdata for example.
     poses = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0))
+    # Rotations of the cameras in the world frame - wRc.
     rotations = gtsam.Values()
+    # Normalized translation directions for pairs of cameras - from first camera to second,
+    # in the coordinate frame of the first camera.
     translation_directions = []
     for i in range(0, len(poses) - 2):
-        # Add the rotation
+        # Add the rotation.
         rotations.insert(i, poses[i].rotation())
-        # Create unit translation measurements with next two poses
+        # Create unit translation measurements with next two poses.
         for j in range(i + 1, i + 3):
             i_Z_j = gtsam.Unit3(poses[i].rotation().unrotate(
                 poses[j].translation() - poses[i].translation()))
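Review note (not part of the diff): the measurement built above is the unit direction from camera i to camera j expressed in camera i's frame, and estimate_poses later rotates it back into the world frame before running MFAS. A minimal sketch of that geometry, using the gtsam Python calls that appear in this file; the two poses here are made up for illustration:

import gtsam

# Two hypothetical camera poses in the world frame.
wTi = gtsam.Pose3(gtsam.Rot3.Yaw(0.3), gtsam.Point3(0.0, 0.0, 0.0))
wTj = gtsam.Pose3(gtsam.Rot3.Yaw(0.6), gtsam.Point3(1.0, 2.0, 0.0))

# Direction from camera i to camera j in camera i's frame (what get_data builds as i_Z_j).
i_Z_j = gtsam.Unit3(wTi.rotation().unrotate(wTj.translation() - wTi.translation()))

# The same direction re-expressed in the world frame (what estimate_poses does before MFAS).
w_Z_j = gtsam.Unit3(wTi.rotation().rotate(i_Z_j.point3()))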
@@ -41,68 +47,72 @@ def get_data():
     return (rotations, translation_directions)
 
 
-def estimate_poses_given_rot(measurements: gtsam.BinaryMeasurementsUnit3,
-                             rotations: gtsam.Values):
-    """Estimate poses given normalized translation directions and rotations between nodes.
+def estimate_poses(relative_translations: gtsam.BinaryMeasurementsUnit3,
+                   rotations: gtsam.Values) -> gtsam.Values:
+    """Estimate poses given rotations and normalized translation directions between cameras.
 
-    Arguments:
-        measurements {BinaryMeasurementsUnit3}- List of translation direction from the first node to
-            the second node in the coordinate frame of the first node.
-        rotations {Values} -- Estimated rotations
+    Args:
+        relative_translations -- List of normalized translation directions between camera pairs, each direction
+            is from the first camera to the second, in the frame of the first camera.
+        rotations -- Rotations of the cameras in the world frame.
 
     Returns:
         Values -- Estimated poses.
     """
 
-    # Some hyperparameters.
-    max_1dsfm_projection_directions = 50
+    # Some hyperparameters; values are taken from 1DSfM.
+    max_1dsfm_projection_directions = 48
     outlier_weight_threshold = 0.1
 
     # Convert the translation directions to global frame using the rotations.
-    w_measurements = gtsam.BinaryMeasurementsUnit3()
-    for measurement in measurements:
-        w_measurements.append(gtsam.BinaryMeasurementUnit3(measurement.key1(), measurement.key2(), gtsam.Unit3(
-            rotations.atRot3(measurement.key1()).rotate(measurement.measured().point3())), measurement.noiseModel()))
+    w_relative_translations = gtsam.BinaryMeasurementsUnit3()
+    for relative_translation in relative_translations:
+        w_relative_translation = gtsam.Unit3(rotations.atRot3(relative_translation.key1())
+                                             .rotate(relative_translation.measured().point3()))
+        w_relative_translations.append(gtsam.BinaryMeasurementUnit3(relative_translation.key1(),
+                                                                    relative_translation.key2(),
+                                                                    w_relative_translation,
+                                                                    relative_translation.noiseModel()))
 
     # Indices of measurements that are to be used as projection directions. These are randomly chosen.
-    indices = np.random.choice(len(w_measurements), min(
-        max_1dsfm_projection_directions, len(w_measurements)), replace=False)
+    sampled_indices = np.random.choice(len(w_relative_translations), min(
+        max_1dsfm_projection_directions, len(w_relative_translations)), replace=False)
     # Sample projection directions from the measurements.
-    projection_directions = [w_measurements[idx].measured() for idx in indices]
+    projection_directions = [
+        w_relative_translations[idx].measured() for idx in sampled_indices]
 
     outlier_weights = []
     # Find the outlier weights for each direction using MFAS.
     for direction in projection_directions:
-        algorithm = gtsam.MFAS(w_measurements, direction)
+        algorithm = gtsam.MFAS(w_relative_translations, direction)
         outlier_weights.append(algorithm.computeOutlierWeights())
 
-    # Compute average of outlier weights.
-    avg_outlier_weights = {}
+    # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys (camera IDs) to a weight,
+    # where weights are proportional to the probability of the edge being an outlier.
+    avg_outlier_weights = defaultdict(lambda: 0.0)
     for outlier_weight_dict in outlier_weights:
-        for k, v in outlier_weight_dict.items():
-            if k in avg_outlier_weights:
-                avg_outlier_weights[k] += v / len(outlier_weights)
-            else:
-                avg_outlier_weights[k] = v / len(outlier_weights)
+        for keypair, weight in outlier_weight_dict.items():
+            avg_outlier_weights[keypair] += weight / len(outlier_weights)
 
-    # Remove measurements that have weight greater than threshold.
-    inlier_measurements = gtsam.BinaryMeasurementsUnit3()
-    [inlier_measurements.append(m) for m in w_measurements if avg_outlier_weights[(
-        m.key1(), m.key2())] < outlier_weight_threshold]
+    # Remove w_relative_translations that have weight greater than the threshold; these are outliers.
+    inlier_w_relative_translations = gtsam.BinaryMeasurementsUnit3()
+    [inlier_w_relative_translations.append(Z) for Z in w_relative_translations
+     if avg_outlier_weights[(Z.key1(), Z.key2())] < outlier_weight_threshold]
 
     # Run the optimizer to obtain translations for normalized directions.
-    translations = gtsam.TranslationRecovery(inlier_measurements).run()
+    w_translations = gtsam.TranslationRecovery(
+        inlier_w_relative_translations).run()
 
     poses = gtsam.Values()
     for key in rotations.keys():
         poses.insert(key, gtsam.Pose3(
-            rotations.atRot3(key), translations.atPoint3(key)))
+            rotations.atRot3(key), w_translations.atPoint3(key)))
     return poses
 
 
 def main():
     rotations, translation_directions = get_data()
-    poses = estimate_poses_given_rot(translation_directions, rotations)
+    poses = estimate_poses(translation_directions, rotations)
     print("**** Translation averaging output ****")
     print(poses)
     print("**************************************")
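Review note (not part of the diff): the switch from a plain dict to defaultdict removes the membership check when averaging the per-direction outlier weights. A minimal standalone sketch of that change, with made-up weight maps keyed by (key1, key2) pairs:

from collections import defaultdict

# Hypothetical outlier-weight maps, one per sampled projection direction.
outlier_weights = [{(0, 1): 0.0, (1, 2): 0.6},
                   {(0, 1): 0.2, (1, 2): 0.4}]

avg_outlier_weights = defaultdict(lambda: 0.0)
for outlier_weight_dict in outlier_weights:
    for keypair, weight in outlier_weight_dict.items():
        # Missing keys default to 0.0, so no `if keypair in avg_outlier_weights` branch is needed.
        avg_outlier_weights[keypair] += weight / len(outlier_weights)

print(dict(avg_outlier_weights))  # -> {(0, 1): 0.1, (1, 2): 0.5}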