Fixed Doxygen warnings.

parent b5ea7e1ba0
commit b9927a1b7e

@@ -36,7 +36,7 @@ namespace gtsam {
 /** access the underlying value */
 double value() const { return d_; }

-/** print @param s optional string naming the object */
+/** print @param name optional string naming the object */
 inline void print(const std::string& name="") const {
 std::cout << name << ": " << d_ << std::endl;
 }

@@ -52,7 +52,7 @@ struct LieVector : public Vector, public DerivedValue<LieVector> {
 return static_cast<Vector>(*this);
 }

-/** print @param s optional string naming the object */
+/** print @param name optional string naming the object */
 inline void print(const std::string& name="") const {
 gtsam::print(vector(), name);
 }

@@ -217,8 +217,8 @@ void insertSub(Matrix& fullMatrix, const Matrix& subMatrix, size_t i, size_t j);

 /**
  * Extracts a column view from a matrix that avoids a copy
- * @param matrix to extract column from
- * @param index of the column
+ * @param A matrix to extract column from
+ * @param j index of the column
  * @return a const view of the matrix
  */
 template<class MATRIX>

@@ -228,8 +228,8 @@ const typename MATRIX::ConstColXpr column(const MATRIX& A, size_t j) {

 /**
  * Extracts a row view from a matrix that avoids a copy
- * @param matrix to extract row from
- * @param index of the row
+ * @param A matrix to extract row from
+ * @param j index of the row
  * @return a const view of the matrix
  */
 template<class MATRIX>

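The two hunks above document column/row view helpers that return Eigen block expressions instead of copies. A minimal sketch of the underlying idea with plain Eigen (illustrative only, not the GTSAM helpers themselves):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d A = Eigen::Matrix3d::Identity();
  // A.col(j) and A.row(i) are views into A's storage: writing through them
  // modifies A, and reading them copies no data.
  A.col(1) = Eigen::Vector3d(7, 8, 9);
  std::cout << A.row(1) << std::endl;   // prints "0 8 0"
  return 0;
}
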
@@ -362,7 +362,7 @@ Vector backSubstituteUpper(const Vector& b, const Matrix& U, bool unit=false);
 * @param unit, set true if unit triangular
 * @return the solution x of L*x=b
 */
-Vector backSubstituteLower(const Matrix& L, const Vector& d, bool unit=false);
+Vector backSubstituteLower(const Matrix& L, const Vector& b, bool unit=false);

 /**
 * create a matrix by stacking other matrices

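The hunk above renames the right-hand-side argument of backSubstituteLower so it matches the documented system L*x=b. A minimal sketch of that forward-substitution solve using plain Eigen (gtsam::Matrix and gtsam::Vector are Eigen typedefs; this is not the GTSAM implementation):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d L;
  L << 2, 0, 0,
       1, 3, 0,
       4, 5, 6;                 // lower-triangular system matrix
  Eigen::Vector3d b(2, 7, 32);  // right-hand side
  // Forward substitution: solve L*x = b, which is what backSubstituteLower computes.
  Eigen::Vector3d x = L.triangularView<Eigen::Lower>().solve(b);
  std::cout << x.transpose() << std::endl;  // expected: 1 2 3
  return 0;
}
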
@@ -81,7 +81,7 @@ bool assert_equal(const V& expected, const boost::optional<const V&>& actual, do

 /**
  * Version of assert_equals to work with vectors
- * @DEPRECIATED: use container equals instead
+ * \deprecated: use container equals instead
  */
 template<class V>
 bool assert_equal(const std::vector<V>& expected, const std::vector<V>& actual, double tol = 1e-9) {

@@ -73,8 +73,8 @@ Vector Vector_(const std::vector<double>& data);

 /**
 * Create vector initialized to a constant value
- * @param size
- * @param constant value
+ * @param n is the size of the vector
+ * @param value is a constant value to insert into the vector
 */
 Vector repeat(size_t n, double value);

@@ -82,7 +82,7 @@ Vector repeat(size_t n, double value);
 * Create basis vector of dimension n,
 * with a constant in spot i
 * @param n is the size of the vector
- * @param index of the one
+ * @param i index of the one
 * @param value is the value to insert into the vector
 * @return delta vector
 */

@@ -92,20 +92,20 @@ Vector delta(size_t n, size_t i, double value);
 * Create basis vector of dimension n,
 * with one in spot i
 * @param n is the size of the vector
- * @param index of the one
+ * @param i index of the one
 * @return basis vector
 */
 inline Vector basis(size_t n, size_t i) { return delta(n, i, 1.0); }

 /**
 * Create zero vector
- * @param size
+ * @param n size
 */
 inline Vector zero(size_t n) { return Vector::Zero(n);}

 /**
 * Create vector initialized to ones
- * @param size
+ * @param n size
 */
 inline Vector ones(size_t n) { return Vector::Ones(n); }

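The helpers documented above (repeat, delta, basis, zero, ones) are thin wrappers over Eigen vector constructors. A small sketch of what each call produces, written with plain Eigen types (illustrative; only the declarations shown in the hunk are from GTSAM):

#include <Eigen/Dense>
#include <iostream>

int main() {
  const int n = 4;
  Eigen::VectorXd r = Eigen::VectorXd::Constant(n, 2.5); // repeat(4, 2.5) -> [2.5 2.5 2.5 2.5]
  Eigen::VectorXd d = Eigen::VectorXd::Zero(n);
  d(1) = 7.0;                                            // delta(4, 1, 7.0) -> [0 7 0 0]
  Eigen::VectorXd e = Eigen::VectorXd::Unit(n, 2);       // basis(4, 2) -> [0 0 1 0]
  Eigen::VectorXd z = Eigen::VectorXd::Zero(n);          // zero(4)
  Eigen::VectorXd o = Eigen::VectorXd::Ones(n);          // ones(4)
  std::cout << e.transpose() << std::endl;
  return 0;
}
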
@@ -249,35 +249,35 @@ double sum(const Vector &a);
 /**
 * Calculates L2 norm for a vector
 * modeled after boost.ublas for compatibility
- * @param vector
+ * @param v vector
 * @return the L2 norm
 */
 double norm_2(const Vector& v);

 /**
- * elementwise reciprocal of vector elements
+ * Elementwise reciprocal of vector elements
 * @param a vector
 * @return [1/a(i)]
 */
 Vector reciprocal(const Vector &a);

 /**
- * elementwise sqrt of vector elements
- * @param a vector
+ * Elementwise sqrt of vector elements
+ * @param v is a vector
 * @return [sqrt(a(i))]
 */
 Vector esqrt(const Vector& v);

 /**
- * absolut values of vector elements
- * @param a vector
+ * Absolute values of vector elements
+ * @param v is a vector
 * @return [abs(a(i))]
 */
 Vector abs(const Vector& v);

 /**
- * return the max element of a vector
- * @param a vector
+ * Return the max element of a vector
+ * @param a is a vector
 * @return max(a)
 */
 double max(const Vector &a);

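The block above documents elementwise vector helpers (norm_2, reciprocal, esqrt, abs, max). A short sketch of the same operations using Eigen's array interface (illustrative equivalents, not the GTSAM implementations):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::VectorXd a(3);
  a << 4.0, -9.0, 16.0;
  double n2 = a.norm();                                  // norm_2(a)
  Eigen::VectorXd rec = a.array().inverse().matrix();    // reciprocal(a) = [1/a(i)]
  Eigen::VectorXd ab  = a.array().abs().matrix();        // abs(a)        = [|a(i)|]
  Eigen::VectorXd sq  = ab.array().sqrt().matrix();      // esqrt(abs(a)) = [sqrt(|a(i)|)]
  double mx = a.maxCoeff();                              // max(a)
  std::cout << n2 << " " << mx << std::endl;
  return 0;
}
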
@@ -300,13 +300,13 @@ inline double inner_prod(const V1 &a, const V2& b) {

 /**
 * BLAS Level 1 scal: x <- alpha*x
- * @DEPRECIATED: use operators instead
+ * \deprecated: use operators instead
 */
 inline void scal(double alpha, Vector& x) { x *= alpha; }

 /**
 * BLAS Level 1 axpy: y <- alpha*x + y
- * @DEPRECIATED: use operators instead
+ * \deprecated: use operators instead
 */
 template<class V1, class V2>
 inline void axpy(double alpha, const V1& x, V2& y) {

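The deprecation notes above point at the plain operator forms. A minimal sketch of the equivalence, written directly with Eigen vectors (illustrative only):

#include <Eigen/Dense>

int main() {
  Eigen::VectorXd x = Eigen::VectorXd::Ones(3);
  Eigen::VectorXd y = Eigen::VectorXd::Zero(3);
  double alpha = 2.0;
  x *= alpha;      // BLAS scal: x <- alpha*x
  y += alpha * x;  // BLAS axpy: y <- alpha*x + y
  return 0;
}
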
@@ -89,14 +89,14 @@ namespace gtsam {

 /**
 * solve a conditional
- * @param parentsAssignment Known values of the parents
+ * @param parentsValues Known values of the parents
 * @return MPE value of the child (1 frontal variable).
 */
 size_t solve(const Values& parentsValues) const;

 /**
 * sample
- * @param parentsAssignment Known values of the parents
+ * @param parentsValues Known values of the parents
 * @return sample from conditional
 */
 size_t sample(const Values& parentsValues) const;

@@ -55,7 +55,7 @@ namespace gtsam {
 }

 /** Compute the marginal of a single variable
- * @param KEY DiscreteKey of the Variable
+ * @param key DiscreteKey of the Variable
 * @return Vector of marginal probabilities
 */
 Vector marginalProbabilities(const DiscreteKey& key) const {

@@ -83,7 +83,7 @@ namespace gtsam {
 /**
 * Named constructor with derivative
 * Calculate relative bearing to a landmark in local coordinate frame
- * @param point 2D location of landmark
+ * @param d 2D location of landmark
 * @param H optional reference for Jacobian
 * @return 2D rotation \in SO(2)
 */

@@ -394,7 +394,7 @@ namespace gtsam {
 * such that A = R*Q = R*Qz'*Qy'*Qx'. When A is a rotation matrix, R will
 * be the identity and Q is a yaw-pitch-roll decomposition of A.
 * The implementation uses Givens rotations and is based on Hartley-Zisserman.
- * @param a 3 by 3 matrix A=RQ
+ * @param A 3 by 3 matrix A=RQ
 * @return an upper triangular matrix R
 * @return a vector [thetax, thetay, thetaz] in radians.
 */

@@ -157,7 +157,7 @@ public:
 * disconnected graphs, a workaround is to create zero-information factors to
 * bridge the disconnects. To do this, create any factor type (e.g.
 * BetweenFactor or RangeFactor) with the noise model
- * <tt>\ref sharedPrecision(dim, 0.0)</tt>, where \c dim is the appropriate
+ * <tt>sharedPrecision(dim, 0.0)</tt>, where \c dim is the appropriate
 * dimensionality for that factor.
 */
 struct DisconnectedGraphException : public std::exception {

@@ -45,7 +45,7 @@ template<class KEY> class Conditional;
 * typedefs to refer to the associated conditional and shared_ptr types of the
 * derived class. See IndexFactor, JacobianFactor, etc. for examples.
 *
- * This class is \bold{not} virtual for performance reasons - derived symbolic classes,
+ * This class is \b not virtual for performance reasons - derived symbolic classes,
 * IndexFactor and IndexConditional, need to be created and destroyed quickly
 * during symbolic elimination. GaussianFactor and NonlinearFactor are virtual.
 * \nosubgrouping

@@ -245,8 +245,8 @@ template<class CONDITIONAL, class CLIQUE> class BayesTree;

 /**
 * static function that combines two factor graphs
- * @param const &fg1 Linear factor graph
- * @param const &fg2 Linear factor graph
+ * @param fg1 Linear factor graph
+ * @param fg2 Linear factor graph
 * @return a new combined factor graph
 */
 template<class FACTORGRAPH>

@@ -151,9 +151,9 @@ namespace gtsam {
 }

 /**
- * static function that combines two factor graphs
- * @param const &lfg1 Linear factor graph
- * @param const &lfg2 Linear factor graph
+ * Static function that combines two factor graphs.
+ * @param lfg1 Linear factor graph
+ * @param lfg2 Linear factor graph
 * @return a new combined factor graph
 */
 static GaussianFactorGraph combine2(const GaussianFactorGraph& lfg1,

@@ -116,7 +116,7 @@ public:
 * all of the other variables. This function returns the result as a mean
 * vector and covariance matrix. Compared to marginalCanonical, which
 * returns a GaussianConditional, this function back-substitutes the R factor
- * to obtain the mean, then computes \Sigma = (R^T * R)^-1.
+ * to obtain the mean, then computes \f$ \Sigma = (R^T * R)^{-1} \f$.
 */
 Matrix marginalCovariance(Index j) const;

@@ -115,7 +115,7 @@ public:
 * all of the other variables. This function returns the result as a mean
 * vector and covariance matrix. Compared to marginalCanonical, which
 * returns a GaussianConditional, this function back-substitutes the R factor
- * to obtain the mean, then computes \Sigma = (R^T * R)^-1.
+ * to obtain the mean, then computes \f$ \Sigma = (R^T * R)^{-1} \f$.
 */
 Matrix marginalCovariance(Index j) const;

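The two hunks above wrap the same relation in Doxygen math markup: given the square-root information factor R produced by elimination, the marginal covariance is Sigma = (R^T * R)^{-1}. A small numeric sketch of that relation (illustrative only; it does not call marginalCovariance):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d R;
  R << 2, 1,
       0, 3;                                  // upper-triangular factor from elimination
  Eigen::Matrix2d info  = R.transpose() * R;  // information matrix R^T * R
  Eigen::Matrix2d Sigma = info.inverse();     // covariance Sigma = (R^T * R)^{-1}
  std::cout << Sigma << std::endl;
  return 0;
}
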
@@ -237,7 +237,7 @@ namespace gtsam {
 /** Return the number of columns and rows of the Hessian matrix */
 size_t rows() const { return info_.rows(); }

-/** Return a view of the block at (j1,j2) of the <emph>upper-triangular part</emph> of the
+/** Return a view of the block at (j1,j2) of the <em>upper-triangular part</em> of the
 * information matrix \f$ H \f$, no data is copied. See HessianFactor class documentation
 * above to explain that only the upper-triangular part of the information matrix is stored
 * and returned by this function.

@@ -249,7 +249,7 @@ namespace gtsam {
 */
 constBlock info(const_iterator j1, const_iterator j2) const { return info_(j1-begin(), j2-begin()); }

-/** Return the <emph>upper-triangular part</emph> of the full *augmented* information matrix,
+/** Return the <em>upper-triangular part</em> of the full *augmented* information matrix,
 * as described above. See HessianFactor class documentation above to explain that only the
 * upper-triangular part of the information matrix is stored and returned by this function.
 */

@@ -1,7 +1,7 @@
 /**
 * @file JacobianFactorGraph.h
 * @date Jun 6, 2012
- * @Author Yong Dian Jian
+ * @author Yong Dian Jian
 */

 #include <gtsam/linear/JacobianFactorGraph.h>

@@ -13,7 +13,7 @@
 * @file JacobianFactorGraph.cpp
 * @date Jun 6, 2012
 * @brief Linear Algebra Operations for a JacobianFactorGraph
- * @Author Yong Dian Jian
+ * @author Yong Dian Jian
 */
 #pragma once

@@ -147,7 +147,7 @@ namespace gtsam {

 /** Insert a vector \c value with index \c j.
 * Causes reallocation. Can be used to insert values in any order, but
- * throws an invalid_argument exception if the index \j is already used.
+ * throws an invalid_argument exception if the index \c j is already used.
 * @param value The vector to be inserted.
 * @param j The index with which the value will be associated.
 */

@@ -171,10 +171,10 @@ namespace gtsam {
 /** equals required by Testable for unit testing */
 bool equals(const VectorValues& x, double tol = 1e-9) const;

-/// @}
-/// @{
 /// \anchor AdvancedConstructors
 /// @name Advanced Constructors
+/// @{
+/// @}

 /** Construct from a container of variable dimensions (in variable order), without initializing any values. */
 template<class CONTAINER>

@@ -13,7 +13,7 @@
 * @file DoglegOptimizer.cpp
 * @brief
 * @author Richard Roberts
- * @created Feb 26, 2012
+ * @date Feb 26, 2012
 */

 #include <gtsam/nonlinear/DoglegOptimizer.h>

@@ -13,7 +13,7 @@
 * @file DoglegOptimizer.h
 * @brief
 * @author Richard Roberts
- * @created Feb 26, 2012
+ * @date Feb 26, 2012
 */

 #pragma once

@@ -72,7 +72,7 @@ struct DoglegOptimizerImpl {
 *
 * The update is computed using a quadratic approximation \f$ M(\delta x) \f$
 * of an original nonlinear error function (a NonlinearFactorGraph) \f$ f(x) \f$.
- * The quadratic approximation is represented as a GaussianBayesNet \bayesNet, which is
+ * The quadratic approximation is represented as a GaussianBayesNet \f$ \bayesNet \f$, which is
 * obtained by eliminating a GaussianFactorGraph resulting from linearizing
 * the nonlinear factor graph \f$ f(x) \f$. Thus, \f$ M(\delta x) \f$ is
 * \f[

@@ -93,8 +93,8 @@ struct DoglegOptimizerImpl {
 * @param Rd The Bayes' net or tree as described above.
 * @param f The original nonlinear factor graph with which to evaluate the
 * accuracy of \f$ M(\delta x) \f$ to adjust \f$ \Delta \f$.
- * @param x0 The linearization point about which \bayesNet was created
- * @param ordering The variable ordering used to create \bayesNet
+ * @param x0 The linearization point about which \f$ \bayesNet \f$ was created
+ * @param ordering The variable ordering used to create \f$ \bayesNet \f$
 * @param f_error The result of <tt>f.error(x0)</tt>.
 * @return A DoglegIterationResult containing the new \c Delta, the linear
 * update \c dx_d, and the resulting nonlinear error \c f_error.

@@ -111,7 +111,7 @@ struct DoglegOptimizerImpl {
 *
 * The update is computed using a quadratic approximation \f$ M(\delta x) \f$
 * of an original nonlinear error function (a NonlinearFactorGraph) \f$ f(x) \f$.
- * The quadratic approximation is represented as a GaussianBayesNet \bayesNet, which is
+ * The quadratic approximation is represented as a GaussianBayesNet \f$ \bayesNet \f$, which is
 * obtained by eliminating a GaussianFactorGraph resulting from linearizing
 * the nonlinear factor graph \f$ f(x) \f$. Thus, \f$ M(\delta x) \f$ is
 * \f[

@@ -134,8 +134,8 @@ struct DoglegOptimizerImpl {
 * \f$ \| (1-\tau)\delta x_u + \tau\delta x_n \| = \Delta \f$, where \f$ \Delta \f$
 * is the trust region radius.
 * @param Delta Trust region radius
- * @param xu Steepest descent minimizer
- * @param xn Newton's method minimizer
+ * @param x_u Steepest descent minimizer
+ * @param x_n Newton's method minimizer
 */
 static VectorValues ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose=false);
 };

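The documentation above describes the dogleg blend: choose tau so that || (1-tau)*dx_u + tau*dx_n || equals the trust-region radius Delta, then return the blended step. A small worked sketch on plain vectors (illustrative; GTSAM's ComputeBlend operates on VectorValues):

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
  Eigen::Vector2d dx_u(1.0, 0.0);  // steepest-descent minimizer (inside the trust region)
  Eigen::Vector2d dx_n(4.0, 3.0);  // Newton step (outside the trust region)
  double Delta = 2.0;              // trust-region radius

  // Expand ||dx_u + tau*(dx_n - dx_u)||^2 = Delta^2 into a quadratic in tau.
  Eigen::Vector2d d = dx_n - dx_u;
  double a = d.squaredNorm();
  double b = 2.0 * dx_u.dot(d);
  double c = dx_u.squaredNorm() - Delta * Delta;
  double tau = (-b + std::sqrt(b * b - 4.0 * a * c)) / (2.0 * a);  // take the root in [0, 1]

  Eigen::Vector2d blended = (1.0 - tau) * dx_u + tau * dx_n;
  std::cout << "tau = " << tau << ", ||blend|| = " << blended.norm() << std::endl;  // norm equals Delta
  return 0;
}
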
@@ -13,7 +13,7 @@
 * @file GaussNewtonOptimizer.cpp
 * @brief
 * @author Richard Roberts
- * @created Feb 26, 2012
+ * @date Feb 26, 2012
 */

 #include <gtsam/inference/EliminationTree.h>

@@ -13,7 +13,7 @@
 * @file GaussNewtonOptimizer.h
 * @brief
 * @author Richard Roberts
- * @created Feb 26, 2012
+ * @date Feb 26, 2012
 */

 #pragma once

@@ -13,7 +13,7 @@
 * @file LevenbergMarquardtOptimizer.cpp
 * @brief
 * @author Richard Roberts
- * @created Feb 26, 2012
+ * @date Feb 26, 2012
 */

 #include <cmath>

@@ -13,7 +13,7 @@
 * @file LevenbergMarquardtOptimizer.h
 * @brief
 * @author Richard Roberts
- * @created Feb 26, 2012
+ * @date Feb 26, 2012
 */

 #pragma once

@@ -229,7 +229,7 @@ protected:
 };

 /** Check whether the relative error decrease is less than relativeErrorTreshold,
- * the absolute error decrease is less than absoluteErrorTreshold, <emph>or</emph>
+ * the absolute error decrease is less than absoluteErrorTreshold, <em>or</em>
 * the error itself is less than errorThreshold.
 */
 bool checkConvergence(double relativeErrorTreshold,

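The comment above describes the stopping test as a disjunction of three conditions. A sketch of that logic (the helper name and the exact argument list here are illustrative; the real gtsam::checkConvergence takes additional parameters):

// Returns true when the optimizer should stop, per the three conditions above.
bool convergedSketch(double relativeErrorTreshold, double absoluteErrorTreshold,
                     double errorThreshold, double currentError, double newError) {
  if (newError <= errorThreshold) return true;                // error itself is small enough
  double absoluteDecrease = currentError - newError;          // raw drop in error
  double relativeDecrease = absoluteDecrease / currentError;  // drop as a fraction of the old error
  return absoluteDecrease <= absoluteErrorTreshold ||
         relativeDecrease <= relativeErrorTreshold;
}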