Merge branch 'develop' into feature/LPSolver

# Conflicts:
#	gtsam_unstable/linear/tests/testQPSolver.cpp
release/4.3a0
Duy-Nguyen Ta 2016-04-27 01:10:48 -04:00
commit 94e8f7073c
196 changed files with 3216 additions and 2188 deletions


@ -305,7 +305,7 @@ endif()
# paths so that the compiler uses GTSAM headers in our source directory instead
# of any previously installed GTSAM headers.
include_directories(BEFORE
gtsam/3rdparty/UFconfig
gtsam/3rdparty/SuiteSparse_config
gtsam/3rdparty/CCOLAMD/Include
${METIS_INCLUDE_DIRECTORIES}
${PROJECT_SOURCE_DIR}

gtsam.h

@ -1798,9 +1798,6 @@ class Values {
void insert(size_t j, Vector t);
void insert(size_t j, Matrix t);
// Fixed size version
void insertFixed(size_t j, Vector t, size_t n);
void update(size_t j, const gtsam::Point2& t);
void update(size_t j, const gtsam::Point3& t);
void update(size_t j, const gtsam::Rot2& t);
@ -1818,12 +1815,6 @@ class Values {
template<T = {gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Pose2, gtsam::Rot3, gtsam::Pose3, gtsam::Cal3_S2, gtsam::Cal3DS2, gtsam::Cal3Bundler, gtsam::EssentialMatrix, gtsam::imuBias::ConstantBias, Vector, Matrix}>
T at(size_t j);
/// Fixed size versions, for n in 1..9
void insertFixed(size_t j, Vector t, size_t n);
/// Fixed size versions, for n in 1..9
Vector atFixed(size_t j, size_t n);
/// version for double
void insertDouble(size_t j, double c);
double atDouble(size_t j) const;
@ -2489,10 +2480,30 @@ class NavState {
gtsam::Pose3 pose() const;
};
#include <gtsam/navigation/PreintegratedRotation.h>
virtual class PreintegratedRotationParams {
PreintegratedRotationParams();
void setGyroscopeCovariance(Matrix cov);
void setOmegaCoriolis(Vector omega);
void setBodyPSensor(const gtsam::Pose3& pose);
Matrix getGyroscopeCovariance() const;
// TODO(frank): allow optional
// boost::optional<Vector3> getOmegaCoriolis() const;
// boost::optional<Pose3> getBodyPSensor() const;
};
#include <gtsam/navigation/PreintegrationParams.h>
class PreintegrationParams {
virtual class PreintegrationParams : gtsam::PreintegratedRotationParams {
PreintegrationParams(Vector n_gravity);
// TODO(frank): add setters/getters or make this MATLAB wrapper feature
void setAccelerometerCovariance(Matrix cov);
void setIntegrationCovariance(Matrix cov);
void setUse2ndOrderCoriolis(bool flag);
Matrix getAccelerometerCovariance() const;
Matrix getIntegrationCovariance() const;
bool getUse2ndOrderCoriolis() const;
};
#include <gtsam/navigation/PreintegrationBase.h>
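
The hunk above only specifies the MATLAB wrapper interface. For orientation, here is a hypothetical C++ sketch of configuring the new parameter classes; it assumes the wrapped constructor and setters correspond to C++ methods of the same names and signatures (not shown in this commit), and every numeric value is purely illustrative.

// Editor's sketch, not part of the commit: assumes the wrapped methods above
// mirror C++ members of the same names; all covariance values are illustrative.
#include <gtsam/base/Matrix.h>
#include <gtsam/navigation/PreintegrationParams.h>

int main()
{
  gtsam::Vector3 n_gravity(0.0, 0.0, -9.81);      // gravity in the navigation frame
  gtsam::PreintegrationParams params(n_gravity);  // constructor declared in the hunk above

  // setters inherited from PreintegratedRotationParams
  params.setGyroscopeCovariance(1e-4 * gtsam::Matrix3::Identity());
  params.setOmegaCoriolis(gtsam::Vector3::Zero());

  // setters declared on PreintegrationParams itself
  params.setAccelerometerCovariance(1e-3 * gtsam::Matrix3::Identity());
  params.setIntegrationCovariance(1e-8 * gtsam::Matrix3::Identity());
  params.setUse2ndOrderCoriolis(false);

  return params.getUse2ndOrderCoriolis() ? 1 : 0;
}
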

gtsam/3rdparty/CCOLAMD/Demo/Makefile

@ -0,0 +1,49 @@
#-----------------------------------------------------------------------------
# compile the CCOLAMD demo
#-----------------------------------------------------------------------------
default: all
include ../../SuiteSparse_config/SuiteSparse_config.mk
I = -I../../include
C = $(CC) $(CF) $(I)
LIB2 = $(LDFLAGS) -L../../lib -lccolamd -lsuitesparseconfig $(LDLIBS)
all: library ccolamd_example ccolamd_l_example
library:
( cd ../../SuiteSparse_config ; $(MAKE) )
( cd ../Lib ; $(MAKE) )
#------------------------------------------------------------------------------
# Create the demo program, run it, and compare the output
#------------------------------------------------------------------------------
dist:
ccolamd_example: ccolamd_example.c
$(C) -o ccolamd_example ccolamd_example.c $(LIB2)
- ./ccolamd_example > my_ccolamd_example.out
- diff ccolamd_example.out my_ccolamd_example.out
ccolamd_l_example: ccolamd_l_example.c
$(C) -o ccolamd_l_example ccolamd_l_example.c $(LIB2)
- ./ccolamd_l_example > my_ccolamd_l_example.out
- diff ccolamd_l_example.out my_ccolamd_l_example.out
#------------------------------------------------------------------------------
# Remove all but the files in the original distribution
#------------------------------------------------------------------------------
clean:
- $(RM) -r $(CLEAN)
purge: distclean
distclean: clean
- $(RM) ccolamd_example ccolamd_l_example
- $(RM) my_ccolamd_example.out my_ccolamd_l_example.out
- $(RM) -r $(PURGE)


@ -5,8 +5,6 @@
/* ----------------------------------------------------------------------------
* CCOLAMD Copyright (C), Univ. of Florida. Authors: Timothy A. Davis,
* Sivasankaran Rajamanickam, and Stefan Larimore
* See License.txt for the Version 2.1 of the GNU Lesser General Public License
* http://www.cise.ufl.edu/research/sparse
* -------------------------------------------------------------------------- */
/*


@ -15,7 +15,7 @@ Column 3, with 2 entries:
row 1
row 3
ccolamd version 2.7, Jan 25, 2011: OK.
ccolamd version 2.9, Apr 1, 2016: OK.
ccolamd: number of dense or empty rows ignored: 0
ccolamd: number of dense or empty columns ignored: 0
ccolamd: number of garbage collections performed: 0
@ -38,7 +38,7 @@ Column 3, with 1 entries:
row 4
Column 4, with 0 entries:
csymamd version 2.7, Jan 25, 2011: OK.
csymamd version 2.9, Apr 1, 2016: OK.
csymamd: number of dense or empty rows ignored: 0
csymamd: number of dense or empty columns ignored: 0
csymamd: number of garbage collections performed: 0


@ -1,12 +1,10 @@
/* ========================================================================== */
/* === ccolamd and csymamd example (UF_long integer version) ================ */
/* === ccolamd and csymamd example (long integer version) =================== */
/* ========================================================================== */
/* ----------------------------------------------------------------------------
* CCOLAMD Copyright (C), Univ. of Florida. Authors: Timothy A. Davis,
* Sivasankaran Rajamanickam, and Stefan Larimore
* See License.txt for the Version 2.1 of the GNU Lesser General Public License
* http://www.cise.ufl.edu/research/sparse
* -------------------------------------------------------------------------- */
/*
@ -46,9 +44,6 @@
#define B_NNZ 4
#define B_N 5
/* define UF_long */
#include "UFconfig.h"
int main (void)
{
@ -56,14 +51,14 @@ int main (void)
/* input matrix A definition */
/* ====================================================================== */
UF_long A [ALEN] = {
SuiteSparse_long A [ALEN] = {
0, 1, 4, /* row indices of nonzeros in column 0 */
2, 4, /* row indices of nonzeros in column 1 */
0, 1, 2, 3, /* row indices of nonzeros in column 2 */
1, 3} ; /* row indices of nonzeros in column 3 */
UF_long p [ ] = {
SuiteSparse_long p [ ] = {
0, /* column 0 is in A [0..2] */
3, /* column 1 is in A [3..4] */
@ -75,7 +70,7 @@ int main (void)
/* input matrix B definition */
/* ====================================================================== */
UF_long B [ ] = { /* Note: only strictly lower triangular part */
SuiteSparse_long B [ ] = { /* Note: only strictly lower triangular part */
/* is included, since symamd ignores the */
/* diagonal and upper triangular part of B. */
@ -85,7 +80,7 @@ int main (void)
4 /* row indices of nonzeros in column 3 */
} ; /* row indices of nonzeros in column 4 (none) */
UF_long q [ ] = {
SuiteSparse_long q [ ] = {
0, /* column 0 is in B [0] */
1, /* column 1 is in B [1..2] */
@ -98,10 +93,9 @@ int main (void)
/* other variable definitions */
/* ====================================================================== */
UF_long perm [B_N+1] ; /* note the size is N+1 */
UF_long stats [CCOLAMD_STATS] ; /* for ccolamd and csymamd output stats */
UF_long row, col, pp, length, ok ;
SuiteSparse_long perm [B_N+1] ; /* note the size is N+1 */
SuiteSparse_long stats [CCOLAMD_STATS] ; /* ccolamd/csymamd output stats */
SuiteSparse_long row, col, pp, length, ok ;
/* ====================================================================== */
/* dump the input matrix A */


@ -15,7 +15,7 @@ Column 3, with 2 entries:
row 1
row 3
ccolamd version 2.7, Jan 25, 2011: OK.
ccolamd version 2.9, Apr 1, 2016: OK.
ccolamd: number of dense or empty rows ignored: 0
ccolamd: number of dense or empty columns ignored: 0
ccolamd: number of garbage collections performed: 0
@ -38,7 +38,7 @@ Column 3, with 1 entries:
row 4
Column 4, with 0 entries:
csymamd version 2.7, Jan 25, 2011: OK.
csymamd version 2.9, Apr 1, 2016: OK.
csymamd: number of dense or empty rows ignored: 0
csymamd: number of dense or empty columns ignored: 0
csymamd: number of garbage collections performed: 0


@ -1,3 +1,37 @@
Apr 1, 2016: version 2.9.5
* licensing simplified (no other change); refer to CCOLAMD/Doc/License.txt
Feb 1, 2016: version 2.9.4
* update to Makefiles
Jan 30, 2016: version 2.9.3
* modifications to Makefiles
Jan 1, 2016: version 2.9.2
* modified Makefile to create shared libraries
No change to C code except version number.
The empty file ccolamd_global.c removed.
Oct 10, 2014: version 2.9.1
modified MATLAB/ccolamd_make.m. No change to C code except version number.
July 31, 2013: version 2.9.0
* changed malloc and printf pointers to use SuiteSparse_config
Jun 1, 2012: version 2.8.0
* changed from UFconfig to SuiteSparse_config
Dec 7, 2011: version 2.7.4
* fixed the Makefile to better align with CFLAGS and other standards
Jan 25, 2011: version 2.7.3
* minor fix to "make install"

gtsam/3rdparty/CCOLAMD/Doc/License.txt

@ -0,0 +1,21 @@
CCOLAMD: constrained column approximate minimum degree ordering
Copyright (C) 2005-2016, Univ. of Florida. Authors: Timothy A. Davis,
Sivasankaran Rajamanickam, and Stefan Larimore. Closely based on COLAMD by
Davis, Stefan Larimore, in collaboration with Esmond Ng, and John Gilbert.
http://www.suitesparse.com
--------------------------------------------------------------------------------
CCOLAMD is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
CCOLAMD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this Module; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA


@ -5,8 +5,6 @@
/* ----------------------------------------------------------------------------
* CCOLAMD Copyright (C), Univ. of Florida. Authors: Timothy A. Davis,
* Sivasankaran Rajamanickam, and Stefan Larimore
* See License.txt for the Version 2.1 of the GNU Lesser General Public License
* http://www.cise.ufl.edu/research/sparse
* -------------------------------------------------------------------------- */
/*
@ -32,24 +30,24 @@ extern "C" {
/* All versions of CCOLAMD will include the following definitions.
* As an example, to test if the version you are using is 1.3 or later:
*
* if (CCOLAMD_VERSION >= CCOLAMD_VERSION_CODE (1,3)) ...
* if (CCOLAMD_VERSION >= CCOLAMD_VERSION_CODE (1,3)) ...
*
* This also works during compile-time:
*
* #if CCOLAMD_VERSION >= CCOLAMD_VERSION_CODE (1,3)
* printf ("This is version 1.3 or later\n") ;
* #else
* printf ("This is an early version\n") ;
* #endif
* #if CCOLAMD_VERSION >= CCOLAMD_VERSION_CODE (1,3)
* printf ("This is version 1.3 or later\n") ;
* #else
* printf ("This is an early version\n") ;
* #endif
*/
#define CCOLAMD_DATE "Jan 25, 2011"
#define CCOLAMD_DATE "Apr 1, 2016"
#define CCOLAMD_VERSION_CODE(main,sub) ((main) * 1000 + (sub))
#define CCOLAMD_MAIN_VERSION 2
#define CCOLAMD_SUB_VERSION 7
#define CCOLAMD_SUBSUB_VERSION 3
#define CCOLAMD_SUB_VERSION 9
#define CCOLAMD_SUBSUB_VERSION 5
#define CCOLAMD_VERSION \
CCOLAMD_VERSION_CODE(CCOLAMD_MAIN_VERSION,CCOLAMD_SUB_VERSION)
CCOLAMD_VERSION_CODE(CCOLAMD_MAIN_VERSION,CCOLAMD_SUB_VERSION)
/* ========================================================================== */
/* === Knob and statistics definitions ====================================== */
@ -94,106 +92,105 @@ extern "C" {
#define CCOLAMD_NEWLY_EMPTY_COL 10
/* error codes returned in stats [3]: */
#define CCOLAMD_OK (0)
#define CCOLAMD_OK_BUT_JUMBLED (1)
#define CCOLAMD_ERROR_A_not_present (-1)
#define CCOLAMD_ERROR_p_not_present (-2)
#define CCOLAMD_ERROR_nrow_negative (-3)
#define CCOLAMD_ERROR_ncol_negative (-4)
#define CCOLAMD_ERROR_nnz_negative (-5)
#define CCOLAMD_ERROR_p0_nonzero (-6)
#define CCOLAMD_ERROR_A_too_small (-7)
#define CCOLAMD_ERROR_col_length_negative (-8)
#define CCOLAMD_ERROR_row_index_out_of_bounds (-9)
#define CCOLAMD_ERROR_out_of_memory (-10)
#define CCOLAMD_ERROR_invalid_cmember (-11)
#define CCOLAMD_ERROR_internal_error (-999)
#define CCOLAMD_OK (0)
#define CCOLAMD_OK_BUT_JUMBLED (1)
#define CCOLAMD_ERROR_A_not_present (-1)
#define CCOLAMD_ERROR_p_not_present (-2)
#define CCOLAMD_ERROR_nrow_negative (-3)
#define CCOLAMD_ERROR_ncol_negative (-4)
#define CCOLAMD_ERROR_nnz_negative (-5)
#define CCOLAMD_ERROR_p0_nonzero (-6)
#define CCOLAMD_ERROR_A_too_small (-7)
#define CCOLAMD_ERROR_col_length_negative (-8)
#define CCOLAMD_ERROR_row_index_out_of_bounds (-9)
#define CCOLAMD_ERROR_out_of_memory (-10)
#define CCOLAMD_ERROR_invalid_cmember (-11)
#define CCOLAMD_ERROR_internal_error (-999)
/* ========================================================================== */
/* === Prototypes of user-callable routines ================================= */
/* ========================================================================== */
/* define UF_long */
#include "UFconfig.h"
#include "SuiteSparse_config.h"
size_t ccolamd_recommended /* returns recommended value of Alen, */
/* or 0 if input arguments are erroneous */
size_t ccolamd_recommended /* returns recommended value of Alen, */
/* or 0 if input arguments are erroneous */
(
int nnz, /* nonzeros in A */
int n_row, /* number of rows in A */
int n_col /* number of columns in A */
int nnz, /* nonzeros in A */
int n_row, /* number of rows in A */
int n_col /* number of columns in A */
) ;
size_t ccolamd_l_recommended /* returns recommended value of Alen, */
/* or 0 if input arguments are erroneous */
size_t ccolamd_l_recommended /* returns recommended value of Alen, */
/* or 0 if input arguments are erroneous */
(
UF_long nnz, /* nonzeros in A */
UF_long n_row, /* number of rows in A */
UF_long n_col /* number of columns in A */
SuiteSparse_long nnz, /* nonzeros in A */
SuiteSparse_long n_row, /* number of rows in A */
SuiteSparse_long n_col /* number of columns in A */
) ;
void ccolamd_set_defaults /* sets default parameters */
( /* knobs argument is modified on output */
double knobs [CCOLAMD_KNOBS] /* parameter settings for ccolamd */
void ccolamd_set_defaults /* sets default parameters */
( /* knobs argument is modified on output */
double knobs [CCOLAMD_KNOBS] /* parameter settings for ccolamd */
) ;
void ccolamd_l_set_defaults /* sets default parameters */
( /* knobs argument is modified on output */
double knobs [CCOLAMD_KNOBS] /* parameter settings for ccolamd */
void ccolamd_l_set_defaults /* sets default parameters */
( /* knobs argument is modified on output */
double knobs [CCOLAMD_KNOBS] /* parameter settings for ccolamd */
) ;
int ccolamd /* returns (1) if successful, (0) otherwise*/
( /* A and p arguments are modified on output */
int n_row, /* number of rows in A */
int n_col, /* number of columns in A */
int Alen, /* size of the array A */
int A [ ], /* row indices of A, of size Alen */
int p [ ], /* column pointers of A, of size n_col+1 */
int ccolamd /* returns (1) if successful, (0) otherwise*/
( /* A and p arguments are modified on output */
int n_row, /* number of rows in A */
int n_col, /* number of columns in A */
int Alen, /* size of the array A */
int A [ ], /* row indices of A, of size Alen */
int p [ ], /* column pointers of A, of size n_col+1 */
double knobs [CCOLAMD_KNOBS],/* parameter settings for ccolamd */
int stats [CCOLAMD_STATS], /* ccolamd output statistics and error codes */
int cmember [ ] /* Constraint set of A, of size n_col */
int stats [CCOLAMD_STATS], /* ccolamd output statistics and error codes */
int cmember [ ] /* Constraint set of A, of size n_col */
) ;
UF_long ccolamd_l /* same as ccolamd, but with UF_long integers */
SuiteSparse_long ccolamd_l /* as ccolamd w/ SuiteSparse_long integers */
(
UF_long n_row,
UF_long n_col,
UF_long Alen,
UF_long A [ ],
UF_long p [ ],
SuiteSparse_long n_row,
SuiteSparse_long n_col,
SuiteSparse_long Alen,
SuiteSparse_long A [ ],
SuiteSparse_long p [ ],
double knobs [CCOLAMD_KNOBS],
UF_long stats [CCOLAMD_STATS],
UF_long cmember [ ]
SuiteSparse_long stats [CCOLAMD_STATS],
SuiteSparse_long cmember [ ]
) ;
int csymamd /* return (1) if OK, (0) otherwise */
int csymamd /* return (1) if OK, (0) otherwise */
(
int n, /* number of rows and columns of A */
int A [ ], /* row indices of A */
int p [ ], /* column pointers of A */
int perm [ ], /* output permutation, size n_col+1 */
int n, /* number of rows and columns of A */
int A [ ], /* row indices of A */
int p [ ], /* column pointers of A */
int perm [ ], /* output permutation, size n_col+1 */
double knobs [CCOLAMD_KNOBS],/* parameters (uses defaults if NULL) */
int stats [CCOLAMD_STATS], /* output statistics and error codes */
int stats [CCOLAMD_STATS], /* output statistics and error codes */
void * (*allocate) (size_t, size_t), /* pointer to calloc (ANSI C) or */
/* mxCalloc (for MATLAB mexFunction) */
void (*release) (void *), /* pointer to free (ANSI C) or */
/* mxFree (for MATLAB mexFunction) */
int cmember [ ], /* Constraint set of A */
int stype /* 0: use both parts, >0: upper, <0: lower */
/* mxCalloc (for MATLAB mexFunction) */
void (*release) (void *), /* pointer to free (ANSI C) or */
/* mxFree (for MATLAB mexFunction) */
int cmember [ ], /* Constraint set of A */
int stype /* 0: use both parts, >0: upper, <0: lower */
) ;
UF_long csymamd_l /* same as csymamd, but with UF_long integers */
SuiteSparse_long csymamd_l /* as csymamd, w/ SuiteSparse_long integers */
(
UF_long n,
UF_long A [ ],
UF_long p [ ],
UF_long perm [ ],
SuiteSparse_long n,
SuiteSparse_long A [ ],
SuiteSparse_long p [ ],
SuiteSparse_long perm [ ],
double knobs [CCOLAMD_KNOBS],
UF_long stats [CCOLAMD_STATS],
SuiteSparse_long stats [CCOLAMD_STATS],
void * (*allocate) (size_t, size_t),
void (*release) (void *),
UF_long cmember [ ],
UF_long stype
SuiteSparse_long cmember [ ],
SuiteSparse_long stype
) ;
void ccolamd_report
@ -203,7 +200,7 @@ void ccolamd_report
void ccolamd_l_report
(
UF_long stats [CCOLAMD_STATS]
SuiteSparse_long stats [CCOLAMD_STATS]
) ;
void csymamd_report
@ -213,7 +210,7 @@ void csymamd_report
void csymamd_l_report
(
UF_long stats [CCOLAMD_STATS]
SuiteSparse_long stats [CCOLAMD_STATS]
) ;
@ -227,42 +224,42 @@ void csymamd_l_report
*/
int ccolamd2
( /* A and p arguments are modified on output */
int n_row, /* number of rows in A */
int n_col, /* number of columns in A */
int Alen, /* size of the array A */
int A [ ], /* row indices of A, of size Alen */
int p [ ], /* column pointers of A, of size n_col+1 */
( /* A and p arguments are modified on output */
int n_row, /* number of rows in A */
int n_col, /* number of columns in A */
int Alen, /* size of the array A */
int A [ ], /* row indices of A, of size Alen */
int p [ ], /* column pointers of A, of size n_col+1 */
double knobs [CCOLAMD_KNOBS],/* parameter settings for ccolamd */
int stats [CCOLAMD_STATS], /* ccolamd output statistics and error codes */
int stats [CCOLAMD_STATS], /* ccolamd output statistics and error codes */
/* each Front_ array is of size n_col+1: */
int Front_npivcol [ ], /* # pivot cols in each front */
int Front_nrows [ ], /* # of rows in each front (incl. pivot rows) */
int Front_ncols [ ], /* # of cols in each front (incl. pivot cols) */
int Front_parent [ ], /* parent of each front */
int Front_cols [ ], /* link list of pivot columns for each front */
int *p_nfr, /* total number of frontal matrices */
int InFront [ ], /* InFront [row] = f if row in front f */
int cmember [ ] /* Constraint set of A */
int Front_npivcol [ ], /* # pivot cols in each front */
int Front_nrows [ ], /* # of rows in each front (incl. pivot rows) */
int Front_ncols [ ], /* # of cols in each front (incl. pivot cols) */
int Front_parent [ ], /* parent of each front */
int Front_cols [ ], /* link list of pivot columns for each front */
int *p_nfr, /* total number of frontal matrices */
int InFront [ ], /* InFront [row] = f if row in front f */
int cmember [ ] /* Constraint set of A */
) ;
UF_long ccolamd2_l /* same as ccolamd2, but with UF_long integers */
SuiteSparse_long ccolamd2_l /* as ccolamd2, w/ SuiteSparse_long integers */
(
UF_long n_row,
UF_long n_col,
UF_long Alen,
UF_long A [ ],
UF_long p [ ],
SuiteSparse_long n_row,
SuiteSparse_long n_col,
SuiteSparse_long Alen,
SuiteSparse_long A [ ],
SuiteSparse_long p [ ],
double knobs [CCOLAMD_KNOBS],
UF_long stats [CCOLAMD_STATS],
UF_long Front_npivcol [ ],
UF_long Front_nrows [ ],
UF_long Front_ncols [ ],
UF_long Front_parent [ ],
UF_long Front_cols [ ],
UF_long *p_nfr,
UF_long InFront [ ],
UF_long cmember [ ]
SuiteSparse_long stats [CCOLAMD_STATS],
SuiteSparse_long Front_npivcol [ ],
SuiteSparse_long Front_nrows [ ],
SuiteSparse_long Front_ncols [ ],
SuiteSparse_long Front_parent [ ],
SuiteSparse_long Front_cols [ ],
SuiteSparse_long *p_nfr,
SuiteSparse_long InFront [ ],
SuiteSparse_long cmember [ ]
) ;
void ccolamd_apply_order
@ -276,11 +273,11 @@ void ccolamd_apply_order
void ccolamd_l_apply_order
(
UF_long Front [ ],
const UF_long Order [ ],
UF_long Temp [ ],
UF_long nn,
UF_long nfr
SuiteSparse_long Front [ ],
const SuiteSparse_long Order [ ],
SuiteSparse_long Temp [ ],
SuiteSparse_long nn,
SuiteSparse_long nfr
) ;
@ -296,12 +293,12 @@ void ccolamd_fsize
void ccolamd_l_fsize
(
UF_long nn,
UF_long MaxFsize [ ],
UF_long Fnrows [ ],
UF_long Fncols [ ],
UF_long Parent [ ],
UF_long Npiv [ ]
SuiteSparse_long nn,
SuiteSparse_long MaxFsize [ ],
SuiteSparse_long Fnrows [ ],
SuiteSparse_long Fncols [ ],
SuiteSparse_long Parent [ ],
SuiteSparse_long Npiv [ ]
) ;
void ccolamd_postorder
@ -320,16 +317,16 @@ void ccolamd_postorder
void ccolamd_l_postorder
(
UF_long nn,
UF_long Parent [ ],
UF_long Npiv [ ],
UF_long Fsize [ ],
UF_long Order [ ],
UF_long Child [ ],
UF_long Sibling [ ],
UF_long Stack [ ],
UF_long Front_cols [ ],
UF_long cmember [ ]
SuiteSparse_long nn,
SuiteSparse_long Parent [ ],
SuiteSparse_long Npiv [ ],
SuiteSparse_long Fsize [ ],
SuiteSparse_long Order [ ],
SuiteSparse_long Child [ ],
SuiteSparse_long Sibling [ ],
SuiteSparse_long Stack [ ],
SuiteSparse_long Front_cols [ ],
SuiteSparse_long cmember [ ]
) ;
int ccolamd_post_tree
@ -342,22 +339,16 @@ int ccolamd_post_tree
int Stack [ ]
) ;
UF_long ccolamd_l_post_tree
SuiteSparse_long ccolamd_l_post_tree
(
UF_long root,
UF_long k,
UF_long Child [ ],
const UF_long Sibling [ ],
UF_long Order [ ],
UF_long Stack [ ]
SuiteSparse_long root,
SuiteSparse_long k,
SuiteSparse_long Child [ ],
const SuiteSparse_long Sibling [ ],
SuiteSparse_long Order [ ],
SuiteSparse_long Stack [ ]
) ;
#ifndef EXTERN
#define EXTERN extern
#endif
EXTERN int (*ccolamd_printf) (const char *, ...) ;
#ifdef __cplusplus
}
#endif
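
Not part of the commit, but as orientation for the header above: a minimal C++ sketch that drives the int-precision prototypes exactly as declared (ccolamd_recommended, ccolamd_set_defaults, ccolamd, ccolamd_report). The 5-by-4 pattern mirrors matrix A in Demo/ccolamd_example.c; on success ccolamd leaves the column ordering in p.

/* Editor's sketch -- not part of the commit */
#include <stdio.h>
#include <vector>
#include "ccolamd.h"

int main (void)
{
    const int n_row = 5, n_col = 4, nnz = 11 ;

    /* pattern of A, column by column (same data as Demo/ccolamd_example.c) */
    int pattern [ ] = {0, 1, 4,   2, 4,   0, 1, 2, 3,   1, 3} ;
    int p [ ] = {0, 3, 5, 9, 11} ;          /* column pointers, size n_col+1 */

    /* A must be at least ccolamd_recommended(...) entries long; the first nnz
     * entries hold the row indices, the remainder is workspace */
    size_t Alen = ccolamd_recommended (nnz, n_row, n_col) ;
    std::vector<int> A (Alen) ;
    for (int k = 0 ; k < nnz ; k++) A [k] = pattern [k] ;

    double knobs [CCOLAMD_KNOBS] ;
    int stats [CCOLAMD_STATS] ;
    ccolamd_set_defaults (knobs) ;

    /* NULL cmember means no constraint sets; p holds the ordering on success */
    int ok = ccolamd (n_row, n_col, (int) Alen, A.data(), p, knobs, stats,
        (int *) NULL) ;
    ccolamd_report (stats) ;
    if (!ok) return (1) ;

    for (int j = 0 ; j < n_col ; j++)
        printf ("ordering position %d: column %d\n", j, p [j]) ;
    return (0) ;
}
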

gtsam/3rdparty/CCOLAMD/Lib/Makefile

@ -0,0 +1,71 @@
#-------------------------------------------------------------------------------
# CCOLAMD Lib/Makefile
#-------------------------------------------------------------------------------
LIBRARY = libccolamd
VERSION = 2.9.5
SO_VERSION = 2
default: library
include ../../SuiteSparse_config/SuiteSparse_config.mk
# CCOLAMD depends on SuiteSparse_config
LDLIBS += -lsuitesparseconfig
# compile and install in SuiteSparse/lib
library:
$(MAKE) install INSTALL=$(SUITESPARSE)
I = -I../Include -I../../SuiteSparse_config
INC = ../Include/ccolamd.h ../../SuiteSparse_config/SuiteSparse_config.h
SRC = ../Source/ccolamd.c
OBJ = ccolamd.o ccolamd_l.o
ccolamd.o: $(SRC) $(INC)
$(CC) $(CF) $(I) -c ../Source/ccolamd.c
ccolamd_l.o: $(SRC) $(INC)
$(CC) $(CF) $(I) -c ../Source/ccolamd.c -DDLONG -o ccolamd_l.o
# creates libccolamd.a, a C-callable CCOLAMD library
$(AR_TARGET): $(OBJ)
$(ARCHIVE) $@ $^
- $(RANLIB) $@
ccode: library
clean:
- $(RM) -r $(CLEAN)
purge: distclean
distclean: clean
- $(RM) -r $(PURGE)
# install CCOLAMD
install: $(AR_TARGET) $(INSTALL_LIB)/$(SO_TARGET)
$(INSTALL_LIB)/$(SO_TARGET): $(OBJ)
@mkdir -p $(INSTALL_LIB)
@mkdir -p $(INSTALL_INCLUDE)
@mkdir -p $(INSTALL_DOC)
$(CC) $(SO_OPTS) $^ -o $@ $(LDLIBS)
( cd $(INSTALL_LIB) ; ln -sf $(SO_TARGET) $(SO_PLAIN) )
( cd $(INSTALL_LIB) ; ln -sf $(SO_TARGET) $(SO_MAIN) )
$(CP) ../Include/ccolamd.h $(INSTALL_INCLUDE)
$(CP) ../README.txt $(INSTALL_DOC)/CCOLAMD_README.txt
chmod 755 $(INSTALL_LIB)/$(SO_TARGET)
chmod 644 $(INSTALL_INCLUDE)/ccolamd.h
chmod 644 $(INSTALL_DOC)/CCOLAMD_README.txt
uninstall:
$(RM) $(INSTALL_LIB)/$(SO_TARGET)
$(RM) $(INSTALL_LIB)/$(SO_PLAIN)
$(RM) $(INSTALL_LIB)/$(SO_MAIN)
$(RM) $(INSTALL_INCLUDE)/ccolamd.h
$(RM) $(INSTALL_DOC)/CCOLAMD_README.txt


@ -47,9 +47,8 @@ spparms ('default') ;
A = sprandn (n, n, 2/n) + speye (n) ;
b = (1:n)' ;
figure (1)
clf ;
subplot (2,2,1)
subplot (3,4,1)
spy (A)
title ('original matrix')
@ -62,7 +61,7 @@ fl = luflops (L, U) ;
x = Q * (U \ (L \ (P * b))) ;
fprintf (1, '\nFlop count for [L,U,P] = lu (A*Q): %d\n', fl) ;
fprintf (1, 'residual: %e\n', norm (A*x-b));
subplot (2,2,2) ;
subplot (3,4,2) ;
spy (L|U) ;
title ('LU with ccolamd') ;
@ -76,7 +75,7 @@ fl = luflops (L, U) ;
x = Q * (U \ (L \ (P * b))) ;
fprintf (1, '\nFlop count for [L,U,P] = lu (A*Q): %d\n', fl) ;
fprintf (1, 'residual: %e\n', norm (A*x-b));
subplot (2,2,3) ;
subplot (3,4,3) ;
spy (L|U) ;
title ('LU with colamd') ;
catch
@ -89,7 +88,7 @@ fl = luflops (L, U) ;
x = U \ (L \ (P * b)) ;
fprintf (1, '\nFlop count for [L,U,P] = lu (A*Q): %d\n', fl) ;
fprintf (1, 'residual: %e\n', norm (A*x-b));
subplot (2,2,4) ;
subplot (3,4,4) ;
spy (L|U) ;
title ('LU with no ordering') ;
@ -111,9 +110,7 @@ n = 1000 ;
fprintf (1, 'Generating a random %d-by-%d sparse matrix.\n', n, n) ;
A = sprandn (n, n, 2/n) + speye (n) ;
figure (2)
clf ;
subplot (2,2,1)
subplot (3,4,5)
spy (A)
title ('original matrix')
@ -121,7 +118,7 @@ fprintf (1, '\n\nUnordered matrix:\n') ;
[lnz,h,parent,post,R] = symbfact (A, 'col') ;
fprintf (1, 'nz in Cholesky factors of A''A: %d\n', sum (lnz)) ;
fprintf (1, 'flop count for Cholesky of A''A: %d\n', sum (lnz.^2)) ;
subplot (2,2,4) ;
subplot (3,4,6) ;
spy (R) ;
title ('Cholesky with no ordering') ;
@ -133,7 +130,7 @@ fprintf (1, '\n\nccolamd run time: %f\n', t) ;
fprintf (1, 'ccolamd ordering quality: \n') ;
fprintf (1, 'nz in Cholesky factors of A(:,p)''A(:,p): %d\n', sum (lnz)) ;
fprintf (1, 'flop count for Cholesky of A(:,p)''A(:,p): %d\n', sum (lnz.^2)) ;
subplot (2,2,2) ;
subplot (3,4,7) ;
spy (R) ;
title ('Cholesky with ccolamd') ;
@ -146,7 +143,7 @@ fprintf (1, '\n\ncolamd run time: %f\n', t) ;
fprintf (1, 'colamd ordering quality: \n') ;
fprintf (1, 'nz in Cholesky factors of A(:,p)''A(:,p): %d\n', sum (lnz)) ;
fprintf (1, 'flop count for Cholesky of A(:,p)''A(:,p): %d\n', sum (lnz.^2)) ;
subplot (2,2,3) ;
subplot (3,4,8) ;
spy (R) ;
title ('Cholesky with colamd') ;
catch
@ -164,9 +161,7 @@ fprintf (1, '\n-----------------------------------------------------------\n') ;
fprintf (1, 'Generating a random symmetric %d-by-%d sparse matrix.\n', n, n) ;
A = A+A' ;
figure (3)
clf ;
subplot (2,2,1)
subplot (3,4,9) ;
spy (A)
title ('original matrix')
@ -174,7 +169,7 @@ fprintf (1, '\n\nUnordered matrix:\n') ;
[lnz,h,parent,post,R] = symbfact (A, 'sym') ;
fprintf (1, 'nz in Cholesky factors of A: %d\n', sum (lnz)) ;
fprintf (1, 'flop count for Cholesky of A: %d\n', sum (lnz.^2)) ;
subplot (2,2,4) ;
subplot (3,4,10) ;
spy (R) ;
title ('Cholesky with no ordering') ;
@ -186,7 +181,7 @@ fprintf (1, '\n\ncsymamd run time: %f\n', t) ;
fprintf (1, 'csymamd ordering quality: \n') ;
fprintf (1, 'nz in Cholesky factors of A(p,p): %d\n', sum (lnz)) ;
fprintf (1, 'flop count for Cholesky of A(p,p): %d\n', sum (lnz.^2)) ;
subplot (2,2,2) ;
subplot (3,4,11) ;
spy (R) ;
title ('Cholesky with csymamd') ;
@ -199,7 +194,7 @@ fprintf (1, '\n\nsymamd run time: %f\n', t) ;
fprintf (1, 'symamd ordering quality: \n') ;
fprintf (1, 'nz in Cholesky factors of A(p,p): %d\n', sum (lnz)) ;
fprintf (1, 'flop count for Cholesky of A(p,p): %d\n', sum (lnz.^2)) ;
subplot (2,2,3) ;
subplot (3,4,12) ;
spy (R) ;
title ('Cholesky with symamd') ;
catch


@ -14,14 +14,33 @@ d = '' ;
if (~isempty (strfind (computer, '64')))
d = '-largeArrayDims' ;
end
src = '../Source/ccolamd.c ../Source/ccolamd_global.c' ;
cmd = sprintf ('mex -DDLONG -O %s -I../../UFconfig -I../Include -output ', d) ;
% MATLAB 8.3.0 now has a -silent option to keep 'mex' from burbling too much
if (~verLessThan ('matlab', '8.3.0'))
d = ['-silent ' d] ;
end
src = '../Source/ccolamd.c ../../SuiteSparse_config/SuiteSparse_config.c' ;
cmd = sprintf ( ...
'mex -DDLONG -O %s -I../../SuiteSparse_config -I../Include -output ', d) ;
s = [cmd 'ccolamd ccolamdmex.c ' src] ;
if (~(ispc || ismac))
% for POSIX timing routine
s = [s ' -lrt'] ;
end
if (details)
fprintf ('%s\n', s) ;
end
eval (s) ;
s = [cmd 'csymamd csymamdmex.c ' src] ;
if (~(ispc || ismac))
% for POSIX timing routine
s = [s ' -lrt'] ;
end
if (details)
fprintf ('%s\n', s) ;
end


@ -22,8 +22,13 @@ csymamd_default_knobs = [10 1 0] ;
if (~isempty (strfind (computer, '64')))
d = '-largeArrayDims' ;
end
src = '../Source/ccolamd.c ../Source/ccolamd_global.c' ;
cmd = sprintf ('mex -DDLONG -O %s -I../../UFconfig -I../Include ', d) ;
cmd = sprintf ( ...
'mex -DDLONG -O %s -I../../SuiteSparse_config -I../Include ', d) ;
src = '../Source/ccolamd.c ../../SuiteSparse_config/SuiteSparse_config.c' ;
if (~(ispc || ismac))
% for POSIX timing routine
src = [src ' -lrt'] ;
end
eval ([cmd 'ccolamdtestmex.c ' src]) ;
eval ([cmd 'csymamdtestmex.c ' src]) ;
fprintf ('Done compiling.\n') ;


@ -5,8 +5,6 @@
/* ----------------------------------------------------------------------------
* CCOLAMD, Copyright (C), Univ. of Florida. Authors: Timothy A. Davis,
* Sivasankaran Rajamanickam, and Stefan Larimore
* See License.txt for the Version 2.1 of the GNU Lesser General Public License
* http://www.cise.ufl.edu/research/sparse
* -------------------------------------------------------------------------- */
/*
@ -26,7 +24,7 @@
#include "matrix.h"
#include <stdlib.h>
#include <string.h>
#include "UFconfig.h"
#define Long SuiteSparse_long
/* ========================================================================== */
/* === ccolamd mexFunction ================================================== */
@ -44,24 +42,24 @@ void mexFunction
{
/* === Local variables ================================================== */
UF_long *A ; /* ccolamd's copy of the matrix and workspace */
UF_long *cmember ; /* ccolamd's copy of the constraint set */
double *in_cmember ; /* input constraint set */
UF_long *p ; /* ccolamd's copy of the column pointers */
UF_long Alen ; /* size of A */
UF_long cslen ; /* size of CS */
UF_long n_col ; /* number of columns of A */
UF_long n_row ; /* number of rows of A */
UF_long nnz ; /* number of entries in A */
UF_long full ; /* TRUE if input matrix full, FALSE if sparse */
Long *A ; /* ccolamd's copy of the matrix and workspace */
Long *cmember ; /* ccolamd's copy of the constraint set */
double *in_cmember ; /* input constraint set */
Long *p ; /* ccolamd's copy of the column pointers */
Long Alen ; /* size of A */
Long cslen ; /* size of CS */
Long n_col ; /* number of columns of A */
Long n_row ; /* number of rows of A */
Long nnz ; /* number of entries in A */
Long full ; /* TRUE if input matrix full, FALSE if sparse */
double knobs [CCOLAMD_KNOBS] ; /* ccolamd user-controllable parameters */
double *out_perm ; /* output permutation vector */
double *out_stats ; /* output stats vector */
double *in_knobs ; /* input knobs vector */
UF_long i ; /* loop counter */
mxArray *Ainput ; /* input matrix handle */
UF_long spumoni ; /* verbosity variable */
UF_long stats [CCOLAMD_STATS] ; /* stats for ccolamd */
double *out_perm ; /* output permutation vector */
double *out_stats ; /* output stats vector */
double *in_knobs ; /* input knobs vector */
Long i ; /* loop counter */
mxArray *Ainput ; /* input matrix handle */
Long spumoni ; /* verbosity variable */
Long stats [CCOLAMD_STATS] ;/* stats for ccolamd */
/* === Check inputs ===================================================== */
@ -80,11 +78,11 @@ void mexFunction
cslen = mxGetNumberOfElements (pargin [2]) ;
if (cslen != 0)
{
cmember = (UF_long *) mxCalloc (cslen, sizeof (UF_long)) ;
cmember = (Long *) mxCalloc (cslen, sizeof (Long)) ;
for (i = 0 ; i < cslen ; i++)
{
/* convert cmember from 1-based to 0-based */
cmember[i] = ((UF_long) in_cmember [i] - 1) ;
cmember[i] = ((Long) in_cmember [i] - 1) ;
}
}
}
@ -157,10 +155,10 @@ void mexFunction
n_col = mxGetN (Ainput) ;
/* get column pointer vector */
p = (UF_long *) mxCalloc (n_col+1, sizeof (UF_long)) ;
(void) memcpy (p, mxGetJc (Ainput), (n_col+1)*sizeof (UF_long)) ;
p = (Long *) mxCalloc (n_col+1, sizeof (Long)) ;
(void) memcpy (p, mxGetJc (Ainput), (n_col+1)*sizeof (Long)) ;
nnz = p [n_col] ;
Alen = (UF_long) ccolamd_l_recommended (nnz, n_row, n_col) ;
Alen = (Long) ccolamd_l_recommended (nnz, n_row, n_col) ;
if (Alen == 0)
{
mexErrMsgTxt ("ccolamd: problem too large") ;
@ -168,8 +166,8 @@ void mexFunction
/* === Copy input matrix into workspace ================================= */
A = (UF_long *) mxCalloc (Alen, sizeof (UF_long)) ;
(void) memcpy (A, mxGetIr (Ainput), nnz*sizeof (UF_long)) ;
A = (Long *) mxCalloc (Alen, sizeof (Long)) ;
(void) memcpy (A, mxGetIr (Ainput), nnz*sizeof (Long)) ;
if (full)
{


@ -5,8 +5,6 @@
/* ----------------------------------------------------------------------------
* CCOLAMD Copyright (C), Univ. of Florida. Authors: Timothy A. Davis,
* Sivasankaran Rajamanickam, and Stefan Larimore
* See License.txt for the Version 2.1 of the GNU Lesser General Public License
* http://www.cise.ufl.edu/research/sparse
* -------------------------------------------------------------------------- */
/*
@ -43,7 +41,7 @@
#include "matrix.h"
#include <stdlib.h>
#include <string.h>
#include "UFconfig.h"
#define Long SuiteSparse_long
/* Here only for testing */
#undef MIN
@ -61,15 +59,15 @@
static void dump_matrix
(
UF_long A [ ],
UF_long p [ ],
UF_long n_row,
UF_long n_col,
UF_long Alen,
UF_long limit
Long A [ ],
Long p [ ],
Long n_row,
Long n_col,
Long Alen,
Long limit
)
{
UF_long col, k, row ;
Long col, k, row ;
mexPrintf ("dump matrix: nrow %d ncol %d Alen %d\n", n_row, n_col, Alen) ;
@ -102,24 +100,24 @@ void mexFunction
{
/* === Local variables ================================================== */
UF_long *A ; /* ccolamd's copy of the matrix and workspace */
UF_long *p ; /* ccolamd's copy of the column pointers */
UF_long Alen ; /* size of A */
UF_long n_col ; /* number of columns of A */
UF_long n_row ; /* number of rows of A */
UF_long nnz ; /* number of entries in A */
UF_long full ; /* TRUE if input matrix full, FALSE if sparse */
Long *A ; /* ccolamd's copy of the matrix and workspace */
Long *p ; /* ccolamd's copy of the column pointers */
Long Alen ; /* size of A */
Long n_col ; /* number of columns of A */
Long n_row ; /* number of rows of A */
Long nnz ; /* number of entries in A */
Long full ; /* TRUE if input matrix full, FALSE if sparse */
double knobs [CCOLAMD_KNOBS] ; /* ccolamd user-controllable parameters */
double *out_perm ; /* output permutation vector */
double *out_stats ; /* output stats vector */
double *in_knobs ; /* input knobs vector */
UF_long i ; /* loop counter */
mxArray *Ainput ; /* input matrix handle */
UF_long spumoni ; /* verbosity variable */
UF_long stats2 [CCOLAMD_STATS] ; /* stats for ccolamd */
double *out_perm ; /* output permutation vector */
double *out_stats ; /* output stats vector */
double *in_knobs ; /* input knobs vector */
Long i ; /* loop counter */
mxArray *Ainput ; /* input matrix handle */
Long spumoni ; /* verbosity variable */
Long stats2 [CCOLAMD_STATS] ; /* stats for ccolamd */
UF_long *cp, *cp_end, result, col, length, ok ;
UF_long *stats ;
Long *cp, *cp_end, result, col, length, ok ;
Long *stats ;
stats = stats2 ;
/* === Check inputs ===================================================== */
@ -199,10 +197,10 @@ void mexFunction
n_col = mxGetN (Ainput) ;
/* get column pointer vector so we can find nnz */
p = (UF_long *) mxCalloc (n_col+1, sizeof (UF_long)) ;
(void) memcpy (p, mxGetJc (Ainput), (n_col+1)*sizeof (UF_long)) ;
p = (Long *) mxCalloc (n_col+1, sizeof (Long)) ;
(void) memcpy (p, mxGetJc (Ainput), (n_col+1)*sizeof (Long)) ;
nnz = p [n_col] ;
Alen = (UF_long) ccolamd_l_recommended (nnz, n_row, n_col) ;
Alen = (Long) ccolamd_l_recommended (nnz, n_row, n_col) ;
if (Alen == 0)
{
mexErrMsgTxt ("ccolamd: problem too large") ;
@ -230,8 +228,8 @@ void mexFunction
/* === Copy input matrix into workspace ================================= */
A = (UF_long *) mxCalloc (Alen, sizeof (UF_long)) ;
(void) memcpy (A, mxGetIr (Ainput), nnz*sizeof (UF_long)) ;
A = (Long *) mxCalloc (Alen, sizeof (Long)) ;
(void) memcpy (A, mxGetIr (Ainput), nnz*sizeof (Long)) ;
if (full)
{
@ -261,7 +259,7 @@ void mexFunction
*/
/* jumble appropriately */
switch ((UF_long) in_knobs [6])
switch ((Long) in_knobs [6])
{
case 0 :
@ -359,7 +357,7 @@ void mexFunction
mexPrintf ("ccolamdtest: A not present\n") ;
}
result = 0 ; /* A not present */
A = (UF_long *) NULL ;
A = (Long *) NULL ;
break ;
case 8 :
@ -368,7 +366,7 @@ void mexFunction
mexPrintf ("ccolamdtest: p not present\n") ;
}
result = 0 ; /* p not present */
p = (UF_long *) NULL ;
p = (Long *) NULL ;
break ;
case 9 :
@ -456,7 +454,7 @@ void mexFunction
mexPrintf ("ccolamdtest: stats not present\n") ;
}
result = 0 ; /* stats not present */
stats = (UF_long *) NULL ;
stats = (Long *) NULL ;
break ;
case 13 :


@ -34,10 +34,10 @@ function [p, stats] = csymamd (S, knobs, cmember) %#ok
% p = csymamd(S) is about the same as p = symamd(S). knobs and its default
% values differ.
%
% Authors: S. Larimore, T. Davis (Univ of Florida), and S. Rajamanickam, in
% Authors: S. Larimore, T. Davis, and S. Rajamanickam, in
% collaboration with J. Gilbert and E. Ng. Supported by the National
% Science Foundation (DMS-9504974, DMS-9803599, CCR-0203270), and a grant
% from Sandia National Lab. See http://www.cise.ufl.edu/research/sparse
% from Sandia National Lab. See http://www.suitesparse.com
% for ccolamd, csymamd, amd, colamd, symamd, and other related orderings.
%
% See also AMD, CCOLAMD, COLAMD, SYMAMD, SYMRCM.


@ -5,8 +5,6 @@
/* ----------------------------------------------------------------------------
* CCOLAMD, Copyright (C), Univ. of Florida. Authors: Timothy A. Davis,
* Sivasankaran Rajamanickam, and Stefan Larimore
* See License.txt for the Version 2.1 of the GNU Lesser General Public License
* http://www.cise.ufl.edu/research/sparse
* -------------------------------------------------------------------------- */
/*
@ -25,7 +23,7 @@
#include "mex.h"
#include "matrix.h"
#include <stdlib.h>
#include "UFconfig.h"
#define Long SuiteSparse_long
/* ========================================================================== */
/* === csymamd mexFunction ================================================== */
@ -43,23 +41,23 @@ void mexFunction
{
/* === Local variables ================================================== */
UF_long *A ; /* row indices of input matrix A */
UF_long *perm ; /* column ordering of M and ordering of A */
UF_long *cmember ; /* csymamd's copy of the constraint set */
double *in_cmember ; /* input constraint set */
UF_long *p ; /* column pointers of input matrix A */
UF_long cslen ; /* size of constraint set */
UF_long n_col ; /* number of columns of A */
UF_long n_row ; /* number of rows of A */
UF_long full ; /* TRUE if input matrix full, FALSE if sparse */
Long *A ; /* row indices of input matrix A */
Long *perm ; /* column ordering of M and ordering of A */
Long *cmember ; /* csymamd's copy of the constraint set */
double *in_cmember ; /* input constraint set */
Long *p ; /* column pointers of input matrix A */
Long cslen ; /* size of constraint set */
Long n_col ; /* number of columns of A */
Long n_row ; /* number of rows of A */
Long full ; /* TRUE if input matrix full, FALSE if sparse */
double knobs [CCOLAMD_KNOBS] ; /* csymamd user-controllable parameters */
double *out_perm ; /* output permutation vector */
double *out_stats ; /* output stats vector */
double *in_knobs ; /* input knobs vector */
UF_long i ; /* loop counter */
mxArray *Ainput ; /* input matrix handle */
UF_long spumoni ; /* verbosity variable */
UF_long stats [CCOLAMD_STATS] ; /* stats for symamd */
double *out_perm ; /* output permutation vector */
double *out_stats ; /* output stats vector */
double *in_knobs ; /* input knobs vector */
Long i ; /* loop counter */
mxArray *Ainput ; /* input matrix handle */
Long spumoni ; /* verbosity variable */
Long stats [CCOLAMD_STATS] ;/* stats for symamd */
/* === Check inputs ===================================================== */
@ -78,11 +76,11 @@ void mexFunction
cslen = mxGetNumberOfElements (pargin [2]) ;
if (cslen != 0)
{
cmember = (UF_long *) mxCalloc (cslen, sizeof (UF_long)) ;
cmember = (Long *) mxCalloc (cslen, sizeof (Long)) ;
for (i = 0 ; i < cslen ; i++)
{
/* convert cmember from 1-based to 0-based */
cmember[i] = ((UF_long) in_cmember [i] - 1) ;
cmember[i] = ((Long) in_cmember [i] - 1) ;
}
}
}
@ -153,9 +151,9 @@ void mexFunction
mexErrMsgTxt ("csymamd: cmember must be of length equal to #cols of A");
}
A = (UF_long *) mxGetIr (Ainput) ;
p = (UF_long *) mxGetJc (Ainput) ;
perm = (UF_long *) mxCalloc (n_col+1, sizeof (UF_long)) ;
A = (Long *) mxGetIr (Ainput) ;
p = (Long *) mxGetJc (Ainput) ;
perm = (Long *) mxCalloc (n_col+1, sizeof (Long)) ;
/* === Order the rows and columns of A (does not destroy A) ============= */


@ -5,8 +5,6 @@
/* ----------------------------------------------------------------------------
* CCOLAMD Copyright (C), Univ. of Florida. Authors: Timothy A. Davis,
* Sivasankaran Rajamanickam, and Stefan Larimore
* See License.txt for the Version 2.1 of the GNU Lesser General Public License
* http://www.cise.ufl.edu/research/sparse
* -------------------------------------------------------------------------- */
/*
@ -37,7 +35,7 @@
#include "matrix.h"
#include <stdlib.h>
#include <string.h>
#include "UFconfig.h"
#define Long SuiteSparse_long
#ifdef MIN
#undef MIN
@ -47,15 +45,15 @@
static void dump_matrix
(
UF_long A [ ],
UF_long p [ ],
UF_long n_row,
UF_long n_col,
UF_long Alen,
UF_long limit
Long A [ ],
Long p [ ],
Long n_row,
Long n_col,
Long Alen,
Long limit
)
{
UF_long col, k, row ;
Long col, k, row ;
mexPrintf ("dump matrix: nrow %d ncol %d Alen %d\n", n_row, n_col, Alen) ;
@ -100,23 +98,23 @@ void mexFunction
{
/* === Local variables ================================================== */
UF_long *perm ; /* column ordering of M and ordering of A */
UF_long *A ; /* row indices of input matrix A */
UF_long *p ; /* column pointers of input matrix A */
UF_long n_col ; /* number of columns of A */
UF_long n_row ; /* number of rows of A */
UF_long full ; /* TRUE if input matrix full, FALSE if sparse */
Long *perm ; /* column ordering of M and ordering of A */
Long *A ; /* row indices of input matrix A */
Long *p ; /* column pointers of input matrix A */
Long n_col ; /* number of columns of A */
Long n_row ; /* number of rows of A */
Long full ; /* TRUE if input matrix full, FALSE if sparse */
double knobs [CCOLAMD_KNOBS] ; /* ccolamd user-controllable parameters */
double *out_perm ; /* output permutation vector */
double *out_stats ; /* output stats vector */
double *in_knobs ; /* input knobs vector */
UF_long i ; /* loop counter */
mxArray *Ainput ; /* input matrix handle */
UF_long spumoni ; /* verbosity variable */
UF_long stats2 [CCOLAMD_STATS] ;/* stats for csymamd */
double *out_perm ; /* output permutation vector */
double *out_stats ; /* output stats vector */
double *in_knobs ; /* input knobs vector */
Long i ; /* loop counter */
mxArray *Ainput ; /* input matrix handle */
Long spumoni ; /* verbosity variable */
Long stats2 [CCOLAMD_STATS] ;/* stats for csymamd */
UF_long *cp, *cp_end, result, nnz, col, length, ok ;
UF_long *stats ;
Long *cp, *cp_end, result, nnz, col, length, ok ;
Long *stats ;
stats = stats2 ;
/* === Check inputs ===================================================== */
@ -192,8 +190,8 @@ void mexFunction
}
/* p = mxGetJc (Ainput) ; */
p = (UF_long *) mxCalloc (n_col+1, sizeof (UF_long)) ;
(void) memcpy (p, mxGetJc (Ainput), (n_col+1)*sizeof (UF_long)) ;
p = (Long *) mxCalloc (n_col+1, sizeof (Long)) ;
(void) memcpy (p, mxGetJc (Ainput), (n_col+1)*sizeof (Long)) ;
nnz = p [n_col] ;
if (spumoni)
@ -202,10 +200,10 @@ void mexFunction
}
/* A = mxGetIr (Ainput) ; */
A = (UF_long *) mxCalloc (nnz+1, sizeof (UF_long)) ;
(void) memcpy (A, mxGetIr (Ainput), nnz*sizeof (UF_long)) ;
A = (Long *) mxCalloc (nnz+1, sizeof (Long)) ;
(void) memcpy (A, mxGetIr (Ainput), nnz*sizeof (Long)) ;
perm = (UF_long *) mxCalloc (n_col+1, sizeof (UF_long)) ;
perm = (Long *) mxCalloc (n_col+1, sizeof (Long)) ;
/* === Jumble matrix ==================================================== */
@ -230,7 +228,7 @@ void mexFunction
*/
/* jumble appropriately */
switch ((UF_long) in_knobs [3])
switch ((Long) in_knobs [3])
{
case 0 :
@ -321,7 +319,7 @@ void mexFunction
mexPrintf ("csymamdtest: A not present\n") ;
}
result = 0 ; /* A not present */
A = (UF_long *) NULL ;
A = (Long *) NULL ;
break ;
case 8 :
@ -330,7 +328,7 @@ void mexFunction
mexPrintf ("csymamdtest: p not present\n") ;
}
result = 0 ; /* p not present */
p = (UF_long *) NULL ;
p = (Long *) NULL ;
break ;
case 9 :
@ -418,7 +416,7 @@ void mexFunction
mexPrintf ("csymamdtest: stats not present\n") ;
}
result = 0 ; /* stats not present */
stats = (UF_long *) NULL ;
stats = (Long *) NULL ;
break ;
case 13 :

gtsam/3rdparty/CCOLAMD/Makefile

@ -0,0 +1,51 @@
#------------------------------------------------------------------------------
# CCOLAMD Makefile
#------------------------------------------------------------------------------
SUITESPARSE ?= $(realpath $(CURDIR)/..)
export SUITESPARSE
default: all
include ../SuiteSparse_config/SuiteSparse_config.mk
demos: all
# Compile all C code
all:
( cd Lib ; $(MAKE) )
( cd Demo ; $(MAKE) )
# compile just the C-callable libraries (not Demos)
library:
( cd Lib ; $(MAKE) )
# remove object files, but keep the compiled programs and library archives
clean:
( cd Lib ; $(MAKE) clean )
( cd Demo ; $(MAKE) clean )
( cd MATLAB ; $(RM) $(CLEAN) )
# clean, and then remove compiled programs and library archives
purge:
( cd Lib ; $(MAKE) purge )
( cd Demo ; $(MAKE) purge )
( cd MATLAB ; $(RM) $(CLEAN) ; $(RM) *.mex* )
distclean: purge
# get ready for distribution
dist: purge
( cd Demo ; $(MAKE) dist )
ccode: library
lib: library
# install CCOLAMD
install:
( cd Lib ; $(MAKE) install )
# uninstall CCOLAMD
uninstall:
( cd Lib ; $(MAKE) uninstall )


@ -1,8 +1,8 @@
CCOLAMD: constrained column approximate minimum degree ordering
Copyright (C) 2005-2011, Univ. of Florida. Authors: Timothy A. Davis,
Copyright (C) 2005-2016, Univ. of Florida. Authors: Timothy A. Davis,
Sivasankaran Rajamanickam, and Stefan Larimore. Closely based on COLAMD by
Davis, Stefan Larimore, in collaboration with Esmond Ng, and John Gilbert.
http://www.cise.ufl.edu/research/sparse
http://www.suitesparse.com
-------------------------------------------------------------------------------
The CCOLAMD column approximate minimum degree ordering algorithm computes
@ -14,7 +14,8 @@ available as a MATLAB-callable function. It constructs a matrix M such
that M'*M has the same pattern as A, and then uses CCOLAMD to compute a column
ordering of M.
Requires UFconfig, in the ../UFconfig directory relative to this directory.
Requires SuiteSparse_config, in the ../SuiteSparse_config directory relative to
this directory.
To compile and install the ccolamd m-files and mexFunctions, just cd to
CCOLAMD/MATLAB and type ccolamd_install in the MATLAB command window.
@ -22,47 +23,27 @@ A short demo will run. Optionally, type ccolamd_test to run an extensive tests.
Type "make" in Unix in the CCOLAMD directory to compile the C-callable
library and to run a short demo.
If you have MATLAB 7.2 or earlier, you must first edit UFconfig/UFconfig.h to
remove the "-largeArrayDims" option from the MEX command (or just use
ccolamd_install.m inside MATLAB).
Other "make" targets:
make mex compiles MATLAB mexFunctions only
make libccolamd.a compiles a C-callable library containing ccolamd
make clean removes all files not in the distribution, except for
libccolamd.a
make library compiles a C-callable library containing ccolamd
make clean removes all files not in the distribution
but keeps the compiled libraries.
make distclean removes all files not in the distribution
make install installs the library in /usr/local/lib and
/usr/local/include
make uninstall uninstalls the library from /usr/local/lib and
/usr/local/include
To use ccolamd and csymamd within an application written in C, all you need are
ccolamd.c and ccolamd.h, which are the C-callable ccolamd/csymamd codes.
See ccolamd.c for more information on how to call ccolamd from a C program.
It contains a complete description of the C-interface to CCOLAMD and CSYMAMD.
Copyright (c) 1998-2007 by the University of Florida.
All Rights Reserved.
Licensed under the GNU LESSER GENERAL PUBLIC LICENSE.
See CCOLAMD/Doc/License.txt for the license.
-------------------------------------------------------------------------------
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-------------------------------------------------------------------------------
Related papers:
T. A. Davis and W. W. Hager, Rajamanickam, Multiple-rank updates
@ -86,21 +67,18 @@ Related papers:
"An approximate minimum degree column ordering algorithm",
S. I. Larimore, MS Thesis, Dept. of Computer and Information
Science and Engineering, University of Florida, Gainesville, FL,
1998. CISE Tech Report TR-98-016. Available at
ftp://ftp.cise.ufl.edu/cis/tech-reports/tr98/tr98-016.ps
via anonymous ftp.
1998. CISE Tech Report TR-98-016.
Approximate Deficiency for Ordering the Columns of a Matrix,
J. L. Kern, Senior Thesis, Dept. of Computer and Information
Science and Engineering, University of Florida, Gainesville, FL,
1999. Available at http://www.cise.ufl.edu/~davis/Kern/kern.ps
1999.
Authors: Timothy A. Davis, Sivasankaran Rajamanickam, and Stefan Larimore.
Closely based on COLAMD by Stefan I. Larimore and Timothy A. Davis,
University of Florida, in collaboration with John Gilbert, Xerox PARC
(now at UC Santa Barbara), and Esmong Ng, Lawrence Berkeley National
Laboratory (much of this work he did while at Oak Ridge National
Laboratory).
in collaboration with John Gilbert, Xerox PARC (now at UC Santa
Barbara), and Esmong Ng, Lawrence Berkeley National Laboratory (much of
this work he did while at Oak Ridge National Laboratory).
CCOLAMD files:
@ -122,7 +100,6 @@ CCOLAMD files:
./Doc:
ChangeLog change log
lesser.txt license
./Include:
ccolamd.h include file
@ -147,4 +124,3 @@ CCOLAMD files:
./Source:
ccolamd.c primary source code
ccolamd_global.c globally defined function pointers (malloc, free, ...)
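
To make the README's C-interface paragraph concrete, here is a minimal C++ sketch of a csymamd call built only from the prototypes in ccolamd.h above; the 5-by-5 symmetric pattern is illustrative (it is not the demo's matrix B), and calloc/free are passed as the ANSI C allocators exactly as the prototype comments describe.

/* Editor's sketch -- not part of the commit */
#include <stdio.h>
#include <stdlib.h>
#include "ccolamd.h"

int main (void)
{
    const int n = 5 ;

    /* strictly lower triangular pattern of a symmetric matrix, column major */
    int A [ ] = {1, 4,   2,   3,   4} ;     /* row indices per column */
    int p [ ] = {0, 2, 3, 4, 5, 5} ;        /* column pointers, size n+1 */

    int perm [n + 1] ;                      /* output permutation, size n+1 */
    int stats [CCOLAMD_STATS] ;

    /* NULL knobs selects the defaults; stype < 0 says only the lower
     * triangular part of A is supplied; NULL cmember means no constraints */
    int ok = csymamd (n, A, p, perm, (double *) NULL, stats,
        &calloc, &free, (int *) NULL, -1) ;
    csymamd_report (stats) ;
    if (!ok) return (1) ;

    for (int k = 0 ; k < n ; k++) printf ("perm [%d] = %d\n", k, perm [k]) ;
    return (0) ;
}
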


@ -5,8 +5,6 @@
/* ----------------------------------------------------------------------------
* CCOLAMD, Copyright (C) Univ. of Florida. Authors: Timothy A. Davis,
* Sivasankaran Rajamanickam, and Stefan Larimore
* See License.txt for the Version 2.1 of the GNU Lesser General Public License
* http://www.cise.ufl.edu/research/sparse
* -------------------------------------------------------------------------- */
/*
@ -58,39 +56,13 @@
* COLAMD is also available under alternate licenses, contact T. Davis
* for details.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
* USA
*
* Permission is hereby granted to use or copy this program under the
* terms of the GNU LGPL, provided that the Copyright, this License,
* and the Availability of the original version is retained on all copies.
* User documentation of any code that uses this code or any modified
* version of this code must cite the Copyright, this License, the
* Availability note, and "Used by permission." Permission to modify
* the code and to distribute modified code is granted, provided the
* Copyright, this License, and the Availability note are retained,
* and a notice that the code was modified is included.
* See CCOLAMD/Doc/License.txt for the license.
*
* Availability:
*
* The CCOLAMD/CSYMAMD library is available at
*
* http://www.cise.ufl.edu/research/sparse/ccolamd/
*
* This is the http://www.cise.ufl.edu/research/sparse/ccolamd/ccolamd.c
* file.
* http://www.suitesparse.com
*
* See the ChangeLog file for changes since Version 1.0.
*/
@ -99,10 +71,10 @@
/* === Description of user-callable routines ================================ */
/* ========================================================================== */
/* CCOLAMD includes both int and UF_long versions of all its routines. The
* description below is for the int version. For UF_long, all int arguments
* become UF_long integers. UF_long is normally defined as long, except for
* WIN64 */
/* CCOLAMD includes both int and SuiteSparse_long versions of all its routines.
* The description below is for the int version. For SuiteSparse_long, all
* int arguments become SuiteSparse_long integers. SuiteSparse_long is
* normally defined as long, except for WIN64 */
/* ----------------------------------------------------------------------------
* ccolamd_recommended:
@ -112,8 +84,8 @@
*
* #include "ccolamd.h"
* size_t ccolamd_recommended (int nnz, int n_row, int n_col) ;
* size_t ccolamd_l_recommended (UF_long nnz, UF_long n_row,
* UF_long n_col) ;
* size_t ccolamd_l_recommended (SuiteSparse_long nnz,
* SuiteSparse_long n_row, SuiteSparse_long n_col) ;
*
* Purpose:
*
@ -209,9 +181,12 @@
* double knobs [CCOLAMD_KNOBS], int stats [CCOLAMD_STATS],
* int *cmember) ;
*
* UF_long ccolamd_l (UF_long n_row, UF_long n_col, UF_long Alen,
* UF_long *A, UF_long *p, double knobs [CCOLAMD_KNOBS],
* UF_long stats [CCOLAMD_STATS], UF_long *cmember) ;
* SuiteSparse_long ccolamd_l (SuiteSparse_long n_row,
* SuiteSparse_long n_col, SuiteSparse_long Alen,
* SuiteSparse_long *A, SuiteSparse_long *p,
* double knobs [CCOLAMD_KNOBS],
* SuiteSparse_long stats [CCOLAMD_STATS],
* SuiteSparse_long *cmember) ;
*
* Purpose:
*
@ -385,9 +360,7 @@
*
* Example:
*
* See
* http://www.cise.ufl.edu/research/sparse/ccolamd/ccolamd_example.c
* for a complete example.
* See ccolamd_example.c for a complete example.
*
* To order the columns of a 5-by-4 matrix with 11 nonzero entries in
* the following nonzero pattern
@ -423,10 +396,12 @@
* void (*allocate) (size_t, size_t), void (*release) (void *),
* int *cmember, int stype) ;
*
* UF_long csymamd_l (UF_long n, UF_long *A, UF_long *p, UF_long *perm,
* double knobs [CCOLAMD_KNOBS], UF_long stats [CCOLAMD_STATS],
* void (*allocate) (size_t, size_t), void (*release) (void *),
* UF_long *cmember, UF_long stype) ;
* SuiteSparse_long csymamd_l (SuiteSparse_long n,
* SuiteSparse_long *A, SuiteSparse_long *p,
* SuiteSparse_long *perm, double knobs [CCOLAMD_KNOBS],
* SuiteSparse_long stats [CCOLAMD_STATS], void (*allocate)
* (size_t, size_t), void (*release) (void *),
* SuiteSparse_long *cmember, SuiteSparse_long stype) ;
*
* Purpose:
*
@ -562,7 +537,7 @@
*
* #include "ccolamd.h"
* ccolamd_report (int stats [CCOLAMD_STATS]) ;
* ccolamd_l_report (UF_long stats [CCOLAMD_STATS]) ;
* ccolamd_l_report (SuiteSparse_long stats [CCOLAMD_STATS]) ;
*
* Purpose:
*
@ -583,7 +558,7 @@
*
* #include "ccolamd.h"
* csymamd_report (int stats [CCOLAMD_STATS]) ;
* csymamd_l_report (UF_long stats [CCOLAMD_STATS]) ;
* csymamd_l_report (SuiteSparse_long stats [CCOLAMD_STATS]) ;
*
* Purpose:
*
@ -617,12 +592,11 @@
#include "ccolamd.h"
#include <stdlib.h>
#include <math.h>
#include <limits.h>
#ifdef MATLAB_MEX_FILE
#include <stdint.h>
typedef uint16_t char16_t;
#include "mex.h"
#include "matrix.h"
#endif
@ -636,17 +610,14 @@ typedef uint16_t char16_t;
#endif
/* ========================================================================== */
/* === int or UF_long ======================================================= */
/* === int or SuiteSparse_long ============================================== */
/* ========================================================================== */
/* define UF_long */
#include "UFconfig.h"
#ifdef DLONG
#define Int UF_long
#define ID UF_long_id
#define Int_MAX UF_long_max
#define Int SuiteSparse_long
#define ID SuiteSparse_long_id
#define Int_MAX SuiteSparse_long_max
#define CCOLAMD_recommended ccolamd_l_recommended
#define CCOLAMD_set_defaults ccolamd_l_set_defaults
@ -811,9 +782,6 @@ typedef struct CColamd_Row_struct
#define INDEX(i) (i)
#endif
/* All output goes through the PRINTF macro. */
#define PRINTF(params) { if (ccolamd_printf != NULL) (void) ccolamd_printf params ; }
/* ========================================================================== */
/* === Debugging prototypes and definitions ================================= */
@ -827,11 +795,11 @@ typedef struct CColamd_Row_struct
PRIVATE Int ccolamd_debug ;
/* debug print statements */
#define DEBUG0(params) { PRINTF (params) ; }
#define DEBUG1(params) { if (ccolamd_debug >= 1) PRINTF (params) ; }
#define DEBUG2(params) { if (ccolamd_debug >= 2) PRINTF (params) ; }
#define DEBUG3(params) { if (ccolamd_debug >= 3) PRINTF (params) ; }
#define DEBUG4(params) { if (ccolamd_debug >= 4) PRINTF (params) ; }
#define DEBUG0(params) { SUITESPARSE_PRINTF (params) ; }
#define DEBUG1(params) { if (ccolamd_debug >= 1) SUITESPARSE_PRINTF (params) ; }
#define DEBUG2(params) { if (ccolamd_debug >= 2) SUITESPARSE_PRINTF (params) ; }
#define DEBUG3(params) { if (ccolamd_debug >= 3) SUITESPARSE_PRINTF (params) ; }
#define DEBUG4(params) { if (ccolamd_debug >= 4) SUITESPARSE_PRINTF (params) ; }
#ifdef MATLAB_MEX_FILE
#define ASSERT(expression) (mxAssert ((expression), ""))
@ -3752,12 +3720,12 @@ PRIVATE void print_report
Int i1, i2, i3 ;
PRINTF (("\n%s version %d.%d, %s: ", method,
SUITESPARSE_PRINTF (("\n%s version %d.%d, %s: ", method,
CCOLAMD_MAIN_VERSION, CCOLAMD_SUB_VERSION, CCOLAMD_DATE)) ;
if (!stats)
{
PRINTF (("No statistics available.\n")) ;
SUITESPARSE_PRINTF (("No statistics available.\n")) ;
return ;
}
@ -3767,11 +3735,11 @@ PRIVATE void print_report
if (stats [CCOLAMD_STATUS] >= 0)
{
PRINTF(("OK. ")) ;
SUITESPARSE_PRINTF(("OK. ")) ;
}
else
{
PRINTF(("ERROR. ")) ;
SUITESPARSE_PRINTF(("ERROR. ")) ;
}
switch (stats [CCOLAMD_STATUS])
@ -3779,91 +3747,105 @@ PRIVATE void print_report
case CCOLAMD_OK_BUT_JUMBLED:
PRINTF(("Matrix has unsorted or duplicate row indices.\n")) ;
SUITESPARSE_PRINTF((
"Matrix has unsorted or duplicate row indices.\n")) ;
PRINTF(("%s: duplicate or out-of-order row indices: "ID"\n",
method, i3)) ;
SUITESPARSE_PRINTF((
"%s: duplicate or out-of-order row indices: "ID"\n",
method, i3)) ;
PRINTF(("%s: last seen duplicate or out-of-order row: "ID"\n",
method, INDEX (i2))) ;
SUITESPARSE_PRINTF((
"%s: last seen duplicate or out-of-order row: "ID"\n",
method, INDEX (i2))) ;
PRINTF(("%s: last seen in column: "ID"",
method, INDEX (i1))) ;
SUITESPARSE_PRINTF((
"%s: last seen in column: "ID"",
method, INDEX (i1))) ;
/* no break - fall through to next case instead */
case CCOLAMD_OK:
PRINTF(("\n")) ;
SUITESPARSE_PRINTF(("\n")) ;
PRINTF(("%s: number of dense or empty rows ignored: "ID"\n",
method, stats [CCOLAMD_DENSE_ROW])) ;
SUITESPARSE_PRINTF((
"%s: number of dense or empty rows ignored: "ID"\n",
method, stats [CCOLAMD_DENSE_ROW])) ;
PRINTF(("%s: number of dense or empty columns ignored: "ID"\n",
method, stats [CCOLAMD_DENSE_COL])) ;
SUITESPARSE_PRINTF((
"%s: number of dense or empty columns ignored: "ID"\n",
method, stats [CCOLAMD_DENSE_COL])) ;
PRINTF(("%s: number of garbage collections performed: "ID"\n",
method, stats [CCOLAMD_DEFRAG_COUNT])) ;
SUITESPARSE_PRINTF((
"%s: number of garbage collections performed: "ID"\n",
method, stats [CCOLAMD_DEFRAG_COUNT])) ;
break ;
case CCOLAMD_ERROR_A_not_present:
PRINTF(("Array A (row indices of matrix) not present.\n")) ;
SUITESPARSE_PRINTF((
"Array A (row indices of matrix) not present.\n")) ;
break ;
case CCOLAMD_ERROR_p_not_present:
PRINTF(("Array p (column pointers for matrix) not present.\n")) ;
SUITESPARSE_PRINTF((
"Array p (column pointers for matrix) not present.\n")) ;
break ;
case CCOLAMD_ERROR_nrow_negative:
PRINTF(("Invalid number of rows ("ID").\n", i1)) ;
SUITESPARSE_PRINTF(("Invalid number of rows ("ID").\n", i1)) ;
break ;
case CCOLAMD_ERROR_ncol_negative:
PRINTF(("Invalid number of columns ("ID").\n", i1)) ;
SUITESPARSE_PRINTF(("Invalid number of columns ("ID").\n", i1)) ;
break ;
case CCOLAMD_ERROR_nnz_negative:
PRINTF(("Invalid number of nonzero entries ("ID").\n", i1)) ;
SUITESPARSE_PRINTF((
"Invalid number of nonzero entries ("ID").\n", i1)) ;
break ;
case CCOLAMD_ERROR_p0_nonzero:
PRINTF(("Invalid column pointer, p [0] = "ID", must be 0.\n", i1)) ;
SUITESPARSE_PRINTF((
"Invalid column pointer, p [0] = "ID", must be 0.\n", i1)) ;
break ;
case CCOLAMD_ERROR_A_too_small:
PRINTF(("Array A too small.\n")) ;
PRINTF((" Need Alen >= "ID", but given only Alen = "ID".\n",
i1, i2)) ;
SUITESPARSE_PRINTF(("Array A too small.\n")) ;
SUITESPARSE_PRINTF((
" Need Alen >= "ID", but given only Alen = "ID".\n",
i1, i2)) ;
break ;
case CCOLAMD_ERROR_col_length_negative:
PRINTF(("Column "ID" has a negative number of entries ("ID").\n",
INDEX (i1), i2)) ;
SUITESPARSE_PRINTF((
"Column "ID" has a negative number of entries ("ID").\n",
INDEX (i1), i2)) ;
break ;
case CCOLAMD_ERROR_row_index_out_of_bounds:
PRINTF(("Row index (row "ID") out of bounds ("ID" to "ID") in"
"column "ID".\n", INDEX (i2), INDEX (0), INDEX (i3-1),
INDEX (i1))) ;
SUITESPARSE_PRINTF((
"Row index (row "ID") out of bounds ("ID" to "ID") in"
"column "ID".\n", INDEX (i2), INDEX (0), INDEX (i3-1),
INDEX (i1))) ;
break ;
case CCOLAMD_ERROR_out_of_memory:
PRINTF(("Out of memory.\n")) ;
SUITESPARSE_PRINTF(("Out of memory.\n")) ;
break ;
case CCOLAMD_ERROR_invalid_cmember:
PRINTF(("cmember invalid\n")) ;
SUITESPARSE_PRINTF(("cmember invalid\n")) ;
break ;
}
}
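For orientation, here is a minimal, hypothetical calling sketch for the int version of ccolamd, following the prototypes documented in the comments above (ccolamd_recommended, ccolamd, ccolamd_report). The 5-by-4 matrix, its row indices, and the error handling are illustrative only and are not the library's own example:

#include <stdio.h>
#include <stdlib.h>
#include "ccolamd.h"

int main (void)
{
    /* illustrative 5-by-4 matrix with 11 nonzeros, in compressed-column form */
    int n_row = 5, n_col = 4, nnz = 11 ;
    int p [5] = {0, 3, 5, 9, 11} ;                  /* column pointers, p [0] must be 0 */
    int rows [11] = {0,1,4, 2,4, 0,1,3,4, 1,3} ;    /* row indices, column by column */
    int stats [CCOLAMD_STATS] ;

    /* A must have size ccolamd_recommended (...) ; its first nnz entries hold
       the row indices, the remainder is workspace for ccolamd */
    size_t Alen = ccolamd_recommended (nnz, n_row, n_col) ;
    int *A = (int *) malloc (Alen * sizeof (int)) ;
    if (A == NULL || Alen == 0) return (1) ;
    for (int k = 0 ; k < nnz ; k++) A [k] = rows [k] ;

    /* NULL knobs = default parameters, NULL cmember = no constraint sets */
    int ok = ccolamd (n_row, n_col, (int) Alen, A, p, NULL, stats, NULL) ;
    ccolamd_report (stats) ;

    if (ok)
    {
        /* on success, p [0..n_col-1] holds the column ordering */
        for (int j = 0 ; j < n_col ; j++) printf ("order [%d] = %d\n", j, p [j]) ;
    }
    free (A) ;
    return (ok ? 0 : 1) ;
}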

View File

@ -1,6 +1,6 @@
# install CCOLAMD headers
install(FILES CCOLAMD/Include/ccolamd.h DESTINATION include/gtsam/3rdparty/CCOLAMD)
install(FILES UFconfig/UFconfig.h DESTINATION include/gtsam/3rdparty/UFconfig)
install(FILES SuiteSparse_config/SuiteSparse_config.h DESTINATION include/gtsam/3rdparty/SuiteSparse_config)
if(NOT GTSAM_USE_SYSTEM_EIGEN)
# Find plain .h files

View File

@ -0,0 +1,70 @@
#-------------------------------------------------------------------------------
# SuiteSparse_config Makefile
#-------------------------------------------------------------------------------
SUITESPARSE ?= $(realpath $(CURDIR)/..)
export SUITESPARSE
# version of SuiteSparse_config is also version of SuiteSparse meta-package
LIBRARY = libsuitesparseconfig
VERSION = 4.5.2
SO_VERSION = 4
default: library
include SuiteSparse_config.mk
ccode: all
all: library
# compile and install in SuiteSparse/lib
library: $(AR_TARGET)
$(MAKE) install INSTALL=$(SUITESPARSE)
OBJ = SuiteSparse_config.o
SuiteSparse_config.o: SuiteSparse_config.c SuiteSparse_config.h
$(CC) $(CF) -c SuiteSparse_config.c
$(AR_TARGET): $(OBJ)
$(ARCHIVE) $(AR_TARGET) SuiteSparse_config.o
$(RANLIB) $(AR_TARGET)
distclean: purge
purge: clean
( cd xerbla ; $(MAKE) purge )
- $(RM) -r $(PURGE)
clean:
( cd xerbla ; $(MAKE) clean )
- $(RM) -r $(CLEAN)
# install SuiteSparse_config
install: $(AR_TARGET) $(INSTALL_LIB)/$(SO_TARGET)
$(INSTALL_LIB)/$(SO_TARGET): $(OBJ)
@mkdir -p $(INSTALL_LIB)
@mkdir -p $(INSTALL_INCLUDE)
@mkdir -p $(INSTALL_DOC)
$(CC) $(SO_OPTS) $^ -o $@ $(LDLIBS)
( cd $(INSTALL_LIB) ; ln -sf $(SO_TARGET) $(SO_PLAIN) )
( cd $(INSTALL_LIB) ; ln -sf $(SO_TARGET) $(SO_MAIN) )
$(CP) SuiteSparse_config.h $(INSTALL_INCLUDE)
$(CP) README.txt $(INSTALL_DOC)/SUITESPARSECONFIG_README.txt
chmod 755 $(INSTALL_LIB)/$(SO_TARGET)
chmod 755 $(INSTALL_LIB)/$(SO_PLAIN)
chmod 644 $(INSTALL_INCLUDE)/SuiteSparse_config.h
chmod 644 $(INSTALL_DOC)/SUITESPARSECONFIG_README.txt
# uninstall SuiteSparse_config
uninstall:
$(RM) $(INSTALL_LIB)/$(SO_TARGET)
$(RM) $(INSTALL_LIB)/$(SO_PLAIN)
$(RM) $(INSTALL_LIB)/$(SO_MAIN)
$(RM) $(INSTALL_INCLUDE)/SuiteSparse_config.h
$(RM) $(INSTALL_DOC)/SUITESPARSECONFIG_README.txt
( cd xerbla ; $(MAKE) uninstall )

View File

@ -0,0 +1,51 @@
SuiteSparse_config, 2016, Timothy A. Davis, http://www.suitesparse.com
(formerly the UFconfig package)
This directory contains a default SuiteSparse_config.mk file. It tries to
detect your system (Linux, SunOS, or Mac), which compiler to use (icc or cc),
which BLAS and LAPACK library to use (OpenBLAS or MKL), and whether or not to
compile with CUDA.
For alternatives, see the comments in the SuiteSparse_config.mk file.
License: No licensing restrictions apply to this file or to the
SuiteSparse_config directory.
--------------------------------------------------------------------------------
SuiteSparse_config contains configuration settings for many of the software
packages that I develop or co-author. Note that older versions of some of
these packages do not require SuiteSparse_config.
Package Description
------- -----------
AMD approximate minimum degree ordering
CAMD constrained AMD
COLAMD column approximate minimum degree ordering
CCOLAMD constrained approximate minimum degree ordering
UMFPACK sparse LU factorization, with the BLAS
CXSparse int/long/real/complex version of CSparse
CHOLMOD sparse Cholesky factorization, update/downdate
KLU sparse LU factorization, BLAS-free
BTF permutation to block triangular form
LDL concise sparse LDL'
LPDASA LP Dual Active Set Algorithm
RBio read/write files in Rutherford/Boeing format
SPQR sparse QR factorization (full name: SuiteSparseQR)
SuiteSparse_config is not required by these packages:
CSparse a Concise Sparse matrix package
MATLAB_Tools toolboxes for use in MATLAB
In addition, the xerbla/ directory contains Fortran and C versions of the
BLAS/LAPACK xerbla routine, which is called when an invalid input is passed to
the BLAS or LAPACK. The xerbla provided here does not print any message, so
the entire Fortran I/O library does not need to be linked into a C application.
Most versions of the BLAS contain xerbla, but those from K. Goto do not. Use
this if you need to.
If you edit this directory (SuiteSparse_config.mk in particular) then you
must do "make purge ; make" in the parent directory to recompile all of
SuiteSparse. Otherwise, the changes will not necessarily be applied.

View File

@ -0,0 +1,531 @@
/* ========================================================================== */
/* === SuiteSparse_config =================================================== */
/* ========================================================================== */
/* SuiteSparse configuration : memory manager and printf functions. */
/* Copyright (c) 2013, Timothy A. Davis. No licensing restrictions
* apply to this file or to the SuiteSparse_config directory.
* Author: Timothy A. Davis.
*/
#include <math.h>
#include <stdlib.h>
#ifndef NPRINT
#include <stdio.h>
#endif
#ifdef MATLAB_MEX_FILE
#include "mex.h"
#include "matrix.h"
#endif
#ifndef NULL
#define NULL ((void *) 0)
#endif
#include "SuiteSparse_config.h"
/* -------------------------------------------------------------------------- */
/* SuiteSparse_config : a global extern struct */
/* -------------------------------------------------------------------------- */
/* The SuiteSparse_config struct is available to all SuiteSparse functions and
to all applications that use those functions. It must be modified with
care, particularly in a multithreaded context. Normally, the application
will initialize this object once, via SuiteSparse_start, possibly followed
by application-specific modifications if the application wants to use
alternative memory manager functions.
The user can redefine these global pointers at run-time to change the
memory manager and printf function used by SuiteSparse.
If -DNMALLOC is defined at compile-time, then no memory-manager functions
are specified; you must define them at run-time, after calling
SuiteSparse_start.
If -DNPRINT is defined at compile time, then printf is disabled, and
SuiteSparse will not use printf.
*/
struct SuiteSparse_config_struct SuiteSparse_config =
{
/* memory management functions */
#ifndef NMALLOC
#ifdef MATLAB_MEX_FILE
/* MATLAB mexFunction: */
mxMalloc, mxCalloc, mxRealloc, mxFree,
#else
/* standard ANSI C: */
malloc, calloc, realloc, free,
#endif
#else
/* no memory manager defined; you must define one at run-time: */
NULL, NULL, NULL, NULL,
#endif
/* printf function */
#ifndef NPRINT
#ifdef MATLAB_MEX_FILE
/* MATLAB mexFunction: */
mexPrintf,
#else
/* standard ANSI C: */
printf,
#endif
#else
/* printf is disabled */
NULL,
#endif
SuiteSparse_hypot,
SuiteSparse_divcomplex
} ;
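As the comment above notes, an application can swap in its own memory manager and printf at run time by assigning to these pointers after SuiteSparse_start. A small hypothetical sketch (my_printf is an illustrative name, not part of the library):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include "SuiteSparse_config.h"

/* illustrative replacement that routes all SuiteSparse output to stderr */
static int my_printf (const char *fmt, ...)
{
    va_list ap ;
    va_start (ap, fmt) ;
    int n = vfprintf (stderr, fmt, ap) ;
    va_end (ap) ;
    return (n) ;
}

int main (void)
{
    SuiteSparse_start ( ) ;                        /* install the defaults */
    SuiteSparse_config.printf_func = my_printf ;   /* then override printf */
    SuiteSparse_config.malloc_func = malloc ;      /* or custom allocators */
    SuiteSparse_config.free_func   = free ;
    /* ... call SuiteSparse packages here ... */
    SuiteSparse_finish ( ) ;
    return (0) ;
}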
/* -------------------------------------------------------------------------- */
/* SuiteSparse_start */
/* -------------------------------------------------------------------------- */
/* All applications that use SuiteSparse should call SuiteSparse_start prior
   to using any SuiteSparse function.  In a multithreaded application, only a
   single thread should call this function.  Currently, this function is
   optional, since all it does is reset the memory-management, printf, and
   math function pointers to their defaults (as shown below).
Future releases of SuiteSparse might enforce a requirement that
SuiteSparse_start be called prior to calling any SuiteSparse function.
*/
void SuiteSparse_start ( void )
{
/* memory management functions */
#ifndef NMALLOC
#ifdef MATLAB_MEX_FILE
/* MATLAB mexFunction: */
SuiteSparse_config.malloc_func = mxMalloc ;
SuiteSparse_config.calloc_func = mxCalloc ;
SuiteSparse_config.realloc_func = mxRealloc ;
SuiteSparse_config.free_func = mxFree ;
#else
/* standard ANSI C: */
SuiteSparse_config.malloc_func = malloc ;
SuiteSparse_config.calloc_func = calloc ;
SuiteSparse_config.realloc_func = realloc ;
SuiteSparse_config.free_func = free ;
#endif
#else
/* no memory manager defined; you must define one after calling
SuiteSparse_start */
SuiteSparse_config.malloc_func = NULL ;
SuiteSparse_config.calloc_func = NULL ;
SuiteSparse_config.realloc_func = NULL ;
SuiteSparse_config.free_func = NULL ;
#endif
/* printf function */
#ifndef NPRINT
#ifdef MATLAB_MEX_FILE
/* MATLAB mexFunction: */
SuiteSparse_config.printf_func = mexPrintf ;
#else
/* standard ANSI C: */
SuiteSparse_config.printf_func = printf ;
#endif
#else
/* printf is disabled */
SuiteSparse_config.printf_func = NULL ;
#endif
/* math functions */
SuiteSparse_config.hypot_func = SuiteSparse_hypot ;
SuiteSparse_config.divcomplex_func = SuiteSparse_divcomplex ;
}
/* -------------------------------------------------------------------------- */
/* SuiteSparse_finish */
/* -------------------------------------------------------------------------- */
/* This currently does nothing, but in the future, applications should call
SuiteSparse_start before calling any SuiteSparse function, and then
SuiteSparse_finish after calling the last SuiteSparse function, just before
exiting. In a multithreaded application, only a single thread should call
this function.
Future releases of SuiteSparse might use this function for any
SuiteSparse-wide cleanup operations or finalization of statistics.
*/
void SuiteSparse_finish ( void )
{
/* do nothing */ ;
}
/* -------------------------------------------------------------------------- */
/* SuiteSparse_malloc: malloc wrapper */
/* -------------------------------------------------------------------------- */
void *SuiteSparse_malloc /* pointer to allocated block of memory */
(
size_t nitems, /* number of items to malloc */
size_t size_of_item /* sizeof each item */
)
{
void *p ;
size_t size ;
if (nitems < 1) nitems = 1 ;
if (size_of_item < 1) size_of_item = 1 ;
size = nitems * size_of_item ;
if (size != ((double) nitems) * size_of_item)
{
/* size_t overflow */
p = NULL ;
}
else
{
p = (void *) (SuiteSparse_config.malloc_func) (size) ;
}
return (p) ;
}
/* -------------------------------------------------------------------------- */
/* SuiteSparse_calloc: calloc wrapper */
/* -------------------------------------------------------------------------- */
void *SuiteSparse_calloc /* pointer to allocated block of memory */
(
size_t nitems, /* number of items to calloc */
size_t size_of_item /* sizeof each item */
)
{
void *p ;
size_t size ;
if (nitems < 1) nitems = 1 ;
if (size_of_item < 1) size_of_item = 1 ;
size = nitems * size_of_item ;
if (size != ((double) nitems) * size_of_item)
{
/* size_t overflow */
p = NULL ;
}
else
{
p = (void *) (SuiteSparse_config.calloc_func) (nitems, size_of_item) ;
}
return (p) ;
}
/* -------------------------------------------------------------------------- */
/* SuiteSparse_realloc: realloc wrapper */
/* -------------------------------------------------------------------------- */
/* If p is non-NULL on input, it points to a previously allocated object of
size nitems_old * size_of_item. The object is reallocated to be of size
nitems_new * size_of_item. If p is NULL on input, then a new object of that
size is allocated. On success, a pointer to the new object is returned,
and ok is returned as 1. If the allocation fails, ok is set to 0 and a
pointer to the old (unmodified) object is returned.
*/
void *SuiteSparse_realloc /* pointer to reallocated block of memory, or
to original block if the realloc failed. */
(
size_t nitems_new, /* new number of items in the object */
size_t nitems_old, /* old number of items in the object */
size_t size_of_item, /* sizeof each item */
void *p, /* old object to reallocate */
int *ok /* 1 if successful, 0 otherwise */
)
{
size_t size ;
if (nitems_old < 1) nitems_old = 1 ;
if (nitems_new < 1) nitems_new = 1 ;
if (size_of_item < 1) size_of_item = 1 ;
size = nitems_new * size_of_item ;
if (size != ((double) nitems_new) * size_of_item)
{
/* size_t overflow */
(*ok) = 0 ;
}
else if (p == NULL)
{
/* a fresh object is being allocated */
p = SuiteSparse_malloc (nitems_new, size_of_item) ;
(*ok) = (p != NULL) ;
}
else if (nitems_old == nitems_new)
{
/* the object does not change; do nothing */
(*ok) = 1 ;
}
else
{
/* change the size of the object from nitems_old to nitems_new */
void *pnew ;
pnew = (void *) (SuiteSparse_config.realloc_func) (p, size) ;
if (pnew == NULL)
{
if (nitems_new < nitems_old)
{
/* the attempt to reduce the size of the block failed, but
the old block is unchanged. So pretend to succeed. */
(*ok) = 1 ;
}
else
{
/* out of memory */
(*ok) = 0 ;
}
}
else
{
/* success */
p = pnew ;
(*ok) = 1 ;
}
}
return (p) ;
}
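A short, hypothetical usage sketch for the wrapper above, showing how the ok flag and the return value interact (grow_example is an illustrative name, not part of the library):

#include "SuiteSparse_config.h"

static void grow_example (void)
{
    int ok ;
    size_t n_old = 100, n_new = 200 ;
    double *x = (double *) SuiteSparse_malloc (n_old, sizeof (double)) ;
    if (x == NULL) return ;
    /* on failure, ok is 0 and x still points to the original 100-entry block */
    x = (double *) SuiteSparse_realloc (n_new, n_old, sizeof (double), x, &ok) ;
    if (ok)
    {
        /* x now has room for 200 doubles */
    }
    x = (double *) SuiteSparse_free (x) ;   /* free always returns NULL */
}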
/* -------------------------------------------------------------------------- */
/* SuiteSparse_free: free wrapper */
/* -------------------------------------------------------------------------- */
void *SuiteSparse_free /* always returns NULL */
(
void *p /* block to free */
)
{
if (p)
{
(SuiteSparse_config.free_func) (p) ;
}
return (NULL) ;
}
/* -------------------------------------------------------------------------- */
/* SuiteSparse_tic: return current wall clock time */
/* -------------------------------------------------------------------------- */
/* Returns the number of seconds (tic [0]) and nanoseconds (tic [1]) since some
* unspecified but fixed time in the past. If no timer is installed, zero is
* returned. A scalar double precision value for 'tic' could be used, but this
 * might cause loss of precision because clock_gettime returns the time from
* some distant time in the past. Thus, an array of size 2 is used.
*
* The timer is enabled by default. To disable the timer, compile with
* -DNTIMER. If enabled on a POSIX C 1993 system, the timer requires linking
* with the -lrt library.
*
* example:
*
* double tic [2], r, s, t ;
* SuiteSparse_tic (tic) ; // start the timer
* // do some work A
* t = SuiteSparse_toc (tic) ; // t is time for work A, in seconds
* // do some work B
* s = SuiteSparse_toc (tic) ; // s is time for work A and B, in seconds
* SuiteSparse_tic (tic) ; // restart the timer
* // do some work C
 * r = SuiteSparse_toc (tic) ; // r is time for work C, in seconds
*
* A double array of size 2 is used so that this routine can be more easily
* ported to non-POSIX systems. The caller does not rely on the POSIX
* <time.h> include file.
*/
#ifdef SUITESPARSE_TIMER_ENABLED
#include <time.h>
void SuiteSparse_tic
(
double tic [2] /* output, contents undefined on input */
)
{
/* POSIX C 1993 timer, requires linking with -lrt */
struct timespec t ;
clock_gettime (CLOCK_MONOTONIC, &t) ;
tic [0] = (double) (t.tv_sec) ;
tic [1] = (double) (t.tv_nsec) ;
}
#else
void SuiteSparse_tic
(
double tic [2] /* output, contents undefined on input */
)
{
/* no timer installed */
tic [0] = 0 ;
tic [1] = 0 ;
}
#endif
/* -------------------------------------------------------------------------- */
/* SuiteSparse_toc: return time since last tic */
/* -------------------------------------------------------------------------- */
/* Assuming SuiteSparse_tic is accurate to the nanosecond, this function is
* accurate down to the nanosecond for 2^53 nanoseconds since the last call to
* SuiteSparse_tic, which is sufficient for SuiteSparse (about 104 days). If
* additional accuracy is required, the caller can use two calls to
* SuiteSparse_tic and do the calculations differently.
*/
double SuiteSparse_toc /* returns time in seconds since last tic */
(
double tic [2] /* input, not modified from last call to SuiteSparse_tic */
)
{
double toc [2] ;
SuiteSparse_tic (toc) ;
return ((toc [0] - tic [0]) + 1e-9 * (toc [1] - tic [1])) ;
}
/* -------------------------------------------------------------------------- */
/* SuiteSparse_time: return current wallclock time in seconds */
/* -------------------------------------------------------------------------- */
/* This function might not be accurate down to the nanosecond. */
double SuiteSparse_time /* returns current wall clock time in seconds */
(
void
)
{
double toc [2] ;
SuiteSparse_tic (toc) ;
return (toc [0] + 1e-9 * toc [1]) ;
}
/* -------------------------------------------------------------------------- */
/* SuiteSparse_version: return the current version of SuiteSparse */
/* -------------------------------------------------------------------------- */
int SuiteSparse_version
(
int version [3]
)
{
if (version != NULL)
{
version [0] = SUITESPARSE_MAIN_VERSION ;
version [1] = SUITESPARSE_SUB_VERSION ;
version [2] = SUITESPARSE_SUBSUB_VERSION ;
}
return (SUITESPARSE_VERSION) ;
}
/* -------------------------------------------------------------------------- */
/* SuiteSparse_hypot */
/* -------------------------------------------------------------------------- */
/* There is an equivalent routine called hypot in <math.h>, which conforms
* to ANSI C99. However, SuiteSparse does not assume that ANSI C99 is
* available. You can use the ANSI C99 hypot routine with:
*
* #include <math.h>
 * SuiteSparse_config.hypot_func = hypot ;
*
* Default value of the SuiteSparse_config.hypot_func pointer is
* SuiteSparse_hypot, defined below.
*
* s = hypot (x,y) computes s = sqrt (x*x + y*y) but does so more accurately.
* The NaN cases for the double relops x >= y and x+y == x are safely ignored.
*
* Source: Algorithm 312, "Absolute value and square root of a complex number,"
* P. Friedland, Comm. ACM, vol 10, no 10, October 1967, page 665.
*/
double SuiteSparse_hypot (double x, double y)
{
double s, r ;
x = fabs (x) ;
y = fabs (y) ;
if (x >= y)
{
if (x + y == x)
{
s = x ;
}
else
{
r = y / x ;
s = x * sqrt (1.0 + r*r) ;
}
}
else
{
if (y + x == y)
{
s = y ;
}
else
{
r = x / y ;
s = y * sqrt (1.0 + r*r) ;
}
}
return (s) ;
}
/* -------------------------------------------------------------------------- */
/* SuiteSparse_divcomplex */
/* -------------------------------------------------------------------------- */
/* c = a/b where c, a, and b are complex. The real and imaginary parts are
* passed as separate arguments to this routine. The NaN case is ignored
* for the double relop br >= bi. Returns 1 if the denominator is zero,
* 0 otherwise.
*
* This uses ACM Algo 116, by R. L. Smith, 1962, which tries to avoid
* underflow and overflow.
*
* c can be the same variable as a or b.
*
* Default value of the SuiteSparse_config.divcomplex_func pointer is
* SuiteSparse_divcomplex.
*/
int SuiteSparse_divcomplex
(
double ar, double ai, /* real and imaginary parts of a */
double br, double bi, /* real and imaginary parts of b */
double *cr, double *ci /* real and imaginary parts of c */
)
{
double tr, ti, r, den ;
if (fabs (br) >= fabs (bi))
{
r = bi / br ;
den = br + r * bi ;
tr = (ar + ai * r) / den ;
ti = (ai - ar * r) / den ;
}
else
{
r = br / bi ;
den = r * br + bi ;
tr = (ar * r + ai) / den ;
ti = (ai * r - ar) / den ;
}
*cr = tr ;
*ci = ti ;
return (den == 0.) ;
}

View File

@ -0,0 +1,248 @@
/* ========================================================================== */
/* === SuiteSparse_config =================================================== */
/* ========================================================================== */
/* Configuration file for SuiteSparse: a Suite of Sparse matrix packages
* (AMD, COLAMD, CCOLAMD, CAMD, CHOLMOD, UMFPACK, CXSparse, and others).
*
* SuiteSparse_config.h provides the definition of the long integer. On most
* systems, a C program can be compiled in LP64 mode, in which long's and
* pointers are both 64-bits, and int's are 32-bits. Windows 64, however, uses
* the LLP64 model, in which int's and long's are 32-bits, and long long's and
* pointers are 64-bits.
*
* SuiteSparse packages that include long integer versions are
* intended for the LP64 mode. However, as a workaround for Windows 64
* (and perhaps other systems), the long integer can be redefined.
*
* If _WIN64 is defined, then the __int64 type is used instead of long.
*
* The long integer can also be defined at compile time. For example, this
* could be added to SuiteSparse_config.mk:
*
* CFLAGS = -O -D'SuiteSparse_long=long long' \
* -D'SuiteSparse_long_max=9223372036854775801' -D'SuiteSparse_long_idd="lld"'
*
* This file defines SuiteSparse_long as either long (on all but _WIN64) or
* __int64 on Windows 64. The intent is that a SuiteSparse_long is always a
* 64-bit integer in a 64-bit code. ptrdiff_t might be a better choice than
* long; it is always the same size as a pointer.
*
* This file also defines the SUITESPARSE_VERSION and related definitions.
*
* Copyright (c) 2012, Timothy A. Davis. No licensing restrictions apply
* to this file or to the SuiteSparse_config directory.
* Author: Timothy A. Davis.
*/
#ifndef SUITESPARSE_CONFIG_H
#define SUITESPARSE_CONFIG_H
#ifdef __cplusplus
extern "C" {
#endif
#include <limits.h>
#include <stdlib.h>
/* ========================================================================== */
/* === SuiteSparse_long ===================================================== */
/* ========================================================================== */
#ifndef SuiteSparse_long
#ifdef _WIN64
#define SuiteSparse_long __int64
#define SuiteSparse_long_max _I64_MAX
#define SuiteSparse_long_idd "I64d"
#else
#define SuiteSparse_long long
#define SuiteSparse_long_max LONG_MAX
#define SuiteSparse_long_idd "ld"
#endif
#define SuiteSparse_long_id "%" SuiteSparse_long_idd
#endif
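A small hypothetical sketch of how the format-string macros above are typically used, so that the same printf call compiles in both the LP64 and _WIN64 cases:

#include <stdio.h>
#include "SuiteSparse_config.h"

static void show_count (SuiteSparse_long n)
{
    /* SuiteSparse_long_id expands to "%ld" (or "%I64d" on _WIN64) */
    printf ("nonzeros: " SuiteSparse_long_id "\n", n) ;
}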
/* ========================================================================== */
/* === SuiteSparse_config parameters and functions ========================== */
/* ========================================================================== */
/* SuiteSparse-wide parameters are placed in this struct. It is meant to be
an extern, globally-accessible struct. It is not meant to be updated
frequently by multiple threads. Rather, if an application needs to modify
SuiteSparse_config, it should do it once at the beginning of the application,
before multiple threads are launched.
The intent of these function pointers is that they not be used in your
application directly, except to assign them to the desired user-provided
functions.  Rather, you should use the wrapper functions declared below
(SuiteSparse_malloc, SuiteSparse_calloc, SuiteSparse_realloc, SuiteSparse_free)
and the SUITESPARSE_PRINTF macro.
*/
struct SuiteSparse_config_struct
{
void *(*malloc_func) (size_t) ; /* pointer to malloc */
void *(*calloc_func) (size_t, size_t) ; /* pointer to calloc */
void *(*realloc_func) (void *, size_t) ; /* pointer to realloc */
void (*free_func) (void *) ; /* pointer to free */
int (*printf_func) (const char *, ...) ; /* pointer to printf */
double (*hypot_func) (double, double) ; /* pointer to hypot */
int (*divcomplex_func) (double, double, double, double, double *, double *);
} ;
extern struct SuiteSparse_config_struct SuiteSparse_config ;
void SuiteSparse_start ( void ) ; /* called to start SuiteSparse */
void SuiteSparse_finish ( void ) ; /* called to finish SuiteSparse */
void *SuiteSparse_malloc /* pointer to allocated block of memory */
(
size_t nitems, /* number of items to malloc (>=1 is enforced) */
size_t size_of_item /* sizeof each item */
) ;
void *SuiteSparse_calloc /* pointer to allocated block of memory */
(
size_t nitems, /* number of items to calloc (>=1 is enforced) */
size_t size_of_item /* sizeof each item */
) ;
void *SuiteSparse_realloc /* pointer to reallocated block of memory, or
to original block if the realloc failed. */
(
size_t nitems_new, /* new number of items in the object */
size_t nitems_old, /* old number of items in the object */
size_t size_of_item, /* sizeof each item */
void *p, /* old object to reallocate */
int *ok /* 1 if successful, 0 otherwise */
) ;
void *SuiteSparse_free /* always returns NULL */
(
void *p /* block to free */
) ;
void SuiteSparse_tic /* start the timer */
(
double tic [2] /* output, contents undefined on input */
) ;
double SuiteSparse_toc /* return time in seconds since last tic */
(
double tic [2] /* input: from last call to SuiteSparse_tic */
) ;
double SuiteSparse_time /* returns current wall clock time in seconds */
(
void
) ;
/* returns sqrt (x^2 + y^2), computed reliably */
double SuiteSparse_hypot (double x, double y) ;
/* complex division of c = a/b */
int SuiteSparse_divcomplex
(
double ar, double ai, /* real and imaginary parts of a */
double br, double bi, /* real and imaginary parts of b */
double *cr, double *ci /* real and imaginary parts of c */
) ;
/* determine which timer to use, if any */
#ifndef NTIMER
#ifdef _POSIX_C_SOURCE
#if _POSIX_C_SOURCE >= 199309L
#define SUITESPARSE_TIMER_ENABLED
#endif
#endif
#endif
/* SuiteSparse printf macro */
#define SUITESPARSE_PRINTF(params) \
{ \
if (SuiteSparse_config.printf_func != NULL) \
{ \
(void) (SuiteSparse_config.printf_func) params ; \
} \
}
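Because params is substituted as a single parenthesized argument list, callers use double parentheses, as in the ccolamd changes earlier in this diff. A minimal hypothetical sketch:

#include "SuiteSparse_config.h"

static void report_nnz (int nnz)
{
    /* the inner parentheses carry the whole printf argument list */
    SUITESPARSE_PRINTF (("matrix has %d nonzeros\n", nnz)) ;
}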
/* ========================================================================== */
/* === SuiteSparse version ================================================== */
/* ========================================================================== */
/* SuiteSparse is not a package itself, but a collection of packages, some of
* which must be used together (UMFPACK requires AMD, CHOLMOD requires AMD,
* COLAMD, CAMD, and CCOLAMD, etc). A version number is provided here for the
* collection itself. The versions of packages within each version of
* SuiteSparse are meant to work together. Combining one package from one
* version of SuiteSparse, with another package from another version of
* SuiteSparse, may or may not work.
*
* SuiteSparse contains the following packages:
*
* SuiteSparse_config version 4.5.2 (version always the same as SuiteSparse)
* AMD version 2.4.5
* BTF version 1.2.5
* CAMD version 2.4.5
* CCOLAMD version 2.9.5
* CHOLMOD version 3.0.10
* COLAMD version 2.9.5
* CSparse version 3.1.8
* CXSparse version 3.1.8
* GPUQREngine version 1.0.4
* KLU version 1.3.7
* LDL version 2.2.5
* RBio version 2.2.5
* SPQR version 2.0.6
* SuiteSparse_GPURuntime version 1.0.4
* UMFPACK version 5.7.5
* MATLAB_Tools various packages & M-files
* xerbla version 1.0.2
*
* Other package dependencies:
* BLAS required by CHOLMOD and UMFPACK
* LAPACK required by CHOLMOD
* METIS 5.1.0 required by CHOLMOD (optional) and KLU (optional)
* CUBLAS, CUDART NVIDIA libraries required by CHOLMOD and SPQR when
* they are compiled with GPU acceleration.
*/
int SuiteSparse_version /* returns SUITESPARSE_VERSION */
(
/* output, not defined on input. Not used if NULL. Returns
the three version codes in version [0..2]:
version [0] is SUITESPARSE_MAIN_VERSION
version [1] is SUITESPARSE_SUB_VERSION
version [2] is SUITESPARSE_SUBSUB_VERSION
*/
int version [3]
) ;
/* Versions prior to 4.2.0 do not have the above function. The following
code fragment will work with any version of SuiteSparse:
#ifdef SUITESPARSE_HAS_VERSION_FUNCTION
v = SuiteSparse_version (NULL) ;
#else
v = SUITESPARSE_VERSION ;
#endif
*/
#define SUITESPARSE_HAS_VERSION_FUNCTION
#define SUITESPARSE_DATE "Apr 1, 2016"
#define SUITESPARSE_VER_CODE(main,sub) ((main) * 1000 + (sub))
#define SUITESPARSE_MAIN_VERSION 4
#define SUITESPARSE_SUB_VERSION 5
#define SUITESPARSE_SUBSUB_VERSION 2
#define SUITESPARSE_VERSION \
SUITESPARSE_VER_CODE(SUITESPARSE_MAIN_VERSION,SUITESPARSE_SUB_VERSION)
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,600 @@
#===============================================================================
# SuiteSparse_config.mk: common configuration file for all of SuiteSparse
#===============================================================================
# This file contains all configuration settings for all packages in SuiteSparse,
# except for CSparse (which is stand-alone) and the packages in MATLAB_Tools.
SUITESPARSE_VERSION = 4.5.2
#===============================================================================
# Options you can change without editing this file:
#===============================================================================
# To list the options you can modify at the 'make' command line, type
# 'make config', which also lists their default values. You can then
# change them with 'make OPTION=value'. For example, to use an INSTALL
# path of /my/path, and to use your own BLAS and LAPACK libraries, do:
#
# make install INSTALL=/my/path BLAS=-lmyblas LAPACK=-lmylapackgoeshere
#
# which will install the package into /my/path/lib and /my/path/include,
# and use -lmyblas -lmylapackgoeshere when building the demo program.
#===============================================================================
# Defaults for any system
#===============================================================================
#---------------------------------------------------------------------------
# SuiteSparse root directory
#---------------------------------------------------------------------------
# Most Makefiles are in SuiteSparse/Pkg/Lib or SuiteSparse/Pkg/Demo, so
# the top-level of SuiteSparse is in ../.. unless otherwise specified.
# This is true for all but the SuiteSparse_config package.
SUITESPARSE ?= $(realpath $(CURDIR)/../..)
#---------------------------------------------------------------------------
# installation location
#---------------------------------------------------------------------------
# For "make install" and "make uninstall", the default location is
# SuiteSparse/lib, SuiteSparse/include, and
# SuiteSparse/share/doc/suitesparse-x.y.z
# If you do this:
# make install INSTALL=/usr/local
# then the libraries are installed in /usr/local/lib, include files in
# /usr/local/include, and documentation in
# /usr/local/share/doc/suitesparse-x.y.z.
# You can instead specify the install location of each of these 3 components
# separately, via (for example):
# make install INSTALL_LIB=/yada/mylibs INSTALL_INCLUDE=/yoda/myinc \
# INSTALL_DOC=/solo/mydox
# which puts the libraries in /yada/mylibs, include files in /yoda/myinc,
# and documentation in /solo/mydox.
INSTALL ?= $(SUITESPARSE)
INSTALL_LIB ?= $(INSTALL)/lib
INSTALL_INCLUDE ?= $(INSTALL)/include
INSTALL_DOC ?= $(INSTALL)/share/doc/suitesparse-$(SUITESPARSE_VERSION)
#---------------------------------------------------------------------------
# optimization level
#---------------------------------------------------------------------------
OPTIMIZATION ?= -O3
#---------------------------------------------------------------------------
# statement coverage for */Tcov
#---------------------------------------------------------------------------
ifeq ($(TCOV),yes)
# Each package has a */Tcov directory for extensive testing, including
# statement coverage. The Tcov tests require Linux and gcc, and use
# the vanilla BLAS. For those tests, the packages use 'make TCOV=yes',
# which overrides the following settings:
MKLROOT =
AUTOCC = no
CC = gcc
CXX = g++
BLAS = -lrefblas -lgfortran -lstdc++
LAPACK = -llapack
CFLAGS += --coverage
OPTIMIZATION = -g
LDFLAGS += --coverage
endif
#---------------------------------------------------------------------------
# CFLAGS for the C/C++ compiler
#---------------------------------------------------------------------------
# The CF macro is used by SuiteSparse Makefiles as a combination of
# CFLAGS, CPPFLAGS, TARGET_ARCH, and system-dependent settings.
CF ?= $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) $(OPTIMIZATION) -fexceptions -fPIC
#---------------------------------------------------------------------------
# OpenMP is used in CHOLMOD
#---------------------------------------------------------------------------
# with gcc, enable OpenMP directives via -fopenmp
# This is not supported on Darwin, so this string is cleared, below.
CFOPENMP ?= -fopenmp
#---------------------------------------------------------------------------
# compiler
#---------------------------------------------------------------------------
# By default, look for the Intel compilers. If present, they are used
# instead of $(CC), $(CXX), and $(F77). To disable this feature and
# use the $(CC), $(CXX), and $(F77) compilers, use 'make AUTOCC=no'
AUTOCC ?= yes
ifneq ($(AUTOCC),no)
ifneq ($(shell which icc 2>/dev/null),)
# use the Intel icc compiler for C codes, and -qopenmp for OpenMP
CC = icc -D_GNU_SOURCE
CXX = $(CC)
CFOPENMP = -qopenmp -I$(MKLROOT)/include
endif
ifneq ($(shell which ifort 2>/dev/null),)
# use the Intel ifort compiler for Fortran codes
F77 = ifort
endif
endif
#---------------------------------------------------------------------------
# code formatting (for Tcov only)
#---------------------------------------------------------------------------
PRETTY ?= grep -v "^\#" | indent -bl -nce -bli0 -i4 -sob -l120
#---------------------------------------------------------------------------
# required libraries
#---------------------------------------------------------------------------
# SuiteSparse requires the BLAS, LAPACK, and -lm (Math) libraries.
# It places its shared *.so libraries in SuiteSparse/lib.
# Linux also requires the -lrt library (see below)
LDLIBS ?= -lm
LDFLAGS += -L$(INSTALL_LIB)
# See http://www.openblas.net for a recent and freely available optimized
# BLAS. LAPACK is at http://www.netlib.org/lapack/ . You can use the
# standard Fortran LAPACK along with OpenBLAS to obtain very good
# performance. This script can also detect if the Intel MKL BLAS is
# installed.
LAPACK ?= -llapack
ifndef BLAS
ifdef MKLROOT
# use the Intel MKL for BLAS and LAPACK
# using static linking:
# BLAS = -Wl,--start-group \
# $(MKLROOT)/lib/intel64/libmkl_intel_lp64.a \
# $(MKLROOT)/lib/intel64/libmkl_core.a \
# $(MKLROOT)/lib/intel64/libmkl_intel_thread.a \
# -Wl,--end-group -lpthread -lm
# using dynamic linking:
BLAS = -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm
LAPACK =
else
# use the OpenBLAS at http://www.openblas.net
BLAS = -lopenblas
endif
endif
# For ACML, use this instead:
# make BLAS='-lacml -lgfortran'
#---------------------------------------------------------------------------
# shell commands
#---------------------------------------------------------------------------
# ranlib, and ar, for generating libraries. If you don't need ranlib,
# just change it to RANLIB = echo
RANLIB ?= ranlib
ARCHIVE ?= $(AR) $(ARFLAGS)
CP ?= cp -f
MV ?= mv -f
#---------------------------------------------------------------------------
# Fortran compiler (not required for 'make' or 'make library')
#---------------------------------------------------------------------------
# A Fortran compiler is optional. Only required for the optional Fortran
# interfaces to AMD and UMFPACK. Not needed by 'make' or 'make install'
F77 ?= gfortran
F77FLAGS ?= $(FFLAGS) $(OPTIMIZATION)
#---------------------------------------------------------------------------
# NVIDIA CUDA configuration for CHOLMOD and SPQR
#---------------------------------------------------------------------------
# CUDA is detected automatically, and used if found. To disable CUDA,
# use CUDA=no
ifneq ($(CUDA),no)
CUDA_PATH = $(shell which nvcc 2>/dev/null | sed "s/\/bin\/nvcc//")
endif
ifeq ($(wildcard $(CUDA_PATH)),)
# CUDA is not present
CUDA_PATH =
GPU_BLAS_PATH =
GPU_CONFIG =
CUDART_LIB =
CUBLAS_LIB =
CUDA_INC_PATH =
CUDA_INC =
NVCC = echo
NVCCFLAGS =
else
# with CUDA for CHOLMOD and SPQR
GPU_BLAS_PATH = $(CUDA_PATH)
# GPU_CONFIG must include -DGPU_BLAS to compile SuiteSparse for the
# GPU. You can add additional GPU-related flags to it as well.
# with 4 cores (default):
GPU_CONFIG = -DGPU_BLAS
# For example, to compile CHOLMOD for 10 CPU cores when using the GPU:
# GPU_CONFIG = -DGPU_BLAS -DCHOLMOD_OMP_NUM_THREADS=10
CUDART_LIB = $(CUDA_PATH)/lib64/libcudart.so
CUBLAS_LIB = $(CUDA_PATH)/lib64/libcublas.so
CUDA_INC_PATH = $(CUDA_PATH)/include/
CUDA_INC = -I$(CUDA_INC_PATH)
NVCC = $(CUDA_PATH)/bin/nvcc
NVCCFLAGS = -Xcompiler -fPIC -O3 \
-gencode=arch=compute_20,code=sm_20 \
-gencode=arch=compute_30,code=sm_30 \
-gencode=arch=compute_35,code=sm_35 \
-gencode=arch=compute_50,code=sm_50 \
-gencode=arch=compute_50,code=compute_50
endif
#---------------------------------------------------------------------------
# UMFPACK configuration:
#---------------------------------------------------------------------------
# Configuration for UMFPACK. See UMFPACK/Source/umf_config.h for details.
#
# -DNBLAS do not use the BLAS. UMFPACK will be very slow.
# -D'LONGBLAS=long' or -DLONGBLAS='long long' defines the integers used by
# LAPACK and the BLAS (defaults to 'int')
# -DNSUNPERF do not use the Sun Perf. Library on Solaris
# -DNRECIPROCAL do not multiply by the reciprocal
# -DNO_DIVIDE_BY_ZERO do not divide by zero
# -DNCHOLMOD do not use CHOLMOD as an ordering method. If -DNCHOLMOD is
# included in UMFPACK_CONFIG, then UMFPACK does not rely on
# CHOLMOD, CAMD, CCOLAMD, COLAMD, and METIS.
UMFPACK_CONFIG ?=
# For example, uncomment this line to compile UMFPACK without CHOLMOD:
# UMFPACK_CONFIG = -DNCHOLMOD
# or use 'make UMFPACK_CONFIG=-DNCHOLMOD'
#---------------------------------------------------------------------------
# CHOLMOD configuration
#---------------------------------------------------------------------------
# CHOLMOD Library Modules, which appear in -lcholmod
# Core requires: none
# Check requires: Core
# Cholesky requires: Core, AMD, COLAMD. optional: Partition, Supernodal
# MatrixOps requires: Core
# Modify requires: Core
# Partition requires: Core, CCOLAMD, METIS. optional: Cholesky
# Supernodal requires: Core, BLAS, LAPACK
#
# CHOLMOD test/demo Modules (these do not appear in -lcholmod):
# Tcov requires: Core, Check, Cholesky, MatrixOps, Modify, Supernodal
# optional: Partition
# Valgrind same as Tcov
# Demo requires: Core, Check, Cholesky, MatrixOps, Supernodal
# optional: Partition
#
# Configuration flags:
# -DNCHECK do not include the Check module.
# -DNCHOLESKY do not include the Cholesky module.
# -DNPARTITION do not include the Partition module.
# also do not include METIS.
# -DNCAMD do not use CAMD & CCOLAMD in the Partition module.
# -DNMATRIXOPS do not include the MatrixOps module.
# -DNMODIFY do not include the Modify module.
# -DNSUPERNODAL do not include the Supernodal module.
#
# -DNPRINT do not print anything.
# -D'LONGBLAS=long' or -DLONGBLAS='long long' defines the integers used by
# LAPACK and the BLAS (defaults to 'int')
# -DNSUNPERF for Solaris only. If defined, do not use the Sun
# Performance Library
# -DGPU_BLAS enable the use of the CUDA BLAS
CHOLMOD_CONFIG ?= $(GPU_CONFIG)
#---------------------------------------------------------------------------
# SuiteSparseQR configuration:
#---------------------------------------------------------------------------
# The SuiteSparseQR library can be compiled with the following options:
#
# -DNPARTITION do not include the CHOLMOD partition module
# -DNEXPERT do not include the functions in SuiteSparseQR_expert.cpp
# -DHAVE_TBB enable the use of Intel's Threading Building Blocks
# -DGPU_BLAS enable the use of the CUDA BLAS
SPQR_CONFIG ?= $(GPU_CONFIG)
# to compile with Intel's TBB, use TBB=-ltbb SPQR_CONFIG=-DHAVE_TBB
TBB ?=
# TODO: this *mk file should auto-detect the presence of Intel's TBB,
# and set the compiler flags accordingly.
#===============================================================================
# System-dependent configurations
#===============================================================================
#---------------------------------------------------------------------------
# determine what system we are on
#---------------------------------------------------------------------------
# To disable these auto configurations, use 'make UNAME=custom'
ifndef UNAME
ifeq ($(OS),Windows_NT)
# Cygwin Make on Windows has an $(OS) variable, but not uname.
# Note that this option is untested.
UNAME = Windows
else
# Linux and Darwin (Mac OSX) have been tested.
UNAME := $(shell uname)
endif
endif
#---------------------------------------------------------------------------
# Linux
#---------------------------------------------------------------------------
ifeq ($(UNAME),Linux)
# add the realtime library, librt, and SuiteSparse/lib
LDLIBS += -lrt -Wl,-rpath=$(INSTALL_LIB)
endif
#---------------------------------------------------------------------------
# Mac
#---------------------------------------------------------------------------
ifeq ($(UNAME), Darwin)
# To compile on the Mac, you must install Xcode. Then do this at the
# command line in the Terminal, before doing 'make':
# xcode-select --install
CF += -fno-common
BLAS = -framework Accelerate
LAPACK = -framework Accelerate
# OpenMP is not yet supported by default in clang
CFOPENMP =
endif
#---------------------------------------------------------------------------
# Solaris
#---------------------------------------------------------------------------
ifeq ($(UNAME), SunOS)
# Using the Sun compiler and the Sun Performance Library
# This hasn't been tested recently.
# I leave it here in case you need it. It likely needs updating.
CF += -fast -KPIC -xc99=%none -xlibmieee -xlibmil -m64 -Xc
F77FLAGS = -O -fast -KPIC -dalign -xlibmil -m64
BLAS = -xlic_lib=sunperf
LAPACK =
# Using the GCC compiler and the reference BLAS
## CC = gcc
## CXX = g++
## MAKE = gmake
## BLAS = -lrefblas -lgfortran
## LAPACK = -llapack
endif
#---------------------------------------------------------------------------
# IBM AIX
#---------------------------------------------------------------------------
ifeq ($(UNAME), AIX)
# hasn't been tested for a very long time...
# I leave it here in case you need it. It likely needs updating.
CF += -O4 -qipa -qmaxmem=16384 -q64 -qproto -DBLAS_NO_UNDERSCORE
F77FLAGS = -O4 -qipa -qmaxmem=16384 -q64
BLAS = -lessl
LAPACK =
endif
#===============================================================================
# finalize the CF compiler flags
#===============================================================================
CF += $(CFOPENMP)
#===============================================================================
# internal configuration
#===============================================================================
# The user should not have to change these definitions, and they are
# not displayed by 'make config'
#---------------------------------------------------------------------------
# for removing files not in the distribution
#---------------------------------------------------------------------------
# remove object files, but keep compiled libraries via 'make clean'
CLEAN = *.o *.obj *.ln *.bb *.bbg *.da *.tcov *.gcov gmon.out *.bak *.d \
*.gcda *.gcno *.aux *.bbl *.blg *.log *.toc *.dvi *.lof *.lot
# also remove compiled libraries, via 'make distclean'
PURGE = *.so* *.a *.dll *.dylib *.dSYM
# location of TCOV test output
TCOV_TMP ?= /tmp
#===============================================================================
# Building the shared and static libraries
#===============================================================================
# How to build/install shared and static libraries for Mac and Linux/Unix.
# This assumes that LIBRARY and VERSION have already been defined by the
# Makefile that includes this file.
SO_OPTS = $(LDFLAGS)
ifeq ($(UNAME),Windows)
# Cygwin Make on Windows (untested)
AR_TARGET = $(LIBRARY).lib
SO_PLAIN = $(LIBRARY).dll
SO_MAIN = $(LIBRARY).$(SO_VERSION).dll
SO_TARGET = $(LIBRARY).$(VERSION).dll
SO_INSTALL_NAME = echo
else
# Mac or Linux/Unix
AR_TARGET = $(LIBRARY).a
ifeq ($(UNAME),Darwin)
# Mac
SO_PLAIN = $(LIBRARY).dylib
SO_MAIN = $(LIBRARY).$(SO_VERSION).dylib
SO_TARGET = $(LIBRARY).$(VERSION).dylib
SO_OPTS += -dynamiclib -compatibility_version $(SO_VERSION) \
-current_version $(VERSION) \
-shared -undefined dynamic_lookup
# When a Mac *.dylib file is moved, this command is required
# to change its internal name to match its location in the filesystem:
SO_INSTALL_NAME = install_name_tool -id
else
# Linux and other variants of Unix
SO_PLAIN = $(LIBRARY).so
SO_MAIN = $(LIBRARY).so.$(SO_VERSION)
SO_TARGET = $(LIBRARY).so.$(VERSION)
SO_OPTS += -shared -Wl,-soname -Wl,$(SO_MAIN) -Wl,--no-undefined
# Linux/Unix *.so files can be moved without modification:
SO_INSTALL_NAME = echo
endif
endif
#===============================================================================
# Configure CHOLMOD/Partition module with METIS, CAMD, and CCOLAMD
#===============================================================================
# By default, SuiteSparse uses METIS 5.1.0 in the SuiteSparse/metis-5.1.0
# directory. SuiteSparse's interface to METIS is only through the
# SuiteSparse/CHOLMOD/Partition module, which also requires SuiteSparse/CAMD
# and SuiteSparse/CCOLAMD.
#
# If you wish to use your own pre-installed copy of METIS, use the MY_METIS_LIB
# and MY_METIS_INC options passed to 'make'. For example:
# make MY_METIS_LIB=-lmetis
# make MY_METIS_LIB=/home/myself/mylibraries/libmetis.so
# make MY_METIS_LIB='-L/home/myself/mylibraries -lmetis'
# If you need to tell the compiler where to find the metis.h include file,
# then add MY_METIS_INC=/home/myself/metis-5.1.0/include as well, which points
# to the directory containing metis.h. If metis.h is already installed in
# a location known to the compiler (/usr/local/include/metis.h for example)
# then you do not need to add MY_METIS_INC.
I_WITH_PARTITION =
LIB_WITH_PARTITION =
CONFIG_PARTITION = -DNPARTITION -DNCAMD
# check if CAMD/CCOLAMD and METIS are requested and available
ifeq (,$(findstring -DNCAMD, $(CHOLMOD_CONFIG)))
# CAMD and CCOLAMD are requested. See if they are available in
# SuiteSparse/CAMD and SuiteSparse/CCOLAMD
ifneq (, $(wildcard $(SUITESPARSE)/CAMD))
ifneq (, $(wildcard $(SUITESPARSE)/CCOLAMD))
# CAMD and CCOLAMD are requested and available
LIB_WITH_PARTITION = -lccolamd -lcamd
I_WITH_PARTITION = -I$(SUITESPARSE)/CCOLAMD/Include -I$(SUITESPARSE)/CAMD/Include
CONFIG_PARTITION = -DNPARTITION
# check if METIS is requested and available
ifeq (,$(findstring -DNPARTITION, $(CHOLMOD_CONFIG)))
# METIS is requested. See if it is available.
ifneq (,$(MY_METIS_LIB))
# METIS 5.1.0 is provided elsewhere, and we are not using
# SuiteSparse/metis-5.1.0. To do so, we link with
# $(MY_METIS_LIB) and add the -I$(MY_METIS_INC) option for
# the compiler. The latter can be empty if you have METIS
# installed in a place where the compiler can find the
# metis.h include file by itself without any -I option
# (/usr/local/include/metis.h for example).
LIB_WITH_PARTITION += $(MY_METIS_LIB)
ifneq (,$(MY_METIS_INC))
I_WITH_PARTITION += -I$(MY_METIS_INC)
endif
CONFIG_PARTITION =
else
# see if METIS is in SuiteSparse/metis-5.1.0
ifneq (, $(wildcard $(SUITESPARSE)/metis-5.1.0))
# SuiteSparse/metis-5.1.0 is available
ifeq ($(UNAME), Darwin)
LIB_WITH_PARTITION += $(SUITESPARSE)/lib/libmetis.dylib
else
LIB_WITH_PARTITION += -lmetis
endif
I_WITH_PARTITION += -I$(SUITESPARSE)/metis-5.1.0/include
CONFIG_PARTITION =
endif
endif
endif
endif
endif
endif
#===============================================================================
# display configuration
#===============================================================================
ifeq ($(LIBRARY),)
# placeholders, for 'make config' in the top-level SuiteSparse
LIBRARY=PackageNameWillGoHere
VERSION=x.y.z
SO_VERSION=x
endif
# 'make config' lists the primary installation options
config:
@echo ' '
@echo '----------------------------------------------------------------'
@echo 'SuiteSparse package compilation options:'
@echo '----------------------------------------------------------------'
@echo ' '
@echo 'SuiteSparse Version: ' '$(SUITESPARSE_VERSION)'
@echo 'SuiteSparse top folder: ' '$(SUITESPARSE)'
@echo 'Package: LIBRARY= ' '$(LIBRARY)'
@echo 'Version: VERSION= ' '$(VERSION)'
@echo 'SO version: SO_VERSION= ' '$(SO_VERSION)'
@echo 'System: UNAME= ' '$(UNAME)'
@echo 'Install directory: INSTALL= ' '$(INSTALL)'
@echo 'Install libraries in: INSTALL_LIB= ' '$(INSTALL_LIB)'
@echo 'Install include files in: INSTALL_INCLUDE=' '$(INSTALL_INCLUDE)'
@echo 'Install documentation in: INSTALL_DOC= ' '$(INSTALL_DOC)'
@echo 'Optimization level: OPTIMIZATION= ' '$(OPTIMIZATION)'
@echo 'BLAS library: BLAS= ' '$(BLAS)'
@echo 'LAPACK library: LAPACK= ' '$(LAPACK)'
@echo 'Intel TBB library: TBB= ' '$(TBB)'
@echo 'Other libraries: LDLIBS= ' '$(LDLIBS)'
@echo 'static library: AR_TARGET= ' '$(AR_TARGET)'
@echo 'shared library (full): SO_TARGET= ' '$(SO_TARGET)'
@echo 'shared library (main): SO_MAIN= ' '$(SO_MAIN)'
@echo 'shared library (short): SO_PLAIN= ' '$(SO_PLAIN)'
@echo 'shared library options: SO_OPTS= ' '$(SO_OPTS)'
@echo 'shared library name tool: SO_INSTALL_NAME=' '$(SO_INSTALL_NAME)'
@echo 'ranlib, for static libs: RANLIB= ' '$(RANLIB)'
@echo 'static library command: ARCHIVE= ' '$(ARCHIVE)'
@echo 'copy file: CP= ' '$(CP)'
@echo 'move file: MV= ' '$(MV)'
@echo 'remove file: RM= ' '$(RM)'
@echo 'pretty (for Tcov tests): PRETTY= ' '$(PRETTY)'
@echo 'C compiler: CC= ' '$(CC)'
@echo 'C++ compiler: CXX= ' '$(CXX)'
@echo 'CUDA compiler: NVCC= ' '$(NVCC)'
@echo 'CUDA root directory: CUDA_PATH= ' '$(CUDA_PATH)'
@echo 'OpenMP flags: CFOPENMP= ' '$(CFOPENMP)'
@echo 'C/C++ compiler flags: CF= ' '$(CF)'
@echo 'LD flags: LDFLAGS= ' '$(LDFLAGS)'
@echo 'Fortran compiler: F77= ' '$(F77)'
@echo 'Fortran flags: F77FLAGS= ' '$(F77FLAGS)'
@echo 'Intel MKL root: MKLROOT= ' '$(MKLROOT)'
@echo 'Auto detect Intel icc: AUTOCC= ' '$(AUTOCC)'
@echo 'UMFPACK config: UMFPACK_CONFIG= ' '$(UMFPACK_CONFIG)'
@echo 'CHOLMOD config: CHOLMOD_CONFIG= ' '$(CHOLMOD_CONFIG)'
@echo 'SuiteSparseQR config: SPQR_CONFIG= ' '$(SPQR_CONFIG)'
@echo 'CUDA library: CUDART_LIB= ' '$(CUDART_LIB)'
@echo 'CUBLAS library: CUBLAS_LIB= ' '$(CUBLAS_LIB)'
@echo 'METIS and CHOLMOD/Partition configuration:'
@echo 'Your METIS library: MY_METIS_LIB= ' '$(MY_METIS_LIB)'
@echo 'Your metis.h is in: MY_METIS_INC= ' '$(MY_METIS_INC)'
@echo 'METIS is used via the CHOLMOD/Partition module, configured as follows.'
@echo 'If the next line has -DNPARTITION then METIS will not be used:'
@echo 'CHOLMOD Partition config: ' '$(CONFIG_PARTITION)'
@echo 'CHOLMOD Partition libs: ' '$(LIB_WITH_PARTITION)'
@echo 'CHOLMOD Partition include:' '$(I_WITH_PARTITION)'
ifeq ($(TCOV),yes)
@echo 'TCOV=yes, for extensive testing only (gcc, g++, vanilla BLAS)'
endif

View File

@ -0,0 +1,73 @@
# Makefile for null-output xerbla, both C and Fortran versions.
# By default, the C version (libcerbla.a and *.so) is compiled and installed.
# Set the USE_FORTRAN option to 1 to create the Fortran version instead (libxerbla):
USE_FORTRAN = 0
# USE_FORTRAN = 1
VERSION = 1.0.2
SO_VERSION = 1
default: library
# compile and install in SuiteSparse/lib
library:
$(MAKE) install INSTALL=$(SUITESPARSE)
all: library
ifeq ($(USE_FORTRAN),0)
LIBRARY = libcerbla
else
LIBRARY = libxerbla
endif
include ../SuiteSparse_config.mk
ifeq ($(USE_FORTRAN),0)
COMPILE = $(CC) $(CF) -c xerbla.c
DEPENDS = xerbla.c xerbla.h
else
COMPILE = $(F77) $(F77FLAGS) -c xerbla.f
DEPENDS = xerbla.f
endif
ccode: all
fortran: all
$(AR_TARGET): $(DEPENDS)
$(COMPILE)
$(ARCHIVE) $(AR_TARGET) xerbla.o
- $(RANLIB) $(AR_TARGET)
- $(RM) xerbla.o
# install libcerbla / libxerbla
install: $(AR_TARGET) $(INSTALL_LIB)/$(SO_TARGET)
$(INSTALL_LIB)/$(SO_TARGET): $(DEPENDS)
@mkdir -p $(INSTALL_LIB)
@mkdir -p $(INSTALL_INCLUDE)
@mkdir -p $(INSTALL_DOC)
$(COMPILE)
$(CC) $(SO_OPTS) xerbla.o -o $@
- $(RM) xerbla.o
( cd $(INSTALL_LIB) ; ln -sf $(SO_TARGET) $(SO_PLAIN) )
( cd $(INSTALL_LIB) ; ln -sf $(SO_TARGET) $(SO_MAIN) )
$(CP) xerbla.h $(INSTALL_INCLUDE)
chmod 755 $(INSTALL_LIB)/$(SO_TARGET)
chmod 644 $(INSTALL_INCLUDE)/xerbla.h
# uninstall libcerbla / libxerbla
uninstall:
$(RM) $(INSTALL_LIB)/$(SO_TARGET)
$(RM) $(INSTALL_LIB)/$(SO_PLAIN)
$(RM) $(INSTALL_INCLUDE)/xerbla.h
distclean: purge
purge: clean
- $(RM) -r $(PURGE)
clean:
- $(RM) -r $(CLEAN)
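For context, the whole point of this library is a xerbla that deliberately prints nothing, so a C application linking the BLAS/LAPACK does not have to pull in the Fortran I/O runtime. A minimal sketch of the idea (illustrative only, not the shipped xerbla.c; the exact symbol name and any hidden string-length argument depend on your BLAS's Fortran calling convention):

// Sketch of a do-nothing xerbla callable from Fortran BLAS/LAPACK.
// Assumes the common lowercase-with-trailing-underscore name mangling;
// some BLAS builds expect XERBLA or xerbla instead.
extern "C" void xerbla_(const char* /*srname*/, const int* /*info*/) {
  // Intentionally print nothing and do not abort the application.
}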

View File

@ -1,35 +0,0 @@
UFconfig contains configuration settings for many of the software packages
that I develop or co-author. Note that older versions of some of these packages
do not require UFconfig.
Package Description
------- -----------
AMD approximate minimum degree ordering
CAMD constrained AMD
COLAMD column approximate minimum degree ordering
CCOLAMD constrained approximate minimum degree ordering
UMFPACK sparse LU factorization, with the BLAS
CXSparse int/long/real/complex version of CSparse
CHOLMOD sparse Cholesky factorization, update/downdate
KLU sparse LU factorization, BLAS-free
BTF permutation to block triangular form
LDL concise sparse LDL'
LPDASA LP Dual Active Set Algorithm
SuiteSparseQR sparse QR factorization
UFconfig is not required by:
CSparse a Concise Sparse matrix package
RBio read/write files in Rutherford/Boeing format
UFcollection tools for managing the UF Sparse Matrix Collection
LINFACTOR simple m-file to show how to use LU and CHOL to solve Ax=b
MESHND 2D and 3D mesh generation and nested dissection ordering
MATLAB_Tools misc collection of m-files
SSMULT sparse matrix times sparse matrix, for use in MATLAB
In addition, the xerbla/ directory contains Fortran and C versions of the
BLAS/LAPACK xerbla routine, which is called when an invalid input is passed to
the BLAS or LAPACK. The xerbla provided here does not print any message, so
the entire Fortran I/O library does not need to be linked into a C application.
Most versions of the BLAS contain xerbla, but those from K. Goto do not. Use
this if you need to.

View File

@ -1,71 +0,0 @@
/* ========================================================================== */
/* === UFconfig ============================================================= */
/* ========================================================================== */
/* Copyright (c) 2009, University of Florida. No licensing restrictions
* apply to this file or to the UFconfig directory. Author: Timothy A. Davis.
*/
#include "UFconfig.h"
/* -------------------------------------------------------------------------- */
/* UFmalloc: malloc wrapper */
/* -------------------------------------------------------------------------- */
void *UFmalloc /* pointer to allocated block of memory */
(
size_t nitems, /* number of items to malloc (>=1 is enforced) */
size_t size_of_item, /* sizeof each item */
int *ok, /* TRUE if successful, FALSE otherwise */
UFconfig *config /* SuiteSparse-wide configuration */
)
{
void *p ;
if (nitems < 1) nitems = 1 ;
if (nitems * size_of_item != ((double) nitems) * size_of_item)
{
/* Int overflow */
*ok = 0 ;
return (NULL) ;
}
if (!config || config->malloc_memory == NULL)
{
/* use malloc by default */
p = (void *) malloc (nitems * size_of_item) ;
}
else
{
/* use the pointer to malloc in the config */
p = (void *) (config->malloc_memory) (nitems * size_of_item) ;
}
*ok = (p != NULL) ;
return (p) ;
}
/* -------------------------------------------------------------------------- */
/* UFfree: free wrapper */
/* -------------------------------------------------------------------------- */
void *UFfree /* always returns NULL */
(
void *p, /* block to free */
UFconfig *config /* SuiteSparse-wide configuration */
)
{
if (p)
{
if (!config || config->free_memory == NULL)
{
/* use free by default */
free (p) ;
}
else
{
/* use the pointer to free in the config */
(config->free_memory) (p) ;
}
}
return (NULL) ;
}
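A hypothetical caller of these (now removed) wrappers, for reference; passing a NULL config, or a config whose function pointers are NULL, falls back to plain malloc/free:

/* Hypothetical usage sketch, not part of the library. */
#include "UFconfig.h"
static void allocation_example (void)
{
    int ok = 0 ;
    double *x = (double *) UFmalloc (100, sizeof (double), &ok, NULL) ;
    if (ok)
    {
        x [0] = 3.14 ;                      /* use the block */
    }
    x = (double *) UFfree (x, NULL) ;       /* always returns NULL */
}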

View File

@ -1,152 +0,0 @@
/* ========================================================================== */
/* === UFconfig.h =========================================================== */
/* ========================================================================== */
/* Configuration file for SuiteSparse: a Suite of Sparse matrix packages
* (AMD, COLAMD, CCOLAMD, CAMD, CHOLMOD, UMFPACK, CXSparse, and others).
*
* UFconfig.h provides the definition of the long integer. On most systems,
* a C program can be compiled in LP64 mode, in which long's and pointers are
* both 64-bits, and int's are 32-bits. Windows 64, however, uses the LLP64
* model, in which int's and long's are 32-bits, and long long's and pointers
* are 64-bits.
*
* SuiteSparse packages that include long integer versions are
* intended for the LP64 mode. However, as a workaround for Windows 64
* (and perhaps other systems), the long integer can be redefined.
*
* If _WIN64 is defined, then the __int64 type is used instead of long.
*
* The long integer can also be defined at compile time. For example, this
* could be added to UFconfig.mk:
*
* CFLAGS = -O -D'UF_long=long long' -D'UF_long_max=9223372036854775801' \
* -D'UF_long_idd="lld"'
*
* This file defines UF_long as either long (on all but _WIN64) or
* __int64 on Windows 64. The intent is that a UF_long is always a 64-bit
* integer in a 64-bit code. ptrdiff_t might be a better choice than long;
* it is always the same size as a pointer.
*
* This file also defines the SUITESPARSE_VERSION and related definitions.
*
* Copyright (c) 2007, University of Florida. No licensing restrictions
* apply to this file or to the UFconfig directory. Author: Timothy A. Davis.
*/
#ifndef _UFCONFIG_H
#define _UFCONFIG_H
#ifdef __cplusplus
extern "C" {
#endif
#include <limits.h>
#include <stdlib.h>
/* ========================================================================== */
/* === UF_long ============================================================== */
/* ========================================================================== */
#ifndef UF_long
#ifdef _WIN64
#define UF_long __int64
#define UF_long_max _I64_MAX
#define UF_long_idd "I64d"
#else
#define UF_long long
#define UF_long_max LONG_MAX
#define UF_long_idd "ld"
#endif
#define UF_long_id "%" UF_long_idd
#endif
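A small usage sketch (hypothetical, not part of the library) of the macros above; after expansion, UF_long_id is a printf format string such as "%ld" or "%I64d":

#include <stdio.h>
#include "UFconfig.h"
static void print_nnz (UF_long nnz)
{
    printf ("nonzeros: " UF_long_id "\n", nnz) ;
}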
/* ========================================================================== */
/* === UFconfig parameters and functions ==================================== */
/* ========================================================================== */
/* SuiteSparse-wide parameters will be placed in this struct. So far, they
are only used by RBio. */
typedef struct UFconfig_struct
{
void *(*malloc_memory) (size_t) ; /* pointer to malloc */
void *(*realloc_memory) (void *, size_t) ; /* pointer to realloc */
void (*free_memory) (void *) ; /* pointer to free */
void *(*calloc_memory) (size_t, size_t) ; /* pointer to calloc */
} UFconfig ;
void *UFmalloc /* pointer to allocated block of memory */
(
size_t nitems, /* number of items to malloc (>=1 is enforced) */
size_t size_of_item, /* sizeof each item */
int *ok, /* TRUE if successful, FALSE otherwise */
UFconfig *config /* SuiteSparse-wide configuration */
) ;
void *UFfree /* always returns NULL */
(
void *p, /* block to free */
UFconfig *config /* SuiteSparse-wide configuration */
) ;
/* ========================================================================== */
/* === SuiteSparse version ================================================== */
/* ========================================================================== */
/* SuiteSparse is not a package itself, but a collection of packages, some of
* which must be used together (UMFPACK requires AMD, CHOLMOD requires AMD,
* COLAMD, CAMD, and CCOLAMD, etc). A version number is provided here for the
* collection itself. The versions of packages within each version of
* SuiteSparse are meant to work together. Combining one packge from one
* version of SuiteSparse, with another package from another version of
* SuiteSparse, may or may not work.
*
* SuiteSparse Version 3.6.1 contains the following packages:
*
* AMD version 2.2.2
* BTF version 1.1.2
* CAMD version 2.2.2
* CCOLAMD version 2.7.3
* CHOLMOD version 1.7.3
* COLAMD version 2.7.3
* CSparse version 2.2.5
* CSparse3 version 3.0.1
* CXSparse version 2.2.5
* KLU version 1.1.2
* LDL version 2.0.3
* RBio version 2.0.1
* SPQR version 1.2.2 (also called SuiteSparseQR)
* UFcollection version 1.5.0
* UFconfig version number is the same as SuiteSparse
* UMFPACK version 5.5.1
* LINFACTOR version 1.1.0
* MESHND version 1.1.1
* SSMULT version 2.0.2
* MATLAB_Tools no specific version number
*
* Other package dependencies:
* BLAS required by CHOLMOD and UMFPACK
* LAPACK required by CHOLMOD
* METIS 4.0.1 required by CHOLMOD (optional) and KLU (optional)
*/
#define SUITESPARSE_DATE "May 10, 2011"
#define SUITESPARSE_VER_CODE(main,sub) ((main) * 1000 + (sub))
#define SUITESPARSE_MAIN_VERSION 3
#define SUITESPARSE_SUB_VERSION 6
#define SUITESPARSE_SUBSUB_VERSION 1
#define SUITESPARSE_VERSION \
SUITESPARSE_VER_CODE(SUITESPARSE_MAIN_VERSION,SUITESPARSE_SUB_VERSION)
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,386 +0,0 @@
#===============================================================================
# UFconfig.mk: common configuration file for the SuiteSparse
#===============================================================================
# This file contains all configuration settings for all packages authored or
# co-authored by Tim Davis at the University of Florida:
#
# Package Version Description
# ------- ------- -----------
# AMD 1.2 or later approximate minimum degree ordering
# COLAMD 2.4 or later column approximate minimum degree ordering
# CCOLAMD 1.0 or later constrained column approximate minimum degree ordering
# CAMD any constrained approximate minimum degree ordering
# UMFPACK 4.5 or later sparse LU factorization, with the BLAS
# CHOLMOD any sparse Cholesky factorization, update/downdate
# KLU 0.8 or later sparse LU factorization, BLAS-free
# BTF 0.8 or later permutation to block triangular form
# LDL 1.2 or later concise sparse LDL'
# LPDASA any linear program solve (dual active set algorithm)
# CXSparse any extended version of CSparse (int/long, real/complex)
# SuiteSparseQR any sparse QR factorization
#
# The UFconfig directory and the above packages should all appear in a single
# directory, in order for the Makefiles within each package to find this file.
#
# To enable an option of the form "# OPTION = ...", edit this file and
# delete the "#" in the first column of the option you wish to use.
#------------------------------------------------------------------------------
# Generic configuration
#------------------------------------------------------------------------------
# C compiler and compiler flags: These will normally not give you optimal
# performance. You should select the optimization parameters that are best
# for your system. On Linux, use "CFLAGS = -O3 -fexceptions" for example.
CC = cc
CFLAGS = -O3 -fexceptions
# C++ compiler (also uses CFLAGS)
CPLUSPLUS = g++
# ranlib, and ar, for generating libraries
RANLIB = ranlib
AR = ar cr
# copy, delete, and rename a file
CP = cp -f
RM = rm -f
MV = mv -f
# Fortran compiler (not normally required)
F77 = f77
F77FLAGS = -O
F77LIB =
# C and Fortran libraries
LIB = -lm
# For compiling MATLAB mexFunctions (MATLAB 7.5 or later)
MEX = mex -O -largeArrayDims -lmwlapack -lmwblas
# For compiling MATLAB mexFunctions (MATLAB 7.3 and 7.4)
# MEX = mex -O -largeArrayDims -lmwlapack
# For MATLAB 7.2 or earlier, you must use one of these options:
# MEX = mex -O -lmwlapack
# MEX = mex -O
# Which version of MAKE you are using (default is "make")
# MAKE = make
# MAKE = gmake
# For "make install"
INSTALL_LIB = /usr/local/lib
INSTALL_INCLUDE = /usr/local/include
#------------------------------------------------------------------------------
# BLAS and LAPACK configuration:
#------------------------------------------------------------------------------
# UMFPACK and CHOLMOD both require the BLAS. CHOLMOD also requires LAPACK.
# See Kazushige Goto's BLAS at http://www.cs.utexas.edu/users/flame/goto/ or
# http://www.tacc.utexas.edu/~kgoto/ for the best BLAS to use with CHOLMOD.
# LAPACK is at http://www.netlib.org/lapack/ . You can use the standard
# Fortran LAPACK along with Goto's BLAS to obtain very good performance.
# CHOLMOD gets a peak numeric factorization rate of 3.6 Gflops on a 3.2 GHz
# Pentium 4 (512K cache, 4GB main memory) with the Goto BLAS, and 6 Gflops
# on a 2.5Ghz dual-core AMD Opteron.
# These settings will probably not work, since there is no fixed convention for
# naming the BLAS and LAPACK library (*.a or *.so) files.
# This is probably slow ... it might connect to the Standard Reference BLAS:
BLAS = -lblas -lgfortran
LAPACK = -llapack
# NOTE: this next option for the "Goto BLAS" has nothing to do with a "goto"
# statement. Rather, the Goto BLAS is written by Dr. Kazushige Goto.
# Using the Goto BLAS:
# BLAS = -lgoto -lgfortran -lgfortranbegin
# Using non-optimized versions:
# BLAS = -lblas_plain -lgfortran -lgfortranbegin
# LAPACK = -llapack_plain
# BLAS = -lblas_plain -lgfortran -lgfortranbegin
# LAPACK = -llapack
# The BLAS might not contain xerbla, an error-handling routine for LAPACK and
# the BLAS. Also, the standard xerbla requires the Fortran I/O library, and
# stops the application program if an error occurs. A C version of xerbla
# distributed with this software (UFconfig/xerbla/libcerbla.a) includes a
# Fortran-callable xerbla routine that prints nothing and does not stop the
# application program. This is optional.
# XERBLA = ../../UFconfig/xerbla/libcerbla.a
# If you wish to use the XERBLA in LAPACK and/or the BLAS instead,
# use this option:
XERBLA =
# If you wish to use the Fortran UFconfig/xerbla/xerbla.f instead, use this:
# XERBLA = ../../UFconfig/xerbla/libxerbla.a
#------------------------------------------------------------------------------
# METIS, optionally used by CHOLMOD
#------------------------------------------------------------------------------
# If you do not have METIS, or do not wish to use it in CHOLMOD, you must
# compile CHOLMOD with the -DNPARTITION flag. You must also use the
# "METIS =" option, below.
# The path is relative to where it is used, in CHOLMOD/Lib, CHOLMOD/MATLAB, etc.
# You may wish to use an absolute path. METIS is optional. Compile
# CHOLMOD with -DNPARTITION if you do not wish to use METIS.
METIS_PATH = ../../metis-4.0
METIS = ../../metis-4.0/libmetis.a
# If you use CHOLMOD_CONFIG = -DNPARTITION then you must use the following
# options:
# METIS_PATH =
# METIS =
#------------------------------------------------------------------------------
# UMFPACK configuration:
#------------------------------------------------------------------------------
# Configuration flags for UMFPACK. See UMFPACK/Source/umf_config.h for details.
#
# -DNBLAS do not use the BLAS. UMFPACK will be very slow.
# -D'LONGBLAS=long' or -DLONGBLAS='long long' defines the integers used by
# LAPACK and the BLAS (defaults to 'int')
# -DNSUNPERF do not use the Sun Perf. Library (default is use it on Solaris)
# -DNPOSIX do not use POSIX routines sysconf and times.
# -DGETRUSAGE use getrusage
# -DNO_TIMER do not use any timing routines
# -DNRECIPROCAL do not multiply by the reciprocal
# -DNO_DIVIDE_BY_ZERO do not divide by zero
UMFPACK_CONFIG =
#------------------------------------------------------------------------------
# CHOLMOD configuration
#------------------------------------------------------------------------------
# CHOLMOD Library Modules, which appear in libcholmod.a:
# Core requires: none
# Check requires: Core
# Cholesky requires: Core, AMD, COLAMD. optional: Partition, Supernodal
# MatrixOps requires: Core
# Modify requires: Core
# Partition requires: Core, CCOLAMD, METIS. optional: Cholesky
# Supernodal requires: Core, BLAS, LAPACK
#
# CHOLMOD test/demo Modules (all are GNU GPL, do not appear in libcholmod.a):
# Tcov requires: Core, Check, Cholesky, MatrixOps, Modify, Supernodal
# optional: Partition
# Valgrind same as Tcov
# Demo requires: Core, Check, Cholesky, MatrixOps, Supernodal
# optional: Partition
#
# Configuration flags:
# -DNCHECK do not include the Check module. License GNU LGPL
# -DNCHOLESKY do not include the Cholesky module. License GNU LGPL
# -DNPARTITION do not include the Partition module. License GNU LGPL
# also do not include METIS.
# -DNGPL do not include any GNU GPL Modules in the CHOLMOD library:
# -DNMATRIXOPS do not include the MatrixOps module. License GNU GPL
# -DNMODIFY do not include the Modify module. License GNU GPL
# -DNSUPERNODAL do not include the Supernodal module. License GNU GPL
#
# -DNPRINT do not print anything.
# -D'LONGBLAS=long' or -DLONGBLAS='long long' defines the integers used by
# LAPACK and the BLAS (defaults to 'int')
# -DNSUNPERF for Solaris only. If defined, do not use the Sun
# Performance Library
CHOLMOD_CONFIG =
#------------------------------------------------------------------------------
# SuiteSparseQR configuration:
#------------------------------------------------------------------------------
# The SuiteSparseQR library can be compiled with the following options:
#
# -DNPARTITION do not include the CHOLMOD partition module
# -DNEXPERT do not include the functions in SuiteSparseQR_expert.cpp
# -DTIMING enable timing and flop counts
# -DHAVE_TBB enable the use of Intel's Threading Building Blocks (TBB)
# default, without timing, without TBB:
SPQR_CONFIG =
# with timing and TBB:
# SPQR_CONFIG = -DTIMING -DHAVE_TBB
# with timing
# SPQR_CONFIG = -DTIMING
# This is needed for IBM AIX: (but not for C codes, just C++)
# SPQR_CONFIG = -DBLAS_NO_UNDERSCORE
# with TBB, you must select this:
# TBB = -ltbb
# without TBB:
TBB =
# with timing, you must include the timing library:
# RTLIB = -lrt
# without timing
RTLIB =
#------------------------------------------------------------------------------
# Linux
#------------------------------------------------------------------------------
# Using default compilers:
# CC = gcc
# CFLAGS = -O3 -fexceptions
# alternatives:
# CFLAGS = -g -fexceptions \
-Wall -W -Wshadow -Wmissing-prototypes -Wstrict-prototypes \
-Wredundant-decls -Wnested-externs -Wdisabled-optimization -ansi \
-funit-at-a-time
# CFLAGS = -O3 -fexceptions \
-Wall -W -Werror -Wshadow -Wmissing-prototypes -Wstrict-prototypes \
-Wredundant-decls -Wnested-externs -Wdisabled-optimization -ansi
# CFLAGS = -O3 -fexceptions -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE
# CFLAGS = -O3
# CFLAGS = -O3 -g -fexceptions
# CFLAGS = -g -fexceptions \
-Wall -W -Wshadow \
-Wredundant-decls -Wdisabled-optimization -ansi
# consider:
# -fforce-addr -fmove-all-movables -freduce-all-givs -ftsp-ordering
# -frename-registers -ffast-math -funroll-loops
# Using the Goto BLAS:
# BLAS = -lgoto -lfrtbegin -lg2c $(XERBLA) -lpthread
# Using Intel's icc and ifort compilers:
# (does not work for mexFunctions unless you add a mexopts.sh file)
# F77 = ifort
# CC = icc
# CFLAGS = -O3 -xN -vec_report=0
# CFLAGS = -g
# old (broken): CFLAGS = -ansi -O3 -ip -tpp7 -xW -vec_report0
# 64bit:
# F77FLAGS = -O -m64
# CFLAGS = -O3 -fexceptions -m64
# BLAS = -lgoto64 -lfrtbegin -lg2c -lpthread $(XERBLA)
# LAPACK = -llapack64
# SUSE Linux 10.1, AMD Opteron, with GOTO Blas
# F77 = gfortran
# BLAS = -lgoto_opteron64 -lgfortran
# SUSE Linux 10.1, Intel Pentium, with GOTO Blas
# F77 = gfortran
# BLAS = -lgoto -lgfortran
#------------------------------------------------------------------------------
# Mac
#------------------------------------------------------------------------------
# As recommended by macports, http://suitesparse.darwinports.com/
# I've tested them myself on Mac OSX 10.6.1 (Snow Leopard), on my MacBook Air.
# F77 = gfortran
# CFLAGS = -O3 -fno-common -no-cpp-precomp -fexceptions
# BLAS = -framework Accelerate
# LAPACK = -framework Accelerate
# Using netlib.org LAPACK and BLAS compiled by gfortran, with and without
# optimization:
# BLAS = -lblas_plain -lgfortran
# LAPACK = -llapack_plain
# BLAS = -lblas_optimized -lgfortran
# LAPACK = -llapack_optimized
#------------------------------------------------------------------------------
# Solaris
#------------------------------------------------------------------------------
# 32-bit
# CFLAGS = -KPIC -dalign -xc99=%none -Xc -xlibmieee -xO5 -xlibmil -m32
# 64-bit
# CFLAGS = -fast -KPIC -xc99=%none -xlibmieee -xlibmil -m64 -Xc
# FFLAGS = -fast -KPIC -dalign -xlibmil -m64
# The Sun Performance Library includes both LAPACK and the BLAS:
# BLAS = -xlic_lib=sunperf
# LAPACK =
#------------------------------------------------------------------------------
# Compaq Alpha
#------------------------------------------------------------------------------
# 64-bit mode only
# CFLAGS = -O2 -std1
# BLAS = -ldxml
# LAPACK =
#------------------------------------------------------------------------------
# Macintosh
#------------------------------------------------------------------------------
# CC = gcc
# CFLAGS = -O3 -fno-common -no-cpp-precomp -fexceptions
# LIB = -lstdc++
# BLAS = -framework Accelerate
# LAPACK = -framework Accelerate
#------------------------------------------------------------------------------
# IBM RS 6000
#------------------------------------------------------------------------------
# BLAS = -lessl
# LAPACK =
# 32-bit mode:
# CFLAGS = -O4 -qipa -qmaxmem=16384 -qproto
# F77FLAGS = -O4 -qipa -qmaxmem=16384
# 64-bit mode:
# CFLAGS = -O4 -qipa -qmaxmem=16384 -q64 -qproto
# F77FLAGS = -O4 -qipa -qmaxmem=16384 -q64
# AR = ar -X64
#------------------------------------------------------------------------------
# SGI IRIX
#------------------------------------------------------------------------------
# BLAS = -lscsl
# LAPACK =
# 32-bit mode
# CFLAGS = -O
# 64-bit mode (32 bit int's and 64-bit long's):
# CFLAGS = -64
# F77FLAGS = -64
# SGI doesn't have ranlib
# RANLIB = echo
#------------------------------------------------------------------------------
# AMD Opteron (64 bit)
#------------------------------------------------------------------------------
# BLAS = -lgoto_opteron64 -lg2c
# LAPACK = -llapack_opteron64
# SUSE Linux 10.1, AMD Opteron
# F77 = gfortran
# BLAS = -lgoto_opteron64 -lgfortran
# LAPACK = -llapack_opteron64
#------------------------------------------------------------------------------
# remove object files and profile output
#------------------------------------------------------------------------------
CLEAN = *.o *.obj *.ln *.bb *.bbg *.da *.tcov *.gcov gmon.out *.bak *.d *.gcda *.gcno

View File

@ -26,7 +26,7 @@ set (3rdparty_srcs
${eigen_headers} # Set by 3rdparty/CMakeLists.txt
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Source/ccolamd.c
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Source/ccolamd_global.c
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/UFconfig/UFconfig.c)
${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/SuiteSparse_config/SuiteSparse_config.c)
gtsam_assign_source_folders("${3rdparty_srcs}") # Create MSVC structure
# To exclude a source from the library build (in any subfolder)

View File

@ -38,26 +38,6 @@ using namespace std;
namespace gtsam {
/* ************************************************************************* */
Matrix zeros( size_t m, size_t n ) {
return Matrix::Zero(m,n);
}
/* ************************************************************************* */
Matrix ones( size_t m, size_t n ) {
return Matrix::Ones(m,n);
}
/* ************************************************************************* */
Matrix eye( size_t m, size_t n) {
return Matrix::Identity(m, n);
}
/* ************************************************************************* */
Matrix diag(const Vector& v) {
return v.asDiagonal();
}
/* ************************************************************************* */
bool assert_equal(const Matrix& expected, const Matrix& actual, double tol) {
@ -146,16 +126,6 @@ bool linear_dependent(const Matrix& A, const Matrix& B, double tol) {
}
}
/* ************************************************************************* */
void multiplyAdd(double alpha, const Matrix& A, const Vector& x, Vector& e) {
e += alpha * A * x;
}
/* ************************************************************************* */
void multiplyAdd(const Matrix& A, const Vector& x, Vector& e) {
e += A * x;
}
/* ************************************************************************* */
Vector operator^(const Matrix& A, const Vector & v) {
if (A.rows()!=v.size()) throw std::invalid_argument(
@ -166,21 +136,6 @@ Vector operator^(const Matrix& A, const Vector & v) {
return A.transpose() * v;
}
/* ************************************************************************* */
void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, Vector& x) {
x += alpha * A.transpose() * e;
}
/* ************************************************************************* */
void transposeMultiplyAdd(const Matrix& A, const Vector& e, Vector& x) {
x += A.transpose() * e;
}
/* ************************************************************************* */
void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, SubVector x) {
x += alpha * A.transpose() * e;
}
/* ************************************************************************* */
//3 argument call
void print(const Matrix& A, const string &s, ostream& stream) {
@ -250,7 +205,7 @@ Matrix diag(const std::vector<Matrix>& Hs) {
rows+= Hs[i].rows();
cols+= Hs[i].cols();
}
Matrix results = zeros(rows,cols);
Matrix results = Matrix::Zero(rows,cols);
size_t r = 0, c = 0;
for (size_t i = 0; i<Hs.size(); ++i) {
insertSub(results, Hs[i], r, c);
@ -260,16 +215,6 @@ Matrix diag(const std::vector<Matrix>& Hs) {
return results;
}
/* ************************************************************************* */
void insertColumn(Matrix& A, const Vector& col, size_t j) {
A.col(j) = col;
}
/* ************************************************************************* */
void insertColumn(Matrix& A, const Vector& col, size_t i, size_t j) {
A.col(j).segment(i, col.size()) = col;
}
/* ************************************************************************* */
Vector columnNormSquare(const Matrix &A) {
Vector v (A.cols()) ;
@ -279,24 +224,13 @@ Vector columnNormSquare(const Matrix &A) {
return v ;
}
/* ************************************************************************* */
void solve(Matrix& A, Matrix& B) {
// Eigen version - untested
B = A.fullPivLu().solve(B);
}
/* ************************************************************************* */
Matrix inverse(const Matrix& A) {
return A.inverse();
}
/* ************************************************************************* */
/** Householder QR factorization, Golub & Van Loan p 224, explicit version */
/* ************************************************************************* */
pair<Matrix,Matrix> qr(const Matrix& A) {
const size_t m = A.rows(), n = A.cols(), kprime = min(m,n);
Matrix Q=eye(m,m),R(A);
Matrix Q=Matrix::Identity(m,m),R(A);
Vector v(m);
// loop over the kprime first columns
@ -319,7 +253,7 @@ pair<Matrix,Matrix> qr(const Matrix& A) {
v(k) = k<j ? 0.0 : vjm(k-j);
// create Householder reflection matrix Qj = I-beta*v*v'
Matrix Qj = eye(m) - beta * v * v.transpose();
Matrix Qj = Matrix::Identity(m,m) - beta * v * v.transpose();
R = Qj * R; // update R
Q = Q * Qj; // update Q
@ -356,7 +290,7 @@ weighted_eliminate(Matrix& A, Vector& b, const Vector& sigmas) {
if (precision < 1e-8) continue;
// create solution and copy into r
Vector r(basis(n, j));
Vector r(Vector::Unit(n,j));
for (size_t j2=j+1; j2<n; ++j2)
r(j2) = pseudo.dot(A.col(j2));
@ -600,7 +534,7 @@ Matrix RtR(const Matrix &A)
Matrix cholesky_inverse(const Matrix &A)
{
Eigen::LLT<Matrix> llt(A);
Matrix inv = eye(A.rows());
Matrix inv = Matrix::Identity(A.rows(),A.rows());
llt.matrixU().solveInPlace<Eigen::OnTheRight>(inv);
return inv*inv.transpose();
}
@ -612,7 +546,7 @@ Matrix cholesky_inverse(const Matrix &A)
// inv(B' * B) == A
Matrix inverse_square_root(const Matrix& A) {
Eigen::LLT<Matrix> llt(A);
Matrix inv = eye(A.rows());
Matrix inv = Matrix::Identity(A.rows(),A.rows());
llt.matrixU().solveInPlace<Eigen::OnTheRight>(inv);
return inv.transpose();
}
@ -648,7 +582,7 @@ boost::tuple<int, double, Vector> DLT(const Matrix& A, double rank_tol) {
/* ************************************************************************* */
Matrix expm(const Matrix& A, size_t K) {
Matrix E = eye(A.rows()), A_k = eye(A.rows());
Matrix E = Matrix::Identity(A.rows(),A.rows()), A_k = Matrix::Identity(A.rows(),A.rows());
for(size_t k=1;k<=K;k++) {
A_k = A_k*A/double(k);
E = E + A_k;

View File

@ -16,6 +16,7 @@
* @author Kai Ni
* @author Frank Dellaert
* @author Alex Cunningham
* @author Alex Hagiopol
*/
// \callgraph
@ -23,17 +24,17 @@
#pragma once
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/base/Vector.h>
#include <gtsam/config.h> // Configuration from CMake
#include <gtsam/config.h>
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <Eigen/LU>
#endif
#include <boost/format.hpp>
#include <boost/function.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/math/special_functions/fpclassify.hpp>
/**
* Matrix is a typedef in the gtsam namespace
* TODO: make a version to work with matlab wrapping
@ -74,40 +75,8 @@ GTSAM_MAKE_MATRIX_DEFS(9);
typedef Eigen::Block<Matrix> SubMatrix;
typedef Eigen::Block<const Matrix> ConstSubMatrix;
// Matlab-like syntax
/**
* Creates an zeros matrix, with matlab-like syntax
*
* Note: if assigning a block (created from an Eigen block() function) of a matrix to zeros,
* don't use this function, instead use ".setZero(m,n)" to avoid an Eigen error.
*/
GTSAM_EXPORT Matrix zeros(size_t m, size_t n);
/**
* Creates an ones matrix, with matlab-like syntax
*/
GTSAM_EXPORT Matrix ones(size_t m, size_t n);
/**
* Creates an identity matrix, with matlab-like syntax
*
* Note: if assigning a block (created from an Eigen block() function) of a matrix to identity,
* don't use this function, instead use ".setIdentity(m,n)" to avoid an Eigen error.
*/
GTSAM_EXPORT Matrix eye(size_t m, size_t n);
/**
* Creates a square identity matrix, with matlab-like syntax
*
* Note: if assigning a block (created from an Eigen block() function) of a matrix to identity,
* don't use this function, instead use ".setIdentity(m)" to avoid an Eigen error.
*/
inline Matrix eye( size_t m ) { return eye(m,m); }
GTSAM_EXPORT Matrix diag(const Vector& v);
/**
* equals with an tolerance
* equals with a tolerance
*/
template <class MATRIX>
bool equal_with_abs_tol(const Eigen::DenseBase<MATRIX>& A, const Eigen::DenseBase<MATRIX>& B, double tol = 1e-9) {
@ -166,37 +135,12 @@ GTSAM_EXPORT bool linear_independent(const Matrix& A, const Matrix& B, double to
*/
GTSAM_EXPORT bool linear_dependent(const Matrix& A, const Matrix& B, double tol = 1e-9);
/**
* BLAS Level-2 style e <- e + alpha*A*x
*/
GTSAM_EXPORT void multiplyAdd(double alpha, const Matrix& A, const Vector& x, Vector& e);
/**
* BLAS Level-2 style e <- e + A*x
*/
GTSAM_EXPORT void multiplyAdd(const Matrix& A, const Vector& x, Vector& e);
/**
* overload ^ for trans(A)*v
* We transpose the vectors for speed.
*/
GTSAM_EXPORT Vector operator^(const Matrix& A, const Vector & v);
/**
* BLAS Level-2 style x <- x + alpha*A'*e
*/
GTSAM_EXPORT void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, Vector& x);
/**
* BLAS Level-2 style x <- x + A'*e
*/
GTSAM_EXPORT void transposeMultiplyAdd(const Matrix& A, const Vector& e, Vector& x);
/**
* BLAS Level-2 style x <- x + alpha*A'*e
*/
GTSAM_EXPORT void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, SubVector x);
/** products using old-style format to improve compatibility */
template<class MATRIX>
inline MATRIX prod(const MATRIX& A, const MATRIX&B) {
@ -281,19 +225,6 @@ const typename MATRIX::ConstRowXpr row(const MATRIX& A, size_t j) {
return A.row(j);
}
/**
* inserts a column into a matrix IN PLACE
* NOTE: there is no size checking
* Alternate form allows for vectors smaller than the whole column to be inserted
* @param A matrix to be modified in place
* @param col is the vector to be inserted
* @param j is the index to insert the column
*/
GTSAM_EXPORT void insertColumn(Matrix& A, const Vector& col, size_t j);
GTSAM_EXPORT void insertColumn(Matrix& A, const Vector& col, size_t i, size_t j);
GTSAM_EXPORT Vector columnNormSquare(const Matrix &A);
/**
* Zeros all of the elements below the diagonal of a matrix, in place
* @param A is a matrix, to be modified in place
@ -355,17 +286,6 @@ inline typename Reshape<OutM, OutN, OutOptions, InM, InN, InOptions>::ReshapedTy
return Reshape<OutM, OutN, OutOptions, InM, InN, InOptions>::reshape(m);
}
/**
* solve AX=B via in-place Lu factorization and backsubstitution
* After calling, A contains LU, B the solved RHS vectors
*/
GTSAM_EXPORT void solve(Matrix& A, Matrix& B);
/**
* invert A
*/
GTSAM_EXPORT Matrix inverse(const Matrix& A);
/**
* QR factorization, inefficient, best use imperative householder below
* m*n matrix -> m*m Q, m*n R
@ -492,12 +412,6 @@ inline Matrix3 skewSymmetric(const Eigen::MatrixBase<Derived>& w) {
/** Use Cholesky to calculate inverse square root of a matrix */
GTSAM_EXPORT Matrix inverse_square_root(const Matrix& A);
/** Calculate the LL^t decomposition of a S.P.D matrix */
GTSAM_EXPORT Matrix LLt(const Matrix& A);
/** Calculate the R^tR decomposition of a S.P.D matrix */
GTSAM_EXPORT Matrix RtR(const Matrix& A);
/** Return the inverse of a S.P.D. matrix. Inversion is done via Cholesky decomposition. */
GTSAM_EXPORT Matrix cholesky_inverse(const Matrix &A);
@ -603,6 +517,28 @@ struct MultiplyWithInverseFunction {
const Operator phi_;
};
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
inline Matrix zeros( size_t m, size_t n ) { return Matrix::Zero(m,n); }
inline Matrix ones( size_t m, size_t n ) { return Matrix::Ones(m,n); }
inline Matrix eye( size_t m, size_t n) { return Matrix::Identity(m, n); }
inline Matrix eye( size_t m ) { return eye(m,m); }
inline Matrix diag(const Vector& v) { return v.asDiagonal(); }
inline void multiplyAdd(double alpha, const Matrix& A, const Vector& x, Vector& e) { e += alpha * A * x; }
inline void multiplyAdd(const Matrix& A, const Vector& x, Vector& e) { e += A * x; }
inline void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, Vector& x) { x += alpha * A.transpose() * e; }
inline void transposeMultiplyAdd(const Matrix& A, const Vector& e, Vector& x) { x += A.transpose() * e; }
inline void transposeMultiplyAdd(double alpha, const Matrix& A, const Vector& e, SubVector x) { x += alpha * A.transpose() * e; }
inline void insertColumn(Matrix& A, const Vector& col, size_t j) { A.col(j) = col; }
inline void insertColumn(Matrix& A, const Vector& col, size_t i, size_t j) { A.col(j).segment(i, col.size()) = col; }
inline void solve(Matrix& A, Matrix& B) { B = A.fullPivLu().solve(B); }
inline Matrix inverse(const Matrix& A) { return A.inverse(); }
#endif
GTSAM_EXPORT Matrix LLt(const Matrix& A);
GTSAM_EXPORT Matrix RtR(const Matrix& A);
GTSAM_EXPORT Vector columnNormSquare(const Matrix &A);
} // namespace gtsam
#include <boost/serialization/nvp.hpp>
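Since the Matlab-like Matrix helpers now compile only under GTSAM_ALLOW_DEPRECATED_SINCE_V4, here is a brief migration sketch for downstream code (illustrative function and variable names; the replacements mirror the deprecated inline definitions shown above):

// Old helper  ->  Eigen / GTSAM replacement (sketch, not library code)
#include <gtsam/base/Matrix.h>
using namespace gtsam;

void migrateMatrixHelpers() {
  Matrix Z = Matrix::Zero(3, 4);         // was zeros(3, 4)
  Matrix O = Matrix::Ones(3, 4);         // was ones(3, 4)
  Matrix I = Matrix::Identity(3, 3);     // was eye(3) or eye(3, 3)
  Vector v = (Vector(3) << 1., 2., 3.).finished();
  Matrix D = v.asDiagonal();             // was diag(v)
  Matrix A = Matrix::Random(3, 3);
  Matrix Ainv = A.inverse();             // was inverse(A)
  A.col(2) = v;                          // was insertColumn(A, v, 2)
}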

View File

@ -33,20 +33,6 @@ using namespace std;
namespace gtsam {
/* ************************************************************************* */
bool zero(const Vector& v) {
bool result = true;
size_t n = v.size();
for( size_t j = 0 ; j < n ; j++)
result = result && (v(j) == 0.0);
return result;
}
/* ************************************************************************* */
Vector delta(size_t n, size_t i, double value) {
return Vector::Unit(n, i) * value;
}
/* ************************************************************************* */
//3 argument call
void print(const Vector& v, const string& s, ostream& stream) {
@ -235,7 +221,7 @@ double weightedPseudoinverse(const Vector& a, const Vector& weights,
// Basically, instead of doing a normal QR step with the weighted
// pseudoinverse, we enforce the constraint by turning
// ax + AS = b into x + (A/a)S = b/a, for the first row where a!=0
pseudo = delta(m, i, 1.0 / a[i]);
pseudo = Vector::Unit(m,i)*(1.0/a[i]);
return inf;
}
}

View File

@ -14,11 +14,11 @@
* @brief typedef and functions to augment Eigen's VectorXd
* @author Kai Ni
* @author Frank Dellaert
* @author Alex Hagiopol
*/
// \callgraph
#pragma once
#ifndef MKL_BLAS
#define MKL_BLAS MKL_DOMAIN_BLAS
@ -63,47 +63,6 @@ GTSAM_MAKE_VECTOR_DEFS(12);
typedef Eigen::VectorBlock<Vector> SubVector;
typedef Eigen::VectorBlock<const Vector> ConstSubVector;
/**
* Create basis vector of dimension n,
* with a constant in spot i
* @param n is the size of the vector
* @param i index of the one
* @param value is the value to insert into the vector
* @return delta vector
*/
GTSAM_EXPORT Vector delta(size_t n, size_t i, double value);
/**
* Create basis vector of dimension n,
* with one in spot i
* @param n is the size of the vector
* @param i index of the one
* @return basis vector
*/
inline Vector basis(size_t n, size_t i) { return delta(n, i, 1.0); }
/**
* Create zero vector
* @param n size
*/
inline Vector zero(size_t n) { return Vector::Zero(n);}
/**
* Create vector initialized to ones
* @param n size
*/
inline Vector ones(size_t n) { return Vector::Ones(n); }
/**
* check if all zero
*/
GTSAM_EXPORT bool zero(const Vector& v);
/**
* dimensionality == size
*/
inline size_t dim(const Vector& v) { return v.size(); }
/**
* print without optional string, must specify cout yourself
*/
@ -272,21 +231,25 @@ GTSAM_EXPORT Vector concatVectors(const std::list<Vector>& vs);
*/
GTSAM_EXPORT Vector concatVectors(size_t nrVectors, ...);
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
GTSAM_EXPORT inline Vector abs(const Vector& v){return v.cwiseAbs();}
GTSAM_EXPORT inline Vector ediv(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseQuotient(b);}
GTSAM_EXPORT inline Vector esqrt(const Vector& v) { return v.cwiseSqrt();}
GTSAM_EXPORT inline Vector emul(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseProduct(b);}
GTSAM_EXPORT inline double max(const Vector &a){return a.maxCoeff();}
GTSAM_EXPORT inline double norm_2(const Vector& v) {return v.norm();}
GTSAM_EXPORT inline Vector reciprocal(const Vector &a) {return a.array().inverse();}
GTSAM_EXPORT inline Vector repeat(size_t n, double value) {return Vector::Constant(n, value);}
GTSAM_EXPORT inline const Vector sub(const Vector &v, size_t i1, size_t i2) {return v.segment(i1,i2-i1);}
GTSAM_EXPORT inline void subInsert(Vector& fullVector, const Vector& subVector, size_t i) {fullVector.segment(i, subVector.size()) = subVector;}
GTSAM_EXPORT inline double sum(const Vector &a){return a.sum();}
inline Vector abs(const Vector& v){return v.cwiseAbs();}
inline Vector basis(size_t n, size_t i) { return Vector::Unit(n,i); }
inline Vector delta(size_t n, size_t i, double value){ return Vector::Unit(n, i) * value;}
inline size_t dim(const Vector& v) { return v.size(); }
inline Vector ediv(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseQuotient(b);}
inline Vector esqrt(const Vector& v) { return v.cwiseSqrt();}
inline Vector emul(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseProduct(b);}
inline double max(const Vector &a){return a.maxCoeff();}
inline double norm_2(const Vector& v) {return v.norm();}
inline Vector ones(size_t n) { return Vector::Ones(n); }
inline Vector reciprocal(const Vector &a) {return a.array().inverse();}
inline Vector repeat(size_t n, double value) {return Vector::Constant(n, value);}
inline const Vector sub(const Vector &v, size_t i1, size_t i2) {return v.segment(i1,i2-i1);}
inline void subInsert(Vector& fullVector, const Vector& subVector, size_t i) {fullVector.segment(i, subVector.size()) = subVector;}
inline double sum(const Vector &a){return a.sum();}
inline bool zero(const Vector& v){ return v.isZero(); }
inline Vector zero(size_t n) { return Vector::Zero(n); }
#endif
} // namespace gtsam
#include <boost/serialization/nvp.hpp>
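The deprecated Vector helpers follow the same pattern; a short illustrative migration (hypothetical function name, same replacements as the deprecated inlines above):

#include <gtsam/base/Vector.h>
using namespace gtsam;

void migrateVectorHelpers() {
  Vector z  = Vector::Zero(3);            // was zero(3)
  Vector o  = Vector::Ones(3);            // was ones(3)
  Vector e1 = Vector::Unit(3, 1);         // was basis(3, 1)
  Vector d  = Vector::Unit(3, 1) * 0.5;   // was delta(3, 1, 0.5)
  bool allZero = z.isZero();              // was zero(z)
  size_t n = z.size();                    // was dim(z)
  (void)o; (void)e1; (void)d; (void)allZero; (void)n;
}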

View File

@ -88,7 +88,7 @@ typename internal::FixedSizeMatrix<X>::type numericalGradient(boost::function<do
TangentX d;
d.setZero();
Vector g = zero(N); // Can be fixed size
Eigen::Matrix<double,N,1> g; g.setZero(); // Can be fixed size
for (int j = 0; j < N; j++) {
d(j) = delta;
double hxplus = h(traits<X>::Retract(x, d));
@ -142,7 +142,7 @@ typename internal::FixedSizeMatrix<Y,X>::type numericalDerivative11(boost::funct
dx.setZero();
// Fill in Jacobian H
Matrix H = zeros(m, N);
Matrix H = Matrix::Zero(m, N);
const double factor = 1.0 / (2.0 * delta);
for (int j = 0; j < N; j++) {
dx(j) = delta;
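The switch to a fixed-size gradient above does not change the scheme: it is still the central difference g_j ≈ (h(x ⊕ δe_j) − h(x ⊖ δe_j)) / (2δ). A free-standing sketch in plain Eigen (hypothetical helper, without GTSAM's manifold Retract machinery):

#include <Eigen/Core>
#include <functional>

// Central-difference gradient of a scalar function h on R^n.
Eigen::VectorXd centralGradient(const std::function<double(const Eigen::VectorXd&)>& h,
                                const Eigen::VectorXd& x, double delta = 1e-5) {
  Eigen::VectorXd g = Eigen::VectorXd::Zero(x.size());
  const double factor = 1.0 / (2.0 * delta);
  for (int j = 0; j < x.size(); ++j) {
    Eigen::VectorXd d = Eigen::VectorXd::Zero(x.size());
    d(j) = delta;
    g(j) = (h(x + d) - h(x - d)) * factor;
  }
  return g;
}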

View File

@ -156,8 +156,8 @@ TEST(Matrix, collect2 )
TEST(Matrix, collect3 )
{
Matrix A, B;
A = eye(2, 3);
B = eye(2, 3);
A = Matrix::Identity(2,3);
B = Matrix::Identity(2,3);
vector<const Matrix*> matrices;
matrices.push_back(&A);
matrices.push_back(&B);
@ -211,48 +211,6 @@ TEST(Matrix, column )
EXPECT(assert_equal(a3, exp3));
}
/* ************************************************************************* */
TEST(Matrix, insert_column )
{
Matrix big = zeros(5, 6);
Vector col = ones(5);
size_t j = 3;
insertColumn(big, col, j);
Matrix expected = (Matrix(5, 6) <<
0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0, 0.0).finished();
EXPECT(assert_equal(expected, big));
}
/* ************************************************************************* */
TEST(Matrix, insert_subcolumn )
{
Matrix big = zeros(5, 6);
Vector col1 = ones(2);
size_t i = 1;
size_t j = 3;
insertColumn(big, col1, i, j); // check 1
Vector col2 = ones(1);
insertColumn(big, col2, 4, 5); // check 2
Matrix expected = (Matrix(5, 6) <<
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.0).finished();
EXPECT(assert_equal(expected, big));
}
/* ************************************************************************* */
TEST(Matrix, row )
{
@ -272,26 +230,10 @@ TEST(Matrix, row )
EXPECT(assert_equal(a3, exp3));
}
/* ************************************************************************* */
TEST(Matrix, zeros )
{
Matrix A(2, 3);
A(0, 0) = 0;
A(0, 1) = 0;
A(0, 2) = 0;
A(1, 0) = 0;
A(1, 1) = 0;
A(1, 2) = 0;
Matrix zero = zeros(2, 3);
EQUALITY(A , zero);
}
/* ************************************************************************* */
TEST(Matrix, insert_sub )
{
Matrix big = zeros(5, 6), small = (Matrix(2, 3) << 1.0, 1.0, 1.0, 1.0, 1.0,
Matrix big = Matrix::Zero(5,6), small = (Matrix(2, 3) << 1.0, 1.0, 1.0, 1.0, 1.0,
1.0).finished();
insertSub(big, small, 1, 2);
@ -307,9 +249,9 @@ TEST(Matrix, insert_sub )
TEST(Matrix, diagMatrices )
{
std::vector<Matrix> Hs;
Hs.push_back(ones(3,3));
Hs.push_back(ones(4,4)*2);
Hs.push_back(ones(2,2)*3);
Hs.push_back(Matrix::Ones(3,3));
Hs.push_back(Matrix::Ones(4,4)*2);
Hs.push_back(Matrix::Ones(2,2)*3);
Matrix actual = diag(Hs);
@ -723,9 +665,9 @@ TEST(Matrix, inverse )
A(2, 1) = 0;
A(2, 2) = 6;
Matrix Ainv = inverse(A);
EXPECT(assert_equal(eye(3), A*Ainv));
EXPECT(assert_equal(eye(3), Ainv*A));
Matrix Ainv = A.inverse();
EXPECT(assert_equal((Matrix) I_3x3, A*Ainv));
EXPECT(assert_equal((Matrix) I_3x3, Ainv*A));
Matrix expected(3, 3);
expected(0, 0) = 1.0909;
@ -746,13 +688,13 @@ TEST(Matrix, inverse )
0.0, -1.0, 1.0,
1.0, 0.0, 2.0,
0.0, 0.0, 1.0).finished(),
inverse(lMg)));
lMg.inverse()));
Matrix gMl((Matrix(3, 3) << 0.0, -1.0, 1.0, 1.0, 0.0, 2.0, 0.0, 0.0, 1.0).finished());
EXPECT(assert_equal((Matrix(3, 3) <<
0.0, 1.0,-2.0,
-1.0, 0.0, 1.0,
0.0, 0.0, 1.0).finished(),
inverse(gMl)));
gMl.inverse()));
}
/* ************************************************************************* */
@ -769,7 +711,7 @@ TEST(Matrix, inverse2 )
A(2, 1) = 0;
A(2, 2) = 1;
Matrix Ainv = inverse(A);
Matrix Ainv = A.inverse();
Matrix expected(3, 3);
expected(0, 0) = 0;
@ -996,7 +938,7 @@ TEST(Matrix, inverse_square_root )
10.0).finished();
EQUALITY(expected,actual);
EQUALITY(measurement_covariance,inverse(actual*actual));
EQUALITY(measurement_covariance,(actual*actual).inverse());
// Randomly generated test. This test really requires inverse to
// be working well; if it's not, there's the possibility of a
@ -1052,28 +994,6 @@ TEST(Matrix, cholesky_inverse )
EQUALITY(cholesky::M.inverse(), cholesky_inverse(cholesky::M));
}
/* ************************************************************************* */
TEST(Matrix, multiplyAdd )
{
Matrix A = (Matrix(3, 4) << 4., 0., 0., 1., 0., 4., 0., 2., 0., 0., 1., 3.).finished();
Vector x = (Vector(4) << 1., 2., 3., 4.).finished(), e = Vector3(5., 6., 7.),
expected = e + A * x;
multiplyAdd(1, A, x, e);
EXPECT(assert_equal(expected, e));
}
/* ************************************************************************* */
TEST(Matrix, transposeMultiplyAdd )
{
Matrix A = (Matrix(3, 4) << 4., 0., 0., 1., 0., 4., 0., 2., 0., 0., 1., 3.).finished();
Vector x = (Vector(4) << 1., 2., 3., 4.).finished(), e = Vector3(5., 6., 7.),
expected = x + trans(A) * e;
transposeMultiplyAdd(1, A, e, x);
EXPECT(assert_equal(expected, x));
}
/* ************************************************************************* */
TEST(Matrix, linear_dependent )
{
@ -1102,12 +1022,12 @@ TEST(Matrix, linear_dependent3 )
TEST(Matrix, svd1 )
{
Vector v = Vector3(2., 1., 0.);
Matrix U1 = eye(4, 3), S1 = diag(v), V1 = eye(3, 3), A = (U1 * S1)
Matrix U1 = Matrix::Identity(4, 3), S1 = v.asDiagonal(), V1 = I_3x3, A = (U1 * S1)
* Matrix(trans(V1));
Matrix U, V;
Vector s;
svd(A, U, s, V);
Matrix S = diag(s);
Matrix S = s.asDiagonal();
EXPECT(assert_equal(U*S*Matrix(trans(V)),A));
EXPECT(assert_equal(S,S1));
}
@ -1158,7 +1078,7 @@ TEST(Matrix, svd3 )
V = -V;
}
Matrix S = diag(s);
Matrix S = s.asDiagonal();
Matrix t = U * S;
Matrix Vt = trans(V);
@ -1202,7 +1122,7 @@ TEST(Matrix, svd4 )
V.col(1) = -V.col(1);
}
Matrix reconstructed = U * diag(s) * trans(V);
Matrix reconstructed = U * s.asDiagonal() * trans(V);
EXPECT(assert_equal(A, reconstructed, 1e-4));
EXPECT(assert_equal(expectedU,U, 1e-3));

View File

@ -42,7 +42,7 @@ TEST(SymmetricBlockMatrix, ReadBlocks)
23, 29).finished();
Matrix actual1 = testBlockMatrix(1, 1);
// Test only writing the upper triangle for efficiency
Matrix actual1t = Matrix::Zero(2, 2);
Matrix actual1t = Z_2x2;
actual1t.triangularView<Eigen::Upper>() = testBlockMatrix(1, 1).triangularView();
EXPECT(assert_equal(expected1, actual1));
EXPECT(assert_equal(Matrix(expected1.triangularView<Eigen::Upper>()), actual1t));

View File

@ -79,22 +79,6 @@ TEST(Vector, copy )
EXPECT(assert_equal(a, b));
}
/* ************************************************************************* */
TEST(Vector, zero1 )
{
Vector v = Vector::Zero(2);
EXPECT(zero(v));
}
/* ************************************************************************* */
TEST(Vector, zero2 )
{
Vector a = zero(2);
Vector b = Vector::Zero(2);
EXPECT(a==b);
EXPECT(assert_equal(a, b));
}
/* ************************************************************************* */
TEST(Vector, scalar_multiply )
{
@ -256,7 +240,7 @@ TEST(Vector, equals )
TEST(Vector, greater_than )
{
Vector v1 = Vector3(1.0, 2.0, 3.0),
v2 = zero(3);
v2 = Z_3x1;
EXPECT(greaterThanOrEqual(v1, v1)); // test basic greater than
EXPECT(greaterThanOrEqual(v1, v2)); // test equals
}

View File

@ -167,7 +167,7 @@ TEST_UNSAFE( DiscreteMarginals, truss2 ) {
// Calculate the marginals by brute force
vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
key[0] & key[1] & key[2] & key[3] & key[4]);
Vector T = zero(5), F = zero(5);
Vector T = Z_5x1, F = Z_5x1;
for (size_t i = 0; i < allPosbValues.size(); ++i) {
DiscreteFactor::Values x = allPosbValues[i];
double px = graph(x);

View File

@ -42,7 +42,7 @@ OrientedPlane3 OrientedPlane3::transform(const Pose3& xr, OptionalJacobian<3, 3>
double pred_d = n_.unitVector().dot(xr.translation()) + d_;
if (Hr) {
*Hr = zeros(3, 6);
*Hr = Matrix::Zero(3,6);
Hr->block<2, 3>(0, 0) = D_rotated_plane;
Hr->block<1, 3>(2, 3) = unit_vec;
}

View File

@ -132,7 +132,7 @@ Matrix3 Pose2::AdjointMap() const {
/* ************************************************************************* */
Matrix3 Pose2::adjointMap(const Vector3& v) {
// See Chirikjian12book2, vol.2, pg. 36
Matrix3 ad = zeros(3,3);
Matrix3 ad = Z_3x3;
ad(0,1) = -v[2];
ad(1,0) = v[2];
ad(0,2) = v[1];

View File

@ -63,7 +63,7 @@ Rot2& Rot2::normalize() {
Rot2 Rot2::Expmap(const Vector1& v, OptionalJacobian<1, 1> H) {
if (H)
*H = I_1x1;
if (zero(v))
if (v.isZero())
return (Rot2());
else
return Rot2::fromAngle(v(0));

View File

@ -119,12 +119,12 @@ namespace gtsam {
/// Left-trivialized derivative of the exponential map
static Matrix ExpmapDerivative(const Vector& /*v*/) {
return ones(1);
return I_1x1;
}
/// Left-trivialized derivative inverse of the exponential map
static Matrix LogmapDerivative(const Vector& /*v*/) {
return ones(1);
return I_1x1;
}
// Chart at origin simply uses exponential map and its inverse

View File

@ -122,8 +122,8 @@ TEST(Cal3_S2, between) {
Matrix H1, H2;
EXPECT(assert_equal(Cal3_S2(0,1,2,3,4), k1.between(k2, H1, H2)));
EXPECT(assert_equal(-eye(5), H1));
EXPECT(assert_equal(eye(5), H2));
EXPECT(assert_equal(-I_5x5, H1));
EXPECT(assert_equal(I_5x5, H2));
}

View File

@ -62,7 +62,7 @@ TEST (EssentialMatrix, FromPose3) {
//*******************************************************************************
TEST(EssentialMatrix, localCoordinates0) {
EssentialMatrix E;
Vector expected = zero(5);
Vector expected = Z_5x1;
Vector actual = E.localCoordinates(E);
EXPECT(assert_equal(expected, actual, 1e-8));
}
@ -74,7 +74,7 @@ TEST (EssentialMatrix, localCoordinates) {
Pose3 pose(trueRotation, trueTranslation);
EssentialMatrix hx = EssentialMatrix::FromPose3(pose);
Vector actual = hx.localCoordinates(EssentialMatrix::FromPose3(pose));
EXPECT(assert_equal(zero(5), actual, 1e-8));
EXPECT(assert_equal(Z_5x1, actual, 1e-8));
Vector6 d;
d << 0.1, 0.2, 0.3, 0, 0, 0;
@ -85,7 +85,7 @@ TEST (EssentialMatrix, localCoordinates) {
//*************************************************************************
TEST (EssentialMatrix, retract0) {
EssentialMatrix actual = trueE.retract(zero(5));
EssentialMatrix actual = trueE.retract(Z_5x1);
EXPECT(assert_equal(trueE, actual));
}

View File

@ -96,8 +96,8 @@ inline static Vector randomVector(const Vector& minLimits,
const Vector& maxLimits) {
// Get the number of dimensions and create the return vector
size_t numDims = dim(minLimits);
Vector vector = zero(numDims);
size_t numDims = minLimits.size();
Vector vector = Vector::Zero(numDims);
// Create the random vector
for (size_t i = 0; i < numDims; i++) {
@ -145,7 +145,7 @@ TEST (OrientedPlane3, error2) {
OrientedPlane3 plane2(-1.1, 0.2, 0.3, 5.4);
// Hard-coded regression values, to ensure the result doesn't change.
EXPECT(assert_equal(zero(3), plane1.errorVector(plane1), 1e-8));
EXPECT(assert_equal((Vector) Z_3x1, plane1.errorVector(plane1), 1e-8));
EXPECT(assert_equal(Vector3(-0.0677674148, -0.0760543588, -0.4), plane1.errorVector(plane2), 1e-5));
// Test the jacobians of transform

View File

@ -116,7 +116,7 @@ TEST( PinholeCamera, lookat)
Matrix R = camera2.pose().rotation().matrix();
Matrix I = trans(R)*R;
EXPECT(assert_equal(I, eye(3)));
EXPECT(assert_equal(I, I_3x3));
}
/* ************************************************************************* */

View File

@ -87,7 +87,7 @@ TEST( PinholePose, lookat)
Matrix R = camera2.pose().rotation().matrix();
Matrix I = trans(R)*R;
EXPECT(assert_equal(I, eye(3)));
EXPECT(assert_equal(I, I_3x3));
}
/* ************************************************************************* */

View File

@ -73,12 +73,12 @@ TEST(Point2, Lie) {
Matrix H1, H2;
EXPECT(assert_equal(Point2(5,7), traits<Point2>::Compose(p1, p2, H1, H2)));
EXPECT(assert_equal(eye(2), H1));
EXPECT(assert_equal(eye(2), H2));
EXPECT(assert_equal(I_2x2, H1));
EXPECT(assert_equal(I_2x2, H2));
EXPECT(assert_equal(Point2(3,3), traits<Point2>::Between(p1, p2, H1, H2)));
EXPECT(assert_equal(-eye(2), H1));
EXPECT(assert_equal(eye(2), H2));
EXPECT(assert_equal(-I_2x2, H1));
EXPECT(assert_equal(I_2x2, H2));
EXPECT(assert_equal(Point2(5,7), traits<Point2>::Retract(p1, Vector2(4., 5.))));
EXPECT(assert_equal(Vector2(3.,3.), traits<Point2>::Local(p1,p2)));

View File

@ -47,12 +47,12 @@ TEST(Point3, Lie) {
Matrix H1, H2;
EXPECT(assert_equal(Point3(5, 7, 9), traits<Point3>::Compose(p1, p2, H1, H2)));
EXPECT(assert_equal(eye(3), H1));
EXPECT(assert_equal(eye(3), H2));
EXPECT(assert_equal(I_3x3, H1));
EXPECT(assert_equal(I_3x3, H2));
EXPECT(assert_equal(Point3(3, 3, 3), traits<Point3>::Between(p1, p2, H1, H2)));
EXPECT(assert_equal(-eye(3), H1));
EXPECT(assert_equal(eye(3), H2));
EXPECT(assert_equal(-I_3x3, H1));
EXPECT(assert_equal(I_3x3, H2));
EXPECT(assert_equal(Point3(5, 7, 9), traits<Point3>::Retract(p1, Vector3(4,5,6))));
EXPECT(assert_equal(Vector3(3, 3, 3), traits<Point3>::Local(p1,p2)));

View File

@ -102,7 +102,7 @@ TEST(Pose2, expmap3) {
0.99, 0.0, -0.015,
0.0, 0.0, 0.0).finished();
Matrix A2 = A*A/2.0, A3 = A2*A/3.0, A4=A3*A/4.0;
Matrix expected = eye(3) + A + A2 + A3 + A4;
Matrix expected = I_3x3 + A + A2 + A3 + A4;
Vector v = Vector3(0.01, -0.015, 0.99);
Pose2 pose = Pose2::Expmap(v);
@ -311,7 +311,7 @@ TEST(Pose2, compose_a)
-1.0, 0.0, 2.0,
0.0, 0.0, 1.0
).finished();
Matrix expectedH2 = eye(3);
Matrix expectedH2 = I_3x3;
Matrix numericalH1 = numericalDerivative21<Pose2, Pose2, Pose2>(testing::compose, pose1, pose2);
Matrix numericalH2 = numericalDerivative22<Pose2, Pose2, Pose2>(testing::compose, pose1, pose2);
EXPECT(assert_equal(expectedH1,actualDcompose1));

View File

@ -61,7 +61,7 @@ TEST( Pose3, constructors)
TEST( Pose3, retract_first_order)
{
Pose3 id;
Vector v = zero(6);
Vector v = Z_6x1;
v(0) = 0.3;
EXPECT(assert_equal(Pose3(R, Point3(0,0,0)), id.retract(v),1e-2));
v(3)=0.2;v(4)=0.7;v(5)=-2;
@ -71,7 +71,7 @@ TEST( Pose3, retract_first_order)
/* ************************************************************************* */
TEST( Pose3, retract_expmap)
{
Vector v = zero(6); v(0) = 0.3;
Vector v = Z_6x1; v(0) = 0.3;
Pose3 pose = Pose3::Expmap(v);
EXPECT(assert_equal(Pose3(R, Point3(0,0,0)), pose, 1e-2));
EXPECT(assert_equal(v,Pose3::Logmap(pose),1e-2));
@ -81,7 +81,7 @@ TEST( Pose3, retract_expmap)
TEST( Pose3, expmap_a_full)
{
Pose3 id;
Vector v = zero(6);
Vector v = Z_6x1;
v(0) = 0.3;
EXPECT(assert_equal(expmap_default<Pose3>(id, v), Pose3(R, Point3(0,0,0))));
v(3)=0.2;v(4)=0.394742;v(5)=-2.08998;
@ -92,7 +92,7 @@ TEST( Pose3, expmap_a_full)
TEST( Pose3, expmap_a_full2)
{
Pose3 id;
Vector v = zero(6);
Vector v = Z_6x1;
v(0) = 0.3;
EXPECT(assert_equal(expmap_default<Pose3>(id, v), Pose3(R, Point3(0,0,0))));
v(3)=0.2;v(4)=0.394742;v(5)=-2.08998;
@ -153,7 +153,7 @@ Pose3 Agrawal06iros(const Vector& xi) {
return Pose3(Rot3(), Point3(v));
else {
Matrix W = skewSymmetric(w/t);
Matrix A = eye(3) + ((1 - cos(t)) / t) * W + ((t - sin(t)) / t) * (W * W);
Matrix A = I_3x3 + ((1 - cos(t)) / t) * W + ((t - sin(t)) / t) * (W * W);
return Pose3(Rot3::Expmap (w), Point3(A * v));
}
}
@ -267,7 +267,7 @@ TEST( Pose3, inverse)
{
Matrix actualDinverse;
Matrix actual = T.inverse(actualDinverse).matrix();
Matrix expected = inverse(T.matrix());
Matrix expected = T.matrix().inverse();
EXPECT(assert_equal(actual,expected,1e-8));
Matrix numericalH = numericalDerivative11(testing::inverse<Pose3>, T);
@ -293,7 +293,7 @@ TEST( Pose3, inverseDerivatives2)
TEST( Pose3, compose_inverse)
{
Matrix actual = (T*T.inverse()).matrix();
Matrix expected = eye(4,4);
Matrix expected = I_4x4;
EXPECT(assert_equal(actual,expected,1e-8));
}
@ -712,7 +712,7 @@ TEST(Pose3, Bearing2) {
TEST( Pose3, unicycle )
{
// velocity in X should be X in inertial frame, rather than global frame
Vector x_step = delta(6,3,1.0);
Vector x_step = Vector::Unit(6,3)*1.0;
EXPECT(assert_equal(Pose3(Rot3::Ypr(0,0,0), l1), expmap_default<Pose3>(x1, x_step), tol));
EXPECT(assert_equal(Pose3(Rot3::Ypr(0,0,0), Point3(2,1,0)), expmap_default<Pose3>(x2, x_step), tol));
EXPECT(assert_equal(Pose3(Rot3::Ypr(M_PI/4.0,0,0), Point3(2,2,0)), expmap_default<Pose3>(x3, sqrt(2.0) * x_step), tol));
@ -723,9 +723,8 @@ TEST( Pose3, adjointMap) {
Matrix res = Pose3::adjointMap(screwPose3::xi);
Matrix wh = skewSymmetric(screwPose3::xi(0), screwPose3::xi(1), screwPose3::xi(2));
Matrix vh = skewSymmetric(screwPose3::xi(3), screwPose3::xi(4), screwPose3::xi(5));
Matrix Z3 = zeros(3,3);
Matrix6 expected;
expected << wh, Z3, vh, wh;
expected << wh, Z_3x3, vh, wh;
EXPECT(assert_equal(expected,res,1e-5));
}

View File

@ -62,8 +62,8 @@ TEST( Rot2, compose)
Matrix H1, H2;
(void) Rot2::fromAngle(1.0).compose(Rot2::fromAngle(2.0), H1, H2);
EXPECT(assert_equal(eye(1), H1));
EXPECT(assert_equal(eye(1), H2));
EXPECT(assert_equal(I_1x1, H1));
EXPECT(assert_equal(I_1x1, H2));
}
/* ************************************************************************* */
@ -74,8 +74,8 @@ TEST( Rot2, between)
Matrix H1, H2;
(void) Rot2::fromAngle(1.0).between(Rot2::fromAngle(2.0), H1, H2);
EXPECT(assert_equal(-eye(1), H1));
EXPECT(assert_equal(eye(1), H2));
EXPECT(assert_equal(-I_1x1, H1));
EXPECT(assert_equal(I_1x1, H2));
}
/* ************************************************************************* */
@ -89,7 +89,7 @@ TEST( Rot2, equals)
/* ************************************************************************* */
TEST( Rot2, expmap)
{
Vector v = zero(1);
Vector v = Z_1x1;
CHECK(assert_equal(R.retract(v), R));
}

View File

@ -152,7 +152,7 @@ TEST( Rot3, Rodrigues4)
/* ************************************************************************* */
TEST( Rot3, retract)
{
Vector v = zero(3);
Vector v = Z_3x1;
CHECK(assert_equal(R, R.retract(v)));
// // test Canonical coordinates
@ -213,7 +213,7 @@ TEST(Rot3, log)
#define CHECK_OMEGA_ZERO(X,Y,Z) \
w = (Vector(3) << (double)X, (double)Y, double(Z)).finished(); \
R = Rot3::Rodrigues(w); \
EXPECT(assert_equal(zero(3), Rot3::Logmap(R)));
EXPECT(assert_equal((Vector) Z_3x1, Rot3::Logmap(R)));
CHECK_OMEGA_ZERO( 2.0*PI, 0, 0)
CHECK_OMEGA_ZERO( 0, 2.0*PI, 0)
@ -555,8 +555,8 @@ TEST(Rot3, quaternion) {
/* ************************************************************************* */
Matrix Cayley(const Matrix& A) {
Matrix::Index n = A.cols();
const Matrix I = eye(n);
return (I-A)*inverse(I+A);
const Matrix I = Matrix::Identity(n,n);
return (I-A)*(I+A).inverse();
}
TEST( Rot3, Cayley ) {

View File

@ -76,7 +76,7 @@ TEST( SimpleCamera, lookat)
Matrix R = camera2.pose().rotation().matrix();
Matrix I = trans(R)*R;
CHECK(assert_equal(I, eye(3)));
CHECK(assert_equal(I, I_3x3));
}
/* ************************************************************************* */

View File

@ -237,7 +237,7 @@ TEST(Unit3, distance) {
TEST(Unit3, localCoordinates0) {
Unit3 p;
Vector actual = p.localCoordinates(p);
EXPECT(assert_equal(zero(2), actual, 1e-8));
EXPECT(assert_equal(Z_2x1, actual, 1e-8));
}
TEST(Unit3, localCoordinates) {
@ -245,14 +245,14 @@ TEST(Unit3, localCoordinates) {
Unit3 p, q;
Vector2 expected = Vector2::Zero();
Vector2 actual = p.localCoordinates(q);
EXPECT(assert_equal(zero(2), actual, 1e-8));
EXPECT(assert_equal((Vector) Z_2x1, actual, 1e-8));
EXPECT(assert_equal(q, p.retract(expected), 1e-8));
}
{
Unit3 p, q(1, 6.12385e-21, 0);
Vector2 expected = Vector2::Zero();
Vector2 actual = p.localCoordinates(q);
EXPECT(assert_equal(zero(2), actual, 1e-8));
EXPECT(assert_equal((Vector) Z_2x1, actual, 1e-8));
EXPECT(assert_equal(q, p.retract(expected), 1e-8));
}
{

View File

@ -31,7 +31,7 @@ Vector4 triangulateHomogeneousDLT(
size_t m = projection_matrices.size();
// Allocate DLT matrix
Matrix A = zeros(m * 2, 4);
Matrix A = Matrix::Zero(m * 2, 4);
for (size_t i = 0; i < m; i++) {
size_t row = i * 2;

View File

@ -53,10 +53,10 @@ Ordering Ordering::ColamdConstrained(const VariableIndex& variableIndex,
gttic(Ordering_COLAMDConstrained);
gttic(Prepare);
size_t nEntries = variableIndex.nEntries(), nFactors =
const size_t nEntries = variableIndex.nEntries(), nFactors =
variableIndex.nFactors(), nVars = variableIndex.size();
// Convert to compressed column major format colamd wants it in (== MATLAB format!)
size_t Alen = ccolamd_recommended((int) nEntries, (int) nFactors,
const size_t Alen = ccolamd_recommended((int) nEntries, (int) nFactors,
(int) nVars); /* colamd arg 3: size of the array A */
vector<int> A = vector<int>(Alen); /* colamd arg 4: row indices of A, of size Alen */
vector<int> p = vector<int>(nVars + 1); /* colamd arg 5: column pointers of A, of size n_col+1 */
@ -66,13 +66,10 @@ Ordering Ordering::ColamdConstrained(const VariableIndex& variableIndex,
int count = 0;
vector<Key> keys(nVars); // Array to store the keys in the order we add them so we can retrieve them in permuted order
size_t index = 0;
BOOST_FOREACH(const VariableIndex::value_type key_factors, variableIndex) {
for (auto key_factors: variableIndex) {
// Arrange factor indices into COLAMD format
const VariableIndex::Factors& column = key_factors.second;
size_t lastFactorId = numeric_limits<size_t>::max();
BOOST_FOREACH(size_t factorIndex, column) {
if (lastFactorId != numeric_limits<size_t>::max())
assert(factorIndex > lastFactorId);
for(size_t factorIndex: column) {
A[count++] = (int) factorIndex; // copy sparse column
}
p[index + 1] = count; // column j (base 1) goes from A[j-1] to A[j]-1
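
Note (not part of the diff): a minimal worked example of the compressed-column layout the loop above builds, for a hypothetical toy problem with two variables where variable 0 appears in factors 0 and 1 and variable 1 appears only in factor 1. The row-index array would be A = [0, 1, 1] and the column-pointer array p = [0, 2, 3], so column j spans A[p[j]] .. A[p[j+1]-1] — the MATLAB-style sparse format ccolamd expects, as the earlier comment in this file states.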
@ -106,8 +103,8 @@ Ordering Ordering::ColamdConstrained(const VariableIndex& variableIndex,
// ccolamd_report(stats);
gttic(Fill_Ordering);
// Convert elimination ordering in p to an ordering
gttic(Fill_Ordering);
Ordering result;
result.resize(nVars);
for (size_t j = 0; j < nVars; ++j)
@ -128,13 +125,13 @@ Ordering Ordering::ColamdConstrainedLast(const VariableIndex& variableIndex,
// Build a mapping to look up sorted Key indices by Key
FastMap<Key, size_t> keyIndices;
size_t j = 0;
BOOST_FOREACH(const VariableIndex::value_type key_factors, variableIndex)
for (auto key_factors: variableIndex)
keyIndices.insert(keyIndices.end(), make_pair(key_factors.first, j++));
// If at least some variables are not constrained to be last, constrain the
// ones that should be constrained.
int group = (constrainLast.size() != n ? 1 : 0);
BOOST_FOREACH(Key key, constrainLast) {
for (Key key: constrainLast) {
cmember[keyIndices.at(key)] = group;
if (forceOrder)
++group;
@ -155,13 +152,13 @@ Ordering Ordering::ColamdConstrainedFirst(const VariableIndex& variableIndex,
// Build a mapping to look up sorted Key indices by Key
FastMap<Key, size_t> keyIndices;
size_t j = 0;
BOOST_FOREACH(const VariableIndex::value_type key_factors, variableIndex)
for (auto key_factors: variableIndex)
keyIndices.insert(keyIndices.end(), make_pair(key_factors.first, j++));
// If at least some variables are not constrained to be last, constrain the
// ones that should be constrained.
int group = 0;
BOOST_FOREACH(Key key, constrainFirst) {
for (Key key: constrainFirst) {
cmember[keyIndices.at(key)] = group;
if (forceOrder)
++group;
@ -169,7 +166,7 @@ Ordering Ordering::ColamdConstrainedFirst(const VariableIndex& variableIndex,
if (!forceOrder && !constrainFirst.empty())
++group;
BOOST_FOREACH(int& c, cmember)
for(int& c: cmember)
if (c == none)
c = group;
@ -186,12 +183,12 @@ Ordering Ordering::ColamdConstrained(const VariableIndex& variableIndex,
// Build a mapping to look up sorted Key indices by Key
FastMap<Key, size_t> keyIndices;
size_t j = 0;
BOOST_FOREACH(const VariableIndex::value_type key_factors, variableIndex)
for (auto key_factors: variableIndex)
keyIndices.insert(keyIndices.end(), make_pair(key_factors.first, j++));
// Assign groups
typedef FastMap<Key, int>::value_type key_group;
BOOST_FOREACH(const key_group& p, groups) {
for(const key_group& p: groups) {
// FIXME: check that no groups are skipped
cmember[keyIndices.at(p.first)] = p.second;
}

View File

@ -172,6 +172,7 @@ TEST(Ordering, csr_format_3) {
}
/* ************************************************************************* */
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
TEST(Ordering, csr_format_4) {
SymbolicFactorGraph sfg;
@ -206,8 +207,9 @@ TEST(Ordering, csr_format_4) {
Ordering metOrder2 = Ordering::Metis(sfg);
}
#endif
/* ************************************************************************* */
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
TEST(Ordering, metis) {
SymbolicFactorGraph sfg;
@ -228,8 +230,9 @@ TEST(Ordering, metis) {
Ordering metis = Ordering::Metis(sfg);
}
#endif
/* ************************************************************************* */
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
TEST(Ordering, MetisLoop) {
// create linear graph
@ -261,7 +264,7 @@ TEST(Ordering, MetisLoop) {
}
#endif
}
#endif
/* ************************************************************************* */
TEST(Ordering, Create) {
@ -280,6 +283,7 @@ TEST(Ordering, Create) {
EXPECT(assert_equal(expected, actual));
}
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
// METIS
{
Ordering actual = Ordering::Create(Ordering::METIS, sfg);
@ -289,6 +293,7 @@ TEST(Ordering, Create) {
Ordering expected = Ordering(list_of(5)(3)(4)(1)(0)(2));
EXPECT(assert_equal(expected, actual));
}
#endif
// CUSTOM
CHECK_EXCEPTION(Ordering::Create(Ordering::CUSTOM, sfg), runtime_error);

View File

@ -183,7 +183,7 @@ namespace gtsam {
if (frontalVec.hasNaN()) throw IndeterminantLinearSystemException(this->keys().front());
for (const_iterator it = beginParents(); it!= endParents(); it++)
gtsam::transposeMultiplyAdd(-1.0, Matrix(getA(it)), frontalVec, gy[*it]);
gy[*it] += -1.0 * Matrix(getA(it)).transpose() * frontalVec;
// Scale by sigmas
if(model_)

View File

@ -377,7 +377,7 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
vector<Vector> y;
y.reserve(size());
for (const_iterator it = begin(); it != end(); it++)
y.push_back(zero(getDim(it)));
y.push_back(Vector::Zero(getDim(it)));
// Accessing the VectorValues one by one is expensive
// So we will loop over columns to access x only once per column
@ -427,7 +427,7 @@ void HessianFactor::gradientAtZero(double* d) const {
Vector HessianFactor::gradient(Key key, const VectorValues& x) const {
Factor::const_iterator i = find(key);
// Sum over G_ij*xj for all xj connecting to xi
Vector b = zero(x.at(key).size());
Vector b = Vector::Zero(x.at(key).size());
for (Factor::const_iterator j = begin(); j != end(); ++j) {
// Obtain Gij from the Hessian factor
// Hessian factor only stores an upper triangular matrix, so be careful when i>j

View File

@ -543,7 +543,7 @@ void JacobianFactor::updateHessian(const FastVector<Key>& infoKeys,
/* ************************************************************************* */
Vector JacobianFactor::operator*(const VectorValues& x) const {
Vector Ax = zero(Ab_.rows());
Vector Ax = Vector::Zero(Ab_.rows());
if (empty())
return Ax;
@ -565,8 +565,7 @@ void JacobianFactor::transposeMultiplyAdd(double alpha, const Vector& e,
pair<VectorValues::iterator, bool> xi = x.tryInsert(j, Vector());
if (xi.second)
xi.first->second = Vector::Zero(getDim(begin() + pos));
gtsam::transposeMultiplyAdd(Ab_(pos), E, xi.first->second);
xi.first->second += Ab_(pos).transpose()*E;
}
}
@ -595,7 +594,7 @@ void JacobianFactor::multiplyHessianAdd(double alpha, const double* x, double* y
if (empty())
return;
Vector Ax = zero(Ab_.rows());
Vector Ax = Vector::Zero(Ab_.rows());
/// Just iterate over all A matrices and multiply in correct config part (looping over keys)
/// E.g.: Jacobian A = [A0 A1 A2] multiplies x = [x0 x1 x2]'

View File

@ -115,7 +115,7 @@ KalmanFilter::State KalmanFilter::predictQ(const State& p, const Matrix& F,
// f2(x_{t},x_{t+1}) = (F*x_{t} + B*u - x_{t+1}) * Q^-1 * (F*x_{t} + B*u - x_{t+1})^T
// See documentation in HessianFactor, we have A1 = -F, A2 = I_, b = B*u:
// TODO: starts to seem more elaborate than straight-up KF equations?
Matrix M = inverse(Q), Ft = trans(F);
Matrix M = Q.inverse(), Ft = trans(F);
Matrix G12 = -Ft * M, G11 = -G12 * F, G22 = M;
Vector b = B * u, g2 = M * b, g1 = -Ft * g2;
double f = dot(b, g2);
@ -147,7 +147,7 @@ KalmanFilter::State KalmanFilter::update(const State& p, const Matrix& H,
KalmanFilter::State KalmanFilter::updateQ(const State& p, const Matrix& H,
const Vector& z, const Matrix& Q) const {
Key k = step(p);
Matrix M = inverse(Q), Ht = trans(H);
Matrix M = Q.inverse(), Ht = trans(H);
Matrix G = Ht * M * H;
Vector g = Ht * M * z;
double f = dot(z, M * z);

View File

@ -69,7 +69,7 @@ public:
// Constructor
KalmanFilter(size_t n, Factorization method =
KALMANFILTER_DEFAULT_FACTORIZATION) :
n_(n), I_(eye(n_, n_)), function_(
n_(n), I_(Matrix::Identity(n_, n_)), function_(
method == QR ? GaussianFactorGraph::Eliminate(EliminateQR) :
GaussianFactorGraph::Eliminate(EliminateCholesky)) {
}

View File

@ -405,7 +405,7 @@ void Constrained::WhitenInPlace(Eigen::Block<Matrix> H) const {
/* ************************************************************************* */
Constrained::shared_ptr Constrained::unit() const {
Vector sigmas = ones(dim());
Vector sigmas = Vector::Ones(dim());
for (size_t i=0; i<dim(); ++i)
if (constrained(i))
sigmas(i) = 0.0;

View File

@ -341,7 +341,7 @@ namespace gtsam {
* Return R itself, but note that Whiten(H) is cheaper than R*H
*/
virtual Matrix R() const {
return diag(invsigmas());
return invsigmas().asDiagonal();
}
private:
@ -381,7 +381,7 @@ namespace gtsam {
* from appearing in invsigmas or precisions.
* mu set to large default value (1000.0)
*/
Constrained(const Vector& sigmas = zero(1));
Constrained(const Vector& sigmas = Z_1x1);
/**
* Constructor that prevents any inf values

View File

@ -83,7 +83,7 @@ public:
void multiplyHessianAdd(double alpha, const double* x, double* y) const {
if (empty())
return;
Vector Ax = zero(Ab_.rows());
Vector Ax = Vector::Zero(Ab_.rows());
// Just iterate over all A matrices and multiply in correct config part
for (size_t pos = 0; pos < size(); ++pos)
@ -173,7 +173,7 @@ public:
* RAW memory access! Assumes keys start at 0 and go to M-1, and x is laid out that way
*/
Vector operator*(const double* x) const {
Vector Ax = zero(Ab_.rows());
Vector Ax = Vector::Zero(Ab_.rows());
if (empty())
return Ax;

View File

@ -86,7 +86,7 @@ namespace gtsam {
/** x += alpha* A'*e */
void transposeMultiplyAdd(double alpha, const Vector& e, Vector& x) const {
gtsam::transposeMultiplyAdd(alpha, A(), e, x);
x += alpha * A().transpose() * e;
}
};

View File

@ -170,7 +170,7 @@ TEST(GaussianBayesTree, complicatedMarginal) {
LONGS_EQUAL(1, (long)actualJacobianQR.size());
LONGS_EQUAL(5, (long)actualJacobianQR.keys()[0]);
Matrix actualA = actualJacobianQR.getA(actualJacobianQR.begin());
Matrix actualCov = inverse(actualA.transpose() * actualA);
Matrix actualCov = (actualA.transpose() * actualA).inverse();
EXPECT(assert_equal(expectedCov, actualCov, 1e-1));
// Marginal on 6
@ -187,7 +187,7 @@ TEST(GaussianBayesTree, complicatedMarginal) {
LONGS_EQUAL(1, (long)actualJacobianQR.size());
LONGS_EQUAL(6, (long)actualJacobianQR.keys()[0]);
actualA = actualJacobianQR.getA(actualJacobianQR.begin());
actualCov = inverse(actualA.transpose() * actualA);
actualCov = (actualA.transpose() * actualA).inverse();
EXPECT(assert_equal(expectedCov, actualCov, 1e1));
}

View File

@ -47,10 +47,10 @@ TEST(GaussianFactorGraph, initialization) {
SharedDiagonal unit2 = noiseModel::Unit::Create(2);
fg +=
JacobianFactor(0, 10*eye(2), -1.0*ones(2), unit2),
JacobianFactor(0, -10*eye(2),1, 10*eye(2), Vector2(2.0, -1.0), unit2),
JacobianFactor(0, -5*eye(2), 2, 5*eye(2), Vector2(0.0, 1.0), unit2),
JacobianFactor(1, -5*eye(2), 2, 5*eye(2), Vector2(-1.0, 1.5), unit2);
JacobianFactor(0, 10*I_2x2, -1.0*Vector::Ones(2), unit2),
JacobianFactor(0, -10*I_2x2,1, 10*I_2x2, Vector2(2.0, -1.0), unit2),
JacobianFactor(0, -5*I_2x2, 2, 5*I_2x2, Vector2(0.0, 1.0), unit2),
JacobianFactor(1, -5*I_2x2, 2, 5*I_2x2, Vector2(-1.0, 1.5), unit2);
EXPECT_LONGS_EQUAL(4, (long)fg.size());
@ -166,13 +166,13 @@ static GaussianFactorGraph createSimpleGaussianFactorGraph() {
GaussianFactorGraph fg;
SharedDiagonal unit2 = noiseModel::Unit::Create(2);
// linearized prior on x1: c[_x1_]+x1=0 i.e. x1=-c[_x1_]
fg += JacobianFactor(2, 10*eye(2), -1.0*ones(2), unit2);
fg += JacobianFactor(2, 10*I_2x2, -1.0*Vector::Ones(2), unit2);
// odometry between x1 and x2: x2-x1=[0.2;-0.1]
fg += JacobianFactor(0, 10*eye(2), 2, -10*eye(2), Vector2(2.0, -1.0), unit2);
fg += JacobianFactor(0, 10*I_2x2, 2, -10*I_2x2, Vector2(2.0, -1.0), unit2);
// measurement between x1 and l1: l1-x1=[0.0;0.2]
fg += JacobianFactor(1, 5*eye(2), 2, -5*eye(2), Vector2(0.0, 1.0), unit2);
fg += JacobianFactor(1, 5*I_2x2, 2, -5*I_2x2, Vector2(0.0, 1.0), unit2);
// measurement between x2 and l1: l1-x2=[-0.2;0.3]
fg += JacobianFactor(0, -5*eye(2), 1, 5*eye(2), Vector2(-1.0, 1.5), unit2);
fg += JacobianFactor(0, -5*I_2x2, 1, 5*I_2x2, Vector2(-1.0, 1.5), unit2);
return fg;
}
@ -280,8 +280,8 @@ TEST( GaussianFactorGraph, multiplyHessianAdd )
/* ************************************************************************* */
static GaussianFactorGraph createGaussianFactorGraphWithHessianFactor() {
GaussianFactorGraph gfg = createSimpleGaussianFactorGraph();
gfg += HessianFactor(1, 2, 100*eye(2,2), zeros(2,2), Vector2(0.0, 1.0),
400*eye(2,2), Vector2(1.0, 1.0), 3.0);
gfg += HessianFactor(1, 2, 100*I_2x2, Z_2x2, Vector2(0.0, 1.0),
400*I_2x2, Vector2(1.0, 1.0), 3.0);
return gfg;
}

View File

@ -54,7 +54,7 @@ TEST(HessianFactor, emptyConstructor)
HessianFactor f;
DOUBLES_EQUAL(0.0, f.constantTerm(), 1e-9); // Constant term should be zero
EXPECT(assert_equal(Vector(), f.linearTerm())); // Linear term should be empty
EXPECT(assert_equal(zeros(1,1), f.info())); // Full matrix should be 1-by-1 zero matrix
EXPECT(assert_equal((Matrix) Z_1x1, f.info())); // Full matrix should be 1-by-1 zero matrix
DOUBLES_EQUAL(0.0, f.error(VectorValues()), 1e-9); // Should have zero error
}
@ -123,11 +123,11 @@ TEST(HessianFactor, Constructor1)
TEST(HessianFactor, Constructor1b)
{
Vector mu = Vector2(1.0,2.0);
Matrix Sigma = eye(2,2);
Matrix Sigma = I_2x2;
HessianFactor factor(0, mu, Sigma);
Matrix G = eye(2,2);
Matrix G = I_2x2;
Vector g = G*mu;
double f = dot(g,mu);
@ -484,7 +484,7 @@ TEST(HessianFactor, combine) {
-8.94427191, 0.0,
0.0, -8.94427191).finished();
Vector b = Vector2(2.23606798,-1.56524758);
SharedDiagonal model = noiseModel::Diagonal::Sigmas(ones(2));
SharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector::Ones(2));
GaussianFactor::shared_ptr f(new JacobianFactor(0, A0, 1, A1, 2, A2, b, model));
GaussianFactorGraph factors = list_of(f);

View File

@ -168,19 +168,19 @@ namespace simple_graph {
Key keyX(10), keyY(8), keyZ(12);
double sigma1 = 0.1;
Matrix A11 = Matrix::Identity(2, 2);
Matrix A11 = I_2x2;
Vector2 b1(2, -1);
JacobianFactor factor1(keyX, A11, b1, noiseModel::Isotropic::Sigma(2, sigma1));
double sigma2 = 0.5;
Matrix A21 = -2 * Matrix::Identity(2, 2);
Matrix A22 = 3 * Matrix::Identity(2, 2);
Matrix A21 = -2 * I_2x2;
Matrix A22 = 3 * I_2x2;
Vector2 b2(4, -5);
JacobianFactor factor2(keyX, A21, keyY, A22, b2, noiseModel::Isotropic::Sigma(2, sigma2));
double sigma3 = 1.0;
Matrix A32 = -4 * Matrix::Identity(2, 2);
Matrix A33 = 5 * Matrix::Identity(2, 2);
Matrix A32 = -4 * I_2x2;
Matrix A33 = 5 * I_2x2;
Vector2 b3(3, -6);
JacobianFactor factor3(keyY, A32, keyZ, A33, b3, noiseModel::Isotropic::Sigma(2, sigma3));
@ -193,8 +193,8 @@ TEST( JacobianFactor, construct_from_graph)
{
using namespace simple_graph;
Matrix A1(6,2); A1 << A11, A21, Matrix::Zero(2,2);
Matrix A2(6,2); A2 << Matrix::Zero(2,2), A22, A32;
Matrix A1(6,2); A1 << A11, A21, Z_2x2;
Matrix A2(6,2); A2 << Z_2x2, A22, A32;
Matrix A3(6,2); A3 << Matrix::Zero(4,2), A33;
Vector b(6); b << b1, b2, b3;
Vector sigmas(6); sigmas << sigma1, sigma1, sigma2, sigma2, sigma3, sigma3;
@ -260,17 +260,17 @@ TEST(JacobianFactor, matrices_NULL)
// hessianDiagonal
VectorValues expectDiagonal;
expectDiagonal.insert(5, ones(3));
expectDiagonal.insert(10, 4*ones(3));
expectDiagonal.insert(15, 9*ones(3));
expectDiagonal.insert(5, Vector::Ones(3));
expectDiagonal.insert(10, 4*Vector::Ones(3));
expectDiagonal.insert(15, 9*Vector::Ones(3));
EXPECT(assert_equal(expectDiagonal, factor.hessianDiagonal()));
// hessianBlockDiagonal
map<Key,Matrix> actualBD = factor.hessianBlockDiagonal();
LONGS_EQUAL(3,actualBD.size());
EXPECT(assert_equal(1*eye(3),actualBD[5]));
EXPECT(assert_equal(4*eye(3),actualBD[10]));
EXPECT(assert_equal(9*eye(3),actualBD[15]));
EXPECT(assert_equal(1*I_3x3,actualBD[5]));
EXPECT(assert_equal(4*I_3x3,actualBD[10]));
EXPECT(assert_equal(9*I_3x3,actualBD[15]));
}
/* ************************************************************************* */
@ -314,9 +314,9 @@ TEST(JacobianFactor, matrices)
// hessianBlockDiagonal
map<Key,Matrix> actualBD = factor.hessianBlockDiagonal();
LONGS_EQUAL(3,actualBD.size());
EXPECT(assert_equal(4*eye(3),actualBD[5]));
EXPECT(assert_equal(16*eye(3),actualBD[10]));
EXPECT(assert_equal(36*eye(3),actualBD[15]));
EXPECT(assert_equal(4*I_3x3,actualBD[5]));
EXPECT(assert_equal(16*I_3x3,actualBD[10]));
EXPECT(assert_equal(36*I_3x3,actualBD[15]));
}
/* ************************************************************************* */
@ -324,7 +324,7 @@ TEST(JacobianFactor, operators )
{
SharedDiagonal sigma0_1 = noiseModel::Isotropic::Sigma(2,0.1);
Matrix I = eye(2);
Matrix I = I_2x2;
Vector b = Vector2(0.2,-0.1);
JacobianFactor lf(1, -I, 2, I, b, sigma0_1);
@ -405,7 +405,7 @@ TEST(JacobianFactor, eliminate)
gfg.add(0, A10, 1, A11, b1, noiseModel::Diagonal::Sigmas(s1, true));
gfg.add(1, A21, b2, noiseModel::Diagonal::Sigmas(s2, true));
Matrix zero3x3 = zeros(3,3);
Matrix zero3x3 = Matrix::Zero(3,3);
Matrix A0 = gtsam::stack(3, &A10, &zero3x3, &zero3x3);
Matrix A1 = gtsam::stack(3, &A11, &A01, &A21);
Vector9 b; b << b1, b0, b2;
@ -561,7 +561,7 @@ TEST ( JacobianFactor, constraint_eliminate1 )
{
// construct a linear constraint
Vector v(2); v(0)=1.2; v(1)=3.4;
JacobianFactor lc(1, eye(2), v, noiseModel::Constrained::All(2));
JacobianFactor lc(1, I_2x2, v, noiseModel::Constrained::All(2));
// eliminate it
pair<GaussianConditional::shared_ptr, JacobianFactor::shared_ptr>
@ -572,7 +572,7 @@ TEST ( JacobianFactor, constraint_eliminate1 )
// verify conditional Gaussian
Vector sigmas = Vector2(0.0, 0.0);
GaussianConditional expCG(1, v, eye(2), noiseModel::Diagonal::Sigmas(sigmas));
GaussianConditional expCG(1, v, I_2x2, noiseModel::Diagonal::Sigmas(sigmas));
EXPECT(assert_equal(expCG, *actual.first));
}

View File

@ -51,7 +51,7 @@ TEST( KalmanFilter, constructor ) {
EXPECT(assert_equal(x_initial, p1->mean()));
Matrix Sigma = (Matrix(2, 2) << 0.01, 0.0, 0.0, 0.01).finished();
EXPECT(assert_equal(Sigma, p1->covariance()));
EXPECT(assert_equal(inverse(Sigma), p1->information()));
EXPECT(assert_equal(Sigma.inverse(), p1->information()));
// Create one with a sharedGaussian
KalmanFilter::State p2 = kf1.init(x_initial, Sigma);
@ -65,33 +65,33 @@ TEST( KalmanFilter, constructor ) {
TEST( KalmanFilter, linear1 ) {
// Create the controls and measurement properties for our example
Matrix F = eye(2, 2);
Matrix B = eye(2, 2);
Matrix F = I_2x2;
Matrix B = I_2x2;
Vector u = Vector2(1.0, 0.0);
SharedDiagonal modelQ = noiseModel::Isotropic::Sigma(2, 0.1);
Matrix Q = 0.01*eye(2, 2);
Matrix H = eye(2, 2);
Matrix Q = 0.01*I_2x2;
Matrix H = I_2x2;
State z1(1.0, 0.0);
State z2(2.0, 0.0);
State z3(3.0, 0.0);
SharedDiagonal modelR = noiseModel::Isotropic::Sigma(2, 0.1);
Matrix R = 0.01*eye(2, 2);
Matrix R = 0.01*I_2x2;
// Create the set of expected output TestValues
State expected0(0.0, 0.0);
Matrix P00 = 0.01*eye(2, 2);
Matrix P00 = 0.01*I_2x2;
State expected1(1.0, 0.0);
Matrix P01 = P00 + Q;
Matrix I11 = inverse(P01) + inverse(R);
Matrix I11 = P01.inverse() + R.inverse();
State expected2(2.0, 0.0);
Matrix P12 = inverse(I11) + Q;
Matrix I22 = inverse(P12) + inverse(R);
Matrix P12 = I11.inverse() + Q;
Matrix I22 = P12.inverse() + R.inverse();
State expected3(3.0, 0.0);
Matrix P23 = inverse(I22) + Q;
Matrix I33 = inverse(P23) + inverse(R);
Matrix P23 = I22.inverse() + Q;
Matrix I33 = P23.inverse() + R.inverse();
// Create a Kalman filter of dimension 2
KalmanFilter kf(2);
@ -140,7 +140,7 @@ TEST( KalmanFilter, predict ) {
Vector u = Vector3(1.0, 0.0, 2.0);
Matrix R = (Matrix(2, 2) << 1.0, 0.5, 0.0, 3.0).finished();
Matrix M = trans(R)*R;
Matrix Q = inverse(M);
Matrix Q = M.inverse();
// Create a Kalman filter of dimension 2
KalmanFilter kf(2);
@ -167,7 +167,7 @@ TEST( KalmanFilter, predict ) {
// Test both QR and Cholesky versions in case of a realistic (AHRS) dynamics update
TEST( KalmanFilter, QRvsCholesky ) {
Vector mean = ones(9);
Vector mean = Vector::Ones(9);
Matrix covariance = 1e-6 * (Matrix(9, 9) <<
15.0, -6.2, 0.0, 0.0, 0.0, 0.0, 0.0, 63.8, -0.6,
-6.2, 21.9, -0.0, 0.0, 0.0, 0.0, -63.8, -0.0, -0.1,
@ -197,8 +197,8 @@ TEST( KalmanFilter, QRvsCholesky ) {
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000000.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000000.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000000.0).finished();
Matrix B = zeros(9, 1);
Vector u = zero(1);
Matrix B = Matrix::Zero(9, 1);
Vector u = Z_1x1;
Matrix dt_Q_k = 1e-6 * (Matrix(9, 9) <<
33.7, 3.1, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
3.1, 126.4, -0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
@ -270,7 +270,7 @@ TEST( KalmanFilter, QRvsCholesky ) {
EXPECT(assert_equal(expected2, pb2->covariance(), 1e-7));
// do the above update again, this time with a full Matrix Q
Matrix modelQ = diag(sigmas.array().square());
Matrix modelQ = ((Matrix) sigmas.array().square()).asDiagonal();
KalmanFilter::State pa3 = kfa.updateQ(pa, H, z, modelQ);
KalmanFilter::State pb3 = kfb.updateQ(pb, H, z, modelQ);
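
Note (not part of the diff): what the linear1 hunk above preserves when eye()/inverse() are swapped for I_2x2/.inverse() is the information-form recursion, written here as a sketch in the test's own symbols and valid because F, B and H are all identity: the predicted covariance is P_pred = P_post + Q, and the updated information is P_post^{-1} = P_pred^{-1} + R^{-1}, which is exactly the pattern in the I11, P12, I22, P23, I33 lines.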

View File

@ -112,7 +112,7 @@ TEST(NoiseModel, Unit)
TEST(NoiseModel, equals)
{
Gaussian::shared_ptr g1 = Gaussian::SqrtInformation(R),
g2 = Gaussian::SqrtInformation(eye(3,3));
g2 = Gaussian::SqrtInformation(I_3x3);
Diagonal::shared_ptr d1 = Diagonal::Sigmas(Vector3(kSigma, kSigma, kSigma)),
d2 = Diagonal::Sigmas(Vector3(0.1, 0.2, 0.3));
Isotropic::shared_ptr i1 = Isotropic::Sigma(3, kSigma),
@ -395,7 +395,7 @@ TEST(NoiseModel, SmartSqrtInformation )
{
bool smart = true;
gtsam::SharedGaussian expected = Unit::Create(3);
gtsam::SharedGaussian actual = Gaussian::SqrtInformation(eye(3), smart);
gtsam::SharedGaussian actual = Gaussian::SqrtInformation(I_3x3, smart);
EXPECT(assert_equal(*expected,*actual));
}
@ -404,7 +404,7 @@ TEST(NoiseModel, SmartSqrtInformation2 )
{
bool smart = true;
gtsam::SharedGaussian expected = Unit::Isotropic::Sigma(3,2);
gtsam::SharedGaussian actual = Gaussian::SqrtInformation(0.5*eye(3), smart);
gtsam::SharedGaussian actual = Gaussian::SqrtInformation(0.5*I_3x3, smart);
EXPECT(assert_equal(*expected,*actual));
}
@ -413,7 +413,7 @@ TEST(NoiseModel, SmartInformation )
{
bool smart = true;
gtsam::SharedGaussian expected = Unit::Isotropic::Variance(3,2);
Matrix M = 0.5*eye(3);
Matrix M = 0.5*I_3x3;
EXPECT(checkIfDiagonal(M));
gtsam::SharedGaussian actual = Gaussian::Information(M, smart);
EXPECT(assert_equal(*expected,*actual));
@ -424,7 +424,7 @@ TEST(NoiseModel, SmartCovariance )
{
bool smart = true;
gtsam::SharedGaussian expected = Unit::Create(3);
gtsam::SharedGaussian actual = Gaussian::Covariance(eye(3), smart);
gtsam::SharedGaussian actual = Gaussian::Covariance(I_3x3, smart);
EXPECT(assert_equal(*expected,*actual));
}
@ -433,7 +433,7 @@ TEST(NoiseModel, ScalarOrVector )
{
bool smart = true;
SharedGaussian expected = Unit::Create(3);
SharedGaussian actual = Gaussian::Covariance(eye(3), smart);
SharedGaussian actual = Gaussian::Covariance(I_3x3, smart);
EXPECT(assert_equal(*expected,*actual));
}
@ -442,9 +442,9 @@ TEST(NoiseModel, WhitenInPlace)
{
Vector sigmas = Vector3(0.1, 0.1, 0.1);
SharedDiagonal model = Diagonal::Sigmas(sigmas);
Matrix A = eye(3);
Matrix A = I_3x3;
model->WhitenInPlace(A);
Matrix expected = eye(3) * 10;
Matrix expected = I_3x3 * 10;
EXPECT(assert_equal(expected, A));
}

View File

@ -51,7 +51,7 @@ BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
/* ************************************************************************* */
// example noise models
static noiseModel::Diagonal::shared_ptr diag3 = noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.2, 0.3));
static noiseModel::Gaussian::shared_ptr gaussian3 = noiseModel::Gaussian::SqrtInformation(2.0 * eye(3,3));
static noiseModel::Gaussian::shared_ptr gaussian3 = noiseModel::Gaussian::SqrtInformation(2.0 * I_3x3);
static noiseModel::Isotropic::shared_ptr iso3 = noiseModel::Isotropic::Sigma(3, 0.2);
static noiseModel::Constrained::shared_ptr constrained3 = noiseModel::Constrained::MixedSigmas(Vector3(0.0, 0.0, 0.1));
static noiseModel::Unit::shared_ptr unit3 = noiseModel::Unit::Create(3);
@ -144,8 +144,8 @@ TEST (Serialization, linear_factors) {
EXPECT(equalsBinary<VectorValues>(values));
Key i1 = 4, i2 = 7;
Matrix A1 = eye(3), A2 = -1.0 * eye(3);
Vector b = ones(3);
Matrix A1 = I_3x3, A2 = -1.0 * I_3x3;
Vector b = Vector::Ones(3);
SharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector3(1.0, 2.0, 3.0));
JacobianFactor jacobianfactor(i1, A1, i2, A2, b, model);
EXPECT(equalsObj(jacobianfactor));
@ -185,8 +185,8 @@ TEST (Serialization, gaussian_factor_graph) {
{
Key i1 = 4, i2 = 7;
Matrix A1 = eye(3), A2 = -1.0 * eye(3);
Vector b = ones(3);
Matrix A1 = I_3x3, A2 = -1.0 * I_3x3;
Vector b = Vector::Ones(3);
SharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector3(1.0, 2.0, 3.0));
JacobianFactor jacobianfactor(i1, A1, i2, A2, b, model);
HessianFactor hessianfactor(jacobianfactor);

View File

@ -189,7 +189,7 @@ public:
Vector e = attitudeError(nTb.rotation(), H);
if (H) {
Matrix H23 = *H;
*H = Matrix::Zero(2, 6);
*H = Matrix::Zero(2,6);
H->block<2,3>(0,0) = H23;
}
return e;

View File

@ -154,7 +154,7 @@ public:
// measured bM = nRb' * nM + b, where b is unknown bias
Point3 hx = bRn_.rotate(nM, boost::none, H1) + bias;
if (H2)
*H2 = eye(3);
*H2 = I_3x3;
return (hx - measured_);
}
};
@ -205,7 +205,7 @@ public:
*H2 = scale * H * (*H2);
}
if (H3)
*H3 = eye(3);
*H3 = I_3x3;
return (hx - measured_);
}
};

View File

@ -26,6 +26,38 @@
namespace gtsam {
/// Parameters for pre-integration:
/// Usage: Create just a single Params and pass a shared pointer to the constructor
struct PreintegratedRotationParams {
Matrix3 gyroscopeCovariance; ///< continuous-time "Covariance" of gyroscope measurements
boost::optional<Vector3> omegaCoriolis; ///< Coriolis constant
boost::optional<Pose3> body_P_sensor; ///< The pose of the sensor in the body frame
PreintegratedRotationParams() : gyroscopeCovariance(I_3x3) {}
virtual void print(const std::string& s) const;
virtual bool equals(const PreintegratedRotationParams& other, double tol=1e-9) const;
void setGyroscopeCovariance(const Matrix3& cov) { gyroscopeCovariance = cov; }
void setOmegaCoriolis(const Vector3& omega) { omegaCoriolis.reset(omega); }
void setBodyPSensor(const Pose3& pose) { body_P_sensor.reset(pose); }
const Matrix3& getGyroscopeCovariance() const { return gyroscopeCovariance; }
boost::optional<Vector3> getOmegaCoriolis() const { return omegaCoriolis; }
boost::optional<Pose3> getBodyPSensor() const { return body_P_sensor; }
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
namespace bs = ::boost::serialization;
ar & bs::make_nvp("gyroscopeCovariance", bs::make_array(gyroscopeCovariance.data(), gyroscopeCovariance.size()));
ar & BOOST_SERIALIZATION_NVP(omegaCoriolis);
ar & BOOST_SERIALIZATION_NVP(body_P_sensor);
}
};
/**
* PreintegratedRotation is the base class for all PreintegratedMeasurements
* classes (in AHRSFactor, ImuFactor, and CombinedImuFactor).
@ -33,29 +65,7 @@ namespace gtsam {
*/
class PreintegratedRotation {
public:
/// Parameters for pre-integration:
/// Usage: Create just a single Params and pass a shared pointer to the constructor
struct Params {
Matrix3 gyroscopeCovariance; ///< continuous-time "Covariance" of gyroscope measurements
boost::optional<Vector3> omegaCoriolis; ///< Coriolis constant
boost::optional<Pose3> body_P_sensor; ///< The pose of the sensor in the body frame
Params() : gyroscopeCovariance(I_3x3) {}
virtual void print(const std::string& s) const;
virtual bool equals(const Params& other, double tol=1e-9) const;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
namespace bs = ::boost::serialization;
ar & bs::make_nvp("gyroscopeCovariance", bs::make_array(gyroscopeCovariance.data(), gyroscopeCovariance.size()));
ar & BOOST_SERIALIZATION_NVP(omegaCoriolis);
ar & BOOST_SERIALIZATION_NVP(body_P_sensor);
}
};
typedef PreintegratedRotationParams Params;
protected:
/// Parameters
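
Note (not part of the diff): a hedged usage sketch of the now top-level params struct, following the "create a single Params and pass a shared pointer" comment above. The PreintegratedRotation constructor taking a shared_ptr to Params is assumed here; the setters are the ones added in this hunk and the numeric values are illustrative only.

#include <gtsam/navigation/PreintegratedRotation.h>
#include <boost/make_shared.hpp>

void rotationParamsSketch() {  // hypothetical helper, illustration only
  using namespace gtsam;
  boost::shared_ptr<PreintegratedRotationParams> params =
      boost::make_shared<PreintegratedRotationParams>();
  params->setGyroscopeCovariance(1e-4 * I_3x3);           // continuous-time gyro covariance
  params->setOmegaCoriolis(Vector3(0.0, 0.0, 7.292e-5));  // optional Coriolis vector
  params->setBodyPSensor(Pose3());                        // optional sensor pose in the body frame
  PreintegratedRotation pim(params);                      // assumed shared_ptr constructor; one Params shared by all measurements
}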

View File

@ -105,7 +105,7 @@ pair<Vector3, Vector3> PreintegrationBase::correctMeasurementsBySensorPose(
// Update derivative: centrifugal causes the correlation between acc and omega!!!
if (correctedAcc_H_unbiasedOmega) {
double wdp = correctedOmega.dot(b_arm);
*correctedAcc_H_unbiasedOmega = -(diag(Vector3::Constant(wdp))
*correctedAcc_H_unbiasedOmega = -( (Matrix) Vector3::Constant(wdp).asDiagonal()
+ correctedOmega * b_arm.transpose()) * bRs.matrix()
+ 2 * b_arm * unbiasedOmega.transpose();
}

View File

@ -23,7 +23,7 @@ namespace gtsam {
/// Parameters for pre-integration:
/// Usage: Create just a single Params and pass a shared pointer to the constructor
struct PreintegrationParams: PreintegratedRotation::Params {
struct PreintegrationParams: PreintegratedRotationParams {
Matrix3 accelerometerCovariance; ///< continuous-time "Covariance" of accelerometer
Matrix3 integrationCovariance; ///< continuous-time "Covariance" describing integration uncertainty
bool use2ndOrderCoriolis; ///< Whether to use second order Coriolis integration
@ -50,6 +50,14 @@ struct PreintegrationParams: PreintegratedRotation::Params {
void print(const std::string& s) const;
bool equals(const PreintegratedRotation::Params& other, double tol) const;
void setAccelerometerCovariance(const Matrix3& cov) { accelerometerCovariance = cov; }
void setIntegrationCovariance(const Matrix3& cov) { integrationCovariance = cov; }
void setUse2ndOrderCoriolis(bool flag) { use2ndOrderCoriolis = flag; }
const Matrix3& getAccelerometerCovariance() const { return accelerometerCovariance; }
const Matrix3& getIntegrationCovariance() const { return integrationCovariance; }
bool getUse2ndOrderCoriolis() const { return use2ndOrderCoriolis; }
protected:
/// Default constructor for serialization only: uninitialized!
PreintegrationParams() {}
@ -60,7 +68,7 @@ protected:
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
namespace bs = ::boost::serialization;
ar & boost::serialization::make_nvp("PreintegratedRotation_Params",
boost::serialization::base_object<PreintegratedRotation::Params>(*this));
boost::serialization::base_object<PreintegratedRotationParams>(*this));
ar & bs::make_nvp("accelerometerCovariance", bs::make_array(accelerometerCovariance.data(), accelerometerCovariance.size()));
ar & bs::make_nvp("integrationCovariance", bs::make_array(integrationCovariance.data(), integrationCovariance.size()));
ar & BOOST_SERIALIZATION_NVP(use2ndOrderCoriolis);
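
Note (not part of the diff): a hedged sketch of the new setters/getters added in this hunk, which mirror the base-class ones so a wrapper language can configure the struct. The PreintegrationParams(n_gravity) constructor is assumed; values are illustrative only.

#include <gtsam/navigation/PreintegrationParams.h>
#include <boost/make_shared.hpp>

void preintegrationParamsSketch() {  // hypothetical helper, illustration only
  using namespace gtsam;
  auto p = boost::make_shared<PreintegrationParams>(Vector3(0.0, 0.0, -9.81));  // assumed n_gravity constructor
  p->setAccelerometerCovariance(1e-3 * I_3x3);  // continuous-time accelerometer covariance
  p->setIntegrationCovariance(1e-8 * I_3x3);    // integration uncertainty
  p->setUse2ndOrderCoriolis(false);
  p->setGyroscopeCovariance(1e-4 * I_3x3);      // inherited from PreintegratedRotationParams
}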

View File

@ -42,7 +42,7 @@ TEST( Rot3AttitudeFactor, Constructor ) {
// Create a linearization point at the zero-error point
Rot3 nRb;
EXPECT(assert_equal(zero(2),factor.evaluateError(nRb),1e-5));
EXPECT(assert_equal((Vector) Z_2x1,factor.evaluateError(nRb),1e-5));
// Calculate numerical derivatives
Matrix expectedH = numericalDerivative11<Vector,Rot3>(
@ -75,7 +75,7 @@ TEST( Pose3AttitudeFactor, Constructor ) {
// Create a linearization point at the zero-error point
Pose3 T(Rot3(), Point3(-5.0, 8.0, -11.0));
EXPECT(assert_equal(zero(2),factor.evaluateError(T),1e-5));
EXPECT(assert_equal((Vector) Z_2x1,factor.evaluateError(T),1e-5));
// Calculate numerical derivatives
Matrix expectedH = numericalDerivative11<Vector,Pose3>(

View File

@ -58,7 +58,7 @@ TEST( GPSFactor, Constructor ) {
// Create a linearization point at zero error
Pose3 T(Rot3::RzRyRx(0.15, -0.30, 0.45), Point3(E, N, U));
EXPECT(assert_equal(zero(3),factor.evaluateError(T),1e-5));
EXPECT(assert_equal(Z_3x1,factor.evaluateError(T),1e-5));
// Calculate numerical derivatives
Matrix expectedH = numericalDerivative11<Vector,Pose3>(
@ -87,7 +87,7 @@ TEST( GPSFactor2, Constructor ) {
// Create a linearization point at zero error
NavState T(Rot3::RzRyRx(0.15, -0.30, 0.45), Point3(E, N, U), Vector3::Zero());
EXPECT(assert_equal(zero(3),factor.evaluateError(T),1e-5));
EXPECT(assert_equal(Z_3x1,factor.evaluateError(T),1e-5));
// Calculate numerical derivatives
Matrix expectedH = numericalDerivative11<Vector,NavState>(

View File

@ -71,19 +71,19 @@ TEST( MagFactor, Factors ) {
// MagFactor
MagFactor f(1, measured, s, dir, bias, model);
EXPECT( assert_equal(zero(3),f.evaluateError(theta,H1),1e-5));
EXPECT( assert_equal(Z_3x1,f.evaluateError(theta,H1),1e-5));
EXPECT( assert_equal((Matrix)numericalDerivative11<Vector,Rot2> //
(boost::bind(&MagFactor::evaluateError, &f, _1, none), theta), H1, 1e-7));
// MagFactor1
MagFactor1 f1(1, measured, s, dir, bias, model);
EXPECT( assert_equal(zero(3),f1.evaluateError(nRb,H1),1e-5));
EXPECT( assert_equal(Z_3x1,f1.evaluateError(nRb,H1),1e-5));
EXPECT( assert_equal(numericalDerivative11<Vector,Rot3> //
(boost::bind(&MagFactor1::evaluateError, &f1, _1, none), nRb), H1, 1e-7));
// MagFactor2
MagFactor2 f2(1, 2, measured, nRb, model);
EXPECT( assert_equal(zero(3),f2.evaluateError(scaled,bias,H1,H2),1e-5));
EXPECT( assert_equal(Z_3x1,f2.evaluateError(scaled,bias,H1,H2),1e-5));
EXPECT( assert_equal(numericalDerivative11<Vector,Point3> //
(boost::bind(&MagFactor2::evaluateError, &f2, _1, bias, none, none), scaled),//
H1, 1e-7));
@ -93,7 +93,7 @@ TEST( MagFactor, Factors ) {
// MagFactor2
MagFactor3 f3(1, 2, 3, measured, nRb, model);
EXPECT(assert_equal(zero(3),f3.evaluateError(s,dir,bias,H1,H2,H3),1e-5));
EXPECT(assert_equal(Z_3x1,f3.evaluateError(s,dir,bias,H1,H2,H3),1e-5));
EXPECT(assert_equal((Matrix)numericalDerivative11<Vector,double> //
(boost::bind(&MagFactor3::evaluateError, &f3, _1, dir, bias, none, none, none), s),//
H1, 1e-7));

View File

@ -142,12 +142,12 @@ public:
const size_t nj = traits<T>::GetDimension(feasible_);
if (allow_error_) {
if (H)
*H = eye(nj); // FIXME: this is not the right linearization for nonlinear compare
*H = Matrix::Identity(nj,nj); // FIXME: this is not the right linearization for nonlinear compare
return traits<T>::Local(xj,feasible_);
} else if (compare_(feasible_, xj)) {
if (H)
*H = eye(nj);
return zero(nj); // set error to zero if equal
*H = Matrix::Identity(nj,nj);
return Vector::Zero(nj); // set error to zero if equal
} else {
if (H)
throw std::invalid_argument(
@ -249,7 +249,7 @@ public:
Vector evaluateError(const X& x1,
boost::optional<Matrix&> H = boost::none) const {
if (H)
(*H) = eye(traits<X>::GetDimension(x1));
(*H) = Matrix::Identity(traits<X>::GetDimension(x1),traits<X>::GetDimension(x1));
// manifold equivalent of h(x)-z -> log(z,h(x))
return traits<X>::Local(value_,x1);
}
@ -322,8 +322,8 @@ public:
Vector evaluateError(const X& x1, const X& x2, boost::optional<Matrix&> H1 =
boost::none, boost::optional<Matrix&> H2 = boost::none) const {
static const size_t p = traits<X>::dimension;
if (H1) *H1 = -eye(p);
if (H2) *H2 = eye(p);
if (H1) *H1 = -Matrix::Identity(p,p);
if (H2) *H2 = Matrix::Identity(p,p);
return traits<X>::Local(x1,x2);
}

View File

@ -313,7 +313,7 @@ public:
return evaluateError(x1);
}
} else {
return zero(this->dim());
return Vector::Zero(this->dim());
}
}
@ -388,7 +388,7 @@ public:
return evaluateError(x1, x2);
}
} else {
return zero(this->dim());
return Vector::Zero(this->dim());
}
}
@ -463,7 +463,7 @@ public:
else
return evaluateError(x.at<X1>(keys_[0]), x.at<X2>(keys_[1]), x.at<X3>(keys_[2]));
} else {
return zero(this->dim());
return Vector::Zero(this->dim());
}
}
@ -543,7 +543,7 @@ public:
else
return evaluateError(x.at<X1>(keys_[0]), x.at<X2>(keys_[1]), x.at<X3>(keys_[2]), x.at<X4>(keys_[3]));
} else {
return zero(this->dim());
return Vector::Zero(this->dim());
}
}
@ -627,7 +627,7 @@ public:
else
return evaluateError(x.at<X1>(keys_[0]), x.at<X2>(keys_[1]), x.at<X3>(keys_[2]), x.at<X4>(keys_[3]), x.at<X5>(keys_[4]));
} else {
return zero(this->dim());
return Vector::Zero(this->dim());
}
}
@ -715,7 +715,7 @@ public:
else
return evaluateError(x.at<X1>(keys_[0]), x.at<X2>(keys_[1]), x.at<X3>(keys_[2]), x.at<X4>(keys_[3]), x.at<X5>(keys_[4]), x.at<X6>(keys_[5]));
} else {
return zero(this->dim());
return Vector::Zero(this->dim());
}
}

View File

@ -269,25 +269,114 @@ namespace gtsam {
return filter(key_value.key);
}
/* ************************************************************************* */
template<typename ValueType>
const ValueType& Values::at(Key j) const {
// Find the item
KeyValueMap::const_iterator item = values_.find(j);
/* ************************************************************************* */
// Throw exception if it does not exist
if(item == values_.end())
throw ValuesKeyDoesNotExist("retrieve", j);
namespace internal {
// Check the type and throw exception if incorrect
const Value& value = *item->second;
try {
return dynamic_cast<const GenericValue<ValueType>&>(value).value();
} catch (std::bad_cast &) {
// NOTE(abe): clang warns about potential side effects if done in typeid
const Value* value = item->second;
throw ValuesIncorrectType(j, typeid(*value), typeid(ValueType));
}
// Generic version, partially specialized below for various Eigen Matrix types
template<typename ValueType>
struct handle {
ValueType operator()(Key j, const gtsam::Value * const pointer) {
try {
// value returns a const ValueType&, and the return makes a copy !!!!!
return dynamic_cast<const GenericValue<ValueType>&>(*pointer).value();
} catch (std::bad_cast &) {
throw ValuesIncorrectType(j, typeid(*pointer), typeid(ValueType));
}
}
};
// Handle dynamic vectors
template<>
struct handle<Eigen::Matrix<double, -1, 1> > {
Eigen::Matrix<double, -1, 1> operator()(Key j,
const gtsam::Value * const pointer) {
try {
// value returns a const Vector&, and the return makes a copy !!!!!
return dynamic_cast<const GenericValue<Eigen::Matrix<double, -1, 1> >&>(*pointer).value();
} catch (std::bad_cast &) {
// If a fixed vector was stored, we end up here as well.
throw ValuesIncorrectType(j, typeid(*pointer),
typeid(Eigen::Matrix<double, -1, 1>));
}
}
};
// Handle dynamic matrices
template<int N>
struct handle<Eigen::Matrix<double, -1, N> > {
Eigen::Matrix<double, -1, N> operator()(Key j,
const gtsam::Value * const pointer) {
try {
// value returns a const Matrix&, and the return makes a copy !!!!!
return dynamic_cast<const GenericValue<Eigen::Matrix<double, -1, N> >&>(*pointer).value();
} catch (std::bad_cast &) {
// If a fixed matrix was stored, we end up here as well.
throw ValuesIncorrectType(j, typeid(*pointer),
typeid(Eigen::Matrix<double, -1, N>));
}
}
};
// Request for a fixed vector
// TODO(jing): is this piece of code really needed ???
template<int M>
struct handle<Eigen::Matrix<double, M, 1> > {
Eigen::Matrix<double, M, 1> operator()(Key j,
const gtsam::Value * const pointer) {
try {
// value returns a const VectorM&, and the return makes a copy !!!!!
return dynamic_cast<const GenericValue<Eigen::Matrix<double, M, 1> >&>(*pointer).value();
} catch (std::bad_cast &) {
// Check if a dynamic vector was stored
Matrix A = handle<Eigen::VectorXd>()(j, pointer); // will throw if not....
// Yes: check size, and throw if not a match
if (A.rows() != M || A.cols() != 1)
throw NoMatchFoundForFixed(M, 1, A.rows(), A.cols());
else
// This is not a copy because of RVO
return A;
}
}
};
// Request for a fixed matrix
template<int M, int N>
struct handle<Eigen::Matrix<double, M, N> > {
Eigen::Matrix<double, M, N> operator()(Key j,
const gtsam::Value * const pointer) {
try {
// value returns a const MatrixMN&, and the return makes a copy !!!!!
return dynamic_cast<const GenericValue<Eigen::Matrix<double, M, N> >&>(*pointer).value();
} catch (std::bad_cast &) {
// Check if a dynamic matrix was stored
Matrix A = handle<Eigen::MatrixXd>()(j, pointer); // will throw if not....
// Yes: check size, and throw if not a match
if (A.rows() != M || A.cols() != N)
throw NoMatchFoundForFixed(M, N, A.rows(), A.cols());
else
// This is not a copy because of RVO
return A;
}
}
};
} // internal
/* ************************************************************************* */
template<typename ValueType>
ValueType Values::at(Key j) const {
// Find the item
KeyValueMap::const_iterator item = values_.find(j);
// Throw exception if it does not exist
if(item == values_.end())
throw ValuesKeyDoesNotExist("at", j);
// Check the type and throw exception if incorrect
return internal::handle<ValueType>()(j,item->second);
}
/* ************************************************************************* */
@ -312,16 +401,48 @@ namespace gtsam {
}
/* ************************************************************************* */
// wrap all fixed in dynamics when insert and update
namespace internal {
// general type
template<typename ValueType>
struct handle_wrap {
inline gtsam::GenericValue<ValueType> operator()(Key j, const ValueType& val) {
return gtsam::GenericValue<ValueType>(val);
}
};
// when insert/update a fixed size vector: convert to dynamic size
template<int M>
struct handle_wrap<Eigen::Matrix<double, M, 1> > {
inline gtsam::GenericValue<Eigen::Matrix<double, -1, 1> > operator()(
Key j, const Eigen::Matrix<double, M, 1>& val) {
return gtsam::GenericValue<Eigen::Matrix<double, -1, 1> >(val);
}
};
// when insert/update a fixed size matrix: convert to dynamic size
template<int M, int N>
struct handle_wrap<Eigen::Matrix<double, M, N> > {
inline gtsam::GenericValue<Eigen::Matrix<double, -1, -1> > operator()(
Key j, const Eigen::Matrix<double, M, N>& val) {
return gtsam::GenericValue<Eigen::Matrix<double, -1, -1> >(val);
}
};
}
// insert a templated value
template<typename ValueType>
void Values::insert(Key j, const ValueType& val) {
insert(j, static_cast<const Value&>(GenericValue<ValueType>(val)));
}
void Values::insert(Key j, const ValueType& val) {
insert(j, static_cast<const Value&>(internal::handle_wrap<ValueType>()(j, val)));
}
// update with templated value
template <typename ValueType>
void Values::update(Key j, const ValueType& val) {
update(j, static_cast<const Value&>(GenericValue<ValueType >(val)));
update(j, static_cast<const Value&>(internal::handle_wrap<ValueType>()(j, val)));
}
}
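
Note (not part of the diff): a hedged sketch of the net effect of handle_wrap on insert and handle on at<>, using only the Values API touched in this change; key 0 and the vector contents are illustrative only.

#include <gtsam/base/Vector.h>
#include <gtsam/nonlinear/Values.h>

void valuesSketch() {  // hypothetical helper, illustration only
  using namespace gtsam;
  Values values;
  values.insert(0, Vector3(1.0, 2.0, 3.0));  // fixed-size input is stored as a dynamic Vector
  Vector  vDyn = values.at<Vector>(0);       // dynamic read works
  Vector3 vFix = values.at<Vector3>(0);      // fixed read of the matching size works (returned by value)
  // values.at<Vector2>(0);                  // a mismatched fixed size throws NoMatchFoundForFixed
  (void)vDyn; (void)vFix;
}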

View File

@ -25,8 +25,6 @@
#include <gtsam/nonlinear/Values.h>
#include <gtsam/linear/VectorValues.h>
#include <list>
#include <boost/foreach.hpp>
#ifdef __GNUC__
#pragma GCC diagnostic push
@ -38,6 +36,9 @@
#endif
#include <boost/iterator/transform_iterator.hpp>
#include <list>
#include <sstream>
using namespace std;
namespace gtsam {
@ -112,24 +113,6 @@ namespace gtsam {
return result;
}
/* ************************************************************************* */
Vector Values::atFixed(Key j, size_t n) {
switch (n) {
case 1: return at<Vector1>(j);
case 2: return at<Vector2>(j);
case 3: return at<Vector3>(j);
case 4: return at<Vector4>(j);
case 5: return at<Vector5>(j);
case 6: return at<Vector6>(j);
case 7: return at<Vector7>(j);
case 8: return at<Vector8>(j);
case 9: return at<Vector9>(j);
default:
throw runtime_error(
"Values::at fixed size can only handle n in 1..9");
}
}
/* ************************************************************************* */
const Value& Values::at(Key j) const {
// Find the item
@ -148,24 +131,6 @@ namespace gtsam {
throw ValuesKeyAlreadyExists(j);
}
/* ************************************************************************* */
void Values::insertFixed(Key j, const Vector& v, size_t n) {
switch (n) {
case 1: insert<Vector1>(j,v); break;
case 2: insert<Vector2>(j,v); break;
case 3: insert<Vector3>(j,v); break;
case 4: insert<Vector4>(j,v); break;
case 5: insert<Vector5>(j,v); break;
case 6: insert<Vector6>(j,v); break;
case 7: insert<Vector7>(j,v); break;
case 8: insert<Vector8>(j,v); break;
case 9: insert<Vector9>(j,v); break;
default:
throw runtime_error(
"Values::insert fixed size can only handle n in 1..9");
}
}
/* ************************************************************************* */
void Values::insert(const Values& values) {
for(const_iterator key_value = values.begin(); key_value != values.end(); ++key_value) {
@ -269,4 +234,18 @@ namespace gtsam {
return message_.c_str();
}
/* ************************************************************************* */
const char* NoMatchFoundForFixed::what() const throw() {
if(message_.empty()) {
ostringstream oss;
oss
<< "Attempting to retrieve fixed-size matrix with dimensions " //
<< M1_ << "x" << N1_
<< ", but found dynamic Matrix with mismatched dimensions " //
<< M2_ << "x" << N2_;
message_ = oss.str();
}
return message_.c_str();
}
}

View File

@ -168,16 +168,13 @@ namespace gtsam {
/** Retrieve a variable by key \c j. The type of the value associated with
* this key is supplied as a template argument to this function.
* @param j Retrieve the value associated with this key
* @tparam Value The type of the value stored with this key, this method
* throws DynamicValuesIncorrectType if this requested type is not correct.
* @return A const reference to the stored value
* @tparam ValueType The type of the value stored with this key, this method
* Throws DynamicValuesIncorrectType if this requested type is not correct.
* Dynamic matrices/vectors can be retrieved as fixed-size, but not vice-versa.
* @return The stored value
*/
template<typename ValueType>
const ValueType& at(Key j) const;
/// Special version for small fixed size vectors, for matlab/python
/// throws truntime error if n<1 || n>9
Vector atFixed(Key j, size_t n);
ValueType at(Key j) const;
/// version for double
double atDouble(size_t key) const { return at<double>(key);}
@ -259,10 +256,6 @@ namespace gtsam {
template <typename ValueType>
void insert(Key j, const ValueType& val);
/// Special version for small fixed size vectors, for matlab/python
/// throws truntime error if n<1 || n>9
void insertFixed(Key j, const Vector& v, size_t n);
/// version for double
void insertDouble(Key j, double c) { insert<double>(j,c); }
@ -500,6 +493,28 @@ namespace gtsam {
}
};
/* ************************************************************************* */
class GTSAM_EXPORT NoMatchFoundForFixed: public std::exception {
protected:
const size_t M1_, N1_;
const size_t M2_, N2_;
private:
mutable std::string message_;
public:
NoMatchFoundForFixed(size_t M1, size_t N1, size_t M2, size_t N2) throw () :
M1_(M1), N1_(N1), M2_(M2), N2_(N2) {
}
virtual ~NoMatchFoundForFixed() throw () {
}
virtual const char* what() const throw ();
};
/* ************************************************************************* */
/// traits
template<>
struct traits<Values> : public Testable<Values> {

View File

@ -25,7 +25,7 @@ Expression<T> compose(const Expression<T>& t1, const Expression<T>& t2) {
}
// Some typedefs
typedef Expression<double> double_;
typedef Expression<double> Double_;
typedef Expression<Vector1> Vector1_;
typedef Expression<Vector2> Vector2_;
typedef Expression<Vector3> Vector3_;

View File

@ -17,7 +17,7 @@
* @brief unit tests for Block Automatic Differentiation
*/
#include <gtsam/nonlinear/Expression.h>
#include <gtsam/nonlinear/expressions.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Point3.h>
@ -32,9 +32,7 @@ using boost::assign::map_list_of;
using namespace std;
using namespace gtsam;
typedef Expression<double> double_;
typedef Expression<Point3> Point3_;
typedef Expression<Vector3> Vector3_;
typedef Expression<Pose3> Pose3_;
typedef Expression<Rot3> Rot3_;
@ -101,7 +99,7 @@ TEST(Expression, Unary1) {
}
TEST(Expression, Unary2) {
using namespace unary;
double_ e(f2, p);
Double_ e(f2, p);
EXPECT(expected == e.keys());
}
@ -156,7 +154,7 @@ Point3_ p_cam(x, &Pose3::transform_to, p);
// Check that creating an expression to double compiles
TEST(Expression, BinaryToDouble) {
using namespace binary;
double_ p_cam(doubleF, x, p);
Double_ p_cam(doubleF, x, p);
}
/* ************************************************************************* */
@ -269,11 +267,11 @@ Rot3 composeThree(const Rot3& R1, const Rot3& R2, const Rot3& R3, OptionalJacobi
OptionalJacobian<3, 3> H2, OptionalJacobian<3, 3> H3) {
// return dummy derivatives (not correct, but that's ok for testing here)
if (H1)
*H1 = eye(3);
*H1 = I_3x3;
if (H2)
*H2 = eye(3);
*H2 = I_3x3;
if (H3)
*H3 = eye(3);
*H3 = I_3x3;
return R1 * (R2 * R3);
}
@ -372,8 +370,8 @@ TEST(Expression, TripleSum) {
/* ************************************************************************* */
TEST(Expression, SumOfUnaries) {
const Key key(67);
const double_ norm_(&gtsam::norm, Point3_(key));
const double_ sum_ = norm_ + norm_;
const Double_ norm_(&gtsam::norm, Point3_(key));
const Double_ sum_ = norm_ + norm_;
Values values;
values.insert<Point3>(key, Point3(6, 0, 0));
@ -391,7 +389,7 @@ TEST(Expression, SumOfUnaries) {
TEST(Expression, UnaryOfSum) {
const Key key1(42), key2(67);
const Point3_ sum_ = Point3_(key1) + Point3_(key2);
const double_ norm_(&gtsam::norm, sum_);
const Double_ norm_(&gtsam::norm, sum_);
map<Key, int> actual_dims, expected_dims = map_list_of<Key, int>(key1, 3)(key2, 3);
norm_.dims(actual_dims);

View File

@ -234,7 +234,7 @@ TEST( testLinearContainerFactor, creation ) {
// create a linear factor
SharedDiagonal model = noiseModel::Unit::Create(2);
JacobianFactor::shared_ptr linear_factor(new JacobianFactor(
l3, eye(2,2), l5, 2.0 * eye(2,2), zero(2), model));
l3, I_2x2, l5, 2.0 * I_2x2, Z_2x1, model));
// create a set of values - build with full set of values
gtsam::Values full_values, exp_values;

View File

@ -477,13 +477,59 @@ TEST(Values, Destructors) {
}
/* ************************************************************************* */
TEST(Values, FixedSize) {
TEST(Values, VectorDynamicInsertFixedRead) {
Values values;
Vector v(3); v << 5.0, 6.0, 7.0;
values.insertFixed(key1, v, 3);
values.insert(key1, v);
Vector3 expected(5.0, 6.0, 7.0);
CHECK(assert_equal((Vector)expected, values.at<Vector3>(key1)));
CHECK_EXCEPTION(values.insertFixed(key1, v, 12),runtime_error);
Vector3 actual = values.at<Vector3>(key1);
CHECK(assert_equal(expected, actual));
CHECK_EXCEPTION(values.at<Vector7>(key1), exception);
}
/* ************************************************************************* */
TEST(Values, VectorDynamicInsertDynamicRead) {
Values values;
Vector v(3); v << 5.0, 6.0, 7.0;
values.insert(key1, v);
Vector expected(3); expected << 5.0, 6.0, 7.0;
Vector actual = values.at<Vector>(key1);
LONGS_EQUAL(3, actual.rows());
LONGS_EQUAL(1, actual.cols());
CHECK(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST(Values, VectorFixedInsertFixedRead) {
Values values;
Vector3 v; v << 5.0, 6.0, 7.0;
values.insert(key1, v);
Vector3 expected; expected << 5.0, 6.0, 7.0;
Vector3 actual = values.at<Vector3>(key1);
CHECK(assert_equal(expected, actual));
CHECK_EXCEPTION(values.at<Vector7>(key1), exception);
}
/* ************************************************************************* */
TEST(Values, VectorFixedInsertDynamicRead) {
Values values;
Vector3 v; v << 5.0, 6.0, 7.0;
values.insert(key1, v);
Vector expected(3); expected << 5.0, 6.0, 7.0;
Vector actual = values.at<Vector>(key1);
LONGS_EQUAL(3, actual.rows());
LONGS_EQUAL(1, actual.cols());
CHECK(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST(Values, MatrixDynamicInsertFixedRead) {
Values values;
Matrix v(1,3); v << 5.0, 6.0, 7.0;
values.insert(key1, v);
Vector3 expected(5.0, 6.0, 7.0);
CHECK(assert_equal((Vector)expected, values.at<Matrix13>(key1)));
CHECK_EXCEPTION(values.at<Matrix23>(key1), exception);
}
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }

View File

@ -129,7 +129,7 @@ public:
if (H1) *H1 = JacobianC::Zero();
if (H2) *H2 = JacobianL::Zero();
// TODO warn if verbose output asked for
return zero(2);
return Z_2x1;
}
}
@ -266,13 +266,13 @@ public:
return reprojError.vector();
}
catch( CheiralityException& e) {
if (H1) *H1 = zeros(2, 6);
if (H2) *H2 = zeros(2, 3);
if (H3) *H3 = zeros(2, DimK);
if (H1) *H1 = Matrix::Zero(2, 6);
if (H2) *H2 = Matrix::Zero(2, 3);
if (H3) *H3 = Matrix::Zero(2, DimK);
std::cout << e.what() << ": Landmark "<< DefaultKeyFormatter(this->key2())
<< " behind Camera " << DefaultKeyFormatter(this->key1()) << std::endl;
}
return zero(2);
return Z_2x1;
}
/** return the measured */

Some files were not shown because too many files have changed in this diff.