From ef316754313c4b0c1dcdcba8724a62814a4613a7 Mon Sep 17 00:00:00 2001
From: p-zach
Date: Thu, 3 Apr 2025 16:38:20 -0400
Subject: [PATCH 01/21] Script to generate .ipynb doc files

---
 doc/generating/gpt_generate.py | 113 +++++++++++++++++++++++++++++++++
 1 file changed, 113 insertions(+)
 create mode 100644 doc/generating/gpt_generate.py

diff --git a/doc/generating/gpt_generate.py b/doc/generating/gpt_generate.py
new file mode 100644
index 000000000..3fae00978
--- /dev/null
+++ b/doc/generating/gpt_generate.py
@@ -0,0 +1,113 @@
+import argparse
+import os
+import time
+
+import nbformat as nbf
+import requests
+from openai import OpenAI
+
+_output_folder = "output"
+_gtsam_gh_base = "https://raw.githubusercontent.com/borglab/gtsam/refs/heads/develop/"
+_asst_id = "asst_na7wYBtXyGU0x5t2RdcnpxzP"
+_request_text = "Document the file found at {}."
+
+
+def is_url_valid(url):
+    """Verify that the supplied URL does not return a 404."""
+    try:
+        response = requests.head(url, allow_redirects=True, timeout=10)
+        return response.status_code != 404
+    except requests.RequestException:
+        return False
+
+
+def save_ipynb(text: str, file_path: str):
+    """Save text to a single Markdown cell in a new .ipynb file."""
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    output_dir = os.path.join(script_dir, _output_folder)
+    os.makedirs(output_dir, exist_ok=True)
+    output_file = os.path.splitext(os.path.basename(file_path))[0] + ".ipynb"
+    output_full_path = os.path.join(output_dir, output_file)
+
+    nb = nbf.v4.new_notebook()
+    new_cell = nbf.v4.new_markdown_cell(text)
+    nb['cells'].append(new_cell)
+
+    with open(output_full_path, 'w', encoding='utf-8') as file:
+        nbf.write(nb, file)
+
+    return output_file
+
+
+def generate_ipynb(file_path: str, openai_client):
+    """Generate an interactive Python notebook for the given GTSAM header file.
+
+    Args:
+        file_path (str): The fully-qualified path from the root of the gtsam
+            repository to the header file that will be documented.
+        openai_client (openai.OpenAI): The OpenAI client to use.
+    """
+    # Create the URL to get the header file from.
+    url = _gtsam_gh_base + file_path
+
+    if not is_url_valid(url):
+        print(f"{url} was not found on the server, or an error occurred.")
+        return
+
+    print(f"Sending request to OpenAI to document {url}.")
+
+    # Create a new thread and send the request
+    thread = openai_client.beta.threads.create()
+    openai_client.beta.threads.messages.create(
+        thread_id=thread.id, role="user", content=_request_text.format(url))
+
+    run = openai_client.beta.threads.runs.create(thread_id=thread.id,
+                                                 assistant_id=_asst_id)
+
+    print("Waiting for the assistant to process the request...")
+
+    # Wait for the request to be processed; stop polling if the run ends in a
+    # terminal state other than "completed" so a failed run cannot hang here.
+    while True:
+        run_status = openai_client.beta.threads.runs.retrieve(
+            thread_id=thread.id, run_id=run.id)
+        if run_status.status == "completed":
+            break
+        if run_status.status in ("failed", "cancelled", "expired"):
+            print(f"Run ended with status '{run_status.status}'; skipping {file_path}.")
+            return
+        time.sleep(2)
+
+    print("Request processed. Retrieving response...")
+
+    # Fetch messages
+    messages = openai_client.beta.threads.messages.list(thread_id=thread.id)
+    # Strip a wrapping ```markdown ... ``` fence if present. Note that
+    # str.strip('markdown') would strip any of those characters from both ends
+    # of the content, so the fence tag is removed with removeprefix instead.
+    text = messages.data[0].content[0].text.value.strip('`')
+    text = text.removeprefix('markdown').strip()
+
+    # Write output to file
+    output_filename = save_ipynb(text, file_path)
+
+    print(f"Response retrieved. Find output in {output_filename}.")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        prog="gpt_generate",
+        description=
+        "Generates .ipynb documentation files given paths to GTSAM header files."
+ ) + parser.add_argument( + "file_paths", + nargs='+', + help="The paths to the header files from the root gtsam directory.") + args = parser.parse_args() + + # Retrieves API key from environment variable OPENAI_API_KEY + client = OpenAI() + + for file_path in args.file_paths: + generate_ipynb(file_path, client) From f4171711756b92b7cb256b95532af483be0d6c0d Mon Sep 17 00:00:00 2001 From: p-zach Date: Thu, 3 Apr 2025 16:38:54 -0400 Subject: [PATCH 02/21] First run of generating docs; most of nonlinear --- .../output/BatchFixedLagSmoother.ipynb | 54 ++++++++++++++ doc/generating/output/CustomFactor.ipynb | 15 ++++ doc/generating/output/DoglegOptimizer.ipynb | 70 ++++++++++++++++++ doc/generating/output/ExpressionFactor.ipynb | 67 +++++++++++++++++ .../output/ExpressionFactorGraph.ipynb | 59 +++++++++++++++ .../output/ExtendedKalmanFilter.ipynb | 74 +++++++++++++++++++ doc/generating/output/FixedLagSmoother.ipynb | 63 ++++++++++++++++ .../output/GaussNewtonOptimizer.ipynb | 66 +++++++++++++++++ doc/generating/output/GncOptimizer.ipynb | 70 ++++++++++++++++++ doc/generating/output/ISAM2.ipynb | 15 ++++ .../output/LevenbergMarquardtOptimizer.ipynb | 73 ++++++++++++++++++ .../output/LinearContainerFactor.ipynb | 65 ++++++++++++++++ .../NonlinearConjugateGradientOptimizer.ipynb | 66 +++++++++++++++++ doc/generating/output/NonlinearFactor.ipynb | 15 ++++ .../output/NonlinearFactorGraph.ipynb | 66 +++++++++++++++++ doc/generating/output/NonlinearISAM.ipynb | 15 ++++ .../output/NonlinearOptimizer.ipynb | 66 +++++++++++++++++ doc/generating/output/PriorFactor.ipynb | 55 ++++++++++++++ doc/generating/output/WhiteNoiseFactor.ipynb | 66 +++++++++++++++++ 19 files changed, 1040 insertions(+) create mode 100644 doc/generating/output/BatchFixedLagSmoother.ipynb create mode 100644 doc/generating/output/CustomFactor.ipynb create mode 100644 doc/generating/output/DoglegOptimizer.ipynb create mode 100644 doc/generating/output/ExpressionFactor.ipynb create mode 100644 doc/generating/output/ExpressionFactorGraph.ipynb create mode 100644 doc/generating/output/ExtendedKalmanFilter.ipynb create mode 100644 doc/generating/output/FixedLagSmoother.ipynb create mode 100644 doc/generating/output/GaussNewtonOptimizer.ipynb create mode 100644 doc/generating/output/GncOptimizer.ipynb create mode 100644 doc/generating/output/ISAM2.ipynb create mode 100644 doc/generating/output/LevenbergMarquardtOptimizer.ipynb create mode 100644 doc/generating/output/LinearContainerFactor.ipynb create mode 100644 doc/generating/output/NonlinearConjugateGradientOptimizer.ipynb create mode 100644 doc/generating/output/NonlinearFactor.ipynb create mode 100644 doc/generating/output/NonlinearFactorGraph.ipynb create mode 100644 doc/generating/output/NonlinearISAM.ipynb create mode 100644 doc/generating/output/NonlinearOptimizer.ipynb create mode 100644 doc/generating/output/PriorFactor.ipynb create mode 100644 doc/generating/output/WhiteNoiseFactor.ipynb diff --git a/doc/generating/output/BatchFixedLagSmoother.ipynb b/doc/generating/output/BatchFixedLagSmoother.ipynb new file mode 100644 index 000000000..079489abf --- /dev/null +++ b/doc/generating/output/BatchFixedLagSmoother.ipynb @@ -0,0 +1,54 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "283174f8", + "metadata": {}, + "source": [ + "# BatchFixedLagSmoother Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and requires human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `BatchFixedLagSmoother` class in 
GTSAM implements fixed-lag smoothing for nonlinear factor graphs: it maintains a sliding window of the most recent variables and marginalizes out older variables. This is particularly useful in real-time applications where memory and computational efficiency are critical.\n",
+    "\n",
+    "## Key Functionalities\n",
+    "\n",
+    "### Smoothing and Optimization\n",
+    "\n",
+    "- **update**: This method is the core of the `BatchFixedLagSmoother`. It processes new factors and variables, updating the current estimate of the state. The update method also manages the marginalization of variables that fall outside the fixed lag window.\n",
+    "\n",
+    "### Factor Graph Management\n",
+    "\n",
+    "- **marginalize**: This function handles the marginalization of variables that are no longer within the fixed lag window. Marginalization is a crucial step in maintaining the size of the factor graph, ensuring that only relevant variables are kept for optimization.\n",
+    "\n",
+    "### Parameter Management\n",
+    "\n",
+    "- **Params**: The `Params` structure within the class allows users to configure various settings for the smoother, such as the lag duration and optimization parameters. This provides flexibility in tuning the smoother for specific applications.\n",
+    "\n",
+    "## Mathematical Formulation\n",
+    "\n",
+    "The `BatchFixedLagSmoother` operates on the principle of fixed-lag smoothing, where the objective is to estimate the state $\mathbf{x}_t$ given all measurements up to time $t$, but only retaining a fixed window of recent states. The optimization problem can be expressed as:\n",
+    "\n",
+    "$$\n",
+    "\min_{\mathbf{x}_{t-L:t}} \sum_{i=1}^{N} \| \mathbf{h}_i(\mathbf{x}_{t-L:t}) - \mathbf{z}_i \|^2\n",
+    "$$\n",
+    "\n",
+    "where $L$ is the fixed lag, $\mathbf{h}_i$ are the measurement functions, and $\mathbf{z}_i$ are the measurements.\n",
+    "\n",
+    "## Usage Considerations\n",
+    "\n",
+    "- **Real-time Applications**: The `BatchFixedLagSmoother` is ideal for applications requiring real-time processing, such as robotics and autonomous vehicles, where the computational burden must be managed efficiently.\n",
+    "- **Configuration**: Proper configuration of the lag duration and optimization parameters is essential for optimal performance. Users should experiment with different settings to achieve the desired balance between accuracy and computational load.\n",
+    "\n",
+    "## Conclusion\n",
+    "\n",
+    "The `BatchFixedLagSmoother` class provides a robust framework for fixed-lag smoothing in nonlinear systems. Its ability to efficiently manage the factor graph and perform real-time updates makes it a valuable tool in various applications requiring dynamic state estimation."
+   ]
+  }
+ ],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/doc/generating/output/CustomFactor.ipynb b/doc/generating/output/CustomFactor.ipynb
new file mode 100644
index 000000000..e361df53f
--- /dev/null
+++ b/doc/generating/output/CustomFactor.ipynb
@@ -0,0 +1,37 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "736fa438",
+   "metadata": {},
+   "source": [
+    "I'm unable to access external URLs directly. However, if you upload the file `CustomFactor.h`, I can help generate the documentation for it.",
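+    "\n",
+    "\n",
+    "## Example Usage\n",
+    "\n",
+    "Since the generated documentation is missing, the following minimal sketch shows how a custom residual is typically defined through the Python wrapper's `gtsam.CustomFactor`. The key, noise model, and residual function are illustrative assumptions; verify the exact signatures against the installed GTSAM version.\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "import gtsam\n",
+    "\n",
+    "def error_func(this: gtsam.CustomFactor, values: gtsam.Values, H):\n",
+    "    # Residual pulling the 1-D vector variable x0 toward the target [1.0].\n",
+    "    key = this.keys()[0]\n",
+    "    x = values.atVector(key)\n",
+    "    if H is not None:  # fill the Jacobian list only when requested\n",
+    "        H[0] = np.eye(1)\n",
+    "    return x - np.array([1.0])\n",
+    "\n",
+    "noise = gtsam.noiseModel.Isotropic.Sigma(1, 0.1)\n",
+    "key = gtsam.symbol('x', 0)\n",
+    "factor = gtsam.CustomFactor(noise, gtsam.KeyVector([key]), error_func)\n",
+    "```"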
+ ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/DoglegOptimizer.ipynb b/doc/generating/output/DoglegOptimizer.ipynb new file mode 100644 index 000000000..42a100a85 --- /dev/null +++ b/doc/generating/output/DoglegOptimizer.ipynb @@ -0,0 +1,70 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f851cef5", + "metadata": {}, + "source": [ + "# DoglegOptimizer Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `DoglegOptimizer` class in GTSAM is a specialized optimization algorithm designed for solving nonlinear least squares problems. It implements the Dogleg method, which is a hybrid approach combining the steepest descent and Gauss-Newton methods. This optimizer is particularly effective for problems where the Hessian is difficult to compute or when the initial guess is far from the solution.\n", + "\n", + "## Key Features\n", + "\n", + "- **Hybrid Approach**: Combines the strengths of both the steepest descent and Gauss-Newton methods.\n", + "- **Trust Region Method**: Utilizes a trust region to determine the step size, balancing between the accuracy of Gauss-Newton and the robustness of steepest descent.\n", + "- **Efficient for Nonlinear Problems**: Designed to handle complex nonlinear least squares problems effectively.\n", + "\n", + "## Key Methods\n", + "\n", + "### Initialization and Setup\n", + "\n", + "- **Constructor**: Initializes the optimizer with default or specified parameters.\n", + "- **setDeltaInitial**: Sets the initial trust region radius, $\\Delta_0$, which influences the step size in the optimization process.\n", + "\n", + "### Optimization Process\n", + "\n", + "- **optimize**: Executes the optimization process, iteratively refining the solution to minimize the error in the nonlinear least squares problem.\n", + "- **iterate**: Performs a single iteration of the Dogleg optimization, updating the current estimate based on the trust region and the computed step.\n", + "\n", + "### Result Evaluation\n", + "\n", + "- **error**: Computes the error of the current estimate, providing a measure of how well the current solution fits the problem constraints.\n", + "- **values**: Returns the optimized values after the optimization process is complete.\n", + "\n", + "### Trust Region Management\n", + "\n", + "- **getDelta**: Retrieves the current trust region radius, $\\Delta$, which is crucial for understanding the optimizer's step size decisions.\n", + "- **setDelta**: Manually sets the trust region radius, allowing for fine-tuned control over the optimization process.\n", + "\n", + "## Mathematical Formulation\n", + "\n", + "The Dogleg method is characterized by its use of two distinct steps:\n", + "\n", + "1. **Cauchy Point**: The steepest descent direction, calculated as:\n", + " $$ p_u = -\\alpha \\nabla f(x) $$\n", + " where $\\alpha$ is a scalar step size.\n", + "\n", + "2. 
**Gauss-Newton Step**: The solution to the linearized problem, providing a more accurate but computationally expensive step:\n",
+    "   $$ p_{gn} = -(J^T J)^{-1} J^T r $$\n",
+    "   where $J$ is the Jacobian matrix and $r$ is the residual vector.\n",
+    "\n",
+    "The Dogleg step, $p_{dl}$, is a combination of these two steps, determined by the trust region radius $\Delta$.\n",
+    "\n",
+    "## Usage Considerations\n",
+    "\n",
+    "- **Initial Guess**: The performance of the Dogleg optimizer can be sensitive to the initial guess. A good initial estimate can significantly speed up convergence.\n",
+    "- **Parameter Tuning**: The choice of the initial trust region radius and other parameters can affect the convergence rate and stability of the optimization.\n",
+    "\n",
+    "The `DoglegOptimizer` is a powerful tool for solving nonlinear optimization problems, particularly when dealing with large-scale systems where computational efficiency is crucial. By leveraging the hybrid approach of the Dogleg method, it provides a robust solution capable of handling a wide range of problem complexities."
+   ]
+  }
+ ],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/doc/generating/output/ExpressionFactor.ipynb b/doc/generating/output/ExpressionFactor.ipynb
new file mode 100644
index 000000000..79a7b9018
--- /dev/null
+++ b/doc/generating/output/ExpressionFactor.ipynb
@@ -0,0 +1,67 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "59407eaf",
+   "metadata": {},
+   "source": [
+    "# ExpressionFactor Class Documentation\n",
+    "\n",
+    "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+    "\n",
+    "## Overview\n",
+    "\n",
+    "The `ExpressionFactor` class in GTSAM is a template class designed to work with factor graphs in the context of nonlinear optimization. It represents a factor that can be constructed from an expression, allowing for flexible and efficient computation of error terms in optimization problems.\n",
+    "\n",
+    "## Key Features\n",
+    "\n",
+    "- **Expression-Based Factor**: The `ExpressionFactor` class allows users to define factors based on expressions, which can represent complex mathematical relationships between variables.\n",
+    "- **Error Calculation**: It computes the error based on the difference between the predicted and observed values, typically used in least-squares optimization.\n",
+    "- **Jacobian Computation**: The class can compute the Jacobian matrix, which is essential for gradient-based optimization methods.\n",
+    "\n",
+    "## Main Methods\n",
+    "\n",
+    "### Constructor\n",
+    "\n",
+    "The `ExpressionFactor` class provides constructors that allow for the initialization of the factor with a specific expression and measurement. The constructors are designed to handle various types of expressions and measurements, making the class versatile for different applications.\n",
+    "\n",
+    "### `evaluateError`\n",
+    "\n",
+    "This method calculates the error vector for the factor. The error is typically defined as the difference between the predicted value from the expression and the actual measurement. Mathematically, this can be represented as:\n",
+    "\n",
+    "$$\n",
+    "\text{error} = \text{expression} - \text{measurement}\n",
+    "$$\n",
+    "\n",
+    "where `measurement` is the observed value, and `expression` is the predicted value based on the current estimate of the variables; for manifold-valued types the difference is computed in local coordinates.\n",
+    "\n",
+    "### `linearize`\n",
+    "\n",
+    "The `linearize` method is used to linearize the factor around a given linearization point. 
This involves computing the Jacobian matrix, which represents the partial derivatives of the error with respect to the variables. The Jacobian is crucial for iterative optimization algorithms such as Gauss-Newton or Levenberg-Marquardt.\n", + "\n", + "### `clone`\n", + "\n", + "The `clone` method creates a deep copy of the factor. This is useful when factors need to be duplicated, ensuring that changes to one copy do not affect the other.\n", + "\n", + "## Mathematical Background\n", + "\n", + "The `ExpressionFactor` class is grounded in the principles of nonlinear optimization, particularly in the context of factor graphs. Factor graphs are bipartite graphs used to represent the factorization of a function, often used in probabilistic graphical models and optimization problems.\n", + "\n", + "In the context of GTSAM, factors represent constraints or relationships between variables. The `ExpressionFactor` allows these relationships to be defined using mathematical expressions, providing a flexible and powerful tool for modeling complex systems.\n", + "\n", + "## Usage\n", + "\n", + "The `ExpressionFactor` class is typically used in scenarios where the relationships between variables can be naturally expressed as mathematical expressions. This includes applications in robotics, computer vision, and other fields where optimization problems are prevalent.\n", + "\n", + "By leveraging the power of expressions, users can define custom factors that capture the nuances of their specific problem, leading to more accurate and efficient optimization solutions.\n", + "\n", + "---\n", + "\n", + "This documentation provides a high-level overview of the `ExpressionFactor` class, highlighting its main features and methods. For detailed usage and examples, users should refer to the GTSAM library documentation and source code." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/ExpressionFactorGraph.ipynb b/doc/generating/output/ExpressionFactorGraph.ipynb new file mode 100644 index 000000000..540dc1082 --- /dev/null +++ b/doc/generating/output/ExpressionFactorGraph.ipynb @@ -0,0 +1,59 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a1c00a8c", + "metadata": {}, + "source": [ + "# ExpressionFactorGraph Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `ExpressionFactorGraph` class in GTSAM is a specialized factor graph designed to work with expressions. It extends the capabilities of a standard factor graph by allowing the incorporation of symbolic expressions, which can be particularly useful in applications requiring symbolic computation and automatic differentiation.\n", + "\n", + "## Key Features\n", + "\n", + "- **Expression Handling**: The class allows for the creation and manipulation of factors that are expressed symbolically. 
This can be advantageous in scenarios where the relationships between variables are best described using mathematical expressions.\n", + "\n", + "- **Automatic Differentiation**: By leveraging expressions, the class supports automatic differentiation, which is crucial for optimizing complex systems where derivatives are needed.\n", + "\n", + "- **Integration with GTSAM**: As part of the GTSAM library, `ExpressionFactorGraph` seamlessly integrates with other components, allowing for robust and efficient factor graph optimization.\n", + "\n", + "## Main Methods\n", + "\n", + "### Adding Factors\n", + "\n", + "- **addExpressionFactor**: This method allows the user to add a new factor to the graph based on a symbolic expression. The expression defines the relationship between the variables involved in the factor.\n", + "\n", + "### Graph Operations\n", + "\n", + "- **update**: This method updates the factor graph with new information. It recalculates the necessary components to ensure that the graph remains consistent with the added expressions.\n", + "\n", + "- **linearize**: Converts the expression-based factor graph into a linear factor graph. This is a crucial step for optimization, as many algorithms operate on linear approximations of the problem.\n", + "\n", + "### Optimization\n", + "\n", + "- **optimize**: This method runs the optimization process on the factor graph. It uses the symbolic expressions to guide the optimization, ensuring that the solution respects the relationships defined by the expressions.\n", + "\n", + "## Mathematical Foundations\n", + "\n", + "The `ExpressionFactorGraph` leverages several mathematical concepts to perform its functions:\n", + "\n", + "- **Factor Graphs**: A factor graph is a bipartite graph representing the factorization of a function. In the context of GTSAM, it is used to represent the joint probability distribution of a set of variables.\n", + "\n", + "- **Expressions**: Symbolic expressions are used to define the relationships between variables. These expressions can be differentiated and manipulated symbolically, providing flexibility and power in modeling complex systems.\n", + "\n", + "- **Automatic Differentiation**: This technique is used to compute derivatives of functions defined by expressions. It is essential for optimization algorithms that require gradient information.\n", + "\n", + "## Conclusion\n", + "\n", + "The `ExpressionFactorGraph` class is a powerful tool within the GTSAM library, offering advanced capabilities for working with symbolic expressions in factor graphs. Its integration of automatic differentiation and symbolic computation makes it particularly useful for complex optimization problems where traditional numerical methods may fall short. Users familiar with factor graphs and symbolic mathematics will find this class to be a valuable addition to their toolkit." 
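+    ,
+    "\n",
+    "\n",
+    "## Example Usage\n",
+    "\n",
+    "Expressions are a C++ template facility, and `ExpressionFactorGraph` itself is not exposed through the GTSAM Python wrapper, so the sketch below illustrates the surrounding workflow described above (add factors, linearize, optimize) using the `NonlinearFactorGraph` base class from Python; the keys, factors, and noise sigmas are illustrative assumptions.\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "import gtsam\n",
+    "\n",
+    "# A small pose graph standing in for an expression-based graph.\n",
+    "graph = gtsam.NonlinearFactorGraph()\n",
+    "prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 0.05]))\n",
+    "graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), prior_noise))\n",
+    "odom_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))\n",
+    "graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(1, 0, 0), odom_noise))\n",
+    "\n",
+    "initial = gtsam.Values()\n",
+    "initial.insert(1, gtsam.Pose2(0.1, -0.1, 0.05))\n",
+    "initial.insert(2, gtsam.Pose2(1.2, 0.1, -0.05))\n",
+    "\n",
+    "# Linearize at the initial estimate, then optimize the nonlinear problem.\n",
+    "linear_graph = graph.linearize(initial)\n",
+    "result = gtsam.LevenbergMarquardtOptimizer(graph, initial).optimize()\n",
+    "```"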
+ ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/ExtendedKalmanFilter.ipynb b/doc/generating/output/ExtendedKalmanFilter.ipynb new file mode 100644 index 000000000..9b379152a --- /dev/null +++ b/doc/generating/output/ExtendedKalmanFilter.ipynb @@ -0,0 +1,74 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "93869c17", + "metadata": {}, + "source": [ + "# ExtendedKalmanFilter Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `ExtendedKalmanFilter` class in GTSAM is a robust implementation of the Extended Kalman Filter (EKF), which is a powerful tool for estimating the state of a nonlinear dynamic system. The EKF extends the capabilities of the traditional Kalman Filter by linearizing about the current mean and covariance, making it suitable for nonlinear systems.\n", + "\n", + "## Key Features\n", + "\n", + "- **Nonlinear State Estimation**: The EKF is designed to handle systems where the state transition and observation models are nonlinear.\n", + "- **Predict and Update Cycles**: The class provides mechanisms to predict the future state and update the current state estimate based on new measurements.\n", + "- **Covariance Management**: It maintains and updates the state covariance matrix, which represents the uncertainty of the state estimate.\n", + "\n", + "## Mathematical Foundation\n", + "\n", + "The EKF operates on the principle of linearizing nonlinear functions around the current estimate. The primary equations involved in the EKF are:\n", + "\n", + "1. **State Prediction**:\n", + " $$ \\hat{x}_{k|k-1} = f(\\hat{x}_{k-1|k-1}, u_k) $$\n", + " $$ P_{k|k-1} = F_k P_{k-1|k-1} F_k^T + Q_k $$\n", + "\n", + "2. **Measurement Update**:\n", + " $$ y_k = z_k - h(\\hat{x}_{k|k-1}) $$\n", + " $$ S_k = H_k P_{k|k-1} H_k^T + R_k $$\n", + " $$ K_k = P_{k|k-1} H_k^T S_k^{-1} $$\n", + " $$ \\hat{x}_{k|k} = \\hat{x}_{k|k-1} + K_k y_k $$\n", + " $$ P_{k|k} = (I - K_k H_k) P_{k|k-1} $$\n", + "\n", + "Where:\n", + "- $f$ and $h$ are the nonlinear state transition and measurement functions, respectively.\n", + "- $F_k$ and $H_k$ are the Jacobians of $f$ and $h$.\n", + "- $Q_k$ and $R_k$ are the process and measurement noise covariance matrices.\n", + "\n", + "## Key Methods\n", + "\n", + "### Initialization\n", + "\n", + "- **Constructor**: Initializes the filter with a given initial state and covariance.\n", + "\n", + "### Prediction\n", + "\n", + "- **predict**: Advances the state estimate to the next time step using the state transition model. It computes the predicted state and updates the state covariance matrix.\n", + "\n", + "### Update\n", + "\n", + "- **update**: Incorporates a new measurement into the state estimate. It calculates the innovation, updates the state estimate, and adjusts the covariance matrix accordingly.\n", + "\n", + "### Accessors\n", + "\n", + "- **getState**: Returns the current estimated state.\n", + "- **getCovariance**: Provides the current state covariance matrix, representing the uncertainty of the estimate.\n", + "\n", + "## Usage\n", + "\n", + "The `ExtendedKalmanFilter` class is typically used in a loop where the `predict` method is called to project the state forward in time, and the `update` method is called whenever a new measurement is available. 
This cycle continues, refining the state estimate and reducing uncertainty over time.\n", + "\n", + "## Conclusion\n", + "\n", + "The `ExtendedKalmanFilter` class in GTSAM is a versatile tool for state estimation in nonlinear systems. By leveraging the power of linearization, it provides accurate and efficient estimation capabilities, making it suitable for a wide range of applications in robotics, navigation, and control systems." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/FixedLagSmoother.ipynb b/doc/generating/output/FixedLagSmoother.ipynb new file mode 100644 index 000000000..1a3fc856d --- /dev/null +++ b/doc/generating/output/FixedLagSmoother.ipynb @@ -0,0 +1,63 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "cdd2fdc5", + "metadata": {}, + "source": [ + "# FixedLagSmoother Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `FixedLagSmoother` class in GTSAM is designed for incremental smoothing and mapping in robotics and computer vision applications. It maintains a fixed-size window of the most recent states, allowing for efficient updates and marginalization of older states. This is particularly useful in scenarios where real-time performance is crucial, and memory usage needs to be controlled.\n", + "\n", + "## Key Features\n", + "\n", + "- **Incremental Updates**: The `FixedLagSmoother` allows for efficient updates as new measurements are received, making it suitable for real-time applications.\n", + "- **Fixed-Lag Smoothing**: It maintains a fixed window of recent states, which helps in managing computational resources by marginalizing out older states.\n", + "- **Nonlinear Optimization**: Utilizes nonlinear optimization techniques to refine the estimates of the states within the fixed lag window.\n", + "\n", + "## Main Methods\n", + "\n", + "### Update\n", + "\n", + "The `update` method is central to the `FixedLagSmoother` class. It incorporates new measurements and updates the state estimates within the fixed lag window. The method ensures that the estimates are consistent with the new information while maintaining computational efficiency.\n", + "\n", + "### Marginalization\n", + "\n", + "Marginalization is a key process in fixed-lag smoothing, where older states are removed from the optimization problem to keep the problem size manageable. This is done while preserving the essential information about the past states in the form of a prior.\n", + "\n", + "### Optimization\n", + "\n", + "The class employs nonlinear optimization techniques to solve the smoothing problem. The optimization process aims to minimize the error between the predicted and observed measurements, leading to refined state estimates.\n", + "\n", + "## Mathematical Formulation\n", + "\n", + "The `FixedLagSmoother` operates on the principle of minimizing a cost function that represents the sum of squared errors between the predicted and observed measurements. 
Mathematically, this can be expressed as:\n", + "\n", + "$$\n", + "\\min_x \\sum_i \\| h(x_i) - z_i \\|^2\n", + "$$\n", + "\n", + "where $h(x_i)$ is the predicted measurement, $z_i$ is the observed measurement, and $x_i$ represents the state variables within the fixed lag window.\n", + "\n", + "## Applications\n", + "\n", + "The `FixedLagSmoother` is widely used in applications such as:\n", + "\n", + "- **Simultaneous Localization and Mapping (SLAM)**: Helps in maintaining a consistent map and robot trajectory in real-time.\n", + "- **Visual-Inertial Odometry (VIO)**: Used for estimating the motion of a camera-equipped device by fusing visual and inertial data.\n", + "- **Sensor Fusion**: Combines data from multiple sensors to improve the accuracy of state estimates.\n", + "\n", + "## Conclusion\n", + "\n", + "The `FixedLagSmoother` class is a powerful tool for real-time state estimation in dynamic environments. Its ability to handle incremental updates and maintain a fixed-size problem makes it ideal for applications where computational resources are limited. By leveraging nonlinear optimization, it provides accurate and consistent state estimates within the fixed lag window." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/GaussNewtonOptimizer.ipynb b/doc/generating/output/GaussNewtonOptimizer.ipynb new file mode 100644 index 000000000..0f590e43b --- /dev/null +++ b/doc/generating/output/GaussNewtonOptimizer.ipynb @@ -0,0 +1,66 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "6463d580", + "metadata": {}, + "source": [ + "# GaussNewtonOptimizer Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `GaussNewtonOptimizer` class in GTSAM is designed to optimize nonlinear factor graphs using the Gauss-Newton algorithm. This class is particularly suited for problems where the cost function can be approximated well by a quadratic function near the minimum. The Gauss-Newton method is an iterative optimization technique that updates the solution by linearizing the nonlinear system at each iteration.\n", + "\n", + "## Key Features\n", + "\n", + "- **Iterative Optimization**: The optimizer refines the solution iteratively by linearizing the nonlinear system around the current estimate.\n", + "- **Convergence Control**: It provides mechanisms to control the convergence through parameters such as maximum iterations and relative error tolerance.\n", + "- **Integration with GTSAM**: Seamlessly integrates with GTSAM's factor graph framework, allowing it to be used with various types of factors and variables.\n", + "\n", + "## Key Methods\n", + "\n", + "### Constructor\n", + "\n", + "- **GaussNewtonOptimizer**: Initializes the optimizer with a given factor graph and initial values. The constructor sets up the optimization problem and prepares it for iteration.\n", + "\n", + "### Optimization\n", + "\n", + "- **optimize**: Executes the optimization process. 
This method runs the Gauss-Newton iterations until convergence criteria are met, such as reaching the maximum number of iterations or achieving a relative error below a specified threshold.\n", + "\n", + "### Convergence Criteria\n", + "\n", + "- **checkConvergence**: Evaluates whether the optimization process has converged based on the change in error and the specified tolerance levels.\n", + "\n", + "### Accessors\n", + "\n", + "- **error**: Returns the current error of the factor graph with respect to the current estimate. This is useful for monitoring the progress of the optimization.\n", + "- **values**: Retrieves the current estimate of the variable values after optimization.\n", + "\n", + "## Mathematical Background\n", + "\n", + "The Gauss-Newton algorithm is based on the idea of linearizing the nonlinear residuals $r(x)$ around the current estimate $x_k$. The update step is derived from solving the normal equations:\n", + "\n", + "$$ J(x_k)^T J(x_k) \\Delta x = -J(x_k)^T r(x_k) $$\n", + "\n", + "where $J(x_k)$ is the Jacobian of the residuals with respect to the variables. The solution $\\Delta x$ is used to update the estimate:\n", + "\n", + "$$ x_{k+1} = x_k + \\Delta x $$\n", + "\n", + "This process is repeated iteratively until convergence.\n", + "\n", + "## Usage Considerations\n", + "\n", + "- **Initial Guess**: The quality of the initial guess can significantly affect the convergence and performance of the Gauss-Newton optimizer.\n", + "- **Non-convexity**: Since the method relies on linear approximations, it may struggle with highly non-convex problems or those with poor initial estimates.\n", + "- **Performance**: The Gauss-Newton method is generally faster than other nonlinear optimization methods like Levenberg-Marquardt for problems that are well-approximated by a quadratic model near the solution.\n", + "\n", + "In summary, the `GaussNewtonOptimizer` is a powerful tool for solving nonlinear optimization problems in factor graphs, particularly when the problem is well-suited to quadratic approximation. Its integration with GTSAM makes it a versatile choice for various applications in robotics and computer vision." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/GncOptimizer.ipynb b/doc/generating/output/GncOptimizer.ipynb new file mode 100644 index 000000000..12b2b6aca --- /dev/null +++ b/doc/generating/output/GncOptimizer.ipynb @@ -0,0 +1,70 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c950beef", + "metadata": {}, + "source": [ + "# GTSAM GncOptimizer Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and requires human revision to ensure accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `GncOptimizer` class in GTSAM is designed to perform robust optimization using Graduated Non-Convexity (GNC). This method is particularly useful in scenarios where the optimization problem is affected by outliers. 
The GNC approach gradually transitions from a convex approximation of the problem to the original non-convex problem, thereby improving robustness and convergence.\n", + "\n", + "## Key Features\n", + "\n", + "- **Robust Optimization**: The `GncOptimizer` is specifically tailored to handle optimization problems with outliers, using a robust cost function that can mitigate their effects.\n", + "- **Graduated Non-Convexity**: This technique allows the optimizer to start with a convex problem and gradually transform it into the original non-convex problem, which helps in avoiding local minima.\n", + "- **Customizable Parameters**: Users can adjust various parameters to control the behavior of the optimizer, such as the type of robust loss function and the parameters governing the GNC process.\n", + "\n", + "## Key Methods\n", + "\n", + "### Initialization and Setup\n", + "\n", + "- **Constructor**: The class constructor initializes the optimizer with a given nonlinear factor graph and initial estimate. It also accepts parameters specific to the GNC process.\n", + "\n", + "### Optimization Process\n", + "\n", + "- **optimize()**: This method performs the optimization process. It iteratively refines the solution by adjusting the influence of the robust cost function, following the principles of graduated non-convexity.\n", + "\n", + "### Configuration and Parameters\n", + "\n", + "- **setParams()**: Allows users to set the parameters for the GNC optimization process, including the type of robust loss function and other algorithm-specific settings.\n", + "- **getParams()**: Retrieves the current parameters used by the optimizer, providing insight into the configuration of the optimization process.\n", + "\n", + "### Utility Functions\n", + "\n", + "- **cost()**: Computes the cost of the current estimate, which is useful for evaluating the progress of the optimization.\n", + "- **error()**: Returns the error associated with the current estimate, offering a measure of how well the optimization is performing.\n", + "\n", + "## Mathematical Formulation\n", + "\n", + "The `GncOptimizer` leverages a robust cost function $\\rho(e)$, where $e$ is the error term. The goal is to minimize the sum of these robust costs over all measurements:\n", + "\n", + "$$\n", + "\\min_x \\sum_i \\rho(e_i(x))\n", + "$$\n", + "\n", + "In the context of GNC, the robust cost function is gradually transformed from a convex approximation to the original non-convex form. This transformation is controlled by a parameter $\\mu$, which is adjusted during the optimization process:\n", + "\n", + "$$\n", + "\\rho_\\mu(e) = \\frac{1}{\\mu} \\rho(\\mu e)\n", + "$$\n", + "\n", + "As $\\mu$ increases, the function $\\rho_\\mu(e)$ transitions from a convex to a non-convex shape, allowing the optimizer to handle outliers effectively.\n", + "\n", + "## Usage Considerations\n", + "\n", + "- **Outlier Rejection**: The `GncOptimizer` is particularly effective in scenarios with significant outlier presence, such as SLAM or bundle adjustment problems.\n", + "- **Parameter Tuning**: Proper tuning of the GNC parameters is crucial for achieving optimal performance. Users should experiment with different settings to find the best configuration for their specific problem.\n", + "\n", + "This high-level overview provides a starting point for understanding and utilizing the `GncOptimizer` class in GTSAM. For detailed implementation and advanced usage, users should refer to the source code and additional GTSAM documentation." 
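+    ,
+    "\n",
+    "\n",
+    "## Example Usage\n",
+    "\n",
+    "A minimal sketch, assuming the Python wrapper exposes the Levenberg-Marquardt specialization as `gtsam.GncLMParams` and `gtsam.GncLMOptimizer` (verify these names against the installed GTSAM version). The toy graph places two conflicting priors on one pose, so the second prior behaves like an outlier measurement.\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "import gtsam\n",
+    "\n",
+    "graph = gtsam.NonlinearFactorGraph()\n",
+    "noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 0.05]))\n",
+    "graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0.0, 0.0, 0.0), noise))\n",
+    "graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(5.0, 5.0, 1.0), noise))  # outlier\n",
+    "\n",
+    "initial = gtsam.Values()\n",
+    "initial.insert(1, gtsam.Pose2(0.5, 0.5, 0.1))\n",
+    "\n",
+    "params = gtsam.GncLMParams()  # GNC wrapped around Levenberg-Marquardt\n",
+    "optimizer = gtsam.GncLMOptimizer(graph, initial, params)\n",
+    "result = optimizer.optimize()  # the outlier's influence is annealed away\n",
+    "print(result.atPose2(1))\n",
+    "```"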
+ ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/ISAM2.ipynb b/doc/generating/output/ISAM2.ipynb new file mode 100644 index 000000000..0b4ae227d --- /dev/null +++ b/doc/generating/output/ISAM2.ipynb @@ -0,0 +1,15 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "7f0a9feb", + "metadata": {}, + "source": [ + "I'm unable to directly access or search the content of the uploaded file. However, if you can provide the text or key excerpts from the file, I can help generate the documentation based on that information." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/LevenbergMarquardtOptimizer.ipynb b/doc/generating/output/LevenbergMarquardtOptimizer.ipynb new file mode 100644 index 000000000..2fa5a867c --- /dev/null +++ b/doc/generating/output/LevenbergMarquardtOptimizer.ipynb @@ -0,0 +1,73 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "29642bb2", + "metadata": {}, + "source": [ + "# LevenbergMarquardtOptimizer Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `LevenbergMarquardtOptimizer` class in GTSAM is a specialized optimizer that implements the Levenberg-Marquardt algorithm. This algorithm is a popular choice for solving non-linear least squares problems, which are common in various applications such as computer vision, robotics, and machine learning.\n", + "\n", + "The Levenberg-Marquardt algorithm is an iterative technique that interpolates between the Gauss-Newton algorithm and the method of gradient descent. It is particularly useful for optimizing problems where the solution is expected to be near the initial guess.\n", + "\n", + "## Key Features\n", + "\n", + "- **Non-linear Optimization**: The class is designed to handle non-linear optimization problems efficiently.\n", + "- **Damping Mechanism**: It incorporates a damping parameter to control the step size, balancing between the Gauss-Newton and gradient descent methods.\n", + "- **Iterative Improvement**: The optimizer iteratively refines the solution, reducing the error at each step.\n", + "\n", + "## Mathematical Formulation\n", + "\n", + "The Levenberg-Marquardt algorithm seeks to minimize a cost function $F(x)$ of the form:\n", + "\n", + "$$\n", + "F(x) = \\frac{1}{2} \\sum_{i=1}^{m} r_i(x)^2\n", + "$$\n", + "\n", + "where $r_i(x)$ are the residuals. 
The update rule for the algorithm is given by:\n", + "\n", + "$$\n", + "x_{k+1} = x_k - (J^T J + \\lambda I)^{-1} J^T r\n", + "$$\n", + "\n", + "Here, $J$ is the Jacobian matrix of the residuals, $\\lambda$ is the damping parameter, and $I$ is the identity matrix.\n", + "\n", + "## Key Methods\n", + "\n", + "### Initialization\n", + "\n", + "- **Constructor**: Initializes the optimizer with the given parameters and initial values.\n", + "\n", + "### Optimization\n", + "\n", + "- **optimize**: Executes the optimization process, iteratively updating the solution to minimize the cost function.\n", + "\n", + "### Parameter Control\n", + "\n", + "- **setLambda**: Sets the damping parameter $\\lambda$, which influences the convergence behavior.\n", + "- **getLambda**: Retrieves the current value of the damping parameter.\n", + "\n", + "### Convergence and Termination\n", + "\n", + "- **checkConvergence**: Evaluates whether the optimization process has converged based on predefined criteria.\n", + "- **terminate**: Stops the optimization process when certain conditions are met.\n", + "\n", + "## Usage Notes\n", + "\n", + "- The choice of the initial guess can significantly affect the convergence speed and the quality of the solution.\n", + "- Proper tuning of the damping parameter $\\lambda$ is crucial for balancing the convergence rate and stability.\n", + "- The optimizer is most effective when the residuals are approximately linear near the solution.\n", + "\n", + "This class is a powerful tool for tackling complex optimization problems where traditional linear methods fall short. By leveraging the strengths of both Gauss-Newton and gradient descent, the `LevenbergMarquardtOptimizer` provides a robust framework for achieving accurate solutions in non-linear least squares problems." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/LinearContainerFactor.ipynb b/doc/generating/output/LinearContainerFactor.ipynb new file mode 100644 index 000000000..edb00e936 --- /dev/null +++ b/doc/generating/output/LinearContainerFactor.ipynb @@ -0,0 +1,65 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f4c73cc1", + "metadata": {}, + "source": [ + "# LinearContainerFactor Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `LinearContainerFactor` class in GTSAM is a specialized factor that encapsulates a linear factor within a nonlinear factor graph. This class allows for the seamless integration of linear factors into a nonlinear optimization problem, providing flexibility in problem modeling and solution.\n", + "\n", + "## Key Features\n", + "\n", + "- **Encapsulation of Linear Factors**: The primary function of the `LinearContainerFactor` is to store a linear factor and its associated values, enabling it to be used within a nonlinear context.\n", + "- **Error Calculation**: It provides mechanisms to compute the error of the factor given a set of values.\n", + "- **Jacobian Computation**: The class can compute the Jacobian matrix, which is essential for optimization processes.\n", + "\n", + "## Key Methods\n", + "\n", + "### Constructor\n", + "\n", + "- **LinearContainerFactor**: This constructor initializes the `LinearContainerFactor` with a linear factor and optionally with values. 
It serves as the entry point for creating an instance of this class.\n", + "\n", + "### Error Evaluation\n", + "\n", + "- **error**: This method calculates the error of the factor given a set of values. The error is typically defined as the difference between the predicted and observed measurements, and it plays a crucial role in optimization.\n", + "\n", + "### Jacobian Computation\n", + "\n", + "- **linearize**: This method computes the Jacobian matrix of the factor. The Jacobian is a matrix of partial derivatives that describes how the error changes with respect to changes in the variables. It is a critical component in gradient-based optimization algorithms.\n", + "\n", + "### Accessors\n", + "\n", + "- **keys**: This method returns the keys associated with the factor. Keys are identifiers for the variables involved in the factor, and they are essential for understanding the structure of the factor graph.\n", + "\n", + "### Utility Methods\n", + "\n", + "- **equals**: This method checks for equality between two `LinearContainerFactor` instances. It is useful for testing and validation purposes.\n", + "\n", + "## Mathematical Background\n", + "\n", + "The `LinearContainerFactor` operates within the context of factor graphs, where the goal is to minimize the total error across all factors. The error for a linear factor can be expressed as:\n", + "\n", + "$$ e(x) = A \\cdot x - b $$\n", + "\n", + "where $A$ is the coefficient matrix, $x$ is the vector of variables, and $b$ is the measurement vector. The optimization process aims to find the values of $x$ that minimize the sum of squared errors:\n", + "\n", + "$$ \\text{minimize} \\quad \\sum e(x)^T \\cdot e(x) $$\n", + "\n", + "The Jacobian matrix, which is derived from the linearization of the error function, is crucial for iterative optimization techniques such as Gauss-Newton or Levenberg-Marquardt.\n", + "\n", + "## Conclusion\n", + "\n", + "The `LinearContainerFactor` class is a powerful tool in GTSAM for integrating linear factors into nonlinear optimization problems. By providing mechanisms for error evaluation and Jacobian computation, it facilitates the efficient solution of complex estimation problems in robotics and computer vision." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/NonlinearConjugateGradientOptimizer.ipynb b/doc/generating/output/NonlinearConjugateGradientOptimizer.ipynb new file mode 100644 index 000000000..f8dbdf8fe --- /dev/null +++ b/doc/generating/output/NonlinearConjugateGradientOptimizer.ipynb @@ -0,0 +1,66 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "48970ca0", + "metadata": {}, + "source": [ + "# NonlinearConjugateGradientOptimizer Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `NonlinearConjugateGradientOptimizer` class in GTSAM is an implementation of the nonlinear conjugate gradient method for optimizing nonlinear functions. This optimizer is particularly useful for solving large-scale optimization problems where the Hessian matrix is not easily computed or stored. 
The conjugate gradient method is an iterative algorithm that seeks to find the minimum of a function by following a series of conjugate directions.\n", + "\n", + "## Key Features\n", + "\n", + "- **Optimization Method**: Implements the nonlinear conjugate gradient method, which is an extension of the linear conjugate gradient method to nonlinear optimization problems.\n", + "- **Efficiency**: Suitable for large-scale problems due to its iterative nature and reduced memory requirements compared to methods that require the Hessian matrix.\n", + "- **Flexibility**: Can be used with various line search strategies and conjugate gradient update formulas.\n", + "\n", + "## Main Methods\n", + "\n", + "### Constructor\n", + "\n", + "- **NonlinearConjugateGradientOptimizer**: Initializes the optimizer with a given nonlinear factor graph and initial values. The user can specify optimization parameters, including the choice of line search method and conjugate gradient update formula.\n", + "\n", + "### Optimization\n", + "\n", + "- **optimize**: Executes the optimization process. This method iteratively updates the solution by computing search directions and performing line searches to minimize the objective function along these directions.\n", + "\n", + "### Accessors\n", + "\n", + "- **error**: Returns the current error value of the objective function. This is useful for monitoring the convergence of the optimization process.\n", + "- **values**: Retrieves the current estimate of the optimized variables. This allows users to access the solution at any point during the optimization.\n", + "\n", + "## Mathematical Background\n", + "\n", + "The nonlinear conjugate gradient method seeks to minimize a nonlinear function $f(x)$ by iteratively updating the solution $x_k$ according to:\n", + "\n", + "$$ x_{k+1} = x_k + \\alpha_k p_k $$\n", + "\n", + "where $p_k$ is the search direction and $\\alpha_k$ is the step size determined by a line search. The search direction $p_k$ is computed using the gradient of the function and a conjugate gradient update formula, such as the Fletcher-Reeves or Polak-Ribiere formulas:\n", + "\n", + "- **Fletcher-Reeves**: \n", + " $$ \\beta_k^{FR} = \\frac{\\nabla f(x_{k+1})^T \\nabla f(x_{k+1})}{\\nabla f(x_k)^T \\nabla f(x_k)} $$\n", + " \n", + "- **Polak-Ribiere**: \n", + " $$ \\beta_k^{PR} = \\frac{\\nabla f(x_{k+1})^T (\\nabla f(x_{k+1}) - \\nabla f(x_k))}{\\nabla f(x_k)^T \\nabla f(x_k)} $$\n", + "\n", + "The choice of $\\beta_k$ affects the convergence properties of the algorithm.\n", + "\n", + "## Usage Notes\n", + "\n", + "- The `NonlinearConjugateGradientOptimizer` is most effective when the problem size is large and the computation of the Hessian is impractical.\n", + "- Users should choose an appropriate line search method and conjugate gradient update formula based on the specific characteristics of their optimization problem.\n", + "- Monitoring the error and values during optimization can provide insights into the convergence behavior and help diagnose potential issues.\n", + "\n", + "This class provides a robust framework for solving complex nonlinear optimization problems efficiently, leveraging the power of the conjugate gradient method." 
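+    ,
+    "\n",
+    "\n",
+    "## Example Usage\n",
+    "\n",
+    "A minimal sketch, assuming the Python wrapper exposes `NonlinearConjugateGradientOptimizer` with the usual `(graph, initialValues)` constructor; if it is not wrapped in your build, the same graph can be optimized with another `NonlinearOptimizer` subclass. Keys, factors, and sigmas are illustrative assumptions.\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "import gtsam\n",
+    "\n",
+    "graph = gtsam.NonlinearFactorGraph()\n",
+    "noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))\n",
+    "graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(0, 0, 0), noise))\n",
+    "graph.add(gtsam.BetweenFactorPose2(0, 1, gtsam.Pose2(2, 0, 0), noise))\n",
+    "\n",
+    "initial = gtsam.Values()\n",
+    "initial.insert(0, gtsam.Pose2(0.3, -0.2, 0.1))\n",
+    "initial.insert(1, gtsam.Pose2(2.5, 0.4, -0.1))\n",
+    "\n",
+    "optimizer = gtsam.NonlinearConjugateGradientOptimizer(graph, initial)\n",
+    "result = optimizer.optimize()\n",
+    "print('final error:', graph.error(result))\n",
+    "```"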
+ ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/NonlinearFactor.ipynb b/doc/generating/output/NonlinearFactor.ipynb new file mode 100644 index 000000000..7ab33cbc9 --- /dev/null +++ b/doc/generating/output/NonlinearFactor.ipynb @@ -0,0 +1,15 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "37ed0b18", + "metadata": {}, + "source": [ + "It seems there was an issue with accessing the file content directly. Could you please provide the content of the file or any specific details you would like to be documented?" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/NonlinearFactorGraph.ipynb b/doc/generating/output/NonlinearFactorGraph.ipynb new file mode 100644 index 000000000..643118c47 --- /dev/null +++ b/doc/generating/output/NonlinearFactorGraph.ipynb @@ -0,0 +1,66 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a58d890a", + "metadata": {}, + "source": [ + "# NonlinearFactorGraph Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `NonlinearFactorGraph` class in GTSAM is a key component for representing and solving nonlinear factor graphs. A factor graph is a bipartite graph that represents the factorization of a function, commonly used in probabilistic graphical models. In the context of GTSAM, it is used to represent the structure of optimization problems, particularly in the domain of simultaneous localization and mapping (SLAM) and structure from motion (SfM).\n", + "\n", + "## Key Functionalities\n", + "\n", + "### Construction and Initialization\n", + "\n", + "- **Constructor**: The class provides a default constructor to initialize an empty nonlinear factor graph.\n", + "\n", + "### Factor Management\n", + "\n", + "- **add**: This method allows adding a new factor to the graph. Factors represent constraints or measurements in the optimization problem.\n", + "- **reserve**: Pre-allocates space for a specified number of factors, optimizing memory usage when the number of factors is known in advance.\n", + "\n", + "### Graph Operations\n", + "\n", + "- **resize**: Adjusts the size of the factor graph, which can be useful when dynamically modifying the graph structure.\n", + "- **remove**: Removes a factor from the graph, identified by its index.\n", + "\n", + "### Querying and Access\n", + "\n", + "- **size**: Returns the number of factors currently in the graph.\n", + "- **empty**: Checks if the graph contains any factors.\n", + "- **at**: Accesses a specific factor by its index.\n", + "- **back**: Retrieves the last factor in the graph.\n", + "- **front**: Retrieves the first factor in the graph.\n", + "\n", + "### Optimization and Linearization\n", + "\n", + "- **linearize**: Converts the nonlinear factor graph into a linear factor graph at a given linearization point. 
This is a crucial step in iterative optimization algorithms like Gauss-Newton or Levenberg-Marquardt.\n", + " \n", + " The linearization process involves computing the Jacobian matrices of the nonlinear functions, resulting in a linear approximation:\n", + " \n", + " $$ f(x) \\approx f(x_0) + J(x - x_0) $$\n", + " \n", + " where $J$ is the Jacobian matrix evaluated at the point $x_0$.\n", + "\n", + "### Utilities\n", + "\n", + "- **equals**: Compares two nonlinear factor graphs for equality, considering both the structure and the factors themselves.\n", + "- **clone**: Creates a deep copy of the factor graph, including all its factors.\n", + "\n", + "## Usage Notes\n", + "\n", + "The `NonlinearFactorGraph` class is designed to be flexible and efficient, allowing users to construct complex optimization problems by adding and managing factors. It integrates seamlessly with GTSAM's optimization algorithms, enabling robust solutions to large-scale nonlinear problems.\n", + "\n", + "For effective use, it is important to understand the nature of the factors being added and the implications of linearization on the optimization process. The class provides a robust interface for managing the lifecycle of a factor graph, from construction through to optimization and solution extraction." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/NonlinearISAM.ipynb b/doc/generating/output/NonlinearISAM.ipynb new file mode 100644 index 000000000..83f92ddfd --- /dev/null +++ b/doc/generating/output/NonlinearISAM.ipynb @@ -0,0 +1,15 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "e31da023", + "metadata": {}, + "source": [ + "It seems there is an issue with accessing the file directly. However, I can guide you on how to document the class if you can provide the class definition and its key methods. You can paste the relevant parts of the file here, and I'll help you create the Markdown documentation." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/NonlinearOptimizer.ipynb b/doc/generating/output/NonlinearOptimizer.ipynb new file mode 100644 index 000000000..17057df52 --- /dev/null +++ b/doc/generating/output/NonlinearOptimizer.ipynb @@ -0,0 +1,66 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "2e4812da", + "metadata": {}, + "source": [ + "# NonlinearOptimizer Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `NonlinearOptimizer` class in GTSAM is a foundational component for solving nonlinear optimization problems. It provides a flexible interface for optimizing nonlinear factor graphs, which are commonly used in robotics and computer vision applications.\n", + "\n", + "The primary purpose of the `NonlinearOptimizer` is to iteratively refine an initial estimate of a solution to minimize a nonlinear cost function. This class serves as a base class for specific optimization algorithms like Gauss-Newton, Levenberg-Marquardt, and Dogleg.\n", + "\n", + "## Key Methods\n", + "\n", + "### `optimize()`\n", + "The `optimize()` method is the core function of the `NonlinearOptimizer` class. It performs the optimization process, iteratively updating the estimate to converge to a local minimum of the cost function.\n", + "\n", + "### `error()`\n", + "The `error()` method computes the total error of the current estimate. 
This is typically the sum of squared errors for all factors in the graph. Mathematically, the error can be expressed as:\n", + "\n", + "$$\n", + "E(x) = \\sum_{i} \\| f_i(x) \\|^2\n", + "$$\n", + "\n", + "where $f_i(x)$ represents the residual error of the $i$-th factor.\n", + "\n", + "### `values()`\n", + "The `values()` method returns the current set of variable estimates. These estimates are updated during the optimization process.\n", + "\n", + "### `iterations()`\n", + "The `iterations()` method provides the number of iterations performed during the optimization process. This can be useful for analyzing the convergence behavior of the optimizer.\n", + "\n", + "### `params()`\n", + "The `params()` method returns the parameters used by the optimizer. These parameters can include settings like convergence thresholds, maximum iterations, and other algorithm-specific options.\n", + "\n", + "## Usage\n", + "\n", + "The `NonlinearOptimizer` class is typically not used directly. Instead, one of its derived classes, such as `GaussNewtonOptimizer`, `LevenbergMarquardtOptimizer`, or `DoglegOptimizer`, is used to perform specific types of optimization. These derived classes implement the `optimize()` method according to their respective algorithms.\n", + "\n", + "## Mathematical Foundations\n", + "\n", + "The optimization process in `NonlinearOptimizer` is based on iterative methods that solve for the minimum of a nonlinear cost function. The general approach involves linearizing the nonlinear problem at the current estimate and solving the resulting linear system to update the estimate. This process is repeated until convergence criteria are met.\n", + "\n", + "The optimization problem can be formally defined as:\n", + "\n", + "$$\n", + "\\min_{x} \\sum_{i} \\| f_i(x) \\|^2\n", + "$$\n", + "\n", + "where $x$ is the vector of variables to be optimized, and $f_i(x)$ are the residuals of the factors in the graph.\n", + "\n", + "## Conclusion\n", + "\n", + "The `NonlinearOptimizer` class is a crucial component in GTSAM for solving nonlinear optimization problems. By providing a common interface and shared functionality, it enables the implementation of various optimization algorithms tailored to specific problem requirements. Understanding the key methods and their roles is essential for effectively utilizing this class in practical applications." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/PriorFactor.ipynb b/doc/generating/output/PriorFactor.ipynb new file mode 100644 index 000000000..d485cd2aa --- /dev/null +++ b/doc/generating/output/PriorFactor.ipynb @@ -0,0 +1,55 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ec35011c", + "metadata": {}, + "source": [ + "# GTSAM PriorFactor Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `PriorFactor` class in GTSAM is a specialized factor used in probabilistic graphical models, particularly within the context of nonlinear optimization and estimation problems. It represents a prior belief about a variable in the form of a Gaussian distribution. 
This class is crucial for incorporating prior knowledge into the optimization process, which can significantly enhance the accuracy and robustness of the solutions.\n", + "\n", + "## Key Functionalities\n", + "\n", + "### PriorFactor Construction\n", + "\n", + "The `PriorFactor` is constructed by specifying a key, a prior value, and a noise model. The key identifies the variable in the factor graph, the prior value represents the expected value of the variable, and the noise model encapsulates the uncertainty associated with this prior belief.\n", + "\n", + "### Error Calculation\n", + "\n", + "The primary role of the `PriorFactor` is to compute the error between the estimated value of a variable and its prior. This error is typically defined as:\n", + "\n", + "$$\n", + "e(x) = x - \\mu\n", + "$$\n", + "\n", + "where $x$ is the estimated value, and $\\mu$ is the prior mean. The error is then weighted by the noise model to form the contribution of this factor to the overall objective function.\n", + "\n", + "### Jacobian Computation\n", + "\n", + "In the context of optimization, the `PriorFactor` provides methods to compute the Jacobian of the error function. This is essential for gradient-based optimization algorithms, which rely on derivatives to iteratively improve the solution.\n", + "\n", + "### Contribution to Factor Graph\n", + "\n", + "The `PriorFactor` contributes to the factor graph by adding a term to the objective function that penalizes deviations from the prior. This term is integrated into the overall optimization problem, ensuring that the solution respects the prior knowledge encoded by the factor.\n", + "\n", + "## Usage Considerations\n", + "\n", + "- **Noise Model**: The choice of noise model is critical as it determines how strongly the prior is enforced. A tighter noise model implies a stronger belief in the prior.\n", + "- **Integration with Other Factors**: The `PriorFactor` is typically used in conjunction with other factors that model the system dynamics and measurements. It helps anchor the solution, especially in scenarios with limited or noisy measurements.\n", + "- **Applications**: Common applications include SLAM (Simultaneous Localization and Mapping), where priors on initial poses or landmarks can significantly improve map accuracy and convergence speed.\n", + "\n", + "## Conclusion\n", + "\n", + "The `PriorFactor` class is a fundamental component in GTSAM for incorporating prior information into the factor graph framework. By understanding its construction, error computation, and integration into the optimization process, users can effectively leverage prior knowledge to enhance their estimation and optimization tasks." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/generating/output/WhiteNoiseFactor.ipynb b/doc/generating/output/WhiteNoiseFactor.ipynb new file mode 100644 index 000000000..b34b170b3 --- /dev/null +++ b/doc/generating/output/WhiteNoiseFactor.ipynb @@ -0,0 +1,66 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "5a0c879e", + "metadata": {}, + "source": [ + "# WhiteNoiseFactor Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `WhiteNoiseFactor` class in GTSAM is a specialized factor used in nonlinear optimization problems, particularly in the context of probabilistic graphical models. 
This class models the effect of white noise on a measurement, which is a common assumption in many estimation problems. The primary purpose of this class is to incorporate the uncertainty due to white noise into the optimization process.\n", + "\n", + "## Key Functionalities\n", + "\n", + "### Noise Modeling\n", + "\n", + "- **White Noise Assumption**: The class assumes that the noise affecting the measurements is Gaussian and uncorrelated, which is often referred to as \"white noise\". This assumption simplifies the mathematical treatment of noise in the optimization problem.\n", + "\n", + "### Factor Operations\n", + "\n", + "- **Error Calculation**: The `WhiteNoiseFactor` computes the error between the predicted and observed measurements, incorporating the noise model. This error is crucial for the optimization process as it influences the adjustment of variables to minimize the overall error in the system.\n", + "\n", + "- **Jacobian Computation**: The class provides methods to compute the Jacobian of the error function with respect to the variables involved. The Jacobian is essential for gradient-based optimization techniques, as it provides the necessary derivatives to guide the optimization algorithm.\n", + "\n", + "### Mathematical Formulation\n", + "\n", + "The error function for a `WhiteNoiseFactor` can be represented as:\n", + "\n", + "$$ e(x) = h(x) - z $$\n", + "\n", + "where:\n", + "- $e(x)$ is the error function.\n", + "- $h(x)$ is the predicted measurement based on the current estimate of the variables.\n", + "- $z$ is the observed measurement.\n", + "\n", + "The noise is assumed to be Gaussian with zero mean and a certain covariance, which is often represented as:\n", + "\n", + "$$ \\text{Cov}(e) = \\sigma^2 I $$\n", + "\n", + "where $\\sigma^2$ is the variance of the noise and $I$ is the identity matrix.\n", + "\n", + "### Optimization Integration\n", + "\n", + "- **Factor Graphs**: The `WhiteNoiseFactor` is integrated into factor graphs, which are a key structure in GTSAM for representing and solving large-scale estimation problems. Each factor in the graph contributes to the overall error that the optimization process seeks to minimize.\n", + "\n", + "- **Nonlinear Optimization**: The class is designed to work seamlessly with GTSAM's nonlinear optimization framework, allowing it to handle complex, real-world estimation problems that involve non-linear relationships between variables.\n", + "\n", + "## Usage Notes\n", + "\n", + "- **Assumptions**: Users should ensure that the white noise assumption is valid for their specific application, as deviations from this assumption can lead to suboptimal estimation results.\n", + "\n", + "- **Integration**: The `WhiteNoiseFactor` should be used in conjunction with other factors and variables in a factor graph to effectively model the entire system being estimated.\n", + "\n", + "- **Performance**: The efficiency of the optimization process can be influenced by the choice of noise model and the structure of the factor graph. Proper tuning and validation are recommended to achieve optimal performance.\n", + "\n", + "In summary, the `WhiteNoiseFactor` class is a powerful tool in GTSAM for modeling and mitigating the effects of white noise in nonlinear estimation problems. Its integration into factor graphs and compatibility with GTSAM's optimization algorithms make it a versatile component for a wide range of applications." 
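To make the error/noise formulation above concrete, here is a minimal sketch using GTSAM's Python wrapper. `WhiteNoiseFactor` itself is not assumed to be exposed in Python; the sketch uses a `PriorFactorPose2` with an isotropic noise model purely to illustrate how a residual $e = h(x) - z$ is weighted under $\text{Cov}(e) = \sigma^2 I$:

```python
import gtsam

# Isotropic Gaussian noise model: Cov(e) = sigma^2 * I_3
sigma = 0.1
noise = gtsam.noiseModel.Isotropic.Sigma(3, sigma)

# A Pose2 prior stands in for h(x) - z with h the identity (illustration only)
key = gtsam.symbol('x', 0)
factor = gtsam.PriorFactorPose2(key, gtsam.Pose2(0.0, 0.0, 0.0), noise)

values = gtsam.Values()
values.insert(key, gtsam.Pose2(0.1, 0.0, 0.0))

# error() returns the weighted squared residual 0.5 * ||e / sigma||^2;
# here e = (0.1, 0, 0), so the printed value is 0.5 * (0.1 / 0.1)^2 = 0.5
print(factor.error(values))
```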
+ ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} From 0715411101d3efde292b22947adc29d8b2abdcad Mon Sep 17 00:00:00 2001 From: p-zach Date: Thu, 3 Apr 2025 17:00:44 -0400 Subject: [PATCH 03/21] Usage example for script --- doc/generating/gpt_generate.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/generating/gpt_generate.py b/doc/generating/gpt_generate.py index 3fae00978..98bc5165d 100644 --- a/doc/generating/gpt_generate.py +++ b/doc/generating/gpt_generate.py @@ -95,7 +95,9 @@ if __name__ == "__main__": parser.add_argument( "file_paths", nargs='+', - help="The paths to the header files from the root gtsam directory.") + help= + "The paths to the header files from the root gtsam directory, e.g. 'gtsam/geometry/Pose3.h'." + ) args = parser.parse_args() # Retrieves API key from environment variable OPENAI_API_KEY From b873550f77fd2c686be0fdba43193a6badb956f3 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 5 Apr 2025 11:27:48 -0400 Subject: [PATCH 04/21] Added comments --- doc/generating/gpt_generate.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/doc/generating/gpt_generate.py b/doc/generating/gpt_generate.py index 98bc5165d..ef5bf7c0f 100644 --- a/doc/generating/gpt_generate.py +++ b/doc/generating/gpt_generate.py @@ -1,3 +1,33 @@ +""" +GTSAM Copyright 2010-2025, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Author: Porter Zach + +This script generates interactive Python notebooks (.ipynb) that document GTSAM +header files. It retrieves the header file content from the GTSAM GitHub repository, +sends it to OpenAI's API for processing, and saves the generated documentation +as a Jupyter notebook. + +Functions: + is_url_valid(url: str) -> bool: + Verifies that the supplied URL does not return a 404. + + save_ipynb(text: str, file_path: str) -> str: + Saves the provided text to a single Markdown cell in a new .ipynb file. + + generate_ipynb(file_path: str, openai_client): + Generates an interactive Python notebook for the given GTSAM header file + by sending a request to OpenAI's API and saving the response. + +Usage: + Run the script with paths to GTSAM header files as arguments. 
For example: + python gpt_generate.py gtsam/geometry/Pose3.h +""" + import os import time import requests From 007877426685841dc51ea02966450b197fb35f83 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 5 Apr 2025 12:02:02 -0400 Subject: [PATCH 05/21] Moved BatchFixedLagSmoother in right spot --- .../output/BatchFixedLagSmoother.ipynb | 54 ------------- gtsam/nonlinear/BatchFixedLagSmoother.h | 13 +++- .../nonlinear/doc/BatchFixedLagSmoother.ipynb | 78 +++++++++++++++++++ gtsam/nonlinear/nonlinear.md | 21 +++++ myst.yml | 3 + 5 files changed, 113 insertions(+), 56 deletions(-) delete mode 100644 doc/generating/output/BatchFixedLagSmoother.ipynb create mode 100644 gtsam/nonlinear/doc/BatchFixedLagSmoother.ipynb create mode 100644 gtsam/nonlinear/nonlinear.md diff --git a/doc/generating/output/BatchFixedLagSmoother.ipynb b/doc/generating/output/BatchFixedLagSmoother.ipynb deleted file mode 100644 index 079489abf..000000000 --- a/doc/generating/output/BatchFixedLagSmoother.ipynb +++ /dev/null @@ -1,54 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "283174f8", - "metadata": {}, - "source": [ - "# BatchFixedLagSmoother Class Documentation\n", - "\n", - "*Disclaimer: This documentation was generated by AI and requires human revision for accuracy and completeness.*\n", - "\n", - "## Overview\n", - "\n", - "The `BatchFixedLagSmoother` class in GTSAM is a specialized smoother designed for fixed-lag smoothing in nonlinear factor graphs. It extends the capabilities of fixed-lag smoothing by maintaining a sliding window of the most recent variables and marginalizing out older variables. This is particularly useful in real-time applications where memory and computational efficiency are critical.\n", - "\n", - "## Key Functionalities\n", - "\n", - "### Smoothing and Optimization\n", - "\n", - "- **update**: This method is the core of the `BatchFixedLagSmoother`. It processes new factors and variables, updating the current estimate of the state. The update method also manages the marginalization of variables that fall outside the fixed lag window.\n", - "\n", - "### Factor Graph Management\n", - "\n", - "- **marginalize**: This function handles the marginalization of variables that are no longer within the fixed lag window. Marginalization is a crucial step in maintaining the size of the factor graph, ensuring that only relevant variables are kept for optimization.\n", - "\n", - "### Parameter Management\n", - "\n", - "- **Params**: The `Params` structure within the class allows users to configure various settings for the smoother, such as the lag duration and optimization parameters. This provides flexibility in tuning the smoother for specific applications.\n", - "\n", - "## Mathematical Formulation\n", - "\n", - "The `BatchFixedLagSmoother` operates on the principle of fixed-lag smoothing, where the objective is to estimate the state $\\mathbf{x}_t$ given all measurements up to time $t$, but only retaining a fixed window of recent states. 
The optimization problem can be expressed as:\n",
-    "\n",
-    "$$\n",
-    "\\min_{\\mathbf{x}_{t-L:t}} \\sum_{i=1}^{N} \\| \\mathbf{h}_i(\\mathbf{x}_{t-L:t}) - \\mathbf{z}_i \\|^2\n",
-    "$$\n",
-    "\n",
-    "where $L$ is the fixed lag, $\\mathbf{h}_i$ are the measurement functions, and $\\mathbf{z}_i$ are the measurements.\n",
-    "\n",
-    "## Usage Considerations\n",
-    "\n",
-    "- **Real-time Applications**: The `BatchFixedLagSmoother` is ideal for applications requiring real-time processing, such as robotics and autonomous vehicles, where the computational burden must be managed efficiently.\n",
-    "- **Configuration**: Proper configuration of the lag duration and optimization parameters is essential for optimal performance. Users should experiment with different settings to achieve the desired balance between accuracy and computational load.\n",
-    "\n",
-    "## Conclusion\n",
-    "\n",
-    "The `BatchFixedLagSmoother` class provides a robust framework for fixed-lag smoothing in nonlinear systems. Its ability to efficiently manage the factor graph and perform real-time updates makes it a valuable tool in various applications requiring dynamic state estimation."
-   ]
-  }
- ],
- "metadata": {},
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/gtsam/nonlinear/BatchFixedLagSmoother.h b/gtsam/nonlinear/BatchFixedLagSmoother.h
index 8dde520a7..f4d06bd24 100644
--- a/gtsam/nonlinear/BatchFixedLagSmoother.h
+++ b/gtsam/nonlinear/BatchFixedLagSmoother.h
@@ -33,9 +33,18 @@ public:
   /// Typedef for a shared pointer to an Incremental Fixed-Lag Smoother
   typedef std::shared_ptr<BatchFixedLagSmoother> shared_ptr;
 
-  /** default constructor */
+  /**
+   * Construct with parameters
+   *
+   * @param smootherLag The length of the smoother lag. Any variable older than this amount will be marginalized out.
+   * @param parameters The L-M optimization parameters
+   * @param enforceConsistency A flag indicating if the optimizer should enforce probabilistic consistency by maintaining the
+   *                           linearization point of all variables involved in linearized/marginal factors at the edge of the
+   *                           smoothing window.
+   */
   BatchFixedLagSmoother(double smootherLag = 0.0, const LevenbergMarquardtParams& parameters = LevenbergMarquardtParams(), bool enforceConsistency = true) :
-    FixedLagSmoother(smootherLag), parameters_(parameters), enforceConsistency_(enforceConsistency) { }
+    FixedLagSmoother(smootherLag), parameters_(parameters), enforceConsistency_(enforceConsistency) {
+  }
 
   /** destructor */
   ~BatchFixedLagSmoother() override {}
diff --git a/gtsam/nonlinear/doc/BatchFixedLagSmoother.ipynb b/gtsam/nonlinear/doc/BatchFixedLagSmoother.ipynb
new file mode 100644
index 000000000..a698bd430
--- /dev/null
+++ b/gtsam/nonlinear/doc/BatchFixedLagSmoother.ipynb
@@ -0,0 +1,78 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "283174f8",
+   "metadata": {},
+   "source": [
+    "# BatchFixedLagSmoother\n",
+    "\n",
+    "## Overview\n",
+    "\n",
+    "The `BatchFixedLagSmoother` class in GTSAM is designed for fixed-lag smoothing in nonlinear factor graphs. It maintains a sliding window of the most recent variables and marginalizes out older variables. This is particularly useful in real-time applications where memory and computational efficiency are critical.\n",
+    "\n",
+    "This fixed lag smoother will **batch-optimize** at every iteration, warm-starting from the last estimate."
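A usage sketch for the smoother documented above, following the pattern of GTSAM's `FixedLagSmootherExample.py`. In released builds the class is exposed to Python through `gtsam_unstable`; after the move in this patch it may be importable from `gtsam` directly, so treat the module names as assumptions:

```python
import numpy as np
import gtsam
import gtsam_unstable  # assumption: may become plain gtsam once this move lands

# 2-second lag, default Levenberg-Marquardt parameters
smoother = gtsam_unstable.BatchFixedLagSmoother(2.0, gtsam.LevenbergMarquardtParams())

new_factors = gtsam.NonlinearFactorGraph()
new_values = gtsam.Values()
new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()

# Anchor the first pose at the origin and stamp it with time t = 0
x0 = gtsam.symbol('x', 0)
prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
new_factors.push_back(gtsam.PriorFactorPose2(x0, gtsam.Pose2(0, 0, 0), prior_noise))
new_values.insert(x0, gtsam.Pose2(0.01, 0.01, 0.01))
new_timestamps.insert((x0, 0.0))

# Each update batch-optimizes the window and marginalizes variables older than the lag
smoother.update(new_factors, new_values, new_timestamps)
estimate = smoother.calculateEstimate()
```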
+ ] + }, + { + "cell_type": "markdown", + "id": "42c80522", + "metadata": {}, + "source": [ + "## Mathematical Formulation\n", + "\n", + "The `BatchFixedLagSmoother` operates on the principle of fixed-lag smoothing, where the objective is to estimate the state $\\mathbf{x}_t$ given all measurements up to time $t$, but only retaining a fixed window of recent states. The optimization problem can be expressed as:\n", + "$$\n", + "\\min_{\\mathbf{x}_{t-L:t}} \\sum_{i=1}^{N} \\| \\mathbf{h}_i(\\mathbf{x}_{t-L:t}) - \\mathbf{z}_i \\|^2\n", + "$$\n", + "where $L$ is the fixed lag, $\\mathbf{h}_i$ are the measurement functions, and $\\mathbf{z}_i$ are the measurements.\n", + "In practice, the functions $\\mathbf{h}_i$ depend only on a subset of the state variables $\\mathbf{X}_i$, and the optimization is performed over a set of $N$ *factors* $\\phi_i$ instead:\n", + "$$\n", + "\\min_{\\mathbf{x}_{t-L:t}} \\sum_{i=1}^{N} \\| \\phi_i(\\mathbf{X}_i; \\mathbf{z}_i) \\|^2\n", + "$$\n", + "The API below allows the user to add new factors at every iteration, which will be automatically pruned after they no longer depend on any variables in the lag." + ] + }, + { + "cell_type": "markdown", + "id": "92b4f851", + "metadata": {}, + "source": [ + "## API\n", + "\n", + "### Constructor\n", + "\n", + "You construct a `BatchFixedLagSmoother` object with the following parameters:\n", + "\n", + "- **smootherLag**: The length of the smoother lag. Any variable older than this amount will be marginalized out. *(Default: 0.0)*\n", + "- **parameters**: The Levenberg-Marquardt optimization parameters. *(Default: `LevenbergMarquardtParams()`)* \n", + "- **enforceConsistency**: A flag indicating whether the optimizer should enforce probabilistic consistency by maintaining the linearization point of all variables involved in linearized/marginal factors at the edge of the smoothing window. *(Default: `true`)*\n", + "\n", + "### Smoothing and Optimization\n", + "\n", + "- **update**: This method is the core of the `BatchFixedLagSmoother`. It processes new factors and variables, updating the current estimate of the state. The update method also manages the marginalization of variables that fall outside the fixed lag window.\n", + "\n", + "### Computational Considerations\n", + "\n", + "Every call to `update` triggers a batch LM optimization: use the parameters to control the convergence thresholds to bound computation to fit within your application." + ] + }, + { + "cell_type": "markdown", + "id": "4a2bdd3e", + "metadata": {}, + "source": [ + "## Internals\n", + "\n", + "- **marginalize**: This function handles the marginalization of variables that are no longer within the fixed lag window. Marginalization is a crucial step in maintaining the size of the factor graph, ensuring that only relevant variables are kept for optimization.\n" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/gtsam/nonlinear/nonlinear.md b/gtsam/nonlinear/nonlinear.md new file mode 100644 index 000000000..f334a3eac --- /dev/null +++ b/gtsam/nonlinear/nonlinear.md @@ -0,0 +1,21 @@ +# Nonlinear + +The `nonlinear` module in GTSAM focuses on solving nonlinear optimization problems using factor graphs and incremental solvers. + +Key Concepts: +- **Nonlinear Factors**: Represent constraints or measurements in a nonlinear optimization problem. +- **Factor Graphs**: A graphical representation of the optimization problem. 
+- **Nonlinear Solvers**: Various optimization methods, typically calling linear solves in `linear`. + +## Basics +- **`NonlinearFactor.h`**: Defines the base classes for nonlinear factors, `NonlinearFactor` and `NoiseModelFactor`. +- **`NonlinearFactorGraph.h`**: Implements a factor graph consisting of nonlinear factors. +- **`Values.h`**: Stores variable assignments for optimization. + +## Optimizers: +- **`GaussNewtonOptimizer.h`**: Implements the Gauss-Newton optimization algorithm. +- **`LevenbergMarquardtOptimizer.h`**: Provides the Levenberg-Marquardt optimization algorithm. +- **`DoglegOptimizer.h`**: Implements the Dogleg optimization algorithm. + +## Incremental Optimizers: +- **`ISAM2.h`**: Implements the iSAM2 incremental solver. diff --git a/myst.yml b/myst.yml index aa7f4a9d2..7984b2d20 100644 --- a/myst.yml +++ b/myst.yml @@ -13,6 +13,9 @@ project: - file: ./gtsam/geometry/geometry.md children: - pattern: ./gtsam/geometry/doc/* + - file: ./gtsam/nonlinear/nonlinear.md + children: + - pattern: ./gtsam/nonlinear/doc/* site: nav: - title: Getting started From 1fa20f544e2acf593231ef2ea025d36a044e1cf6 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 5 Apr 2025 12:05:17 -0400 Subject: [PATCH 06/21] Move optimizer docs to nonlinear --- .../output => gtsam/nonlinear/doc}/DoglegOptimizer.ipynb | 0 .../output => gtsam/nonlinear/doc}/GaussNewtonOptimizer.ipynb | 0 {doc/generating/output => gtsam/nonlinear/doc}/GncOptimizer.ipynb | 0 .../nonlinear/doc}/LevenbergMarquardtOptimizer.ipynb | 0 .../nonlinear/doc}/NonlinearConjugateGradientOptimizer.ipynb | 0 .../output => gtsam/nonlinear/doc}/NonlinearOptimizer.ipynb | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename {doc/generating/output => gtsam/nonlinear/doc}/DoglegOptimizer.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/GaussNewtonOptimizer.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/GncOptimizer.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/LevenbergMarquardtOptimizer.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/NonlinearConjugateGradientOptimizer.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/NonlinearOptimizer.ipynb (100%) diff --git a/doc/generating/output/DoglegOptimizer.ipynb b/gtsam/nonlinear/doc/DoglegOptimizer.ipynb similarity index 100% rename from doc/generating/output/DoglegOptimizer.ipynb rename to gtsam/nonlinear/doc/DoglegOptimizer.ipynb diff --git a/doc/generating/output/GaussNewtonOptimizer.ipynb b/gtsam/nonlinear/doc/GaussNewtonOptimizer.ipynb similarity index 100% rename from doc/generating/output/GaussNewtonOptimizer.ipynb rename to gtsam/nonlinear/doc/GaussNewtonOptimizer.ipynb diff --git a/doc/generating/output/GncOptimizer.ipynb b/gtsam/nonlinear/doc/GncOptimizer.ipynb similarity index 100% rename from doc/generating/output/GncOptimizer.ipynb rename to gtsam/nonlinear/doc/GncOptimizer.ipynb diff --git a/doc/generating/output/LevenbergMarquardtOptimizer.ipynb b/gtsam/nonlinear/doc/LevenbergMarquardtOptimizer.ipynb similarity index 100% rename from doc/generating/output/LevenbergMarquardtOptimizer.ipynb rename to gtsam/nonlinear/doc/LevenbergMarquardtOptimizer.ipynb diff --git a/doc/generating/output/NonlinearConjugateGradientOptimizer.ipynb b/gtsam/nonlinear/doc/NonlinearConjugateGradientOptimizer.ipynb similarity index 100% rename from doc/generating/output/NonlinearConjugateGradientOptimizer.ipynb rename to gtsam/nonlinear/doc/NonlinearConjugateGradientOptimizer.ipynb diff --git 
a/doc/generating/output/NonlinearOptimizer.ipynb b/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb similarity index 100% rename from doc/generating/output/NonlinearOptimizer.ipynb rename to gtsam/nonlinear/doc/NonlinearOptimizer.ipynb From f2745c47ef248bffaae97b49967e1d89cb75a06b Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 5 Apr 2025 12:14:44 -0400 Subject: [PATCH 07/21] Better overview --- gtsam/nonlinear/nonlinear.md | 60 +++++++++++++++++++++++++++--------- 1 file changed, 45 insertions(+), 15 deletions(-) diff --git a/gtsam/nonlinear/nonlinear.md b/gtsam/nonlinear/nonlinear.md index f334a3eac..99a9907f7 100644 --- a/gtsam/nonlinear/nonlinear.md +++ b/gtsam/nonlinear/nonlinear.md @@ -1,21 +1,51 @@ # Nonlinear -The `nonlinear` module in GTSAM focuses on solving nonlinear optimization problems using factor graphs and incremental solvers. +The `nonlinear` module in GTSAM includes a comprehensive set of tools for nonlinear optimization using factor graphs. Here's an overview of key components organized by category: -Key Concepts: -- **Nonlinear Factors**: Represent constraints or measurements in a nonlinear optimization problem. -- **Factor Graphs**: A graphical representation of the optimization problem. -- **Nonlinear Solvers**: Various optimization methods, typically calling linear solves in `linear`. +## Core Classes -## Basics -- **`NonlinearFactor.h`**: Defines the base classes for nonlinear factors, `NonlinearFactor` and `NoiseModelFactor`. -- **`NonlinearFactorGraph.h`**: Implements a factor graph consisting of nonlinear factors. -- **`Values.h`**: Stores variable assignments for optimization. +- **[NonlinearFactorGraph](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearFactorGraph.h)**: Represents the optimization problem as a graph of factors. +- **[NonlinearFactor](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearFactor.h)**: Base class for all nonlinear factors. +- **[NoiseModelFactor](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NoiseModelFactor.h)**: Base class for factors with noise models. +- **[Values](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/Values.h)**: Container for variable assignments used in optimization. -## Optimizers: -- **`GaussNewtonOptimizer.h`**: Implements the Gauss-Newton optimization algorithm. -- **`LevenbergMarquardtOptimizer.h`**: Provides the Levenberg-Marquardt optimization algorithm. -- **`DoglegOptimizer.h`**: Implements the Dogleg optimization algorithm. +## Batch Optimizers -## Incremental Optimizers: -- **`ISAM2.h`**: Implements the iSAM2 incremental solver. +- **[NonlinearOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearOptimizer.h)**: Base class for all batch optimizers. + - **[NonlinearOptimizerParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearOptimizerParams.h)**: Base parameters class for all optimizers. + +- **[GaussNewtonOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GaussNewtonOptimizer.h)**: Implements Gauss-Newton optimization. + - **[GaussNewtonParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GaussNewtonParams.h)**: Parameters for Gauss-Newton optimization. + +- **[LevenbergMarquardtOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LevenbergMarquardtOptimizer.h)**: Implements Levenberg-Marquardt optimization. 
+ - **[LevenbergMarquardtParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LevenbergMarquardtParams.h)**: Parameters for Levenberg-Marquardt optimization. + +- **[DoglegOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/DoglegOptimizer.h)**: Implements Powell's Dogleg optimization. + - **[DoglegParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/DoglegParams.h)**: Parameters for Dogleg optimization. + +- **[GncOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GncOptimizer.h)**: Implements robust optimization using Graduated Non-Convexity. + - **[GncParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GncParams.h)**: Parameters for Graduated Non-Convexity optimization. + +## Incremental Optimizers + +- **[ISAM2](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ISAM2.h)**: Incremental Smoothing and Mapping 2, with fluid relinearization. + - **[ISAM2Params](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ISAM2Params.h)**: Parameters controlling the ISAM2 algorithm. + - **[ISAM2Result](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ISAM2Result.h)**: Results from ISAM2 update operations. +- **[NonlinearISAM](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearISAM.h)**: Original iSAM implementation (mostly superseded by ISAM2). + +## Specialized Factors + +- **[PriorFactor](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/PriorFactor.h)**: Imposes a prior constraint on a variable. +- **[NonlinearEquality](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearEquality.h)**: Enforces equality constraints between variables. +- **[LinearContainerFactor](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LinearContainerFactor.h)**: Wraps linear factors for inclusion in nonlinear factor graphs. + +## Filtering and Smoothing + +- **[ExtendedKalmanFilter](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ExtendedKalmanFilter.h)**: Nonlinear Kalman filter implementation. +- **[FixedLagSmoother](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/FixedLagSmoother.h)**: Base class for fixed-lag smoothers. +- **[BatchFixedLagSmoother](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/BatchFixedLagSmoother.h)**: Implementation of a fixed-lag smoother using batch optimization. + +## Analysis and Visualization + +- **[Marginals](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/Marginals.h)**: Computes marginal covariances from optimization results. +- **[GraphvizFormatting](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GraphvizFormatting.h)**: Provides customization for factor graph visualization. 
\ No newline at end of file From 8a0521bde951e196f5275945f751d7e3d80fbd64 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 5 Apr 2025 12:30:30 -0400 Subject: [PATCH 08/21] NLO and Dogleg docs --- gtsam/nonlinear/doc/DoglegOptimizer.ipynb | 80 +++++++++++--------- gtsam/nonlinear/doc/NonlinearOptimizer.ipynb | 61 ++++++--------- 2 files changed, 69 insertions(+), 72 deletions(-) diff --git a/gtsam/nonlinear/doc/DoglegOptimizer.ipynb b/gtsam/nonlinear/doc/DoglegOptimizer.ipynb index 42a100a85..846e2d1ae 100644 --- a/gtsam/nonlinear/doc/DoglegOptimizer.ipynb +++ b/gtsam/nonlinear/doc/DoglegOptimizer.ipynb @@ -7,41 +7,9 @@ "source": [ "# DoglegOptimizer Class Documentation\n", "\n", - "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", - "\n", "## Overview\n", "\n", - "The `DoglegOptimizer` class in GTSAM is a specialized optimization algorithm designed for solving nonlinear least squares problems. It implements the Dogleg method, which is a hybrid approach combining the steepest descent and Gauss-Newton methods. This optimizer is particularly effective for problems where the Hessian is difficult to compute or when the initial guess is far from the solution.\n", - "\n", - "## Key Features\n", - "\n", - "- **Hybrid Approach**: Combines the strengths of both the steepest descent and Gauss-Newton methods.\n", - "- **Trust Region Method**: Utilizes a trust region to determine the step size, balancing between the accuracy of Gauss-Newton and the robustness of steepest descent.\n", - "- **Efficient for Nonlinear Problems**: Designed to handle complex nonlinear least squares problems effectively.\n", - "\n", - "## Key Methods\n", - "\n", - "### Initialization and Setup\n", - "\n", - "- **Constructor**: Initializes the optimizer with default or specified parameters.\n", - "- **setDeltaInitial**: Sets the initial trust region radius, $\\Delta_0$, which influences the step size in the optimization process.\n", - "\n", - "### Optimization Process\n", - "\n", - "- **optimize**: Executes the optimization process, iteratively refining the solution to minimize the error in the nonlinear least squares problem.\n", - "- **iterate**: Performs a single iteration of the Dogleg optimization, updating the current estimate based on the trust region and the computed step.\n", - "\n", - "### Result Evaluation\n", - "\n", - "- **error**: Computes the error of the current estimate, providing a measure of how well the current solution fits the problem constraints.\n", - "- **values**: Returns the optimized values after the optimization process is complete.\n", - "\n", - "### Trust Region Management\n", - "\n", - "- **getDelta**: Retrieves the current trust region radius, $\\Delta$, which is crucial for understanding the optimizer's step size decisions.\n", - "- **setDelta**: Manually sets the trust region radius, allowing for fine-tuned control over the optimization process.\n", - "\n", - "## Mathematical Formulation\n", + "The `DoglegOptimizer` class in GTSAM is a specialized optimization algorithm designed for solving nonlinear least squares problems. 
It implements the Dogleg method, which is a hybrid approach combining the steepest descent and Gauss-Newton methods.\n", "\n", "The Dogleg method is characterized by its use of two distinct steps:\n", "\n", @@ -55,16 +23,58 @@ "\n", "The Dogleg step, $p_{dl}$, is a combination of these two steps, determined by the trust region radius $\\Delta$.\n", "\n", + "It's key features:\n", + "\n", + "- **Hybrid Approach**: Combines the strengths of both the steepest descent and Gauss-Newton methods.\n", + "- **Trust Region Method**: Utilizes a trust region to determine the step size, balancing between the accuracy of Gauss-Newton and the robustness of steepest descent.\n", + "- **Efficient for Nonlinear Problems**: Designed to handle complex nonlinear least squares problems effectively." + ] + }, + { + "cell_type": "markdown", + "id": "758e347b", + "metadata": {}, + "source": [ + "## Key Methods\n", + "\n", + "Please see the base class [NonlinearOptimizer.ipynb](NonlinearOptimizer.ipynb).\n", + "\n", + "## Parameters\n", + "\n", + "The `DoglegParams` class defines parameters specific to Powell's Dogleg optimization algorithm:\n", + "\n", + "| Parameter | Description |\n", + "|-----------|-------------|\n", + "| `deltaInitial` | Initial trust region radius that controls step size (default: 1.0) |\n", + "| `verbosityDL` | Controls algorithm-specific diagnostic output (options: SILENT, VERBOSE) |\n", + "\n", + "These parameters complement the standard optimization parameters inherited from `NonlinearOptimizerParams`, which include:\n", + "\n", + "- Maximum iterations\n", + "- Relative and absolute error thresholds\n", + "- Error function verbosity\n", + "- Linear solver type\n", + "\n", + "Powell's Dogleg algorithm combines Gauss-Newton and gradient descent approaches within a trust region framework. The `deltaInitial` parameter defines the initial size of this trust region, which adaptively changes during optimization based on how well the linear approximation matches the nonlinear function.\n", + "\n", "## Usage Considerations\n", "\n", "- **Initial Guess**: The performance of the Dogleg optimizer can be sensitive to the initial guess. A good initial estimate can significantly speed up convergence.\n", "- **Parameter Tuning**: The choice of the initial trust region radius and other parameters can affect the convergence rate and stability of the optimization.\n", "\n", - "The `DoglegOptimizer` is a powerful tool for solving nonlinear optimization problems, particularly when dealing with large-scale systems where computational efficiency is crucial. By leveraging the hybrid approach of the Dogleg method, it provides a robust solution capable of handling a wide range of problem complexities." 
+    "## Files\n",
+    "\n",
+    "- [DoglegOptimizer.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/DoglegOptimizer.h)\n",
+    "- [DoglegOptimizer.cpp](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/DoglegOptimizer.cpp)\n",
+    "- [DoglegParams.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/DoglegParams.h)"
    ]
   }
  ],
- "metadata": {},
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
  "nbformat": 4,
  "nbformat_minor": 5
 }
diff --git a/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb b/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb
index 17057df52..c459b4de1 100644
--- a/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb
+++ b/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb
@@ -7,60 +7,47 @@
    "source": [
     "# NonlinearOptimizer Class Documentation\n",
     "\n",
-    "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
-    "\n",
     "## Overview\n",
     "\n",
-    "The `NonlinearOptimizer` class in GTSAM is a foundational component for solving nonlinear optimization problems. It provides a flexible interface for optimizing nonlinear factor graphs, which are commonly used in robotics and computer vision applications.\n",
+    "The `NonlinearOptimizer` class in GTSAM is the base class for (batch) nonlinear optimization solvers. It provides the basic API for optimizing nonlinear factor graphs, commonly used in robotics and computer vision applications.\n",
     "\n",
-    "The primary purpose of the `NonlinearOptimizer` is to iteratively refine an initial estimate of a solution to minimize a nonlinear cost function. This class serves as a base class for specific optimization algorithms like Gauss-Newton, Levenberg-Marquardt, and Dogleg.\n",
+    "The primary purpose of the `NonlinearOptimizer` is to iteratively refine an initial estimate of a solution to minimize a nonlinear cost function. Specific optimization algorithms like Gauss-Newton, Levenberg-Marquardt, and Dogleg are implemented in derived classes.\n",
     "\n",
-    "## Key Methods\n",
-    "\n",
-    "### `optimize()`\n",
-    "The `optimize()` method is the core function of the `NonlinearOptimizer` class. It performs the optimization process, iteratively updating the estimate to converge to a local minimum of the cost function.\n",
-    "\n",
-    "### `error()`\n",
-    "The `error()` method computes the total error of the current estimate. This is typically the sum of squared errors for all factors in the graph. Mathematically, the error can be expressed as:\n",
-    "\n",
-    "$$\n",
-    "E(x) = \\sum_{i} \\| f_i(x) \\|^2\n",
-    "$$\n",
-    "\n",
-    "where $f_i(x)$ represents the residual error of the $i$-th factor.\n",
-    "\n",
-    "### `values()`\n",
-    "The `values()` method returns the current set of variable estimates. These estimates are updated during the optimization process.\n",
-    "\n",
-    "### `iterations()`\n",
-    "The `iterations()` method provides the number of iterations performed during the optimization process. This can be useful for analyzing the convergence behavior of the optimizer.\n",
-    "\n",
-    "### `params()`\n",
-    "The `params()` method returns the parameters used by the optimizer. These parameters can include settings like convergence thresholds, maximum iterations, and other algorithm-specific options.\n",
-    "\n",
-    "## Usage\n",
-    "\n",
-    "The `NonlinearOptimizer` class is typically not used directly. Instead, one of its derived classes, such as `GaussNewtonOptimizer`, `LevenbergMarquardtOptimizer`, or `DoglegOptimizer`, is used to perform specific types of optimization. 
These derived classes implement the `optimize()` method according to their respective algorithms.\n", - "\n", - "## Mathematical Foundations\n", + "## Mathematical Foundation\n", "\n", "The optimization process in `NonlinearOptimizer` is based on iterative methods that solve for the minimum of a nonlinear cost function. The general approach involves linearizing the nonlinear problem at the current estimate and solving the resulting linear system to update the estimate. This process is repeated until convergence criteria are met.\n", "\n", "The optimization problem can be formally defined as:\n", "\n", "$$\n", - "\\min_{x} \\sum_{i} \\| f_i(x) \\|^2\n", + "\\min_{x} \\sum_{i} \\| \\phi_i(x) \\|^2\n", "$$\n", "\n", - "where $x$ is the vector of variables to be optimized, and $f_i(x)$ are the residuals of the factors in the graph.\n", + "where $x$ is the vector of variables to be optimized, and $\\phi_i(x)$ are the residuals of the factors in the graph.\n", "\n", - "## Conclusion\n", + "## Key Methods\n", "\n", - "The `NonlinearOptimizer` class is a crucial component in GTSAM for solving nonlinear optimization problems. By providing a common interface and shared functionality, it enables the implementation of various optimization algorithms tailored to specific problem requirements. Understanding the key methods and their roles is essential for effectively utilizing this class in practical applications." + "- The `optimize()` method is the core function of the `NonlinearOptimizer` class. It performs the optimization process, iteratively updating the estimate to converge to a local minimum of the cost function.\n", + "- The `error()` method computes the total error of the current estimate. This is typically the sum of squared errors for all factors in the graph. Mathematically, the error can be expressed as:\n", + " $$\n", + " E(x) = \\sum_{i} \\| \\phi_i(x) \\|^2\n", + " $$\n", + " where $\\phi_i(x)$ represents the residual error of the $i$-th factor.\n", + "- The `values()` method returns the current set of variable estimates. These estimates are updated during the optimization process.\n", + "- The `iterations()` method provides the number of iterations performed during the optimization process. This can be useful for analyzing the convergence behavior of the optimizer.\n", + "- The `params()` method returns the parameters used by the optimizer. These parameters can include settings like convergence thresholds, maximum iterations, and other algorithm-specific options.\n", + "\n", + "## Usage\n", + "\n", + "The `NonlinearOptimizer` class is typically not used directly. Instead, one of its derived classes, such as `GaussNewtonOptimizer`, `LevenbergMarquardtOptimizer`, or `DoglegOptimizer`, is used to perform specific types of optimization. These derived classes implement the `optimize()` method according to their respective algorithms." 
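A minimal sketch of the usage pattern just described, using standard Python wrapper names (a toy one-factor problem, not taken from this PR):

```python
import gtsam

# Toy problem: a single Pose2 prior, deliberately initialized away from the answer
graph = gtsam.NonlinearFactorGraph()
noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
x0 = gtsam.symbol('x', 0)
graph.add(gtsam.PriorFactorPose2(x0, gtsam.Pose2(0, 0, 0), noise))

initial = gtsam.Values()
initial.insert(x0, gtsam.Pose2(0.5, -0.3, 0.2))

# Any derived optimizer exposes the base-class API described above
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial,
                                              gtsam.LevenbergMarquardtParams())
result = optimizer.optimize()                     # Values at the (local) minimum
print(optimizer.error(), optimizer.iterations()) # final cost and iteration count
```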
    ]
   }
  ],
- "metadata": {},
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
  "nbformat": 4,
  "nbformat_minor": 5
 }

From bea6979b1802c5943c3bdbac61846b4752c0a00e Mon Sep 17 00:00:00 2001
From: p-zach
Date: Sat, 5 Apr 2025 15:20:19 -0400
Subject: [PATCH 09/21] Explanatory text

---
 doc/generating/gpt_generate.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/doc/generating/gpt_generate.py b/doc/generating/gpt_generate.py
index ef5bf7c0f..3936023e6 100644
--- a/doc/generating/gpt_generate.py
+++ b/doc/generating/gpt_generate.py
@@ -8,9 +8,10 @@ See LICENSE for the license information
 Author: Porter Zach
 
 This script generates interactive Python notebooks (.ipynb) that document GTSAM
-header files. It retrieves the header file content from the GTSAM GitHub repository,
-sends it to OpenAI's API for processing, and saves the generated documentation
-as a Jupyter notebook.
+header files. Since inserting the text of the file directly into the prompt
+might use too many tokens, it retrieves the header file content from the GTSAM
+GitHub repository. It then sends it to OpenAI's API for processing, and saves
+the generated documentation as a Jupyter notebook.
 
 Functions:
     is_url_valid(url: str) -> bool:
From fab57eec541830c542da3682fb1be67e1b7267d0 Mon Sep 17 00:00:00 2001
From: p-zach
Date: Sat, 5 Apr 2025 15:21:30 -0400
Subject: [PATCH 10/21] Regenerated failed files

---
 doc/generating/output/CustomFactor.ipynb    | 52 ++++++++++++++-
 doc/generating/output/ISAM2.ipynb           | 55 +++++++++++++++++-
 doc/generating/output/NonlinearFactor.ipynb | 58 ++++++++++++++++++-
 doc/generating/output/NonlinearISAM.ipynb   | 62 ++++++++++++++++++++-
 4 files changed, 219 insertions(+), 8 deletions(-)

diff --git a/doc/generating/output/CustomFactor.ipynb b/doc/generating/output/CustomFactor.ipynb
index e361df53f..9bccd7f74 100644
--- a/doc/generating/output/CustomFactor.ipynb
+++ b/doc/generating/output/CustomFactor.ipynb
@@ -2,10 +2,58 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "736fa438",
+   "id": "31f395c5",
    "metadata": {},
    "source": [
-    "I'm unable to access external URLs directly. However, if you upload the file `CustomFactor.h`, I can help generate the documentation for it."
+    "# CustomFactor Class Documentation\n",
+    "\n",
+    "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+    "\n",
+    "## Overview\n",
+    "\n",
+    "The `CustomFactor` class in GTSAM is a specialized type of factor used in factor graphs, which allows users to define custom error functions and Jacobians. This class is particularly useful when the standard factors provided by GTSAM do not meet specific application needs, allowing for greater flexibility and customization in defining the behavior of the factor.\n",
+    "\n",
+    "## Key Functionalities\n",
+    "\n",
+    "### Custom Error Function\n",
+    "\n",
+    "The `CustomFactor` class allows users to define a custom error function. This is a critical feature as it enables the modeling of specific constraints or measurements that are not covered by existing GTSAM factors. The error function typically represents the difference between predicted and observed measurements and is central to the optimization process in factor graphs.\n",
+    "\n",
+    "### Jacobian Calculation\n",
+    "\n",
+    "In addition to the error function, users can define custom Jacobians. The Jacobian is the matrix of all first-order partial derivatives of a vector-valued function. 
In the context of `CustomFactor`, it represents the sensitivity of the error function with respect to the variables involved. This is crucial for optimization algorithms, which rely on gradient information to find the minimum of the error function.\n", + "\n", + "### Integration with Factor Graphs\n", + "\n", + "`CustomFactor` seamlessly integrates with GTSAM's factor graph framework. It can be added to a factor graph just like any other factor, participating in the graph optimization process. This integration ensures that custom factors can be used alongside standard factors, providing a flexible and powerful tool for solving complex estimation problems.\n", + "\n", + "## Mathematical Formulation\n", + "\n", + "### Error Function\n", + "\n", + "The error function $e(\\mathbf{x})$ is defined by the user and represents the discrepancy between the observed and predicted measurements. It is a vector-valued function:\n", + "\n", + "$$ e(\\mathbf{x}) = h(\\mathbf{x}) - \\mathbf{z} $$\n", + "\n", + "where $h(\\mathbf{x})$ is the predicted measurement based on the current estimate of the variables $\\mathbf{x}$, and $\\mathbf{z}$ is the observed measurement.\n", + "\n", + "### Jacobian\n", + "\n", + "The Jacobian matrix $J$ of the error function with respect to the variables $\\mathbf{x}$ is given by:\n", + "\n", + "$$ J = \\frac{\\partial e}{\\partial \\mathbf{x}} $$\n", + "\n", + "This matrix is used in the optimization process to update the estimates of the variables in a direction that reduces the error.\n", + "\n", + "## Usage\n", + "\n", + "To use `CustomFactor`, users must:\n", + "\n", + "1. Define the custom error function that models the specific measurement or constraint.\n", + "2. Implement the calculation of the Jacobian matrix for the error function.\n", + "3. Add the `CustomFactor` to a factor graph, specifying the keys of the variables it depends on.\n", + "\n", + "By following these steps, users can leverage the flexibility of `CustomFactor` to incorporate custom measurements and constraints into their factor graph models, enhancing the capability of GTSAM to solve a wide range of estimation problems." ] } ], diff --git a/doc/generating/output/ISAM2.ipynb b/doc/generating/output/ISAM2.ipynb index 0b4ae227d..dcab771a3 100644 --- a/doc/generating/output/ISAM2.ipynb +++ b/doc/generating/output/ISAM2.ipynb @@ -2,10 +2,61 @@ "cells": [ { "cell_type": "markdown", - "id": "7f0a9feb", + "id": "867a20bc", "metadata": {}, "source": [ - "I'm unable to directly access or search the content of the uploaded file. However, if you can provide the text or key excerpts from the file, I can help generate the documentation based on that information." + "# ISAM2 Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `ISAM2` class in GTSAM is an incremental smoothing and mapping algorithm that efficiently updates the solution to a nonlinear optimization problem as new measurements are added. 
This class is particularly useful in applications such as SLAM (Simultaneous Localization and Mapping) where real-time performance is crucial.\n", + "\n", + "## Key Features\n", + "\n", + "- **Incremental Updates**: `ISAM2` allows for incremental updates to the factor graph, avoiding the need to solve the entire problem from scratch with each new measurement.\n", + "- **Bayesian Inference**: Utilizes Bayes' rule to update beliefs about the state of the system as new information becomes available.\n", + "- **Nonlinear Optimization**: Capable of handling nonlinear systems, leveraging iterative optimization techniques to refine estimates.\n", + "- **Efficient Variable Reordering**: Dynamically reorders variables to maintain sparsity and improve computational efficiency.\n", + "\n", + "## Main Methods\n", + "\n", + "### Initialization and Configuration\n", + "\n", + "- **ISAM2 Constructor**: Initializes the `ISAM2` object with optional parameters for configuring the behavior of the algorithm, such as relinearization thresholds and ordering strategies.\n", + "\n", + "### Updating the Graph\n", + "\n", + "- **update**: Incorporates new factors and variables into the existing factor graph. This method performs the core incremental update, refining the solution based on new measurements.\n", + "\n", + "### Accessing Results\n", + "\n", + "- **calculateEstimate**: Retrieves the current estimate of the variables in the factor graph. This method can be called with specific variable keys to obtain their estimates.\n", + "- **marginalCovariance**: Computes the marginal covariance of a specified variable, providing insight into the uncertainty of the estimate.\n", + "\n", + "### Advanced Features\n", + "\n", + "- **relinearize**: Forces relinearization of the entire factor graph, which can be useful in scenarios where significant nonlinearities are introduced.\n", + "- **getFactorsUnsafe**: Provides access to the internal factor graph, allowing for advanced manipulations and custom analyses.\n", + "\n", + "## Mathematical Formulation\n", + "\n", + "The `ISAM2` algorithm is based on the factor graph representation of the problem, where the joint probability distribution is expressed as a product of factors:\n", + "\n", + "$$ P(X|Z) \\propto \\prod_{i} \\phi_i(X_i, Z_i) $$\n", + "\n", + "Here, $X$ represents the set of variables, $Z$ the measurements, and $\\phi_i$ the individual factors.\n", + "\n", + "The update process involves solving a nonlinear optimization problem, typically using the Gauss-Newton or Levenberg-Marquardt algorithms, to minimize the error:\n", + "\n", + "$$ \\min_{X} \\sum_{i} \\| h_i(X_i) - Z_i \\|^2 $$\n", + "\n", + "where $h_i(X_i)$ are the measurement functions.\n", + "\n", + "## Conclusion\n", + "\n", + "The `ISAM2` class is a powerful tool for real-time estimation in dynamic environments. Its ability to efficiently update solutions with new data makes it ideal for applications requiring continuous adaptation and refinement of estimates. Users can leverage its advanced features to customize the behavior and performance of the algorithm to suit specific needs." 
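A sketch of the incremental loop described above, again using standard Python wrapper names (a toy single-step example, not from this PR):

```python
import numpy as np
import gtsam

isam = gtsam.ISAM2(gtsam.ISAM2Params())

# Per time step, pass only the NEW factors and initial guesses to update()
new_factors = gtsam.NonlinearFactorGraph()
new_values = gtsam.Values()
x0 = gtsam.symbol('x', 0)
prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 0.05]))
new_factors.add(gtsam.PriorFactorPose2(x0, gtsam.Pose2(0, 0, 0), prior_noise))
new_values.insert(x0, gtsam.Pose2(0.05, -0.02, 0.01))

isam.update(new_factors, new_values)      # incremental step, no batch re-solve
estimate = isam.calculateEstimate()       # current estimate of all variables
covariance = isam.marginalCovariance(x0)  # marginal uncertainty of one variable
```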
] } ], diff --git a/doc/generating/output/NonlinearFactor.ipynb b/doc/generating/output/NonlinearFactor.ipynb index 7ab33cbc9..32b0685ca 100644 --- a/doc/generating/output/NonlinearFactor.ipynb +++ b/doc/generating/output/NonlinearFactor.ipynb @@ -2,10 +2,64 @@ "cells": [ { "cell_type": "markdown", - "id": "37ed0b18", + "id": "381ccaaa", "metadata": {}, "source": [ - "It seems there was an issue with accessing the file content directly. Could you please provide the content of the file or any specific details you would like to be documented?" + "# NonlinearFactor Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `NonlinearFactor` class in GTSAM is a fundamental component used in nonlinear optimization problems. It represents a factor in a factor graph, which is a key concept in probabilistic graphical models. The class is designed to work with nonlinear functions, making it suitable for a wide range of applications in robotics and computer vision, such as SLAM (Simultaneous Localization and Mapping) and structure from motion.\n", + "\n", + "## Key Functionalities\n", + "\n", + "### Error Calculation\n", + "\n", + "- **evaluateError**: This method computes the error vector for the factor given a set of variable values. The error is typically the difference between the predicted measurement and the actual measurement. The function can also return the Jacobian matrices if needed, which are crucial for optimization algorithms like Gauss-Newton or Levenberg-Marquardt.\n", + "\n", + "### Jacobian and Hessian\n", + "\n", + "- **linearize**: This method linearizes the nonlinear factor around a linearization point. It returns a `GaussianFactor`, which is an approximation of the `NonlinearFactor` using a first-order Taylor expansion. This is a critical step in iterative optimization methods, where the problem is repeatedly linearized and solved.\n", + "\n", + "### Active Flag\n", + "\n", + "- **active**: This function checks whether the factor should be included in the optimization process. A factor might be inactive if it does not contribute to the error, which can occur in cases of conditional constraints or gating functions.\n", + "\n", + "### Dimensionality\n", + "\n", + "- **dim**: Returns the dimensionality of the factor, which corresponds to the size of the error vector. This is important for understanding the contribution of the factor to the overall optimization problem.\n", + "\n", + "### Key Management\n", + "\n", + "- **keys**: Provides access to the keys (or variable indices) involved in the factor. This is essential for understanding which variables the factor is connected to in the factor graph.\n", + "\n", + "## Mathematical Formulation\n", + "\n", + "The `NonlinearFactor` is generally represented by a function $f(x)$, where $x$ is a vector of variables. The error is given by:\n", + "\n", + "$$ e(x) = z - f(x) $$\n", + "\n", + "where $z$ is the observed measurement. The optimization process aims to minimize the sum of squared errors:\n", + "\n", + "$$ \\min_x \\sum_i \\| e_i(x) \\|^2 $$\n", + "\n", + "Linearization involves approximating $f(x)$ around a point $x_0$:\n", + "\n", + "$$ f(x) \\approx f(x_0) + J(x - x_0) $$\n", + "\n", + "where $J$ is the Jacobian matrix of $f$ at $x_0$. 
This leads to a linearized error:\n", + "\n", + "$$ e(x) \\approx z - (f(x_0) + J(x - x_0)) $$\n", + "\n", + "## Usage Notes\n", + "\n", + "- The `NonlinearFactor` class is typically used in conjunction with a `NonlinearFactorGraph`, which is a collection of such factors.\n", + "- Users need to implement the `evaluateError` method in derived classes to define the specific measurement model.\n", + "- The class is designed to be flexible and extensible, allowing for custom factors to be created for specific applications.\n", + "\n", + "In summary, the `NonlinearFactor` class is a versatile and essential component for building and solving nonlinear optimization problems in GTSAM. Its ability to handle nonlinear relationships and provide linear approximations makes it suitable for a wide range of applications in robotics and beyond." ] } ], diff --git a/doc/generating/output/NonlinearISAM.ipynb b/doc/generating/output/NonlinearISAM.ipynb index 83f92ddfd..c7296bab8 100644 --- a/doc/generating/output/NonlinearISAM.ipynb +++ b/doc/generating/output/NonlinearISAM.ipynb @@ -2,10 +2,68 @@ "cells": [ { "cell_type": "markdown", - "id": "e31da023", + "id": "2b6fc012", "metadata": {}, "source": [ - "It seems there is an issue with accessing the file directly. However, I can guide you on how to document the class if you can provide the class definition and its key methods. You can paste the relevant parts of the file here, and I'll help you create the Markdown documentation." + "# NonlinearISAM Class Documentation\n", + "\n", + "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "\n", + "## Overview\n", + "\n", + "The `NonlinearISAM` class in GTSAM is a powerful tool for incrementally solving nonlinear factor graphs. It is particularly useful in applications where the problem is continuously evolving, such as in SLAM (Simultaneous Localization and Mapping) or incremental structure-from-motion. The class leverages the iSAM (incremental Smoothing and Mapping) algorithm to efficiently update solutions as new measurements are added.\n", + "\n", + "## Key Features\n", + "\n", + "- **Incremental Updates**: `NonlinearISAM` allows for the efficient update of the solution when new factors are added to the graph. This is crucial for real-time applications where the problem is continuously changing.\n", + " \n", + "- **Batch Initialization**: The class can perform a full batch optimization to initialize the solution, which can then be refined incrementally.\n", + "\n", + "- **Marginalization**: It supports marginalizing out variables that are no longer needed, which helps in maintaining computational efficiency.\n", + "\n", + "## Main Methods\n", + "\n", + "### Initialization and Update\n", + "\n", + "- **`update`**: This method is central to the `NonlinearISAM` class. It allows for the addition of new factors and variables to the existing factor graph. The update is performed incrementally, leveraging previous computations to enhance efficiency.\n", + "\n", + "- **`estimate`**: After performing updates, this method retrieves the current best estimate of the variable values.\n", + "\n", + "### Batch Operations\n", + "\n", + "- **`batchStep`**: This method performs a full batch optimization, which can be useful for reinitializing the solution or when a significant change in the problem structure occurs.\n", + "\n", + "### Marginalization\n", + "\n", + "- **`marginalize`**: This method allows for the removal of variables from the factor graph. 
Marginalization is useful for reducing the problem size and maintaining efficiency.\n", + "\n", + "## Mathematical Background\n", + "\n", + "The `NonlinearISAM` class operates on factor graphs, which are bipartite graphs consisting of variable nodes and factor nodes. The goal is to find the configuration of variables that maximizes the product of all factors, often expressed as:\n", + "\n", + "$$\n", + "\\max_{\\mathbf{x}} \\prod_{i} \\phi_i(\\mathbf{x}_i)\n", + "$$\n", + "\n", + "where $\\phi_i(\\mathbf{x}_i)$ are the factors depending on subsets of variables $\\mathbf{x}_i$.\n", + "\n", + "The iSAM algorithm updates the solution by incrementally solving the linearized system of equations derived from the factor graph:\n", + "\n", + "$$\n", + "\\mathbf{A} \\Delta \\mathbf{x} = \\mathbf{b}\n", + "$$\n", + "\n", + "where $\\mathbf{A}$ is the Jacobian matrix of the factors, $\\Delta \\mathbf{x}$ is the update to the variable estimates, and $\\mathbf{b}$ is the residual vector.\n", + "\n", + "## Usage Notes\n", + "\n", + "- **Efficiency**: The incremental nature of `NonlinearISAM` makes it highly efficient for large-scale problems where new data is continuously being integrated.\n", + "\n", + "- **Robustness**: The ability to perform batch optimizations and marginalize variables provides robustness against changes in the problem structure.\n", + "\n", + "- **Applications**: This class is particularly suited for robotics and computer vision applications where real-time performance is critical.\n", + "\n", + "In summary, the `NonlinearISAM` class is a sophisticated tool for handling dynamic nonlinear optimization problems, offering both incremental and batch processing capabilities to efficiently manage evolving factor graphs." ] } ], From 7c1a1e0765def267a154df6b253dcacc68d0f150 Mon Sep 17 00:00:00 2001 From: p-zach Date: Sat, 5 Apr 2025 15:29:11 -0400 Subject: [PATCH 11/21] Nitpicking --- gtsam/nonlinear/doc/DoglegOptimizer.ipynb | 4 ++-- gtsam/nonlinear/doc/NonlinearOptimizer.ipynb | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gtsam/nonlinear/doc/DoglegOptimizer.ipynb b/gtsam/nonlinear/doc/DoglegOptimizer.ipynb index 846e2d1ae..bff4d2609 100644 --- a/gtsam/nonlinear/doc/DoglegOptimizer.ipynb +++ b/gtsam/nonlinear/doc/DoglegOptimizer.ipynb @@ -5,7 +5,7 @@ "id": "f851cef5", "metadata": {}, "source": [ - "# DoglegOptimizer Class Documentation\n", + "# DoglegOptimizer\n", "\n", "## Overview\n", "\n", @@ -23,7 +23,7 @@ "\n", "The Dogleg step, $p_{dl}$, is a combination of these two steps, determined by the trust region radius $\\Delta$.\n", "\n", - "It's key features:\n", + "Key features:\n", "\n", "- **Hybrid Approach**: Combines the strengths of both the steepest descent and Gauss-Newton methods.\n", "- **Trust Region Method**: Utilizes a trust region to determine the step size, balancing between the accuracy of Gauss-Newton and the robustness of steepest descent.\n", diff --git a/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb b/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb index c459b4de1..84c9283a9 100644 --- a/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb +++ b/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb @@ -5,7 +5,7 @@ "id": "2e4812da", "metadata": {}, "source": [ - "# NonlinearOptimizer Class Documentation\n", + "# NonlinearOptimizer\n", "\n", "## Overview\n", "\n", From 159e185764990bb9b3abb9c6f240f4de2681fbe6 Mon Sep 17 00:00:00 2001 From: p-zach Date: Sat, 5 Apr 2025 16:00:52 -0400 Subject: [PATCH 12/21] Refine other optimizers' docs --- 
.../nonlinear/doc/GaussNewtonOptimizer.ipynb | 61 +++++++-------- gtsam/nonlinear/doc/GncOptimizer.ipynb | 75 +++++++++++-------- .../doc/LevenbergMarquardtOptimizer.ipynb | 64 +++++++++------- .../NonlinearConjugateGradientOptimizer.ipynb | 57 +++++++------- 4 files changed, 139 insertions(+), 118 deletions(-) diff --git a/gtsam/nonlinear/doc/GaussNewtonOptimizer.ipynb b/gtsam/nonlinear/doc/GaussNewtonOptimizer.ipynb index 0f590e43b..82932189c 100644 --- a/gtsam/nonlinear/doc/GaussNewtonOptimizer.ipynb +++ b/gtsam/nonlinear/doc/GaussNewtonOptimizer.ipynb @@ -5,41 +5,12 @@ "id": "6463d580", "metadata": {}, "source": [ - "# GaussNewtonOptimizer Class Documentation\n", - "\n", - "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "# GaussNewtonOptimizer\n", "\n", "## Overview\n", "\n", "The `GaussNewtonOptimizer` class in GTSAM is designed to optimize nonlinear factor graphs using the Gauss-Newton algorithm. This class is particularly suited for problems where the cost function can be approximated well by a quadratic function near the minimum. The Gauss-Newton method is an iterative optimization technique that updates the solution by linearizing the nonlinear system at each iteration.\n", "\n", - "## Key Features\n", - "\n", - "- **Iterative Optimization**: The optimizer refines the solution iteratively by linearizing the nonlinear system around the current estimate.\n", - "- **Convergence Control**: It provides mechanisms to control the convergence through parameters such as maximum iterations and relative error tolerance.\n", - "- **Integration with GTSAM**: Seamlessly integrates with GTSAM's factor graph framework, allowing it to be used with various types of factors and variables.\n", - "\n", - "## Key Methods\n", - "\n", - "### Constructor\n", - "\n", - "- **GaussNewtonOptimizer**: Initializes the optimizer with a given factor graph and initial values. The constructor sets up the optimization problem and prepares it for iteration.\n", - "\n", - "### Optimization\n", - "\n", - "- **optimize**: Executes the optimization process. This method runs the Gauss-Newton iterations until convergence criteria are met, such as reaching the maximum number of iterations or achieving a relative error below a specified threshold.\n", - "\n", - "### Convergence Criteria\n", - "\n", - "- **checkConvergence**: Evaluates whether the optimization process has converged based on the change in error and the specified tolerance levels.\n", - "\n", - "### Accessors\n", - "\n", - "- **error**: Returns the current error of the factor graph with respect to the current estimate. This is useful for monitoring the progress of the optimization.\n", - "- **values**: Retrieves the current estimate of the variable values after optimization.\n", - "\n", - "## Mathematical Background\n", - "\n", "The Gauss-Newton algorithm is based on the idea of linearizing the nonlinear residuals $r(x)$ around the current estimate $x_k$. 
The update step is derived from solving the normal equations:\n", "\n", "$$ J(x_k)^T J(x_k) \\Delta x = -J(x_k)^T r(x_k) $$\n", @@ -50,17 +21,43 @@ "\n", "This process is repeated iteratively until convergence.\n", "\n", + "Key features:\n", + "\n", + "- **Iterative Optimization**: The optimizer refines the solution iteratively by linearizing the nonlinear system around the current estimate.\n", + "- **Convergence Control**: It provides mechanisms to control the convergence through parameters such as maximum iterations and relative error tolerance.\n", + "- **Integration with GTSAM**: Seamlessly integrates with GTSAM's factor graph framework, allowing it to be used with various types of factors and variables.\n", + "\n", + "## Key Methods\n", + "\n", + "Please see the base class [NonlinearOptimizer.ipynb](NonlinearOptimizer.ipynb).\n", + "\n", + "## Parameters\n", + "\n", + "The Gauss-Newton optimizer uses the standard optimization parameters inherited from `NonlinearOptimizerParams`, which include:\n", + "\n", + "- Maximum iterations\n", + "- Relative and absolute error thresholds\n", + "- Error function verbosity\n", + "- Linear solver type\n", + "\n", "## Usage Considerations\n", "\n", "- **Initial Guess**: The quality of the initial guess can significantly affect the convergence and performance of the Gauss-Newton optimizer.\n", "- **Non-convexity**: Since the method relies on linear approximations, it may struggle with highly non-convex problems or those with poor initial estimates.\n", "- **Performance**: The Gauss-Newton method is generally faster than other nonlinear optimization methods like Levenberg-Marquardt for problems that are well-approximated by a quadratic model near the solution.\n", "\n", - "In summary, the `GaussNewtonOptimizer` is a powerful tool for solving nonlinear optimization problems in factor graphs, particularly when the problem is well-suited to quadratic approximation. Its integration with GTSAM makes it a versatile choice for various applications in robotics and computer vision." + "## Files\n", + "\n", + "- [GaussNewtonOptimizer.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GaussNewtonOptimizer.h)\n", + "- [GaussNewtonOptimizer.cpp](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GaussNewtonOptimizer.cpp)" ] } ], - "metadata": {}, + "metadata": { + "language_info": { + "name": "python" + } + }, "nbformat": 4, "nbformat_minor": 5 } diff --git a/gtsam/nonlinear/doc/GncOptimizer.ipynb b/gtsam/nonlinear/doc/GncOptimizer.ipynb index 12b2b6aca..9fa3344cf 100644 --- a/gtsam/nonlinear/doc/GncOptimizer.ipynb +++ b/gtsam/nonlinear/doc/GncOptimizer.ipynb @@ -5,42 +5,12 @@ "id": "c950beef", "metadata": {}, "source": [ - "# GTSAM GncOptimizer Class Documentation\n", - "\n", - "*Disclaimer: This documentation was generated by AI and requires human revision to ensure accuracy and completeness.*\n", + "# GncOptimizer\n", "\n", "## Overview\n", "\n", "The `GncOptimizer` class in GTSAM is designed to perform robust optimization using Graduated Non-Convexity (GNC). This method is particularly useful in scenarios where the optimization problem is affected by outliers. 
The GNC approach gradually transitions from a convex approximation of the problem to the original non-convex problem, thereby improving robustness and convergence.\n", "\n", - "## Key Features\n", - "\n", - "- **Robust Optimization**: The `GncOptimizer` is specifically tailored to handle optimization problems with outliers, using a robust cost function that can mitigate their effects.\n", - "- **Graduated Non-Convexity**: This technique allows the optimizer to start with a convex problem and gradually transform it into the original non-convex problem, which helps in avoiding local minima.\n", - "- **Customizable Parameters**: Users can adjust various parameters to control the behavior of the optimizer, such as the type of robust loss function and the parameters governing the GNC process.\n", - "\n", - "## Key Methods\n", - "\n", - "### Initialization and Setup\n", - "\n", - "- **Constructor**: The class constructor initializes the optimizer with a given nonlinear factor graph and initial estimate. It also accepts parameters specific to the GNC process.\n", - "\n", - "### Optimization Process\n", - "\n", - "- **optimize()**: This method performs the optimization process. It iteratively refines the solution by adjusting the influence of the robust cost function, following the principles of graduated non-convexity.\n", - "\n", - "### Configuration and Parameters\n", - "\n", - "- **setParams()**: Allows users to set the parameters for the GNC optimization process, including the type of robust loss function and other algorithm-specific settings.\n", - "- **getParams()**: Retrieves the current parameters used by the optimizer, providing insight into the configuration of the optimization process.\n", - "\n", - "### Utility Functions\n", - "\n", - "- **cost()**: Computes the cost of the current estimate, which is useful for evaluating the progress of the optimization.\n", - "- **error()**: Returns the error associated with the current estimate, offering a measure of how well the optimization is performing.\n", - "\n", - "## Mathematical Formulation\n", - "\n", "The `GncOptimizer` leverages a robust cost function $\\rho(e)$, where $e$ is the error term. 
The goal is to minimize the sum of these robust costs over all measurements:\n", "\n", "$$\n", @@ -55,16 +25,55 @@ "\n", "As $\\mu$ increases, the function $\\rho_\\mu(e)$ transitions from a convex to a non-convex shape, allowing the optimizer to handle outliers effectively.\n", "\n", + "Key features:\n", + "\n", + "- **Robust Optimization**: The GncOptimizer is specifically tailored to handle optimization problems with outliers, using a robust cost function that can mitigate their effects.\n", + "- **Graduated Non-Convexity**: This technique allows the optimizer to start with a convex problem and gradually transform it into the original non-convex problem, which helps in avoiding local minima.\n", + "- **Customizable Parameters**: Users can adjust various parameters to control the behavior of the optimizer, such as the type of robust loss function and the parameters governing the GNC process.\n", + "\n", + "## Key Methods\n", + "\n", + "Please see the base class [NonlinearOptimizer.ipynb](NonlinearOptimizer.ipynb).\n", + "\n", + "## Parameters\n", + "\n", + "The `GncParams` class defines parameters specific to the GNC optimization algorithm:\n", + "\n", + "| Parameter | Type | Default Value | Description |\n", + "|-----------|------|---------------|-------------|\n", + "| lossType | GncLossType | TLS | Type of robust loss function (GM = Geman McClure or TLS = Truncated least squares) |\n", + "| maxIterations | size_t | 100 | Maximum number of iterations |\n", + "| muStep | double | 1.4 | Multiplicative factor to reduce/increase mu in GNC |\n", + "| relativeCostTol | double | 1e-5 | Threshold for relative cost change to stop iterating |\n", + "| weightsTol | double | 1e-4 | Threshold for weights being close to binary to stop iterating (TLS only) |\n", + "| verbosity | Verbosity enum | SILENT | Verbosity level (options: SILENT, SUMMARY, MU, WEIGHTS, VALUES) |\n", + "| knownInliers | IndexVector | Empty | Slots in factor graph for measurements known to be inliers |\n", + "| knownOutliers | IndexVector | Empty | Slots in factor graph for measurements known to be outliers |\n", + "\n", + "These parameters complement the standard optimization parameters inherited from `NonlinearOptimizerParams`, which include:\n", + "\n", + "- Maximum iterations\n", + "- Relative and absolute error thresholds\n", + "- Error function verbosity\n", + "- Linear solver type\n", + "\n", "## Usage Considerations\n", "\n", "- **Outlier Rejection**: The `GncOptimizer` is particularly effective in scenarios with significant outlier presence, such as SLAM or bundle adjustment problems.\n", "- **Parameter Tuning**: Proper tuning of the GNC parameters is crucial for achieving optimal performance. Users should experiment with different settings to find the best configuration for their specific problem.\n", "\n", - "This high-level overview provides a starting point for understanding and utilizing the `GncOptimizer` class in GTSAM. For detailed implementation and advanced usage, users should refer to the source code and additional GTSAM documentation." 
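+    "## Example\n",
+    "\n",
+    "As a minimal sketch (using the `GncLMParams`/`GncLMOptimizer` typedefs exposed by the Python wrapper; the toy graph, key, and values are illustrative only), GNC wraps an existing problem like this:\n",
+    "\n",
+    "```python\n",
+    "import gtsam\n",
+    "\n",
+    "# Toy problem: a single Pose2 prior. A realistic problem would mix inlier\n",
+    "# and outlier measurements for GNC to down-weight.\n",
+    "graph = gtsam.NonlinearFactorGraph()\n",
+    "noise = gtsam.noiseModel.Unit.Create(3)\n",
+    "graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(0, 0, 0), noise))\n",
+    "initial = gtsam.Values()\n",
+    "initial.insert(0, gtsam.Pose2(1, 0, 0))\n",
+    "\n",
+    "# Run GNC on top of Levenberg-Marquardt with default GncParams.\n",
+    "params = gtsam.GncLMParams()\n",
+    "optimizer = gtsam.GncLMOptimizer(graph, initial, params)\n",
+    "result = optimizer.optimize()\n",
+    "```\n",
+    "\n",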
+ "## Files\n", + "\n", + "- [GncOptimizer.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GncOptimizer.h)\n", + "- [GncParams.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GncParams.h)" ] } ], - "metadata": {}, + "metadata": { + "language_info": { + "name": "python" + } + }, "nbformat": 4, "nbformat_minor": 5 } diff --git a/gtsam/nonlinear/doc/LevenbergMarquardtOptimizer.ipynb b/gtsam/nonlinear/doc/LevenbergMarquardtOptimizer.ipynb index 2fa5a867c..434e25842 100644 --- a/gtsam/nonlinear/doc/LevenbergMarquardtOptimizer.ipynb +++ b/gtsam/nonlinear/doc/LevenbergMarquardtOptimizer.ipynb @@ -5,9 +5,7 @@ "id": "29642bb2", "metadata": {}, "source": [ - "# LevenbergMarquardtOptimizer Class Documentation\n", - "\n", - "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "# LevenbergMarquardtOptimizer\n", "\n", "## Overview\n", "\n", @@ -15,14 +13,6 @@ "\n", "The Levenberg-Marquardt algorithm is an iterative technique that interpolates between the Gauss-Newton algorithm and the method of gradient descent. It is particularly useful for optimizing problems where the solution is expected to be near the initial guess.\n", "\n", - "## Key Features\n", - "\n", - "- **Non-linear Optimization**: The class is designed to handle non-linear optimization problems efficiently.\n", - "- **Damping Mechanism**: It incorporates a damping parameter to control the step size, balancing between the Gauss-Newton and gradient descent methods.\n", - "- **Iterative Improvement**: The optimizer iteratively refines the solution, reducing the error at each step.\n", - "\n", - "## Mathematical Formulation\n", - "\n", "The Levenberg-Marquardt algorithm seeks to minimize a cost function $F(x)$ of the form:\n", "\n", "$$\n", @@ -37,25 +27,40 @@ "\n", "Here, $J$ is the Jacobian matrix of the residuals, $\\lambda$ is the damping parameter, and $I$ is the identity matrix.\n", "\n", + "Key features:\n", + "\n", + "- **Non-linear Optimization**: The class is designed to handle non-linear optimization problems efficiently.\n", + "- **Damping Mechanism**: It incorporates a damping parameter to control the step size, balancing between the Gauss-Newton and gradient descent methods.\n", + "- **Iterative Improvement**: The optimizer iteratively refines the solution, reducing the error at each step.\n", + "\n", "## Key Methods\n", "\n", - "### Initialization\n", + "Please see the base class [NonlinearOptimizer.ipynb](NonlinearOptimizer.ipynb).\n", "\n", - "- **Constructor**: Initializes the optimizer with the given parameters and initial values.\n", + "## Parameters\n", "\n", - "### Optimization\n", + "The `LevenbergMarquardtParams` class defines parameters specific to this optimization algorithm:\n", "\n", - "- **optimize**: Executes the optimization process, iteratively updating the solution to minimize the cost function.\n", + "| Parameter | Type | Default Value | Description |\n", + "|-----------|------|---------------|-------------|\n", + "| lambdaInitial | double | 1e-5 | The initial Levenberg-Marquardt damping term |\n", + "| lambdaFactor | double | 10.0 | The amount by which to multiply or divide lambda when adjusting lambda |\n", + "| lambdaUpperBound | double | 1e5 | The maximum lambda to try before assuming the optimization has failed |\n", + "| lambdaLowerBound | double | 0.0 | The minimum lambda used in LM |\n", + "| verbosityLM | VerbosityLM | SILENT | The verbosity level for Levenberg-Marquardt |\n", + "| minModelFidelity | 
double | 1e-3 | Lower bound for the modelFidelity to accept the result of an LM iteration |\n", + "| logFile | std::string | \"\" | An optional CSV log file, with [iteration, time, error, lambda] |\n", + "| diagonalDamping | bool | false | If true, use diagonal of Hessian |\n", + "| useFixedLambdaFactor | bool | true | If true applies constant increase (or decrease) to lambda according to lambdaFactor |\n", + "| minDiagonal | double | 1e-6 | When using diagonal damping saturates the minimum diagonal entries |\n", + "| maxDiagonal | double | 1e32 | When using diagonal damping saturates the maximum diagonal entries |\n", "\n", - "### Parameter Control\n", + "These parameters complement the standard optimization parameters inherited from `NonlinearOptimizerParams`, which include:\n", "\n", - "- **setLambda**: Sets the damping parameter $\\lambda$, which influences the convergence behavior.\n", - "- **getLambda**: Retrieves the current value of the damping parameter.\n", - "\n", - "### Convergence and Termination\n", - "\n", - "- **checkConvergence**: Evaluates whether the optimization process has converged based on predefined criteria.\n", - "- **terminate**: Stops the optimization process when certain conditions are met.\n", + "- Maximum iterations\n", + "- Relative and absolute error thresholds\n", + "- Error function verbosity\n", + "- Linear solver type\n", "\n", "## Usage Notes\n", "\n", @@ -63,11 +68,20 @@ "- Proper tuning of the damping parameter $\\lambda$ is crucial for balancing the convergence rate and stability.\n", "- The optimizer is most effective when the residuals are approximately linear near the solution.\n", "\n", - "This class is a powerful tool for tackling complex optimization problems where traditional linear methods fall short. By leveraging the strengths of both Gauss-Newton and gradient descent, the `LevenbergMarquardtOptimizer` provides a robust framework for achieving accurate solutions in non-linear least squares problems." + "## Files\n", + "\n", + "- [LevenbergMarquardtOptimizer.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LevenbergMarquardtOptimizer.h)\n", + "- [LevenbergMarquardtOptimizer.cpp](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp)\n", + "- [LevenbergMarquardtParams.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LevenbergMarquardtParams.h)\n", + "- [LevenbergMarquardtParams.cpp](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LevenbergMarquardtParams.cpp)" ] } ], - "metadata": {}, + "metadata": { + "language_info": { + "name": "python" + } + }, "nbformat": 4, "nbformat_minor": 5 } diff --git a/gtsam/nonlinear/doc/NonlinearConjugateGradientOptimizer.ipynb b/gtsam/nonlinear/doc/NonlinearConjugateGradientOptimizer.ipynb index f8dbdf8fe..848509e7f 100644 --- a/gtsam/nonlinear/doc/NonlinearConjugateGradientOptimizer.ipynb +++ b/gtsam/nonlinear/doc/NonlinearConjugateGradientOptimizer.ipynb @@ -5,37 +5,12 @@ "id": "48970ca0", "metadata": {}, "source": [ - "# NonlinearConjugateGradientOptimizer Class Documentation\n", - "\n", - "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "# NonlinearConjugateGradientOptimizer\n", "\n", "## Overview\n", "\n", "The `NonlinearConjugateGradientOptimizer` class in GTSAM is an implementation of the nonlinear conjugate gradient method for optimizing nonlinear functions. 
This optimizer is particularly useful for solving large-scale optimization problems where the Hessian matrix is not easily computed or stored. The conjugate gradient method is an iterative algorithm that seeks to find the minimum of a function by following a series of conjugate directions.\n", "\n", - "## Key Features\n", - "\n", - "- **Optimization Method**: Implements the nonlinear conjugate gradient method, which is an extension of the linear conjugate gradient method to nonlinear optimization problems.\n", - "- **Efficiency**: Suitable for large-scale problems due to its iterative nature and reduced memory requirements compared to methods that require the Hessian matrix.\n", - "- **Flexibility**: Can be used with various line search strategies and conjugate gradient update formulas.\n", - "\n", - "## Main Methods\n", - "\n", - "### Constructor\n", - "\n", - "- **NonlinearConjugateGradientOptimizer**: Initializes the optimizer with a given nonlinear factor graph and initial values. The user can specify optimization parameters, including the choice of line search method and conjugate gradient update formula.\n", - "\n", - "### Optimization\n", - "\n", - "- **optimize**: Executes the optimization process. This method iteratively updates the solution by computing search directions and performing line searches to minimize the objective function along these directions.\n", - "\n", - "### Accessors\n", - "\n", - "- **error**: Returns the current error value of the objective function. This is useful for monitoring the convergence of the optimization process.\n", - "- **values**: Retrieves the current estimate of the optimized variables. This allows users to access the solution at any point during the optimization.\n", - "\n", - "## Mathematical Background\n", - "\n", "The nonlinear conjugate gradient method seeks to minimize a nonlinear function $f(x)$ by iteratively updating the solution $x_k$ according to:\n", "\n", "$$ x_{k+1} = x_k + \\alpha_k p_k $$\n", @@ -50,17 +25,43 @@ "\n", "The choice of $\\beta_k$ affects the convergence properties of the algorithm.\n", "\n", + "Key features:\n", + "\n", + "- **Optimization Method**: Implements the nonlinear conjugate gradient method, which is an extension of the linear conjugate gradient method to nonlinear optimization problems.\n", + "- **Efficiency**: Suitable for large-scale problems due to its iterative nature and reduced memory requirements compared to methods that require the Hessian matrix.\n", + "- **Flexibility**: Can be used with various line search strategies and conjugate gradient update formulas.\n", + "\n", + "## Key Methods\n", + "\n", + "Please see the base class [NonlinearOptimizer.ipynb](NonlinearOptimizer.ipynb).\n", + "\n", + "## Parameters\n", + "\n", + "The nonlinear conjugate gradient optimizer uses the standard optimization parameters inherited from `NonlinearOptimizerParams`, which include:\n", + "\n", + "- Maximum iterations\n", + "- Relative and absolute error thresholds\n", + "- Error function verbosity\n", + "- Linear solver type\n", + "\n", "## Usage Notes\n", "\n", "- The `NonlinearConjugateGradientOptimizer` is most effective when the problem size is large and the computation of the Hessian is impractical.\n", "- Users should choose an appropriate line search method and conjugate gradient update formula based on the specific characteristics of their optimization problem.\n", "- Monitoring the error and values during optimization can provide insights into the convergence behavior and help diagnose potential 
issues.\n", "\n", - "This class provides a robust framework for solving complex nonlinear optimization problems efficiently, leveraging the power of the conjugate gradient method." + "## Files\n", + "\n", + "- [NonlinearConjugateGradientOptimizer.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h)\n", + "- [NonlinearConjugateGradientOptimizer.cpp](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp)" ] } ], - "metadata": {}, + "metadata": { + "language_info": { + "name": "python" + } + }, "nbformat": 4, "nbformat_minor": 5 } From dc082510a431778e5034e750aeddf128c44b884b Mon Sep 17 00:00:00 2001 From: p-zach Date: Sat, 5 Apr 2025 16:03:22 -0400 Subject: [PATCH 13/21] Files for nonlinearopt --- gtsam/nonlinear/doc/NonlinearOptimizer.ipynb | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb b/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb index 84c9283a9..135cc42f1 100644 --- a/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb +++ b/gtsam/nonlinear/doc/NonlinearOptimizer.ipynb @@ -39,7 +39,14 @@ "\n", "## Usage\n", "\n", - "The `NonlinearOptimizer` class is typically not used directly. Instead, one of its derived classes, such as `GaussNewtonOptimizer`, `LevenbergMarquardtOptimizer`, or `DoglegOptimizer`, is used to perform specific types of optimization. These derived classes implement the `optimize()` method according to their respective algorithms." + "The `NonlinearOptimizer` class is typically not used directly. Instead, one of its derived classes, such as `GaussNewtonOptimizer`, `LevenbergMarquardtOptimizer`, or `DoglegOptimizer`, is used to perform specific types of optimization. These derived classes implement the `optimize()` method according to their respective algorithms.\n", + "\n", + "## Files\n", + "\n", + "- [NonlinearOptimizer.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearOptimizer.h)\n", + "- [NonlinearOptimizer.cpp](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearOptimizer.cpp)\n", + "- [NonlinearOptimizerParams.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearOptimizerParams.h)\n", + "- [NonlinearOptimizerParams.cpp](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearOptimizerParams.cpp)" ] } ], From 50e40d00dfa023d70f99694678f41e3dd83b0183 Mon Sep 17 00:00:00 2001 From: p-zach Date: Sat, 5 Apr 2025 16:06:00 -0400 Subject: [PATCH 14/21] Move remaining files --- {doc/generating/output => gtsam/nonlinear/doc}/CustomFactor.ipynb | 0 .../output => gtsam/nonlinear/doc}/ExpressionFactor.ipynb | 0 .../output => gtsam/nonlinear/doc}/ExpressionFactorGraph.ipynb | 0 .../output => gtsam/nonlinear/doc}/ExtendedKalmanFilter.ipynb | 0 .../output => gtsam/nonlinear/doc}/FixedLagSmoother.ipynb | 0 {doc/generating/output => gtsam/nonlinear/doc}/ISAM2.ipynb | 0 .../output => gtsam/nonlinear/doc}/LinearContainerFactor.ipynb | 0 .../output => gtsam/nonlinear/doc}/NonlinearFactor.ipynb | 0 .../output => gtsam/nonlinear/doc}/NonlinearFactorGraph.ipynb | 0 .../generating/output => gtsam/nonlinear/doc}/NonlinearISAM.ipynb | 0 {doc/generating/output => gtsam/nonlinear/doc}/PriorFactor.ipynb | 0 .../output => gtsam/nonlinear/doc}/WhiteNoiseFactor.ipynb | 0 12 files changed, 0 insertions(+), 0 deletions(-) rename {doc/generating/output => gtsam/nonlinear/doc}/CustomFactor.ipynb (100%) rename {doc/generating/output => 
gtsam/nonlinear/doc}/ExpressionFactor.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/ExpressionFactorGraph.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/ExtendedKalmanFilter.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/FixedLagSmoother.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/ISAM2.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/LinearContainerFactor.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/NonlinearFactor.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/NonlinearFactorGraph.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/NonlinearISAM.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/PriorFactor.ipynb (100%) rename {doc/generating/output => gtsam/nonlinear/doc}/WhiteNoiseFactor.ipynb (100%) diff --git a/doc/generating/output/CustomFactor.ipynb b/gtsam/nonlinear/doc/CustomFactor.ipynb similarity index 100% rename from doc/generating/output/CustomFactor.ipynb rename to gtsam/nonlinear/doc/CustomFactor.ipynb diff --git a/doc/generating/output/ExpressionFactor.ipynb b/gtsam/nonlinear/doc/ExpressionFactor.ipynb similarity index 100% rename from doc/generating/output/ExpressionFactor.ipynb rename to gtsam/nonlinear/doc/ExpressionFactor.ipynb diff --git a/doc/generating/output/ExpressionFactorGraph.ipynb b/gtsam/nonlinear/doc/ExpressionFactorGraph.ipynb similarity index 100% rename from doc/generating/output/ExpressionFactorGraph.ipynb rename to gtsam/nonlinear/doc/ExpressionFactorGraph.ipynb diff --git a/doc/generating/output/ExtendedKalmanFilter.ipynb b/gtsam/nonlinear/doc/ExtendedKalmanFilter.ipynb similarity index 100% rename from doc/generating/output/ExtendedKalmanFilter.ipynb rename to gtsam/nonlinear/doc/ExtendedKalmanFilter.ipynb diff --git a/doc/generating/output/FixedLagSmoother.ipynb b/gtsam/nonlinear/doc/FixedLagSmoother.ipynb similarity index 100% rename from doc/generating/output/FixedLagSmoother.ipynb rename to gtsam/nonlinear/doc/FixedLagSmoother.ipynb diff --git a/doc/generating/output/ISAM2.ipynb b/gtsam/nonlinear/doc/ISAM2.ipynb similarity index 100% rename from doc/generating/output/ISAM2.ipynb rename to gtsam/nonlinear/doc/ISAM2.ipynb diff --git a/doc/generating/output/LinearContainerFactor.ipynb b/gtsam/nonlinear/doc/LinearContainerFactor.ipynb similarity index 100% rename from doc/generating/output/LinearContainerFactor.ipynb rename to gtsam/nonlinear/doc/LinearContainerFactor.ipynb diff --git a/doc/generating/output/NonlinearFactor.ipynb b/gtsam/nonlinear/doc/NonlinearFactor.ipynb similarity index 100% rename from doc/generating/output/NonlinearFactor.ipynb rename to gtsam/nonlinear/doc/NonlinearFactor.ipynb diff --git a/doc/generating/output/NonlinearFactorGraph.ipynb b/gtsam/nonlinear/doc/NonlinearFactorGraph.ipynb similarity index 100% rename from doc/generating/output/NonlinearFactorGraph.ipynb rename to gtsam/nonlinear/doc/NonlinearFactorGraph.ipynb diff --git a/doc/generating/output/NonlinearISAM.ipynb b/gtsam/nonlinear/doc/NonlinearISAM.ipynb similarity index 100% rename from doc/generating/output/NonlinearISAM.ipynb rename to gtsam/nonlinear/doc/NonlinearISAM.ipynb diff --git a/doc/generating/output/PriorFactor.ipynb b/gtsam/nonlinear/doc/PriorFactor.ipynb similarity index 100% rename from doc/generating/output/PriorFactor.ipynb rename to gtsam/nonlinear/doc/PriorFactor.ipynb diff --git a/doc/generating/output/WhiteNoiseFactor.ipynb b/gtsam/nonlinear/doc/WhiteNoiseFactor.ipynb 
similarity index 100%
rename from doc/generating/output/WhiteNoiseFactor.ipynb
rename to gtsam/nonlinear/doc/WhiteNoiseFactor.ipynb

From ed6038d6f9ff9bd75d550bc50448fe2aae2b7bf9 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 6 Apr 2025 09:56:36 -0400
Subject: [PATCH 15/21] CustomFactor docs

---
 gtsam/nonlinear/doc/CustomFactor.ipynb | 203 ++++++++++++++++++++-----
 1 file changed, 168 insertions(+), 35 deletions(-)

diff --git a/gtsam/nonlinear/doc/CustomFactor.ipynb b/gtsam/nonlinear/doc/CustomFactor.ipynb
index 9bccd7f74..19adf335f 100644
--- a/gtsam/nonlinear/doc/CustomFactor.ipynb
+++ b/gtsam/nonlinear/doc/CustomFactor.ipynb
@@ -5,45 +5,54 @@
    "id": "31f395c5",
    "metadata": {},
    "source": [
-    "# CustomFactor Class Documentation\n",
-    "\n",
-    "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+    "# CustomFactor"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "1a3591a2",
+   "metadata": {},
+   "source": [
+    "<a href=\"https://colab.research.google.com/github/borglab/gtsam/blob/develop/gtsam/nonlinear/doc/CustomFactor.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5ccb48e4",
+   "metadata": {
+    "tags": [
+     "remove-cell"
+    ],
+    "vscode": {
+     "languageId": "markdown"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "%pip install --quiet gtsam-develop"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "10df70c9",
+   "metadata": {},
+   "source": [
+    "\n",
     "## Overview\n",
     "\n",
-    "The `CustomFactor` class in GTSAM is a specialized type of factor used in factor graphs, which allows users to define custom error functions and Jacobians. This class is particularly useful when the standard factors provided by GTSAM do not meet specific application needs, allowing for greater flexibility and customization in defining the behavior of the factor.\n",
+    "The `CustomFactor` class allows users to define custom error functions and Jacobians, and while it can be used in C++, it is particularly useful with the Python wrapper.\n",
     "\n",
-    "## Key Functionalities\n",
+    "## Custom Error Function\n",
     "\n",
-    "### Custom Error Function\n",
+    "The `CustomFactor` class allows users to define a custom error function. In C++ it is defined as follows:\n",
     "\n",
-    "The `CustomFactor` class allows users to define a custom error function. This is a critical feature as it enables the modeling of specific constraints or measurements that are not covered by existing GTSAM factors. The error function typically represents the difference between predicted and observed measurements and is central to the optimization process in factor graphs.\n",
+    "```c++\n",
+    "using JacobianVector = std::vector<Matrix>;\n",
+    "using CustomErrorFunction = std::function<Vector(const CustomFactor&, const Values&, JacobianVector*)>;\n",
+    "```\n",
     "\n",
-    "### Jacobian Calculation\n",
-    "\n",
-    "In addition to the error function, users can define custom Jacobians. The Jacobian is the matrix of all first-order partial derivatives of a vector-valued function. In the context of `CustomFactor`, it represents the sensitivity of the error function with respect to the variables involved. This is crucial for optimization algorithms, which rely on gradient information to find the minimum of the error function.\n",
+    "The function will be passed a reference to the factor itself so the keys can be accessed, a `Values` reference, and a writeable vector of Jacobians.\n",
     "\n",
-    "### Integration with Factor Graphs\n",
-    "\n",
-    "`CustomFactor` seamlessly integrates with GTSAM's factor graph framework. It can be added to a factor graph just like any other factor, participating in the graph optimization process. This integration ensures that custom factors can be used alongside standard factors, providing a flexible and powerful tool for solving complex estimation problems.\n",
-    "\n",
-    "## Mathematical Formulation\n",
-    "\n",
-    "### Error Function\n",
-    "\n",
-    "The error function $e(\\mathbf{x})$ is defined by the user and represents the discrepancy between the observed and predicted measurements. It is a vector-valued function:\n",
-    "\n",
-    "$$ e(\\mathbf{x}) = h(\\mathbf{x}) - \\mathbf{z} $$\n",
-    "\n",
-    "where $h(\\mathbf{x})$ is the predicted measurement based on the current estimate of the variables $\\mathbf{x}$, and $\\mathbf{z}$ is the observed measurement.\n",
-    "\n",
-    "### Jacobian\n",
-    "\n",
-    "The Jacobian matrix $J$ of the error function with respect to the variables $\\mathbf{x}$ is given by:\n",
-    "\n",
-    "$$ J = \\frac{\\partial e}{\\partial \\mathbf{x}} $$\n",
-    "\n",
-    "This matrix is used in the optimization process to update the estimates of the variables in a direction that reduces the error.\n",
     "\n",
     "## Usage\n",
     "\n",
@@ -51,13 +60,137 @@
     "To use `CustomFactor`, users must:\n",
     "\n",
     "1. Define the custom error function that models the specific measurement or constraint.\n",
     "2. Implement the calculation of the Jacobian matrix for the error function.\n",
-    "3. Add the `CustomFactor` to a factor graph, specifying the keys of the variables it depends on.\n",
+    "3. Define a noise model of the appropriate dimension.\n",
+    "4. Add the `CustomFactor` to a factor graph, specifying\n",
+    "   - the noise model\n",
+    "   - the keys of the variables it depends on\n",
+    "   - the error function\n",
     "\n",
-    "By following these steps, users can leverage the flexibility of `CustomFactor` to incorporate custom measurements and constraints into their factor graph models, enhancing the capability of GTSAM to solve a wide range of estimation problems."
+    "Below is a simple example that mimics a `BetweenFactor`."
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "894bfaf2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CustomFactor on 66, 77\n", + "isotropic dim=3 sigma=0.1\n", + "\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "from gtsam import CustomFactor, noiseModel, Values, Pose2\n", + "\n", + "measurement = Pose2(2, 2, np.pi / 2)\n", + "\n", + "def error_func(this: CustomFactor, v: Values, H: list[np.ndarray]):\n", + " \"\"\"\n", + " Error function that mimics a BetweenFactor\n", + " :param this: reference to the current CustomFactor being evaluated\n", + " :param v: Values object\n", + " :param H: list of references to the Jacobian arrays\n", + " :return: the non-linear error\n", + " \"\"\"\n", + " key0 = this.keys()[0]\n", + " key1 = this.keys()[1]\n", + " gT1, gT2 = v.atPose2(key0), v.atPose2(key1)\n", + " error = measurement.localCoordinates(gT1.between(gT2))\n", + "\n", + " if H is not None:\n", + " result = gT1.between(gT2)\n", + " H[0] = -result.inverse().AdjointMap()\n", + " H[1] = np.eye(3)\n", + " return error\n", + "\n", + "# we use an isotropic noise model, and keys 66 and 77\n", + "noise_model = noiseModel.Isotropic.Sigma(3, 0.1)\n", + "custom_factor = CustomFactor(noise_model, [66, 77], error_func)\n", + "print(custom_factor)" + ] + }, + { + "cell_type": "markdown", + "id": "b72a8fc7", + "metadata": {}, + "source": [ + "Typically, you would not actually call methods of a custom factor directly: a nonlinear optimizer will call `linearize` in every nonlinear iteration. But if you wanted to, here is how you would do it:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "c92caf2c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "error = 0.0\n", + "Linearized JacobianFactor:\n", + " A[66] = [\n", + "\t-6.12323e-16, -10, -20;\n", + "\t10, -6.12323e-16, -20;\n", + "\t-0, -0, -10\n", + "]\n", + " A[77] = [\n", + "\t10, 0, 0;\n", + "\t0, 10, 0;\n", + "\t0, 0, 10\n", + "]\n", + " b = [ -0 -0 -0 ]\n", + " No noise model\n", + "\n" + ] + } + ], + "source": [ + "values = Values()\n", + "values.insert(66, Pose2(1, 2, np.pi / 2))\n", + "values.insert(77, Pose2(-1, 4, np.pi))\n", + "\n", + "print(\"error = \", custom_factor.error(values))\n", + "print(\"Linearized JacobianFactor:\\n\", custom_factor.linearize(values))" + ] + }, + { + "cell_type": "markdown", + "id": "38c04012", + "metadata": {}, + "source": [ + "Note: there are not a lot of restrictions on the function, but note there is overhead in calling a python function from within a c++ optimization loop. You can mitigate this by having a python function that leverages batching of measurements.\n", + "\n", + "Some more examples of usage in python are given in [test_custom_factor.py](https://github.com/borglab/gtsam/blob/develop/python/gtsam/tests/test_custom_factor.py) and [CustomFactorExample.py](https://github.com/borglab/gtsam/blob/develop/python/gtsam/examples/CustomFactorExample.py)." 
] } ], - "metadata": {}, + "metadata": { + "kernelspec": { + "display_name": "py312", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, "nbformat": 4, "nbformat_minor": 5 } From 3e8a29ae132a139edf84f2844e5ee798b5f8c455 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 6 Apr 2025 13:29:46 -0400 Subject: [PATCH 16/21] Added examples to site --- doc/examples.md | 3 + {gtsam => doc}/user_guide.md | 0 gtsam/geometry/geometry.i | 11 +- myst.yml | 5 +- .../gtsam/examples/CameraResectioning.ipynb | 188 ++++++++++++++++++ python/gtsam/examples/CameraResectioning.py | 85 -------- python/gtsam/examples/EqF.ipynb | 35 +++- .../examples/RangeISAMExample_plaza2.ipynb | 44 +++- 8 files changed, 270 insertions(+), 101 deletions(-) create mode 100644 doc/examples.md rename {gtsam => doc}/user_guide.md (100%) create mode 100644 python/gtsam/examples/CameraResectioning.ipynb delete mode 100644 python/gtsam/examples/CameraResectioning.py diff --git a/doc/examples.md b/doc/examples.md new file mode 100644 index 000000000..14300a03c --- /dev/null +++ b/doc/examples.md @@ -0,0 +1,3 @@ +# Examples + +This section contains python examples in interactive Python notebooks (`*.ipynb`). Python notebooks with an Open In Colab button near the top can be opened in your browser, where you can run the files yourself and make edits to play with and understand GTSAM. \ No newline at end of file diff --git a/gtsam/user_guide.md b/doc/user_guide.md similarity index 100% rename from gtsam/user_guide.md rename to doc/user_guide.md diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index e01394bfe..8c6e70ef4 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -1075,9 +1075,14 @@ class PinholeCamera { pair projectSafe(const gtsam::Point3& pw) const; gtsam::Point2 project(const gtsam::Point3& point); gtsam::Point2 project(const gtsam::Point3& point, - Eigen::Ref Dpose, - Eigen::Ref Dpoint, - Eigen::Ref Dcal); + Eigen::Ref Dpose); + gtsam::Point2 project(const gtsam::Point3& point, + Eigen::Ref Dpose, + Eigen::Ref Dpoint); + gtsam::Point2 project(const gtsam::Point3& point, + Eigen::Ref Dpose, + Eigen::Ref Dpoint, + Eigen::Ref Dcal); gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const; gtsam::Point3 backproject(const gtsam::Point2& p, double depth, Eigen::Ref Dresult_dpose, diff --git a/myst.yml b/myst.yml index 7984b2d20..21df32d84 100644 --- a/myst.yml +++ b/myst.yml @@ -8,7 +8,7 @@ project: toc: - file: README.md - file: INSTALL.md - - file: ./gtsam/user_guide.md + - file: ./doc/user_guide.md children: - file: ./gtsam/geometry/geometry.md children: @@ -16,6 +16,9 @@ project: - file: ./gtsam/nonlinear/nonlinear.md children: - pattern: ./gtsam/nonlinear/doc/* + - file: ./doc/examples.md + children: + - pattern: ./python/gtsam/examples/*.ipynb site: nav: - title: Getting started diff --git a/python/gtsam/examples/CameraResectioning.ipynb b/python/gtsam/examples/CameraResectioning.ipynb new file mode 100644 index 000000000..e037dbfff --- /dev/null +++ b/python/gtsam/examples/CameraResectioning.ipynb @@ -0,0 +1,188 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Camera Resectioning Example\n", + "\n", + "This is a 1:1 transcription of CameraResectioning.cpp, but using custom 
factors." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "GTSAM Copyright 2010-2022, Georgia Tech Research Corporation,\n", + "Atlanta, Georgia 30332-0415\n", + "All Rights Reserved\n", + "\n", + "Authors: Frank Dellaert, et al. (see THANKS for the full author list)\n", + "\n", + "See LICENSE for the license information" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "remove-cell" + ], + "vscode": { + "languageId": "markdown" + } + }, + "outputs": [], + "source": [ + "%pip install --quiet gtsam-develop" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "from gtsam import Cal3_S2, CustomFactor, LevenbergMarquardtOptimizer, KeyVector\n", + "from gtsam import NonlinearFactor, NonlinearFactorGraph\n", + "from gtsam import PinholeCameraCal3_S2, Point2, Point3, Pose3, Rot3, Values\n", + "from gtsam.noiseModel import Base as SharedNoiseModel, Diagonal\n", + "from gtsam.symbol_shorthand import X" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def resectioning_factor(\n", + " model: SharedNoiseModel,\n", + " key: int,\n", + " calib: Cal3_S2,\n", + " p: Point2,\n", + " P: Point3,\n", + ") -> NonlinearFactor:\n", + "\n", + " def error_func(this: CustomFactor, v: Values, H: list[np.ndarray]) -> np.ndarray:\n", + " pose = v.atPose3(this.keys()[0])\n", + " camera = PinholeCameraCal3_S2(pose, calib)\n", + " if H is None:\n", + " return camera.project(P) - p\n", + " Dpose = np.zeros((2, 6), order=\"F\")\n", + " result = camera.project(P, Dpose) - p\n", + " H[0] = Dpose\n", + " return result\n", + "\n", + " return CustomFactor(model, KeyVector([key]), error_func)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Assumptions:\n", + "- Camera: $f = 1$, Image: $100\\times100$, center: $50, 50.0$\n", + "- Pose (ground truth): $(X_w, -Y_w, -Z_w, [0,0,2.0]^T)$\n", + "- Known landmarks: $(10,10,0), (-10,10,0), (-10,-10,0), (10,-10,0)$\n", + "- Perfect measurements: $(55,45), (45,45), (45,55), (55,55)$\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Final result:\n", + "\n", + "Values with 1 values:\n", + "Value x1: (gtsam::Pose3)\n", + "R: [\n", + "\t1, 0, 0;\n", + "\t0, -1, 0;\n", + "\t0, 0, -1\n", + "]\n", + "t: 0 0 2\n", + "\n" + ] + } + ], + "source": [ + "# Create camera intrinsic parameters\n", + "calibration = Cal3_S2(1, 1, 0, 50, 50)\n", + "\n", + "# 1. create graph\n", + "graph = NonlinearFactorGraph()\n", + "\n", + "# 2. add factors to the graph\n", + "measurement_noise = Diagonal.Sigmas(np.array([0.5, 0.5]))\n", + "graph.add(\n", + " resectioning_factor(\n", + " measurement_noise, X(1), calibration, Point2(55, 45), Point3(10, 10, 0)\n", + " )\n", + ")\n", + "graph.add(\n", + " resectioning_factor(\n", + " measurement_noise, X(1), calibration, Point2(45, 45), Point3(-10, 10, 0)\n", + " )\n", + ")\n", + "graph.add(\n", + " resectioning_factor(\n", + " measurement_noise, X(1), calibration, Point2(45, 55), Point3(-10, -10, 0)\n", + " )\n", + ")\n", + "graph.add(\n", + " resectioning_factor(\n", + " measurement_noise, X(1), calibration, Point2(55, 55), Point3(10, -10, 0)\n", + " )\n", + ")\n", + "\n", + "# 3. 
Create an initial estimate for the camera pose\n", + "initial: Values = Values()\n", + "initial.insert(X(1), Pose3(Rot3(1, 0, 0, 0, -1, 0, 0, 0, -1), Point3(0, 0, 1)))\n", + "\n", + "# 4. Optimize the graph using Levenberg-Marquardt\n", + "result: Values = LevenbergMarquardtOptimizer(graph, initial).optimize()\n", + "result.print(\"Final result:\\n\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py312", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/gtsam/examples/CameraResectioning.py b/python/gtsam/examples/CameraResectioning.py deleted file mode 100644 index e962b40bb..000000000 --- a/python/gtsam/examples/CameraResectioning.py +++ /dev/null @@ -1,85 +0,0 @@ -# pylint: disable=consider-using-from-import,invalid-name,no-name-in-module,no-member,missing-function-docstring -""" -This is a 1:1 transcription of CameraResectioning.cpp. -""" -import numpy as np -from gtsam import Cal3_S2, CustomFactor, LevenbergMarquardtOptimizer, KeyVector -from gtsam import NonlinearFactor, NonlinearFactorGraph -from gtsam import PinholeCameraCal3_S2, Point2, Point3, Pose3, Rot3, Values -from gtsam.noiseModel import Base as SharedNoiseModel, Diagonal -from gtsam.symbol_shorthand import X - - -def resectioning_factor( - model: SharedNoiseModel, - key: int, - calib: Cal3_S2, - p: Point2, - P: Point3, -) -> NonlinearFactor: - - def error_func(this: CustomFactor, v: Values, H: list[np.ndarray]) -> np.ndarray: - pose = v.atPose3(this.keys()[0]) - camera = PinholeCameraCal3_S2(pose, calib) - if H is None: - return camera.project(P) - p - Dpose = np.zeros((2, 6), order="F") - Dpoint = np.zeros((2, 3), order="F") - Dcal = np.zeros((2, 5), order="F") - result = camera.project(P, Dpose, Dpoint, Dcal) - p - H[0] = Dpose - return result - - return CustomFactor(model, KeyVector([key]), error_func) - - -def main() -> None: - """ - Camera: f = 1, Image: 100x100, center: 50, 50.0 - Pose (ground truth): (Xw, -Yw, -Zw, [0,0,2.0]') - Known landmarks: - 3D Points: (10,10,0) (-10,10,0) (-10,-10,0) (10,-10,0) - Perfect measurements: - 2D Point: (55,45) (45,45) (45,55) (55,55) - """ - - # read camera intrinsic parameters - calib = Cal3_S2(1, 1, 0, 50, 50) - - # 1. create graph - graph = NonlinearFactorGraph() - - # 2. add factors to the graph - measurement_noise = Diagonal.Sigmas(np.array([0.5, 0.5])) - graph.add( - resectioning_factor( - measurement_noise, X(1), calib, Point2(55, 45), Point3(10, 10, 0) - ) - ) - graph.add( - resectioning_factor( - measurement_noise, X(1), calib, Point2(45, 45), Point3(-10, 10, 0) - ) - ) - graph.add( - resectioning_factor( - measurement_noise, X(1), calib, Point2(45, 55), Point3(-10, -10, 0) - ) - ) - graph.add( - resectioning_factor( - measurement_noise, X(1), calib, Point2(55, 55), Point3(10, -10, 0) - ) - ) - - # 3. Create an initial estimate for the camera pose - initial: Values = Values() - initial.insert(X(1), Pose3(Rot3(1, 0, 0, 0, -1, 0, 0, 0, -1), Point3(0, 0, 1))) - - # 4. 
Optimize the graph using Levenberg-Marquardt - result: Values = LevenbergMarquardtOptimizer(graph, initial).optimize() - result.print("Final result:\n") - - -if __name__ == "__main__": - main() diff --git a/python/gtsam/examples/EqF.ipynb b/python/gtsam/examples/EqF.ipynb index 33d7f74da..59ee2c525 100644 --- a/python/gtsam/examples/EqF.ipynb +++ b/python/gtsam/examples/EqF.ipynb @@ -8,16 +8,36 @@ "\n", "Implementing the example in [Fornasier et al, 2022, Overcoming Bias: Equivariant Filter Design for Biased Attitude Estimation with Online Calibration](https://arxiv.org/pdf/2209.12038).\n", "\n", - "This notebook uses Alessandro Fornasier's equivariant filter code (https://github.com/aau-cns/ABC-EqF) converted to use GTSAM's libraries.\n", - "Authors: Jennifer Oum & Darshan Rajasekaran\n", + "This notebook uses [Alessandro Fornasier's equivariant filter code](https://github.com/aau-cns/ABC-EqF) converted to use GTSAM's libraries.\n", "\n", - "We start by installing gtsam (GTSAM's python wrapper) and gtbook." + "Authors: Jennifer Oum & Darshan Rajasekaran" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "GTSAM Copyright 2010-2022, Georgia Tech Research Corporation,\n", + "Atlanta, Georgia 30332-0415\n", + "All Rights Reserved\n", + "\n", + "Authors: Frank Dellaert, et al. (see THANKS for the full author list)\n", + "\n", + "See LICENSE for the license information" ] }, { "cell_type": "code", - "execution_count": 68, - "metadata": {}, + "execution_count": null, + "metadata": { + "tags": [ + "remove-cell" + ] + }, "outputs": [ { "name": "stdout", @@ -28,12 +48,13 @@ } ], "source": [ + "# We start by installing gtsam (GTSAM's python wrapper) and gtbook.\n", "%pip install --quiet gtsam gtbook" ] }, { "cell_type": "code", - "execution_count": 69, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -47,7 +68,7 @@ "import gtsam\n", "from gtsam import findExampleDataFile, Rot3\n", "\n", - "from EqF import *\n" + "from EqF import *" ] }, { diff --git a/python/gtsam/examples/RangeISAMExample_plaza2.ipynb b/python/gtsam/examples/RangeISAMExample_plaza2.ipynb index f11636606..d8b5ff4eb 100644 --- a/python/gtsam/examples/RangeISAMExample_plaza2.ipynb +++ b/python/gtsam/examples/RangeISAMExample_plaza2.ipynb @@ -3,6 +3,21 @@ { "cell_type": "markdown", "metadata": {}, + "source": [ + "# Range SLAM with iSAM\n", + "\n", + "A 2D Range SLAM example, with iSAM and smart range factors\n", + "\n", + "Author: Frank Dellaert" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, "source": [ "GTSAM Copyright 2010-2022, Georgia Tech Research Corporation,\n", "Atlanta, Georgia 30332-0415\n", @@ -10,11 +25,30 @@ "\n", "Authors: Frank Dellaert, et al. 
(see THANKS for the full author list)\n", "\n", - "See LICENSE for the license information\n", - "\n", - "A 2D Range SLAM example, with iSAM and smart range factors\n", - "\n", - "Author: Frank Dellaert" + "See LICENSE for the license information" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "remove-cell" + ], + "vscode": { + "languageId": "markdown" + } + }, + "outputs": [], + "source": [ + "%pip install --quiet gtsam-develop" ] }, { From 02150a2f9009ecdbaa8ee8146d1617e080df24de Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 6 Apr 2025 14:08:45 -0400 Subject: [PATCH 17/21] Moved stuff to notebook, Jacobian guidance --- gtsam/nonlinear/doc/CustomFactor.ipynb | 144 ++++++++++++++++++++++--- python/CustomFactors.md | 112 +------------------ 2 files changed, 133 insertions(+), 123 deletions(-) diff --git a/gtsam/nonlinear/doc/CustomFactor.ipynb b/gtsam/nonlinear/doc/CustomFactor.ipynb index 19adf335f..577efae07 100644 --- a/gtsam/nonlinear/doc/CustomFactor.ipynb +++ b/gtsam/nonlinear/doc/CustomFactor.ipynb @@ -18,7 +18,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "5ccb48e4", "metadata": { "tags": [ @@ -28,7 +28,17 @@ "languageId": "markdown" } }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[31mERROR: Could not find a version that satisfies the requirement gtsam-develop (from versions: none)\u001b[0m\u001b[31m\n", + "\u001b[0m\u001b[31mERROR: No matching distribution found for gtsam-develop\u001b[0m\u001b[31m\n", + "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" + ] + } + ], "source": [ "%pip install --quiet gtsam-develop" ] @@ -54,24 +64,58 @@ "\n", "The function will be passed a reference to the factor itself so the keys can be accessed, a `Values` reference, and a writeable vector of Jacobians.\n", "\n", - "## Usage\n", + "## Usage in Python\n", "\n", - "To use `CustomFactor`, users must:\n", + "In order to use a Python-based factor, one needs to have a Python function with the following signature:\n", "\n", + "```python\n", + "def error_func(this: gtsam.CustomFactor, v: gtsam.Values, H: list[np.ndarray]) -> np.ndarray:\n", + " ...\n", + "```\n", + "\n", + "**Explanation**:\n", + "- `this` is a reference to the `CustomFactor` object. This is required because one can reuse the same `error_func` for multiple factors. `v` is a reference to the current set of values, and `H` is a list of *references* to the list of required Jacobians (see the corresponding C++ documentation). \n", + "- the error returned must be a 1D `numpy` array.\n", + "- If `H` is `None`, it means the current factor evaluation does not need Jacobians. For example, the `error`\n", + "method on a factor does not need Jacobians, so we don't evaluate them to save CPU. If `H` is not `None`,\n", + "each entry of `H` can be assigned a (2D) `numpy` array, as the Jacobian for the corresponding variable.\n", + "- All `numpy` matrices inside should be using `order=\"F\"` to maintain interoperability with C++.\n", + "\n", + "After defining `error_func`, one can create a `CustomFactor` just like any other factor in GTSAM. In summary, to use `CustomFactor`, users must:\n", "1. Define the custom error function that models the specific measurement or constraint.\n", "2. Implement the calculation of the Jacobian matrix for the error function.\n", "3. 
Define a noise model of the appropriate dimension.\n", "3. Add the `CustomFactor` to a factor graph, specifying\n", " - the noise model\n", " - the keys of the variables it depends on\n", - " - the error function\n", + " - the error function" + ] + }, + { + "cell_type": "markdown", + "id": "c7ec3512", + "metadata": {}, + "source": [ + "**Notes**:\n", + "- There are not a lot of restrictions on the function, but note there is overhead in calling a python function from within a c++ optimization loop. \n", + "- Because `pybind11` needs to lock the Python GIL lock for evaluation of each factor, parallel evaluation of `CustomFactor` is not possible.\n", + "- You can mitigate both of these by having a python function that leverages batching of measurements.\n", "\n", + "Some more examples of usage in python are given in [test_custom_factor.py](https://github.com/borglab/gtsam/blob/develop/python/gtsam/tests/test_custom_factor.py),[CustomFactorExample.py](https://github.com/borglab/gtsam/blob/develop/python/gtsam/examples/CustomFactorExample.py), and [CameraResectioning.py](https://github.com/borglab/gtsam/blob/develop/python/gtsam/examples/CameraResectioning.py)." + ] + }, + { + "cell_type": "markdown", + "id": "68a66627", + "metadata": {}, + "source": [ + "## Example\n", "Below is a simple example that mimics a `BetweenFactor`." ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 2, "id": "894bfaf2", "metadata": {}, "outputs": [ @@ -89,9 +133,9 @@ "import numpy as np\n", "from gtsam import CustomFactor, noiseModel, Values, Pose2\n", "\n", - "measurement = Pose2(2, 2, np.pi / 2)\n", + "measurement = Pose2(2, 2, np.pi / 2) # is used to create the error function\n", "\n", - "def error_func(this: CustomFactor, v: Values, H: list[np.ndarray]):\n", + "def error_func(this: CustomFactor, v: Values, H: list[np.ndarray]=None):\n", " \"\"\"\n", " Error function that mimics a BetweenFactor\n", " :param this: reference to the current CustomFactor being evaluated\n", @@ -126,7 +170,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 3, "id": "c92caf2c", "metadata": {}, "outputs": [ @@ -163,12 +207,88 @@ }, { "cell_type": "markdown", - "id": "38c04012", + "id": "d9b61f83", "metadata": {}, "source": [ - "Note: there are not a lot of restrictions on the function, but note there is overhead in calling a python function from within a c++ optimization loop. You can mitigate this by having a python function that leverages batching of measurements.\n", + "## Beware of Jacobians!\n", "\n", - "Some more examples of usage in python are given in [test_custom_factor.py](https://github.com/borglab/gtsam/blob/develop/python/gtsam/tests/test_custom_factor.py) and [CustomFactorExample.py](https://github.com/borglab/gtsam/blob/develop/python/gtsam/examples/CustomFactorExample.py)." + "It is important to unit-test the Jacobians you provide, because the convention used in GTSAM frequently leads to confusion. In particular, GTSAM updates variables using an exponential map *on the right*. 
Specifically, for a variable $x\in G$, an n-dimensional Lie group, the Jacobian $H_a$ at $x=a$ is defined as the linear map satisfying\n",
+    "$$\n",
+    "\\lim_{\\xi\\rightarrow0}\\frac{\\left|f(a)+H_a\\xi-f\\left(a \\, \\text{Exp}(\\xi)\\right)\\right|}{\\left|\\xi\\right|}=0,\n",
+    "$$\n",
+    "where $\\xi$ is an n-vector corresponding to an element in the Lie algebra $\\mathfrak{g}$, and $\\text{Exp}(\\xi)\\doteq\\exp(\\xi^{\\wedge})$, with $\\exp$ the exponential map from $\\mathfrak{g}$ back to $G$. The same holds for an n-dimensional manifold $M$, in which case we use a suitable retraction instead of the exponential map. More details and examples can be found in [doc/math.pdf](https://github.com/borglab/gtsam/blob/develop/gtsam/doc/math.pdf).\n",
+    "\n",
+    "To test your Jacobians, you can use the handy `gtsam.utils.numerical_derivative` module. We give an example below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "c815269f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from gtsam.utils.numerical_derivative import numericalDerivative21, numericalDerivative22\n",
+    "\n",
+    "# Allocate the Jacobians and call error_func; for Pose2 the Jacobians are 3x3.\n",
+    "H = [np.empty((3, 3), order='F'), np.empty((3, 3), order='F')]\n",
+    "error_func(custom_factor, values, H)\n",
+    "\n",
+    "# We use error_func directly, so we need to create a binary function constructing the values.\n",
+    "def f(T1, T2):\n",
+    "    v = Values()\n",
+    "    v.insert(66, T1)\n",
+    "    v.insert(77, T2)\n",
+    "    return error_func(custom_factor, v)\n",
+    "numerical0 = numericalDerivative21(f, values.atPose2(66), values.atPose2(77))\n",
+    "numerical1 = numericalDerivative22(f, values.atPose2(66), values.atPose2(77))\n",
+    "\n",
+    "# Check the numerical derivatives against the analytical ones\n",
+    "np.testing.assert_allclose(H[0], numerical0, rtol=1e-5, atol=1e-8)\n",
+    "np.testing.assert_allclose(H[1], numerical1, rtol=1e-5, atol=1e-8)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "fd09b0fc",
+   "metadata": {},
+   "source": [
+    "## Implementation Notes\n",
+    "\n",
+    "`CustomFactor` is a `NonlinearFactor` that has a `std::function` as its callback.\n",
+    "This callback can be translated to a Python function call, thanks to `pybind11`'s functional support.\n",
+    "\n",
+    "The constructor of `CustomFactor` is\n",
+    "```c++\n",
+    "/**\n",
+    "* Constructor\n",
+    "* @param noiseModel shared pointer to noise model\n",
+    "* @param keys keys of the variables\n",
+    "* @param errorFunction the error functional\n",
+    "*/\n",
+    "CustomFactor(const SharedNoiseModel& noiseModel, const KeyVector& keys, const CustomErrorFunction& errorFunction) :\n",
+    "  Base(noiseModel, keys) {\n",
+    "  this->error_function_ = errorFunction;\n",
+    "}\n",
+    "```\n",
+    "\n",
+    "At construction time, `pybind11` will pass the handle to the Python callback function as a `std::function` object.\n",
+    "\n",
+    "Something that deserves a special mention is this:\n",
+    "```c++\n",
+    "/*\n",
+    " * NOTE\n",
+    " * ==========\n",
+    " * pybind11 will invoke a copy if this is `JacobianVector &`,\n",
+    " * and modifications in Python will not be reflected.\n",
+    " *\n",
+    " * This is safe because this is passing a const pointer, \n",
+    " * and pybind11 will maintain the `std::vector` memory layout.\n",
+    " * Thus the pointer will never be invalidated.\n",
+    " */\n",
+    "using CustomErrorFunction = std::function<Vector(const CustomFactor&, const Values&, const JacobianVector*)>;\n",
+    "```\n",
+    "which is not documented in `pybind11` docs. 
One needs to be aware of this if they wanted to implement similar \"mutable\" arguments going across the Python-C++ boundary.\n" ] } ], diff --git a/python/CustomFactors.md b/python/CustomFactors.md index 0a387bb4f..84b13bdfe 100644 --- a/python/CustomFactors.md +++ b/python/CustomFactors.md @@ -1,114 +1,4 @@ # GTSAM Python-based factors -One now can build factors purely in Python using the `CustomFactor` factor. +One now can build factors purely in Python using the `CustomFactor` factor. See [this notebook](../gtsam/nonlinear/doc/CustomFactor.ipynb) for usage. -## Usage - -In order to use a Python-based factor, one needs to have a Python function with the following signature: - -```python -import gtsam -import numpy as np -from typing import List - -def error_func(this: gtsam.CustomFactor, v: gtsam.Values, H: List[np.ndarray]) -> np.ndarray: - ... -``` - -`this` is a reference to the `CustomFactor` object. This is required because one can reuse the same -`error_func` for multiple factors. `v` is a reference to the current set of values, and `H` is a list of -**references** to the list of required Jacobians (see the corresponding C++ documentation). Note that -the error returned must be a 1D `numpy` array. - -If `H` is `None`, it means the current factor evaluation does not need Jacobians. For example, the `error` -method on a factor does not need Jacobians, so we don't evaluate them to save CPU. If `H` is not `None`, -each entry of `H` can be assigned a (2D) `numpy` array, as the Jacobian for the corresponding variable. - -All `numpy` matrices inside should be using `order="F"` to maintain interoperability with C++. - -After defining `error_func`, one can create a `CustomFactor` just like any other factor in GTSAM: - -```python -noise_model = gtsam.noiseModel.Unit.Create(3) -# constructor(, , ) -cf = gtsam.CustomFactor(noise_model, [X(0), X(1)], error_func) -``` - -## Example - -The following is a simple `BetweenFactor` implemented in Python. - -```python -import gtsam -import numpy as np -from typing import List - -expected = Pose2(2, 2, np.pi / 2) - -def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]) -> np.ndarray: - """ - Error function that mimics a BetweenFactor - :param this: reference to the current CustomFactor being evaluated - :param v: Values object - :param H: list of references to the Jacobian arrays - :return: the non-linear error - """ - key0 = this.keys()[0] - key1 = this.keys()[1] - gT1, gT2 = v.atPose2(key0), v.atPose2(key1) - error = expected.localCoordinates(gT1.between(gT2)) - - if H is not None: - result = gT1.between(gT2) - H[0] = -result.inverse().AdjointMap() - H[1] = np.eye(3) - return error - -noise_model = gtsam.noiseModel.Unit.Create(3) -cf = gtsam.CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func) -``` - -In general, the Python-based factor works just like their C++ counterparts. - -## Known Issues - -Because of the `pybind11`-based translation, the performance of `CustomFactor` is not guaranteed. -Also, because `pybind11` needs to lock the Python GIL lock for evaluation of each factor, parallel -evaluation of `CustomFactor` is not possible. - -## Implementation - -`CustomFactor` is a `NonlinearFactor` that has a `std::function` as its callback. -This callback can be translated to a Python function call, thanks to `pybind11`'s functional support. 
-
-The constructor of `CustomFactor` is
-```c++
-/**
-* Constructor
-* @param noiseModel shared pointer to noise model
-* @param keys keys of the variables
-* @param errorFunction the error functional
-*/
-CustomFactor(const SharedNoiseModel& noiseModel, const KeyVector& keys, const CustomErrorFunction& errorFunction) :
-  Base(noiseModel, keys) {
-  this->error_function_ = errorFunction;
-}
-```
-
-At construction time, `pybind11` will pass the handle to the Python callback function as a `std::function` object.
-
-Something worth special mention is this:
-```c++
-/*
- * NOTE
- * ==========
- * pybind11 will invoke a copy if this is `JacobianVector &`, and modifications in Python will not be reflected.
- *
- * This is safe because this is passing a const pointer, and pybind11 will maintain the `std::vector` memory layout.
- * Thus the pointer will never be invalidated.
- */
-using CustomErrorFunction = std::function<Vector(const CustomFactor&, const Values&, const JacobianVector*)>;
-```
-
-which is not documented in `pybind11` docs. One needs to be aware of this if they wanted to implement similar
-"mutable" arguments going across the Python-C++ boundary.

From d31ab0f8f6e5dd08af7ba635c04cf127de67214a Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 6 Apr 2025 14:49:06 -0400
Subject: [PATCH 18/21] Move and update expressions.md from BitBucket

---
 doc/expressions.md | 113 +++++++++++++++++++++++++++++++++
 myst.yml           |   1 +
 2 files changed, 114 insertions(+)
 create mode 100644 doc/expressions.md

diff --git a/doc/expressions.md b/doc/expressions.md
new file mode 100644
index 000000000..6e5518f70
--- /dev/null
+++ b/doc/expressions.md
@@ -0,0 +1,113 @@
+# Expressions
+## Motivation
+GTSAM is an optimization library for objective functions expressed as a factor graph over a set of unknown variables. In the continuous case, the variables are typically vectors or elements on a manifold (such as the 3D rotation manifold). The factors compute vector-valued errors that need to be minimized, and are typically only connected to a handful of unknowns.
+
+In the continuous case, the main optimization methods we have implemented are variants of Gauss-Newton non-linear optimization or conjugate gradient methods. Let us assume there are m factors over n unknowns. For either optimization method, we need to evaluate the sparse Jacobian matrix of the entire factor graph, which is a sparse block-matrix of m block-rows and n block-columns.
+
+The sparse Jacobian is built up factor by factor, corresponding to the block-rows. A typical non-linear least-squares term is $|h(x)-z|^2$ where $h(x)$ is a measurement function, which we need to be able to linearize as
+$$
+h(x_0+dx) \approx h(x_0)+H(x_0)dx
+$$
+Note the above is for vector unknowns; for Lie groups and manifold variables, see [doc/math.pdf](https://github.com/borglab/gtsam/blob/develop/doc/math.pdf) for details.

+## Expressions
+In many cases one can use GTSAM 4 Expressions to implement factors. Expressions are objects of type `Expression<T>`, and there are three main expression flavors:
+
+- constants, e.g., `Expression<Point2> kExpr(Point2(3,4))`
+- unknowns, e.g., `Expression<Point3> pExpr(123)` where 123 is a key.
+- functions, e.g., `Expression<double> sumExpr(h, kExpr, pExpr)`
+
+The latter case is an example of wrapping a binary measurement function `h`. To be able to wrap `h`, it needs to be able to compute its local derivatives, i.e., it has to have the signature
+```c++
+double h(const Point2& a, const Point3& b,
+         OptionalJacobian<1, 2> Ha, OptionalJacobian<1, 3> Hb)
+```
+In this case the output type 'T' is 'double', the two arguments have type Point2 and Point3 respectively, and the two remaining arguments provide a way to compute the function Jacobians, if needed. The templated type `OptionalJacobian` behaves very much like `std::optional`. If an actual matrix is passed in, the function is expected to treat it as an output argument in which to write the Jacobian for the result w.r.t. the corresponding input argument. *The matrix to write in will be allocated before the call.*
+
+Expression constructors exist for both methods and functions with different arities. Note that an expression is templated with the output type T, not with the argument types. However, the constructor will infer the argument types from inspecting the signature of the function f, and will in this example expect two additional arguments of type `Expression<Point2>` and `Expression<Point3>`, respectively.
+
+As an example, here is the constructor declaration for wrapping unary functions:
+```c++
+template<typename A>
+Expression(typename UnaryFunction<A>::type function,
+    const Expression<A>& expression);
+```
+where (in this case) the function type is defined by
+```c++
+template<class A1>
+struct UnaryFunction {
+typedef boost::function<
+    T(const A1&, typename MakeOptionalJacobian<T, A1>::type)> type;
+};
+```
+## Some measurement function examples
+An example of a simple unary function is `gtsam::norm3` in [Point3.cpp](https://github.com/borglab/gtsam/blob/develop/gtsam/geometry/Point3.cpp#L41):
+```c++
+double norm3(const Point3 & p, OptionalJacobian<1, 3> H = {}) {
+  double r = sqrt(p.x() * p.x() + p.y() * p.y() + p.z() * p.z());
+  if (H) *H << p.x() / r, p.y() / r, p.z() / r;
+  return r;
+}
+```
+The key new concept here is `OptionalJacobian`, which acts like a `std::optional`: if it evaluates to true, you should write the Jacobian of the function in it. It acts as a fixed-size Eigen matrix.
+
+As we said above, expressions also support binary functions, ternary functions, and methods. An example of a binary function is `Point3::cross`:
+
+```c++
+Point3 cross(const Point3 &p, const Point3 & q,
+             OptionalJacobian<3, 3> H1 = {}, OptionalJacobian<3, 3> H2 = {}) {
+  if (H1) *H1 << skewSymmetric(-q.x(), -q.y(), -q.z());
+  if (H2) *H2 << skewSymmetric(p.x(), p.y(), p.z());
+  return Point3(p.y() * q.z() - p.z() * q.y(), p.z() * q.x() - p.x() * q.z(), p.x() * q.y() - p.y() * q.x());
+}
+```
+Example of using cross:
+```c++
+using namespace gtsam;
+Matrix3 H1, H2;
+Point3 p(1,2,3), q(4,5,6), r = cross(p,q,H1,H2);
+```
+## Using Expressions for Inference
+The way expressions are used is by creating unknown Expressions for the unknown variables we are optimizing for:
+```c++
+Expression<Point3> x('x', 1);
+auto h = Expression<double>(&norm3, x);
+```
+For convenient creation of factors with expressions, we provide a new factor graph type `ExpressionFactorGraph`, which is just a `NonlinearFactorGraph` with an extra method addExpressionFactor(h, z, R) that takes a measurement expression h, an actual measurement z, and a measurement noise model R. With this, we can add a GTSAM nonlinear factor $|h(x)-z|^2$ to such a graph by
+```c++
+graph.addExpressionFactor(h, z, R)
+```
+In the above, the unknown in the example can be retrieved by the `gtsam::Symbol('x', 1)`, which evaluates to a uint64 identifier.
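+
+To put these pieces together, here is a minimal end-to-end sketch; the measurement value `2.0`, the noise model `R`, and the initial estimate below are made-up placeholders rather than code from a shipped GTSAM example:
+```c++
+// Build a tiny graph with a single expression factor and optimize it.
+ExpressionFactorGraph graph;
+Expression<Point3> x('x', 1);                    // unknown, keyed by Symbol('x', 1)
+Expression<double> h(&norm3, x);                 // wrapped measurement function
+auto R = noiseModel::Isotropic::Sigma(1, 0.1);   // hypothetical 1-D noise model
+graph.addExpressionFactor(h, 2.0, R);            // adds the factor |h(x) - 2.0|^2
+
+Values initial;
+initial.insert(Symbol('x', 1), Point3(1, 1, 1)); // starting point for optimization
+Values result = GaussNewtonOptimizer(graph, initial).optimize();
+```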
+
+## Composing Expressions
+The key coolness behind expressions, however, is that you can compose them into expression trees, as long as the leaves know how to do their own derivatives:
+```c++
+Expression<Point3> x1('x', 1), x2('x', 2);
+auto h = Expression<Point3>(&cross, x1, x2);
+auto g = Expression<double>(&norm3, h);
+```
+Because we typedef Point3_ to `Expression<Point3>` and Double_ to `Expression<double>`, we can write this very concisely as
+```c++
+auto g = Double_(&norm3, Point3_(&cross, Point3_('x', 1), Point3_('x', 2)));
+```
+## PoseSLAM Example
+Using expressions, it is simple to quickly create a factor graph corresponding to a PoseSLAM problem, where our only measurements are relative poses between a series of unknown 2D or 3D poses. The following code snippet from [Pose2SLAMExampleExpressions.cpp](https://github.com/borglab/gtsam/blob/develop/examples/Pose2SLAMExampleExpressions.cpp) is used to create a simple Pose2 example (where the robot is moving on a plane):
+```c++
+1 ExpressionFactorGraph graph;
+2 Expression<Pose2> x1(1), x2(2), x3(3), x4(4), x5(5);
+3 graph.addExpressionFactor(x1, Pose2(0, 0, 0), priorNoise);
+4 graph.addExpressionFactor(between(x1,x2), Pose2(2, 0, 0     ), model);
+5 graph.addExpressionFactor(between(x2,x3), Pose2(2, 0, M_PI_2), model);
+6 graph.addExpressionFactor(between(x3,x4), Pose2(2, 0, M_PI_2), model);
+7 graph.addExpressionFactor(between(x4,x5), Pose2(2, 0, M_PI_2), model);
+8 graph.addExpressionFactor(between(x5,x2), Pose2(2, 0, M_PI_2), model);
+```
+This is what is going on:
+- In line 1, we create an empty factor graph.
+- In line 2 we create the 5 unknown poses, of type `Expression<Pose2>`, with keys 1 to 5. These are what we will optimize over.
+- Line 3 then creates a simple factor that gives a prior on `x1` (the first argument), namely that it is at the origin `Pose2(0, 0, 0)` (the second argument), with a particular probability density given by `priorNoise` (the third argument).
+- Lines 4-7 add factors for the odometry constraints, i.e., the movement between successive poses of the robot. The function `between(t1,t2)` is implemented in [nonlinear/expressions.h](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/expressions.h) and is equivalent to calling the constructor `Expression<T>(traits<T>::Between, t1, t2)`.
+- Finally, line 8 creates a loop closure constraint between poses x2 and x5.
+
+Another good example of its use is in
+[SFMExampleExpressions.cpp](https://github.com/borglab/gtsam/blob/develop/examples/SFMExampleExpressions.cpp).
\ No newline at end of file
diff --git a/myst.yml b/myst.yml
index 21df32d84..6f960a4e5 100644
--- a/myst.yml
+++ b/myst.yml
@@ -19,6 +19,7 @@ project:
     - file: ./doc/examples.md
       children:
         - pattern: ./python/gtsam/examples/*.ipynb
+    - file: ./doc/expressions.md
 site:
   nav:
     - title: Getting started

From 0e68480e327677795b71c1c0113e18659ac450e5 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 6 Apr 2025 15:34:53 -0400
Subject: [PATCH 19/21] Reviewed 3 more

---
 gtsam/nonlinear/doc/CustomFactor.ipynb        |  6 +-
 gtsam/nonlinear/doc/ExpressionFactor.ipynb    | 67 +++++--------
 .../nonlinear/doc/ExpressionFactorGraph.ipynb | 58 ++++------
 .../nonlinear/doc/ExtendedKalmanFilter.ipynb  | 99 ++++++++++---------
 4 files changed, 95 insertions(+), 135 deletions(-)

diff --git a/gtsam/nonlinear/doc/CustomFactor.ipynb b/gtsam/nonlinear/doc/CustomFactor.ipynb
index 577efae07..beb2961bb 100644
--- a/gtsam/nonlinear/doc/CustomFactor.ipynb
+++ b/gtsam/nonlinear/doc/CustomFactor.ipynb
@@ -57,7 +57,7 @@
 "\n",
 "The `CustomFactor` class allows users to define a custom error function. In C++ it is defined as below:\n",
 "\n",
- "```c++\n",
+ "```cpp\n",
 "using JacobianVector = std::vector<Matrix>;\n",
 "using CustomErrorFunction = std::function<Vector(const CustomFactor&, const Values&, const JacobianVector*)>;\n",
 "```\n",
@@ -259,7 +259,7 @@
 "This callback can be translated to a Python function call, thanks to `pybind11`'s functional support.\n",
 "\n",
 "The constructor of `CustomFactor` is\n",
- "```c++\n",
+ "```cpp\n",
 "/**\n",
 "* Constructor\n",
 "* @param noiseModel shared pointer to noise model\n",
@@ -275,7 +275,7 @@
 "At construction time, `pybind11` will pass the handle to the Python callback function as a `std::function` object.\n",
 "\n",
 "Something that deserves a special mention is this:\n",
- "```c++\n",
+ "```cpp\n",
 "/*\n",
 " * NOTE\n",
 " * ==========\n",
diff --git a/gtsam/nonlinear/doc/ExpressionFactor.ipynb b/gtsam/nonlinear/doc/ExpressionFactor.ipynb
index 79a7b9018..1b4f850ab 100644
--- a/gtsam/nonlinear/doc/ExpressionFactor.ipynb
+++ b/gtsam/nonlinear/doc/ExpressionFactor.ipynb
@@ -5,63 +5,42 @@
 "id": "59407eaf",
 "metadata": {},
 "source": [
- "# ExpressionFactor Class Documentation\n",
- "\n",
- "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+ "# ExpressionFactor\n",
 "\n",
 "## Overview\n",
 "\n",
- "The `ExpressionFactor` class in GTSAM is a template class designed to work with factor graphs in the context of nonlinear optimization. It represents a factor that can be constructed from an expression, allowing for flexible and efficient computation of error terms in optimization problems.\n",
+ "The `ExpressionFactor` class in GTSAM is a template class designed to work with factor graphs in the context of nonlinear optimization. It represents a factor that can be constructed from a [GTSAM expression](../../../doc/expressions.md), allowing for flexible and efficient computation of error terms in optimization problems.\n",
 "\n",
- "## Key Features\n",
- "\n",
- "- **Expression-Based Factor**: The `ExpressionFactor` class allows users to define factors based on expressions, which can represent complex mathematical relationships between variables.\n",
- "- **Error Calculation**: It computes the error based on the difference between the predicted and observed values, typically used in least-squares optimization.\n",
- "- **Jacobian Computation**: The class can compute the Jacobian matrix, which is essential for gradient-based optimization methods.\n",
+ "The `ExpressionFactor` class allows users to define factors based on expressions in C++, which use (reverse) automatic differentiation to compute their Jacobians.\n",
 "\n",
 "## Main Methods\n",
 "\n",
 "### Constructor\n",
 "\n",
- "The `ExpressionFactor` class provides constructors that allow for the initialization of the factor with a specific expression and measurement. The constructors are designed to handle various types of expressions and measurements, making the class versatile for different applications.\n",
+ "The `ExpressionFactor` class provides a constructor that allows for the initialization of the factor with a specific expression and measurement:\n",
 "\n",
- "### `evaluateError`\n",
- "\n",
- "This method calculates the error vector for the factor. The error is typically defined as the difference between the predicted value from the expression and the actual measurement. 
Mathematically, this can be represented as:\n", - "\n", - "$$\n", - "\\text{error} = \\text{measurement} - \\text{expression}\n", - "$$\n", - "\n", - "where `measurement` is the observed value, and `expression` is the predicted value based on the current estimate of the variables.\n", - "\n", - "### `linearize`\n", - "\n", - "The `linearize` method is used to linearize the factor around a given linearization point. This involves computing the Jacobian matrix, which represents the partial derivatives of the error with respect to the variables. The Jacobian is crucial for iterative optimization algorithms such as Gauss-Newton or Levenberg-Marquardt.\n", - "\n", - "### `clone`\n", - "\n", - "The `clone` method creates a deep copy of the factor. This is useful when factors need to be duplicated, ensuring that changes to one copy do not affect the other.\n", - "\n", - "## Mathematical Background\n", - "\n", - "The `ExpressionFactor` class is grounded in the principles of nonlinear optimization, particularly in the context of factor graphs. Factor graphs are bipartite graphs used to represent the factorization of a function, often used in probabilistic graphical models and optimization problems.\n", - "\n", - "In the context of GTSAM, factors represent constraints or relationships between variables. The `ExpressionFactor` allows these relationships to be defined using mathematical expressions, providing a flexible and powerful tool for modeling complex systems.\n", - "\n", - "## Usage\n", - "\n", - "The `ExpressionFactor` class is typically used in scenarios where the relationships between variables can be naturally expressed as mathematical expressions. This includes applications in robotics, computer vision, and other fields where optimization problems are prevalent.\n", - "\n", - "By leveraging the power of expressions, users can define custom factors that capture the nuances of their specific problem, leading to more accurate and efficient optimization solutions.\n", - "\n", - "---\n", - "\n", - "This documentation provides a high-level overview of the `ExpressionFactor` class, highlighting its main features and methods. For detailed usage and examples, users should refer to the GTSAM library documentation and source code." 
+ "```cpp\n",
+ "  /**\n",
+ "   * Constructor: creates a factor from a measurement and measurement function\n",
+ "   * @param noiseModel the noise model associated with a measurement\n",
+ "   * @param measurement actual value of the measurement, of type T\n",
+ "   * @param expression predicts the measurement from Values\n",
+ "   * The keys associated with the factor, returned by keys(), are sorted.\n",
+ "   */\n",
+ "  ExpressionFactor(const SharedNoiseModel& noiseModel,  //\n",
+ "                   const T& measurement, const Expression<T>& expression)\n",
+ "      : NoiseModelFactor(noiseModel), measured_(measurement) {\n",
+ "    initialize(expression);\n",
+ "  }\n",
+ "```"
 ]
 }
 ],
 "metadata": {
 "language_info": {
 "name": "python"
 }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
diff --git a/gtsam/nonlinear/doc/ExpressionFactorGraph.ipynb b/gtsam/nonlinear/doc/ExpressionFactorGraph.ipynb
index 540dc1082..1c22e1315 100644
--- a/gtsam/nonlinear/doc/ExpressionFactorGraph.ipynb
+++ b/gtsam/nonlinear/doc/ExpressionFactorGraph.ipynb
@@ -5,55 +5,31 @@
 "id": "a1c00a8c",
 "metadata": {},
 "source": [
- "# ExpressionFactorGraph Class Documentation\n",
- "\n",
- "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+ "# ExpressionFactorGraph\n",
 "\n",
 "## Overview\n",
 "\n",
- "The `ExpressionFactorGraph` class in GTSAM is a specialized factor graph designed to work with expressions. It extends the capabilities of a standard factor graph by allowing the incorporation of symbolic expressions, which can be particularly useful in applications requiring symbolic computation and automatic differentiation.\n",
+ "The `ExpressionFactorGraph` class in GTSAM is a specialized factor graph designed to work with expressions. It extends the capabilities of a standard factor graph by allowing factors created from [GTSAM expressions](../../../doc/expressions.md), which implement automatic differentiation. It creates [ExpressionFactors](ExpressionFactor.ipynb).\n",
 "\n",
- "## Key Features\n",
+ "### Adding Expression Factors\n",
 "\n",
- "- **Expression Handling**: The class allows for the creation and manipulation of factors that are expressed symbolically. This can be advantageous in scenarios where the relationships between variables are best described using mathematical expressions.\n",
- "\n",
- "- **Automatic Differentiation**: By leveraging expressions, the class supports automatic differentiation, which is crucial for optimizing complex systems where derivatives are needed.\n",
- "\n",
- "- **Integration with GTSAM**: As part of the GTSAM library, `ExpressionFactorGraph` seamlessly integrates with other components, allowing for robust and efficient factor graph optimization.\n",
- "\n",
- "## Main Methods\n",
- "\n",
- "### Adding Factors\n",
- "\n",
- "- **addExpressionFactor**: This method allows the user to add a new factor to the graph based on a symbolic expression. The expression defines the relationship between the variables involved in the factor.\n",
- "\n",
- "### Graph Operations\n",
- "\n",
- "- **update**: This method updates the factor graph with new information. It recalculates the necessary components to ensure that the graph remains consistent with the added expressions.\n",
- "\n",
- "- **linearize**: Converts the expression-based factor graph into a linear factor graph. This is a crucial step for optimization, as many algorithms operate on linear approximations of the problem.\n",
- "\n",
- "### Optimization\n",
- "\n",
- "- **optimize**: This method runs the optimization process on the factor graph. It uses the symbolic expressions to guide the optimization, ensuring that the solution respects the relationships defined by the expressions.\n",
- "\n",
- "## Mathematical Foundations\n",
- "\n",
- "The `ExpressionFactorGraph` leverages several mathematical concepts to perform its functions:\n",
- "\n",
- "- **Factor Graphs**: A factor graph is a bipartite graph representing the factorization of a function. In the context of GTSAM, it is used to represent the joint probability distribution of a set of variables.\n",
- "\n",
- "- **Expressions**: Symbolic expressions are used to define the relationships between variables. These expressions can be differentiated and manipulated symbolically, providing flexibility and power in modeling complex systems.\n",
- "\n",
- "- **Automatic Differentiation**: This technique is used to compute derivatives of functions defined by expressions. It is essential for optimization algorithms that require gradient information.\n",
- "\n",
- "## Conclusion\n",
- "\n",
- "The `ExpressionFactorGraph` class is a powerful tool within the GTSAM library, offering advanced capabilities for working with symbolic expressions in factor graphs. Its integration of automatic differentiation and symbolic computation makes it particularly useful for complex optimization problems where traditional numerical methods may fall short. Users familiar with factor graphs and symbolic mathematics will find this class to be a valuable addition to their toolkit."
+ "Use **addExpressionFactor**: This method allows the user to add a new factor to the graph based on a symbolic expression. The expression defines the relationship between the variables involved in the factor.\n",
+ "```cpp\n",
+ "  template<typename T>\n",
+ "  void addExpressionFactor(const Expression<T>& h, const T& z,\n",
+ "                           const SharedNoiseModel& R) {\n",
+ "    using F = ExpressionFactor<T>;\n",
+ "    push_back(std::allocate_shared<F>(Eigen::aligned_allocator<F>(), R, z, h));\n",
+ "  }\n",
+ "```"
 ]
 }
 ],
 "metadata": {
 "language_info": {
 "name": "python"
 }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
diff --git a/gtsam/nonlinear/doc/ExtendedKalmanFilter.ipynb b/gtsam/nonlinear/doc/ExtendedKalmanFilter.ipynb
index 9b379152a..bf24e7e10 100644
--- a/gtsam/nonlinear/doc/ExtendedKalmanFilter.ipynb
+++ b/gtsam/nonlinear/doc/ExtendedKalmanFilter.ipynb
@@ -5,70 +5,75 @@
 "id": "93869c17",
 "metadata": {},
 "source": [
- "# ExtendedKalmanFilter Class Documentation\n",
- "\n",
- "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+ "# ExtendedKalmanFilter\n",
 "\n",
 "## Overview\n",
 "\n",
- "The `ExtendedKalmanFilter` class in GTSAM is a robust implementation of the Extended Kalman Filter (EKF), which is a powerful tool for estimating the state of a nonlinear dynamic system. The EKF extends the capabilities of the traditional Kalman Filter by linearizing about the current mean and covariance, making it suitable for nonlinear systems.\n",
+ "The `ExtendedKalmanFilter` class in GTSAM is an implementation of the [Extended Kalman Filter (EKF)](https://en.wikipedia.org/wiki/Extended_Kalman_filter), which is a powerful tool for estimating the state of a nonlinear dynamic system.\n",
 "\n",
- "## Key Features\n",
+ "See also [this notebook](../../../python/gtsam/examples/easyPoint2KalmanFilter.ipynb) for the Python version of the C++ example below."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "161c36eb",
+ "metadata": {},
+ "source": [
+ "## Using the ExtendedKalmanFilter Class\n",
 "\n",
- "- **Nonlinear State Estimation**: The EKF is designed to handle systems where the state transition and observation models are nonlinear.\n",
- "- **Predict and Update Cycles**: The class provides mechanisms to predict the future state and update the current state estimate based on new measurements.\n",
- "- **Covariance Management**: It maintains and updates the state covariance matrix, which represents the uncertainty of the state estimate.\n",
+ "The `ExtendedKalmanFilter` class in GTSAM provides a flexible way to implement Kalman filtering using factor graphs. Here's a step-by-step guide based on the example provided in [easyPoint2KalmanFilter.cpp](https://github.com/borglab/gtsam/blob/develop/examples/easyPoint2KalmanFilter.cpp):\n",
 "\n",
- "## Mathematical Foundation\n",
+ "### Steps to Use the ExtendedKalmanFilter\n",
 "\n",
- "The EKF operates on the principle of linearizing nonlinear functions around the current estimate. The primary equations involved in the EKF are:\n",
+ "1. **Initialize the Filter**:\n",
+ "   - Define the initial state (e.g., position) and its covariance.\n",
+ "   - Create a key for the initial state.\n",
+ "   - Instantiate the `ExtendedKalmanFilter` object with the initial state and covariance.\n",
 "\n",
- "1. **State Prediction**:\n",
- "   $$ \\hat{x}_{k|k-1} = f(\\hat{x}_{k-1|k-1}, u_k) $$\n",
- "   $$ P_{k|k-1} = F_k P_{k-1|k-1} F_k^T + Q_k $$\n",
+ "   ```cpp\n",
+ "   Point2 x_initial(0.0, 0.0);\n",
+ "   SharedDiagonal P_initial = noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1));\n",
+ "   Symbol x0('x', 0);\n",
+ "   ExtendedKalmanFilter<Point2> ekf(x0, x_initial, P_initial);\n",
+ "   ```\n",
 "\n",
- "2. **Measurement Update**:\n",
- "   $$ y_k = z_k - h(\\hat{x}_{k|k-1}) $$\n",
- "   $$ S_k = H_k P_{k|k-1} H_k^T + R_k $$\n",
- "   $$ K_k = P_{k|k-1} H_k^T S_k^{-1} $$\n",
- "   $$ \\hat{x}_{k|k} = \\hat{x}_{k|k-1} + K_k y_k $$\n",
- "   $$ P_{k|k} = (I - K_k H_k) P_{k|k-1} $$\n",
+ "2. **Predict the Next State**:\n",
 "\n",
- "Where:\n",
- "- $f$ and $h$ are the nonlinear state transition and measurement functions, respectively.\n",
- "- $F_k$ and $H_k$ are the Jacobians of $f$ and $h$.\n",
- "- $Q_k$ and $R_k$ are the process and measurement noise covariance matrices.\n",
+ "   - Define the motion model using a `BetweenFactor`.\n",
+ "   - Predict the next state using the `predict` method.\n",
+ "   ```cpp\n",
+ "   Symbol x1('x', 1);\n",
+ "   Point2 difference(1, 0);\n",
+ "   SharedDiagonal Q = noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1), true);\n",
+ "   BetweenFactor<Point2> factor1(x0, x1, difference, Q);\n",
+ "   Point2 x1_predict = ekf.predict(factor1);\n",
+ "   ```\n",
 "\n",
- "## Key Methods\n",
+ "3. **Update the State with Measurements**:\n",
+ "   - Define the measurement model using a `PriorFactor`.\n",
+ "   - Update the state using the `update` method.\n",
+ "   ```cpp\n",
+ "   Point2 z1(1.0, 0.0);\n",
+ "   SharedDiagonal R = noiseModel::Diagonal::Sigmas(Vector2(0.25, 0.25), true);\n",
+ "   PriorFactor<Point2> factor2(x1, z1, R);\n",
+ "   Point2 x1_update = ekf.update(factor2);\n",
+ "   ```\n",
+ "4. **Repeat for Subsequent Time Steps**:\n",
 "\n",
- "### Initialization\n",
+ "   - Repeat the prediction and update steps for subsequent states and measurements (see the sketch at the end of this section).\n",
 "\n",
- "- **Constructor**: Initializes the filter with a given initial state and covariance.\n",
+ "## Example Use Case\n",
+ "This example demonstrates tracking a moving 2D point using a simple linear motion model and position measurements. The `ExtendedKalmanFilter` class allows for flexible modeling of both the motion and measurement processes using GTSAM's factor graph framework.\n",
 "\n",
- "### Prediction\n",
- "\n",
- "- **predict**: Advances the state estimate to the next time step using the state transition model. It computes the predicted state and updates the state covariance matrix.\n",
- "\n",
- "### Update\n",
- "\n",
- "- **update**: Incorporates a new measurement into the state estimate. It calculates the innovation, updates the state estimate, and adjusts the covariance matrix accordingly.\n",
- "\n",
- "### Accessors\n",
- "\n",
- "- **getState**: Returns the current estimated state.\n",
- "- **getCovariance**: Provides the current state covariance matrix, representing the uncertainty of the estimate.\n",
- "\n",
- "## Usage\n",
- "\n",
- "The `ExtendedKalmanFilter` class is typically used in a loop where the `predict` method is called to project the state forward in time, and the `update` method is called whenever a new measurement is available. This cycle continues, refining the state estimate and reducing uncertainty over time.\n",
- "\n",
- "## Conclusion\n",
- "\n",
- "The `ExtendedKalmanFilter` class in GTSAM is a versatile tool for state estimation in nonlinear systems. By leveraging the power of linearization, it provides accurate and efficient estimation capabilities, making it suitable for a wide range of applications in robotics, navigation, and control systems." 
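+ "As a minimal sketch of one more cycle (with a hypothetical second measurement `z2 = (2, 0)`, reusing `difference`, `Q`, and `R` from above):\n",
+ "\n",
+ "```cpp\n",
+ "Symbol x2('x', 2);\n",
+ "// Predict x2 from x1 with the same motion model.\n",
+ "BetweenFactor<Point2> factor3(x1, x2, difference, Q);\n",
+ "Point2 x2_predict = ekf.predict(factor3);\n",
+ "// Update x2 with the assumed measurement z2.\n",
+ "PriorFactor<Point2> factor4(x2, Point2(2.0, 0.0), R);\n",
+ "Point2 x2_update = ekf.update(factor4);\n",
+ "```\n",
+ "\n",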
+ "For the full implementation, see the [easyPoint2KalmanFilter.cpp](https://github.com/borglab/gtsam/blob/develop/examples/easyPoint2KalmanFilter.cpp) file.\n" ] } ], - "metadata": {}, + "metadata": { + "language_info": { + "name": "python" + } + }, "nbformat": 4, "nbformat_minor": 5 } From d3895d6ebb2b1d5149512c35ba4a133f08e0aa3f Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 6 Apr 2025 15:35:51 -0400 Subject: [PATCH 20/21] Moved example notebooks to examples --- python/gtsam/examples/DiscreteBayesTree.ipynb | 1013 +++++++++++++++++ python/gtsam/examples/DiscreteSwitching.ipynb | 805 +++++++++++++ python/gtsam/examples/EqF.ipynb | 12 +- .../easyPoint2KalmanFilter.ipynb | 74 +- .../elaboratePoint2KalmanFilter.ipynb | 53 +- .../gtsam/notebooks/DiscreteBayesTree.ipynb | 200 ---- .../gtsam/notebooks/DiscreteSwitching.ipynb | 155 --- 7 files changed, 1934 insertions(+), 378 deletions(-) create mode 100644 python/gtsam/examples/DiscreteBayesTree.ipynb create mode 100644 python/gtsam/examples/DiscreteSwitching.ipynb rename python/gtsam/{notebooks => examples}/easyPoint2KalmanFilter.ipynb (72%) rename python/gtsam/{notebooks => examples}/elaboratePoint2KalmanFilter.ipynb (84%) delete mode 100644 python/gtsam/notebooks/DiscreteBayesTree.ipynb delete mode 100644 python/gtsam/notebooks/DiscreteSwitching.ipynb diff --git a/python/gtsam/examples/DiscreteBayesTree.ipynb b/python/gtsam/examples/DiscreteBayesTree.ipynb new file mode 100644 index 000000000..f980a22c0 --- /dev/null +++ b/python/gtsam/examples/DiscreteBayesTree.ipynb @@ -0,0 +1,1013 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# The Discrete Bayes Tree\n", + "\n", + "An example of building a Bayes net, then eliminating it into a Bayes tree. Mirrors the code in `testDiscreteBayesTree.cpp` ." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "GTSAM Copyright 2010-2022, Georgia Tech Research Corporation,\n", + "Atlanta, Georgia 30332-0415\n", + "All Rights Reserved\n", + "\n", + "Authors: Frank Dellaert, et al. 
(see THANKS for the full author list)\n", + "\n", + "See LICENSE for the license information" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "# This needs gtbook:\n", + "% pip install --quiet gtbook" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from gtsam import DiscreteBayesTree, DiscreteBayesNet, DiscreteKeys, DiscreteFactorGraph, Ordering\n", + "from gtsam.symbol_shorthand import S" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def P(*args):\n", + " \"\"\" Create a DiscreteKeys instances from a variable number of DiscreteKey pairs.\"\"\"\n", + " #TODO: We can make life easier by providing variable argument functions in C++ itself.\n", + " dks = DiscreteKeys()\n", + " for key in args:\n", + " dks.push_back(key)\n", + " return dks" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import graphviz\n", + "class show(graphviz.Source):\n", + " \"\"\" Display an object with a dot method as a graph.\"\"\"\n", + "\n", + " def __init__(self, obj):\n", + " \"\"\"Construct from object with 'dot' method.\"\"\"\n", + " # This small class takes an object, calls its dot function, and uses the\n", + " # resulting string to initialize a graphviz.Source instance. This in turn\n", + " # has a _repr_mimebundle_ method, which then renders it in the notebook.\n", + " super().__init__(obj.dot())" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "G\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "8->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "8->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "12->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12->0\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "12->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "12->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "12->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "12->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "10->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "10->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "13->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "13->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "13->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "13->11\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "13->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "13->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + 
"11->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "11->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "14->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14->12\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14->13\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "14->11\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "<__main__.show at 0x109c615b0>" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Define DiscreteKey pairs.\n", + "keys = [(j, 2) for j in range(15)]\n", + "\n", + "# Create thin-tree Bayesnet.\n", + "bayesNet = DiscreteBayesNet()\n", + "\n", + "\n", + "bayesNet.add(keys[0], P(keys[8], keys[12]), \"2/3 1/4 3/2 4/1\")\n", + "bayesNet.add(keys[1], P(keys[8], keys[12]), \"4/1 2/3 3/2 1/4\")\n", + "bayesNet.add(keys[2], P(keys[9], keys[12]), \"1/4 8/2 2/3 4/1\")\n", + "bayesNet.add(keys[3], P(keys[9], keys[12]), \"1/4 2/3 3/2 4/1\")\n", + "\n", + "bayesNet.add(keys[4], P(keys[10], keys[13]), \"2/3 1/4 3/2 4/1\")\n", + "bayesNet.add(keys[5], P(keys[10], keys[13]), \"4/1 2/3 3/2 1/4\")\n", + "bayesNet.add(keys[6], P(keys[11], keys[13]), \"1/4 3/2 2/3 4/1\")\n", + "bayesNet.add(keys[7], P(keys[11], keys[13]), \"1/4 2/3 3/2 4/1\")\n", + "\n", + "bayesNet.add(keys[8], P(keys[12], keys[14]), \"T 1/4 3/2 4/1\")\n", + "bayesNet.add(keys[9], P(keys[12], keys[14]), \"4/1 2/3 F 1/4\")\n", + "bayesNet.add(keys[10], P(keys[13], keys[14]), \"1/4 3/2 2/3 4/1\")\n", + "bayesNet.add(keys[11], P(keys[13], keys[14]), \"1/4 2/3 3/2 4/1\")\n", + "\n", + "bayesNet.add(keys[12], P(keys[14]), \"3/1 3/1\")\n", + "bayesNet.add(keys[13], P(keys[14]), \"1/3 3/1\")\n", + "\n", + "bayesNet.add(keys[14], P(), \"1/3\")\n", + "\n", + "show(bayesNet)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DiscreteValues{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1, 6: 0, 7: 1, 8: 0, 9: 0, 10: 0, 11: 0, 12: 1, 13: 1, 14: 0}\n", + "DiscreteValues{0: 0, 1: 1, 2: 0, 3: 0, 4: 1, 5: 0, 6: 0, 7: 0, 8: 1, 9: 1, 10: 0, 11: 1, 12: 0, 13: 0, 14: 1}\n", + "DiscreteValues{0: 1, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 0, 8: 1, 9: 0, 10: 1, 11: 1, 12: 0, 13: 1, 14: 0}\n", + "DiscreteValues{0: 1, 1: 1, 2: 0, 3: 0, 4: 1, 5: 1, 6: 1, 7: 1, 8: 0, 9: 1, 10: 0, 11: 0, 12: 1, 13: 0, 14: 1}\n", + "DiscreteValues{0: 0, 1: 0, 2: 1, 3: 0, 4: 1, 5: 1, 6: 1, 7: 0, 8: 1, 9: 1, 10: 0, 11: 1, 12: 0, 13: 0, 14: 1}\n" + ] + } + ], + "source": [ + "# Sample Bayes net (needs conditionals added in elimination order!)\n", + "for i in range(5):\n", + " print(bayesNet.sample())" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var0\n", + "\n", + "0\n", + "\n", + "\n", + "\n", + "factor0\n", + "\n", + "\n", + "\n", + "\n", + "var0--factor0\n", + "\n", + "\n", + "\n", + "\n", + "var1\n", + "\n", + "1\n", + "\n", + "\n", + "\n", + "factor1\n", + "\n", + "\n", + "\n", + "\n", + "var1--factor1\n", + "\n", + "\n", + "\n", + "\n", + "var2\n", + "\n", + "2\n", + "\n", + "\n", + "\n", + "factor2\n", + "\n", + "\n", + "\n", + "\n", + "var2--factor2\n", + "\n", + "\n", + "\n", + "\n", + "var3\n", + "\n", + "3\n", + "\n", + "\n", + "\n", + "factor3\n", + "\n", + "\n", + "\n", + 
"\n", + "var3--factor3\n", + "\n", + "\n", + "\n", + "\n", + "var4\n", + "\n", + "4\n", + "\n", + "\n", + "\n", + "factor4\n", + "\n", + "\n", + "\n", + "\n", + "var4--factor4\n", + "\n", + "\n", + "\n", + "\n", + "var5\n", + "\n", + "5\n", + "\n", + "\n", + "\n", + "factor5\n", + "\n", + "\n", + "\n", + "\n", + "var5--factor5\n", + "\n", + "\n", + "\n", + "\n", + "var6\n", + "\n", + "6\n", + "\n", + "\n", + "\n", + "factor6\n", + "\n", + "\n", + "\n", + "\n", + "var6--factor6\n", + "\n", + "\n", + "\n", + "\n", + "var7\n", + "\n", + "7\n", + "\n", + "\n", + "\n", + "factor7\n", + "\n", + "\n", + "\n", + "\n", + "var7--factor7\n", + "\n", + "\n", + "\n", + "\n", + "var8\n", + "\n", + "8\n", + "\n", + "\n", + "\n", + "var8--factor0\n", + "\n", + "\n", + "\n", + "\n", + "var8--factor1\n", + "\n", + "\n", + "\n", + "\n", + "factor8\n", + "\n", + "\n", + "\n", + "\n", + "var8--factor8\n", + "\n", + "\n", + "\n", + "\n", + "var9\n", + "\n", + "9\n", + "\n", + "\n", + "\n", + "var9--factor2\n", + "\n", + "\n", + "\n", + "\n", + "var9--factor3\n", + "\n", + "\n", + "\n", + "\n", + "factor9\n", + "\n", + "\n", + "\n", + "\n", + "var9--factor9\n", + "\n", + "\n", + "\n", + "\n", + "var10\n", + "\n", + "10\n", + "\n", + "\n", + "\n", + "var10--factor4\n", + "\n", + "\n", + "\n", + "\n", + "var10--factor5\n", + "\n", + "\n", + "\n", + "\n", + "factor10\n", + "\n", + "\n", + "\n", + "\n", + "var10--factor10\n", + "\n", + "\n", + "\n", + "\n", + "var11\n", + "\n", + "11\n", + "\n", + "\n", + "\n", + "var11--factor6\n", + "\n", + "\n", + "\n", + "\n", + "var11--factor7\n", + "\n", + "\n", + "\n", + "\n", + "factor11\n", + "\n", + "\n", + "\n", + "\n", + "var11--factor11\n", + "\n", + "\n", + "\n", + "\n", + "var12\n", + "\n", + "12\n", + "\n", + "\n", + "\n", + "var14\n", + "\n", + "14\n", + "\n", + "\n", + "\n", + "var12--var14\n", + "\n", + "\n", + "\n", + "\n", + "var12--factor0\n", + "\n", + "\n", + "\n", + "\n", + "var12--factor1\n", + "\n", + "\n", + "\n", + "\n", + "var12--factor2\n", + "\n", + "\n", + "\n", + "\n", + "var12--factor3\n", + "\n", + "\n", + "\n", + "\n", + "var12--factor8\n", + "\n", + "\n", + "\n", + "\n", + "var12--factor9\n", + "\n", + "\n", + "\n", + "\n", + "var13\n", + "\n", + "13\n", + "\n", + "\n", + "\n", + "var13--var14\n", + "\n", + "\n", + "\n", + "\n", + "var13--factor4\n", + "\n", + "\n", + "\n", + "\n", + "var13--factor5\n", + "\n", + "\n", + "\n", + "\n", + "var13--factor6\n", + "\n", + "\n", + "\n", + "\n", + "var13--factor7\n", + "\n", + "\n", + "\n", + "\n", + "var13--factor10\n", + "\n", + "\n", + "\n", + "\n", + "var13--factor11\n", + "\n", + "\n", + "\n", + "\n", + "var14--factor8\n", + "\n", + "\n", + "\n", + "\n", + "var14--factor9\n", + "\n", + "\n", + "\n", + "\n", + "var14--factor10\n", + "\n", + "\n", + "\n", + "\n", + "var14--factor11\n", + "\n", + "\n", + "\n", + "\n", + "factor14\n", + "\n", + "\n", + "\n", + "\n", + "var14--factor14\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "<__main__.show at 0x109c61f10>" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Create a factor graph out of the Bayes net.\n", + "factorGraph = DiscreteFactorGraph(bayesNet)\n", + "show(factorGraph)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "G\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "8,12,14\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "0 : 8,12\n", + 
"\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "1 : 8,12\n", + "\n", + "\n", + "\n", + "0->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "9 : 12,14\n", + "\n", + "\n", + "\n", + "0->3\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "6\n", + "\n", + "10,13 : 14\n", + "\n", + "\n", + "\n", + "0->6\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "4\n", + "\n", + "2 : 9,12\n", + "\n", + "\n", + "\n", + "3->4\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "5\n", + "\n", + "3 : 9,12\n", + "\n", + "\n", + "\n", + "3->5\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "7\n", + "\n", + "4 : 10,13\n", + "\n", + "\n", + "\n", + "6->7\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "8\n", + "\n", + "5 : 10,13\n", + "\n", + "\n", + "\n", + "6->8\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "9\n", + "\n", + "11 : 13,14\n", + "\n", + "\n", + "\n", + "6->9\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "10\n", + "\n", + "6 : 11,13\n", + "\n", + "\n", + "\n", + "9->10\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "11\n", + "\n", + "7 : 11,13\n", + "\n", + "\n", + "\n", + "9->11\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "<__main__.show at 0x109c61b50>" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Create a BayesTree out of the factor graph.\n", + "ordering = Ordering()\n", + "for j in range(15): ordering.push_back(j)\n", + "bayesTree = factorGraph.eliminateMultifrontal(ordering)\n", + "show(bayesTree)" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + }, + "kernelspec": { + "display_name": "Python 3.8.9 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/gtsam/examples/DiscreteSwitching.ipynb b/python/gtsam/examples/DiscreteSwitching.ipynb new file mode 100644 index 000000000..4af283cbd --- /dev/null +++ b/python/gtsam/examples/DiscreteSwitching.ipynb @@ -0,0 +1,805 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# A Discrete Switching System\n", + "\n", + "A la multi-hypothesis-smoother (MHS), but all discrete.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "GTSAM Copyright 2010-2022, Georgia Tech Research Corporation,\n", + "Atlanta, Georgia 30332-0415\n", + "All Rights Reserved\n", + "\n", + "Authors: Frank Dellaert, et al. 
(see THANKS for the full author list)\n", + "\n", + "See LICENSE for the license information" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "# This needs gtbook:\n", + "% pip install --quiet gtbook" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from gtsam import DiscreteBayesNet, DiscreteKeys, DiscreteFactorGraph, Ordering\n", + "from gtsam.symbol_shorthand import S\n", + "from gtsam.symbol_shorthand import M" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def P(*args):\n", + " \"\"\" Create a DiscreteKeys instances from a variable number of DiscreteKey pairs.\"\"\"\n", + " # TODO: We can make life easier by providing variable argument functions in C++ itself.\n", + " dks = DiscreteKeys()\n", + " for key in args:\n", + " dks.push_back(key)\n", + " return dks\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import graphviz\n", + "\n", + "\n", + "class show(graphviz.Source):\n", + " \"\"\" Display an object with a dot method as a graph.\"\"\"\n", + "\n", + " def __init__(self, obj):\n", + " \"\"\"Construct from object with 'dot' method.\"\"\"\n", + " # This small class takes an object, calls its dot function, and uses the\n", + " # resulting string to initialize a graphviz.Source instance. This in turn\n", + " # has a _repr_mimebundle_ method, which then renders it in the notebook.\n", + " super().__init__(obj.dot())\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "

[Garbled HTML rendering of the DiscreteBayesNet (size 4) conditionals P(s2|m1,s1), P(s3|m2,s2), P(s4|m3,s3), and P(s5|m4,s4) elided; the same CPTs appear intact as Markdown tables in the text/markdown output below.]
\n" + ], + "text/markdown": [ + "`DiscreteBayesNet` of size 4\n", + "\n", + " *P(s2|m1,s1):*\n", + "\n", + "|*m1*|*s1*|0|1|2|\n", + "|:-:|:-:|:-:|:-:|:-:|\n", + "|0|0|0.9|0.1|0|\n", + "|0|1|0.1|0.8|0.1|\n", + "|0|2|0|0.1|0.9|\n", + "|1|0|0.1|0.9|0|\n", + "|1|1|0|0.1|0.9|\n", + "|1|2|0.9|0|0.1|\n", + "\n", + " *P(s3|m2,s2):*\n", + "\n", + "|*m2*|*s2*|0|1|2|\n", + "|:-:|:-:|:-:|:-:|:-:|\n", + "|0|0|0.9|0.1|0|\n", + "|0|1|0.1|0.8|0.1|\n", + "|0|2|0|0.1|0.9|\n", + "|1|0|0.1|0.9|0|\n", + "|1|1|0|0.1|0.9|\n", + "|1|2|0.9|0|0.1|\n", + "\n", + " *P(s4|m3,s3):*\n", + "\n", + "|*m3*|*s3*|0|1|2|\n", + "|:-:|:-:|:-:|:-:|:-:|\n", + "|0|0|0.9|0.1|0|\n", + "|0|1|0.1|0.8|0.1|\n", + "|0|2|0|0.1|0.9|\n", + "|1|0|0.1|0.9|0|\n", + "|1|1|0|0.1|0.9|\n", + "|1|2|0.9|0|0.1|\n", + "\n", + " *P(s5|m4,s4):*\n", + "\n", + "|*m4*|*s4*|0|1|2|\n", + "|:-:|:-:|:-:|:-:|:-:|\n", + "|0|0|0.9|0.1|0|\n", + "|0|1|0.1|0.8|0.1|\n", + "|0|2|0|0.1|0.9|\n", + "|1|0|0.1|0.9|0|\n", + "|1|1|0|0.1|0.9|\n", + "|1|2|0.9|0|0.1|\n", + "\n" + ], + "text/plain": [ + "DiscreteBayesNet\n", + " \n", + "size: 4\n", + "conditional 0: P( s2 | m1 s1 ):\n", + " Choice(s2) \n", + " 0 Choice(s1) \n", + " 0 0 Choice(m1) \n", + " 0 0 0 Leaf 0.9\n", + " 0 0 1 Leaf 0.1\n", + " 0 1 Choice(m1) \n", + " 0 1 0 Leaf 0.1\n", + " 0 1 1 Leaf 0\n", + " 0 2 Choice(m1) \n", + " 0 2 0 Leaf 0\n", + " 0 2 1 Leaf 0.9\n", + " 1 Choice(s1) \n", + " 1 0 Choice(m1) \n", + " 1 0 0 Leaf 0.1\n", + " 1 0 1 Leaf 0.9\n", + " 1 1 Choice(m1) \n", + " 1 1 0 Leaf 0.8\n", + " 1 1 1 Leaf 0.1\n", + " 1 2 Choice(m1) \n", + " 1 2 0 Leaf 0.1\n", + " 1 2 1 Leaf 0\n", + " 2 Choice(s1) \n", + " 2 0 Choice(m1) \n", + " 2 0 0 Leaf 0\n", + " 2 0 1 Leaf 0\n", + " 2 1 Choice(m1) \n", + " 2 1 0 Leaf 0.1\n", + " 2 1 1 Leaf 0.9\n", + " 2 2 Choice(m1) \n", + " 2 2 0 Leaf 0.9\n", + " 2 2 1 Leaf 0.1\n", + "\n", + "conditional 1: P( s3 | m2 s2 ):\n", + " Choice(s3) \n", + " 0 Choice(s2) \n", + " 0 0 Choice(m2) \n", + " 0 0 0 Leaf 0.9\n", + " 0 0 1 Leaf 0.1\n", + " 0 1 Choice(m2) \n", + " 0 1 0 Leaf 0.1\n", + " 0 1 1 Leaf 0\n", + " 0 2 Choice(m2) \n", + " 0 2 0 Leaf 0\n", + " 0 2 1 Leaf 0.9\n", + " 1 Choice(s2) \n", + " 1 0 Choice(m2) \n", + " 1 0 0 Leaf 0.1\n", + " 1 0 1 Leaf 0.9\n", + " 1 1 Choice(m2) \n", + " 1 1 0 Leaf 0.8\n", + " 1 1 1 Leaf 0.1\n", + " 1 2 Choice(m2) \n", + " 1 2 0 Leaf 0.1\n", + " 1 2 1 Leaf 0\n", + " 2 Choice(s2) \n", + " 2 0 Choice(m2) \n", + " 2 0 0 Leaf 0\n", + " 2 0 1 Leaf 0\n", + " 2 1 Choice(m2) \n", + " 2 1 0 Leaf 0.1\n", + " 2 1 1 Leaf 0.9\n", + " 2 2 Choice(m2) \n", + " 2 2 0 Leaf 0.9\n", + " 2 2 1 Leaf 0.1\n", + "\n", + "conditional 2: P( s4 | m3 s3 ):\n", + " Choice(s4) \n", + " 0 Choice(s3) \n", + " 0 0 Choice(m3) \n", + " 0 0 0 Leaf 0.9\n", + " 0 0 1 Leaf 0.1\n", + " 0 1 Choice(m3) \n", + " 0 1 0 Leaf 0.1\n", + " 0 1 1 Leaf 0\n", + " 0 2 Choice(m3) \n", + " 0 2 0 Leaf 0\n", + " 0 2 1 Leaf 0.9\n", + " 1 Choice(s3) \n", + " 1 0 Choice(m3) \n", + " 1 0 0 Leaf 0.1\n", + " 1 0 1 Leaf 0.9\n", + " 1 1 Choice(m3) \n", + " 1 1 0 Leaf 0.8\n", + " 1 1 1 Leaf 0.1\n", + " 1 2 Choice(m3) \n", + " 1 2 0 Leaf 0.1\n", + " 1 2 1 Leaf 0\n", + " 2 Choice(s3) \n", + " 2 0 Choice(m3) \n", + " 2 0 0 Leaf 0\n", + " 2 0 1 Leaf 0\n", + " 2 1 Choice(m3) \n", + " 2 1 0 Leaf 0.1\n", + " 2 1 1 Leaf 0.9\n", + " 2 2 Choice(m3) \n", + " 2 2 0 Leaf 0.9\n", + " 2 2 1 Leaf 0.1\n", + "\n", + "conditional 3: P( s5 | m4 s4 ):\n", + " Choice(s5) \n", + " 0 Choice(s4) \n", + " 0 0 Choice(m4) \n", + " 0 0 0 Leaf 0.9\n", + " 0 0 1 Leaf 0.1\n", + " 0 1 Choice(m4) \n", + " 0 1 0 Leaf 0.1\n", + " 0 1 1 Leaf 0\n", 
+ " 0 2 Choice(m4) \n", + " 0 2 0 Leaf 0\n", + " 0 2 1 Leaf 0.9\n", + " 1 Choice(s4) \n", + " 1 0 Choice(m4) \n", + " 1 0 0 Leaf 0.1\n", + " 1 0 1 Leaf 0.9\n", + " 1 1 Choice(m4) \n", + " 1 1 0 Leaf 0.8\n", + " 1 1 1 Leaf 0.1\n", + " 1 2 Choice(m4) \n", + " 1 2 0 Leaf 0.1\n", + " 1 2 1 Leaf 0\n", + " 2 Choice(s4) \n", + " 2 0 Choice(m4) \n", + " 2 0 0 Leaf 0\n", + " 2 0 1 Leaf 0\n", + " 2 1 Choice(m4) \n", + " 2 1 0 Leaf 0.1\n", + " 2 1 1 Leaf 0.9\n", + " 2 2 Choice(m4) \n", + " 2 2 0 Leaf 0.9\n", + " 2 2 1 Leaf 0.1\n" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "nrStates = 3\n", + "K = 5\n", + "\n", + "bayesNet = DiscreteBayesNet()\n", + "for k in range(1, K):\n", + " key = S(k), nrStates\n", + " key_plus = S(k+1), nrStates\n", + " mode = M(k), 2\n", + " bayesNet.add(key_plus, P(mode, key), \"9/1/0 1/8/1 0/1/9 1/9/0 0/1/9 9/0/1\")\n", + "\n", + "bayesNet" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145025\n", + "\n", + "m1\n", + "\n", + "\n", + "\n", + "var8286623314361712642\n", + "\n", + "s2\n", + "\n", + "\n", + "\n", + "var7854277750134145025->var8286623314361712642\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145026\n", + "\n", + "m2\n", + "\n", + "\n", + "\n", + "var8286623314361712643\n", + "\n", + "s3\n", + "\n", + "\n", + "\n", + "var7854277750134145026->var8286623314361712643\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145027\n", + "\n", + "m3\n", + "\n", + "\n", + "\n", + "var8286623314361712644\n", + "\n", + "s4\n", + "\n", + "\n", + "\n", + "var7854277750134145027->var8286623314361712644\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145028\n", + "\n", + "m4\n", + "\n", + "\n", + "\n", + "var8286623314361712645\n", + "\n", + "s5\n", + "\n", + "\n", + "\n", + "var7854277750134145028->var8286623314361712645\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712641\n", + "\n", + "s1\n", + "\n", + "\n", + "\n", + "var8286623314361712641->var8286623314361712642\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712642->var8286623314361712643\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712643->var8286623314361712644\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712644->var8286623314361712645\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "<__main__.show at 0x11216aea0>" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show(bayesNet)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145025\n", + "\n", + "m1\n", + "\n", + "\n", + "\n", + "factor0\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145025--factor0\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145026\n", + "\n", + "m2\n", + "\n", + "\n", + "\n", + "factor1\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145026--factor1\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145027\n", + "\n", + "m3\n", + "\n", + "\n", + "\n", + "factor2\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145027--factor2\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145028\n", + "\n", + "m4\n", + "\n", + 
"\n", + "\n", + "factor3\n", + "\n", + "\n", + "\n", + "\n", + "var7854277750134145028--factor3\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712641\n", + "\n", + "s1\n", + "\n", + "\n", + "\n", + "var8286623314361712641--factor0\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712642\n", + "\n", + "s2\n", + "\n", + "\n", + "\n", + "var8286623314361712642--factor0\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712642--factor1\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712643\n", + "\n", + "s3\n", + "\n", + "\n", + "\n", + "var8286623314361712643--factor1\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712643--factor2\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712644\n", + "\n", + "s4\n", + "\n", + "\n", + "\n", + "var8286623314361712644--factor2\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712644--factor3\n", + "\n", + "\n", + "\n", + "\n", + "var8286623314361712645\n", + "\n", + "s5\n", + "\n", + "\n", + "\n", + "var8286623314361712645--factor3\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "<__main__.show at 0x1121a44d0>" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Create a factor graph out of the Bayes net.\n", + "factorGraph = DiscreteFactorGraph(bayesNet)\n", + "show(factorGraph)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Position 0: s1, s2, s3, s4, s5, m1, m2, m3, m4\n", + "\n" + ] + } + ], + "source": [ + "# Create a BayesTree out of the factor graph.\n", + "ordering = Ordering()\n", + "# First eliminate \"continuous\" states in time order\n", + "for k in range(1, K+1):\n", + " ordering.push_back(S(k))\n", + "for k in range(1, K):\n", + " ordering.push_back(M(k))\n", + "print(ordering)\n", + "bayesTree = factorGraph.eliminateMultifrontal(ordering)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "G\n", + "\n", + "\n", + "\n", + "0\n", + "\n", + "s4, s5, m1, m2, m3, m4\n", + "\n", + "\n", + "\n", + "1\n", + "\n", + "s3 : m1, m2, m3, s4\n", + "\n", + "\n", + "\n", + "0->1\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "2\n", + "\n", + "s2 : m1, m2, s3\n", + "\n", + "\n", + "\n", + "1->2\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "3\n", + "\n", + "s1 : m1, s2\n", + "\n", + "\n", + "\n", + "2->3\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "<__main__.show at 0x11775c3e0>" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "show(bayesTree)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "py312", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/gtsam/examples/EqF.ipynb b/python/gtsam/examples/EqF.ipynb index 59ee2c525..9eec85d55 100644 --- a/python/gtsam/examples/EqF.ipynb +++ b/python/gtsam/examples/EqF.ipynb @@ -6,13 +6,19 @@ "source": [ "# Equivariant Filter Bias\n", "\n", - "Implementing the example in [Fornasier et al, 2022, Overcoming Bias: Equivariant 
Filter Design for Biased Attitude Estimation with Online Calibration](https://arxiv.org/pdf/2209.12038).\n", - "\n", - "This notebook uses [Alessandro Fornasier's equivariant filter code](https://github.com/aau-cns/ABC-EqF) converted to use GTSAM's libraries.\n", + "Implementing the example in the \"Overcoming Bias\" paper by {cite:t}`https://doi.org/10.1109/LRA.2022.3210867` ([arxiv version](https://arxiv.org/pdf/2209.12038)).\n", + "This notebook was created by converting [Alessandro Fornasier's equivariant filter code](https://github.com/aau-cns/ABC-EqF) to use GTSAM's built-in data structures.\n", "\n", "Authors: Jennifer Oum & Darshan Rajasekaran" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Open" + ] + }, { "cell_type": "markdown", "metadata": { diff --git a/python/gtsam/notebooks/easyPoint2KalmanFilter.ipynb b/python/gtsam/examples/easyPoint2KalmanFilter.ipynb similarity index 72% rename from python/gtsam/notebooks/easyPoint2KalmanFilter.ipynb rename to python/gtsam/examples/easyPoint2KalmanFilter.ipynb index 1bdd12b8e..5417fca3b 100644 --- a/python/gtsam/notebooks/easyPoint2KalmanFilter.ipynb +++ b/python/gtsam/examples/easyPoint2KalmanFilter.ipynb @@ -1,24 +1,76 @@ { "cells": [ { - "cell_type": "code", - "execution_count": 1, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "\"\"\"\n", + "# Extended Kalman Filter\n", + "\n", "Extended Kalman filter on a moving 2D point, but done using factor graphs.\n", "This example uses the ExtendedKalmanFilter class to perform filtering\n", "on a linear system, demonstrating the same operations as in elaboratePoint2KalmanFilter.\n", - "\"\"\"\n", "\n", + "Author: Matt Kielo" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "GTSAM Copyright 2010-2022, Georgia Tech Research Corporation,\n", + "Atlanta, Georgia 30332-0415\n", + "All Rights Reserved\n", + "\n", + "Authors: Frank Dellaert, et al. (see THANKS for the full author list)\n", + "\n", + "See LICENSE for the license information" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install --quiet gtsam gtbook" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ "import gtsam\n", "import numpy as np" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -49,7 +101,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -64,8 +116,8 @@ "X3 Update: [3. 0.]\n", "\n", "Easy Final Covariance (after update):\n", - " [[0.0193 0. ]\n", - " [0. 0.0193]]\n" + " [[0.01930567 0. ]\n", + " [0. 
0.01930567]]\n" ] } ], @@ -100,7 +152,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "py312", "language": "python", "name": "python3" }, @@ -114,7 +166,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.12.6" } }, "nbformat": 4, diff --git a/python/gtsam/notebooks/elaboratePoint2KalmanFilter.ipynb b/python/gtsam/examples/elaboratePoint2KalmanFilter.ipynb similarity index 84% rename from python/gtsam/notebooks/elaboratePoint2KalmanFilter.ipynb rename to python/gtsam/examples/elaboratePoint2KalmanFilter.ipynb index 34c467c35..afd195c20 100644 --- a/python/gtsam/notebooks/elaboratePoint2KalmanFilter.ipynb +++ b/python/gtsam/examples/elaboratePoint2KalmanFilter.ipynb @@ -1,34 +1,69 @@ { "cells": [ { - "cell_type": "code", - "execution_count": 1, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "\"\"\"\n", + "\n", + "# Elaborate EKF Example\n", + "\n", "Simple linear Kalman filter on a moving 2D point using factor graphs in GTSAM.\n", "This example manually creates all of the needed data structures to show how\n", "the Kalman filter works under the hood using factor graphs, but uses a loop\n", "to handle the repetitive prediction and update steps.\n", "\n", - "Based on the C++ example by Frank Dellaert and Stephen Williams\n", - "\"\"\"\n", + "Author: Matt Kielo. Based on the C++ example by Frank Dellaert and Stephen Williams" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "GTSAM Copyright 2010-2022, Georgia Tech Research Corporation,\n", + "Atlanta, Georgia 30332-0415\n", + "All Rights Reserved\n", "\n", + "Authors: Frank Dellaert, et al. (see THANKS for the full author list)\n", + "\n", + "See LICENSE for the license information" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "import gtsam\n", "import numpy as np\n", "from gtsam import Point2, noiseModel\n", "from gtsam.symbol_shorthand import X" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The code below basically implements the SRIF (Square-root Information filter version of the EKF) with Cholesky factorization." + ] + }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# [code below basically does SRIF with Cholesky]\n", - "\n", "# Setup containers for linearization points\n", "linearization_points = gtsam.Values()\n", "\n", diff --git a/python/gtsam/notebooks/DiscreteBayesTree.ipynb b/python/gtsam/notebooks/DiscreteBayesTree.ipynb deleted file mode 100644 index 066c31d6a..000000000 --- a/python/gtsam/notebooks/DiscreteBayesTree.ipynb +++ /dev/null @@ -1,200 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# The Discrete Bayes Tree\n", - "\n", - "An example of building a Bayes net, then eliminating it into a Bayes tree. 
Mirrors the code in `testDiscreteBayesTree.cpp` .\n" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from gtsam import DiscreteBayesTree, DiscreteBayesNet, DiscreteKeys, DiscreteFactorGraph, Ordering\n", - "from gtsam.symbol_shorthand import S\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "def P(*args):\n", - " \"\"\" Create a DiscreteKeys instances from a variable number of DiscreteKey pairs.\"\"\"\n", - " #TODO: We can make life easier by providing variable argument functions in C++ itself.\n", - " dks = DiscreteKeys()\n", - " for key in args:\n", - " dks.push_back(key)\n", - " return dks" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "import graphviz\n", - "class show(graphviz.Source):\n", - " \"\"\" Display an object with a dot method as a graph.\"\"\"\n", - "\n", - " def __init__(self, obj):\n", - " \"\"\"Construct from object with 'dot' method.\"\"\"\n", - " # This small class takes an object, calls its dot function, and uses the\n", - " # resulting string to initialize a graphviz.Source instance. This in turn\n", - " # has a _repr_mimebundle_ method, which then renders it in the notebook.\n", - " super().__init__(obj.dot())" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": "\n\n\n\n\n\nG\n\n\n\n8\n\n8\n\n\n\n0\n\n0\n\n\n\n8->0\n\n\n\n\n\n1\n\n1\n\n\n\n8->1\n\n\n\n\n\n12\n\n12\n\n\n\n12->8\n\n\n\n\n\n12->0\n\n\n\n\n\n12->1\n\n\n\n\n\n9\n\n9\n\n\n\n12->9\n\n\n\n\n\n2\n\n2\n\n\n\n12->2\n\n\n\n\n\n3\n\n3\n\n\n\n12->3\n\n\n\n\n\n9->2\n\n\n\n\n\n9->3\n\n\n\n\n\n10\n\n10\n\n\n\n4\n\n4\n\n\n\n10->4\n\n\n\n\n\n5\n\n5\n\n\n\n10->5\n\n\n\n\n\n13\n\n13\n\n\n\n13->10\n\n\n\n\n\n13->4\n\n\n\n\n\n13->5\n\n\n\n\n\n11\n\n11\n\n\n\n13->11\n\n\n\n\n\n6\n\n6\n\n\n\n13->6\n\n\n\n\n\n7\n\n7\n\n\n\n13->7\n\n\n\n\n\n11->6\n\n\n\n\n\n11->7\n\n\n\n\n\n14\n\n14\n\n\n\n14->8\n\n\n\n\n\n14->12\n\n\n\n\n\n14->9\n\n\n\n\n\n14->10\n\n\n\n\n\n14->13\n\n\n\n\n\n14->11\n\n\n\n\n\n", - "text/plain": [ - "<__main__.show at 0x109c615b0>" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Define DiscreteKey pairs.\n", - "keys = [(j, 2) for j in range(15)]\n", - "\n", - "# Create thin-tree Bayesnet.\n", - "bayesNet = DiscreteBayesNet()\n", - "\n", - "\n", - "bayesNet.add(keys[0], P(keys[8], keys[12]), \"2/3 1/4 3/2 4/1\")\n", - "bayesNet.add(keys[1], P(keys[8], keys[12]), \"4/1 2/3 3/2 1/4\")\n", - "bayesNet.add(keys[2], P(keys[9], keys[12]), \"1/4 8/2 2/3 4/1\")\n", - "bayesNet.add(keys[3], P(keys[9], keys[12]), \"1/4 2/3 3/2 4/1\")\n", - "\n", - "bayesNet.add(keys[4], P(keys[10], keys[13]), \"2/3 1/4 3/2 4/1\")\n", - "bayesNet.add(keys[5], P(keys[10], keys[13]), \"4/1 2/3 3/2 1/4\")\n", - "bayesNet.add(keys[6], P(keys[11], keys[13]), \"1/4 3/2 2/3 4/1\")\n", - "bayesNet.add(keys[7], P(keys[11], keys[13]), \"1/4 2/3 3/2 4/1\")\n", - "\n", - "bayesNet.add(keys[8], P(keys[12], keys[14]), \"T 1/4 3/2 4/1\")\n", - "bayesNet.add(keys[9], P(keys[12], keys[14]), \"4/1 2/3 F 1/4\")\n", - "bayesNet.add(keys[10], P(keys[13], keys[14]), \"1/4 3/2 2/3 4/1\")\n", - "bayesNet.add(keys[11], P(keys[13], keys[14]), \"1/4 2/3 3/2 4/1\")\n", - "\n", - "bayesNet.add(keys[12], P(keys[14]), \"3/1 3/1\")\n", - "bayesNet.add(keys[13], P(keys[14]), \"1/3 3/1\")\n", - "\n", - "bayesNet.add(keys[14], P(), 
\"1/3\")\n", - "\n", - "show(bayesNet)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "DiscreteValues{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1, 6: 0, 7: 1, 8: 0, 9: 0, 10: 0, 11: 0, 12: 1, 13: 1, 14: 0}\n", - "DiscreteValues{0: 0, 1: 1, 2: 0, 3: 0, 4: 1, 5: 0, 6: 0, 7: 0, 8: 1, 9: 1, 10: 0, 11: 1, 12: 0, 13: 0, 14: 1}\n", - "DiscreteValues{0: 1, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 0, 8: 1, 9: 0, 10: 1, 11: 1, 12: 0, 13: 1, 14: 0}\n", - "DiscreteValues{0: 1, 1: 1, 2: 0, 3: 0, 4: 1, 5: 1, 6: 1, 7: 1, 8: 0, 9: 1, 10: 0, 11: 0, 12: 1, 13: 0, 14: 1}\n", - "DiscreteValues{0: 0, 1: 0, 2: 1, 3: 0, 4: 1, 5: 1, 6: 1, 7: 0, 8: 1, 9: 1, 10: 0, 11: 1, 12: 0, 13: 0, 14: 1}\n" - ] - } - ], - "source": [ - "# Sample Bayes net (needs conditionals added in elimination order!)\n", - "for i in range(5):\n", - " print(bayesNet.sample())" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": "\n\n\n\n\n\n\n\n\nvar0\n\n0\n\n\n\nfactor0\n\n\n\n\nvar0--factor0\n\n\n\n\nvar1\n\n1\n\n\n\nfactor1\n\n\n\n\nvar1--factor1\n\n\n\n\nvar2\n\n2\n\n\n\nfactor2\n\n\n\n\nvar2--factor2\n\n\n\n\nvar3\n\n3\n\n\n\nfactor3\n\n\n\n\nvar3--factor3\n\n\n\n\nvar4\n\n4\n\n\n\nfactor4\n\n\n\n\nvar4--factor4\n\n\n\n\nvar5\n\n5\n\n\n\nfactor5\n\n\n\n\nvar5--factor5\n\n\n\n\nvar6\n\n6\n\n\n\nfactor6\n\n\n\n\nvar6--factor6\n\n\n\n\nvar7\n\n7\n\n\n\nfactor7\n\n\n\n\nvar7--factor7\n\n\n\n\nvar8\n\n8\n\n\n\nvar8--factor0\n\n\n\n\nvar8--factor1\n\n\n\n\nfactor8\n\n\n\n\nvar8--factor8\n\n\n\n\nvar9\n\n9\n\n\n\nvar9--factor2\n\n\n\n\nvar9--factor3\n\n\n\n\nfactor9\n\n\n\n\nvar9--factor9\n\n\n\n\nvar10\n\n10\n\n\n\nvar10--factor4\n\n\n\n\nvar10--factor5\n\n\n\n\nfactor10\n\n\n\n\nvar10--factor10\n\n\n\n\nvar11\n\n11\n\n\n\nvar11--factor6\n\n\n\n\nvar11--factor7\n\n\n\n\nfactor11\n\n\n\n\nvar11--factor11\n\n\n\n\nvar12\n\n12\n\n\n\nvar14\n\n14\n\n\n\nvar12--var14\n\n\n\n\nvar12--factor0\n\n\n\n\nvar12--factor1\n\n\n\n\nvar12--factor2\n\n\n\n\nvar12--factor3\n\n\n\n\nvar12--factor8\n\n\n\n\nvar12--factor9\n\n\n\n\nvar13\n\n13\n\n\n\nvar13--var14\n\n\n\n\nvar13--factor4\n\n\n\n\nvar13--factor5\n\n\n\n\nvar13--factor6\n\n\n\n\nvar13--factor7\n\n\n\n\nvar13--factor10\n\n\n\n\nvar13--factor11\n\n\n\n\nvar14--factor8\n\n\n\n\nvar14--factor9\n\n\n\n\nvar14--factor10\n\n\n\n\nvar14--factor11\n\n\n\n\nfactor14\n\n\n\n\nvar14--factor14\n\n\n\n\n", - "text/plain": [ - "<__main__.show at 0x109c61f10>" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Create a factor graph out of the Bayes net.\n", - "factorGraph = DiscreteFactorGraph(bayesNet)\n", - "show(factorGraph)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "image/svg+xml": "\n\n\n\n\n\nG\n\n\n\n0\n\n8,12,14\n\n\n\n1\n\n0 : 8,12\n\n\n\n0->1\n\n\n\n\n\n2\n\n1 : 8,12\n\n\n\n0->2\n\n\n\n\n\n3\n\n9 : 12,14\n\n\n\n0->3\n\n\n\n\n\n6\n\n10,13 : 14\n\n\n\n0->6\n\n\n\n\n\n4\n\n2 : 9,12\n\n\n\n3->4\n\n\n\n\n\n5\n\n3 : 9,12\n\n\n\n3->5\n\n\n\n\n\n7\n\n4 : 10,13\n\n\n\n6->7\n\n\n\n\n\n8\n\n5 : 10,13\n\n\n\n6->8\n\n\n\n\n\n9\n\n11 : 13,14\n\n\n\n6->9\n\n\n\n\n\n10\n\n6 : 11,13\n\n\n\n9->10\n\n\n\n\n\n11\n\n7 : 11,13\n\n\n\n9->11\n\n\n\n\n\n", - "text/plain": [ - "<__main__.show at 0x109c61b50>" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Create 
a BayesTree out of the factor graph.\n", - "ordering = Ordering()\n", - "for j in range(15): ordering.push_back(j)\n", - "bayesTree = factorGraph.eliminateMultifrontal(ordering)\n", - "show(bayesTree)" - ] - } - ], - "metadata": { - "interpreter": { - "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" - }, - "kernelspec": { - "display_name": "Python 3.8.9 64-bit", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/python/gtsam/notebooks/DiscreteSwitching.ipynb b/python/gtsam/notebooks/DiscreteSwitching.ipynb deleted file mode 100644 index 6872e78c8..000000000 --- a/python/gtsam/notebooks/DiscreteSwitching.ipynb +++ /dev/null @@ -1,155 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# A Discrete Switching System\n", - "\n", - "A la MHS, but all discrete.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from gtsam import DiscreteBayesNet, DiscreteKeys, DiscreteFactorGraph, Ordering\n", - "from gtsam.symbol_shorthand import S\n", - "from gtsam.symbol_shorthand import M\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def P(*args):\n", - " \"\"\" Create a DiscreteKeys instances from a variable number of DiscreteKey pairs.\"\"\"\n", - " # TODO: We can make life easier by providing variable argument functions in C++ itself.\n", - " dks = DiscreteKeys()\n", - " for key in args:\n", - " dks.push_back(key)\n", - " return dks\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import graphviz\n", - "\n", - "\n", - "class show(graphviz.Source):\n", - " \"\"\" Display an object with a dot method as a graph.\"\"\"\n", - "\n", - " def __init__(self, obj):\n", - " \"\"\"Construct from object with 'dot' method.\"\"\"\n", - " # This small class takes an object, calls its dot function, and uses the\n", - " # resulting string to initialize a graphviz.Source instance. 
This in turn\n", - " # has a _repr_mimebundle_ method, which then renders it in the notebook.\n", - " super().__init__(obj.dot())\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "nrStates = 3\n", - "K = 5\n", - "\n", - "bayesNet = DiscreteBayesNet()\n", - "for k in range(1, K):\n", - " key = S(k), nrStates\n", - " key_plus = S(k+1), nrStates\n", - " mode = M(k), 2\n", - " bayesNet.add(key_plus, P(mode, key), \"9/1/0 1/8/1 0/1/9 1/9/0 0/1/9 9/0/1\")\n", - "\n", - "bayesNet" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "show(bayesNet)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create a factor graph out of the Bayes net.\n", - "factorGraph = DiscreteFactorGraph(bayesNet)\n", - "show(factorGraph)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create a BayesTree out of the factor graph.\n", - "ordering = Ordering()\n", - "# First eliminate \"continuous\" states in time order\n", - "for k in range(1, K+1):\n", - " ordering.push_back(S(k))\n", - "for k in range(1, K):\n", - " ordering.push_back(M(k))\n", - "print(ordering)\n", - "bayesTree = factorGraph.eliminateMultifrontal(ordering)\n", - "bayesTree" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "show(bayesTree)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "interpreter": { - "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" - }, - "kernelspec": { - "display_name": "Python 3.8.9 64-bit", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} From 942750b1273abdf38d91d00017628f2993c9ebeb Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 6 Apr 2025 16:43:40 -0400 Subject: [PATCH 21/21] Reviewed all remaining notebooks --- .../nonlinear/doc/BatchFixedLagSmoother.ipynb | 21 +-- gtsam/nonlinear/doc/FixedLagSmoother.ipynb | 65 ++++----- gtsam/nonlinear/doc/ISAM2.ipynb | 46 +++---- .../doc/IncrementalFixedLagSmoother.ipynb | 23 ++++ .../nonlinear/doc/LinearContainerFactor.ipynb | 48 ++----- gtsam/nonlinear/doc/NonlinearEquality.ipynb | 124 ++++++++++++++++++ gtsam/nonlinear/doc/NonlinearFactor.ipynb | 67 ++++++---- .../nonlinear/doc/NonlinearFactorGraph.ipynb | 39 +++--- gtsam/nonlinear/doc/NonlinearISAM.ipynb | 64 +-------- gtsam/nonlinear/doc/PriorFactor.ipynb | 22 ++-- gtsam/nonlinear/doc/WhiteNoiseFactor.ipynb | 83 ++++++------ gtsam/nonlinear/nonlinear.md | 54 ++++---- 12 files changed, 343 insertions(+), 313 deletions(-) create mode 100644 gtsam/nonlinear/doc/IncrementalFixedLagSmoother.ipynb create mode 100644 gtsam/nonlinear/doc/NonlinearEquality.ipynb diff --git a/gtsam/nonlinear/doc/BatchFixedLagSmoother.ipynb b/gtsam/nonlinear/doc/BatchFixedLagSmoother.ipynb index a698bd430..56b4e414a 100644 --- a/gtsam/nonlinear/doc/BatchFixedLagSmoother.ipynb +++ b/gtsam/nonlinear/doc/BatchFixedLagSmoother.ipynb @@ -9,30 +9,11 @@ "\n", "## Overview\n", "\n", - "The `BatchFixedLagSmoother` class in GTSAM is 
designed for fixed-lag smoothing in nonlinear factor graphs. It maintains a sliding window of the most recent variables and marginalizes out older variables. This is particularly useful in real-time applications where memory and computational efficiency are critical.\n",
+    "The `BatchFixedLagSmoother` is a [FixedLagSmoother](FixedLagSmoother.ipynb) that uses [LevenbergMarquardtOptimizer](LevenbergMarquardtOptimizer.ipynb) for batch optimization.\n",
     "\n",
     "This fixed lag smoother will **batch-optimize** at every iteration, but warm-started from the last estimate."
    ]
   },
-  {
-   "cell_type": "markdown",
-   "id": "42c80522",
-   "metadata": {},
-   "source": [
-    "## Mathematical Formulation\n",
-    "\n",
-    "The `BatchFixedLagSmoother` operates on the principle of fixed-lag smoothing, where the objective is to estimate the state $\\mathbf{x}_t$ given all measurements up to time $t$, but only retaining a fixed window of recent states. The optimization problem can be expressed as:\n",
-    "$$\n",
-    "\\min_{\\mathbf{x}_{t-L:t}} \\sum_{i=1}^{N} \\| \\mathbf{h}_i(\\mathbf{x}_{t-L:t}) - \\mathbf{z}_i \\|^2\n",
-    "$$\n",
-    "where $L$ is the fixed lag, $\\mathbf{h}_i$ are the measurement functions, and $\\mathbf{z}_i$ are the measurements.\n",
-    "In practice, the functions $\\mathbf{h}_i$ depend only on a subset of the state variables $\\mathbf{X}_i$, and the optimization is performed over a set of $N$ *factors* $\\phi_i$ instead:\n",
-    "$$\n",
-    "\\min_{\\mathbf{x}_{t-L:t}} \\sum_{i=1}^{N} \\| \\phi_i(\\mathbf{X}_i; \\mathbf{z}_i) \\|^2\n",
-    "$$\n",
-    "The API below allows the user to add new factors at every iteration, which will be automatically pruned after they no longer depend on any variables in the lag."
-   ]
-  },
   {
    "cell_type": "markdown",
    "id": "92b4f851",
diff --git a/gtsam/nonlinear/doc/FixedLagSmoother.ipynb b/gtsam/nonlinear/doc/FixedLagSmoother.ipynb
index 1a3fc856d..50ac1900d 100644
--- a/gtsam/nonlinear/doc/FixedLagSmoother.ipynb
+++ b/gtsam/nonlinear/doc/FixedLagSmoother.ipynb
@@ -5,59 +5,40 @@
    "id": "cdd2fdc5",
    "metadata": {},
    "source": [
-    "# FixedLagSmoother Class Documentation\n",
-    "\n",
-    "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+    "# FixedLagSmoother\n",
     "\n",
     "## Overview\n",
     "\n",
-    "The `FixedLagSmoother` class in GTSAM is designed for incremental smoothing and mapping in robotics and computer vision applications. It maintains a fixed-size window of the most recent states, allowing for efficient updates and marginalization of older states. This is particularly useful in scenarios where real-time performance is crucial, and memory usage needs to be controlled.\n",
-    "\n",
-    "## Key Features\n",
-    "\n",
-    "- **Incremental Updates**: The `FixedLagSmoother` allows for efficient updates as new measurements are received, making it suitable for real-time applications.\n",
-    "- **Fixed-Lag Smoothing**: It maintains a fixed window of recent states, which helps in managing computational resources by marginalizing out older states.\n",
-    "- **Nonlinear Optimization**: Utilizes nonlinear optimization techniques to refine the estimates of the states within the fixed lag window.\n",
-    "\n",
-    "## Main Methods\n",
-    "\n",
-    "### Update\n",
-    "\n",
-    "The `update` method is central to the `FixedLagSmoother` class. It incorporates new measurements and updates the state estimates within the fixed lag window. 
The method ensures that the estimates are consistent with the new information while maintaining computational efficiency.\n", - "\n", - "### Marginalization\n", - "\n", - "Marginalization is a key process in fixed-lag smoothing, where older states are removed from the optimization problem to keep the problem size manageable. This is done while preserving the essential information about the past states in the form of a prior.\n", - "\n", - "### Optimization\n", - "\n", - "The class employs nonlinear optimization techniques to solve the smoothing problem. The optimization process aims to minimize the error between the predicted and observed measurements, leading to refined state estimates.\n", + "The `FixedLagSmoother` class is the base class for [BatchFixedLagSmoother](BatchFixedLagSmoother.ipynb) and [IncrementalFixedLagSmoother](IncrementalFixedLagSmoother.ipynb).\n", "\n", + "It provides an API for fixed-lag smoothing in nonlinear factor graphs. It maintains a sliding window of the most recent variables and marginalizes out older variables. This is particularly useful in real-time applications where memory and computational efficiency are critical." + ] + }, + { + "cell_type": "markdown", + "id": "8d372784", + "metadata": {}, + "source": [ "## Mathematical Formulation\n", "\n", - "The `FixedLagSmoother` operates on the principle of minimizing a cost function that represents the sum of squared errors between the predicted and observed measurements. Mathematically, this can be expressed as:\n", - "\n", + "In fixed-lag smoothing the objective is to estimate the state $\\mathbf{x}_t$ given all measurements up to time $t$, but only retaining a fixed window of recent states. The optimization problem can be expressed as:\n", "$$\n", - "\\min_x \\sum_i \\| h(x_i) - z_i \\|^2\n", + "\\min_{\\mathbf{x}_{t-L:t}} \\sum_{i=1}^{N} \\| \\mathbf{h}_i(\\mathbf{x}_{t-L:t}) - \\mathbf{z}_i \\|^2\n", "$$\n", - "\n", - "where $h(x_i)$ is the predicted measurement, $z_i$ is the observed measurement, and $x_i$ represents the state variables within the fixed lag window.\n", - "\n", - "## Applications\n", - "\n", - "The `FixedLagSmoother` is widely used in applications such as:\n", - "\n", - "- **Simultaneous Localization and Mapping (SLAM)**: Helps in maintaining a consistent map and robot trajectory in real-time.\n", - "- **Visual-Inertial Odometry (VIO)**: Used for estimating the motion of a camera-equipped device by fusing visual and inertial data.\n", - "- **Sensor Fusion**: Combines data from multiple sensors to improve the accuracy of state estimates.\n", - "\n", - "## Conclusion\n", - "\n", - "The `FixedLagSmoother` class is a powerful tool for real-time state estimation in dynamic environments. Its ability to handle incremental updates and maintain a fixed-size problem makes it ideal for applications where computational resources are limited. By leveraging nonlinear optimization, it provides accurate and consistent state estimates within the fixed lag window." 
+ "where $L$ is the fixed lag, $\\mathbf{h}_i$ are the measurement functions, and $\\mathbf{z}_i$ are the measurements.\n", + "In practice, the functions $\\mathbf{h}_i$ depend only on a subset of the state variables $\\mathbf{X}_i$, and the optimization is performed over a set of $N$ *factors* $\\phi_i$ instead:\n", + "$$\n", + "\\min_{\\mathbf{x}_{t-L:t}} \\sum_{i=1}^{N} \\| \\phi_i(\\mathbf{X}_i; \\mathbf{z}_i) \\|^2\n", + "$$\n", + "The API below allows the user to add new factors at every iteration, which will be automatically pruned after they no longer depend on any variables in the lag." ] } ], - "metadata": {}, + "metadata": { + "language_info": { + "name": "python" + } + }, "nbformat": 4, "nbformat_minor": 5 } diff --git a/gtsam/nonlinear/doc/ISAM2.ipynb b/gtsam/nonlinear/doc/ISAM2.ipynb index dcab771a3..d5f89df91 100644 --- a/gtsam/nonlinear/doc/ISAM2.ipynb +++ b/gtsam/nonlinear/doc/ISAM2.ipynb @@ -5,21 +5,26 @@ "id": "867a20bc", "metadata": {}, "source": [ - "# ISAM2 Class Documentation\n", - "\n", - "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "# ISAM2\n", "\n", "## Overview\n", "\n", - "The `ISAM2` class in GTSAM is an incremental smoothing and mapping algorithm that efficiently updates the solution to a nonlinear optimization problem as new measurements are added. This class is particularly useful in applications such as SLAM (Simultaneous Localization and Mapping) where real-time performance is crucial.\n", + "The `ISAM2` class in GTSAM is an incremental smoothing and mapping algorithm that efficiently updates the solution to a nonlinear optimization problem as new measurements are added. This class is particularly useful in applications such as SLAM (Simultaneous Localization and Mapping) where real-time performance is crucial. \n", + "\n", + "The algorithm is described in the 2012 IJJR paper by {cite:t}`http://dx.doi.org/10.1177/0278364911430419`. For background, also see the more recent booklet by {cite:t}`https://doi.org/10.1561/2300000043`.\n", "\n", "## Key Features\n", "\n", "- **Incremental Updates**: `ISAM2` allows for incremental updates to the factor graph, avoiding the need to solve the entire problem from scratch with each new measurement.\n", - "- **Bayesian Inference**: Utilizes Bayes' rule to update beliefs about the state of the system as new information becomes available.\n", "- **Nonlinear Optimization**: Capable of handling nonlinear systems, leveraging iterative optimization techniques to refine estimates.\n", - "- **Efficient Variable Reordering**: Dynamically reorders variables to maintain sparsity and improve computational efficiency.\n", - "\n", + "- **Efficient Variable Reordering**: Dynamically reorders variables to maintain sparsity and improve computational efficiency." 
+ ] + }, + { + "cell_type": "markdown", + "id": "9ce0ec12", + "metadata": {}, + "source": [ "## Main Methods\n", "\n", "### Initialization and Configuration\n", @@ -37,30 +42,15 @@ "\n", "### Advanced Features\n", "\n", - "- **relinearize**: Forces relinearization of the entire factor graph, which can be useful in scenarios where significant nonlinearities are introduced.\n", - "- **getFactorsUnsafe**: Provides access to the internal factor graph, allowing for advanced manipulations and custom analyses.\n", - "\n", - "## Mathematical Formulation\n", - "\n", - "The `ISAM2` algorithm is based on the factor graph representation of the problem, where the joint probability distribution is expressed as a product of factors:\n", - "\n", - "$$ P(X|Z) \\propto \\prod_{i} \\phi_i(X_i, Z_i) $$\n", - "\n", - "Here, $X$ represents the set of variables, $Z$ the measurements, and $\\phi_i$ the individual factors.\n", - "\n", - "The update process involves solving a nonlinear optimization problem, typically using the Gauss-Newton or Levenberg-Marquardt algorithms, to minimize the error:\n", - "\n", - "$$ \\min_{X} \\sum_{i} \\| h_i(X_i) - Z_i \\|^2 $$\n", - "\n", - "where $h_i(X_i)$ are the measurement functions.\n", - "\n", - "## Conclusion\n", - "\n", - "The `ISAM2` class is a powerful tool for real-time estimation in dynamic environments. Its ability to efficiently update solutions with new data makes it ideal for applications requiring continuous adaptation and refinement of estimates. Users can leverage its advanced features to customize the behavior and performance of the algorithm to suit specific needs." + "- **getFactorsUnsafe**: Provides access to the internal factor graph, allowing for advanced manipulations and custom analysis." ] } ], - "metadata": {}, + "metadata": { + "language_info": { + "name": "python" + } + }, "nbformat": 4, "nbformat_minor": 5 } diff --git a/gtsam/nonlinear/doc/IncrementalFixedLagSmoother.ipynb b/gtsam/nonlinear/doc/IncrementalFixedLagSmoother.ipynb new file mode 100644 index 000000000..287e00d02 --- /dev/null +++ b/gtsam/nonlinear/doc/IncrementalFixedLagSmoother.ipynb @@ -0,0 +1,23 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "cdd2fdc5", + "metadata": {}, + "source": [ + "# IncrementalFixedLagSmoother\n", + "\n", + "## Overview\n", + "\n", + "The `IncrementalFixedLagSmoother` is a [FixedLagSmoother](FixedLagSmoother.ipynb) that uses [iSAM2](iSAM2.ipynb) for incremental inference.\n" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/gtsam/nonlinear/doc/LinearContainerFactor.ipynb b/gtsam/nonlinear/doc/LinearContainerFactor.ipynb index edb00e936..602c4e401 100644 --- a/gtsam/nonlinear/doc/LinearContainerFactor.ipynb +++ b/gtsam/nonlinear/doc/LinearContainerFactor.ipynb @@ -5,13 +5,11 @@ "id": "f4c73cc1", "metadata": {}, "source": [ - "# LinearContainerFactor Class Documentation\n", - "\n", - "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "# LinearContainerFactor\n", "\n", "## Overview\n", "\n", - "The `LinearContainerFactor` class in GTSAM is a specialized factor that encapsulates a linear factor within a nonlinear factor graph. 
This class allows for the seamless integration of linear factors into a nonlinear optimization problem, providing flexibility in problem modeling and solution.\n", + "The `LinearContainerFactor` class in GTSAM is a specialized factor that encapsulates a linear factor within a nonlinear factor graph. This is used extensively when marginalizing out variables.\n", "\n", "## Key Features\n", "\n", @@ -21,45 +19,15 @@ "\n", "## Key Methods\n", "\n", - "### Constructor\n", - "\n", - "- **LinearContainerFactor**: This constructor initializes the `LinearContainerFactor` with a linear factor and optionally with values. It serves as the entry point for creating an instance of this class.\n", - "\n", - "### Error Evaluation\n", - "\n", - "- **error**: This method calculates the error of the factor given a set of values. The error is typically defined as the difference between the predicted and observed measurements, and it plays a crucial role in optimization.\n", - "\n", - "### Jacobian Computation\n", - "\n", - "- **linearize**: This method computes the Jacobian matrix of the factor. The Jacobian is a matrix of partial derivatives that describes how the error changes with respect to changes in the variables. It is a critical component in gradient-based optimization algorithms.\n", - "\n", - "### Accessors\n", - "\n", - "- **keys**: This method returns the keys associated with the factor. Keys are identifiers for the variables involved in the factor, and they are essential for understanding the structure of the factor graph.\n", - "\n", - "### Utility Methods\n", - "\n", - "- **equals**: This method checks for equality between two `LinearContainerFactor` instances. It is useful for testing and validation purposes.\n", - "\n", - "## Mathematical Background\n", - "\n", - "The `LinearContainerFactor` operates within the context of factor graphs, where the goal is to minimize the total error across all factors. The error for a linear factor can be expressed as:\n", - "\n", - "$$ e(x) = A \\cdot x - b $$\n", - "\n", - "where $A$ is the coefficient matrix, $x$ is the vector of variables, and $b$ is the measurement vector. The optimization process aims to find the values of $x$ that minimize the sum of squared errors:\n", - "\n", - "$$ \\text{minimize} \\quad \\sum e(x)^T \\cdot e(x) $$\n", - "\n", - "The Jacobian matrix, which is derived from the linearization of the error function, is crucial for iterative optimization techniques such as Gauss-Newton or Levenberg-Marquardt.\n", - "\n", - "## Conclusion\n", - "\n", - "The `LinearContainerFactor` class is a powerful tool in GTSAM for integrating linear factors into nonlinear optimization problems. By providing mechanisms for error evaluation and Jacobian computation, it facilitates the efficient solution of complex estimation problems in robotics and computer vision." + "- **LinearContainerFactor**: This constructor initializes the `LinearContainerFactor` with a linear factor and optionally with values. It serves as the entry point for creating an instance of this class." 
] } ], - "metadata": {}, + "metadata": { + "language_info": { + "name": "python" + } + }, "nbformat": 4, "nbformat_minor": 5 } diff --git a/gtsam/nonlinear/doc/NonlinearEquality.ipynb b/gtsam/nonlinear/doc/NonlinearEquality.ipynb new file mode 100644 index 000000000..98c3656b1 --- /dev/null +++ b/gtsam/nonlinear/doc/NonlinearEquality.ipynb @@ -0,0 +1,124 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# NonlinearEquality\n", + "\n", + "The `NonlinearEquality` family of factors in GTSAM provides constraints to enforce equality between variables or between a variable and a constant value. These factors are useful in optimization problems where strict equality constraints are required. Below is an overview of the API, grouped by functionality." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## NonlinearEquality\n", + "\n", + "The `NonlinearEquality` factor enforces equality between a variable and a feasible value. It supports both exact and inexact evaluation modes.\n", + "\n", + "### Constructors\n", + "- `NonlinearEquality(Key j, const T& feasible, const CompareFunction& compare)` \n", + " Creates a factor that enforces exact equality between the variable at key `j` and the feasible value `feasible`. \n", + " - `j`: Key of the variable to constrain. \n", + " - `feasible`: The feasible value to enforce equality with. \n", + " - `compare`: Optional comparison function (default uses `traits::Equals`).\n", + "\n", + "- `NonlinearEquality(Key j, const T& feasible, double error_gain, const CompareFunction& compare)` \n", + " Creates a factor that allows inexact evaluation with a specified error gain. \n", + " - `error_gain`: Gain applied to the error when the constraint is violated.\n", + "\n", + "### Methods\n", + "- `double error(const Values& c) const` \n", + " Computes the error for the given values. Returns `0.0` if the constraint is satisfied, or a scaled error if `allow_error_` is enabled.\n", + "\n", + "- `Vector evaluateError(const T& xj, OptionalMatrixType H = nullptr) const` \n", + " Evaluates the error vector for the given variable value `xj`. Optionally computes the Jacobian matrix `H`.\n", + "\n", + "- `GaussianFactor::shared_ptr linearize(const Values& x) const` \n", + " Linearizes the factor at the given values `x` to create a Gaussian factor." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## NonlinearEquality1\n", + "\n", + "The `NonlinearEquality1` factor is a unary equality constraint that fixes a variable to a specific value.\n", + "\n", + "### Constructors\n", + "- `NonlinearEquality1(const X& value, Key key, double mu = 1000.0)` \n", + " Creates a factor that fixes the variable at `key` to the value `value`. \n", + " - `value`: The fixed value for the variable. \n", + " - `key`: Key of the variable to constrain. \n", + " - `mu`: Strength of the constraint (default: `1000.0`).\n", + "\n", + "### Methods\n", + "- `Vector evaluateError(const X& x1, OptionalMatrixType H = nullptr) const` \n", + " Evaluates the error vector for the given variable value `x1`. Optionally computes the Jacobian matrix `H`.\n", + "\n", + "- `void print(const std::string& s, const KeyFormatter& keyFormatter) const` \n", + " Prints the factor details, including the fixed value and noise model." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## NonlinearEquality2\n", + "\n", + "The `NonlinearEquality2` factor is a binary equality constraint that enforces equality between two variables.\n", + "\n", + "### Constructors\n", + "- `NonlinearEquality2(Key key1, Key key2, double mu = 1e4)` \n", + " Creates a factor that enforces equality between the variables at `key1` and `key2`. \n", + " - `key1`: Key of the first variable. \n", + " - `key2`: Key of the second variable. \n", + " - `mu`: Strength of the constraint (default: `1e4`).\n", + "\n", + "### Methods\n", + "- `Vector evaluateError(const T& x1, const T& x2, OptionalMatrixType H1 = nullptr, OptionalMatrixType H2 = nullptr) const` \n", + " Evaluates the error vector for the given variable values `x1` and `x2`. Optionally computes the Jacobian matrices `H1` and `H2`.\n", + "\n", + "- `void print(const std::string& s, const KeyFormatter& keyFormatter) const` \n", + " Prints the factor details, including the keys and noise model." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Common Features\n", + "\n", + "### Error Handling Modes\n", + "- Exact Evaluation: Throws an error during linearization if the constraint is violated. \n", + "- Inexact Evaluation: Allows nonzero error and scales it using the `error_gain_` parameter.\n", + "\n", + "### Serialization\n", + "All factors support serialization for saving and loading models.\n", + "\n", + "### Testable Interface\n", + "All factors implement the `Testable` interface, providing methods like:\n", + "- `void print(const std::string& s, const KeyFormatter& keyFormatter) const` \n", + " Prints the factor details.\n", + "- `bool equals(const NonlinearFactor& f, double tol) const` \n", + " Checks if two factors are equal within a specified tolerance." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "These factors provide a flexible way to enforce equality constraints in nonlinear optimization problems, making them useful for applications like SLAM, robotics, and control systems." + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/gtsam/nonlinear/doc/NonlinearFactor.ipynb b/gtsam/nonlinear/doc/NonlinearFactor.ipynb index 32b0685ca..a5986d9cc 100644 --- a/gtsam/nonlinear/doc/NonlinearFactor.ipynb +++ b/gtsam/nonlinear/doc/NonlinearFactor.ipynb @@ -5,14 +5,45 @@ "id": "381ccaaa", "metadata": {}, "source": [ - "# NonlinearFactor Class Documentation\n", - "\n", - "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n", + "# NonlinearFactor\n", "\n", "## Overview\n", "\n", - "The `NonlinearFactor` class in GTSAM is a fundamental component used in nonlinear optimization problems. It represents a factor in a factor graph, which is a key concept in probabilistic graphical models. The class is designed to work with nonlinear functions, making it suitable for a wide range of applications in robotics and computer vision, such as SLAM (Simultaneous Localization and Mapping) and structure from motion.\n", + "The `NonlinearFactor` class in GTSAM is a fundamental component used in nonlinear optimization. It represents a factor in a factor graph. The class is designed to work with nonlinear, continuous functions." 
+ ] + }, + { + "cell_type": "markdown", + "id": "94ffa16d", + "metadata": {}, + "source": [ + "## Mathematical Formulation\n", "\n", + "The `NonlinearFactor` is generally represented by a function $f(x)$, where $x$ is a vector of variables. The error is given by:\n", + "$$\n", + "e(x) = f(x)- z\n", + "$$\n", + "where $z$ is the observed measurement. The optimization process aims to minimize the sum of squared errors:\n", + "$$\n", + "\\min_x \\sum_i \\| e_i(x) \\|^2 \n", + "$$\n", + "\n", + "Linearization involves approximating $f(x)$ around a point $x_0$:\n", + "$$\n", + "f(x) \\approx f(x_0) + A\\delta x\n", + "$$\n", + "where $A$ is the Jacobian matrix of $f$ at $x_0$, and $\\delta x \\doteq x - x_0$. This leads to a linearized error:\n", + "$$\n", + "e(x) \\approx (f(x_0) + A\\delta x) - z = A\\delta x - b\n", + "$$\n", + "where $b\\doteq z - f(x_0)$ is the prediction error." + ] + }, + { + "cell_type": "markdown", + "id": "e3842ba3", + "metadata": {}, + "source": [ "## Key Functionalities\n", "\n", "### Error Calculation\n", @@ -35,35 +66,19 @@ "\n", "- **keys**: Provides access to the keys (or variable indices) involved in the factor. This is essential for understanding which variables the factor is connected to in the factor graph.\n", "\n", - "## Mathematical Formulation\n", - "\n", - "The `NonlinearFactor` is generally represented by a function $f(x)$, where $x$ is a vector of variables. The error is given by:\n", - "\n", - "$$ e(x) = z - f(x) $$\n", - "\n", - "where $z$ is the observed measurement. The optimization process aims to minimize the sum of squared errors:\n", - "\n", - "$$ \\min_x \\sum_i \\| e_i(x) \\|^2 $$\n", - "\n", - "Linearization involves approximating $f(x)$ around a point $x_0$:\n", - "\n", - "$$ f(x) \\approx f(x_0) + J(x - x_0) $$\n", - "\n", - "where $J$ is the Jacobian matrix of $f$ at $x_0$. This leads to a linearized error:\n", - "\n", - "$$ e(x) \\approx z - (f(x_0) + J(x - x_0)) $$\n", - "\n", "## Usage Notes\n", "\n", "- The `NonlinearFactor` class is typically used in conjunction with a `NonlinearFactorGraph`, which is a collection of such factors.\n", "- Users need to implement the `evaluateError` method in derived classes to define the specific measurement model.\n", - "- The class is designed to be flexible and extensible, allowing for custom factors to be created for specific applications.\n", - "\n", - "In summary, the `NonlinearFactor` class is a versatile and essential component for building and solving nonlinear optimization problems in GTSAM. Its ability to handle nonlinear relationships and provide linear approximations makes it suitable for a wide range of applications in robotics and beyond." + "- The class is designed to be flexible and extensible, allowing for custom factors to be created for specific applications." 
   ]
  }
 ],
- "metadata": {},
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
 "nbformat": 4,
 "nbformat_minor": 5
}
diff --git a/gtsam/nonlinear/doc/NonlinearFactorGraph.ipynb b/gtsam/nonlinear/doc/NonlinearFactorGraph.ipynb
index 643118c47..cb373ce9e 100644
--- a/gtsam/nonlinear/doc/NonlinearFactorGraph.ipynb
+++ b/gtsam/nonlinear/doc/NonlinearFactorGraph.ipynb
@@ -5,13 +5,11 @@
    "id": "a58d890a",
    "metadata": {},
    "source": [
-    "# NonlinearFactorGraph Class Documentation\n",
-    "\n",
-    "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+    "# NonlinearFactorGraph\n",
     "\n",
     "## Overview\n",
     "\n",
-    "The `NonlinearFactorGraph` class in GTSAM is a key component for representing and solving nonlinear factor graphs. A factor graph is a bipartite graph that represents the factorization of a function, commonly used in probabilistic graphical models. In the context of GTSAM, it is used to represent the structure of optimization problems, particularly in the domain of simultaneous localization and mapping (SLAM) and structure from motion (SfM).\n",
+    "The `NonlinearFactorGraph` class in GTSAM is a key component for representing and solving nonlinear factor graphs. A factor graph is a bipartite graph that represents the factorization of a function, commonly used in probabilistic graphical models. In the context of GTSAM, it is used to represent the structure of optimization problems, e.g., in the domain of simultaneous localization and mapping (SLAM) or structure from motion (SfM).\n",
     "\n",
     "## Key Functionalities\n",
     "\n",
@@ -35,32 +33,31 @@
    "- **empty**: Checks if the graph contains any factors.\n",
    "- **at**: Accesses a specific factor by its index.\n",
    "- **back**: Retrieves the last factor in the graph.\n",
-    "- **front**: Retrieves the first factor in the graph.\n",
-    "\n",
+    "- **front**: Retrieves the first factor in the graph."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "71b15f4c",
+   "metadata": {},
+   "source": [
    "### Optimization and Linearization\n",
    "\n",
-    "- **linearize**: Converts the nonlinear factor graph into a linear factor graph at a given linearization point. This is a crucial step in iterative optimization algorithms like Gauss-Newton or Levenberg-Marquardt.\n",
+    "- **linearize**: Converts the nonlinear factor graph into a linear factor graph (a `GaussianFactorGraph`) at a given linearization point. This is a crucial step in iterative optimization algorithms like [Gauss-Newton](./GaussNewtonOptimizer.ipynb) or [Levenberg-Marquardt](./LevenbergMarquardtOptimizer.ipynb).\n",
    " \n",
    " The linearization process involves computing the Jacobian matrices of the nonlinear functions, resulting in a linear approximation:\n",
    " \n",
-    " $$ f(x) \\approx f(x_0) + J(x - x_0) $$\n",
+    " $$ f(x) \\approx f(x_0) + A(x - x_0) $$\n",
    " \n",
-    " where $J$ is the Jacobian matrix evaluated at the point $x_0$.\n",
-    "\n",
-    "### Utilities\n",
-    "\n",
-    "- **equals**: Compares two nonlinear factor graphs for equality, considering both the structure and the factors themselves.\n",
-    "- **clone**: Creates a deep copy of the factor graph, including all its factors.\n",
-    "\n",
-    "## Usage Notes\n",
-    "\n",
-    "The `NonlinearFactorGraph` class is designed to be flexible and efficient, allowing users to construct complex optimization problems by adding and managing factors. It integrates seamlessly with GTSAM's optimization algorithms, enabling robust solutions to large-scale nonlinear problems.\n",
-    "\n",
-    "For effective use, it is important to understand the nature of the factors being added and the implications of linearization on the optimization process. The class provides a robust interface for managing the lifecycle of a factor graph, from construction through to optimization and solution extraction."
+    " where $A$ is the Jacobian matrix evaluated at the point $x_0$."
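+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "8a9b0c1d",
+   "metadata": {},
+   "source": [
+    "### Example\n",
+    "\n",
+    "A minimal sketch of building and linearizing a small pose graph in Python (assumes the `gtsam` Python wrapper; keys and measurements are illustrative):\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "import gtsam\n",
+    "\n",
+    "graph = gtsam.NonlinearFactorGraph()\n",
+    "noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))\n",
+    "graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), noise))\n",
+    "graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(1, 0, 0), noise))\n",
+    "\n",
+    "values = gtsam.Values()\n",
+    "values.insert(1, gtsam.Pose2(0, 0, 0))\n",
+    "values.insert(2, gtsam.Pose2(0.9, 0.1, 0))\n",
+    "\n",
+    "# Linear approximation of the whole graph at the given linearization point\n",
+    "linear_graph = graph.linearize(values)\n",
+    "print(linear_graph)  # a GaussianFactorGraph\n",
+    "```"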
   ]
  }
 ],
- "metadata": {},
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
 "nbformat": 4,
 "nbformat_minor": 5
}
diff --git a/gtsam/nonlinear/doc/NonlinearISAM.ipynb b/gtsam/nonlinear/doc/NonlinearISAM.ipynb
index c7296bab8..167cb94ff 100644
--- a/gtsam/nonlinear/doc/NonlinearISAM.ipynb
+++ b/gtsam/nonlinear/doc/NonlinearISAM.ipynb
@@ -5,69 +5,19 @@
    "id": "2b6fc012",
    "metadata": {},
    "source": [
-    "# NonlinearISAM Class Documentation\n",
-    "\n",
-    "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+    "# NonlinearISAM\n",
     "\n",
     "## Overview\n",
     "\n",
-    "The `NonlinearISAM` class in GTSAM is a powerful tool for incrementally solving nonlinear factor graphs. It is particularly useful in applications where the problem is continuously evolving, such as in SLAM (Simultaneous Localization and Mapping) or incremental structure-from-motion. The class leverages the iSAM (incremental Smoothing and Mapping) algorithm to efficiently update solutions as new measurements are added.\n",
-    "\n",
-    "## Key Features\n",
-    "\n",
-    "- **Incremental Updates**: `NonlinearISAM` allows for the efficient update of the solution when new factors are added to the graph. This is crucial for real-time applications where the problem is continuously changing.\n",
-    " \n",
-    "- **Batch Initialization**: The class can perform a full batch optimization to initialize the solution, which can then be refined incrementally.\n",
-    "\n",
-    "- **Marginalization**: It supports marginalizing out variables that are no longer needed, which helps in maintaining computational efficiency.\n",
-    "\n",
-    "## Main Methods\n",
-    "\n",
-    "### Initialization and Update\n",
-    "\n",
-    "- **`update`**: This method is central to the `NonlinearISAM` class. It allows for the addition of new factors and variables to the existing factor graph. The update is performed incrementally, leveraging previous computations to enhance efficiency.\n",
-    "\n",
-    "- **`estimate`**: After performing updates, this method retrieves the current best estimate of the variable values.\n",
-    "\n",
-    "### Batch Operations\n",
-    "\n",
-    "- **`batchStep`**: This method performs a full batch optimization, which can be useful for reinitializing the solution or when a significant change in the problem structure occurs.\n",
-    "\n",
-    "### Marginalization\n",
-    "\n",
-    "- **`marginalize`**: This method allows for the removal of variables from the factor graph. Marginalization is useful for reducing the problem size and maintaining efficiency.\n",
-    "\n",
-    "## Mathematical Background\n",
-    "\n",
-    "The `NonlinearISAM` class operates on factor graphs, which are bipartite graphs consisting of variable nodes and factor nodes. 
The goal is to find the configuration of variables that maximizes the product of all factors, often expressed as:\n",
-    "\n",
-    "$$\n",
-    "\\max_{\\mathbf{x}} \\prod_{i} \\phi_i(\\mathbf{x}_i)\n",
-    "$$\n",
-    "\n",
-    "where $\\phi_i(\\mathbf{x}_i)$ are the factors depending on subsets of variables $\\mathbf{x}_i$.\n",
-    "\n",
-    "The iSAM algorithm updates the solution by incrementally solving the linearized system of equations derived from the factor graph:\n",
-    "\n",
-    "$$\n",
-    "\\mathbf{A} \\Delta \\mathbf{x} = \\mathbf{b}\n",
-    "$$\n",
-    "\n",
-    "where $\\mathbf{A}$ is the Jacobian matrix of the factors, $\\Delta \\mathbf{x}$ is the update to the variable estimates, and $\\mathbf{b}$ is the residual vector.\n",
-    "\n",
-    "## Usage Notes\n",
-    "\n",
-    "- **Efficiency**: The incremental nature of `NonlinearISAM` makes it highly efficient for large-scale problems where new data is continuously being integrated.\n",
-    "\n",
-    "- **Robustness**: The ability to perform batch optimizations and marginalize variables provides robustness against changes in the problem structure.\n",
-    "\n",
-    "- **Applications**: This class is particularly suited for robotics and computer vision applications where real-time performance is critical.\n",
-    "\n",
-    "In summary, the `NonlinearISAM` class is a sophisticated tool for handling dynamic nonlinear optimization problems, offering both incremental and batch processing capabilities to efficiently manage evolving factor graphs."
+    "The `NonlinearISAM` class wraps the incremental factorization version of iSAM {cite:p}`http://dx.doi.org/10.1109/TRO.2008.2006706`. It is largely superseded by [iSAM2](./ISAM2.ipynb), but it is a simpler class with fewer bells and whistles, which might make it easier to debug. For background, also see the more recent booklet by {cite:t}`https://doi.org/10.1561/2300000043`.\n"
   ]
  }
 ],
- "metadata": {},
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
 "nbformat": 4,
 "nbformat_minor": 5
}
diff --git a/gtsam/nonlinear/doc/PriorFactor.ipynb b/gtsam/nonlinear/doc/PriorFactor.ipynb
index d485cd2aa..9974669fc 100644
--- a/gtsam/nonlinear/doc/PriorFactor.ipynb
+++ b/gtsam/nonlinear/doc/PriorFactor.ipynb
@@ -5,13 +5,11 @@
    "id": "ec35011c",
    "metadata": {},
    "source": [
-    "# GTSAM PriorFactor Class Documentation\n",
-    "\n",
-    "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+    "# PriorFactor\n",
     "\n",
     "## Overview\n",
     "\n",
-    "The `PriorFactor` class in GTSAM is a specialized factor used in probabilistic graphical models, particularly within the context of nonlinear optimization and estimation problems. It represents a prior belief about a variable in the form of a Gaussian distribution. This class is crucial for incorporating prior knowledge into the optimization process, which can significantly enhance the accuracy and robustness of the solutions.\n",
+    "The `PriorFactor` represents a prior belief about a variable in the form of a Gaussian distribution. This class is crucial for incorporating prior knowledge into the optimization process, which can significantly enhance the accuracy and robustness of the solutions.\n",
     "\n",
     "## Key Functionalities\n",
     "\n",
@@ -29,17 +27,13 @@
    "\n",
    "where $x$ is the estimated value, and $\mu$ is the prior mean. The error is then weighted by the noise model to form the contribution of this factor to the overall objective function.\n",
    "\n",
-    "### Jacobian Computation\n",
    "\n",
-    "In the context of optimization, the `PriorFactor` provides methods to compute the Jacobian of the error function. This is essential for gradient-based optimization algorithms, which rely on derivatives to iteratively improve the solution.\n",
-    "\n",
-    "### Contribution to Factor Graph\n",
-    "\n",
-    "The `PriorFactor` contributes to the factor graph by adding a term to the objective function that penalizes deviations from the prior. This term is integrated into the overall optimization problem, ensuring that the solution respects the prior knowledge encoded by the factor.\n",
+    "### Adding to a Factor Graph\n",
+    "\n",
+    "[NonlinearFactorGraph](./NonlinearFactorGraph.ipynb) has a templated method `addPrior` that provides a convenient way to add priors.\n",
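+    "\n",
+    "For example, in Python (a minimal sketch; `addPriorPose2` is assumed to be the wrapper's type-suffixed instantiation of the `addPrior` template, so verify the name against your installed version):\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "import gtsam\n",
+    "\n",
+    "graph = gtsam.NonlinearFactorGraph()\n",
+    "noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))\n",
+    "\n",
+    "# Equivalent ways to put a prior on the Pose2 variable with key 1:\n",
+    "graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), noise))\n",
+    "graph.addPriorPose2(1, gtsam.Pose2(0, 0, 0), noise)\n",
+    "```\n",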
    "\n",
    "## Usage Considerations\n",
    "\n",
-    "- **Noise Model**: The choice of noise model is critical as it determines how strongly the prior is enforced. A tighter noise model implies a stronger belief in the prior.\n",
+    "- **Noise Model**: The choice of noise model is critical as it determines how strongly the prior is enforced. A tighter noise model implies a stronger belief in the prior. *Note that very strong priors might make the condition number of the linear systems to be solved very high. In this case, consider a [NonlinearEquality](./NonlinearEquality.ipynb) factor instead.*\n",
    "- **Integration with Other Factors**: The `PriorFactor` is typically used in conjunction with other factors that model the system dynamics and measurements. It helps anchor the solution, especially in scenarios with limited or noisy measurements.\n",
    "- **Applications**: Common applications include SLAM (Simultaneous Localization and Mapping), where priors on initial poses or landmarks can significantly improve map accuracy and convergence speed.\n",
    "\n",
@@ -49,7 +43,11 @@
   ]
  }
 ],
- "metadata": {},
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
 "nbformat": 4,
 "nbformat_minor": 5
}
diff --git a/gtsam/nonlinear/doc/WhiteNoiseFactor.ipynb b/gtsam/nonlinear/doc/WhiteNoiseFactor.ipynb
index b34b170b3..fdbf91922 100644
--- a/gtsam/nonlinear/doc/WhiteNoiseFactor.ipynb
+++ b/gtsam/nonlinear/doc/WhiteNoiseFactor.ipynb
@@ -5,62 +5,63 @@
    "id": "5a0c879e",
    "metadata": {},
    "source": [
-    "# WhiteNoiseFactor Class Documentation\n",
+    "# WhiteNoiseFactor\n",
     "\n",
-    "*Disclaimer: This documentation was generated by AI and may require human revision for accuracy and completeness.*\n",
+    "*Below was partly generated with ChatGPT 4o and still needs to be verified.*\n",
     "\n",
     "## Overview\n",
     "\n",
-    "The `WhiteNoiseFactor` class in GTSAM is a specialized factor used in nonlinear optimization problems, particularly in the context of probabilistic graphical models. This class models the effect of white noise on a measurement, which is a common assumption in many estimation problems. The primary purpose of this class is to incorporate the uncertainty due to white noise into the optimization process.\n",
-    "\n",
-    "## Key Functionalities\n",
-    "\n",
-    "### Noise Modeling\n",
-    "\n",
-    "- **White Noise Assumption**: The class assumes that the noise affecting the measurements is Gaussian and uncorrelated, which is often referred to as \"white noise\". This assumption simplifies the mathematical treatment of noise in the optimization problem.\n",
-    "\n",
-    "### Factor Operations\n",
-    "\n",
-    "- **Error Calculation**: The `WhiteNoiseFactor` computes the error between the predicted and observed measurements, incorporating the noise model. This error is crucial for the optimization process as it influences the adjustment of variables to minimize the overall error in the system.\n",
-    "\n",
-    "- **Jacobian Computation**: The class provides methods to compute the Jacobian of the error function with respect to the variables involved. The Jacobian is essential for gradient-based optimization techniques, as it provides the necessary derivatives to guide the optimization algorithm.\n",
-    "\n",
-    "### Mathematical Formulation\n",
-    "\n",
-    "The error function for a `WhiteNoiseFactor` can be represented as:\n",
-    "\n",
-    "$$ e(x) = h(x) - z $$\n",
+    "The `WhiteNoiseFactor` in GTSAM is a binary nonlinear factor designed to estimate the parameters of zero-mean Gaussian white noise. It uses a **mean-precision parameterization**, where the mean $ \mu $ and precision $ \tau = 1/\sigma^2 $ are treated as variables to be optimized."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b40b3242",
+   "metadata": {},
+   "source": [
+    "## Parameterization\n",
     "\n",
+    "The factor models the negative log-likelihood of a Gaussian distribution with mean $ \\mu $ and precision $ \\tau $ as follows,\n",
+    "$$\n",
+    "f(z, \\mu, \\tau) = \\log(\\sqrt{2\\pi}) - 0.5 \\log(\\tau) + 0.5 \\tau (z - \\mu)^2\n",
+    "$$\n",
    "where:\n",
-    "- $e(x)$ is the error function.\n",
-    "- $h(x)$ is the predicted measurement based on the current estimate of the variables.\n",
-    "- $z$ is the observed measurement.\n",
+    "- $ z $: Measurement value (observed data).\n",
+    "- $ \\mu $: Mean of the Gaussian distribution (to be estimated).\n",
+    "- $ \\tau $: Precision of the Gaussian distribution ($ \\tau = 1/\\sigma^2 $, also to be estimated).\n",
    "\n",
-    "The noise is assumed to be Gaussian with zero mean and a certain covariance, which is often represented as:\n",
+    "This formulation allows the factor to optimize both the mean and precision of the noise model simultaneously."
+   ]
+  },
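+  {
+   "cell_type": "markdown",
+   "id": "9e8f7a6b",
+   "metadata": {},
+   "source": [
+    "### Sanity Check: Closed-Form Minimum\n",
+    "\n",
+    "As an illustrative sanity check (this derivation is not part of the header), summing $f$ over $N$ measurements $z_1, \\ldots, z_N$ and setting the gradients to zero recovers the sample statistics:\n",
+    "$$\n",
+    "\\frac{\\partial}{\\partial \\mu} \\sum_i f(z_i, \\mu, \\tau) = -\\tau \\sum_i (z_i - \\mu) = 0 \\quad \\Rightarrow \\quad \\mu^* = \\frac{1}{N} \\sum_i z_i\n",
+    "$$\n",
+    "$$\n",
+    "\\frac{\\partial}{\\partial \\tau} \\sum_i f(z_i, \\mu, \\tau) = -\\frac{N}{2\\tau} + \\frac{1}{2} \\sum_i (z_i - \\mu)^2 = 0 \\quad \\Rightarrow \\quad \\tau^* = \\frac{N}{\\sum_i (z_i - \\mu^*)^2},\n",
+    "$$\n",
+    "so the factor's minimum coincides with the maximum-likelihood estimates of the mean and precision, as expected."
+   ]
+  },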
+  {
+   "cell_type": "markdown",
+   "id": "2f36abdb",
+   "metadata": {},
+   "source": [
+    "## Use Case: Estimating IMU Noise Characteristics\n",
    "\n",
-    "$$ \\text{Cov}(e) = \\sigma^2 I $$\n",
+    "The `WhiteNoiseFactor` can be used in system identification tasks, such as estimating the noise characteristics of an IMU. Here's how it would work:\n",
    "\n",
-    "where $\\sigma^2$ is the variance of the noise and $I$ is the identity matrix.\n",
+    "1. **Define the Measurement**:\n",
+    "   - Collect a series of IMU measurements (e.g., accelerometer or gyroscope readings) under known conditions (e.g., stationary or constant velocity).\n",
    "\n",
-    "### Optimization Integration\n",
+    "2. **Set Up the Factor Graph**:\n",
+    "   - Add `WhiteNoiseFactor` instances to the factor graph for each measurement, linking the observed value $ z $ to the mean and precision variables.\n",
    "\n",
-    "- **Factor Graphs**: The `WhiteNoiseFactor` is integrated into factor graphs, which are a key structure in GTSAM for representing and solving large-scale estimation problems. Each factor in the graph contributes to the overall error that the optimization process seeks to minimize.\n",
+    "3. 
**Optimize the Graph**:\n", + " - Use GTSAM's nonlinear optimization tools to solve for the mean $ \\mu $ and precision $ \\tau $ that best explain the observed measurements.\n", "\n", - "- **Nonlinear Optimization**: The class is designed to work seamlessly with GTSAM's nonlinear optimization framework, allowing it to handle complex, real-world estimation problems that involve non-linear relationships between variables.\n", - "\n", - "## Usage Notes\n", - "\n", - "- **Assumptions**: Users should ensure that the white noise assumption is valid for their specific application, as deviations from this assumption can lead to suboptimal estimation results.\n", - "\n", - "- **Integration**: The `WhiteNoiseFactor` should be used in conjunction with other factors and variables in a factor graph to effectively model the entire system being estimated.\n", - "\n", - "- **Performance**: The efficiency of the optimization process can be influenced by the choice of noise model and the structure of the factor graph. Proper tuning and validation are recommended to achieve optimal performance.\n", - "\n", - "In summary, the `WhiteNoiseFactor` class is a powerful tool in GTSAM for modeling and mitigating the effects of white noise in nonlinear estimation problems. Its integration into factor graphs and compatibility with GTSAM's optimization algorithms make it a versatile component for a wide range of applications." + "4. **Extract Noise Characteristics**:\n", + " - The optimized mean $ \\mu $ represents the bias in the sensor measurements.\n", + " - The optimized precision $ \\tau $ can be inverted to compute the standard deviation $ \\sigma = 1/\\sqrt{\\tau} $, which represents the noise level." ] } ], - "metadata": {}, + "metadata": { + "language_info": { + "name": "python" + } + }, "nbformat": 4, "nbformat_minor": 5 } diff --git a/gtsam/nonlinear/nonlinear.md b/gtsam/nonlinear/nonlinear.md index 99a9907f7..827b76bba 100644 --- a/gtsam/nonlinear/nonlinear.md +++ b/gtsam/nonlinear/nonlinear.md @@ -4,48 +4,50 @@ The `nonlinear` module in GTSAM includes a comprehensive set of tools for nonlin ## Core Classes -- **[NonlinearFactorGraph](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearFactorGraph.h)**: Represents the optimization problem as a graph of factors. -- **[NonlinearFactor](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearFactor.h)**: Base class for all nonlinear factors. -- **[NoiseModelFactor](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NoiseModelFactor.h)**: Base class for factors with noise models. -- **[Values](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/Values.h)**: Container for variable assignments used in optimization. +- [NonlinearFactorGraph](doc/NonlinearFactorGraph.ipynb): Represents the optimization problem as a graph of factors. +- [NonlinearFactor](doc/NonlinearFactor.ipynb): Base class for all nonlinear factors. +- [NoiseModelFactor](doc/NonlinearFactor.ipynb): Base class for factors with noise models. +- [Values](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/Values.h): Container for variable assignments used in optimization. ## Batch Optimizers -- **[NonlinearOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearOptimizer.h)**: Base class for all batch optimizers. - - **[NonlinearOptimizerParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearOptimizerParams.h)**: Base parameters class for all optimizers. 
+- [NonlinearOptimizer](doc/NonlinearOptimizer.ipynb): Base class for all batch optimizers. + - [NonlinearOptimizerParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearOptimizerParams.h): Base parameters class for all optimizers. -- **[GaussNewtonOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GaussNewtonOptimizer.h)**: Implements Gauss-Newton optimization. - - **[GaussNewtonParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GaussNewtonParams.h)**: Parameters for Gauss-Newton optimization. +- [GaussNewtonOptimizer](doc/GaussNewtonOptimizer.ipynb): Implements Gauss-Newton optimization. + - [GaussNewtonParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GaussNewtonParams.h): Parameters for Gauss-Newton optimization. -- **[LevenbergMarquardtOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LevenbergMarquardtOptimizer.h)**: Implements Levenberg-Marquardt optimization. - - **[LevenbergMarquardtParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LevenbergMarquardtParams.h)**: Parameters for Levenberg-Marquardt optimization. +- [LevenbergMarquardtOptimizer](doc/LevenbergMarquardtOptimizer.ipynb): Implements Levenberg-Marquardt optimization. + - [LevenbergMarquardtParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LevenbergMarquardtParams.h): Parameters for Levenberg-Marquardt optimization. -- **[DoglegOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/DoglegOptimizer.h)**: Implements Powell's Dogleg optimization. - - **[DoglegParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/DoglegParams.h)**: Parameters for Dogleg optimization. +- [DoglegOptimizer](doc/DoglegOptimizer.ipynb): Implements Powell's Dogleg optimization. + - [DoglegParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/DoglegParams.h): Parameters for Dogleg optimization. -- **[GncOptimizer](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GncOptimizer.h)**: Implements robust optimization using Graduated Non-Convexity. - - **[GncParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GncParams.h)**: Parameters for Graduated Non-Convexity optimization. +- [GncOptimizer](doc/GncOptimizer.ipynb): Implements robust optimization using Graduated Non-Convexity. + - [GncParams](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GncParams.h): Parameters for Graduated Non-Convexity optimization. ## Incremental Optimizers -- **[ISAM2](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ISAM2.h)**: Incremental Smoothing and Mapping 2, with fluid relinearization. - - **[ISAM2Params](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ISAM2Params.h)**: Parameters controlling the ISAM2 algorithm. - - **[ISAM2Result](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ISAM2Result.h)**: Results from ISAM2 update operations. -- **[NonlinearISAM](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearISAM.h)**: Original iSAM implementation (mostly superseded by ISAM2). +- [ISAM2](doc/ISAM2.ipynb): Incremental Smoothing and Mapping 2, with fluid relinearization. + - [ISAM2Params](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ISAM2Params.h): Parameters controlling the ISAM2 algorithm. + - [ISAM2Result](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ISAM2Result.h): Results from ISAM2 update operations. 
+- [NonlinearISAM](doc/NonlinearISAM.ipynb): Original iSAM implementation (mostly superseded by ISAM2). ## Specialized Factors -- **[PriorFactor](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/PriorFactor.h)**: Imposes a prior constraint on a variable. -- **[NonlinearEquality](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/NonlinearEquality.h)**: Enforces equality constraints between variables. -- **[LinearContainerFactor](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/LinearContainerFactor.h)**: Wraps linear factors for inclusion in nonlinear factor graphs. +- [PriorFactor](doc/PriorFactor.ipynb): Imposes a prior constraint on a variable. +- [NonlinearEquality](doc/NonlinearEquality.ipynb): Enforces equality constraints between variables. +- [LinearContainerFactor](doc/LinearContainerFactor.ipynb): Wraps linear factors for inclusion in nonlinear factor graphs. +- [WhiteNoiseFactor](doc/WhiteNoiseFactor.ipynb): Binary factor to estimate parameters of zero-mean Gaussian white noise. ## Filtering and Smoothing -- **[ExtendedKalmanFilter](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/ExtendedKalmanFilter.h)**: Nonlinear Kalman filter implementation. -- **[FixedLagSmoother](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/FixedLagSmoother.h)**: Base class for fixed-lag smoothers. -- **[BatchFixedLagSmoother](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/BatchFixedLagSmoother.h)**: Implementation of a fixed-lag smoother using batch optimization. +- [ExtendedKalmanFilter](doc/ExtendedKalmanFilter.ipynb): Nonlinear Kalman filter implementation. +- [FixedLagSmoother](doc/FixedLagSmoother.ipynb): Base class for fixed-lag smoothers. + - [BatchFixedLagSmoother](doc/BatchFixedLagSmoother.ipynb): Implementation of a fixed-lag smoother using batch optimization. + - [IncrementalFixedLagSmoother](doc/IncrementalFixedLagSmoother.ipynb): Implementation of a fixed-lag smoother using iSAM2. ## Analysis and Visualization -- **[Marginals](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/Marginals.h)**: Computes marginal covariances from optimization results. -- **[GraphvizFormatting](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GraphvizFormatting.h)**: Provides customization for factor graph visualization. \ No newline at end of file +- [Marginals](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/Marginals.h): Computes marginal covariances from optimization results. +- [GraphvizFormatting](https://github.com/borglab/gtsam/blob/develop/gtsam/nonlinear/GraphvizFormatting.h): Provides customization for factor graph visualization. \ No newline at end of file