Update ceres to the latest version in g3

Please pay special attention to the changes in Android.mk.
They are the only real changes I had to make.

Bug: 16953678

Change-Id: I44a644358e779aaff99a2ea822387fe49ac26888
diff --git a/Android.mk b/Android.mk
index ac262aa..a52c766 100644
--- a/Android.mk
+++ b/Android.mk
@@ -29,141 +29,208 @@
 # Author: settinger@google.com (Scott Ettinger)
 #         keir@google.com (Keir Mierle)
 #
-# Builds Ceres for Android, using the standard toolchain (not standalone). It
-# uses STLPort instead of GNU C++. This is useful for anyone wishing to ship
-# GPL-free code. This cannot build the tests or other parts of Ceres; only the
-# core libraries. If you need a more complete Ceres build, consider using the
-# CMake toolchain (noting that the standalone toolchain doesn't work with
-# STLPort).
+# Builds Ceres for Android, using the standard toolchain (not
+# standalone). It uses LLVM's libc++ as the standard library. It is a
+# modern BSD licensed implementation of the standard c++ library. We
+# do this to avoid any licensing issues that may arise from using
+# GCC's libstdc++ which is licensed under GPL3.
 #
-# Reducing binary size:
+# Building
+# --------
 #
-# This build includes the Schur specializations, which cause binary bloat. If
-# you don't need them for your application, consider adding:
+# You will have to specify the environment EIGEN_PATH to point to the
+# Eigen sources when building. For example:
+#
+#   EIGEN_PATH=/home/keir/src/eigen-3.0.5 ndk-build -j
+#
+# It is also possible to specify CERES_EXTRA_DEFINES, in case you need
+# to pass more definitions to the C compiler.
+#
+# Using the library
+# -----------------
+# Copy the static library:
+#
+#   ../obj/local/armeabi-v7a/libceres.a
+#
+# into your own project, then link it into your binary in your
+# Android.mk file.
+#
+# Reducing binary size
+# --------------------
+# This build includes the Schur specializations, which increase the
+# size of the binary. If you don't need them for your application,
+# consider adding:
 #
 #   -DCERES_RESTRICT_SCHUR_SPECIALIZATION
 #
-# to the LOCAL_CFLAGS variable below, and commenting out all the
-# generated/schur_eliminator_2_2_2.cc-alike files, leaving only the _d_d_d one.
+# to the LOCAL_CFLAGS variable below.
 #
-# Similarly if you do not need the line search minimizer, consider adding
-#
-#   -DCERES_NO_LINE_SEARCH_MINIMIZER
+# Changing the logging library
+# ----------------------------
+# Ceres Solver ships with a replacement for glog that provides a
+# simple and small implementation that builds on Android. However, if
+# you wish to supply a header only version yourself, then you may
+# define CERES_GLOG_DIR to point to it.
 
 LOCAL_PATH := $(call my-dir)
-include $(CLEAR_VARS)
 
-LOCAL_MODULE := libceres
+EIGEN_PATH := external/eigen
+CERES_INCLUDE_PATHS := $(CERES_EXTRA_INCLUDES)
+CERES_INCLUDE_PATHS += $(LOCAL_PATH)/internal
+CERES_INCLUDE_PATHS += $(LOCAL_PATH)/internal/ceres
+CERES_INCLUDE_PATHS += $(LOCAL_PATH)/include
+CERES_INCLUDE_PATHS += $(LOCAL_PATH)/config
+
+# Use the alternate glog implementation if provided by the user.
+ifdef CERES_GLOG_DIR
+  CERES_INCLUDE_PATHS += $(CERES_GLOG_DIR)
+else
+  CERES_INCLUDE_PATHS += $(LOCAL_PATH)/internal/ceres/miniglog
+endif
+CERES_SRC_PATH := internal/ceres
+
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := $(CERES_INCLUDE_PATHS)
+LOCAL_C_INCLUDES += $(EIGEN_PATH)
 
 LOCAL_SDK_VERSION := 17
 LOCAL_NDK_STL_VARIANT := c++_static
 
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/internal \
-                    $(LOCAL_PATH)/internal/ceres \
-                    $(LOCAL_PATH)/internal/ceres/miniglog \
-                    $(LOCAL_PATH)/include \
-                    external/eigen \
-
 LOCAL_CPP_EXTENSION := .cc
-LOCAL_CPPFLAGS := -DCERES_NO_PROTOCOL_BUFFERS \
-                  -DCERES_NO_LAPACK \
-                  -DCERES_NO_SUITESPARSE \
-                  -DCERES_NO_GFLAGS \
-                  -DCERES_NO_THREADS \
-                  -DCERES_NO_CXSPARSE \
-                  -DCERES_NO_TR1 \
-                  -DCERES_WORK_AROUND_ANDROID_NDK_COMPILER_BUG \
-                  -DMAX_LOG_LEVEL=-1 \
-                  -O3 -w
+LOCAL_CFLAGS := $(CERES_EXTRA_DEFINES) \
+                -DCERES_NO_LAPACK \
+                -DCERES_NO_SUITESPARSE \
+                -DCERES_NO_THREADS \
+                -DCERES_NO_CXSPARSE \
+                -DCERES_STD_UNORDERED_MAP
+
+# The default release optimization level is O2, but we want O3.
+LOCAL_CFLAGS += -O3 -w
+
+# Set the logging max level to -1, which disables all logging.
+LOCAL_CFLAGS += -DMAX_LOG_LEVEL=-1
 
 # On Android NDK 8b, GCC gives spurrious warnings about ABI incompatibility for
 # which there is no solution. Hide the warning instead.
 LOCAL_CFLAGS += -Wno-psabi
 
-LOCAL_SRC_FILES := internal/ceres/array_utils.cc \
-                   internal/ceres/blas.cc \
-                   internal/ceres/block_evaluate_preparer.cc \
-                   internal/ceres/block_jacobian_writer.cc \
-                   internal/ceres/block_jacobi_preconditioner.cc \
-                   internal/ceres/block_random_access_dense_matrix.cc \
-                   internal/ceres/block_random_access_matrix.cc \
-                   internal/ceres/block_random_access_sparse_matrix.cc \
-                   internal/ceres/block_sparse_matrix.cc \
-                   internal/ceres/block_structure.cc \
-                   internal/ceres/canonical_views_clustering.cc \
-                   internal/ceres/cgnr_solver.cc \
-                   internal/ceres/compressed_row_jacobian_writer.cc \
-                   internal/ceres/compressed_row_sparse_matrix.cc \
-                   internal/ceres/conditioned_cost_function.cc \
-                   internal/ceres/conjugate_gradients_solver.cc \
-                   internal/ceres/coordinate_descent_minimizer.cc \
-                   internal/ceres/corrector.cc \
-                   internal/ceres/dense_normal_cholesky_solver.cc \
-                   internal/ceres/dense_qr_solver.cc \
-                   internal/ceres/dense_sparse_matrix.cc \
-                   internal/ceres/detect_structure.cc \
-                   internal/ceres/dogleg_strategy.cc \
-                   internal/ceres/evaluator.cc \
-                   internal/ceres/file.cc \
-                   internal/ceres/gradient_checking_cost_function.cc \
-                   internal/ceres/implicit_schur_complement.cc \
-                   internal/ceres/iterative_schur_complement_solver.cc \
-                   internal/ceres/lapack.cc \
-                   internal/ceres/levenberg_marquardt_strategy.cc \
-                   internal/ceres/line_search.cc \
-                   internal/ceres/line_search_direction.cc \
-                   internal/ceres/line_search_minimizer.cc \
-                   internal/ceres/linear_least_squares_problems.cc \
-                   internal/ceres/linear_operator.cc \
-                   internal/ceres/linear_solver.cc \
-                   internal/ceres/local_parameterization.cc \
-                   internal/ceres/loss_function.cc \
-                   internal/ceres/low_rank_inverse_hessian.cc \
-                   internal/ceres/minimizer.cc \
-                   internal/ceres/normal_prior.cc \
-                   internal/ceres/parameter_block_ordering.cc \
-                   internal/ceres/partitioned_matrix_view.cc \
-                   internal/ceres/polynomial.cc \
-                   internal/ceres/preconditioner.cc \
-                   internal/ceres/problem.cc \
-                   internal/ceres/problem_impl.cc \
-                   internal/ceres/program.cc \
-                   internal/ceres/residual_block.cc \
-                   internal/ceres/residual_block_utils.cc \
-                   internal/ceres/runtime_numeric_diff_cost_function.cc \
-                   internal/ceres/schur_complement_solver.cc \
-                   internal/ceres/schur_eliminator.cc \
-                   internal/ceres/schur_jacobi_preconditioner.cc \
-                   internal/ceres/scratch_evaluate_preparer.cc \
-                   internal/ceres/solver.cc \
-                   internal/ceres/solver_impl.cc \
-                   internal/ceres/sparse_matrix.cc \
-                   internal/ceres/sparse_normal_cholesky_solver.cc \
-                   internal/ceres/split.cc \
-                   internal/ceres/stringprintf.cc \
-                   internal/ceres/suitesparse.cc \
-                   internal/ceres/triplet_sparse_matrix.cc \
-                   internal/ceres/trust_region_minimizer.cc \
-                   internal/ceres/trust_region_strategy.cc \
-                   internal/ceres/types.cc \
-                   internal/ceres/visibility_based_preconditioner.cc \
-                   internal/ceres/visibility.cc \
-                   internal/ceres/wall_time.cc \
-                   internal/ceres/generated/schur_eliminator_d_d_d.cc \
-                   internal/ceres/generated/schur_eliminator_2_2_2.cc \
-                   internal/ceres/generated/schur_eliminator_2_2_3.cc \
-                   internal/ceres/generated/schur_eliminator_2_2_4.cc \
-                   internal/ceres/generated/schur_eliminator_2_2_d.cc \
-                   internal/ceres/generated/schur_eliminator_2_3_3.cc \
-                   internal/ceres/generated/schur_eliminator_2_3_4.cc \
-                   internal/ceres/generated/schur_eliminator_2_3_9.cc \
-                   internal/ceres/generated/schur_eliminator_2_3_d.cc \
-                   internal/ceres/generated/schur_eliminator_2_4_3.cc \
-                   internal/ceres/generated/schur_eliminator_2_4_4.cc \
-                   internal/ceres/generated/schur_eliminator_2_4_d.cc \
-                   internal/ceres/generated/schur_eliminator_4_4_2.cc \
-                   internal/ceres/generated/schur_eliminator_4_4_3.cc \
-                   internal/ceres/generated/schur_eliminator_4_4_4.cc \
-                   internal/ceres/generated/schur_eliminator_4_4_d.cc
+LOCAL_SRC_FILES := $(CERES_SRC_PATH)/array_utils.cc \
+                   $(CERES_SRC_PATH)/blas.cc \
+                   $(CERES_SRC_PATH)/block_evaluate_preparer.cc \
+                   $(CERES_SRC_PATH)/block_jacobian_writer.cc \
+                   $(CERES_SRC_PATH)/block_jacobi_preconditioner.cc \
+                   $(CERES_SRC_PATH)/block_random_access_dense_matrix.cc \
+                   $(CERES_SRC_PATH)/block_random_access_diagonal_matrix.cc \
+                   $(CERES_SRC_PATH)/block_random_access_matrix.cc \
+                   $(CERES_SRC_PATH)/block_random_access_sparse_matrix.cc \
+                   $(CERES_SRC_PATH)/block_sparse_matrix.cc \
+                   $(CERES_SRC_PATH)/block_structure.cc \
+                   $(CERES_SRC_PATH)/callbacks.cc \
+                   $(CERES_SRC_PATH)/canonical_views_clustering.cc \
+                   $(CERES_SRC_PATH)/cgnr_solver.cc \
+                   $(CERES_SRC_PATH)/compressed_row_jacobian_writer.cc \
+                   $(CERES_SRC_PATH)/compressed_row_sparse_matrix.cc \
+                   $(CERES_SRC_PATH)/conditioned_cost_function.cc \
+                   $(CERES_SRC_PATH)/conjugate_gradients_solver.cc \
+                   $(CERES_SRC_PATH)/coordinate_descent_minimizer.cc \
+                   $(CERES_SRC_PATH)/corrector.cc \
+                   $(CERES_SRC_PATH)/dense_normal_cholesky_solver.cc \
+                   $(CERES_SRC_PATH)/dense_qr_solver.cc \
+                   $(CERES_SRC_PATH)/dense_sparse_matrix.cc \
+                   $(CERES_SRC_PATH)/detect_structure.cc \
+                   $(CERES_SRC_PATH)/dogleg_strategy.cc \
+                   $(CERES_SRC_PATH)/dynamic_compressed_row_jacobian_writer.cc \
+                   $(CERES_SRC_PATH)/dynamic_compressed_row_sparse_matrix.cc \
+                   $(CERES_SRC_PATH)/evaluator.cc \
+                   $(CERES_SRC_PATH)/file.cc \
+                   $(CERES_SRC_PATH)/gradient_checking_cost_function.cc \
+                   $(CERES_SRC_PATH)/implicit_schur_complement.cc \
+                   $(CERES_SRC_PATH)/iterative_schur_complement_solver.cc \
+                   $(CERES_SRC_PATH)/lapack.cc \
+                   $(CERES_SRC_PATH)/levenberg_marquardt_strategy.cc \
+                   $(CERES_SRC_PATH)/line_search.cc \
+                   $(CERES_SRC_PATH)/line_search_direction.cc \
+                   $(CERES_SRC_PATH)/line_search_minimizer.cc \
+                   $(CERES_SRC_PATH)/linear_least_squares_problems.cc \
+                   $(CERES_SRC_PATH)/linear_operator.cc \
+                   $(CERES_SRC_PATH)/linear_solver.cc \
+                   $(CERES_SRC_PATH)/local_parameterization.cc \
+                   $(CERES_SRC_PATH)/loss_function.cc \
+                   $(CERES_SRC_PATH)/low_rank_inverse_hessian.cc \
+                   $(CERES_SRC_PATH)/minimizer.cc \
+                   $(CERES_SRC_PATH)/normal_prior.cc \
+                   $(CERES_SRC_PATH)/parameter_block_ordering.cc \
+                   $(CERES_SRC_PATH)/partitioned_matrix_view.cc \
+                   $(CERES_SRC_PATH)/polynomial.cc \
+                   $(CERES_SRC_PATH)/preconditioner.cc \
+                   $(CERES_SRC_PATH)/problem.cc \
+                   $(CERES_SRC_PATH)/problem_impl.cc \
+                   $(CERES_SRC_PATH)/program.cc \
+                   $(CERES_SRC_PATH)/reorder_program.cc \
+                   $(CERES_SRC_PATH)/residual_block.cc \
+                   $(CERES_SRC_PATH)/residual_block_utils.cc \
+                   $(CERES_SRC_PATH)/schur_complement_solver.cc \
+                   $(CERES_SRC_PATH)/schur_eliminator.cc \
+                   $(CERES_SRC_PATH)/schur_jacobi_preconditioner.cc \
+                   $(CERES_SRC_PATH)/scratch_evaluate_preparer.cc \
+                   $(CERES_SRC_PATH)/solver.cc \
+                   $(CERES_SRC_PATH)/solver_impl.cc \
+                   $(CERES_SRC_PATH)/sparse_matrix.cc \
+                   $(CERES_SRC_PATH)/sparse_normal_cholesky_solver.cc \
+                   $(CERES_SRC_PATH)/split.cc \
+                   $(CERES_SRC_PATH)/stringprintf.cc \
+                   $(CERES_SRC_PATH)/summary_utils.cc \
+                   $(CERES_SRC_PATH)/suitesparse.cc \
+                   $(CERES_SRC_PATH)/triplet_sparse_matrix.cc \
+                   $(CERES_SRC_PATH)/trust_region_minimizer.cc \
+                   $(CERES_SRC_PATH)/trust_region_strategy.cc \
+                   $(CERES_SRC_PATH)/types.cc \
+                   $(CERES_SRC_PATH)/visibility_based_preconditioner.cc \
+                   $(CERES_SRC_PATH)/visibility.cc \
+                   $(CERES_SRC_PATH)/wall_time.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_d_d_d.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_2_2.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_2_3.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_2_4.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_2_d.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_3_3.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_3_4.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_3_9.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_3_d.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_3.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_4.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_8.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_9.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_d.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_d_d.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_4_4_2.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_4_4_3.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_4_4_4.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_4_4_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_d_d_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_2_2.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_2_3.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_2_4.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_2_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_3_3.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_3_4.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_3_9.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_3_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_3.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_4.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_8.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_9.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_d_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_4_4_2.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_4_4_3.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_4_4_4.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_4_4_d.cc
 
+ifndef CERES_GLOG_DIR
+LOCAL_SRC_FILES += $(CERES_SRC_PATH)/miniglog/glog/logging.cc
+endif
+
+LOCAL_MODULE := libceres
 include $(BUILD_STATIC_LIBRARY)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b89d55a..a262c5b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -26,7 +26,8 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 #
-# Author: keir@google.com (Keir Mierle)
+# Authors: keir@google.com (Keir Mierle)
+#          alexs.mac@gmail.com (Alex Stewart)
 
 CMAKE_MINIMUM_REQUIRED(VERSION 2.8.0)
 CMAKE_POLICY(VERSION 2.8)
@@ -63,9 +64,18 @@
   ENDIF (NOT EXISTS ${LOCAL_GIT_DIRECTORY}/hooks/commit-msg)
 ENDIF (EXISTS ${LOCAL_GIT_DIRECTORY})
 
+# Make CMake aware of the cmake folder for local FindXXX scripts,
+# append rather than set in case the user has passed their own
+# additional paths via -D.
+LIST(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
+INCLUDE(UpdateCacheVariable)
+
 SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+# Set postfixes for generated libraries based on buildtype.
+SET(CMAKE_RELEASE_POSTFIX "")
+SET(CMAKE_DEBUG_POSTFIX "-debug")
 
 # Important: Always bump the second number (e.g. 1.3.x to 1.4.0) for any
 # release that changes the ABI. The ABI changes for almost any modification to
@@ -75,16 +85,19 @@
 # For versions without ABI changes, bump the smallest number in CERES_VERSION,
 # but leave the CERES_ABI_VERSION unchanged.
 SET(CERES_VERSION_MAJOR 1)
-SET(CERES_VERSION_MINOR 7)
+SET(CERES_VERSION_MINOR 9)
 SET(CERES_VERSION_PATCH 0)
 SET(CERES_VERSION
     ${CERES_VERSION_MAJOR}.${CERES_VERSION_MINOR}.${CERES_VERSION_PATCH})
-SET(CERES_ABI_VERSION 1.7.0)
+SET(CERES_ABI_VERSION 1.9.0)
 
 ENABLE_TESTING()
 
-OPTION(MINIGLOG "Use a stripped down version of glog" OFF)
+OPTION(MINIGLOG "Use a stripped down version of glog." OFF)
 OPTION(GFLAGS "Enable Google Flags." ON)
+OPTION(SUITESPARSE "Enable SuiteSparse." ON)
+OPTION(CXSPARSE "Enable CXSparse." ON)
+OPTION(LAPACK "Enable use of LAPACK." ON)
 # Template specializations for the Schur complement based solvers. If
 # compile time, binary size or compiler performance is an issue, you
 # may consider disabling this.
@@ -94,484 +107,445 @@
        ON)
 # Multithreading using OpenMP
 OPTION(OPENMP "Enable threaded solving in Ceres (requires OpenMP)" ON)
-# TODO(sameeragarwal): Replace this with a positive option instead?
-OPTION(DISABLE_TR1
-       "Don't use TR1. This replaces some hash tables with sets. Slower."
-       OFF)
-# Line search minimizer is useful for large scale problems or when
-# sparse linear algebra libraries are not available. If compile time,
-# binary size or compiler performance is an issue, consider disabling
-# this.
-OPTION(LINE_SEARCH_MINIMIZER "Enable the line search minimizer." ON)
+OPTION(EIGENSPARSE
+  "Enable the use of Eigen as a sparse linear algebra library for
+   solving the nonlinear least squares problems. Enabling this
+   option will result in an LGPL licensed version of Ceres Solver
+   as the Simplicial Cholesky factorization in Eigen is licensed under the LGPL.
+   This does not affect the covariance estimation algorithm, as it
+   depends on the sparse QR factorization algorithm, which is licensed
+   under the MPL."
+  OFF)
 OPTION(BUILD_TESTING "Enable tests" ON)
 OPTION(BUILD_DOCUMENTATION "Build User's Guide (html)" OFF)
 OPTION(BUILD_EXAMPLES "Build examples" ON)
+OPTION(BUILD_SHARED_LIBS "Build Ceres as a shared library." OFF)
+IF (MSVC)
+  OPTION(MSVC_USE_STATIC_CRT
+    "MS Visual Studio: Use static C-Run Time Library in place of shared." OFF)
 
-# Default locations to search for on various platforms.
+  IF (BUILD_TESTING AND BUILD_SHARED_LIBS)
+    MESSAGE(
+      "-- Disabling tests. The flags BUILD_TESTING and BUILD_SHARED_LIBS"
+      " are incompatible with MSVC."
+      )
+    UPDATE_CACHE_VARIABLE(BUILD_TESTING OFF)
+  ENDIF (BUILD_TESTING AND BUILD_SHARED_LIBS)
+ENDIF (MSVC)
 
-# Libraries
-LIST(APPEND CMAKE_LIBRARY_PATH /opt/local/lib)
-LIST(APPEND CMAKE_LIBRARY_PATH /opt/local/lib/ufsparse) # Mac OS X
-LIST(APPEND CMAKE_LIBRARY_PATH /usr/lib)
-LIST(APPEND CMAKE_LIBRARY_PATH /usr/lib/atlas)
-LIST(APPEND CMAKE_LIBRARY_PATH /usr/lib/suitesparse) # Ubuntu
-LIST(APPEND CMAKE_LIBRARY_PATH /usr/lib64/atlas)
-LIST(APPEND CMAKE_LIBRARY_PATH /usr/local/homebrew/lib) # Mac OS X
-LIST(APPEND CMAKE_LIBRARY_PATH /usr/local/lib)
-LIST(APPEND CMAKE_LIBRARY_PATH /usr/local/lib/suitesparse)
+# Use ios-cmake to build a static library for iOS
+#
+# We need to add isysroot to force cmake to find the toolchains from the iOS SDK
+# instead of using the standard ones. We also add the -mios-simulator-version flag so clang
+# knows we are building for ios simulator but not mac.
+#
+# You can build for OS (armv7, armv7s, arm64), SIMULATOR (i386) or SIMULATOR64 (x86_64)
+# separately and use lipo to merge them into one static library.
+#
+# Some features/algorithms are not available in the iOS version, and the
+# minimum supported iOS version is now 6.0.
+#
+# Use cmake ../ceres-solver -DCMAKE_TOOLCHAIN_FILE=../ceres-solver/cmake/iOS.cmake \
+# -DIOS_PLATFORM=PLATFORM -DEIGEN_INCLUDE_DIR=/path/to/eigen/header
+# to config the cmake. The PLATFORM can be one of OS, SIMULATOR and SIMULATOR64.
+# Check the documentation in iOS.cmake to find more options.
+#
+# After building, you will get a single library: libceres.a, which
+# you need to add to your Xcode project.
+#
+# If you use the lapack and blas, then you also need to add Accelerate.framework
+# to your Xcode project's linking dependency.
+IF (IOS)
+  MESSAGE(STATUS "Building Ceres for iOS platform: ${IOS_PLATFORM}")
 
-# Headers
-LIST(APPEND CMAKE_INCLUDE_PATH /opt/local/include)
-LIST(APPEND CMAKE_INCLUDE_PATH /opt/local/include/ufsparse) # Mac OS X
-LIST(APPEND CMAKE_INCLUDE_PATH /opt/local/var/macports/software/eigen3/opt/local/include/eigen3) # Mac OS X
-LIST(APPEND CMAKE_INCLUDE_PATH /usr/include)
-LIST(APPEND CMAKE_INCLUDE_PATH /usr/include/eigen3) # Ubuntu 10.04's default location.
-LIST(APPEND CMAKE_INCLUDE_PATH /usr/include/suitesparse) # Ubuntu
-LIST(APPEND CMAKE_INCLUDE_PATH /usr/local/homebrew/include) # Mac OS X
-LIST(APPEND CMAKE_INCLUDE_PATH /usr/local/homebrew/include/eigen3)  # Mac OS X
-LIST(APPEND CMAKE_INCLUDE_PATH /usr/local/include)
-LIST(APPEND CMAKE_INCLUDE_PATH /usr/local/include/eigen3)
-LIST(APPEND CMAKE_INCLUDE_PATH /usr/local/include/suitesparse)
+  UPDATE_CACHE_VARIABLE(MINIGLOG ON)
+  MESSAGE(STATUS "Building for iOS, forcing use of miniglog instead of glog.")
 
-# Eigen
-FIND_PATH(EIGEN_INCLUDE NAMES Eigen/Core)
-IF (NOT EXISTS ${EIGEN_INCLUDE})
-  MESSAGE(FATAL_ERROR "Can't find Eigen. Try passing -DEIGEN_INCLUDE=...")
-ELSE (NOT EXISTS ${EIGEN_INCLUDE})
-  MESSAGE("-- Found Eigen 3.x: ${EIGEN_INCLUDE}")
-ENDIF (NOT EXISTS ${EIGEN_INCLUDE})
+  UPDATE_CACHE_VARIABLE(SUITESPARSE OFF)
+  UPDATE_CACHE_VARIABLE(CXSPARSE OFF)
+  UPDATE_CACHE_VARIABLE(GFLAGS OFF)
+  UPDATE_CACHE_VARIABLE(OPENMP OFF)
 
-SET(BLAS_AND_LAPACK_FOUND TRUE)
-IF ((NOT DEFINED LAPACK) OR (DEFINED LAPACK AND LAPACK))
-  FIND_PACKAGE(LAPACK)
+  MESSAGE(STATUS "Building for iOS: SuiteSparse, CXSparse, gflags and OpenMP are not available.")
+
+  UPDATE_CACHE_VARIABLE(BUILD_EXAMPLES OFF)
+  MESSAGE(STATUS "Building for iOS, will not build examples.")
+
+  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fobjc-abi-version=2 -fobjc-arc -isysroot ${CMAKE_OSX_SYSROOT}")
+  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fobjc-abi-version=2 -fobjc-arc -isysroot ${CMAKE_OSX_SYSROOT}")
+
+  IF (${IOS_PLATFORM} STREQUAL "SIMULATOR" OR ${IOS_PLATFORM} STREQUAL "SIMULATOR64")
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mios-simulator-version-min=6.0")
+  ENDIF()
+ENDIF (IOS)
+
+# Prior to October 2013, Ceres used some non-CMake standardised variables to
+# hold user-specified (as opposed to FindPackage found) include directory and
+# library paths for Ceres dependencies.  These were of the form:
+# <DEPENDENCY>_LIB / <DEPENDENCY>_INCLUDE.  Since then, Ceres now has
+# FindPackage() scripts for all of its dependencies which obey the standard
+# CMake variables: <DEPENDENCY>_LIBRARIES & <DEPENDENCY>_INCLUDE_DIRS.  In order
+# to ensure backwards compatibility, we convert any legacy variables to
+# _directory_ hints for the FindPackage() scripts.
+MACRO(HANDLE_LEGACY_INCLUDE_DEPENDENCY_HINT
+    LEGACY_VAR DIRECTORY_HINT_VAR)
+  IF (DEFINED ${LEGACY_VAR})
+    # Get the dependency name (all caps) from the hint directory variable
+    # for the warning message.
+    STRING(REGEX MATCH "^[^_]*" DEPENDENCY_NAME ${DIRECTORY_HINT_VAR})
+    MESSAGE(WARNING "You are defining a legacy variable ${LEGACY_VAR} "
+      "to specify the include directory for ${DEPENDENCY_NAME}.  This is "
+      "deprecated and support for it will be removed in a future release. "
+      "Please use either the search directory hints variable: "
+      "${DIRECTORY_HINT_VAR} or ${DEPENDENCY_NAME}_INCLUDE_DIR to specify "
+      "exactly the directory used (no search performed), see: "
+      "http://homes.cs.washington.edu/~sagarwal/ceres-solver/dev/building.html "
+      "for more information.")
+    LIST(APPEND ${DIRECTORY_HINT_VAR} ${${LEGACY_VAR}})
+  ENDIF (DEFINED ${LEGACY_VAR})
+ENDMACRO(HANDLE_LEGACY_INCLUDE_DEPENDENCY_HINT)
+
+MACRO(HANDLE_LEGACY_LIBRARY_DEPENDENCY_HINT
+    LEGACY_VAR DIRECTORY_HINT_VAR)
+  IF (DEFINED ${LEGACY_VAR})
+    # Get the dependency name (all caps) from the hint directory variable
+    # for the warning message.
+    STRING(REGEX MATCH "^[^_]*" DEPENDENCY_NAME ${DIRECTORY_HINT_VAR})
+    MESSAGE(WARNING "You are defining a legacy variable ${LEGACY_VAR} "
+      "to specify the library for ${DEPENDENCY_NAME}.  This is "
+      "deprecated and support for it will be removed in a future release. "
+      "Please use either the search directory hints variable: "
+      "${DIRECTORY_HINT_VAR} or ${DEPENDENCY_NAME}_LIBRARY to specify "
+      "exactly the library used (no search performed), see: "
+      "http://homes.cs.washington.edu/~sagarwal/ceres-solver/dev/building.html "
+      "for more information.")
+    IF (EXISTS ${${LEGACY_VAR}} AND
+        NOT IS_DIRECTORY ${${LEGACY_VAR}})
+      # User specified an explicit (library) file using the legacy variable
+      # interface, hints to FindPackage() scripts are directories so add the
+      # parent directory of the specified file.
+      GET_FILENAME_COMPONENT(DIR_HINT ${${LEGACY_VAR}} PATH)
+      LIST(APPEND ${DIRECTORY_HINT_VAR} ${DIR_HINT})
+    ELSEIF (EXISTS ${${LEGACY_VAR}} AND
+            IS_DIRECTORY ${${LEGACY_VAR}})
+      # User specified a directory hint using the legacy variable, use it.
+      LIST(APPEND ${DIRECTORY_HINT_VAR} ${${LEGACY_VAR}})
+    ENDIF()
+  ENDIF (DEFINED ${LEGACY_VAR})
+ENDMACRO(HANDLE_LEGACY_LIBRARY_DEPENDENCY_HINT)
+
+UNSET(CERES_COMPILE_OPTIONS)
+
+# Eigen.
+HANDLE_LEGACY_INCLUDE_DEPENDENCY_HINT(EIGEN_INCLUDE EIGEN_INCLUDE_DIR_HINTS)
+FIND_PACKAGE(Eigen REQUIRED)
+IF (EIGEN_FOUND)
+  MESSAGE("-- Found Eigen version ${EIGEN_VERSION}: ${EIGEN_INCLUDE_DIRS}")
+  # Ensure that only MPL2 licensed code is part of the default build.
+  MESSAGE("")
+  MESSAGE("   ===============================================================")
+  IF (EIGENSPARSE)
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_USE_EIGEN_SPARSE)
+    MESSAGE("   Enabling the use of Eigen as a sparse linear algebra library ")
+    MESSAGE("   for solving the nonlinear least squares problems. Enabling ")
+    MESSAGE("   this option will result in an LGPL licensed version of ")
+    MESSAGE("   Ceres Solver as the Simplicial Cholesky factorization in Eigen")
+    MESSAGE("   is licensed under the LGPL. ")
+  ELSE (EIGENSPARSE)
+    MESSAGE("   Disabling the use of Eigen as a sparse linear algebra library.")
+    MESSAGE("   This does not affect the covariance estimation algorithm ")
+    MESSAGE("   which can still use the EIGEN_SPARSE_QR algorithm.")
+    ADD_DEFINITIONS(-DEIGEN_MPL2_ONLY)
+  ENDIF (EIGENSPARSE)
+    MESSAGE("   ===============================================================")
+    MESSAGE("")
+ENDIF (EIGEN_FOUND)
+
+# LAPACK (& BLAS).
+IF (LAPACK)
+  FIND_PACKAGE(LAPACK QUIET)
   IF (LAPACK_FOUND)
     MESSAGE("-- Found LAPACK library: ${LAPACK_LIBRARIES}")
   ELSE (LAPACK_FOUND)
-    MESSAGE("-- Did not find LAPACK library")
-    SET(BLAS_AND_LAPACK_FOUND FALSE)
+    MESSAGE("-- Did not find LAPACK library, disabling LAPACK support.")
   ENDIF (LAPACK_FOUND)
 
-  FIND_PACKAGE(BLAS)
+  FIND_PACKAGE(BLAS QUIET)
   IF (BLAS_FOUND)
     MESSAGE("-- Found BLAS library: ${BLAS_LIBRARIES}")
   ELSE (BLAS_FOUND)
-    MESSAGE("-- Did not find BLAS library")
-    SET(BLAS_AND_BLAS_FOUND FALSE)
+    MESSAGE("-- Did not find BLAS library, disabling LAPACK support.")
   ENDIF (BLAS_FOUND)
 
-ELSE ((NOT DEFINED LAPACK) OR (DEFINED LAPACK AND LAPACK))
-  SET(BLAS_AND_LAPACK_FOUND FALSE)
-ENDIF ((NOT DEFINED LAPACK) OR (DEFINED LAPACK AND LAPACK))
+  IF (NOT (LAPACK_FOUND AND BLAS_FOUND))
+    UPDATE_CACHE_VARIABLE(LAPACK OFF)
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_LAPACK)
+  ENDIF (NOT (LAPACK_FOUND AND BLAS_FOUND))
+ELSE (LAPACK)
+  MESSAGE("-- Building without LAPACK.")
+  LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_LAPACK)
+ENDIF (LAPACK)
 
-IF (NOT BLAS_AND_LAPACK_FOUND)
-  ADD_DEFINITIONS(-DCERES_NO_LAPACK)
-ENDIF (NOT BLAS_AND_LAPACK_FOUND)
+# SuiteSparse.
+IF (SUITESPARSE AND NOT LAPACK)
+  # If user has disabled LAPACK, but left SUITESPARSE ON, turn it OFF,
+  # LAPACK controls whether Ceres will be linked, directly or indirectly
+  # via SuiteSparse to LAPACK.
+  MESSAGE("-- Disabling SuiteSparse as use of LAPACK has been disabled, "
+    "turn ON LAPACK to enable (optional) building with SuiteSparse.")
+  UPDATE_CACHE_VARIABLE(SUITESPARSE OFF)
+ENDIF (SUITESPARSE AND NOT LAPACK)
+IF (SUITESPARSE)
+  # By default, if SuiteSparse and all dependencies are found, Ceres is
+  # built with SuiteSparse support.
 
-IF ((NOT DEFINED SUITESPARSE) OR (DEFINED SUITESPARSE AND SUITESPARSE))
-# Check for SuiteSparse dependencies
-
-SET(AMD_FOUND TRUE)
-FIND_LIBRARY(AMD_LIB NAMES amd)
-IF (EXISTS ${AMD_LIB})
-  MESSAGE("-- Found AMD library: ${AMD_LIB}")
-ELSE (EXISTS ${AMD_LIB})
-  MESSAGE("-- Did not find AMD library")
-  SET(AMD_FOUND FALSE)
-ENDIF (EXISTS ${AMD_LIB})
-
-FIND_PATH(AMD_INCLUDE NAMES amd.h)
-IF (EXISTS ${AMD_INCLUDE})
-  MESSAGE("-- Found AMD header in: ${AMD_INCLUDE}")
-ELSE (EXISTS ${AMD_INCLUDE})
-  MESSAGE("-- Did not find AMD header")
-  SET(AMD_FOUND FALSE)
-ENDIF (EXISTS ${AMD_INCLUDE})
-
-SET(CAMD_FOUND TRUE)
-FIND_LIBRARY(CAMD_LIB NAMES camd)
-IF (EXISTS ${CAMD_LIB})
-  MESSAGE("-- Found CAMD library: ${CAMD_LIB}")
-ELSE (EXISTS ${CAMD_LIB})
-  MESSAGE("-- Did not find CAMD library")
-  SET(CAMD_FOUND FALSE)
-ENDIF (EXISTS ${CAMD_LIB})
-
-FIND_PATH(CAMD_INCLUDE NAMES camd.h)
-IF (EXISTS ${CAMD_INCLUDE})
-  MESSAGE("-- Found CAMD header in: ${CAMD_INCLUDE}")
-ELSE (EXISTS ${CAMD_INCLUDE})
-  MESSAGE("-- Did not find CAMD header")
-  SET(CAMD_FOUND FALSE)
-ENDIF (EXISTS ${CAMD_INCLUDE})
-
-SET(COLAMD_FOUND TRUE)
-FIND_LIBRARY(COLAMD_LIB NAMES colamd)
-IF (EXISTS ${COLAMD_LIB})
-  MESSAGE("-- Found COLAMD library: ${COLAMD_LIB}")
-ELSE (EXISTS ${COLAMD_LIB})
-  MESSAGE("-- Did not find COLAMD library")
-  SET(COLAMD_FOUND FALSE)
-ENDIF (EXISTS ${COLAMD_LIB})
-
-FIND_PATH(COLAMD_INCLUDE NAMES colamd.h)
-IF (EXISTS ${COLAMD_INCLUDE})
-  MESSAGE("-- Found COLAMD header in: ${COLAMD_INCLUDE}")
-ELSE (EXISTS ${COLAMD_INCLUDE})
-  MESSAGE("-- Did not find COLAMD header")
-  SET(COLAMD_FOUND FALSE)
-ENDIF (EXISTS ${COLAMD_INCLUDE})
-
-SET(CCOLAMD_FOUND TRUE)
-FIND_LIBRARY(CCOLAMD_LIB NAMES ccolamd)
-IF (EXISTS ${CCOLAMD_LIB})
-  MESSAGE("-- Found CCOLAMD library: ${CCOLAMD_LIB}")
-ELSE (EXISTS ${CCOLAMD_LIB})
-  MESSAGE("-- Did not find CCOLAMD library")
-  SET(CCOLAMD_FOUND FALSE)
-ENDIF (EXISTS ${CCOLAMD_LIB})
-
-FIND_PATH(CCOLAMD_INCLUDE NAMES ccolamd.h)
-IF (EXISTS ${CCOLAMD_INCLUDE})
-  MESSAGE("-- Found CCOLAMD header in: ${CCOLAMD_INCLUDE}")
-ELSE (EXISTS ${CCOLAMD_INCLUDE})
-  MESSAGE("-- Did not find CCOLAMD header")
-  SET(CCOLAMD_FOUND FALSE)
-ENDIF (EXISTS ${CCOLAMD_INCLUDE})
-
-SET(CHOLMOD_FOUND TRUE)
-FIND_LIBRARY(CHOLMOD_LIB NAMES cholmod)
-IF (EXISTS ${CHOLMOD_LIB})
-  MESSAGE("-- Found CHOLMOD library: ${CHOLMOD_LIB}")
-ELSE (EXISTS ${CHOLMOD_LIB})
-  MESSAGE("-- Did not find CHOLMOD library")
-  SET(CHOLMOD_FOUND FALSE)
-ENDIF (EXISTS ${CHOLMOD_LIB})
-
-FIND_PATH(CHOLMOD_INCLUDE NAMES cholmod.h)
-IF (EXISTS ${CHOLMOD_INCLUDE})
-  MESSAGE("-- Found CHOLMOD header in: ${CHOLMOD_INCLUDE}")
-ELSE (EXISTS ${CHOLMOD_INCLUDE})
-  MESSAGE("-- Did not find CHOLMOD header")
-  SET(CHOLMOD_FOUND FALSE)
-ENDIF (EXISTS ${CHOLMOD_INCLUDE})
-
-SET(SUITESPARSEQR_FOUND TRUE)
-FIND_LIBRARY(SUITESPARSEQR_LIB NAMES spqr)
-IF (EXISTS ${SUITESPARSEQR_LIB})
-  MESSAGE("-- Found SUITESPARSEQR library: ${SUITESPARSEQR_LIB}")
-ELSE (EXISTS ${SUITESPARSEQR_LIB})
-  MESSAGE("-- Did not find SUITESPARSEQR library")
-  SET(SUITESPARSEQR_FOUND FALSE)
-ENDIF (EXISTS ${SUITESPARSEQR_LIB})
-
-FIND_PATH(SUITESPARSEQR_INCLUDE NAMES SuiteSparseQR.hpp)
-IF (EXISTS ${SUITESPARSEQR_INCLUDE})
-  MESSAGE("-- Found SUITESPARSEQR header in: ${SUITESPARSEQR_INCLUDE}")
-ELSE (EXISTS ${SUITESPARSEQR_INCLUDE})
-  MESSAGE("-- Did not find SUITESPARSEQR header")
-  SET(SUITESPARSEQR_FOUND FALSE)
-ENDIF (EXISTS ${SUITESPARSEQR_INCLUDE})
-
-# If SuiteSparse version is >= 4 then SuiteSparse_config is required.
-# For SuiteSparse 3, UFconfig.h is required.
-SET(SUITESPARSE_CONFIG_FOUND TRUE)
-SET(UFCONFIG_FOUND TRUE)
-
-FIND_LIBRARY(SUITESPARSE_CONFIG_LIB NAMES suitesparseconfig)
-IF (EXISTS ${SUITESPARSE_CONFIG_LIB})
-  MESSAGE("-- Found SuiteSparse_config library: ${SUITESPARSE_CONFIG_LIB}")
-ELSE (EXISTS ${SUITESPARSE_CONFIG_LIB})
-  MESSAGE("-- Did not find SuiteSparse_config library")
-ENDIF (EXISTS ${SUITESPARSE_CONFIG_LIB})
-
-FIND_PATH(SUITESPARSE_CONFIG_INCLUDE NAMES SuiteSparse_config.h)
-IF (EXISTS ${SUITESPARSE_CONFIG_INCLUDE})
-  MESSAGE("-- Found SuiteSparse_config header in: ${SUITESPARSE_CONFIG_INCLUDE}")
-  SET(UFCONFIG_FOUND FALSE)
-ELSE (EXISTS ${SUITESPARSE_CONFIG_INCLUDE})
-  MESSAGE("-- Did not find SuiteSparse_config header")
-ENDIF (EXISTS ${SUITESPARSE_CONFIG_INCLUDE})
-
-IF (NOT EXISTS ${SUITESPARSE_CONFIG_LIB} OR
-    NOT EXISTS ${SUITESPARSE_CONFIG_INCLUDE})
-  SET(SUITESPARSE_CONFIG_FOUND FALSE)
-  FIND_PATH(UFCONFIG_INCLUDE NAMES UFconfig.h)
-  IF (EXISTS ${UFCONFIG_INCLUDE})
-    MESSAGE("-- Found UFconfig header in: ${UFCONFIG_INCLUDE}")
-  ELSE (EXISTS ${UFCONFIG_INCLUDE})
-    MESSAGE("-- Did not find UFconfig header")
-    SET(UFCONFIG_FOUND FALSE)
-  ENDIF (EXISTS ${UFCONFIG_INCLUDE})
-ENDIF (NOT EXISTS ${SUITESPARSE_CONFIG_LIB} OR
-       NOT EXISTS ${SUITESPARSE_CONFIG_INCLUDE})
-
-FIND_LIBRARY(METIS_LIB NAMES metis)
-IF (EXISTS ${METIS_LIB})
-  MESSAGE("-- Found METIS library: ${METIS_LIB}")
-ELSE (EXISTS ${METIS_LIB})
-  MESSAGE("-- Did not find METIS library")
-ENDIF (EXISTS ${METIS_LIB})
-
-# SuiteSparseQR may be compiled with Intel Threading Building Blocks.
-SET(TBB_FOUND TRUE)
-FIND_LIBRARY(TBB_LIB NAMES tbb)
-IF (EXISTS ${TBB_LIB})
-  MESSAGE("-- Found TBB library: ${TBB_LIB}")
-ELSE (EXISTS ${TBB_LIB})
-  MESSAGE("-- Did not find TBB library")
-  SET(TBB_FOUND FALSE)
-ENDIF (EXISTS ${TBB_LIB})
-
-FIND_LIBRARY(TBB_MALLOC_LIB NAMES tbbmalloc)
-IF (EXISTS ${TBB_MALLOC_LIB})
-  MESSAGE("-- Found TBB Malloc library: ${TBB_MALLOC_LIB}")
-ELSE (EXISTS ${TBB_MALLOC_LIB})
-  MESSAGE("-- Did not find TBB library")
-  SET(TBB_FOUND FALSE)
-ENDIF (EXISTS ${TBB_MALLOC_LIB})
-
-# We don't use SET(SUITESPARSE_FOUND ${AMD_FOUND} ...) in order to be
-# able to check whether SuiteSparse is available without expanding
-# SUITESPARSE_FOUND with ${}. This means further checks could be:
-#
-#   IF (SUITESPARSE_FOUND)
-#
-# and not:
-#
-#   IF (${SUITESPARSE_FOUND})
-#
-IF (${AMD_FOUND} AND
-    ${CAMD_FOUND} AND
-    ${COLAMD_FOUND} AND
-    ${CCOLAMD_FOUND} AND
-    ${CHOLMOD_FOUND} AND
-    (${SUITESPARSE_CONFIG_FOUND} OR ${UFCONFIG_FOUND}) AND
-    ${BLAS_AND_LAPACK_FOUND})
-  SET(SUITESPARSE_FOUND TRUE)
-ELSE ()
-  SET(SUITESPARSE_FOUND FALSE)
-ENDIF ()
-
-ENDIF ((NOT DEFINED SUITESPARSE) OR (DEFINED SUITESPARSE AND SUITESPARSE))
-# By default, if all of SuiteSparse's dependencies are found, Ceres is
-# built with SuiteSparse support. -DSUITESPARSE=ON/OFF can be used to
-# enable/disable SuiteSparse explicitly.
-IF (DEFINED SUITESPARSE)
-  IF (SUITESPARSE)
-    IF (NOT SUITESPARSE_FOUND)
-      MESSAGE(FATAL_ERROR "One or more of SuiteSparse's dependencies was not found")
-    ENDIF (NOT SUITESPARSE_FOUND)
-  ELSE (SUITESPARSE)
-    ADD_DEFINITIONS(-DCERES_NO_SUITESPARSE)
-  ENDIF (SUITESPARSE)
-ELSE (DEFINED SUITESPARSE)
+  # Check for SuiteSparse and dependencies.
+  FIND_PACKAGE(SuiteSparse)
   IF (SUITESPARSE_FOUND)
-    MESSAGE("-- Found all SuiteSparse dependencies. Building with SuiteSparse")
-    SET(SUITESPARSE ON)
+    # On Ubuntu the system install of SuiteSparse (v3.4.0) up to at least
+    # Ubuntu 13.10 cannot be used to link shared libraries.
+    IF (BUILD_SHARED_LIBS AND
+        SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION)
+      MESSAGE(FATAL_ERROR "You are attempting to build Ceres as a shared "
+        "library on Ubuntu using a system package install of SuiteSparse "
+        "3.4.0. This package is broken and does not support the "
+        "construction of shared libraries (you can still build Ceres as "
+        "a static library).  If you wish to build a shared version of Ceres "
+        "you should uninstall the system install of SuiteSparse "
+        "(libsuitesparse-dev) and perform a source install of SuiteSparse "
+        "(we recommend that you use the latest version), "
+        "see: http://homes.cs.washington.edu/~sagarwal"
+        "/ceres-solver/dev/building.html for more information.")
+    ENDIF (BUILD_SHARED_LIBS AND
+      SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION)
+
+    # By default, if all of SuiteSparse's dependencies are found, Ceres is
+    # built with SuiteSparse support.
+    MESSAGE("-- Found SuiteSparse ${SUITESPARSE_VERSION}, "
+            "building with SuiteSparse.")
   ELSE (SUITESPARSE_FOUND)
-    MESSAGE("-- Did not find all SuiteSparse dependencies. Building without SuiteSparse")
-    SET(SUITESPARSE OFF)
-    ADD_DEFINITIONS(-DCERES_NO_SUITESPARSE)
+    # Disable use of SuiteSparse if it cannot be found and continue.
+    MESSAGE("-- Did not find all SuiteSparse dependencies, disabling "
+      "SuiteSparse support.")
+    UPDATE_CACHE_VARIABLE(SUITESPARSE OFF)
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_SUITESPARSE)
   ENDIF (SUITESPARSE_FOUND)
-ENDIF (DEFINED SUITESPARSE)
+ELSE (SUITESPARSE)
+  MESSAGE("-- Building without SuiteSparse.")
+  LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_SUITESPARSE)
+ENDIF (SUITESPARSE)
 
-# By default, if all of CXSparse's dependencies are found, Ceres is
-# built with CXSparse support. -DCXSPARSE=ON/OFF can be used to
-# enable/disable CXSparse explicitly.
-IF ((NOT DEFINED CXSPARSE) OR (DEFINED CXSPARSE AND CXSPARSE))
-
-SET(CXSPARSE_FOUND ON)
-FIND_LIBRARY(CXSPARSE_LIB NAMES cxsparse)
-IF (EXISTS ${CXSPARSE_LIB})
-  MESSAGE("-- Found CXSparse library in: ${CXSPARSE_LIB}")
-ELSE (EXISTS ${CXSPARSE_LIB})
-  MESSAGE("-- Did not find CXSparse header")
-  SET(CXSPARSE_FOUND FALSE)
-ENDIF (EXISTS ${CXSPARSE_LIB})
-
-FIND_PATH(CXSPARSE_INCLUDE NAMES cs.h)
-IF (EXISTS ${CXSPARSE_INCLUDE})
-  MESSAGE("-- Found CXSparse header in: ${CXSPARSE_INCLUDE}")
-ELSE (EXISTS ${CXSPARSE_INCLUDE})
-  MESSAGE("-- Did not find CXSparse header")
-  SET(CXSPARSE_FOUND FALSE)
-ENDIF (EXISTS ${CXSPARSE_INCLUDE})
-ENDIF ((NOT DEFINED CXSPARSE) OR (DEFINED CXSPARSE AND CXSPARSE))
-
-IF (DEFINED CXSPARSE)
-  IF (CXSPARSE)
-    IF (NOT CXSPARSE_FOUND)
-      MESSAGE(FATAL_ERROR "-- CXSparse not found.")
-    ENDIF (NOT CXSPARSE_FOUND)
-  ELSE (CXSPARSE)
-    ADD_DEFINITIONS(-DCERES_NO_CXSPARSE)
-  ENDIF (CXSPARSE)
-ELSE (DEFINED CXSPARSE)
+# CXSparse.
+IF (CXSPARSE)
+  # Don't search with REQUIRED as we can continue without CXSparse.
+  FIND_PACKAGE(CXSparse)
   IF (CXSPARSE_FOUND)
-    MESSAGE("-- Building with CXSparse support.")
-    SET(CXSPARSE ON)
+    # By default, if CXSparse and all dependencies are found, Ceres is
+    # built with CXSparse support.
+    MESSAGE("-- Found CXSparse version: ${CXSPARSE_VERSION}, "
+      "building with CXSparse.")
   ELSE (CXSPARSE_FOUND)
-    MESSAGE("-- Building without CXSparse.")
-    SET(CXSPARSE OFF)
-    ADD_DEFINITIONS(-DCERES_NO_CXSPARSE)
+    # Disable use of CXSparse if it cannot be found and continue.
+    MESSAGE("-- Did not find CXSparse, building without CXSparse.")
+    UPDATE_CACHE_VARIABLE(CXSPARSE OFF)
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_CXSPARSE)
   ENDIF (CXSPARSE_FOUND)
-ENDIF (DEFINED CXSPARSE)
+ELSE (CXSPARSE)
+  MESSAGE("-- Building without CXSparse.")
+  LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_CXSPARSE)
+  # Mark as advanced (remove from default GUI view) the CXSparse search
+  # variables in case user enabled CXSPARSE, FindCXSparse did not find it, so
+  # made search variables visible in GUI for user to set, but then user disables
+  # CXSPARSE instead of setting them.
+  MARK_AS_ADVANCED(FORCE CXSPARSE_INCLUDE_DIR
+                         CXSPARSE_LIBRARY)
+ENDIF (CXSPARSE)
 
+# GFlags.
 IF (GFLAGS)
-  FIND_LIBRARY(GFLAGS_LIB NAMES gflags)
-  IF (NOT EXISTS ${GFLAGS_LIB})
-    MESSAGE(FATAL_ERROR
-            "Can't find Google Flags. Please specify: "
-            "-DGFLAGS_LIB=...")
-  ENDIF (NOT EXISTS ${GFLAGS_LIB})
-  MESSAGE("-- Found Google Flags library: ${GFLAGS_LIB}")
-  FIND_PATH(GFLAGS_INCLUDE NAMES gflags/gflags.h)
-  IF (NOT EXISTS ${GFLAGS_INCLUDE})
-    MESSAGE(FATAL_ERROR
-            "Can't find Google Flags. Please specify: "
-            "-DGFLAGS_INCLUDE=...")
-  ENDIF (NOT EXISTS ${GFLAGS_INCLUDE})
-  MESSAGE("-- Found Google Flags header in: ${GFLAGS_INCLUDE}")
+  HANDLE_LEGACY_INCLUDE_DEPENDENCY_HINT(GFLAGS_INCLUDE GFLAGS_INCLUDE_DIR_HINTS)
+  HANDLE_LEGACY_LIBRARY_DEPENDENCY_HINT(GFLAGS_LIB GFLAGS_LIBRARY_DIR_HINTS)
+
+  # Don't search with REQUIRED as we can continue without gflags.
+  FIND_PACKAGE(Gflags)
+  IF (GFLAGS_FOUND)
+    MESSAGE("-- Found Google Flags header in: ${GFLAGS_INCLUDE_DIRS}")
+  ELSE (GFLAGS_FOUND)
+    MESSAGE("-- Did not find Google Flags (gflags), building without gflags "
+      "- no tests or tools will be built!")
+    UPDATE_CACHE_VARIABLE(GFLAGS OFF)
+  ENDIF (GFLAGS_FOUND)
 ELSE (GFLAGS)
   MESSAGE("-- Google Flags disabled; no tests or tools will be built!")
-  ADD_DEFINITIONS(-DCERES_NO_GFLAGS)
+  # Mark as advanced (remove from default GUI view) the gflags search
+  # variables in case user enabled GFLAGS, FindGflags did not find it, so
+  # made search variables visible in GUI for user to set, but then user disables
+  # GFLAGS instead of setting them.
+  MARK_AS_ADVANCED(FORCE GFLAGS_INCLUDE_DIR
+                         GFLAGS_LIBRARY)
 ENDIF (GFLAGS)
 
+# MiniGLog.
 IF (MINIGLOG)
-  SET(GLOG_LIB miniglog)
-  MESSAGE("-- Using minimal Glog substitute (library): ${GLOG_LIB}")
-  SET(GLOG_INCLUDE internal/ceres/miniglog)
-  MESSAGE("-- Using minimal Glog substitute (include): ${GLOG_INCLUDE}")
-ELSE (MINIGLOG)
-  FIND_LIBRARY(GLOG_LIB NAMES glog)
-  IF (EXISTS ${GLOG_LIB})
-    MESSAGE("-- Found Google Log library: ${GLOG_LIB}")
-  ELSE (EXISTS ${GLOG_LIB})
-    MESSAGE(FATAL_ERROR
-            "Can't find Google Log. Please specify: -DGLOG_LIB=...")
-  ENDIF (EXISTS ${GLOG_LIB})
+  MESSAGE("-- Compiling minimal glog substitute into Ceres.")
+  SET(GLOG_INCLUDE_DIRS internal/ceres/miniglog)
+  MESSAGE("-- Using minimal glog substitute (include): ${GLOG_INCLUDE_DIRS}")
 
-  FIND_PATH(GLOG_INCLUDE NAMES glog/logging.h)
-  IF (EXISTS ${GLOG_INCLUDE})
-    MESSAGE("-- Found Google Log header in: ${GLOG_INCLUDE}")
-  ELSE (EXISTS ${GLOG_INCLUDE})
-    MESSAGE(FATAL_ERROR
-            "Can't find Google Log. Please specify: -DGLOG_INCLUDE=...")
-  ENDIF (EXISTS ${GLOG_INCLUDE})
+  # Mark as advanced (remove from default GUI view) the glog search
+  # variables in case user disables MINIGLOG, FindGlog did not find it, so
+  # made search variables visible in GUI for user to set, but then user enables
+  # MINIGLOG instead of setting them.
+  MARK_AS_ADVANCED(FORCE GLOG_INCLUDE_DIR
+                         GLOG_LIBRARY)
+ELSE (MINIGLOG)
+  HANDLE_LEGACY_INCLUDE_DEPENDENCY_HINT(GLOG_INCLUDE GLOG_INCLUDE_DIR_HINTS)
+  HANDLE_LEGACY_LIBRARY_DEPENDENCY_HINT(GLOG_LIB GLOG_LIBRARY_DIR_HINTS)
+
+  # Don't search with REQUIRED so that configuration continues if not found and
+  # we can output an error message explaining the MINIGLOG option.
+  FIND_PACKAGE(Glog)
+  IF (GLOG_FOUND)
+    MESSAGE("-- Found Google Log header in: ${GLOG_INCLUDE_DIRS}")
+  ELSE (GLOG_FOUND)
+    MESSAGE(FATAL_ERROR "Can't find Google Log. Please set GLOG_INCLUDE_DIR & "
+      "GLOG_LIBRARY or enable MINIGLOG option to use minimal glog "
+      "implementation.")
+  ENDIF (GLOG_FOUND)
 ENDIF (MINIGLOG)
 
 IF (NOT SCHUR_SPECIALIZATIONS)
-  ADD_DEFINITIONS(-DCERES_RESTRICT_SCHUR_SPECIALIZATION)
+  LIST(APPEND CERES_COMPILE_OPTIONS CERES_RESTRICT_SCHUR_SPECIALIZATION)
   MESSAGE("-- Disabling Schur specializations (faster compiles)")
 ENDIF (NOT SCHUR_SPECIALIZATIONS)
 
-IF (NOT LINE_SEARCH_MINIMIZER)
-  ADD_DEFINITIONS(-DCERES_NO_LINE_SEARCH_MINIMIZER)
-  MESSAGE("-- Disabling line search minimizer")
-ENDIF (NOT LINE_SEARCH_MINIMIZER)
-
 IF (NOT CUSTOM_BLAS)
-  ADD_DEFINITIONS(-DCERES_NO_CUSTOM_BLAS)
+  LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_CUSTOM_BLAS)
   MESSAGE("-- Disabling custom blas")
 ENDIF (NOT CUSTOM_BLAS)
 
-IF (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
-  SET(OPENMP_FOUND FALSE)
-ELSE (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
-  IF (OPENMP)
-    FIND_PACKAGE(OpenMP)
-  ENDIF (OPENMP)
-ENDIF (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+IF (OPENMP)
+  # Clang does not (yet) support OpenMP.
+  IF (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+    UPDATE_CACHE_VARIABLE(OPENMP OFF)
+    MESSAGE("-- Compiler is Clang, disabling OpenMP.")
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_THREADS)
+  ELSE (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+    # Find quietly so that we can continue without OpenMP if it is not found.
+    FIND_PACKAGE(OpenMP QUIET)
+    IF (OPENMP_FOUND)
+      MESSAGE("-- Building with OpenMP.")
+      LIST(APPEND CERES_COMPILE_OPTIONS CERES_USE_OPENMP)
+      SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
+      IF (UNIX)
+        # At least on Linux, we need pthreads to be enabled for mutex to
+        # compile.  This may not work on Windows or Android.
+        FIND_PACKAGE(Threads REQUIRED)
+        LIST(APPEND CERES_COMPILE_OPTIONS CERES_HAVE_PTHREAD)
+        LIST(APPEND CERES_COMPILE_OPTIONS CERES_HAVE_RWLOCK)
+      ENDIF (UNIX)
+    ELSE (OPENMP_FOUND)
+      MESSAGE("-- Failed to find OpenMP, disabling.")
+      UPDATE_CACHE_VARIABLE(OPENMP OFF)
+      LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_THREADS)
+    ENDIF (OPENMP_FOUND)
+  ENDIF (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+ELSE (OPENMP)
+  MESSAGE("-- Building without OpenMP (disabling multithreading).")
+  LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_THREADS)
+ENDIF (OPENMP)
 
-IF (OPENMP_FOUND)
-  MESSAGE("-- Found OpenMP.")
-  ADD_DEFINITIONS(-DCERES_USE_OPENMP)
-  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
-  IF (UNIX)
-    # At least on Linux, we need pthreads to be enabled for mutex to
-    # compile.  This may not work on Windows or Android.
-    FIND_PACKAGE(Threads REQUIRED)
-    SET(STATIC_LIBRARY_FLAGS
-        "${STATIC_LIBRARY_FLAGS} ${CMAKE_THREAD_LIBS_INIT}")
-    SET(CMAKE_SHARED_LINKER_FLAGS
-        "${CMAKE_SHARED_LINKER_FLAGS} ${CMAKE_THREAD_LIBS_INIT}")
-    ADD_DEFINITIONS(-DCERES_HAVE_PTHREAD)
-    ADD_DEFINITIONS(-DCERES_HAVE_RWLOCK)
-  ENDIF (UNIX)
-ELSE (OPENMP_FOUND)
-  MESSAGE("-- Can't find OpenMP. Disabling multithreading.")
-  ADD_DEFINITIONS(-DCERES_NO_THREADS)
-ENDIF (OPENMP_FOUND)
+INCLUDE(CheckIncludeFileCXX)
+CHECK_INCLUDE_FILE_CXX(unordered_map HAVE_STD_UNORDERED_MAP_HEADER)
+IF (HAVE_STD_UNORDERED_MAP_HEADER)
+  # Finding the unordered_map header doesn't mean that unordered_map
+  # is in std namespace.
+  #
+  # In particular, MSVC 2008 has unordered_map declared in std::tr1.
+  # In order to support this, we do an extra check to see which
+  # namespace should be used.
+  INCLUDE(CheckCXXSourceCompiles)
+  CHECK_CXX_SOURCE_COMPILES("#include <unordered_map>
+                             int main() {
+                               std::unordered_map<int, int> map;
+                               return 0;
+                             }"
+                            HAVE_UNORDERED_MAP_IN_STD_NAMESPACE)
+  IF (HAVE_UNORDERED_MAP_IN_STD_NAMESPACE)
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_STD_UNORDERED_MAP)
+    MESSAGE("-- Found unordered_map/set in std namespace.")
+  ELSE (HAVE_UNORDERED_MAP_IN_STD_NAMESPACE)
+    CHECK_CXX_SOURCE_COMPILES("#include <unordered_map>
+                               int main() {
+                                 std::tr1::unordered_map<int, int> map;
+                                 return 0;
+                               }"
+                              HAVE_UNORDERED_MAP_IN_TR1_NAMESPACE)
+    IF (HAVE_UNORDERED_MAP_IN_TR1_NAMESPACE)
+      LIST(APPEND CERES_COMPILE_OPTIONS CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
+      MESSAGE("-- Found unordered_map/set in std::tr1 namespace.")
+    ELSE (HAVE_UNORDERED_MAP_IN_TR1_NAMESPACE)
+      MESSAGE("-- Found <unordered_map> but cannot find either std::unordered_map "
+              "or std::tr1::unordered_map.")
+      MESSAGE("-- Replacing unordered_map/set with map/set (warning: slower!)")
+      LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_UNORDERED_MAP)
+    ENDIF (HAVE_UNORDERED_MAP_IN_TR1_NAMESPACE)
+  ENDIF (HAVE_UNORDERED_MAP_IN_STD_NAMESPACE)
+ELSE (HAVE_STD_UNORDERED_MAP_HEADER)
+  CHECK_INCLUDE_FILE_CXX("tr1/unordered_map" HAVE_TR1_UNORDERED_MAP_HEADER)
+  IF (HAVE_TR1_UNORDERED_MAP_HEADER)
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_TR1_UNORDERED_MAP)
+    MESSAGE("-- Found tr1/unordered_map/set in std::tr1 namespace.")
+  ELSE (HAVE_TR1_UNORDERED_MAP_HEADER)
+    MESSAGE("-- Unable to find <unordered_map> or <tr1/unordered_map>. ")
+    MESSAGE("-- Replacing unordered_map/set with map/set (warning: slower!)")
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_NO_UNORDERED_MAP)
+  ENDIF (HAVE_TR1_UNORDERED_MAP_HEADER)
+ENDIF (HAVE_STD_UNORDERED_MAP_HEADER)
 
-IF (DISABLE_TR1)
-  MESSAGE("-- Replacing unordered_map/set with map/set (warning: slower!)")
-  ADD_DEFINITIONS(-DCERES_NO_TR1)
-ELSE (DISABLE_TR1)
-  MESSAGE("-- Using normal TR1 unordered_map/set")
-  # Use the std namespace for the hash<> and related templates. This may vary by
-  # system.
-  IF (MSVC)
-    IF (MSVC90)
-      # Special case for Visual Studio 2008.
-      # Newer versions have got tr1 symbols in another namespace,
-      # and this is being handled in Else branch of this condition.
-      # Probably Visual studio 2003 and 2005 also shall be handled here,
-      # but don't have by hand to verify and most likely they're not
-      # used by Ceres users anyway.
-      ADD_DEFINITIONS("\"-DCERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {\"")
-      ADD_DEFINITIONS("\"-DCERES_HASH_NAMESPACE_END=}}\"")
-    ELSE (MSVC90)
-      # This is known to work with Visual Studio 2010 Express.
-      # Further, for as long Visual Studio 2012 didn't move tr1 to
-      # just another namespace, the same define will work for it as well.
-      # Hopefully all further versions will also keep working with this define.
-      ADD_DEFINITIONS("\"-DCERES_HASH_NAMESPACE_START=namespace std {\"")
-      ADD_DEFINITIONS("\"-DCERES_HASH_NAMESPACE_END=}\"")
-    ENDIF(MSVC90)
-  ELSE (MSVC)
-    # This is known to work with recent versions of Linux and Mac OS X.
-    ADD_DEFINITIONS("\"-DCERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {\"")
-    ADD_DEFINITIONS("\"-DCERES_HASH_NAMESPACE_END=}}\"")
-  ENDIF (MSVC)
-ENDIF (DISABLE_TR1)
+INCLUDE(FindSharedPtr)
+FIND_SHARED_PTR()
+IF (SHARED_PTR_FOUND)
+  IF (SHARED_PTR_TR1_MEMORY_HEADER)
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_TR1_MEMORY_HEADER)
+  ENDIF (SHARED_PTR_TR1_MEMORY_HEADER)
+  IF (SHARED_PTR_TR1_NAMESPACE)
+    LIST(APPEND CERES_COMPILE_OPTIONS CERES_TR1_SHARED_PTR)
+  ENDIF (SHARED_PTR_TR1_NAMESPACE)
+ELSE (SHARED_PTR_FOUND)
+  MESSAGE(FATAL_ERROR "Unable to find shared_ptr.")
+ENDIF (SHARED_PTR_FOUND)
 
 INCLUDE_DIRECTORIES(
   include
   internal
   internal/ceres
-  ${GLOG_INCLUDE}
-  ${EIGEN_INCLUDE}
-  )
-
-FILE(GLOB CERES_HDRS ${CMAKE_SOURCE_DIR}/include/ceres/*.h)
-INSTALL(FILES ${CERES_HDRS} DESTINATION include/ceres)
-
-FILE(GLOB CERES_PUBLIC_INTERNAL_HDRS ${CMAKE_SOURCE_DIR}/include/ceres/internal/*.h)
-INSTALL(FILES ${CERES_PUBLIC_INTERNAL_HDRS} DESTINATION include/ceres/internal)
+  ${GLOG_INCLUDE_DIRS}
+  ${EIGEN_INCLUDE_DIRS})
 
 IF (SUITESPARSE)
-  INCLUDE_DIRECTORIES(${AMD_INCLUDE})
-  INCLUDE_DIRECTORIES(${CAMD_INCLUDE})
-  INCLUDE_DIRECTORIES(${COLAMD_INCLUDE})
-  INCLUDE_DIRECTORIES(${CCOLAMD_INCLUDE})
-  INCLUDE_DIRECTORIES(${CHOLMOD_INCLUDE})
-  INCLUDE_DIRECTORIES(${SUITESPARSEQR_INCLUDE})
-  IF (SUITESPARSE_CONFIG_FOUND)
-    INCLUDE_DIRECTORIES(${SUITESPARSE_CONFIG_INCLUDE})
-  ENDIF (SUITESPARSE_CONFIG_FOUND)
-  IF (UFCONFIG_FOUND)
-    INCLUDE_DIRECTORIES(${UFCONFIG_INCLUDE})
-  ENDIF (UFCONFIG_FOUND)
+  INCLUDE_DIRECTORIES(${SUITESPARSE_INCLUDE_DIRS})
 ENDIF (SUITESPARSE)
 
 IF (CXSPARSE)
-  INCLUDE_DIRECTORIES(${CXSPARSE_INCLUDE})
+  INCLUDE_DIRECTORIES(${CXSPARSE_INCLUDE_DIRS})
 ENDIF (CXSPARSE)
 
 IF (GFLAGS)
-  INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE})
+  INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIRS})
 ENDIF (GFLAGS)
 
+IF (BUILD_SHARED_LIBS)
+  MESSAGE("-- Building Ceres as a shared library.")
+  # The CERES_BUILDING_SHARED_LIBRARY compile definition is NOT stored in
+  # CERES_COMPILE_OPTIONS as it must only be defined when Ceres is compiled
+  # not when it is used as it controls the CERES_EXPORT macro which provides
+  # dllimport/export support in MSVC.
+  ADD_DEFINITIONS(-DCERES_BUILDING_SHARED_LIBRARY)
+  LIST(APPEND CERES_COMPILE_OPTIONS CERES_USING_SHARED_LIBRARY)
+ELSE (BUILD_SHARED_LIBS)
+  MESSAGE("-- Building Ceres as a static library.")
+ENDIF (BUILD_SHARED_LIBS)
+
 # Change the default build type from Debug to Release, while still
 # supporting overriding the build type.
 #
@@ -598,30 +572,35 @@
   IF (CMAKE_COMPILER_IS_GNUCXX)
     # Linux
     IF (CMAKE_SYSTEM_NAME MATCHES "Linux")
-      SET (CERES_CXX_FLAGS "${CERES_CXX_FLAGS} -march=native -mtune=native")
+      IF (NOT GCC_VERSION VERSION_LESS 4.2)
+        SET (CERES_CXX_FLAGS "${CERES_CXX_FLAGS} -march=native -mtune=native")
+      ENDIF (NOT GCC_VERSION VERSION_LESS 4.2)
     ENDIF (CMAKE_SYSTEM_NAME MATCHES "Linux")
     # Mac OS X
     IF (CMAKE_SYSTEM_NAME MATCHES "Darwin")
       SET (CERES_CXX_FLAGS "${CERES_CXX_FLAGS} -msse3")
       # Use of -fast only applicable for Apple's GCC
       # Assume this is being used if GCC version < 4.3 on OSX
-      EXECUTE_PROCESS(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
+      EXECUTE_PROCESS(COMMAND ${CMAKE_C_COMPILER}
+        ARGS ${CMAKE_CXX_COMPILER_ARG1} -dumpversion
+        OUTPUT_VARIABLE GCC_VERSION
+        OUTPUT_STRIP_TRAILING_WHITESPACE)
       IF (GCC_VERSION VERSION_LESS 4.3)
         SET (CERES_CXX_FLAGS "${CERES_CXX_FLAGS} -fast")
       ENDIF (GCC_VERSION VERSION_LESS 4.3)
     ENDIF (CMAKE_SYSTEM_NAME MATCHES "Darwin")
   ENDIF (CMAKE_COMPILER_IS_GNUCXX)
   IF (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
-    # Use of -O4 requires use of gold linker & LLVM-gold plugin, which might
+    # Use of -flto requires use of gold linker & LLVM-gold plugin, which might
     # well not be present / in use and without which files will compile, but
     # not link ('file not recognized') so explicitly check for support
     INCLUDE(CheckCXXCompilerFlag)
-    CHECK_CXX_COMPILER_FLAG("-O4" HAVE_LTO_SUPPORT)
+    CHECK_CXX_COMPILER_FLAG("-flto" HAVE_LTO_SUPPORT)
     IF (HAVE_LTO_SUPPORT)
-      MESSAGE(STATUS "Enabling link-time optimization (-O4)")
-      SET(CERES_CXX_FLAGS "${CERES_CXX_FLAGS} -O4")
+      MESSAGE(STATUS "Enabling link-time optimization (-flto)")
+      SET(CERES_CXX_FLAGS "${CERES_CXX_FLAGS} -flto")
     ELSE ()
-      MESSAGE(STATUS "Compiler/linker does not support link-time optimization (-O4), disabling.")
+      MESSAGE(STATUS "Compiler/linker does not support link-time optimization (-flto), disabling.")
     ENDIF (HAVE_LTO_SUPPORT)
   ENDIF ()
 ENDIF (CMAKE_BUILD_TYPE STREQUAL "Release")
@@ -656,6 +635,29 @@
   # the warnings.
   SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /ignore:4049")
 
+  # Update the C/CXX flags for MSVC to use either the static or shared
+  # C-Run Time (CRT) library based on the user option: MSVC_USE_STATIC_CRT.
+  LIST(APPEND C_CXX_FLAGS
+    CMAKE_CXX_FLAGS
+    CMAKE_CXX_FLAGS_DEBUG
+    CMAKE_CXX_FLAGS_RELEASE
+    CMAKE_CXX_FLAGS_MINSIZEREL
+    CMAKE_CXX_FLAGS_RELWITHDEBINFO)
+
+  FOREACH(FLAG_VAR ${C_CXX_FLAGS})
+    IF (MSVC_USE_STATIC_CRT)
+      # Use static CRT.
+      IF (${FLAG_VAR} MATCHES "/MD")
+        STRING(REGEX REPLACE "/MD" "/MT" ${FLAG_VAR} "${${FLAG_VAR}}")
+      ENDIF (${FLAG_VAR} MATCHES "/MD")
+    ELSE (MSVC_USE_STATIC_CRT)
+      # Use shared, not static, CRT.
+      IF (${FLAG_VAR} MATCHES "/MT")
+        STRING(REGEX REPLACE "/MT" "/MD" ${FLAG_VAR} "${${FLAG_VAR}}")
+      ENDIF (${FLAG_VAR} MATCHES "/MT")
+    ENDIF (MSVC_USE_STATIC_CRT)
+  ENDFOREACH()
+
   # Tuple sizes of 10 are used by Gtest.
   ADD_DEFINITIONS("-D_VARIADIC_MAX=10")
 ENDIF (MSVC)
@@ -672,17 +674,52 @@
 # threshold to the linker and clang complains about it and dies.
 IF (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
   SET(CMAKE_CXX_FLAGS
-      "${CMAKE_CXX_FLAGS} -Qunused-arguments -mllvm -inline-threshold=600 -Wno-return-type-c-linkage")
+      "${CMAKE_CXX_FLAGS} -Qunused-arguments -mllvm -inline-threshold=600")
+  # Older versions of Clang (<= 2.9) do not support the 'return-type-c-linkage'
+  # option, so check for its presence before adding it to the default flags set.
+  INCLUDE(CheckCXXCompilerFlag)
+  CHECK_CXX_COMPILER_FLAG("-Wno-return-type-c-linkage"
+                          HAVE_RETURN_TYPE_C_LINKAGE)
+  IF (HAVE_RETURN_TYPE_C_LINKAGE)
+    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-return-type-c-linkage")
+  ENDIF(HAVE_RETURN_TYPE_C_LINKAGE)
 ENDIF ()
 
+# Xcode 4.5.x used Clang 4.1 (Apple version), this has a bug that prevents
+# compilation of Ceres.
+IF (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+  EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER}
+    ARGS ${CMAKE_CXX_COMPILER_ARG1} -dumpversion
+    OUTPUT_VARIABLE CLANG_VERSION
+    OUTPUT_STRIP_TRAILING_WHITESPACE)
+  # Use version > 4.0 & < 4.2 to catch all 4.1(.x) versions.
+  IF (CLANG_VERSION VERSION_GREATER 4.0 AND
+      CLANG_VERSION VERSION_LESS 4.2)
+    MESSAGE(FATAL_ERROR "You are attempting to build Ceres on OS X using Xcode "
+      "4.5.x (Clang version: ${CLANG_VERSION}). This version of Clang has a "
+      "bug that prevents compilation of Ceres, please update to "
+      "Xcode >= 4.6.3.")
+  ENDIF (CLANG_VERSION VERSION_GREATER 4.0 AND
+    CLANG_VERSION VERSION_LESS 4.2)
+ENDIF (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+
+# Configure the Ceres config.h compile options header using the current
+# compile options and put the configured header into the Ceres build
+# directory.  Note that the ceres/internal subdir in <build>/config where
+# the configured config.h is placed is important, because Ceres will be
+# built against this configured header, it needs to have the same relative
+# include path as it would if it were in the source tree (or installed).
+LIST(REMOVE_DUPLICATES CERES_COMPILE_OPTIONS)
+INCLUDE(CreateCeresConfig)
+CREATE_CERES_CONFIG("${CERES_COMPILE_OPTIONS}"
+  ${CMAKE_CURRENT_BINARY_DIR}/config/ceres/internal)
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}/config)
+
 ADD_SUBDIRECTORY(internal/ceres)
 
 IF (BUILD_DOCUMENTATION)
   MESSAGE("-- Documentation building is enabled")
 
-  # Make CMake aware of the cmake folder, in order to find 'FindSphinx.cmake'
-  SET (CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
-
   # Generate the User's Guide (html).
   # The corresponding target is UserGuide, but is included in ALL.
   ADD_SUBDIRECTORY(docs)
@@ -695,6 +732,26 @@
   MESSAGE("-- Do not build any example.")
 ENDIF (BUILD_EXAMPLES)
 
+# Setup installation of Ceres public headers.
+FILE(GLOB CERES_HDRS ${CMAKE_SOURCE_DIR}/include/ceres/*.h)
+INSTALL(FILES ${CERES_HDRS} DESTINATION include/ceres)
+
+FILE(GLOB CERES_PUBLIC_INTERNAL_HDRS ${CMAKE_SOURCE_DIR}/include/ceres/internal/*.h)
+INSTALL(FILES ${CERES_PUBLIC_INTERNAL_HDRS} DESTINATION include/ceres/internal)
+
+# Also setup installation of Ceres config.h configured with the current
+# build options into the installed headers directory.
+INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/config/ceres/internal/config.h
+        DESTINATION include/ceres/internal)
+
+
+IF (MINIGLOG)
+  # Install miniglog header if being used as logging #includes appear in
+  # installed public Ceres headers.
+  INSTALL(FILES ${CMAKE_SOURCE_DIR}/internal/ceres/miniglog/glog/logging.h
+          DESTINATION include/ceres/internal/miniglog/glog)
+ENDIF (MINIGLOG)
+
 # Add an uninstall target to remove all installed files.
 CONFIGURE_FILE("${CMAKE_SOURCE_DIR}/cmake/uninstall.cmake.in"
                "${CMAKE_BINARY_DIR}/cmake/uninstall.cmake"
@@ -703,11 +760,13 @@
 ADD_CUSTOM_TARGET(uninstall
                   COMMAND ${CMAKE_COMMAND} -P ${CMAKE_BINARY_DIR}/cmake/uninstall.cmake)
 
-# Set up install directories. INCLUDE_INSTALL_DIR, LIB_INSTALL_DIR and
-# CMAKECONFIG_INSTALL_DIR must not be absolute paths.
-SET(INCLUDE_INSTALL_DIR include)
-SET(LIB_INSTALL_DIR lib)
-SET(CMAKECONFIG_INSTALL_DIR share/Ceres)
+# Set relative install paths, which are appended to CMAKE_INSTALL_PREFIX to
+# generate the absolute install paths.
+IF (WIN32)
+  SET(RELATIVE_CMAKECONFIG_INSTALL_DIR CMake)
+ELSE ()
+  SET(RELATIVE_CMAKECONFIG_INSTALL_DIR share/Ceres)
+ENDIF ()
 
 # This "exports" all targets which have been put into the export set
 # "CeresExport". This means that CMake generates a file with the given
@@ -717,14 +776,15 @@
 # library targets from these, which can be used in many ways in the same way
 # as a normal library target created via a normal ADD_LIBRARY().
 INSTALL(EXPORT CeresExport
-        DESTINATION ${CMAKECONFIG_INSTALL_DIR} FILE CeresTargets.cmake)
+        DESTINATION ${RELATIVE_CMAKECONFIG_INSTALL_DIR} FILE CeresTargets.cmake)
 
 # Figure out the relative path from the installed Config.cmake file to the
 # install prefix (which may be at runtime different from the chosen
 # CMAKE_INSTALL_PREFIX if under Windows the package was installed anywhere)
 # This relative path will be configured into the CeresConfig.cmake.
-FILE(RELATIVE_PATH relInstallDir
-     ${CMAKE_INSTALL_PREFIX}/${CMAKECONFIG_INSTALL_DIR} ${CMAKE_INSTALL_PREFIX})
+FILE(RELATIVE_PATH INSTALL_ROOT_REL_CONFIG_INSTALL_DIR
+     ${CMAKE_INSTALL_PREFIX}/${RELATIVE_CMAKECONFIG_INSTALL_DIR}
+     ${CMAKE_INSTALL_PREFIX})
 
 # Create a CeresConfig.cmake file. <name>Config.cmake files are searched by
 # FIND_PACKAGE() automatically. We configure that file so that we can put any
@@ -739,8 +799,11 @@
 CONFIGURE_FILE("${CMAKE_SOURCE_DIR}/cmake/CeresConfigVersion.cmake.in"
                "${CMAKE_CURRENT_BINARY_DIR}/CeresConfigVersion.cmake" @ONLY)
 
-# Install these two files into the same directory as the generated exports-file.
+# Install these files into the same directory as the generated exports-file,
+# we include the FindPackage scripts for libraries whose headers are included
+# in the public API of Ceres and should thus be present in CERES_INCLUDE_DIRS.
 INSTALL(FILES "${CMAKE_CURRENT_BINARY_DIR}/CeresConfig.cmake"
               "${CMAKE_CURRENT_BINARY_DIR}/CeresConfigVersion.cmake"
-              "${CMAKE_SOURCE_DIR}/cmake/depend.cmake"
-        DESTINATION ${CMAKECONFIG_INSTALL_DIR})
+              "${CMAKE_SOURCE_DIR}/cmake/FindEigen.cmake"
+              "${CMAKE_SOURCE_DIR}/cmake/FindGlog.cmake"
+        DESTINATION ${RELATIVE_CMAKECONFIG_INSTALL_DIR})
diff --git a/MODULE_LICENSE_BSD b/MODULE_LICENSE_BSD
deleted file mode 100644
index e69de29..0000000
--- a/MODULE_LICENSE_BSD
+++ /dev/null
diff --git a/README.google b/README.google
index 8b8b148..b42f1c9 100644
--- a/README.google
+++ b/README.google
@@ -1,5 +1,5 @@
-URL: https://ceres-solver.googlesource.com/ceres-solver/+archive/0338f9a8e69582a550ef6d128e447779536d623c.tar.gz
-Version: 0338f9a8e69582a550ef6d128e447779536d623c
+URL: https://ceres-solver.googlesource.com/ceres-solver/+archive/cfb36463f9c1f806121779d651c7105ad899bb20.tar.gz
+Version: cfb36463f9c1f806121779d651c7105ad899bb20
 License: New BSD, portions MIT
 License File: LICENSE
 
@@ -9,7 +9,6 @@
 
 Website : https://code.google.com/p/ceres-solver/
 Code    : https://ceres-solver.googlesource.com/ceres-solver/
-Docs    : http://homes.cs.washington.edu/~sagarwal/ceres-solver/
 
 Local modifications:
-Replaced the implementation of logging.h with the google3 /mobile/base/logging.h
+None
diff --git a/android/build_android.sh b/android/build_android.sh
deleted file mode 100644
index 053b828..0000000
--- a/android/build_android.sh
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/bin/sh
-#
-# Ceres Solver - A fast non-linear least squares minimizer
-# Copyright 2012 Google Inc. All rights reserved.
-# http://code.google.com/p/ceres-solver/
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-#   this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-# * Neither the name of Google Inc. nor the names of its contributors may be
-#   used to endorse or promote products derived from this software without
-#   specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# Author: keir@google.com
-#         settinger@google.com
-#
-# Ceres build script for Android. To build the ceres.so libraray for Android,
-# cd into an empty directory and run the script. Usage:
-#
-#   build_android.sh \
-#   <path to Android NDK> \
-#   <path to Eigen> \
-#   <path to Ceres source>
-#
-#   make
-#
-# This script exists only to make it easier to get Ceres building on Android;
-# as one can see from the code below, it is only a matter of extracting a
-# standalone NDK toolchain from the NDK, and getting the right arguments to
-# CMake to get it to work.
-#
-# Android NDK version r5 or higher is required. Jellybean does not work out of
-# the box, since the android-cmake toolchain is not yet updated to for it.
-#
-# Note: You may wish to run 'ccmake .', the CMake curses GUI, in order to tweak
-# the build parameters that are set by default. There are a few settings to
-# consider changing:
-#
-# SCHUR_SPECIALIZATIONS:
-#
-# Consider if enabling the schur specializations is a big enough win for the
-# problem you are solving, since compiling the schur specializations
-# considerably increases the binary size. Disable them by running 'ccmake .',
-# finding the SCHUR_SPECIALIZATIONS variable, hitting enter (toggling to "off"),
-# pressing 'c' to generate, then 'g' to generate and exit, followed by 'make'.
-#
-# EXECUTABLE_OUTPUT_PATH
-# LIBRARY_OUTPUT_PATH
-# LIBRARY_OUTPUT_PATH_ROOT:
-#
-# In normal CMake builds, where you do an out of source build, the source
-# directory is untouched when building. However, by default the Android CMake
-# toolchain selects locations under your *source* tree for the final library
-# and binary destinations. For example, if your Ceres git tree is under
-# ceres-solver.git/ and the build directory you are using is
-# ceres-android-bin/, the resulting binaries will live in ceres-solver.git/
-# (but not the intermediate .o files!) By changing the variables
-# EXECUTABLE_OUTPUT_PATH, LIBRARY_OUTPUT_PATH, and LIBRARY_OUTPUT_PATH_ROOT to
-# something under e.g. ceres-android-bin/ then true out-of-ource builds work.
-
-if [ $# -ne 3 ] ; then
-  echo "usage: build_android.sh \\"
-  echo "       <path to Android NDK> \\"
-  echo "       <path to Eigen> \\"
-  echo "       <path to Ceres source>"
-  exit 1
-fi
-
-if [ -f "CMakeLists.txt" ] ; then
-  echo "ERROR: Can't run from inside the source tree."
-  echo "       Make a new directory that's not under"
-  echo "       the main Ceres source tree."
-  exit 1
-fi
-
-# For some reason, using the NDK in-place doesn't work even though the
-# android-cmake toolchain appears to support it.
-#
-# TODO(keir): Figure out the issue with the stand-alone NDK and don't create
-# the extra stand-alone toolchain. Also test with other NDK versions and add
-# explicit checks to ensure a compatible version is used.
-ANDROID_NDK=$1
-MAKE_ANDROID_TOOLCHAIN=$ANDROID_NDK/build/tools/make-standalone-toolchain.sh
-if [ ! -f $MAKE_ANDROID_TOOLCHAIN ] ; then
-  echo "ERROR: First argument doesn't appear to be the NDK root; missing:"
-  echo "       $MAKE_ANDROID_TOOLCHAIN"
-  exit 1
-fi
-
-EIGEN_DIR=$2
-if [ ! -f $EIGEN_DIR/eigen3.pc.in ] ; then
-  echo "ERROR: Second argument doesn't appear to be Eigen3; missing:"
-  echo "       $EIGEN_DIR/eigen3.pc.in"
-  exit 1
-fi
-
-CERES_SOURCE_ROOT=$3
-if [ ! -f "$CERES_SOURCE_ROOT/internal/ceres/CMakeLists.txt" ] ; then
-  echo "ERROR: Third argument doesn't appear to be the Ceres source directory."
-  exit 1
-fi
-echo "Using Ceres source directory: $CERES_SOURCE_ROOT"
-
-# Make a standalone Android NDK toolchain if needed.
-export ANDROID_STANDALONE_TOOLCHAIN="`pwd`/toolchain"
-if [ ! -d "$ANDROID_STANDALONE_TOOLCHAIN" ] ; then
-  echo "Extracting the Android GCC standalone toolchain to:"
-  echo "    $ANDROID_STANDALONE_TOOLCHAIN..."
-  $ANDROID_NDK/build/tools/make-standalone-toolchain.sh \
-  --platform=android-8 \
-  --install-dir=$ANDROID_STANDALONE_TOOLCHAIN
-else
-  echo "Found NDK standalone toolchain; skipping creation."
-fi
-
-# Get the Android CMake NDK toolchain file if needed.
-if [ ! -d "android-cmake" ] ; then
-  hg clone https://code.google.com/p/android-cmake/
-else
-  echo "Found Android-CMake toolchain; skipping download."
-fi
-
-ANDROID_CMAKE_TOOLCHAIN=android-cmake/toolchain/android.toolchain.cmake
-if [ ! -f $ANDROID_CMAKE_TOOLCHAIN ] ; then
-  echo "ERROR: It seems the toolchain file is missing:"
-  echo "       $ANDROID_CMAKE_TOOLCHAIN"
-  exit 1
-fi
-
-cmake $CERES_SOURCE_ROOT \
-      -DCMAKE_TOOLCHAIN_FILE=$ANDROID_CMAKE_TOOLCHAIN \
-      -DCMAKE_BUILD_TYPE=Release \
-      -DEIGEN_INCLUDE=$EIGEN_DIR \
-      -DBUILD_ANDROID=ON \
-      -DSUITESPARSE=OFF \
-      -DGFLAGS=OFF \
-      -DCXSPARSE=OFF
diff --git a/cmake/CeresConfig.cmake.in b/cmake/CeresConfig.cmake.in
index d000046..a846850 100644
--- a/cmake/CeresConfig.cmake.in
+++ b/cmake/CeresConfig.cmake.in
@@ -26,24 +26,191 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 #
-# Author: pablo.speciale@gmail.com (Pablo Speciale)
+# Authors: pablo.speciale@gmail.com (Pablo Speciale)
+#          alexs.mac@gmail.com (Alex Stewart)
 #
 
-# Get the directory
-GET_FILENAME_COMPONENT(currentDir ${CMAKE_CURRENT_LIST_FILE} PATH)
+# Config file for Ceres Solver - Find Ceres & dependencies.
+#
+# This file is used by CMake when FIND_PACKAGE( Ceres ) is invoked (and
+# the directory containing this file is present in CMAKE_MODULE_PATH).
+#
+# This module defines the following variables:
+#
+# Ceres_FOUND / CERES_FOUND: True iff Ceres has been successfully
+#                            found. Both variables are set as although
+#                            FindPackage() only references Ceres_FOUND
+#                            in Config mode, given the conventions for
+#                            <package>_FOUND when FindPackage() is
+#                            called in Module mode, users could
+#                            reasonably expect to use CERES_FOUND
+#                            instead.
+#
+# CERES_VERSION: Version of Ceres found.
+#
+# CERES_INCLUDE_DIRS: Include directories for Ceres and the
+#                     dependencies which appear in the Ceres public
+#                     API and are thus required to use Ceres.
+#
+# CERES_LIBRARIES: Libraries for Ceres and all dependencies against
+#                  which Ceres was compiled. This will not include
+#                  any optional dependencies that were disabled when
+#                  Ceres was compiled.
+#
+# The following variables are also defined for legacy compatibility
+# only.  Any new code should not use them as they do not conform to
+# the standard CMake FindPackage naming conventions.
+#
+# CERES_INCLUDES = ${CERES_INCLUDE_DIRS}.
 
-# Get the chosen install prefix
-GET_FILENAME_COMPONENT(rootDir ${currentDir}/@relInstallDir@ ABSOLUTE)
+# Called if we failed to find Ceres or any of its required dependencies,
+# unsets all public (designed to be used externally) variables and reports
+# error message at priority depending upon [REQUIRED/QUIET/<NONE>] argument.
+MACRO(CERES_REPORT_NOT_FOUND REASON_MSG)
+  # FindPackage() only references Ceres_FOUND, and requires it to be
+  # explicitly set FALSE to denote not found (not merely undefined).
+  SET(Ceres_FOUND FALSE)
+  SET(CERES_FOUND FALSE)
+  UNSET(CERES_INCLUDE_DIRS)
+  UNSET(CERES_LIBRARIES)
 
-# Set the version
+  # Reset the CMake module path to its state when this script was called.
+  SET(CMAKE_MODULE_PATH ${CALLERS_CMAKE_MODULE_PATH})
+
+  # Note <package>_FIND_[REQUIRED/QUIETLY] variables defined by
+  # FindPackage() use the camelcase library name, not uppercase.
+  IF (Ceres_FIND_QUIETLY)
+    MESSAGE(STATUS "Failed to find Ceres - " ${REASON_MSG} ${ARGN})
+  ELSE (Ceres_FIND_REQUIRED)
+    MESSAGE(FATAL_ERROR "Failed to find Ceres - " ${REASON_MSG} ${ARGN})
+  ELSE()
+    # Neither QUIETLY nor REQUIRED, use SEND_ERROR which emits an error
+    # that prevents generation, but continues configuration.
+    MESSAGE(SEND_ERROR "Failed to find Ceres - " ${REASON_MSG} ${ARGN})
+  ENDIF ()
+  RETURN()
+ENDMACRO(CERES_REPORT_NOT_FOUND)
+
+# Get the (current, i.e. installed) directory containing this file.
+GET_FILENAME_COMPONENT(CURRENT_CONFIG_INSTALL_DIR
+  "${CMAKE_CURRENT_LIST_FILE}" PATH)
+
+# Record the state of the CMake module path when this script was
+# called so that we can ensure that we leave it in the same state on
+# exit as it was on entry, but modify it locally.
+SET(CALLERS_CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH})
+# Reset CMake module path to the installation directory of this
+# script, thus we will use the FindPackage() scripts shipped with
+# Ceres to find Ceres' dependencies, even if the user has equivalently
+# named FindPackage() scripts in their project.
+SET(CMAKE_MODULE_PATH ${CURRENT_CONFIG_INSTALL_DIR})
+
+# Build the absolute root install directory as a relative path
+# (determined when Ceres was configured & built) from the current
+# install directory for this this file.  This allows for the install
+# tree to be relocated, after Ceres was built, outside of CMake.
+GET_FILENAME_COMPONENT(CURRENT_ROOT_INSTALL_DIR
+  ${CURRENT_CONFIG_INSTALL_DIR}/@INSTALL_ROOT_REL_CONFIG_INSTALL_DIR@ ABSOLUTE)
+IF (NOT EXISTS ${CURRENT_ROOT_INSTALL_DIR})
+  CERES_REPORT_NOT_FOUND(
+    "Ceres install root: ${CURRENT_ROOT_INSTALL_DIR}, "
+    "determined from relative path from CeresConfg.cmake install location: "
+    "${CURRENT_CONFIG_INSTALL_DIR}, does not exist. Either the install "
+    "directory was deleted, or the install tree was only partially relocated "
+    "outside of CMake after Ceres was built.")
+ENDIF (NOT EXISTS ${CURRENT_ROOT_INSTALL_DIR})
+
+# Set the version.
 SET(CERES_VERSION @CERES_VERSION@ )
 
-# What is my include directory
-SET(CERES_INCLUDES "${rootDir}/@INCLUDE_INSTALL_DIR@")
+# Set the include directories for Ceres (itself).
+SET(CERES_INCLUDE_DIR "${CURRENT_ROOT_INSTALL_DIR}/include")
+IF (NOT EXISTS ${CERES_INCLUDE_DIR}/ceres/ceres.h)
+  CERES_REPORT_NOT_FOUND(
+    "Ceres install root: ${CURRENT_ROOT_INSTALL_DIR}, "
+    "determined from relative path from CeresConfg.cmake install location: "
+    "${CURRENT_CONFIG_INSTALL_DIR}, does not contain Ceres headers. "
+    "Either the install directory was deleted, or the install tree was only "
+    "partially relocated outside of CMake after Ceres was built.")
+ENDIF (NOT EXISTS ${CERES_INCLUDE_DIR}/ceres/ceres.h)
 
-# Import the exported targets
-INCLUDE(${currentDir}/CeresTargets.cmake)
-INCLUDE(${currentDir}/depend.cmake)
+# Append the include directories for all (potentially optional)
+# dependencies with which Ceres was compiled, the libraries themselves
+# come in via CeresTargets-<release/debug>.cmake as link libraries for
+# Ceres target.
+SET(CERES_INCLUDE_DIRS ${CERES_INCLUDE_DIR})
 
-# Set the expected library variable
+# Eigen.
+# Flag set during configuration and build of Ceres.
+SET(CERES_EIGEN_VERSION @EIGEN_VERSION@)
+# Append the locations of Eigen when Ceres was built to the search path hints.
+LIST(APPEND EIGEN_INCLUDE_DIR_HINTS @EIGEN_INCLUDE_DIR@)
+# Search quietly s/t we control the timing of the error message if not found.
+FIND_PACKAGE(Eigen ${CERES_EIGEN_VERSION} EXACT QUIET)
+IF (EIGEN_FOUND)
+  MESSAGE(STATUS "Found required Ceres dependency: "
+    "Eigen version ${CERES_EIGEN_VERSION} in ${EIGEN_INCLUDE_DIRS}")
+ELSE (EIGEN_FOUND)
+  CERES_REPORT_NOT_FOUND("Missing required Ceres "
+    "dependency: Eigen version ${CERES_EIGEN_VERSION}, please set "
+    "EIGEN_INCLUDE_DIR.")
+ENDIF (EIGEN_FOUND)
+LIST(APPEND CERES_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS})
+
+# Glog.
+# Flag set during configuration and build of Ceres.
+SET(CERES_USES_MINIGLOG @MINIGLOG@)
+IF (CERES_USES_MINIGLOG)
+  SET(MINIGLOG_INCLUDE_DIR ${CERES_INCLUDE_DIR}/ceres/internal/miniglog)
+  IF (NOT EXISTS ${MINIGLOG_INCLUDE_DIR})
+    CERES_REPORT_NOT_FOUND(
+      "Ceres install include directory: "
+      "${CERES_INCLUDE_DIR} does not include miniglog, but Ceres was "
+      "compiled with MINIGLOG enabled (in place of Glog).")
+  ENDIF (NOT EXISTS ${MINIGLOG_INCLUDE_DIR})
+  LIST(APPEND CERES_INCLUDE_DIRS ${MINIGLOG_INCLUDE_DIR})
+  # Output message at standard log level (not the lower STATUS) so that
+  # the message is output in GUI during configuration to warn user.
+  MESSAGE("-- Found Ceres installation compiled with miniglog substitute "
+    "for glog, beware this will likely cause problems if glog is later linked.")
+ELSE (CERES_USES_MINIGLOG)
+  # Append the locations of glog when Ceres was built to the search path hints.
+  LIST(APPEND GLOG_INCLUDE_DIR_HINTS @GLOG_INCLUDE_DIR@)
+  GET_FILENAME_COMPONENT(CERES_BUILD_GLOG_LIBRARY_DIR @GLOG_LIBRARY@ PATH)
+  LIST(APPEND GLOG_LIBRARY_DIR_HINTS ${CERES_BUILD_GLOG_LIBRARY_DIR})
+
+  # Search quietly s/t we control the timing of the error message if not found.
+  FIND_PACKAGE(Glog QUIET)
+  IF (GLOG_FOUND)
+    MESSAGE(STATUS "Found required Ceres dependency: "
+      "Glog in ${GLOG_INCLUDE_DIRS}")
+  ELSE (GLOG_FOUND)
+    CERES_REPORT_NOT_FOUND("Missing required Ceres "
+      "dependency: Glog, please set GLOG_INCLUDE_DIR.")
+  ENDIF (GLOG_FOUND)
+  LIST(APPEND CERES_INCLUDE_DIRS ${GLOG_INCLUDE_DIRS})
+ENDIF (CERES_USES_MINIGLOG)
+
+# Import exported Ceres targets.
+IF (NOT TARGET ceres AND NOT Ceres_BINARY_DIR)
+  INCLUDE(${CURRENT_CONFIG_INSTALL_DIR}/CeresTargets.cmake)
+ENDIF (NOT TARGET ceres AND NOT Ceres_BINARY_DIR)
+# Set the expected XX_LIBRARIES variable for FindPackage().
 SET(CERES_LIBRARIES ceres)
+
+# Set legacy include directories variable for backwards compatibility.
+SET(CERES_INCLUDES ${CERES_INCLUDE_DIRS})
+
+# Reset CMake module path to its state when this script was called.
+SET(CMAKE_MODULE_PATH ${CALLERS_CMAKE_MODULE_PATH})
+
+# As we use CERES_REPORT_NOT_FOUND() to abort, if we reach this point we have
+# found Ceres and all required dependencies.
+MESSAGE(STATUS "Found Ceres version: ${CERES_VERSION} "
+    "installed in: ${CURRENT_ROOT_INSTALL_DIR}")
+
+# Set CERES_FOUND to be equivalent to Ceres_FOUND, which is set to
+# TRUE by FindPackage() if this file is found and run, and after which
+# Ceres_FOUND is not (explicitly, i.e. undefined does not count) set
+# to FALSE.
+SET(CERES_FOUND TRUE)
diff --git a/cmake/CreateCeresConfig.cmake b/cmake/CreateCeresConfig.cmake
new file mode 100644
index 0000000..23f449b
--- /dev/null
+++ b/cmake/CreateCeresConfig.cmake
@@ -0,0 +1,115 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2014 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: alexs.mac@gmail.com (Alex Stewart)
+
+# This must take place outside of CONFIGURE_CERES_CONFIG() in order that
+# we can determine where *this* file is, and thus the relative path to
+# config.h.in.  Inside of CONFIGURE_CERES_CONFIG(), CMAKE_CURRENT_LIST_DIR
+# refers to the caller of CONFIGURE_CERES_CONFIG(), not this file.
+SET(CERES_CONFIG_IN_FILE "${CMAKE_CURRENT_LIST_DIR}/config.h.in")
+
+# CreateCeresConfig.cmake - Create the config.h for Ceres.
+#
+# This function configures the Ceres config.h file based on the current
+# compile options and copies it into the specified location.  It should be
+# called before Ceres is built so that the correct config.h is used when
+# Ceres is compiled.
+#
+# INPUTS:
+#   CURRENT_CERES_COMPILE_OPTIONS: List of currently enabled Ceres compile
+#                                  options. These are compared against the
+#                                  full list of valid options, which are read
+#                                  from config.h.in.  Any options present
+#                                  which are not part of the valid set will
+#                                  invoke an error.  Any valid option present
+#                                  will be enabled in the resulting config.h,
+#                                  all other options will be disabled.
+#
+#   CERES_CONFIG_OUTPUT_DIRECTORY: Path to output directory in which to save
+#                                  the configured config.h.  Typically this
+#                                  will be <src>/include/ceres/internal.
+
+FUNCTION(CREATE_CERES_CONFIG CURRENT_CERES_COMPILE_OPTIONS CERES_CONFIG_OUTPUT_DIRECTORY)
+  # Create the specified output directory if it does not exist.
+  IF (NOT EXISTS "${CERES_CONFIG_OUTPUT_DIRECTORY}")
+    MESSAGE(STATUS "Creating configured Ceres config.h output directory: "
+      "${CERES_CONFIG_OUTPUT_DIRECTORY}")
+    FILE(MAKE_DIRECTORY "${CERES_CONFIG_OUTPUT_DIRECTORY}")
+  ENDIF()
+  IF (EXISTS "${CERES_CONFIG_OUTPUT_DIRECTORY}" AND
+      NOT IS_DIRECTORY "${CERES_CONFIG_OUTPUT_DIRECTORY}")
+    MESSAGE(FATAL_ERROR "Ceres Bug: Specified CERES_CONFIG_OUTPUT_DIRECTORY: "
+      "${CERES_CONFIG_OUTPUT_DIRECTORY} exists, but is not a directory.")
+  ENDIF()
+
+  # Read all possible configurable compile options from config.h.in, this avoids
+  # us having to hard-code in this file what the valid options are.
+  FILE(READ ${CERES_CONFIG_IN_FILE} CERES_CONFIG_IN_CONTENTS)
+  STRING(REGEX MATCHALL "@[^@ $]+@"
+    ALL_CONFIGURABLE_CERES_OPTIONS "${CERES_CONFIG_IN_CONTENTS}")
+  # Removing @ symbols at beginning and end of each option.
+  STRING(REPLACE "@" ""
+    ALL_CONFIGURABLE_CERES_OPTIONS "${ALL_CONFIGURABLE_CERES_OPTIONS}")
+
+  # Ensure that there are no repetitions in the current compile options.
+  LIST(REMOVE_DUPLICATES CURRENT_CERES_COMPILE_OPTIONS)
+
+  FOREACH (CERES_OPTION ${ALL_CONFIGURABLE_CERES_OPTIONS})
+    # Try and find the option in the list of current compile options, if it
+    # is present, then the option is enabled, otherwise it is disabled.
+    LIST(FIND CURRENT_CERES_COMPILE_OPTIONS ${CERES_OPTION} OPTION_ENABLED)
+
+    # list(FIND ..) returns -1 if the element was not in the list, but CMake
+    # interprets if (VAR) to be true if VAR is any non-zero number, even
+    # negative ones, hence we have to explicitly check for >= 0.
+    IF (OPTION_ENABLED GREATER -1)
+      MESSAGE(STATUS "Enabling ${CERES_OPTION} in Ceres config.h")
+      SET(${CERES_OPTION} "#define ${CERES_OPTION}")
+
+      # Remove the item from the list of current options so that we can identify
+      # any options that were in CURRENT_CERES_COMPILE_OPTIONS, but not in
+      # ALL_CONFIGURABLE_CERES_OPTIONS (which is an error).
+      LIST(REMOVE_ITEM CURRENT_CERES_COMPILE_OPTIONS ${CERES_OPTION})
+    ELSE()
+      SET(${CERES_OPTION} "// #define ${CERES_OPTION}")
+    ENDIF()
+  ENDFOREACH()
+
+  # CURRENT_CERES_COMPILE_OPTIONS should now be an empty list, any elements
+  # remaining were not present in ALL_CONFIGURABLE_CERES_OPTIONS read from
+  # config.h.in.
+  IF (CURRENT_CERES_COMPILE_OPTIONS)
+    MESSAGE(FATAL_ERROR "Ceres Bug: CURRENT_CERES_COMPILE_OPTIONS contained "
+      "the following options which were not present in config.h.in: "
+      "${CURRENT_CERES_COMPILE_OPTIONS}")
+  ENDIF()
+
+  CONFIGURE_FILE(${CERES_CONFIG_IN_FILE}
+    "${CERES_CONFIG_OUTPUT_DIRECTORY}/config.h" @ONLY)
+ENDFUNCTION()
diff --git a/cmake/FindCXSparse.cmake b/cmake/FindCXSparse.cmake
new file mode 100644
index 0000000..8c3f368
--- /dev/null
+++ b/cmake/FindCXSparse.cmake
@@ -0,0 +1,204 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2013 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: alexs.mac@gmail.com (Alex Stewart)
+#
+
+# FindCXSparse.cmake - Find CXSparse libraries & dependencies.
+#
+# This module defines the following variables which should be referenced
+# by the caller to use the library.
+#
+# CXSPARSE_FOUND: TRUE iff CXSparse and all dependencies have been found.
+# CXSPARSE_INCLUDE_DIRS: Include directories for CXSparse.
+# CXSPARSE_LIBRARIES: Libraries for CXSparse and all dependencies.
+#
+# CXSPARSE_VERSION: Extracted from cs.h.
+# CXSPARSE_MAIN_VERSION: Equal to 3 if CXSPARSE_VERSION = 3.1.2
+# CXSPARSE_SUB_VERSION: Equal to 1 if CXSPARSE_VERSION = 3.1.2
+# CXSPARSE_SUBSUB_VERSION: Equal to 2 if CXSPARSE_VERSION = 3.1.2
+#
+# The following variables control the behaviour of this module:
+#
+# CXSPARSE_INCLUDE_DIR_HINTS: List of additional directories in which to
+#                             search for CXSparse includes,
+#                             e.g: /timbuktu/include.
+# CXSPARSE_LIBRARY_DIR_HINTS: List of additional directories in which to
+#                             search for CXSparse libraries, e.g: /timbuktu/lib.
+#
+# The following variables are also defined by this module, but in line with
+# CMake recommended FindPackage() module style should NOT be referenced directly
+# by callers (use the plural variables detailed above instead).  These variables
+# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which
+# are NOT re-called (i.e. search for library is not repeated) if these variables
+# are set with valid values _in the CMake cache_. This means that if these
+# variables are set directly in the cache, either by the user in the CMake GUI,
+# or by the user passing -DVAR=VALUE directives to CMake when called (which
+# explicitly defines a cache variable), then they will be used verbatim,
+# bypassing the HINTS variables and other hard-coded search locations.
+#
+# CXSPARSE_INCLUDE_DIR: Include directory for CXSparse, not including the
+#                       include directory of any dependencies.
+# CXSPARSE_LIBRARY: CXSparse library, not including the libraries of any
+#                   dependencies.
+
+# Called if we failed to find CXSparse or any of its required dependencies,
+# unsets all public (designed to be used externally) variables and reports
+# error message at priority depending upon [REQUIRED/QUIET/<NONE>] argument.
+MACRO(CXSPARSE_REPORT_NOT_FOUND REASON_MSG)
+  UNSET(CXSPARSE_FOUND)
+  UNSET(CXSPARSE_INCLUDE_DIRS)
+  UNSET(CXSPARSE_LIBRARIES)
+  # Make results of search visible in the CMake GUI if CXSparse has not
+  # been found so that user does not have to toggle to advanced view.
+  MARK_AS_ADVANCED(CLEAR CXSPARSE_INCLUDE_DIR
+                         CXSPARSE_LIBRARY)
+  # Note <package>_FIND_[REQUIRED/QUIETLY] variables defined by FindPackage()
+  # use the camelcase library name, not uppercase.
+  IF (CXSparse_FIND_QUIETLY)
+    MESSAGE(STATUS "Failed to find CXSparse - " ${REASON_MSG} ${ARGN})
+  ELSEIF (CXSparse_FIND_REQUIRED)
+    MESSAGE(FATAL_ERROR "Failed to find CXSparse - " ${REASON_MSG} ${ARGN})
+  ELSE()
+    # Neither QUIETLY nor REQUIRED, use no priority which emits a message
+    # but continues configuration and allows generation.
+    MESSAGE("-- Failed to find CXSparse - " ${REASON_MSG} ${ARGN})
+  ENDIF ()
+ENDMACRO(CXSPARSE_REPORT_NOT_FOUND)
+
+# Search user-installed locations first, so that we prefer user installs
+# to system installs where both exist.
+#
+# TODO: Add standard Windows search locations for CXSparse.
+LIST(APPEND CXSPARSE_CHECK_INCLUDE_DIRS
+  /usr/local/include
+  /usr/local/homebrew/include # Mac OS X
+  /opt/local/var/macports/software # Mac OS X.
+  /opt/local/include
+  /usr/include)
+LIST(APPEND CXSPARSE_CHECK_LIBRARY_DIRS
+  /usr/local/lib
+  /usr/local/homebrew/lib # Mac OS X.
+  /opt/local/lib
+  /usr/lib)
+
+# Search supplied hint directories first if supplied.
+FIND_PATH(CXSPARSE_INCLUDE_DIR
+  NAMES cs.h
+  PATHS ${CXSPARSE_INCLUDE_DIR_HINTS}
+  ${CXSPARSE_CHECK_INCLUDE_DIRS})
+IF (NOT CXSPARSE_INCLUDE_DIR OR
+    NOT EXISTS ${CXSPARSE_INCLUDE_DIR})
+  CXSPARSE_REPORT_NOT_FOUND(
+    "Could not find CXSparse include directory, set CXSPARSE_INCLUDE_DIR "
+    "to directory containing cs.h")
+ENDIF (NOT CXSPARSE_INCLUDE_DIR OR
+       NOT EXISTS ${CXSPARSE_INCLUDE_DIR})
+
+FIND_LIBRARY(CXSPARSE_LIBRARY NAMES cxsparse
+  PATHS ${CXSPARSE_LIBRARY_DIR_HINTS}
+  ${CXSPARSE_CHECK_LIBRARY_DIRS})
+IF (NOT CXSPARSE_LIBRARY OR
+    NOT EXISTS ${CXSPARSE_LIBRARY})
+  CXSPARSE_REPORT_NOT_FOUND(
+    "Could not find CXSparse library, set CXSPARSE_LIBRARY "
+    "to full path to libcxsparse.")
+ENDIF (NOT CXSPARSE_LIBRARY OR
+       NOT EXISTS ${CXSPARSE_LIBRARY})
+
+# Mark internally as found, then verify. CXSPARSE_REPORT_NOT_FOUND() unsets
+# if called.
+SET(CXSPARSE_FOUND TRUE)
+
+# Extract CXSparse version from cs.h
+IF (CXSPARSE_INCLUDE_DIR)
+  SET(CXSPARSE_VERSION_FILE ${CXSPARSE_INCLUDE_DIR}/cs.h)
+  IF (NOT EXISTS ${CXSPARSE_VERSION_FILE})
+    CXSPARSE_REPORT_NOT_FOUND(
+      "Could not find file: ${CXSPARSE_VERSION_FILE} "
+      "containing version information in CXSparse install located at: "
+      "${CXSPARSE_INCLUDE_DIR}.")
+  ELSE (NOT EXISTS ${CXSPARSE_VERSION_FILE})
+    FILE(READ ${CXSPARSE_INCLUDE_DIR}/cs.h CXSPARSE_VERSION_FILE_CONTENTS)
+
+    STRING(REGEX MATCH "#define CS_VER [0-9]+"
+      CXSPARSE_MAIN_VERSION "${CXSPARSE_VERSION_FILE_CONTENTS}")
+    STRING(REGEX REPLACE "#define CS_VER ([0-9]+)" "\\1"
+      CXSPARSE_MAIN_VERSION "${CXSPARSE_MAIN_VERSION}")
+
+    STRING(REGEX MATCH "#define CS_SUBVER [0-9]+"
+      CXSPARSE_SUB_VERSION "${CXSPARSE_VERSION_FILE_CONTENTS}")
+    STRING(REGEX REPLACE "#define CS_SUBVER ([0-9]+)" "\\1"
+      CXSPARSE_SUB_VERSION "${CXSPARSE_SUB_VERSION}")
+
+    STRING(REGEX MATCH "#define CS_SUBSUB [0-9]+"
+      CXSPARSE_SUBSUB_VERSION "${CXSPARSE_VERSION_FILE_CONTENTS}")
+    STRING(REGEX REPLACE "#define CS_SUBSUB ([0-9]+)" "\\1"
+      CXSPARSE_SUBSUB_VERSION "${CXSPARSE_SUBSUB_VERSION}")
+
+    # This is on a single line s/t CMake does not interpret it as a list of
+    # elements and insert ';' separators which would result in 3.;1.;2 nonsense.
+    SET(CXSPARSE_VERSION "${CXSPARSE_MAIN_VERSION}.${CXSPARSE_SUB_VERSION}.${CXSPARSE_SUBSUB_VERSION}")
+  ENDIF (NOT EXISTS ${CXSPARSE_VERSION_FILE})
+ENDIF (CXSPARSE_INCLUDE_DIR)
+
+# Catch the case when the caller has set CXSPARSE_LIBRARY in the cache / GUI and
+# thus FIND_LIBRARY was not called, but specified library is invalid, otherwise
+# we would report CXSparse as found.
+# TODO: This regex for CXSparse library is pretty primitive, we use lowercase
+#       for comparison to handle Windows using CamelCase library names, could
+#       this check be better?
+STRING(TOLOWER "${CXSPARSE_LIBRARY}" LOWERCASE_CXSPARSE_LIBRARY)
+IF (CXSPARSE_LIBRARY AND
+    EXISTS ${CXSPARSE_LIBRARY} AND
+    NOT "${LOWERCASE_CXSPARSE_LIBRARY}" MATCHES ".*cxsparse[^/]*")
+  CXSPARSE_REPORT_NOT_FOUND(
+    "Caller defined CXSPARSE_LIBRARY: "
+    "${CXSPARSE_LIBRARY} does not match CXSparse.")
+ENDIF (CXSPARSE_LIBRARY AND
+       EXISTS ${CXSPARSE_LIBRARY} AND
+       NOT "${LOWERCASE_CXSPARSE_LIBRARY}" MATCHES ".*cxsparse[^/]*")
+
+# Set standard CMake FindPackage variables if found.
+IF (CXSPARSE_FOUND)
+  SET(CXSPARSE_INCLUDE_DIRS ${CXSPARSE_INCLUDE_DIR})
+  SET(CXSPARSE_LIBRARIES ${CXSPARSE_LIBRARY})
+ENDIF (CXSPARSE_FOUND)
+
+# Handle REQUIRED / QUIET optional arguments and version.
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(CXSparse
+  REQUIRED_VARS CXSPARSE_INCLUDE_DIRS CXSPARSE_LIBRARIES
+  VERSION_VAR CXSPARSE_VERSION)
+
+# Only mark internal variables as advanced if we found CXSparse, otherwise
+# leave them visible in the standard GUI for the user to set manually.
+IF (CXSPARSE_FOUND)
+  MARK_AS_ADVANCED(FORCE CXSPARSE_INCLUDE_DIR
+                         CXSPARSE_LIBRARY)
+ENDIF (CXSPARSE_FOUND)
diff --git a/cmake/FindEigen.cmake b/cmake/FindEigen.cmake
new file mode 100644
index 0000000..2cd3e12
--- /dev/null
+++ b/cmake/FindEigen.cmake
@@ -0,0 +1,160 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2013 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: alexs.mac@gmail.com (Alex Stewart)
+#
+
+# FindEigen.cmake - Find Eigen library, version >= 3.
+#
+# This module defines the following variables:
+#
+# EIGEN_FOUND: TRUE iff Eigen is found.
+# EIGEN_INCLUDE_DIRS: Include directories for Eigen.
+#
+# EIGEN_VERSION: Extracted from Eigen/src/Core/util/Macros.h
+# EIGEN_WORLD_VERSION: Equal to 3 if EIGEN_VERSION = 3.2.0
+# EIGEN_MAJOR_VERSION: Equal to 2 if EIGEN_VERSION = 3.2.0
+# EIGEN_MINOR_VERSION: Equal to 0 if EIGEN_VERSION = 3.2.0
+#
+# The following variables control the behaviour of this module:
+#
+# EIGEN_INCLUDE_DIR_HINTS: List of additional directories in which to
+#                          search for eigen includes, e.g: /timbuktu/eigen3.
+#
+# The following variables are also defined by this module, but in line with
+# CMake recommended FindPackage() module style should NOT be referenced directly
+# by callers (use the plural variables detailed above instead).  These variables
+# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which
+# are NOT re-called (i.e. search for library is not repeated) if these variables
+# are set with valid values _in the CMake cache_. This means that if these
+# variables are set directly in the cache, either by the user in the CMake GUI,
+# or by the user passing -DVAR=VALUE directives to CMake when called (which
+# explicitly defines a cache variable), then they will be used verbatim,
+# bypassing the HINTS variables and other hard-coded search locations.
+#
+# EIGEN_INCLUDE_DIR: Include directory for Eigen, not including the
+#                    include directory of any dependencies.
+
+# Called if we failed to find Eigen or any of its required dependencies,
+# unsets all public (designed to be used externally) variables and reports
+# error message at priority depending upon [REQUIRED/QUIET/<NONE>] argument.
+MACRO(EIGEN_REPORT_NOT_FOUND REASON_MSG)
+  UNSET(EIGEN_FOUND)
+  UNSET(EIGEN_INCLUDE_DIRS)
+  # Make results of search visible in the CMake GUI if Eigen has not
+  # been found so that user does not have to toggle to advanced view.
+  MARK_AS_ADVANCED(CLEAR EIGEN_INCLUDE_DIR)
+  # Note <package>_FIND_[REQUIRED/QUIETLY] variables defined by FindPackage()
+  # use the camelcase library name, not uppercase.
+  IF (Eigen_FIND_QUIETLY)
+    MESSAGE(STATUS "Failed to find Eigen - " ${REASON_MSG} ${ARGN})
+  ELSEIF (Eigen_FIND_REQUIRED)
+    MESSAGE(FATAL_ERROR "Failed to find Eigen - " ${REASON_MSG} ${ARGN})
+  ELSE()
+    # Neither QUIETLY nor REQUIRED, use no priority which emits a message
+    # but continues configuration and allows generation.
+    MESSAGE("-- Failed to find Eigen - " ${REASON_MSG} ${ARGN})
+  ENDIF ()
+ENDMACRO(EIGEN_REPORT_NOT_FOUND)
+
+# Search user-installed locations first, so that we prefer user installs
+# to system installs where both exist.
+#
+# TODO: Add standard Windows search locations for Eigen.
+LIST(APPEND EIGEN_CHECK_INCLUDE_DIRS
+  /usr/local/include/eigen3
+  /usr/local/homebrew/include/eigen3 # Mac OS X
+  /opt/local/var/macports/software/eigen3 # Mac OS X.
+  /opt/local/include/eigen3
+  /usr/include/eigen3)
+
+# Search supplied hint directories first if supplied.
+FIND_PATH(EIGEN_INCLUDE_DIR
+  NAMES Eigen/Core
+  PATHS ${EIGEN_INCLUDE_DIR_HINTS}
+  ${EIGEN_CHECK_INCLUDE_DIRS})
+IF (NOT EIGEN_INCLUDE_DIR OR
+    NOT EXISTS ${EIGEN_INCLUDE_DIR})
+  EIGEN_REPORT_NOT_FOUND(
+    "Could not find eigen3 include directory, set EIGEN_INCLUDE_DIR to "
+    "path to eigen3 include directory, e.g. /usr/local/include/eigen3.")
+ENDIF (NOT EIGEN_INCLUDE_DIR OR
+       NOT EXISTS ${EIGEN_INCLUDE_DIR})
+
+# Mark internally as found, then verify. EIGEN_REPORT_NOT_FOUND() unsets
+# if called.
+SET(EIGEN_FOUND TRUE)
+
+# Extract Eigen version from Eigen/src/Core/util/Macros.h
+IF (EIGEN_INCLUDE_DIR)
+  SET(EIGEN_VERSION_FILE ${EIGEN_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h)
+  IF (NOT EXISTS ${EIGEN_VERSION_FILE})
+    EIGEN_REPORT_NOT_FOUND(
+      "Could not find file: ${EIGEN_VERSION_FILE} "
+      "containing version information in Eigen install located at: "
+      "${EIGEN_INCLUDE_DIR}.")
+  ELSE (NOT EXISTS ${EIGEN_VERSION_FILE})
+    FILE(READ ${EIGEN_VERSION_FILE} EIGEN_VERSION_FILE_CONTENTS)
+
+    STRING(REGEX MATCH "#define EIGEN_WORLD_VERSION [0-9]+"
+      EIGEN_WORLD_VERSION "${EIGEN_VERSION_FILE_CONTENTS}")
+    STRING(REGEX REPLACE "#define EIGEN_WORLD_VERSION ([0-9]+)" "\\1"
+      EIGEN_WORLD_VERSION "${EIGEN_WORLD_VERSION}")
+
+    STRING(REGEX MATCH "#define EIGEN_MAJOR_VERSION [0-9]+"
+      EIGEN_MAJOR_VERSION "${EIGEN_VERSION_FILE_CONTENTS}")
+    STRING(REGEX REPLACE "#define EIGEN_MAJOR_VERSION ([0-9]+)" "\\1"
+      EIGEN_MAJOR_VERSION "${EIGEN_MAJOR_VERSION}")
+
+    STRING(REGEX MATCH "#define EIGEN_MINOR_VERSION [0-9]+"
+      EIGEN_MINOR_VERSION "${EIGEN_VERSION_FILE_CONTENTS}")
+    STRING(REGEX REPLACE "#define EIGEN_MINOR_VERSION ([0-9]+)" "\\1"
+      EIGEN_MINOR_VERSION "${EIGEN_MINOR_VERSION}")
+
+    # This is on a single line s/t CMake does not interpret it as a list of
+    # elements and insert ';' separators which would result in 3.;2.;0 nonsense.
+    SET(EIGEN_VERSION "${EIGEN_WORLD_VERSION}.${EIGEN_MAJOR_VERSION}.${EIGEN_MINOR_VERSION}")
+  ENDIF (NOT EXISTS ${EIGEN_VERSION_FILE})
+ENDIF (EIGEN_INCLUDE_DIR)
+
+# Set standard CMake FindPackage variables if found.
+IF (EIGEN_FOUND)
+  SET(EIGEN_INCLUDE_DIRS ${EIGEN_INCLUDE_DIR})
+ENDIF (EIGEN_FOUND)
+
+# Handle REQUIRED / QUIET optional arguments and version.
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(Eigen
+  REQUIRED_VARS EIGEN_INCLUDE_DIRS
+  VERSION_VAR EIGEN_VERSION)
+
+# Only mark internal variables as advanced if we found Eigen, otherwise
+# leave it visible in the standard GUI for the user to set manually.
+IF (EIGEN_FOUND)
+  MARK_AS_ADVANCED(FORCE EIGEN_INCLUDE_DIR)
+ENDIF (EIGEN_FOUND)
diff --git a/cmake/FindGflags.cmake b/cmake/FindGflags.cmake
new file mode 100644
index 0000000..af50220
--- /dev/null
+++ b/cmake/FindGflags.cmake
@@ -0,0 +1,172 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2013 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: alexs.mac@gmail.com (Alex Stewart)
+#
+
+# FindGflags.cmake - Find Google gflags commandline flags library.
+#
+# This module defines the following variables:
+#
+# GFLAGS_FOUND: TRUE iff gflags is found.
+# GFLAGS_INCLUDE_DIRS: Include directories for gflags.
+# GFLAGS_LIBRARIES: Libraries required to link gflags.
+#
+# The following variables control the behaviour of this module:
+#
+# GFLAGS_INCLUDE_DIR_HINTS: List of additional directories in which to
+#                           search for gflags includes, e.g: /timbuktu/include.
+# GFLAGS_LIBRARY_DIR_HINTS: List of additional directories in which to
+#                           search for gflags libraries, e.g: /timbuktu/lib.
+#
+# The following variables are also defined by this module, but in line with
+# CMake recommended FindPackage() module style should NOT be referenced directly
+# by callers (use the plural variables detailed above instead).  These variables
+# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which
+# are NOT re-called (i.e. search for library is not repeated) if these variables
+# are set with valid values _in the CMake cache_. This means that if these
+# variables are set directly in the cache, either by the user in the CMake GUI,
+# or by the user passing -DVAR=VALUE directives to CMake when called (which
+# explicitly defines a cache variable), then they will be used verbatim,
+# bypassing the HINTS variables and other hard-coded search locations.
+#
+# GFLAGS_INCLUDE_DIR: Include directory for gflags, not including the
+#                     include directory of any dependencies.
+# GFLAGS_LIBRARY: gflags library, not including the libraries of any
+#                 dependencies.
+
+# Called if we failed to find gflags or any of its required dependencies,
+# unsets all public (designed to be used externally) variables and reports
+# error message at priority depending upon [REQUIRED/QUIET/<NONE>] argument.
+MACRO(GFLAGS_REPORT_NOT_FOUND REASON_MSG)
+  UNSET(GFLAGS_FOUND)
+  UNSET(GFLAGS_INCLUDE_DIRS)
+  UNSET(GFLAGS_LIBRARIES)
+  # Make results of search visible in the CMake GUI if gflags has not
+  # been found so that user does not have to toggle to advanced view.
+  MARK_AS_ADVANCED(CLEAR GFLAGS_INCLUDE_DIR
+                         GFLAGS_LIBRARY)
+  # Note <package>_FIND_[REQUIRED/QUIETLY] variables defined by FindPackage()
+  # use the camelcase library name, not uppercase.
+  IF (Gflags_FIND_QUIETLY)
+    MESSAGE(STATUS "Failed to find gflags - " ${REASON_MSG} ${ARGN})
+  ELSEIF (Gflags_FIND_REQUIRED)
+    MESSAGE(FATAL_ERROR "Failed to find gflags - " ${REASON_MSG} ${ARGN})
+  ELSE()
+    # Neither QUIETLY nor REQUIRED, use no priority which emits a message
+    # but continues configuration and allows generation.
+    MESSAGE("-- Failed to find gflags - " ${REASON_MSG} ${ARGN})
+  ENDIF ()
+ENDMACRO(GFLAGS_REPORT_NOT_FOUND)
+
+# Search user-installed locations first, so that we prefer user installs
+# to system installs where both exist.
+#
+# TODO: Add standard Windows search locations for gflags.
+LIST(APPEND GFLAGS_CHECK_INCLUDE_DIRS
+  /usr/local/include
+  /usr/local/homebrew/include # Mac OS X
+  /opt/local/var/macports/software # Mac OS X.
+  /opt/local/include
+  /usr/include)
+LIST(APPEND GFLAGS_CHECK_LIBRARY_DIRS
+  /usr/local/lib
+  /usr/local/homebrew/lib # Mac OS X.
+  /opt/local/lib
+  /usr/lib)
+
+# Search supplied hint directories first if supplied.
+FIND_PATH(GFLAGS_INCLUDE_DIR
+  NAMES gflags/gflags.h
+  PATHS ${GFLAGS_INCLUDE_DIR_HINTS}
+  ${GFLAGS_CHECK_INCLUDE_DIRS})
+IF (NOT GFLAGS_INCLUDE_DIR OR
+    NOT EXISTS ${GFLAGS_INCLUDE_DIR})
+  GFLAGS_REPORT_NOT_FOUND(
+    "Could not find gflags include directory, set GFLAGS_INCLUDE_DIR "
+    "to directory containing gflags/gflags.h")
+ENDIF (NOT GFLAGS_INCLUDE_DIR OR
+       NOT EXISTS ${GFLAGS_INCLUDE_DIR})
+
+FIND_LIBRARY(GFLAGS_LIBRARY NAMES gflags
+  PATHS ${GFLAGS_LIBRARY_DIR_HINTS}
+  ${GFLAGS_CHECK_LIBRARY_DIRS})
+IF (NOT GFLAGS_LIBRARY OR
+    NOT EXISTS ${GFLAGS_LIBRARY})
+  GFLAGS_REPORT_NOT_FOUND(
+    "Could not find gflags library, set GFLAGS_LIBRARY "
+    "to full path to libgflags.")
+ENDIF (NOT GFLAGS_LIBRARY OR
+       NOT EXISTS ${GFLAGS_LIBRARY})
+
+# Mark internally as found, then verify. GFLAGS_REPORT_NOT_FOUND() unsets
+# if called.
+SET(GFLAGS_FOUND TRUE)
+
+# gflags does not seem to provide any record of the version in its
+# source tree, thus cannot extract version.
+
+# Catch case when caller has set GFLAGS_INCLUDE_DIR in the cache / GUI and
+# thus FIND_[PATH/LIBRARY] are not called, but specified locations are
+# invalid, otherwise we would report the library as found.
+IF (GFLAGS_INCLUDE_DIR AND
+    NOT EXISTS ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h)
+  GFLAGS_REPORT_NOT_FOUND(
+    "Caller defined GFLAGS_INCLUDE_DIR:"
+    " ${GFLAGS_INCLUDE_DIR} does not contain gflags/gflags.h header.")
+ENDIF (GFLAGS_INCLUDE_DIR AND
+       NOT EXISTS ${GFLAGS_INCLUDE_DIR}/gflags/gflags.h)
+# TODO: This regex for gflags library is pretty primitive, we use lowercase
+#       for comparison to handle Windows using CamelCase library names, could
+#       this check be better?
+STRING(TOLOWER "${GFLAGS_LIBRARY}" LOWERCASE_GFLAGS_LIBRARY)
+IF (GFLAGS_LIBRARY AND
+    NOT "${LOWERCASE_GFLAGS_LIBRARY}" MATCHES ".*gflags[^/]*")
+  GFLAGS_REPORT_NOT_FOUND(
+    "Caller defined GFLAGS_LIBRARY: "
+    "${GFLAGS_LIBRARY} does not match gflags.")
+ENDIF (GFLAGS_LIBRARY AND
+       NOT "${LOWERCASE_GFLAGS_LIBRARY}" MATCHES ".*gflags[^/]*")
+
+# Set standard CMake FindPackage variables if found.
+IF (GFLAGS_FOUND)
+  SET(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR})
+  SET(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY})
+ENDIF (GFLAGS_FOUND)
+
+# Handle REQUIRED / QUIET optional arguments.
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(Gflags DEFAULT_MSG
+  GFLAGS_INCLUDE_DIRS GFLAGS_LIBRARIES)
+
+# Only mark internal variables as advanced if we found gflags, otherwise
+# leave them visible in the standard GUI for the user to set manually.
+IF (GFLAGS_FOUND)
+  MARK_AS_ADVANCED(FORCE GFLAGS_INCLUDE_DIR
+                         GFLAGS_LIBRARY)
+ENDIF (GFLAGS_FOUND)
diff --git a/cmake/FindGlog.cmake b/cmake/FindGlog.cmake
new file mode 100644
index 0000000..0dde218
--- /dev/null
+++ b/cmake/FindGlog.cmake
@@ -0,0 +1,172 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2013 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: alexs.mac@gmail.com (Alex Stewart)
+#
+
+# FindGlog.cmake - Find Google glog logging library.
+#
+# This module defines the following variables:
+#
+# GLOG_FOUND: TRUE iff glog is found.
+# GLOG_INCLUDE_DIRS: Include directories for glog.
+# GLOG_LIBRARIES: Libraries required to link glog.
+#
+# The following variables control the behaviour of this module:
+#
+# GLOG_INCLUDE_DIR_HINTS: List of additional directories in which to
+#                         search for glog includes, e.g: /timbuktu/include.
+# GLOG_LIBRARY_DIR_HINTS: List of additional directories in which to
+#                         search for glog libraries, e.g: /timbuktu/lib.
+#
+# The following variables are also defined by this module, but in line with
+# CMake recommended FindPackage() module style should NOT be referenced directly
+# by callers (use the plural variables detailed above instead).  These variables
+# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which
+# are NOT re-called (i.e. search for library is not repeated) if these variables
+# are set with valid values _in the CMake cache_. This means that if these
+# variables are set directly in the cache, either by the user in the CMake GUI,
+# or by the user passing -DVAR=VALUE directives to CMake when called (which
+# explicitly defines a cache variable), then they will be used verbatim,
+# bypassing the HINTS variables and other hard-coded search locations.
+#
+# GLOG_INCLUDE_DIR: Include directory for glog, not including the
+#                   include directory of any dependencies.
+# GLOG_LIBRARY: glog library, not including the libraries of any
+#               dependencies.
+
+# Called if we failed to find glog or any of its required dependencies,
+# unsets all public (designed to be used externally) variables and reports
+# error message at priority depending upon [REQUIRED/QUIET/<NONE>] argument.
+MACRO(GLOG_REPORT_NOT_FOUND REASON_MSG)
+  UNSET(GLOG_FOUND)
+  UNSET(GLOG_INCLUDE_DIRS)
+  UNSET(GLOG_LIBRARIES)
+  # Make results of search visible in the CMake GUI if glog has not
+  # been found so that user does not have to toggle to advanced view.
+  MARK_AS_ADVANCED(CLEAR GLOG_INCLUDE_DIR
+                         GLOG_LIBRARY)
+  # Note <package>_FIND_[REQUIRED/QUIETLY] variables defined by FindPackage()
+  # use the camelcase library name, not uppercase.
+  IF (Glog_FIND_QUIETLY)
+    MESSAGE(STATUS "Failed to find glog - " ${REASON_MSG} ${ARGN})
+  ELSEIF (Glog_FIND_REQUIRED)
+    MESSAGE(FATAL_ERROR "Failed to find glog - " ${REASON_MSG} ${ARGN})
+  ELSE()
+    # Neither QUIETLY nor REQUIRED, use no priority which emits a message
+    # but continues configuration and allows generation.
+    MESSAGE("-- Failed to find glog - " ${REASON_MSG} ${ARGN})
+  ENDIF ()
+ENDMACRO(GLOG_REPORT_NOT_FOUND)
+
+# Search user-installed locations first, so that we prefer user installs
+# to system installs where both exist.
+#
+# TODO: Add standard Windows search locations for glog.
+LIST(APPEND GLOG_CHECK_INCLUDE_DIRS
+  /usr/local/include
+  /usr/local/homebrew/include # Mac OS X
+  /opt/local/var/macports/software # Mac OS X.
+  /opt/local/include
+  /usr/include)
+LIST(APPEND GLOG_CHECK_LIBRARY_DIRS
+  /usr/local/lib
+  /usr/local/homebrew/lib # Mac OS X.
+  /opt/local/lib
+  /usr/lib)
+
+# Search supplied hint directories first if supplied.
+FIND_PATH(GLOG_INCLUDE_DIR
+  NAMES glog/logging.h
+  PATHS ${GLOG_INCLUDE_DIR_HINTS}
+  ${GLOG_CHECK_INCLUDE_DIRS})
+IF (NOT GLOG_INCLUDE_DIR OR
+    NOT EXISTS ${GLOG_INCLUDE_DIR})
+  GLOG_REPORT_NOT_FOUND(
+    "Could not find glog include directory, set GLOG_INCLUDE_DIR "
+    "to directory containing glog/logging.h")
+ENDIF (NOT GLOG_INCLUDE_DIR OR
+       NOT EXISTS ${GLOG_INCLUDE_DIR})
+
+FIND_LIBRARY(GLOG_LIBRARY NAMES glog
+  PATHS ${GLOG_LIBRARY_DIR_HINTS}
+  ${GLOG_CHECK_LIBRARY_DIRS})
+IF (NOT GLOG_LIBRARY OR
+    NOT EXISTS ${GLOG_LIBRARY})
+  GLOG_REPORT_NOT_FOUND(
+    "Could not find glog library, set GLOG_LIBRARY "
+    "to full path to libglog.")
+ENDIF (NOT GLOG_LIBRARY OR
+       NOT EXISTS ${GLOG_LIBRARY})
+
+# Mark internally as found, then verify. GLOG_REPORT_NOT_FOUND() unsets
+# if called.
+SET(GLOG_FOUND TRUE)
+
+# Glog does not seem to provide any record of the version in its
+# source tree, thus cannot extract version.
+
+# Catch case when caller has set GLOG_INCLUDE_DIR in the cache / GUI and
+# thus FIND_[PATH/LIBRARY] are not called, but specified locations are
+# invalid, otherwise we would report the library as found.
+IF (GLOG_INCLUDE_DIR AND
+    NOT EXISTS ${GLOG_INCLUDE_DIR}/glog/logging.h)
+  GLOG_REPORT_NOT_FOUND(
+    "Caller defined GLOG_INCLUDE_DIR:"
+    " ${GLOG_INCLUDE_DIR} does not contain glog/logging.h header.")
+ENDIF (GLOG_INCLUDE_DIR AND
+       NOT EXISTS ${GLOG_INCLUDE_DIR}/glog/logging.h)
+# TODO: This regex for glog library is pretty primitive, we use lowercase
+#       for comparison to handle Windows using CamelCase library names, could
+#       this check be better?
+STRING(TOLOWER "${GLOG_LIBRARY}" LOWERCASE_GLOG_LIBRARY)
+IF (GLOG_LIBRARY AND
+    NOT "${LOWERCASE_GLOG_LIBRARY}" MATCHES ".*glog[^/]*")
+  GLOG_REPORT_NOT_FOUND(
+    "Caller defined GLOG_LIBRARY: "
+    "${GLOG_LIBRARY} does not match glog.")
+ENDIF (GLOG_LIBRARY AND
+       NOT "${LOWERCASE_GLOG_LIBRARY}" MATCHES ".*glog[^/]*")
+
+# Set standard CMake FindPackage variables if found.
+IF (GLOG_FOUND)
+  SET(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR})
+  SET(GLOG_LIBRARIES ${GLOG_LIBRARY})
+ENDIF (GLOG_FOUND)
+
+# Handle REQUIRED / QUIET optional arguments.
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(Glog DEFAULT_MSG
+  GLOG_INCLUDE_DIRS GLOG_LIBRARIES)
+
+# Only mark internal variables as advanced if we found glog, otherwise
+# leave them visible in the standard GUI for the user to set manually.
+IF (GLOG_FOUND)
+  MARK_AS_ADVANCED(FORCE GLOG_INCLUDE_DIR
+                         GLOG_LIBRARY)
+ENDIF (GLOG_FOUND)
diff --git a/cmake/FindSharedPtr.cmake b/cmake/FindSharedPtr.cmake
new file mode 100644
index 0000000..3d2ba6c
--- /dev/null
+++ b/cmake/FindSharedPtr.cmake
@@ -0,0 +1,99 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2014 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: sergey.vfx@gmail.com (Sergey Sharybin)
+#
+
+# FindSharedPtr.cmake - Find shared pointer header and namespace.
+#
+# This module defines the following variables:
+#
+# SHARED_PTR_FOUND: TRUE if shared_ptr found.
+# SHARED_PTR_TR1_MEMORY_HEADER: TRUE if <tr1/memory> header is to be used
+# for the shared_ptr object, otherwise use <memory>.
+# SHARED_PTR_TR1_NAMESPACE: TRUE if shared_ptr is defined in std::tr1 namespace,
+# otherwise it's assumed to be defined in std namespace.
+
+MACRO(FIND_SHARED_PTR)
+  SET(SHARED_PTR_FOUND FALSE)
+  CHECK_INCLUDE_FILE_CXX(memory HAVE_STD_MEMORY_HEADER)
+  IF (HAVE_STD_MEMORY_HEADER)
+    # Finding the memory header doesn't mean that shared_ptr is in std
+    # namespace.
+    #
+    # In particular, MSVC 2008 has shared_ptr declared in std::tr1.  In
+    # order to support this, we do an extra check to see which namespace
+    # should be used.
+    INCLUDE(CheckCXXSourceCompiles)
+    CHECK_CXX_SOURCE_COMPILES("#include <memory>
+                               int main() {
+                                 std::shared_ptr<int> int_ptr;
+                                 return 0;
+                               }"
+                              HAVE_SHARED_PTR_IN_STD_NAMESPACE)
+
+    IF (HAVE_SHARED_PTR_IN_STD_NAMESPACE)
+      MESSAGE("-- Found shared_ptr in std namespace using <memory> header.")
+      SET(SHARED_PTR_FOUND TRUE)
+    ELSE (HAVE_SHARED_PTR_IN_STD_NAMESPACE)
+      CHECK_CXX_SOURCE_COMPILES("#include <memory>
+                                 int main() {
+                                   std::tr1::shared_ptr<int> int_ptr;
+                                   return 0;
+                                 }"
+                                HAVE_SHARED_PTR_IN_TR1_NAMESPACE)
+      IF (HAVE_SHARED_PTR_IN_TR1_NAMESPACE)
+        MESSAGE("-- Found shared_ptr in std::tr1 namespace using <memory> header.")
+        SET(SHARED_PTR_TR1_NAMESPACE TRUE)
+        SET(SHARED_PTR_FOUND TRUE)
+      ENDIF (HAVE_SHARED_PTR_IN_TR1_NAMESPACE)
+    ENDIF (HAVE_SHARED_PTR_IN_STD_NAMESPACE)
+  ENDIF (HAVE_STD_MEMORY_HEADER)
+
+  IF (NOT SHARED_PTR_FOUND)
+    # Further, gcc defines shared_ptr in std::tr1 namespace and
+    # <tr1/memory> is to be included for this. And what makes things
+    # even more tricky is that gcc does have <memory> header, so
+    # all the checks above wouldn't find shared_ptr.
+    CHECK_INCLUDE_FILE_CXX("tr1/memory" HAVE_TR1_MEMORY_HEADER)
+    IF (HAVE_TR1_MEMORY_HEADER)
+      CHECK_CXX_SOURCE_COMPILES("#include <tr1/memory>
+                                 int main() {
+                                   std::tr1::shared_ptr<int> int_ptr;
+                                   return 0;
+                                 }"
+                                HAVE_SHARED_PTR_IN_TR1_NAMESPACE_FROM_TR1_MEMORY_HEADER)
+      IF (HAVE_SHARED_PTR_IN_TR1_NAMESPACE_FROM_TR1_MEMORY_HEADER)
+        MESSAGE("-- Found shared_ptr in std::tr1 namespace using <tr1/memory> header.")
+          SET(SHARED_PTR_TR1_MEMORY_HEADER TRUE)
+          SET(SHARED_PTR_TR1_NAMESPACE TRUE)
+        SET(SHARED_PTR_FOUND TRUE)
+      ENDIF (HAVE_SHARED_PTR_IN_TR1_NAMESPACE_FROM_TR1_MEMORY_HEADER)
+    ENDIF (HAVE_TR1_MEMORY_HEADER)
+  ENDIF (NOT SHARED_PTR_FOUND)
+ENDMACRO(FIND_SHARED_PTR)
diff --git a/cmake/FindSuiteSparse.cmake b/cmake/FindSuiteSparse.cmake
new file mode 100644
index 0000000..05665a6
--- /dev/null
+++ b/cmake/FindSuiteSparse.cmake
@@ -0,0 +1,628 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2013 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: alexs.mac@gmail.com (Alex Stewart)
+#
+
+# FindSuiteSparse.cmake - Find SuiteSparse libraries & dependencies.
+#
+# This module defines the following variables:
+#
+# SUITESPARSE_FOUND: TRUE iff SuiteSparse and all dependencies have been found.
+# SUITESPARSE_INCLUDE_DIRS: Include directories for all SuiteSparse components.
+# SUITESPARSE_LIBRARIES: Libraries for all SuiteSparse component libraries and
+#                        dependencies.
+# SUITESPARSE_VERSION: Extracted from UFconfig.h (<= v3) or
+#                      SuiteSparse_config.h (>= v4).
+# SUITESPARSE_MAIN_VERSION: Equal to 4 if SUITESPARSE_VERSION = 4.2.1
+# SUITESPARSE_SUB_VERSION: Equal to 2 if SUITESPARSE_VERSION = 4.2.1
+# SUITESPARSE_SUBSUB_VERSION: Equal to 1 if SUITESPARSE_VERSION = 4.2.1
+#
+# SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION: TRUE iff running
+#     on Ubuntu, SUITESPARSE_VERSION is 3.4.0 and found SuiteSparse is a system
+#     install, in which case found version of SuiteSparse cannot be used to link
+#     a shared library due to a bug (static linking is unaffected).
+#
+# The following variables control the behaviour of this module:
+#
+# SUITESPARSE_INCLUDE_DIR_HINTS: List of additional directories in which to
+#                                search for SuiteSparse includes,
+#                                e.g: /timbuktu/include.
+# SUITESPARSE_LIBRARY_DIR_HINTS: List of additional directories in which to
+#                                search for SuiteSparse libraries,
+#                                e.g: /timbuktu/lib.
+#
+# The following variables define the presence / includes & libraries for the
+# SuiteSparse components searched for, the SUITESPARSE_XX variables are the
+# union of the variables for all components.
+#
+# == Symmetric Approximate Minimum Degree (AMD)
+# AMD_FOUND
+# AMD_INCLUDE_DIR
+# AMD_LIBRARY
+#
+# == Constrained Approximate Minimum Degree (CAMD)
+# CAMD_FOUND
+# CAMD_INCLUDE_DIR
+# CAMD_LIBRARY
+#
+# == Column Approximate Minimum Degree (COLAMD)
+# COLAMD_FOUND
+# COLAMD_INCLUDE_DIR
+# COLAMD_LIBRARY
+#
+# Constrained Column Approximate Minimum Degree (CCOLAMD)
+# CCOLAMD_FOUND
+# CCOLAMD_INCLUDE_DIR
+# CCOLAMD_LIBRARY
+#
+# == Sparse Supernodal Cholesky Factorization and Update/Downdate (CHOLMOD)
+# CHOLMOD_FOUND
+# CHOLMOD_INCLUDE_DIR
+# CHOLMOD_LIBRARY
+#
+# == Multifrontal Sparse QR (SuiteSparseQR)
+# SUITESPARSEQR_FOUND
+# SUITESPARSEQR_INCLUDE_DIR
+# SUITESPARSEQR_LIBRARY
+#
+# == Common configuration for all but CSparse (SuiteSparse version >= 4).
+# SUITESPARSE_CONFIG_FOUND
+# SUITESPARSE_CONFIG_INCLUDE_DIR
+# SUITESPARSE_CONFIG_LIBRARY
+#
+# == Common configuration for all but CSparse (SuiteSparse version < 4).
+# UFCONFIG_FOUND
+# UFCONFIG_INCLUDE_DIR
+#
+# Optional SuiteSparse Dependencies:
+#
+# == Serial Graph Partitioning and Fill-reducing Matrix Ordering (METIS)
+# METIS_FOUND
+# METIS_LIBRARY
+#
+# == Intel Thread Building Blocks (TBB)
+# TBB_FOUND
+# TBB_LIBRARIES
+
+# Called if we failed to find SuiteSparse or any of its required dependencies,
+# unsets all public (designed to be used externally) variables and reports
+# error message at priority depending upon [REQUIRED/QUIET/<NONE>] argument.
+MACRO(SUITESPARSE_REPORT_NOT_FOUND REASON_MSG)
+  UNSET(SUITESPARSE_FOUND)
+  UNSET(SUITESPARSE_INCLUDE_DIRS)
+  UNSET(SUITESPARSE_LIBRARIES)
+  UNSET(SUITESPARSE_VERSION)
+  UNSET(SUITESPARSE_MAIN_VERSION)
+  UNSET(SUITESPARSE_SUB_VERSION)
+  UNSET(SUITESPARSE_SUBSUB_VERSION)
+  # Do NOT unset SUITESPARSE_FOUND_REQUIRED_VARS here, as it is used by
+  # FindPackageHandleStandardArgs() to generate the automatic error message on
+  # failure which highlights which components are missing.
+
+  # Note <package>_FIND_[REQUIRED/QUIETLY] variables defined by FindPackage()
+  # use the camelcase library name, not uppercase.
+  IF (SuiteSparse_FIND_QUIETLY)
+    MESSAGE(STATUS "Failed to find SuiteSparse - " ${REASON_MSG} ${ARGN})
+  ELSEIF (SuiteSparse_FIND_REQUIRED)
+    MESSAGE(FATAL_ERROR "Failed to find SuiteSparse - " ${REASON_MSG} ${ARGN})
+  ELSE()
+    # Neither QUIETLY nor REQUIRED, use no priority which emits a message
+    # but continues configuration and allows generation.
+    MESSAGE("-- Failed to find SuiteSparse - " ${REASON_MSG} ${ARGN})
+  ENDIF (SuiteSparse_FIND_QUIETLY)
+
+  # Do not call RETURN(), s/t we keep processing if not called with REQUIRED.
+ENDMACRO(SUITESPARSE_REPORT_NOT_FOUND)
+
+# Specify search directories for include files and libraries (this is the union
+# of the search directories for all OSs).  Search user-specified hint
+# directories first if supplied, and search user-installed locations first
+# so that we prefer user installs to system installs where both exist.
+LIST(APPEND SUITESPARSE_CHECK_INCLUDE_DIRS
+  ${SUITESPARSE_INCLUDE_DIR_HINTS}
+  /opt/local/include
+  /opt/local/include/ufsparse # Mac OS X
+  /usr/local/homebrew/include # Mac OS X
+  /usr/local/include
+  /usr/local/include/suitesparse
+  /usr/include/suitesparse # Ubuntu
+  /usr/include)
+LIST(APPEND SUITESPARSE_CHECK_LIBRARY_DIRS
+  ${SUITESPARSE_LIBRARY_DIR_HINTS}
+  /opt/local/lib
+  /opt/local/lib/ufsparse # Mac OS X
+  /usr/local/homebrew/lib # Mac OS X
+  /usr/local/lib
+  /usr/local/lib/suitesparse
+  /usr/lib/suitesparse # Ubuntu
+  /usr/lib)
+
+# Given the number of components of SuiteSparse, and to ensure that the
+# automatic failure message generated by FindPackageHandleStandardArgs()
+# when not all required components are found is helpful, we maintain a list
+# of all variables that must be defined for SuiteSparse to be considered found.
+UNSET(SUITESPARSE_FOUND_REQUIRED_VARS)
+
+# BLAS.
+FIND_PACKAGE(BLAS QUIET)
+IF (NOT BLAS_FOUND)
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find BLAS library (required for SuiteSparse).")
+ENDIF (NOT BLAS_FOUND)
+LIST(APPEND SUITESPARSE_FOUND_REQUIRED_VARS BLAS_FOUND)
+
+# LAPACK.
+FIND_PACKAGE(LAPACK QUIET)
+IF (NOT LAPACK_FOUND)
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find LAPACK library (required for SuiteSparse).")
+ENDIF (NOT LAPACK_FOUND)
+LIST(APPEND SUITESPARSE_FOUND_REQUIRED_VARS LAPACK_FOUND)
+
+# AMD.
+SET(AMD_FOUND TRUE)
+LIST(APPEND SUITESPARSE_FOUND_REQUIRED_VARS AMD_FOUND)
+FIND_LIBRARY(AMD_LIBRARY NAMES amd
+  PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+IF (EXISTS ${AMD_LIBRARY})
+  MESSAGE(STATUS "Found AMD library: ${AMD_LIBRARY}")
+ELSE (EXISTS ${AMD_LIBRARY})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find AMD library (required SuiteSparse component).")
+  SET(AMD_FOUND FALSE)
+ENDIF (EXISTS ${AMD_LIBRARY})
+MARK_AS_ADVANCED(AMD_LIBRARY)
+
+FIND_PATH(AMD_INCLUDE_DIR NAMES amd.h
+  PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS})
+IF (EXISTS ${AMD_INCLUDE_DIR})
+  MESSAGE(STATUS "Found AMD header in: ${AMD_INCLUDE_DIR}")
+ELSE (EXISTS ${AMD_INCLUDE_DIR})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find AMD header (required SuiteSparse component).")
+  SET(AMD_FOUND FALSE)
+ENDIF (EXISTS ${AMD_INCLUDE_DIR})
+MARK_AS_ADVANCED(AMD_INCLUDE_DIR)
+
+# CAMD.
+SET(CAMD_FOUND TRUE)
+LIST(APPEND SUITESPARSE_FOUND_REQUIRED_VARS CAMD_FOUND)
+FIND_LIBRARY(CAMD_LIBRARY NAMES camd
+  PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+IF (EXISTS ${CAMD_LIBRARY})
+  MESSAGE(STATUS "Found CAMD library: ${CAMD_LIBRARY}")
+ELSE (EXISTS ${CAMD_LIBRARY})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find CAMD library (required SuiteSparse component).")
+  SET(CAMD_FOUND FALSE)
+ENDIF (EXISTS ${CAMD_LIBRARY})
+MARK_AS_ADVANCED(CAMD_LIBRARY)
+
+FIND_PATH(CAMD_INCLUDE_DIR NAMES camd.h
+  PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS})
+IF (EXISTS ${CAMD_INCLUDE_DIR})
+  MESSAGE(STATUS "Found CAMD header in: ${CAMD_INCLUDE_DIR}")
+ELSE (EXISTS ${CAMD_INCLUDE_DIR})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find CAMD header (required SuiteSparse component).")
+  SET(CAMD_FOUND FALSE)
+ENDIF (EXISTS ${CAMD_INCLUDE_DIR})
+MARK_AS_ADVANCED(CAMD_INCLUDE_DIR)
+
+# COLAMD.
+SET(COLAMD_FOUND TRUE)
+LIST(APPEND SUITESPARSE_FOUND_REQUIRED_VARS COLAMD_FOUND)
+FIND_LIBRARY(COLAMD_LIBRARY NAMES colamd
+  PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+IF (EXISTS ${COLAMD_LIBRARY})
+  MESSAGE(STATUS "Found COLAMD library: ${COLAMD_LIBRARY}")
+ELSE (EXISTS ${COLAMD_LIBRARY})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find COLAMD library (required SuiteSparse component).")
+  SET(COLAMD_FOUND FALSE)
+ENDIF (EXISTS ${COLAMD_LIBRARY})
+MARK_AS_ADVANCED(COLAMD_LIBRARY)
+
+FIND_PATH(COLAMD_INCLUDE_DIR NAMES colamd.h
+  PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS})
+IF (EXISTS ${COLAMD_INCLUDE_DIR})
+  MESSAGE(STATUS "Found COLAMD header in: ${COLAMD_INCLUDE_DIR}")
+ELSE (EXISTS ${COLAMD_INCLUDE_DIR})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find COLAMD header (required SuiteSparse component).")
+  SET(COLAMD_FOUND FALSE)
+ENDIF (EXISTS ${COLAMD_INCLUDE_DIR})
+MARK_AS_ADVANCED(COLAMD_INCLUDE_DIR)
+
+# CCOLAMD.
+SET(CCOLAMD_FOUND TRUE)
+LIST(APPEND SUITESPARSE_FOUND_REQUIRED_VARS CCOLAMD_FOUND)
+FIND_LIBRARY(CCOLAMD_LIBRARY NAMES ccolamd
+  PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+IF (EXISTS ${CCOLAMD_LIBRARY})
+  MESSAGE(STATUS "Found CCOLAMD library: ${CCOLAMD_LIBRARY}")
+ELSE (EXISTS ${CCOLAMD_LIBRARY})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find CCOLAMD library (required SuiteSparse component).")
+  SET(CCOLAMD_FOUND FALSE)
+ENDIF (EXISTS ${CCOLAMD_LIBRARY})
+MARK_AS_ADVANCED(CCOLAMD_LIBRARY)
+
+FIND_PATH(CCOLAMD_INCLUDE_DIR NAMES ccolamd.h
+  PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS})
+IF (EXISTS ${CCOLAMD_INCLUDE_DIR})
+  MESSAGE(STATUS "Found CCOLAMD header in: ${CCOLAMD_INCLUDE_DIR}")
+ELSE (EXISTS ${CCOLAMD_INCLUDE_DIR})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find CCOLAMD header (required SuiteSparse component).")
+  SET(CCOLAMD_FOUND FALSE)
+ENDIF (EXISTS ${CCOLAMD_INCLUDE_DIR})
+MARK_AS_ADVANCED(CCOLAMD_INCLUDE_DIR)
+
+# CHOLMOD.
+SET(CHOLMOD_FOUND TRUE)
+LIST(APPEND SUITESPARSE_FOUND_REQUIRED_VARS CHOLMOD_FOUND)
+FIND_LIBRARY(CHOLMOD_LIBRARY NAMES cholmod
+  PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+IF (EXISTS ${CHOLMOD_LIBRARY})
+  MESSAGE(STATUS "Found CHOLMOD library: ${CHOLMOD_LIBRARY}")
+ELSE (EXISTS ${CHOLMOD_LIBRARY})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find CHOLMOD library (required SuiteSparse component).")
+  SET(CHOLMOD_FOUND FALSE)
+ENDIF (EXISTS ${CHOLMOD_LIBRARY})
+MARK_AS_ADVANCED(CHOLMOD_LIBRARY)
+
+FIND_PATH(CHOLMOD_INCLUDE_DIR NAMES cholmod.h
+  PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS})
+IF (EXISTS ${CHOLMOD_INCLUDE_DIR})
+  MESSAGE(STATUS "Found CHOLMOD header in: ${CHOLMOD_INCLUDE_DIR}")
+ELSE (EXISTS ${CHOLMOD_INCLUDE_DIR})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find CHOLMOD header (required SuiteSparse component).")
+  SET(CHOLMOD_FOUND FALSE)
+ENDIF (EXISTS ${CHOLMOD_INCLUDE_DIR})
+MARK_AS_ADVANCED(CHOLMOD_INCLUDE_DIR)
+
+# SuiteSparseQR.
+SET(SUITESPARSEQR_FOUND TRUE)
+LIST(APPEND SUITESPARSE_FOUND_REQUIRED_VARS SUITESPARSEQR_FOUND)
+FIND_LIBRARY(SUITESPARSEQR_LIBRARY NAMES spqr
+  PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+IF (EXISTS ${SUITESPARSEQR_LIBRARY})
+  MESSAGE(STATUS "Found SuiteSparseQR library: ${SUITESPARSEQR_LIBRARY}")
+ELSE (EXISTS ${SUITESPARSEQR_LIBRARY})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find SuiteSparseQR library (required SuiteSparse component).")
+  SET(SUITESPARSEQR_FOUND FALSE)
+ENDIF (EXISTS ${SUITESPARSEQR_LIBRARY})
+MARK_AS_ADVANCED(SUITESPARSEQR_LIBRARY)
+
+FIND_PATH(SUITESPARSEQR_INCLUDE_DIR NAMES SuiteSparseQR.hpp
+  PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS})
+IF (EXISTS ${SUITESPARSEQR_INCLUDE_DIR})
+  MESSAGE(STATUS "Found SuiteSparseQR header in: ${SUITESPARSEQR_INCLUDE_DIR}")
+ELSE (EXISTS ${SUITESPARSEQR_INCLUDE_DIR})
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Did not find SUITESPARSEQR header (required SuiteSparse component).")
+  SET(SUITESPARSEQR_FOUND FALSE)
+ENDIF (EXISTS ${SUITESPARSEQR_INCLUDE_DIR})
+MARK_AS_ADVANCED(SUITESPARSEQR_INCLUDE_DIR)
+
+IF (SUITESPARSEQR_FOUND)
+  # SuiteSparseQR may be compiled with Intel Threading Building Blocks,
+  # we assume that if TBB is installed, SuiteSparseQR was compiled with
+  # support for it, this will do no harm if it wasn't.
+  SET(TBB_FOUND TRUE)
+  FIND_LIBRARY(TBB_LIBRARIES NAMES tbb
+    PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+  IF (EXISTS ${TBB_LIBRARIES})
+    MESSAGE(STATUS "Found Intel Thread Building Blocks (TBB) library: "
+      "${TBB_LIBRARIES}, assuming SuiteSparseQR was compiled with TBB.")
+  ELSE (EXISTS ${TBB_LIBRARIES})
+    MESSAGE(STATUS "Did not find Intel TBB library, assuming SuiteSparseQR was "
+      "not compiled with TBB.")
+    SET(TBB_FOUND FALSE)
+  ENDIF (EXISTS ${TBB_LIBRARIES})
+  MARK_AS_ADVANCED(TBB_LIBRARIES)
+
+  IF (TBB_FOUND)
+    FIND_LIBRARY(TBB_MALLOC_LIB NAMES tbbmalloc
+      PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+    IF (EXISTS ${TBB_MALLOC_LIB})
+      MESSAGE(STATUS "Found Intel Thread Building Blocks (TBB) Malloc library: "
+        "${TBB_MALLOC_LIB}")
+      # Append TBB malloc library to TBB libraries list whilst retaining
+      # any CMake generated help string (cache variable).
+      LIST(APPEND TBB_LIBRARIES ${TBB_MALLOC_LIB})
+      GET_PROPERTY(HELP_STRING CACHE TBB_LIBRARIES PROPERTY HELPSTRING)
+      SET(TBB_LIBRARIES "${TBB_LIBRARIES}" CACHE STRING ${HELP_STRING})
+
+      # Add the TBB libraries to the SuiteSparseQR libraries (the only
+      # libraries to optionally depend on TBB).
+      LIST(APPEND SUITESPARSEQR_LIBRARY ${TBB_LIBRARIES})
+
+    ELSE (EXISTS ${TBB_MALLOC_LIB})
+      # If we cannot find all required TBB components do not include it as
+      # a dependency.
+      MESSAGE(STATUS "Did not find Intel Thread Building Blocks (TBB) Malloc "
+        "Library, discarding TBB as a dependency.")
+      SET(TBB_FOUND FALSE)
+    ENDIF (EXISTS ${TBB_MALLOC_LIB})
+    MARK_AS_ADVANCED(TBB_MALLOC_LIB)
+  ENDIF (TBB_FOUND)
+ENDIF(SUITESPARSEQR_FOUND)
+
+# UFconfig / SuiteSparse_config.
+#
+# If SuiteSparse version is >= 4 then SuiteSparse_config is required.
+# For SuiteSparse 3, UFconfig.h is required.
+FIND_LIBRARY(SUITESPARSE_CONFIG_LIBRARY NAMES suitesparseconfig
+  PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+IF (EXISTS ${SUITESPARSE_CONFIG_LIBRARY})
+  MESSAGE(STATUS "Found SuiteSparse_config library: "
+    "${SUITESPARSE_CONFIG_LIBRARY}")
+ENDIF (EXISTS ${SUITESPARSE_CONFIG_LIBRARY})
+MARK_AS_ADVANCED(SUITESPARSE_CONFIG_LIBRARY)
+
+FIND_PATH(SUITESPARSE_CONFIG_INCLUDE_DIR NAMES SuiteSparse_config.h
+  PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS})
+IF (EXISTS ${SUITESPARSE_CONFIG_INCLUDE_DIR})
+  MESSAGE(STATUS "Found SuiteSparse_config header in: "
+    "${SUITESPARSE_CONFIG_INCLUDE_DIR}")
+ENDIF (EXISTS ${SUITESPARSE_CONFIG_INCLUDE_DIR})
+MARK_AS_ADVANCED(SUITESPARSE_CONFIG_INCLUDE_DIR)
+
+SET(SUITESPARSE_CONFIG_FOUND FALSE)
+SET(UFCONFIG_FOUND FALSE)
+
+IF (EXISTS ${SUITESPARSE_CONFIG_LIBRARY} AND
+    EXISTS ${SUITESPARSE_CONFIG_INCLUDE_DIR})
+  SET(SUITESPARSE_CONFIG_FOUND TRUE)
+  # SuiteSparse_config (SuiteSparse version >= 4) requires librt library for
+  # timing by default when compiled on Linux or Unix, but not on OSX (which
+  # does not have librt).
+  IF (CMAKE_SYSTEM_NAME MATCHES "Linux" OR UNIX AND NOT APPLE)
+    FIND_LIBRARY(LIBRT_LIBRARY NAMES rt
+      PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+    IF (LIBRT_LIBRARY)
+      MESSAGE(STATUS "Adding librt: ${LIBRT_LIBRARY} to "
+        "SuiteSparse_config libraries (required on Linux & Unix [not OSX] if "
+        "SuiteSparse is compiled with timing).")
+    ELSE (LIBRT_LIBRARY)
+      MESSAGE(STATUS "Could not find librt, but found SuiteSparse_config, "
+        "assuming that SuiteSparse was compiled without timing.")
+    ENDIF (LIBRT_LIBRARY)
+    MARK_AS_ADVANCED(LIBRT_LIBRARY)
+    LIST(APPEND SUITESPARSE_CONFIG_LIBRARY ${LIBRT_LIBRARY})
+  ENDIF (CMAKE_SYSTEM_NAME MATCHES "Linux" OR UNIX AND NOT APPLE)
+
+ELSE (EXISTS ${SUITESPARSE_CONFIG_LIBRARY} AND
+      EXISTS ${SUITESPARSE_CONFIG_INCLUDE_DIR})
+  # Failed to find SuiteSparse_config (>= v4 installs), instead look for
+  # UFconfig header which should be present in < v4 installs.
+  SET(SUITESPARSE_CONFIG_FOUND FALSE)
+  FIND_PATH(UFCONFIG_INCLUDE_DIR NAMES UFconfig.h
+    PATHS ${SUITESPARSE_CHECK_INCLUDE_DIRS})
+  IF (EXISTS ${UFCONFIG_INCLUDE_DIR})
+    MESSAGE(STATUS "Found UFconfig header in: ${UFCONFIG_INCLUDE_DIR}")
+    SET(UFCONFIG_FOUND TRUE)
+  ENDIF (EXISTS ${UFCONFIG_INCLUDE_DIR})
+  MARK_AS_ADVANCED(UFCONFIG_INCLUDE_DIR)
+ENDIF (EXISTS ${SUITESPARSE_CONFIG_LIBRARY} AND
+       EXISTS ${SUITESPARSE_CONFIG_INCLUDE_DIR})
+
+IF (NOT SUITESPARSE_CONFIG_FOUND AND
+    NOT UFCONFIG_FOUND)
+  SUITESPARSE_REPORT_NOT_FOUND(
+    "Failed to find either: SuiteSparse_config header & library (should be "
+    "present in all SuiteSparse >= v4 installs), or UFconfig header (should "
+    "be present in all SuiteSparse < v4 installs).")
+ENDIF (NOT SUITESPARSE_CONFIG_FOUND AND
+       NOT UFCONFIG_FOUND)
+
+# Extract the SuiteSparse version from the appropriate header (UFconfig.h for
+# <= v3, SuiteSparse_config.h for >= v4).
+LIST(APPEND SUITESPARSE_FOUND_REQUIRED_VARS SUITESPARSE_VERSION)
+
+IF (UFCONFIG_FOUND)
+  # SuiteSparse version <= 3.
+  SET(SUITESPARSE_VERSION_FILE ${UFCONFIG_INCLUDE_DIR}/UFconfig.h)
+  IF (NOT EXISTS ${SUITESPARSE_VERSION_FILE})
+    SUITESPARSE_REPORT_NOT_FOUND(
+      "Could not find file: ${SUITESPARSE_VERSION_FILE} containing version "
+      "information for <= v3 SuiteSparse installs, but UFconfig was found "
+      "(only present in <= v3 installs).")
+  ELSE (NOT EXISTS ${SUITESPARSE_VERSION_FILE})
+    FILE(READ ${SUITESPARSE_VERSION_FILE} UFCONFIG_CONTENTS)
+
+    STRING(REGEX MATCH "#define SUITESPARSE_MAIN_VERSION [0-9]+"
+      SUITESPARSE_MAIN_VERSION "${UFCONFIG_CONTENTS}")
+    STRING(REGEX REPLACE "#define SUITESPARSE_MAIN_VERSION ([0-9]+)" "\\1"
+      SUITESPARSE_MAIN_VERSION "${SUITESPARSE_MAIN_VERSION}")
+
+    STRING(REGEX MATCH "#define SUITESPARSE_SUB_VERSION [0-9]+"
+      SUITESPARSE_SUB_VERSION "${UFCONFIG_CONTENTS}")
+    STRING(REGEX REPLACE "#define SUITESPARSE_SUB_VERSION ([0-9]+)" "\\1"
+      SUITESPARSE_SUB_VERSION "${SUITESPARSE_SUB_VERSION}")
+
+    STRING(REGEX MATCH "#define SUITESPARSE_SUBSUB_VERSION [0-9]+"
+      SUITESPARSE_SUBSUB_VERSION "${UFCONFIG_CONTENTS}")
+    STRING(REGEX REPLACE "#define SUITESPARSE_SUBSUB_VERSION ([0-9]+)" "\\1"
+      SUITESPARSE_SUBSUB_VERSION "${SUITESPARSE_SUBSUB_VERSION}")
+
+    # This is on a single line s/t CMake does not interpret it as a list of
+    # elements and insert ';' separators which would result in 4.;2.;1 nonsense.
+    SET(SUITESPARSE_VERSION
+      "${SUITESPARSE_MAIN_VERSION}.${SUITESPARSE_SUB_VERSION}.${SUITESPARSE_SUBSUB_VERSION}")
+  ENDIF (NOT EXISTS ${SUITESPARSE_VERSION_FILE})
+ENDIF (UFCONFIG_FOUND)
+
+IF (SUITESPARSE_CONFIG_FOUND)
+  # SuiteSparse version >= 4.
+  SET(SUITESPARSE_VERSION_FILE
+    ${SUITESPARSE_CONFIG_INCLUDE_DIR}/SuiteSparse_config.h)
+  IF (NOT EXISTS ${SUITESPARSE_VERSION_FILE})
+    SUITESPARSE_REPORT_NOT_FOUND(
+      "Could not find file: ${SUITESPARSE_VERSION_FILE} containing version "
+      "information for >= v4 SuiteSparse installs, but SuiteSparse_config was "
+      "found (only present in >= v4 installs).")
+  ELSE (NOT EXISTS ${SUITESPARSE_VERSION_FILE})
+    FILE(READ ${SUITESPARSE_VERSION_FILE} SUITESPARSE_CONFIG_CONTENTS)
+
+    STRING(REGEX MATCH "#define SUITESPARSE_MAIN_VERSION [0-9]+"
+      SUITESPARSE_MAIN_VERSION "${SUITESPARSE_CONFIG_CONTENTS}")
+    STRING(REGEX REPLACE "#define SUITESPARSE_MAIN_VERSION ([0-9]+)" "\\1"
+      SUITESPARSE_MAIN_VERSION "${SUITESPARSE_MAIN_VERSION}")
+
+    STRING(REGEX MATCH "#define SUITESPARSE_SUB_VERSION [0-9]+"
+      SUITESPARSE_SUB_VERSION "${SUITESPARSE_CONFIG_CONTENTS}")
+    STRING(REGEX REPLACE "#define SUITESPARSE_SUB_VERSION ([0-9]+)" "\\1"
+      SUITESPARSE_SUB_VERSION "${SUITESPARSE_SUB_VERSION}")
+
+    STRING(REGEX MATCH "#define SUITESPARSE_SUBSUB_VERSION [0-9]+"
+      SUITESPARSE_SUBSUB_VERSION "${SUITESPARSE_CONFIG_CONTENTS}")
+    STRING(REGEX REPLACE "#define SUITESPARSE_SUBSUB_VERSION ([0-9]+)" "\\1"
+      SUITESPARSE_SUBSUB_VERSION "${SUITESPARSE_SUBSUB_VERSION}")
+
+    # This is on a single line s/t CMake does not interpret it as a list of
+    # elements and insert ';' separators which would result in 4.;2.;1 nonsense.
+    SET(SUITESPARSE_VERSION
+      "${SUITESPARSE_MAIN_VERSION}.${SUITESPARSE_SUB_VERSION}.${SUITESPARSE_SUBSUB_VERSION}")
+  ENDIF (NOT EXISTS ${SUITESPARSE_VERSION_FILE})
+ENDIF (SUITESPARSE_CONFIG_FOUND)
+
+# METIS (Optional dependency).
+FIND_LIBRARY(METIS_LIBRARY NAMES metis
+  PATHS ${SUITESPARSE_CHECK_LIBRARY_DIRS})
+IF (EXISTS ${METIS_LIBRARY})
+  MESSAGE(STATUS "Found METIS library: ${METIS_LIBRARY}.")
+  set(METIS_FOUND TRUE)
+ELSE (EXISTS ${METIS_LIBRARY})
+  MESSAGE(STATUS "Did not find METIS library (optional SuiteSparse dependency)")
+  set(METIS_FOUND FALSE)
+ENDIF (EXISTS ${METIS_LIBRARY})
+MARK_AS_ADVANCED(METIS_LIBRARY)
+
+# Only mark SuiteSparse as found if all required components and dependencies
+# have been found.
+SET(SUITESPARSE_FOUND TRUE)
+FOREACH(REQUIRED_VAR ${SUITESPARSE_FOUND_REQUIRED_VARS})
+  IF (NOT ${REQUIRED_VAR})
+    SET(SUITESPARSE_FOUND FALSE)
+  ENDIF (NOT ${REQUIRED_VAR})
+ENDFOREACH(REQUIRED_VAR ${SUITESPARSE_FOUND_REQUIRED_VARS})
+
+IF (SUITESPARSE_FOUND)
+  LIST(APPEND SUITESPARSE_INCLUDE_DIRS
+    ${AMD_INCLUDE_DIR}
+    ${CAMD_INCLUDE_DIR}
+    ${COLAMD_INCLUDE_DIR}
+    ${CCOLAMD_INCLUDE_DIR}
+    ${CHOLMOD_INCLUDE_DIR}
+    ${SUITESPARSEQR_INCLUDE_DIR})
+  # Handle config separately, as otherwise at least one of them will be set
+  # to NOTFOUND which would cause any check on SUITESPARSE_INCLUDE_DIRS to fail.
+  IF (SUITESPARSE_CONFIG_FOUND)
+    LIST(APPEND SUITESPARSE_INCLUDE_DIRS
+      ${SUITESPARSE_CONFIG_INCLUDE_DIR})
+  ENDIF (SUITESPARSE_CONFIG_FOUND)
+  IF (UFCONFIG_FOUND)
+    LIST(APPEND SUITESPARSE_INCLUDE_DIRS
+      ${UFCONFIG_INCLUDE_DIR})
+  ENDIF (UFCONFIG_FOUND)
+  # As SuiteSparse includes are often all in the same directory, remove any
+  # repetitions.
+  LIST(REMOVE_DUPLICATES SUITESPARSE_INCLUDE_DIRS)
+
+  # Important: The ordering of these libraries is *NOT* arbitrary, as these
+  # could potentially be static libraries their link ordering is important.
+  LIST(APPEND SUITESPARSE_LIBRARIES
+    ${SUITESPARSEQR_LIBRARY}
+    ${CHOLMOD_LIBRARY}
+    ${CCOLAMD_LIBRARY}
+    ${CAMD_LIBRARY}
+    ${COLAMD_LIBRARY}
+    ${AMD_LIBRARY}
+    ${LAPACK_LIBRARIES}
+    ${BLAS_LIBRARIES})
+  IF (SUITESPARSE_CONFIG_FOUND)
+    LIST(APPEND SUITESPARSE_LIBRARIES
+      ${SUITESPARSE_CONFIG_LIBRARY})
+  ENDIF (SUITESPARSE_CONFIG_FOUND)
+  IF (METIS_FOUND)
+    LIST(APPEND SUITESPARSE_LIBRARIES
+      ${METIS_LIBRARY})
+  ENDIF (METIS_FOUND)
+ENDIF()
+
+# Determine if we are running on Ubuntu with the package install of SuiteSparse
+# which is broken and does not support linking a shared library.
+SET(SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION FALSE)
+IF (CMAKE_SYSTEM_NAME MATCHES "Linux" AND
+    SUITESPARSE_VERSION VERSION_EQUAL 3.4.0)
+  FIND_PROGRAM(LSB_RELEASE_EXECUTABLE lsb_release)
+  IF (LSB_RELEASE_EXECUTABLE)
+    # Any even moderately recent Ubuntu release (likely to be affected by
+    # this bug) should have lsb_release, if it isn't present we are likely
+    # on a different Linux distribution (should be fine).
+
+    EXECUTE_PROCESS(COMMAND ${LSB_RELEASE_EXECUTABLE} -si
+      OUTPUT_VARIABLE LSB_DISTRIBUTOR_ID
+      OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+    IF (LSB_DISTRIBUTOR_ID MATCHES "Ubuntu" AND
+        SUITESPARSE_LIBRARIES MATCHES "/usr/lib/libamd")
+      # We are on Ubuntu, and the SuiteSparse version matches the broken
+      # system install version and is a system install.
+      SET(SUITESPARSE_IS_BROKEN_SHARED_LINKING_UBUNTU_SYSTEM_VERSION TRUE)
+      MESSAGE(STATUS "Found system install of SuiteSparse "
+        "${SUITESPARSE_VERSION} running on Ubuntu, which has a known bug "
+        "preventing linking of shared libraries (static linking unaffected).")
+    ENDIF (LSB_DISTRIBUTOR_ID MATCHES "Ubuntu" AND
+      SUITESPARSE_LIBRARIES MATCHES "/usr/lib/libamd")
+  ENDIF (LSB_RELEASE_EXECUTABLE)
+ENDIF (CMAKE_SYSTEM_NAME MATCHES "Linux" AND
+  SUITESPARSE_VERSION VERSION_EQUAL 3.4.0)
+
+# Handle REQUIRED and QUIET arguments to FIND_PACKAGE
+INCLUDE(FindPackageHandleStandardArgs)
+IF (SUITESPARSE_FOUND)
+  FIND_PACKAGE_HANDLE_STANDARD_ARGS(SuiteSparse
+    REQUIRED_VARS ${SUITESPARSE_FOUND_REQUIRED_VARS}
+    VERSION_VAR SUITESPARSE_VERSION
+    FAIL_MESSAGE "Failed to find some/all required components of SuiteSparse.")
+ELSE (SUITESPARSE_FOUND)
+  # Do not pass VERSION_VAR to FindPackageHandleStandardArgs() if we failed to
+  # find SuiteSparse to avoid a confusing autogenerated failure message
+  # that states 'not found (missing: FOO) (found version: x.y.z)'.
+  FIND_PACKAGE_HANDLE_STANDARD_ARGS(SuiteSparse
+    REQUIRED_VARS ${SUITESPARSE_FOUND_REQUIRED_VARS}
+    FAIL_MESSAGE "Failed to find some/all required components of SuiteSparse.")
+ENDIF (SUITESPARSE_FOUND)
diff --git a/cmake/UpdateCacheVariable.cmake b/cmake/UpdateCacheVariable.cmake
new file mode 100644
index 0000000..759de2e
--- /dev/null
+++ b/cmake/UpdateCacheVariable.cmake
@@ -0,0 +1,43 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2014 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: alexs.mac@gmail.com (Alex Stewart)
+
+# By default, there is no easy way in CMake to set the value of a cache
+# variable without reinitialising it, which involves resetting its
+# associated help string.  This is particularly annoying for CMake options
+# where they need to be programmatically updated.
+#
+# This function automates this process by getting the current help string
+# for the cache variable to update, then reinitialising it with the new
+# value, but with the original help string.
+FUNCTION(UPDATE_CACHE_VARIABLE VAR_NAME VALUE)
+  GET_PROPERTY(HELP_STRING CACHE ${VAR_NAME} PROPERTY HELPSTRING)
+  GET_PROPERTY(VAR_TYPE CACHE ${VAR_NAME} PROPERTY TYPE)
+  SET(${VAR_NAME} ${VALUE} CACHE ${VAR_TYPE} "${HELP_STRING}" FORCE)
+ENDFUNCTION()
diff --git a/cmake/config.h.in b/cmake/config.h.in
new file mode 100644
index 0000000..2828ab8
--- /dev/null
+++ b/cmake/config.h.in
@@ -0,0 +1,92 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: alexs.mac@gmail.com (Alex Stewart)
+
+// Configuration options for Ceres.
+//
+// Do not edit this file, it was automatically configured by CMake when
+// Ceres was compiled with the relevant configuration for the machine
+// on which Ceres was compiled.
+//
+// Ceres Developers: All options should have the same name as their mapped
+//                   CMake options, in the preconfigured version of this file
+//                   all options should be enclosed in '@'.
+
+#ifndef CERES_PUBLIC_INTERNAL_CONFIG_H_
+#define CERES_PUBLIC_INTERNAL_CONFIG_H_
+
+// If defined, use the LGPL code in Eigen.
+@CERES_USE_EIGEN_SPARSE@
+
+// If defined, Ceres was compiled without LAPACK.
+@CERES_NO_LAPACK@
+
+// If defined, Ceres was compiled without SuiteSparse.
+@CERES_NO_SUITESPARSE@
+
+// If defined, Ceres was compiled without CXSparse.
+@CERES_NO_CXSPARSE@
+
+// If defined, Ceres was compiled without Schur specializations.
+@CERES_RESTRICT_SCHUR_SPECIALIZATION@
+
+// If defined, Ceres was compiled to use Eigen instead of hardcoded BLAS
+// routines.
+@CERES_NO_CUSTOM_BLAS@
+
+// If defined, Ceres was compiled without multithreading support.
+@CERES_NO_THREADS@
+// If defined Ceres was compiled with OpenMP multithreading support.
+@CERES_USE_OPENMP@
+// Additionally defined on *nix if Ceres was compiled with OpenMP support,
+// as in this case pthreads is also required.
+@CERES_HAVE_PTHREAD@
+@CERES_HAVE_RWLOCK@
+
+// Which version of unordered map was used when Ceres was compiled. Exactly
+// one of these will be defined for any given build.
+@CERES_STD_UNORDERED_MAP@
+@CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE@
+@CERES_TR1_UNORDERED_MAP@
+@CERES_NO_UNORDERED_MAP@
+
+// If defined Ceres was compiled for Android with noalias() removed from
+// matrix-matrix multiplies to work around a bug in the Android NDK.
+@CERES_WORK_AROUND_ANDROID_NDK_COMPILER_BUG@
+
+// If defined, the memory header is in <tr1/memory>, otherwise <memory>.
+@CERES_TR1_MEMORY_HEADER@
+
+// If defined shared_ptr is in std::tr1 namespace, otherwise std.
+@CERES_TR1_SHARED_PTR@
+
+// If defined, Ceres was built as a shared library.
+@CERES_USING_SHARED_LIBRARY@
+
+#endif  // CERES_PUBLIC_INTERNAL_CONFIG_H_
diff --git a/cmake/depend.cmake b/cmake/depend.cmake
deleted file mode 100644
index 275a440..0000000
--- a/cmake/depend.cmake
+++ /dev/null
@@ -1,103 +0,0 @@
-# Ceres Solver - A fast non-linear least squares minimizer
-# Copyright 2013 Google Inc. All rights reserved.
-# http://code.google.com/p/ceres-solver/
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-#   this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-# * Neither the name of Google Inc. nor the names of its contributors may be
-#   used to endorse or promote products derived from this software without
-#   specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# Author: pablo.speciale@gmail.com (Pablo Speciale)
-#
-
-# Default locations to search for on various platforms.
-LIST(APPEND SEARCH_LIBS /usr/lib)
-LIST(APPEND SEARCH_LIBS /usr/local/lib)
-LIST(APPEND SEARCH_LIBS /usr/local/homebrew/lib) # Mac OS X
-LIST(APPEND SEARCH_LIBS /opt/local/lib)
-
-LIST(APPEND SEARCH_HEADERS /usr/include)
-LIST(APPEND SEARCH_HEADERS /usr/local/include)
-LIST(APPEND SEARCH_HEADERS /usr/local/homebrew/include) # Mac OS X
-LIST(APPEND SEARCH_HEADERS /opt/local/include)
-
-# Locations to search for Eigen
-SET(EIGEN_SEARCH_HEADERS ${SEARCH_HEADERS})
-LIST(APPEND EIGEN_SEARCH_HEADERS /usr/include/eigen3) # Ubuntu 10.04's default location.
-LIST(APPEND EIGEN_SEARCH_HEADERS /usr/local/include/eigen3)
-LIST(APPEND EIGEN_SEARCH_HEADERS /usr/local/homebrew/include/eigen3)  # Mac OS X
-LIST(APPEND EIGEN_SEARCH_HEADERS /opt/local/var/macports/software/eigen3/opt/local/include/eigen3) # Mac OS X
-
-# Google Flags
-OPTION(GFLAGS
-       "Enable Google Flags."
-       ON)
-
-IF (GFLAGS)
-  MESSAGE("-- Check for Google Flags")
-  FIND_LIBRARY(GFLAGS_LIB NAMES gflags PATHS ${SEARCH_LIBS})
-  IF (NOT EXISTS ${GFLAGS_LIB})
-    MESSAGE(FATAL_ERROR
-            "Can't find Google Flags. Please specify: "
-            "-DGFLAGS_LIB=...")
-  ENDIF (NOT EXISTS ${GFLAGS_LIB})
-  MESSAGE("-- Found Google Flags library: ${GFLAGS_LIB}")
-  FIND_PATH(GFLAGS_INCLUDE NAMES gflags/gflags.h PATHS ${SEARCH_HEADERS})
-  IF (NOT EXISTS ${GFLAGS_INCLUDE})
-    MESSAGE(FATAL_ERROR
-            "Can't find Google Flags. Please specify: "
-            "-DGFLAGS_INCLUDE=...")
-  ENDIF (NOT EXISTS ${GFLAGS_INCLUDE})
-  MESSAGE("-- Found Google Flags header in: ${GFLAGS_INCLUDE}")
-ENDIF (GFLAGS)
-
-# Google Logging
-MESSAGE("-- Check for Google Log")
-FIND_LIBRARY(GLOG_LIB NAMES glog PATHS ${SEARCH_LIBS})
-IF (NOT EXISTS ${GLOG_LIB})
-  MESSAGE(FATAL_ERROR
-          "Can't find Google Log. Please specify: "
-          "-DGLOG_LIB=...")
-ENDIF (NOT EXISTS ${GLOG_LIB})
-MESSAGE("-- Found Google Log library: ${GLOG_LIB}")
-
-FIND_PATH(GLOG_INCLUDE NAMES glog/logging.h PATHS ${SEARCH_HEADERS})
-IF (NOT EXISTS ${GLOG_INCLUDE})
-  MESSAGE(FATAL_ERROR
-          "Can't find Google Log. Please specify: "
-          "-DGLOG_INCLUDE=...")
-ENDIF (NOT EXISTS ${GLOG_INCLUDE})
-MESSAGE("-- Found Google Log header in: ${GLOG_INCLUDE}")
-
-# Eigen
-MESSAGE("-- Check for Eigen 3.x")
-FIND_PATH(EIGEN_INCLUDE NAMES Eigen/Core PATHS ${EIGEN_SEARCH_HEADERS})
-IF (NOT EXISTS ${EIGEN_INCLUDE})
-  MESSAGE(FATAL_ERROR "Can't find Eigen. Try passing -DEIGEN_INCLUDE=...")
-ENDIF (NOT EXISTS ${EIGEN_INCLUDE})
-MESSAGE("-- Found Eigen 3.x: ${EIGEN_INCLUDE}")
-
-
-INCLUDE_DIRECTORIES(
-  ${GLOG_INCLUDE}
-  ${EIGEN_INCLUDE}
-  )
diff --git a/cmake/iOS.cmake b/cmake/iOS.cmake
new file mode 100644
index 0000000..03b845a
--- /dev/null
+++ b/cmake/iOS.cmake
@@ -0,0 +1,246 @@
+# This file is part of the ios-cmake project. It was retrieved from
+# https://github.com/cristeab/ios-cmake.git, which is a fork of
+# https://code.google.com/p/ios-cmake/. Which in turn is based off of
+# the Platform/Darwin.cmake and Platform/UnixPaths.cmake files which
+# are included with CMake 2.8.4
+#
+# The ios-cmake project is licensed under the new BSD license.
+#
+# Copyright (c) 2014, Bogdan Cristea and LTE Engineering Software,
+# Kitware, Inc., Insight Software Consortium.  All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# This file is based off of the Platform/Darwin.cmake and Platform/UnixPaths.cmake
+# files which are included with CMake 2.8.4
+# It has been altered for iOS development
+
+# Options:
+#
+# IOS_PLATFORM = OS (default) or SIMULATOR or SIMULATOR64
+#   This decides if SDKS will be selected from the iPhoneOS.platform or iPhoneSimulator.platform folders
+#   OS - the default, used to build for iPhone and iPad physical devices, which have an arm arch.
+#   SIMULATOR - used to build for the Simulator platforms, which have an x86 arch.
+#
+# CMAKE_IOS_DEVELOPER_ROOT = automatic(default) or /path/to/platform/Developer folder
+#   By default this location is automatically chosen based on the IOS_PLATFORM value above.
+#   If set manually, it will override the default location and force the use of a particular Developer Platform
+#
+# CMAKE_IOS_SDK_ROOT = automatic(default) or /path/to/platform/Developer/SDKs/SDK folder
+#   By default this location is automatically chosen based on the CMAKE_IOS_DEVELOPER_ROOT value.
+#   In this case it will always be the most up-to-date SDK found in the CMAKE_IOS_DEVELOPER_ROOT path.
+#   If set manually, this will force the use of a specific SDK version
+
+# Macros:
+#
+# set_xcode_property (TARGET XCODE_PROPERTY XCODE_VALUE)
+#  A convenience macro for setting xcode specific properties on targets
+#  example: set_xcode_property (myioslib IPHONEOS_DEPLOYMENT_TARGET "3.1")
+#
+# find_host_package (PROGRAM ARGS)
+#  A macro used to find executable programs on the host system, not within the iOS environment.
+#  Thanks to the android-cmake project for providing the command
+
+# Standard settings
+set (CMAKE_SYSTEM_NAME Darwin)
+set (CMAKE_SYSTEM_VERSION 1)
+set (UNIX True)
+set (APPLE True)
+set (IOS True)
+
+# Required as of cmake 2.8.10
+set (CMAKE_OSX_DEPLOYMENT_TARGET "" CACHE STRING "Force unset of the deployment target for iOS" FORCE)
+
+# Determine the cmake host system version so we know where to find the iOS SDKs
+find_program (CMAKE_UNAME uname /bin /usr/bin /usr/local/bin)
+if (CMAKE_UNAME)
+	exec_program(uname ARGS -r OUTPUT_VARIABLE CMAKE_HOST_SYSTEM_VERSION)
+	string (REGEX REPLACE "^([0-9]+)\\.([0-9]+).*$" "\\1" DARWIN_MAJOR_VERSION "${CMAKE_HOST_SYSTEM_VERSION}")
+endif (CMAKE_UNAME)
+
+# Force the compilers to gcc for iOS
+include (CMakeForceCompiler)
+CMAKE_FORCE_C_COMPILER (clang Apple)
+CMAKE_FORCE_CXX_COMPILER (clang++ Apple)
+set(CMAKE_AR ar CACHE FILEPATH "" FORCE)
+
+# Skip the platform compiler checks for cross compiling
+set (CMAKE_CXX_COMPILER_WORKS TRUE)
+set (CMAKE_C_COMPILER_WORKS TRUE)
+
+# All iOS/Darwin specific settings - some may be redundant
+set (CMAKE_SHARED_LIBRARY_PREFIX "lib")
+set (CMAKE_SHARED_LIBRARY_SUFFIX ".dylib")
+set (CMAKE_SHARED_MODULE_PREFIX "lib")
+set (CMAKE_SHARED_MODULE_SUFFIX ".so")
+set (CMAKE_MODULE_EXISTS 1)
+set (CMAKE_DL_LIBS "")
+
+set (CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ")
+set (CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ")
+set (CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}")
+set (CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}")
+
+# Hidden visibility is required for cxx on iOS
+set (CMAKE_C_FLAGS_INIT "")
+set (CMAKE_CXX_FLAGS_INIT "-fvisibility=hidden -fvisibility-inlines-hidden")
+set (CMAKE_CXX_FLAGS_RELEASE_INIT "-DNDEBUG -O3 -fomit-frame-pointer -ffast-math")
+
+set (CMAKE_C_LINK_FLAGS "-Wl,-search_paths_first ${CMAKE_C_LINK_FLAGS}")
+set (CMAKE_CXX_LINK_FLAGS "-Wl,-search_paths_first ${CMAKE_CXX_LINK_FLAGS}")
+
+set (CMAKE_PLATFORM_HAS_INSTALLNAME 1)
+set (CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib -headerpad_max_install_names")
+set (CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle -headerpad_max_install_names")
+set (CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,")
+set (CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,")
+set (CMAKE_FIND_LIBRARY_SUFFIXES ".dylib" ".so" ".a")
+
+# hack: if a new cmake (which uses CMAKE_INSTALL_NAME_TOOL) runs on an old build tree
+# (where install_name_tool was hardcoded) and where CMAKE_INSTALL_NAME_TOOL isn't in the cache
+# and still cmake didn't fail in CMakeFindBinUtils.cmake (because it isn't rerun)
+# hardcode CMAKE_INSTALL_NAME_TOOL here to install_name_tool, so it behaves as it did before, Alex
+if (NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
+	find_program(CMAKE_INSTALL_NAME_TOOL install_name_tool)
+endif (NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
+
+# Setup iOS platform unless specified manually with IOS_PLATFORM
+if (NOT DEFINED IOS_PLATFORM)
+	set (IOS_PLATFORM "OS")
+endif (NOT DEFINED IOS_PLATFORM)
+set (IOS_PLATFORM ${IOS_PLATFORM} CACHE STRING "Type of iOS Platform")
+
+# Setup building for arm64 or not
+if (NOT DEFINED BUILD_ARM64)
+    set (BUILD_ARM64 true)
+endif (NOT DEFINED BUILD_ARM64)
+set (BUILD_ARM64 ${BUILD_ARM64} CACHE STRING "Build arm64 arch or not")
+
+# Check the platform selection and setup for developer root
+if (${IOS_PLATFORM} STREQUAL "OS")
+	set (IOS_PLATFORM_LOCATION "iPhoneOS.platform")
+
+	# This causes the installers to properly locate the output libraries
+	set (CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphoneos")
+elseif (${IOS_PLATFORM} STREQUAL "SIMULATOR")
+    set (SIMULATOR true)
+	set (IOS_PLATFORM_LOCATION "iPhoneSimulator.platform")
+
+	# This causes the installers to properly locate the output libraries
+	set (CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphonesimulator")
+elseif (${IOS_PLATFORM} STREQUAL "SIMULATOR64")
+    set (SIMULATOR true)
+	set (IOS_PLATFORM_LOCATION "iPhoneSimulator.platform")
+
+	# This causes the installers to properly locate the output libraries
+	set (CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphonesimulator")
+else (${IOS_PLATFORM} STREQUAL "OS")
+	message (FATAL_ERROR "Unsupported IOS_PLATFORM value selected. Please choose OS, SIMULATOR or SIMULATOR64")
+endif (${IOS_PLATFORM} STREQUAL "OS")
+
+# Setup iOS developer location unless specified manually with CMAKE_IOS_DEVELOPER_ROOT
+# Note Xcode 4.3 changed the installation location, choose the most recent one available
+set (XCODE_POST_43_ROOT "/Applications/Xcode.app/Contents/Developer/Platforms/${IOS_PLATFORM_LOCATION}/Developer")
+set (XCODE_PRE_43_ROOT "/Developer/Platforms/${IOS_PLATFORM_LOCATION}/Developer")
+if (NOT DEFINED CMAKE_IOS_DEVELOPER_ROOT)
+	if (EXISTS ${XCODE_POST_43_ROOT})
+		set (CMAKE_IOS_DEVELOPER_ROOT ${XCODE_POST_43_ROOT})
+	elseif(EXISTS ${XCODE_PRE_43_ROOT})
+		set (CMAKE_IOS_DEVELOPER_ROOT ${XCODE_PRE_43_ROOT})
+	endif (EXISTS ${XCODE_POST_43_ROOT})
+endif (NOT DEFINED CMAKE_IOS_DEVELOPER_ROOT)
+set (CMAKE_IOS_DEVELOPER_ROOT ${CMAKE_IOS_DEVELOPER_ROOT} CACHE PATH "Location of iOS Platform")
+
+# Find and use the most recent iOS sdk unless specified manually with CMAKE_IOS_SDK_ROOT
+if (NOT DEFINED CMAKE_IOS_SDK_ROOT)
+	file (GLOB _CMAKE_IOS_SDKS "${CMAKE_IOS_DEVELOPER_ROOT}/SDKs/*")
+	if (_CMAKE_IOS_SDKS)
+		list (SORT _CMAKE_IOS_SDKS)
+		list (REVERSE _CMAKE_IOS_SDKS)
+		list (GET _CMAKE_IOS_SDKS 0 CMAKE_IOS_SDK_ROOT)
+	else (_CMAKE_IOS_SDKS)
+		message (FATAL_ERROR "No iOS SDK's found in default search path ${CMAKE_IOS_DEVELOPER_ROOT}. Manually set CMAKE_IOS_SDK_ROOT or install the iOS SDK.")
+	endif (_CMAKE_IOS_SDKS)
+	message (STATUS "Toolchain using default iOS SDK: ${CMAKE_IOS_SDK_ROOT}")
+endif (NOT DEFINED CMAKE_IOS_SDK_ROOT)
+set (CMAKE_IOS_SDK_ROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Location of the selected iOS SDK")
+
+# Set the sysroot default to the most recent SDK
+set (CMAKE_OSX_SYSROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Sysroot used for iOS support")
+
+# set the architecture for iOS
+if (${IOS_PLATFORM} STREQUAL "OS")
+    set (IOS_ARCH armv7 armv7s arm64)
+elseif (${IOS_PLATFORM} STREQUAL "SIMULATOR")
+    set (IOS_ARCH i386)
+elseif (${IOS_PLATFORM} STREQUAL "SIMULATOR64")
+    set (IOS_ARCH x86_64)
+endif (${IOS_PLATFORM} STREQUAL "OS")
+
+set (CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE string  "Build architecture for iOS")
+
+# Set the find root to the iOS developer roots and to user defined paths
+set (CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_IOS_SDK_ROOT} ${CMAKE_PREFIX_PATH} CACHE string  "iOS find search path root")
+
+# default to searching for frameworks first
+set (CMAKE_FIND_FRAMEWORK FIRST)
+
+# set up the default search directories for frameworks
+set (CMAKE_SYSTEM_FRAMEWORK_PATH
+	${CMAKE_IOS_SDK_ROOT}/System/Library/Frameworks
+	${CMAKE_IOS_SDK_ROOT}/System/Library/PrivateFrameworks
+	${CMAKE_IOS_SDK_ROOT}/Developer/Library/Frameworks
+)
+
+# only search the iOS sdks, not the remainder of the host filesystem
+set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
+set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+
+# This little macro lets you set any XCode specific property
+macro (set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE)
+	set_property (TARGET ${TARGET} PROPERTY XCODE_ATTRIBUTE_${XCODE_PROPERTY} ${XCODE_VALUE})
+endmacro (set_xcode_property)
+
+
+# This macro lets you find executable programs on the host system
+macro (find_host_package)
+	set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+	set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER)
+	set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER)
+	set (IOS FALSE)
+
+	find_package(${ARGN})
+
+	set (IOS TRUE)
+	set (CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
+	set (CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+	set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+endmacro (find_host_package)
diff --git a/cmake/uninstall.cmake.in b/cmake/uninstall.cmake.in
index 66c57f6..dce3284 100644
--- a/cmake/uninstall.cmake.in
+++ b/cmake/uninstall.cmake.in
@@ -27,25 +27,71 @@
 # POSSIBILITY OF SUCH DAMAGE.
 #
 # Author: arnaudgelas@gmail.com (Arnaud Gelas)
+#         alexs.mac@gmail.com (Alex Stewart)
+
+IF (COMMAND cmake_policy)
+  # Ignore empty elements in LIST() commands.
+  CMAKE_POLICY(SET CMP0007 OLD)
+ENDIF (COMMAND cmake_policy)
 
 IF (NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
-  MESSAGE(FATAL_ERROR
-          "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"")
+  MESSAGE(FATAL_ERROR "Cannot find install manifest: "
+                      "\"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"")
 ENDIF (NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
 
-FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files)
-STRING(REGEX REPLACE "\n" ";" files "${files}")
-LIST(REVERSE files)
-FOREACH (file ${files})
-  MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"")
-  IF (EXISTS "$ENV{DESTDIR}${file}")
-    EXECUTE_PROCESS(COMMAND @CMAKE_COMMAND@ -E remove "$ENV{DESTDIR}${file}"
-                    OUTPUT_VARIABLE rm_out
-                    RESULT_VARIABLE rm_retval)
-    IF (NOT ${rm_retval} EQUAL 0)
-      MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"")
-    ENDIF (NOT ${rm_retval} EQUAL 0)
-  ELSE (EXISTS "$ENV{DESTDIR}${file}")
-    MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.")
-  ENDIF (EXISTS "$ENV{DESTDIR}${file}")
-ENDFOREACH(file)
+FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" INSTALL_MANIFEST)
+STRING(REGEX REPLACE "\n" ";" INSTALL_MANIFEST "${INSTALL_MANIFEST}")
+LIST(REVERSE INSTALL_MANIFEST)
+
+FOREACH (INSTALLED_FILE ${INSTALL_MANIFEST})
+  # Save the root ceres include install directory, e.g. /usr/local/include/ceres
+  # so that we can remove it at the end.
+  IF (NOT CERES_INCLUDE_INSTALL_ROOT)
+    GET_FILENAME_COMPONENT(FILE_NAME ${INSTALLED_FILE} NAME)
+    IF (FILE_NAME STREQUAL ceres.h)
+      # Ensure that the directory is nested as we expect, as we are going to
+      # remove it, and we do not want to remove files pertaining to anyone else.
+      GET_FILENAME_COMPONENT(PARENT_DIR ${INSTALLED_FILE} PATH)
+      GET_FILENAME_COMPONENT(PARENT_DIR_NAME ${PARENT_DIR} NAME)
+      IF (PARENT_DIR_NAME STREQUAL ceres AND IS_DIRECTORY ${PARENT_DIR})
+        SET(CERES_INCLUDE_INSTALL_ROOT ${PARENT_DIR})
+      ENDIF (PARENT_DIR_NAME STREQUAL ceres AND IS_DIRECTORY ${PARENT_DIR})
+    ENDIF (FILE_NAME STREQUAL ceres.h)
+  ENDIF (NOT CERES_INCLUDE_INSTALL_ROOT)
+
+  MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${INSTALLED_FILE}\"")
+  IF (EXISTS "$ENV{DESTDIR}${INSTALLED_FILE}")
+    EXECUTE_PROCESS(COMMAND @CMAKE_COMMAND@
+                    -E remove "$ENV{DESTDIR}${INSTALLED_FILE}"
+                    OUTPUT_VARIABLE RM_OUT
+                    RESULT_VARIABLE RM_RETVAL)
+    IF (NOT ${RM_RETVAL} EQUAL 0)
+      MESSAGE(FATAL_ERROR
+              "Problem when removing \"$ENV{DESTDIR}${INSTALLED_FILE}\"")
+    ENDIF (NOT ${RM_RETVAL} EQUAL 0)
+  ELSE (EXISTS "$ENV{DESTDIR}${INSTALLED_FILE}")
+    MESSAGE(STATUS "File \"$ENV{DESTDIR}${INSTALLED_FILE}\" does not exist.")
+  ENDIF (EXISTS "$ENV{DESTDIR}${INSTALLED_FILE}")
+ENDFOREACH(INSTALLED_FILE)
+
+# Removing Ceres include install directory.
+IF (CERES_INCLUDE_INSTALL_ROOT AND
+    EXISTS ${CERES_INCLUDE_INSTALL_ROOT})
+  MESSAGE(STATUS "Removing Ceres include install directory: "
+                 "\"$ENV{DESTDIR}${CERES_INCLUDE_INSTALL_ROOT}\"")
+  EXECUTE_PROCESS(COMMAND @CMAKE_COMMAND@
+                  -E remove_directory
+                  "$ENV{DESTDIR}${CERES_INCLUDE_INSTALL_ROOT}"
+                  OUTPUT_VARIABLE RM_OUT
+                  RESULT_VARIABLE RM_RETVAL)
+  IF (NOT ${RM_RETVAL} EQUAL 0)
+    MESSAGE(FATAL_ERROR
+      "Failed to remove: \"$ENV{DESTDIR}${CERES_INCLUDE_INSTALL_ROOT}\"")
+  ENDIF (NOT ${RM_RETVAL} EQUAL 0)
+ELSE (CERES_INCLUDE_INSTALL_ROOT AND
+    EXISTS ${CERES_INCLUDE_INSTALL_ROOT})
+  MESSAGE(FATAL_ERROR "Failed to find Ceres installed include directory "
+                      "(e.g. /usr/local/include/ceres), candidate: "
+                      "\"$ENV{DESTDIR}${CERES_INCLUDE_INSTALL_ROOT}\"")
+ENDIF (CERES_INCLUDE_INSTALL_ROOT AND
+  EXISTS ${CERES_INCLUDE_INSTALL_ROOT})
diff --git a/config/ceres/internal/config.h b/config/ceres/internal/config.h
new file mode 100644
index 0000000..c9d2c16
--- /dev/null
+++ b/config/ceres/internal/config.h
@@ -0,0 +1,45 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: alexs.mac@gmail.com (Alex Stewart)
+
+// Default (empty) configuration options for Ceres.
+//
+// IMPORTANT: Most users of Ceres will not use this file, when compiling Ceres
+//            with CMake, CMake will configure a new config.h with the currently
+//            selected Ceres compile options and copy it into the source
+//            directory before compilation.  However, for some users of Ceres
+//            who compile without CMake, this file ensures that Ceres will
+//            compile, with the user either specifying manually the Ceres
+//            compile options, or passing them directly through the compiler.
+
+#ifndef CERES_PUBLIC_INTERNAL_CONFIG_H_
+#define CERES_PUBLIC_INTERNAL_CONFIG_H_
+
+
+#endif  // CERES_PUBLIC_INTERNAL_CONFIG_H_
diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html
new file mode 100644
index 0000000..61c8eb5
--- /dev/null
+++ b/docs/source/_templates/layout.html
@@ -0,0 +1,13 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+<script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+  ga('create', 'UA-49769510-1', 'ceres-solver.org');
+  ga('send', 'pageview');
+</script>
+{% endblock %}
diff --git a/docs/source/_themes/armstrong/LICENSE b/docs/source/_themes/armstrong/LICENSE
deleted file mode 100644
index 894aa01..0000000
--- a/docs/source/_themes/armstrong/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2011 Bay Citizen & Texas Tribune
-
-Original ReadTheDocs.org code
-Copyright (c) 2010 Charles Leifer, Eric Holscher, Bobby Grace
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/docs/source/_themes/armstrong/globaltoc.html b/docs/source/_themes/armstrong/globaltoc.html
deleted file mode 100644
index 20d8641..0000000
--- a/docs/source/_themes/armstrong/globaltoc.html
+++ /dev/null
@@ -1,11 +0,0 @@
-{#
-    basic/globaltoc.html
-    ~~~~~~~~~~~~~~~~~~~~
-
-    Sphinx sidebar template: global table of contents.
-
-    :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-#}
-<h3><a href="{{ pathto(master_doc) }}">{{ _('Ceres Solver') }}</a></h3>
-{{ toctree() }}
diff --git a/docs/source/_themes/armstrong/layout.html b/docs/source/_themes/armstrong/layout.html
deleted file mode 100644
index 3faa34c..0000000
--- a/docs/source/_themes/armstrong/layout.html
+++ /dev/null
@@ -1,80 +0,0 @@
-{% extends "basic/layout.html" %}
-
-{% set script_files = script_files + [pathto("_static/searchtools.js", 1)] %}
-
-{% block htmltitle %}
-{{ super() }}
-
-<meta name="viewport" content="width=device-width; initial-scale=1.0; maximum-scale=1.0; user-scalable=0;"/>
-
-{% endblock %}
-
-
-{%- macro sidebar() %}
-      {%- if render_sidebar %}
-      <div class="sphinxsidebar">
-        <div class="sphinxsidebarwrapper">
-          {%- block sidebarlogo %}
-          {%- if logo %}
-            <p class="logo"><a href="{{ pathto(master_doc) }}">
-              <img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/>
-            </a></p>
-          {%- endif %}
-          {%- endblock %}
-          {%- if sidebars != None %}
-            {#- new style sidebar: explicitly include/exclude templates #}
-            {%- for sidebartemplate in sidebars %}
-            {%- include sidebartemplate %}
-            {%- endfor %}
-          {%- else %}
-            {#- old style sidebars: using blocks -- should be deprecated #}
-            {%- block sidebartoc %}
-            {%- include "globaltoc.html" %}
-            {%- endblock %}
-            {%- block sidebarsourcelink %}
-            {%- include "sourcelink.html" %}
-            {%- endblock %}
-            {%- if customsidebar %}
-            {%- include customsidebar %}
-            {%- endif %}
-            {%- block sidebarsearch %}
-            {%- include "searchbox.html" %}
-            {%- endblock %}
-          {%- endif %}
-        </div>
-      </div>
-      {%- endif %}
-{%- endmacro %}
-
-
-{% block footer %}
-<div class="footer">
-{%- if show_copyright %}
-  {%- if hasdoc('copyright') %}
-    {% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}
-  {%- else %}
-    {% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}.{% endtrans %}
-  {%- endif %}
-{%- endif %}
-{%- if last_updated %}
-  {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
-{%- endif %}
-</div>
-
-
-{% if theme_analytics_code %}
-<!-- Google Analytics Code -->
-<script type="text/javascript">
-  var _gaq = _gaq || [];
-  _gaq.push(['_setAccount', '{{ theme_analytics_code }}']);
-  _gaq.push(['_trackPageview']);
-
-  (function() {
-    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-  })();
-</script>
-{% endif %}
-
-{% endblock %}
diff --git a/docs/source/_themes/armstrong/rtd-themes.conf b/docs/source/_themes/armstrong/rtd-themes.conf
deleted file mode 100644
index 5930488..0000000
--- a/docs/source/_themes/armstrong/rtd-themes.conf
+++ /dev/null
@@ -1,65 +0,0 @@
-[theme]
-inherit = default
-stylesheet = rtd.css
-pygment_style = default
-show_sphinx = False
-
-[options]
-show_rtd = True
-
-white = #ffffff
-almost_white = #f8f8f8
-barely_white = #f2f2f2
-dirty_white = #eeeeee
-almost_dirty_white = #e6e6e6
-dirtier_white = #dddddd
-lighter_gray = #cccccc
-gray_a = #aaaaaa
-gray_9 = #999999
-light_gray = #888888
-gray_7 = #777777
-gray = #666666
-dark_gray = #444444
-gray_2 = #222222
-black = #111111
-light_color = #e8ecef
-light_medium_color = #DDEAF0
-medium_color = #8ca1af
-medium_color_link = #86989b
-medium_color_link_hover = #a6b8bb
-dark_color = #465158
-
-h1 = #000000
-h2 = #465158
-h3 = #6c818f
-
-link_color = #444444
-link_color_decoration = #CCCCCC
-
-medium_color_hover = #697983
-green_highlight = #8ecc4c
-
-
-positive_dark = #609060
-positive_medium = #70a070
-positive_light = #e9ffe9
-
-negative_dark = #900000
-negative_medium = #b04040
-negative_light = #ffe9e9
-negative_text = #c60f0f
-
-ruler = #abc
-
-viewcode_bg = #f4debf
-viewcode_border = #ac9
-
-highlight = #ffe080
-
-code_background = #eeeeee
-
-background = #465158
-background_link = #ffffff
-background_link_half = #ffffff
-background_text = #eeeeee
-background_text_link = #86989b
diff --git a/docs/source/_themes/armstrong/static/rtd.css_t b/docs/source/_themes/armstrong/static/rtd.css_t
deleted file mode 100644
index 90354c3..0000000
--- a/docs/source/_themes/armstrong/static/rtd.css_t
+++ /dev/null
@@ -1,781 +0,0 @@
-/*
- * rtd.css
- * ~~~~~~~~~~~~~~~
- *
- * Sphinx stylesheet -- sphinxdoc theme.  Originally created by
- * Armin Ronacher for Werkzeug.
- *
- * Customized for ReadTheDocs by Eric Pierce & Eric Holscher
- *
- * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details.
- *
- */
-
-/* RTD colors
- * light blue: {{ theme_light_color }}
- * medium blue: {{ theme_medium_color }}
- * dark blue: {{ theme_dark_color }}
- * dark grey: {{ theme_grey_color }}
- *
- * medium blue hover: {{ theme_medium_color_hover }};
- * green highlight: {{ theme_green_highlight }}
- * light blue (project bar): {{ theme_light_color }}
- */
-
-@import url("basic.css");
-
-/* PAGE LAYOUT -------------------------------------------------------------- */
-
-body {
-    font: 100%/1.5 "ff-meta-web-pro-1","ff-meta-web-pro-2",Arial,"Helvetica Neue",sans-serif; 
-    text-align: center;
-    color: black;
-    background-color: {{ theme_background }};
-    padding: 0;
-    margin: 0;
-}
-
-div.document {
-    text-align: left;
-    background-color: {{ theme_light_color }};
-}
-
-div.bodywrapper {
-    background-color: {{ theme_white }};
-    border-left: 1px solid {{ theme_lighter_gray }};
-    border-bottom: 1px solid {{ theme_lighter_gray }};
-    margin: 0 0 0 16em;
-}
-
-div.body {
-    margin: 0;
-    padding: 0.5em 1.3em;
-    max-width: 55em;
-    min-width: 20em;
-}
-
-div.related {
-    font-size: 1em;
-    background-color: {{ theme_background }};
-}
-
-div.documentwrapper {
-    float: left;
-    width: 100%;
-    background-color: {{ theme_light_color }};
-}
-
-
-/* HEADINGS --------------------------------------------------------------- */
-
-h1 {
-    margin: 0;
-    padding: 0.7em 0 0.3em 0;
-    font-size: 1.5em;
-    line-height: 1.15;
-    color: {{ theme_h1 }};
-    clear: both;
-}
-
-h2 {
-    margin: 2em 0 0.2em 0;
-    font-size: 1.35em;
-    padding: 0;
-    color: {{ theme_h2 }};
-}
-
-h3 {
-    margin: 1em 0 -0.3em 0;
-    font-size: 1.2em;
-    color: {{ theme_h3 }};
-}
-
-div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
-    color: black;
-}
-
-h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
-    display: none;
-    margin: 0 0 0 0.3em;
-    padding: 0 0.2em 0 0.2em;
-    color: {{ theme_gray_a }} !important;
-}
-
-h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
-h5:hover a.anchor, h6:hover a.anchor {
-    display: inline;
-}
-
-h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
-h5 a.anchor:hover, h6 a.anchor:hover {
-    color: {{ theme_gray_7 }};
-    background-color: {{ theme_dirty_white }};
-}
-
-
-/* LINKS ------------------------------------------------------------------ */
-
-/* Normal links get a pseudo-underline */
-a {
-    color: {{ theme_link_color }};
-    text-decoration: none;
-    border-bottom: 1px solid {{ theme_link_color_decoration }};
-}
-
-/* Links in sidebar, TOC, index trees and tables have no underline */
-.sphinxsidebar a,
-.toctree-wrapper a,
-.indextable a,
-#indices-and-tables a {
-    color: {{ theme_dark_gray }};
-    text-decoration: none;
-    border-bottom: none;
-}
-
-/* Most links get an underline-effect when hovered */
-a:hover,
-div.toctree-wrapper a:hover,
-.indextable a:hover,
-#indices-and-tables a:hover {
-    color: {{ theme_black }};
-    text-decoration: none;
-    border-bottom: 1px solid {{ theme_black }};
-}
-
-/* Footer links */
-div.footer a {
-    color: {{ theme_background_text_link }};
-    text-decoration: none;
-    border: none;
-}
-div.footer a:hover {
-    color: {{ theme_medium_color_link_hover }};
-    text-decoration: underline;
-    border: none;
-}
-
-/* Permalink anchor (subtle grey with a red hover) */
-div.body a.headerlink {
-    color: {{ theme_lighter_gray }};
-    font-size: 1em;
-    margin-left: 6px;
-    padding: 0 4px 0 4px;
-    text-decoration: none;
-    border: none;
-}
-div.body a.headerlink:hover {
-    color: {{ theme_negative_text }};
-    border: none;
-}
-
-
-/* NAVIGATION BAR --------------------------------------------------------- */
-
-div.related ul {
-    height: 2.5em;
-}
-
-div.related ul li {
-    margin: 0;
-    padding: 0.65em 0;
-    float: left;
-    display: block;
-    color: {{ theme_background_link_half }}; /* For the >> separators */
-    font-size: 0.8em;
-}
-
-div.related ul li.right {
-    float: right;
-    margin-right: 5px;
-    color: transparent; /* Hide the | separators */
-}
-
-/* "Breadcrumb" links in nav bar */
-div.related ul li a {
-    order: none;
-    background-color: inherit;
-    font-weight: bold;
-    margin: 6px 0 6px 4px;
-    line-height: 1.75em;
-    color: {{ theme_background_link }};
-    text-shadow: 0 1px rgba(0, 0, 0, 0.5);
-    padding: 0.4em 0.8em;
-    border: none;
-    border-radius: 3px;
-}
-/* previous / next / modules / index links look more like buttons */
-div.related ul li.right a {
-    margin: 0.375em 0;
-    background-color: {{ theme_medium_color_hover }};
-    text-shadow: 0 1px rgba(0, 0, 0, 0.5);
-    border-radius: 3px;
-    -webkit-border-radius: 3px;
-    -moz-border-radius: 3px;
-}
-/* All navbar links light up as buttons when hovered */
-div.related ul li a:hover {
-    background-color: {{ theme_medium_color }};
-    color: {{ theme_white }};
-    text-decoration: none;
-    border-radius: 3px;
-    -webkit-border-radius: 3px;
-    -moz-border-radius: 3px;
-}
-/* Take extra precautions for tt within links */
-a tt,
-div.related ul li a tt {
-    background: inherit !important;
-    color: inherit !important;
-}
-
-
-/* SIDEBAR ---------------------------------------------------------------- */
-
-div.sphinxsidebarwrapper {
-    padding: 0;
-}
-
-div.sphinxsidebar {
-    margin: 0;
-    margin-left: -100%;
-    float: left;
-    top: 3em;
-    left: 0;
-    padding: 0 1em;
-    width: 14em;
-    font-size: 1em;
-    text-align: left;
-    background-color: {{ theme_light_color }};
-}
-
-div.sphinxsidebar img {
-    max-width: 12em;
-}
-
-div.sphinxsidebar h3, div.sphinxsidebar h4 {
-    margin: 1.2em 0 0.3em 0;
-    font-size: 1em;
-    padding: 0;
-    color: {{ theme_gray_2 }};
-    font-family: "ff-meta-web-pro-1", "ff-meta-web-pro-2", "Arial", "Helvetica Neue", sans-serif;
-}
-
-div.sphinxsidebar h3 a {
-    color: {{ theme_grey_color }};
-}
-
-div.sphinxsidebar ul,
-div.sphinxsidebar p {
-    margin-top: 0;
-    padding-left: 0;
-    line-height: 130%;
-    background-color: {{ theme_light_color }};
-}
-
-/* No bullets for nested lists, but a little extra indentation */
-div.sphinxsidebar ul ul {
-    list-style-type: none;
-    margin-left: 1.5em;
-    padding: 0;
-}
-
-/* A little top/bottom padding to prevent adjacent links' borders
- * from overlapping each other */
-div.sphinxsidebar ul li {
-    padding: 1px 0;
-}
-
-/* A little left-padding to make these align with the ULs */
-div.sphinxsidebar p.topless {
-    padding-left: 0 0 0 1em;
-}
-
-/* Make these into hidden one-liners */
-div.sphinxsidebar ul li,
-div.sphinxsidebar p.topless {
-    white-space: nowrap;
-    overflow: hidden;
-}
-/* ...which become visible when hovered */
-div.sphinxsidebar ul li:hover,
-div.sphinxsidebar p.topless:hover {
-    overflow: visible;
-}
-
-/* Search text box and "Go" button */
-#searchbox {
-    margin-top: 2em;
-    margin-bottom: 1em;
-    background: {{ theme_dirtier_white }};
-    padding: 0.5em;
-    border-radius: 6px;
-    -moz-border-radius: 6px;
-    -webkit-border-radius: 6px;
-}
-#searchbox h3 {
-    margin-top: 0;
-}
-
-/* Make search box and button abut and have a border */
-input,
-div.sphinxsidebar input {
-    border: 1px solid {{ theme_gray_9 }};
-    float: left;
-}
-
-/* Search textbox */
-input[type="text"] {
-    margin: 0;
-    padding: 0 3px;
-    height: 20px;
-    width: 144px;
-    border-top-left-radius: 3px;
-    border-bottom-left-radius: 3px;
-    -moz-border-radius-topleft: 3px;
-    -moz-border-radius-bottomleft: 3px;
-    -webkit-border-top-left-radius: 3px;
-    -webkit-border-bottom-left-radius: 3px;
-}
-/* Search button */
-input[type="submit"] {
-    margin: 0 0 0 -1px; /* -1px prevents a double-border with textbox */
-    height: 22px;
-    color: {{ theme_dark_gray }};
-    background-color: {{ theme_light_color }};
-    padding: 1px 4px;
-    font-weight: bold;
-    border-top-right-radius: 3px;
-    border-bottom-right-radius: 3px;
-    -moz-border-radius-topright: 3px;
-    -moz-border-radius-bottomright: 3px;
-    -webkit-border-top-right-radius: 3px;
-    -webkit-border-bottom-right-radius: 3px;
-}
-input[type="submit"]:hover {
-    color: {{ theme_white }};
-    background-color: {{ theme_green_highlight }};
-}
-
-div.sphinxsidebar p.searchtip {
-    clear: both;
-    padding: 0.5em 0 0 0;
-    background: {{ theme_dirtier_white }};
-    color: {{ theme_gray }};
-    font-size: 0.9em;
-}
-
-/* Sidebar links are unusual */
-div.sphinxsidebar li a,
-div.sphinxsidebar p a {
-    background: {{ theme_light_color }}; /* In case links overlap main content */
-    border-radius: 3px;
-    -moz-border-radius: 3px;
-    -webkit-border-radius: 3px;
-    border: 1px solid transparent; /* To prevent things jumping around on hover */
-    padding: 0 5px 0 5px;
-}
-div.sphinxsidebar li a:hover,
-div.sphinxsidebar p a:hover {
-    color: {{ theme_black }};
-    text-decoration: none;
-    border: 1px solid {{ theme_light_gray }};
-}
-
-/* Tweak any link appearing in a heading */
-div.sphinxsidebar h3 a {
-}
-
-
-
-
-/* OTHER STUFF ------------------------------------------------------------ */
-
-cite, code, tt {
-    font-family: 'Consolas', 'Deja Vu Sans Mono',
-                 'Bitstream Vera Sans Mono', monospace;
-    font-size: 0.95em;
-    letter-spacing: 0.01em;
-}
-
-tt {
-    background-color: {{ theme_code_background }};
-    color: {{ theme_dark_gray }};
-}
-
-tt.descname, tt.descclassname, tt.xref {
-    border: 0;
-}
-
-hr {
-    border: 1px solid {{ theme_ruler }};
-    margin: 2em;
-}
-
-pre, #_fontwidthtest {
-    font-family: 'Consolas', 'Deja Vu Sans Mono',
-                 'Bitstream Vera Sans Mono', monospace;
-    margin: 1em 2em;
-    font-size: 0.95em;
-    letter-spacing: 0.015em;
-    line-height: 120%;
-    padding: 0.5em;
-    border: 1px solid {{ theme_lighter_gray }};
-    background-color: {{ theme_code_background }};
-    border-radius: 6px;
-    -moz-border-radius: 6px;
-    -webkit-border-radius: 6px;
-}
-
-pre a {
-    color: inherit;
-    text-decoration: underline;
-}
-
-td.linenos pre {
-    padding: 0.5em 0;
-}
-
-div.quotebar {
-    background-color: {{ theme_almost_white }};
-    max-width: 250px;
-    float: right;
-    padding: 2px 7px;
-    border: 1px solid {{ theme_lighter_gray }};
-}
-
-div.topic {
-    background-color: {{ theme_almost_white }};
-}
-
-table {
-    border-collapse: collapse;
-    margin: 0 -0.5em 0 -0.5em;
-}
-
-table td, table th {
-    padding: 0.2em 0.5em 0.2em 0.5em;
-}
-
-
-/* ADMONITIONS AND WARNINGS ------------------------------------------------- */
-
-/* Shared by admonitions, warnings and sidebars */
-div.admonition,
-div.warning,
-div.sidebar {
-    font-size: 0.9em;
-    margin: 2em;
-    padding: 0;
-    /*
-    border-radius: 6px;
-    -moz-border-radius: 6px;
-    -webkit-border-radius: 6px;
-    */
-}
-div.admonition p,
-div.warning p,
-div.sidebar p {
-    margin: 0.5em 1em 0.5em 1em;
-    padding: 0;
-}
-div.admonition pre,
-div.warning pre,
-div.sidebar pre {
-    margin: 0.4em 1em 0.4em 1em;
-}
-div.admonition p.admonition-title,
-div.warning p.admonition-title,
-div.sidebar p.sidebar-title {
-    margin: 0;
-    padding: 0.1em 0 0.1em 0.5em;
-    color: white;
-    font-weight: bold;
-    font-size: 1.1em;
-    text-shadow: 0 1px rgba(0, 0, 0, 0.5);
-}
-div.admonition ul, div.admonition ol,
-div.warning ul, div.warning ol,
-div.sidebar ul, div.sidebar ol {
-    margin: 0.1em 0.5em 0.5em 3em;
-    padding: 0;
-}
-
-
-/* Admonitions and sidebars only */
-div.admonition, div.sidebar {
-    border: 1px solid {{ theme_positive_dark }};
-    background-color: {{ theme_positive_light }};
-}
-div.admonition p.admonition-title,
-div.sidebar p.sidebar-title {
-    background-color: {{ theme_positive_medium }};
-    border-bottom: 1px solid {{ theme_positive_dark }};
-}
-
-
-/* Warnings only */
-div.warning {
-    border: 1px solid {{ theme_negative_dark }};
-    background-color: {{ theme_negative_light }};
-}
-div.warning p.admonition-title {
-    background-color: {{ theme_negative_medium }};
-    border-bottom: 1px solid {{ theme_negative_dark }};
-}
-
-
-/* Sidebars only */
-div.sidebar {
-  max-width: 200px;
-}
-
-
-
-div.versioninfo {
-    margin: 1em 0 0 0;
-    border: 1px solid {{ theme_lighter_gray }};
-    background-color: {{ theme_light_medium_color }};
-    padding: 8px;
-    line-height: 1.3em;
-    font-size: 0.9em;
-}
-
-.viewcode-back {
-    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
-                 'Verdana', sans-serif;
-}
-
-div.viewcode-block:target {
-    background-color: {{ theme_viewcode_bg }};
-    border-top: 1px solid {{ theme_viewcode_border }};
-    border-bottom: 1px solid {{ theme_viewcode_border }};
-}
-
-dl {
-    margin: 1em 0 2.5em 0;
-}
-
-/* Highlight target when you click an internal link */
-dt:target {
-    background: {{ theme_highlight }};
-}
-/* Don't highlight whole divs */
-div.highlight {
-    background: transparent;
-}
-/* But do highlight spans (so search results can be highlighted) */
-span.highlight {
-    background: {{ theme_highlight }};
-}
-
-div.footer {
-    background-color: {{ theme_background }};
-    color: {{ theme_background_text }};
-    padding: 0 2em 2em 2em;
-    clear: both;
-    font-size: 0.8em;
-    text-align: center;
-}
-
-p {
-    margin: 0.8em 0 0.5em 0;
-}
-
-.section p img {
-    margin: 1em 2em;
-}
-
-
-/* MOBILE LAYOUT -------------------------------------------------------------- */
-
-@media screen and (max-width: 600px) {
-    
-    h1, h2, h3, h4, h5 {
-        position: relative;
-    }
-
-    ul {
-        padding-left: 1.75em;
-    }
-
-    div.bodywrapper a.headerlink, #indices-and-tables h1 a {
-        color: {{ theme_almost_dirty_white }};
-        font-size: 80%;
-        float: right;
-        line-height: 1.8;
-        position: absolute;
-        right: -0.7em;
-        visibility: inherit;
-    }
-
-    div.bodywrapper h1 a.headerlink, #indices-and-tables h1 a {
-        line-height: 1.5;
-    }
-
-    pre {
-        font-size: 0.7em;
-        overflow: auto;
-        word-wrap: break-word;
-        white-space: pre-wrap;
-    }
-
-    div.related ul {
-        height: 2.5em;
-        padding: 0;
-        text-align: left;
-    }
-
-    div.related ul li {
-        clear: both;
-        color: {{ theme_dark_color }};
-        padding: 0.2em 0;
-    }
-
-    div.related ul li:last-child {
-        border-bottom: 1px dotted {{ theme_medium_color }};
-        padding-bottom: 0.4em;
-        margin-bottom: 1em;
-        width: 100%;
-    }
-
-    div.related ul li a {
-        color: {{ theme_dark_color }};
-        padding-right: 0;
-    }
-
-    div.related ul li a:hover {
-        background: inherit;
-        color: inherit;
-    }
-
-    div.related ul li.right {
-        clear: none;
-        padding: 0.65em 0;
-        margin-bottom: 0.5em;
-    }
-
-    div.related ul li.right a {
-        color: {{ theme_white }};
-        padding-right: 0.8em;
-    }
-
-    div.related ul li.right a:hover {
-        background-color: {{ theme_medium_color }};
-    }
-
-    div.body {
-        clear: both;
-        min-width: 0;
-        word-wrap: break-word;
-    }
-
-    div.bodywrapper {
-        margin: 0 0 0 0;
-    }
-
-    div.sphinxsidebar {
-        float: none;
-        margin: 0;
-        width: auto;
-    }
-
-    div.sphinxsidebar input[type="text"] {
-        height: 2em;
-        line-height: 2em;
-        width: 70%;
-    }
-
-    div.sphinxsidebar input[type="submit"] {
-        height: 2em;
-        margin-left: 0.5em;
-        width: 20%;
-    }
-
-    div.sphinxsidebar p.searchtip {
-        background: inherit;
-        margin-bottom: 1em;
-    }
-
-    div.sphinxsidebar ul li, div.sphinxsidebar p.topless {
-        white-space: normal;
-    }
-
-    .bodywrapper img {
-        display: block;
-        margin-left: auto;
-        margin-right: auto;
-        max-width: 100%;
-    }
-
-    div.documentwrapper {
-        float: none;
-    }
-
-    div.admonition, div.warning, pre, blockquote {
-        margin-left: 0em;
-        margin-right: 0em;
-    }
-
-    .body p img {
-        margin: 0;
-    }
-
-    #searchbox {
-        background: transparent;
-    }
-
-    .related:not(:first-child) li {
-        display: none;
-    }
-
-    .related:not(:first-child) li.right {
-        display: block;
-    }
-
-    div.footer {
-        padding: 1em;
-    }
-
-    .rtd_doc_footer .badge {
-        float: none;
-        margin: 1em auto;
-        position: static;
-    }
-
-    .rtd_doc_footer .badge.revsys-inline {
-        margin-right: auto;
-        margin-bottom: 2em;
-    }
-
-    table.indextable {
-        display: block;
-        width: auto; 
-    }
-
-    .indextable tr {
-        display: block;
-    }
-
-    .indextable td {
-        display: block;
-        padding: 0;
-        width: auto !important;
-    }
-
-    .indextable td dt {
-        margin: 1em 0;
-    }
-
-    ul.search {
-        margin-left: 0.25em;
-    }
-
-    ul.search li div.context {
-        font-size: 90%;
-        line-height: 1.1;
-        margin-bottom: 1;
-        margin-left: 0;
-    }
-
-}
diff --git a/docs/source/_themes/armstrong/theme.conf b/docs/source/_themes/armstrong/theme.conf
deleted file mode 100644
index 5930488..0000000
--- a/docs/source/_themes/armstrong/theme.conf
+++ /dev/null
@@ -1,65 +0,0 @@
-[theme]
-inherit = default
-stylesheet = rtd.css
-pygment_style = default
-show_sphinx = False
-
-[options]
-show_rtd = True
-
-white = #ffffff
-almost_white = #f8f8f8
-barely_white = #f2f2f2
-dirty_white = #eeeeee
-almost_dirty_white = #e6e6e6
-dirtier_white = #dddddd
-lighter_gray = #cccccc
-gray_a = #aaaaaa
-gray_9 = #999999
-light_gray = #888888
-gray_7 = #777777
-gray = #666666
-dark_gray = #444444
-gray_2 = #222222
-black = #111111
-light_color = #e8ecef
-light_medium_color = #DDEAF0
-medium_color = #8ca1af
-medium_color_link = #86989b
-medium_color_link_hover = #a6b8bb
-dark_color = #465158
-
-h1 = #000000
-h2 = #465158
-h3 = #6c818f
-
-link_color = #444444
-link_color_decoration = #CCCCCC
-
-medium_color_hover = #697983
-green_highlight = #8ecc4c
-
-
-positive_dark = #609060
-positive_medium = #70a070
-positive_light = #e9ffe9
-
-negative_dark = #900000
-negative_medium = #b04040
-negative_light = #ffe9e9
-negative_text = #c60f0f
-
-ruler = #abc
-
-viewcode_bg = #f4debf
-viewcode_border = #ac9
-
-highlight = #ffe080
-
-code_background = #eeeeee
-
-background = #465158
-background_link = #ffffff
-background_link_half = #ffffff
-background_text = #eeeeee
-background_text_link = #86989b
diff --git a/docs/source/acknowledgements.rst b/docs/source/acknowledgements.rst
deleted file mode 100644
index 36c1562..0000000
--- a/docs/source/acknowledgements.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-.. _chapter-acknowledgements:
-
-================
-Acknowledgements
-================
-
-A number of people have helped with the development and open sourcing
-of Ceres.
-
-Fredrik Schaffalitzky when he was at Google started the development of
-Ceres, and even though much has changed since then, many of the ideas
-from his original design are still present in the current code.
-
-Amongst Ceres' users at Google two deserve special mention: William
-Rucklidge and James Roseborough. William was the first user of
-Ceres. He bravely took on the task of porting production code to an
-as-yet unproven optimization library, reporting bugs and helping fix
-them along the way. James is perhaps the most sophisticated user of
-Ceres at Google. He has reported and fixed bugs and helped evolve the
-API for the better.
-
-Since the initial release of Ceres, a number of people have
-contributed to Ceres by porting it to new platforms, reporting bugs,
-fixing bugs and adding new functionality. We acknowledge all of these
-contributions in the :ref:`chapter-version-history`.
diff --git a/docs/source/bibliography.rst b/docs/source/bibliography.rst
index 7ba435a..bd99758 100644
--- a/docs/source/bibliography.rst
+++ b/docs/source/bibliography.rst
@@ -48,6 +48,12 @@
    preconditioning for bundle adjustment**, *In Proceedings of the
    IEEE Conference on Computer Vision and Pattern Recognition*, 2012.
 
+.. [Kanzow] C. Kanzow, N. Yamashita and M. Fukushima,
+   **Levenberg–Marquardt methods with strong local convergence
+   properties for solving nonlinear equations with convex
+   constraints**, *Journal of Computational and Applied Mathematics*,
+   177(2):375–397, 2005.
+
 .. [Levenberg] K. Levenberg, **A method for the solution of certain
    nonlinear problems in least squares**, *Quart. Appl.  Math*,
    2(2):164–168, 1944.
@@ -113,7 +119,3 @@
    Levenberg Marquardt Method for Large Sparse Nonlinear Least
    Squares**, *Journal of the Australian Mathematical Society Series
    B*, 26(4):387–403, 1985.
-
-
-
-
diff --git a/docs/source/building.rst b/docs/source/building.rst
index c326fd1..4860b0d 100644
--- a/docs/source/building.rst
+++ b/docs/source/building.rst
@@ -1,13 +1,20 @@
 .. _chapter-building:
 
-=====================
-Building Ceres Solver
-=====================
+=======================
+Building & Installation
+=======================
 
-Stable Ceres Solver releases are available for download at
-`code.google.com <http://code.google.com/p/ceres-solver/>`_. For the
-more adventurous, the git repository is hosted on `Gerrit
-<https://ceres-solver-review.googlesource.com/>`_.
+Getting the source code
+=======================
+.. _section-source:
+
+You can start with the `latest stable release
+<http://ceres-solver.org/ceres-solver-1.9.0.tar.gz>`_ . Or if you want
+the latest version, you can clone the git repository
+
+.. code-block:: bash
+
+       git clone https://ceres-solver.googlesource.com/ceres-solver
 
 .. _section-dependencies:
 
@@ -18,53 +25,88 @@
 optional. For details on customizing the build process, see
 :ref:`section-customizing` .
 
-1. `CMake <http://www.cmake.org>`_ is a cross platform build
-system. Ceres needs a relatively recent version of CMake (version
-2.8.0 or better).
+- `Eigen <http://eigen.tuxfamily.org/index.php?title=Main_Page>`_
+  3.2.1 or later.  **Required**
 
-2. `eigen3 <http://eigen.tuxfamily.org/index.php?title=Main_Page>`_ is
-used for doing all the low level matrix and linear algebra operations.
+  .. NOTE ::
 
-3. `google-glog <http://code.google.com/p/google-glog>`_ is
-used for error checking and logging. Ceres needs glog version 0.3.1 or
-later. Version 0.3 (which ships with Fedora 16) has a namespace bug
-which prevents Ceres from building.
+    Ceres can also use Eigen as a sparse linear algebra
+    library. Please see the documentation for ``-DEIGENSPARSE`` for
+    more details.
 
-4. `gflags <http://code.google.com/p/gflags>`_ is a library for
-processing command line flags. It is used by some of the examples and
-tests. While it is not strictly necessary to build the library, we
-strongly recommend building the library with gflags.
+- `CMake <http://www.cmake.org>`_ 2.8.0 or later.
+  **Required on all platforms except for Android.**
 
+- `Google Log <http://code.google.com/p/google-glog>`_ 0.3.1 or
+  later. **Recommended**
 
-5. `SuiteSparse
-<http://www.cise.ufl.edu/research/sparse/SuiteSparse/>`_ is used for
-sparse matrix analysis, ordering and factorization. In particular
-Ceres uses the AMD, CAMD, COLAMD and CHOLMOD libraries. This is an optional
-dependency.
+  .. NOTE::
 
-6. `CXSparse <http://www.cise.ufl.edu/research/sparse/CXSparse/>`_ is
-a sparse matrix library similar in scope to ``SuiteSparse`` but with
-no dependencies on ``LAPACK`` and ``BLAS``. This makes for a simpler
-build process and a smaller binary.  The simplicity comes at a cost --
-for all but the most trivial matrices, ``SuiteSparse`` is
-significantly faster than ``CXSparse``.
+    Ceres has a minimal replacement of ``glog`` called ``miniglog``
+    that can be enabled with the ``MINIGLOG`` build
+    option. ``miniglog`` is needed on Android as ``glog`` currently
+    does not build using the NDK. It can however be used on other
+    platforms too.
 
-7. `BLAS <http://www.netlib.org/blas/>`_ and `LAPACK
-<http://www.netlib.org/lapack/>`_ routines are needed by
-SuiteSparse. We recommend `ATLAS
-<http://math-atlas.sourceforge.net/>`_, which includes BLAS and LAPACK
-routines. It is also possible to use `OpenBLAS
-<https://github.com/xianyi/OpenBLAS>`_ . However, one needs to be
-careful to `turn off the threading
-<https://github.com/xianyi/OpenBLAS/wiki/faq#wiki-multi-threaded>`_
-inside ``OpenBLAS`` as it conflicts with use of threads in Ceres.
+    **We do not advise using** ``miniglog`` **on platforms other than
+    Android due to the various performance and functionality
+    compromises in** ``miniglog``.
+
+- `Google Flags <http://code.google.com/p/gflags>`_. Needed to build
+  examples and tests.
+
+- `SuiteSparse
+  <http://www.cise.ufl.edu/research/sparse/SuiteSparse/>`_. Needed for
+  solving large sparse linear systems. **Optional; strongly recommended
+  for large scale bundle adjustment**
+
+- `CXSparse <http://www.cise.ufl.edu/research/sparse/CXSparse/>`_.
+  Similar to ``SuiteSparse`` but simpler and slower. CXSparse has
+  no dependencies on ``LAPACK`` and ``BLAS``. This makes for a simpler
+  build process and a smaller binary. **Optional**
+
+- `BLAS <http://www.netlib.org/blas/>`_ and `LAPACK
+  <http://www.netlib.org/lapack/>`_ routines are needed by
+  ``SuiteSparse``, and optionally used by Ceres directly for some
+  operations.
+
+  On ``UNIX`` OSes other than Mac OS X we recommend `ATLAS
+  <http://math-atlas.sourceforge.net/>`_, which includes ``BLAS`` and
+  ``LAPACK`` routines. It is also possible to use `OpenBLAS
+  <https://github.com/xianyi/OpenBLAS>`_ . However, one needs to be
+  careful to `turn off the threading
+  <https://github.com/xianyi/OpenBLAS/wiki/faq#wiki-multi-threaded>`_
+  inside ``OpenBLAS`` as it conflicts with use of threads in Ceres.
+
+  Mac OS X ships with an optimized ``LAPACK`` and ``BLAS``
+  implementation as part of the ``Accelerate`` framework. The Ceres
+  build system will automatically detect and use it.
+
+  For Windows things are much more complicated. `LAPACK For
+  Windows <http://icl.cs.utk.edu/lapack-for-windows/lapack/>`_
+  has detailed instructions.
+
+  **Optional but required for** ``SuiteSparse``.
 
 .. _section-linux:
 
-Building on Linux
-=================
-We will use `Ubuntu <http://www.ubuntu.com>`_ as our example
-platform. Start by installing all the dependencies.
+Linux
+=====
+
+We will use `Ubuntu <http://www.ubuntu.com>`_ as our example linux
+distribution.
+
+.. NOTE::
+
+ Up to at least Ubuntu 13.10, the SuiteSparse package in the official
+ package repository (built from SuiteSparse v3.4.0) **cannot** be used
+ to build Ceres as a *shared* library.  Thus if you want to build
+ Ceres as a shared library using SuiteSparse, you must perform a
+ source install of SuiteSparse.  It is recommended that you use the
+ current version of SuiteSparse (4.2.1 at the time of writing).
+
+
+Start by installing all the dependencies.
 
 .. code-block:: bash
 
@@ -86,19 +128,26 @@
      sudo apt-get install libatlas-base-dev
      # Eigen3
      sudo apt-get install libeigen3-dev
-     # SuiteSparse and CXSparse
+     # SuiteSparse and CXSparse (optional)
+     # - If you want to build Ceres as a *static* library (the default)
+     #   you can use the SuiteSparse package in the main Ubuntu package
+     #   repository:
      sudo apt-get install libsuitesparse-dev
+     # - However, if you want to build Ceres as a *shared* library, you must
+     #   perform a source install of SuiteSparse (and uninstall the Ubuntu
+     #   package if it is currently installed).
 
-We are now ready to build and test Ceres.
+We are now ready to build, test, and install Ceres.
 
 .. code-block:: bash
 
- tar zxf ceres-solver-1.7.0.tar.gz
+ tar zxf ceres-solver-1.9.0.tar.gz
  mkdir ceres-bin
  cd ceres-bin
- cmake ../ceres-solver-1.7.0
+ cmake ../ceres-solver-1.9.0
  make -j3
  make test
+ make install
 
 You can also try running the command line bundling application with one of the
 included problems, which comes from the University of Washington's BAL
@@ -106,7 +155,7 @@
 
 .. code-block:: bash
 
- bin/simple_bundle_adjuster ../ceres-solver-1.7.0/data/problem-16-22106-pre.txt
+ bin/simple_bundle_adjuster ../ceres-solver-1.9.0/data/problem-16-22106-pre.txt
 
 This runs Ceres for a maximum of 10 iterations using the
 ``DENSE_SCHUR`` linear solver. The output should look something like
@@ -114,63 +163,87 @@
 
 .. code-block:: bash
 
-    0: f: 4.185660e+06 d: 0.00e+00 g: 1.09e+08 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li:  0 it: 1.16e-01 tt: 3.39e-01
-    1: f: 1.062590e+05 d: 4.08e+06 g: 8.99e+06 h: 5.36e+02 rho: 9.82e-01 mu: 3.00e+04 li:  1 it: 3.90e-01 tt: 7.29e-01
-    2: f: 4.992817e+04 d: 5.63e+04 g: 8.32e+06 h: 3.19e+02 rho: 6.52e-01 mu: 3.09e+04 li:  1 it: 3.52e-01 tt: 1.08e+00
-    3: f: 1.899774e+04 d: 3.09e+04 g: 1.60e+06 h: 1.24e+02 rho: 9.77e-01 mu: 9.26e+04 li:  1 it: 3.60e-01 tt: 1.44e+00
-    4: f: 1.808729e+04 d: 9.10e+02 g: 3.97e+05 h: 6.39e+01 rho: 9.51e-01 mu: 2.78e+05 li:  1 it: 3.62e-01 tt: 1.80e+00
-    5: f: 1.803399e+04 d: 5.33e+01 g: 1.48e+04 h: 1.23e+01 rho: 9.99e-01 mu: 8.33e+05 li:  1 it: 3.54e-01 tt: 2.16e+00
-    6: f: 1.803390e+04 d: 9.02e-02 g: 6.35e+01 h: 8.00e-01 rho: 1.00e+00 mu: 2.50e+06 li:  1 it: 3.59e-01 tt: 2.52e+00
+    iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time
+       0  4.185660e+06    0.00e+00    1.09e+08   0.00e+00   0.00e+00  1.00e+04       0    7.59e-02    3.37e-01
+       1  1.062590e+05    4.08e+06    8.99e+06   5.36e+02   9.82e-01  3.00e+04       1    1.65e-01    5.03e-01
+       2  4.992817e+04    5.63e+04    8.32e+06   3.19e+02   6.52e-01  3.09e+04       1    1.45e-01    6.48e-01
+       3  1.899774e+04    3.09e+04    1.60e+06   1.24e+02   9.77e-01  9.26e+04       1    1.43e-01    7.92e-01
+       4  1.808729e+04    9.10e+02    3.97e+05   6.39e+01   9.51e-01  2.78e+05       1    1.45e-01    9.36e-01
+       5  1.803399e+04    5.33e+01    1.48e+04   1.23e+01   9.99e-01  8.33e+05       1    1.45e-01    1.08e+00
+       6  1.803390e+04    9.02e-02    6.35e+01   8.00e-01   1.00e+00  2.50e+06       1    1.50e-01    1.23e+00
 
- Ceres Solver Report
- -------------------
-                                      Original                  Reduced
- Parameter blocks                        22122                    22122
- Parameters                              66462                    66462
- Residual blocks                         83718                    83718
- Residual                               167436                   167436
- Trust Region Strategy     LEVENBERG_MARQUARDT
+    Ceres Solver v1.10.0 Solve Report
+    ----------------------------------
+                                         Original                  Reduced
+    Parameter blocks                        22122                    22122
+    Parameters                              66462                    66462
+    Residual blocks                         83718                    83718
+    Residual                               167436                   167436
 
-                                         Given                     Used
- Linear solver                     DENSE_SCHUR              DENSE_SCHUR
- Preconditioner                            N/A                      N/A
- Threads:                                    1                        1
- Linear solver threads                       1                        1
- Linear solver ordering              AUTOMATIC                 22106,16
+    Minimizer                        TRUST_REGION
 
- Cost:
- Initial                          4.185660e+06
- Final                            1.803390e+04
- Change                           4.167626e+06
+    Dense linear algebra library            EIGEN
+    Trust region strategy     LEVENBERG_MARQUARDT
 
- Number of iterations:
- Successful                                  6
- Unsuccessful                                0
- Total                                       6
+                                            Given                     Used
+    Linear solver                     DENSE_SCHUR              DENSE_SCHUR
+    Threads                                     1                        1
+    Linear solver threads                       1                        1
+    Linear solver ordering              AUTOMATIC                22106, 16
 
- Time (in seconds):
- Preprocessor                        2.229e-01
+    Cost:
+    Initial                          4.185660e+06
+    Final                            1.803390e+04
+    Change                           4.167626e+06
 
-   Evaluator::Residuals              7.438e-02
-   Evaluator::Jacobians              6.790e-01
-   Linear Solver                     1.681e+00
- Minimizer                           2.547e+00
+    Minimizer iterations                        6
+    Successful steps                            6
+    Unsuccessful steps                          0
 
- Postprocessor                       1.920e-02
- Total                               2.823e+00
+    Time (in seconds):
+    Preprocessor                            0.261
 
- Termination:               FUNCTION_TOLERANCE
+      Residual evaluation                   0.082
+      Jacobian evaluation                   0.412
+      Linear solver                         0.442
+    Minimizer                               1.051
+
+    Postprocessor                           0.002
+    Total                                   1.357
+
+    Termination:                      CONVERGENCE (Function tolerance reached. |cost_change|/cost: 1.769766e-09 <= 1.000000e-06)
 
 .. section-osx:
 
-Building on Mac OS X
-====================
+Mac OS X
+========
+.. NOTE::
+
+ Ceres will not compile using Xcode 4.5.x (Clang version 4.1) due to a bug in that version of
+ Clang.  If you are running Xcode 4.5.x, please update to Xcode >= 4.6.x before attempting to
+ build Ceres.
+
 
 On OS X, we recommend using the `homebrew
-<http://mxcl.github.com/homebrew/>`_ package manager to install the
-dependencies. There is no need to install ``BLAS`` or ``LAPACK``
-separately as OS X ships with optimized ``BLAS`` and ``LAPACK``
-routines as part of the `vecLib
+<http://mxcl.github.com/homebrew/>`_ package manager to install Ceres.
+
+.. code-block:: bash
+
+      brew install ceres-solver
+
+will install the latest stable version along with all the required
+dependencies and
+
+.. code-block:: bash
+
+      brew install ceres-solver --HEAD
+
+will install the latest version in the git repo.
+
+You can also install each of the dependencies by hand using `homebrew
+<http://mxcl.github.com/homebrew/>`_. There is no need to install
+``BLAS`` or ``LAPACK`` separately as OS X ships with optimized
+``BLAS`` and ``LAPACK`` routines as part of the `vecLib
 <https://developer.apple.com/library/mac/#documentation/Performance/Conceptual/vecLib/Reference/reference.html>`_
 framework.
 
@@ -185,32 +258,51 @@
       # SuiteSparse and CXSparse
       brew install suite-sparse
 
-
-We are now ready to build and test Ceres.
+We are now ready to build, test, and install Ceres.
 
 .. code-block:: bash
 
-   tar zxf ceres-solver-1.7.0.tar.gz
+   tar zxf ceres-solver-1.9.0.tar.gz
    mkdir ceres-bin
    cd ceres-bin
-   cmake ../ceres-solver-1.7.0
+   cmake ../ceres-solver-1.9.0
    make -j3
    make test
-
+   make install
 
 Like the Linux build, you should now be able to run
 ``bin/simple_bundle_adjuster``.
 
 .. _section-windows:
 
-Building on Windows with Visual Studio
-======================================
+Windows
+=======
 
 On Windows, we support building with Visual Studio 2010 or newer. Note
 that the Windows port is less featureful and less tested than the
-Linux or Mac OS X versions due to the unavailability of SuiteSparse
-and ``CXSparse``. Building is also more involved since there is no
-automated way to install the dependencies.
+Linux or Mac OS X versions due to the lack of an officially supported
+way of building SuiteSparse and CXSparse.  There are however a number
+of unofficial ways of building these libraries. Building on Windows
+is also a bit more involved since there is no automated way to
+install dependencies.
+
+.. NOTE:: Using ``google-glog`` & ``miniglog`` with windows.h.
+
+ The windows.h header if used with GDI (Graphics Device Interface)
+ defines ``ERROR``, which conflicts with the definition of ``ERROR``
+ as a LogSeverity level in ``google-glog`` and ``miniglog``.  There
+ are at least two possible fixes to this problem:
+
+ #. Use ``google-glog`` and define ``GLOG_NO_ABBREVIATED_SEVERITIES``
+    when building Ceres and your own project, as documented
+    `here <http://google-glog.googlecode.com/svn/trunk/doc/glog.html>`__.
+    Note that this fix will not work for ``miniglog``,
+    but use of ``miniglog`` is strongly discouraged on any platform for which
+    ``google-glog`` is available (which includes Windows).
+ #. If you do not require GDI, then define ``NOGDI`` **before** including
+    windows.h.  This solution should work for both ``google-glog`` and
+    ``miniglog`` and is documented for ``google-glog``
+    `here <https://code.google.com/p/google-glog/issues/detail?id=33>`__.
 
 #. Make a toplevel directory for deps & build & src somewhere: ``ceres/``
 #. Get dependencies; unpack them as subdirectories in ``ceres/``
@@ -222,6 +314,18 @@
    #. ``google-glog`` Open up the Visual Studio solution and build it.
    #. ``gflags`` Open up the Visual Studio solution and build it.
 
+   #. (Experimental) ``SuiteSparse`` Previously SuiteSparse was not available
+      on Windows, recently it has become possible to build it on Windows using
+      the `suitesparse-metis-for-windows <https://github.com/jlblancoc/suitesparse-metis-for-windows>`_
+      project.  If you wish to use ``SuiteSparse``, follow their instructions
+      for obtaining and building it.
+
+   #. (Experimental) ``CXSparse`` Previously CXSparse was not available on
+      Windows, there are now several ports that enable it to be, including:
+      `[1] <https://github.com/PetterS/CXSparse>`_ and
+      `[2] <https://github.com/TheFrenchLeaf/CXSparse>`_.  If you wish to use
+      ``CXSparse``, follow their instructions for obtaining and building it.
+
 #. Unpack the Ceres tarball into ``ceres``. For the tarball, you
    should get a directory inside ``ceres`` similar to
    ``ceres-solver-1.3.0``. Alternately, checkout Ceres via ``git`` to
@@ -238,12 +342,22 @@
 #. Try running ``Configure``. It won't work. It'll show a bunch of options.
    You'll need to set:
 
-   #. ``GLOG_INCLUDE``
-   #. ``GLOG_LIB``
-   #. ``GFLAGS_LIB``
-   #. ``GFLAGS_INCLUDE``
+   #. ``EIGEN_INCLUDE_DIR_HINTS``
+   #. ``GLOG_INCLUDE_DIR_HINTS``
+   #. ``GLOG_LIBRARY_DIR_HINTS``
+   #. ``GFLAGS_INCLUDE_DIR_HINTS``
+   #. ``GFLAGS_LIBRARY_DIR_HINTS``
+   #. (Optional) ``SUITESPARSE_INCLUDE_DIR_HINTS``
+   #. (Optional) ``SUITESPARSE_LIBRARY_DIR_HINTS``
+   #. (Optional) ``CXSPARSE_INCLUDE_DIR_HINTS``
+   #. (Optional) ``CXSPARSE_LIBRARY_DIR_HINTS``
 
-   to the appropriate place where you unpacked/built them.
+   to the appropriate directories where you unpacked/built them. If any of
+   the variables are not visible in the ``CMake`` GUI, create a new entry
+   for them.  We recommend using the ``<NAME>_(INCLUDE/LIBRARY)_DIR_HINTS``
+   variables rather than setting the ``<NAME>_INCLUDE_DIR`` &
+   ``<NAME>_LIBRARY`` variables directly to keep all of the validity
+   checking, and to avoid having to specify the library files manually.
 
 #. You may have to tweak some more settings to generate a MSVC
    project.  After each adjustment, try pressing Configure & Generate
@@ -255,30 +369,66 @@
 To run the tests, select the ``RUN_TESTS`` target and hit **Build
 RUN_TESTS** from the build menu.
 
-Like the Linux build, you should now be able to run ``bin/simple_bundle_adjuster``.
+Like the Linux build, you should now be able to run
+``bin/simple_bundle_adjuster``.
 
 Notes:
 
 #. The default build is Debug; consider switching it to release mode.
 #. Currently ``system_test`` is not working properly.
-#. Building Ceres as a DLL is not supported; patches welcome.
 #. CMake puts the resulting test binaries in ``ceres-bin/examples/Debug``
    by default.
 #. The solvers supported on Windows are ``DENSE_QR``, ``DENSE_SCHUR``,
    ``CGNR``, and ``ITERATIVE_SCHUR``.
 #. We're looking for someone to work with upstream ``SuiteSparse`` to
    port their build system to something sane like ``CMake``, and get a
-   supported Windows port.
+   fully supported Windows port.
 
 
 .. _section-android:
 
-Building on Android
-===================
+Android
+=======
 
+Download the ``Android NDK`` version ``r9d`` or later. Run
+``ndk-build`` from inside the ``jni`` directory. Use the
+``libceres.a`` that gets created.
 
-Download the ``Android NDK``. Run ``ndk-build`` from inside the
-``jni`` directory. Use the ``libceres.a`` that gets created.
+.. _section-ios:
+
+iOS
+===
+
+.. NOTE::
+
+   You need iOS version 6.0 or higher to build Ceres Solver.
+
+To build Ceres for iOS, we need to force ``CMake`` to find the toolchains from
+the iOS SDK instead of using the standard ones. For example:
+
+.. code-block:: bash
+
+   cmake ../ceres-solver \
+   -DCMAKE_TOOLCHAIN_FILE=../ceres-solver/cmake/iOS.cmake \
+   -DEIGEN_INCLUDE_DIR=/path/to/eigen/header \
+   -DIOS_PLATFORM=<PLATFORM>
+
+``PLATFORM`` can be one of ``OS``, ``SIMULATOR`` and ``SIMULATOR64``. You can
+build for ``OS`` (``armv7``, ``armv7s``, ``arm64``), ``SIMULATOR`` (``i386``) or
+``SIMULATOR64`` (``x86_64``) separately and use ``LIPO`` to merge them into
+one static library.  See ``cmake/iOS.cmake`` for more options.
+
+After building, you will get a ``libceres.a`` library, which you will need to
+add to your Xcode project.
+
+The default CMake configuration builds a bare bones version of Ceres
+Solver that only depends on Eigen (``MINIGLOG`` is compiled into Ceres if it is
+used), this should be sufficient for solving small to moderate sized problems
+(No ``SPARSE_SCHUR``, ``SPARSE_NORMAL_CHOLESKY`` linear solvers and no
+``CLUSTER_JACOBI`` and ``CLUSTER_TRIDIAGONAL`` preconditioners).
+
+If you decide to use ``LAPACK`` and ``BLAS``, then you also need to add
+``Accelerate.framework`` to your XCode project's linking dependency.
 
 .. _section-customizing:
 
@@ -286,42 +436,147 @@
 =====================
 
 It is possible to reduce the libraries needed to build Ceres and
-customize the build process by passing appropriate flags to
-``CMake``. Use these flags only if you really know what you are doing.
+customize the build process by setting the appropriate options in
+``CMake``.  These options can either be set in the ``CMake`` GUI,
+or via ``-D<OPTION>=<ON/OFF>`` when running ``CMake`` from the
+command line.  In general, you should only modify these options from
+their defaults if you know what you are doing.
 
-#. ``-DSUITESPARSE=OFF``: By default, Ceres will link to
-   ``SuiteSparse`` if all its dependencies are present. Use this flag
-   to build Ceres without ``SuiteSparse``. This will also disable
-   dependency checking for ``LAPACK`` and ``BLAS``. This will reduce
-   Ceres' dependencies down to ``Eigen``, ``gflags`` and
-   ``google-glog``.
+.. NOTE::
 
-#. ``-DCXSPARSE=OFF``: By default, Ceres will link to ``CXSparse`` if
-   all its dependencies are present. Use this flag to builds Ceres
-   without ``CXSparse``. This will reduce Ceres' dependencies down to
-   ``Eigen``, ``gflags`` and ``google-glog``.
+ If you are setting variables via ``-D<VARIABLE>=<VALUE>`` when calling
+ ``CMake``, it is important to understand that this forcibly **overwrites** the
+ variable ``<VARIABLE>`` in the ``CMake`` cache at the start of *every configure*.
 
-#. ``-DGFLAGS=OFF``: Use this flag to build Ceres without
+ This can lead to confusion if you are invoking the ``CMake``
+ `curses <http://www.gnu.org/software/ncurses/ncurses.html>`_ terminal GUI
+ (via ``ccmake``, e.g. ``ccmake -D<VARIABLE>=<VALUE> <PATH_TO_SRC>``).
+ In this case, even if you change the value of ``<VARIABLE>`` in the ``CMake``
+ GUI, your changes will be **overwritten** with the value passed via
+ ``-D<VARIABLE>=<VALUE>`` (if one exists) at the start of each configure.
+
+ As such, it is generally easier not to pass values to ``CMake`` via ``-D``
+ and instead interactively experiment with their values in the ``CMake`` GUI.
+ If they are not present in the *Standard View*, toggle to the *Advanced View*
+ with ``<t>``.
+
+Options controlling Ceres configuration
+---------------------------------------
+
+#. ``LAPACK [Default: ON]``: By default Ceres will use ``LAPACK`` (&
+   ``BLAS``) if they are found.  Turn this ``OFF`` to build Ceres
+   without ``LAPACK``. Turning this ``OFF`` also disables
+   ``SUITESPARSE`` as it depends on ``LAPACK``.
+
+#. ``SUITESPARSE [Default: ON]``: By default, Ceres will link to
+   ``SuiteSparse`` if it and all of its dependencies are present. Turn
+   this ``OFF`` to build Ceres without ``SuiteSparse``. Note that
+   ``LAPACK`` must be ``ON`` in order to build with ``SuiteSparse``.
+
+#. ``CXSPARSE [Default: ON]``: By default, Ceres will link to
+   ``CXSparse`` if all its dependencies are present. Turn this ``OFF``
+   to build Ceres without ``CXSparse``.
+
+#. ``EIGENSPARSE [Default: OFF]``: By default, Ceres will not use
+   Eigen's sparse Cholesky factorization. This is because this part of
+   the code is licensed under the ``LGPL`` and since ``Eigen`` is a
+   header only library, including this code will result in an ``LGPL``
+   licensed version of Ceres.
+
+#. ``GFLAGS [Default: ON]``: Turn this ``OFF`` to build Ceres without
    ``gflags``. This will also prevent some of the example code from
    building.
 
-#. ``-DSCHUR_SPECIALIZATIONS=OFF``: If you are concerned about binary
-   size/compilation time over some small (10-20%) performance gains in
-   the ``SPARSE_SCHUR`` solver, you can disable some of the template
-   specializations by using this flag.
+#. ``MINIGLOG [Default: OFF]``: Ceres includes a stripped-down,
+   minimal implementation of ``glog`` which can optionally be used as
+   a substitute for ``glog``, thus removing ``glog`` as a required
+   dependency. Turn this ``ON`` to use this minimal ``glog``
+   implementation.
 
-#. ``-DLINE_SEARCH_MINIMIZER=OFF``: The line search based minimizer is
-   mostly suitable for large scale optimization problems, or when sparse
-   linear algebra libraries are not available. You can further save on
-   some compile time and binary size by using this flag.
+#. ``SCHUR_SPECIALIZATIONS [Default: ON]``: If you are concerned about
+   binary size/compilation time over some small (10-20%) performance
+   gains in the ``SPARSE_SCHUR`` solver, you can disable some of the
+   template specializations by turning this ``OFF``.
 
-#. ``-DOPENMP=OFF``: On certain platforms like Android,
-   multi-threading with ``OpenMP`` is not supported. Use this flag to
-   disable multithreading.
+#. ``OPENMP [Default: ON]``: On certain platforms like Android,
+   multi-threading with ``OpenMP`` is not supported. Turn this ``OFF``
+   to disable multithreading.
 
-#. ``-DBUILD_DOCUMENTATION=ON``: Use this flag to enable building the
-   documentation. In addition, ``make ceres_docs`` can be used to
-   build only the documentation.
+#. ``BUILD_SHARED_LIBS [Default: OFF]``: By default Ceres is built as
+   a static library, turn this ``ON`` to instead build Ceres as a
+   shared library.
+
+#. ``BUILD_DOCUMENTATION [Default: OFF]``: Use this to enable building
+   the documentation, requires `Sphinx <http://sphinx-doc.org/>`_ and the
+   `sphinx_rtd_theme <https://pypi.python.org/pypi/sphinx_rtd_theme>`_
+   package available from the Python package index. In addition,
+   ``make ceres_docs`` can be used to build only the documentation.
+
+#. ``MSVC_USE_STATIC_CRT [Default: OFF]`` *Windows Only*: By default
+   Ceres will use the Visual Studio default, *shared* C-Run Time (CRT) library.
+   Turn this ``ON`` to use the *static* C-Run Time library instead.
+
+
+Options controlling Ceres dependency locations
+----------------------------------------------
+
+Ceres uses the ``CMake``
+`find_package <http://www.cmake.org/cmake/help/v2.8.12/cmake.html#command:find_package>`_
+function to find all of its dependencies using
+``Find<DEPENDENCY_NAME>.cmake`` scripts which are either included in Ceres
+(for most dependencies) or are shipped as standard with ``CMake``
+(for ``LAPACK`` & ``BLAS``).  These scripts will search all of the "standard"
+install locations for various OSs for each dependency.  However, particularly
+for Windows, they may fail to find the library, in this case you will have to
+manually specify its installed location.  The ``Find<DEPENDENCY_NAME>.cmake``
+scripts shipped with Ceres support two ways for you to do this:
+
+#. Set the *hints* variables specifying the *directories* to search in
+   preference, but in addition, to the search directories in the
+   ``Find<DEPENDENCY_NAME>.cmake`` script:
+
+   - ``<DEPENDENCY_NAME (CAPS)>_INCLUDE_DIR_HINTS``
+   - ``<DEPENDENCY_NAME (CAPS)>_LIBRARY_DIR_HINTS``
+
+   These variables should be set via ``-D<VAR>=<VALUE>``
+   ``CMake`` arguments as they are not visible in the GUI.
+
+#. Set the variables specifying the *explicit* include directory
+   and library file to use:
+
+   - ``<DEPENDENCY_NAME (CAPS)>_INCLUDE_DIR``
+   - ``<DEPENDENCY_NAME (CAPS)>_LIBRARY``
+
+   This bypasses *all* searching in the
+   ``Find<DEPENDENCY_NAME>.cmake`` script, but validation is still
+   performed.
+
+   These variables are available to set in the ``CMake`` GUI. They
+   are visible in the *Standard View* if the library has not been
+   found (but the current Ceres configuration requires it), but
+   are always visible in the *Advanced View*.  They can also be
+   set directly via ``-D<VAR>=<VALUE>`` arguments to ``CMake``.
+
+Building using custom BLAS & LAPACK installs
+----------------------------------------------
+
+If the standard find package scripts for ``BLAS`` & ``LAPACK`` which ship with
+``CMake`` fail to find the desired libraries on your system, try setting
+``CMAKE_LIBRARY_PATH`` to the path(s) to the directories containing the
+``BLAS`` & ``LAPACK`` libraries when invoking ``CMake`` to build Ceres via
+``-D<VAR>=<VALUE>``.  This should result in the libraries being found for any
+common variant of each.
+
+If you are building on an exotic system, or setting ``CMAKE_LIBRARY_PATH``
+does not work, or is not appropriate for some other reason, one option would be
+to write your own custom versions of ``FindBLAS.cmake`` &
+``FindLAPACK.cmake`` specific to your environment.  In this case you must set
+``CMAKE_MODULE_PATH`` to the directory containing these custom scripts when
+invoking ``CMake`` to build Ceres and they will be used in preference to the
+default versions.  However, in order for this to work, your scripts must provide
+the full set of variables provided by the default scripts.  Also, if you are
+building Ceres with ``SuiteSparse``, the versions of ``BLAS`` & ``LAPACK``
+used by ``SuiteSparse`` and Ceres should be the same.
 
 .. _section-using-ceres:
 
@@ -343,7 +598,7 @@
     PROJECT(helloworld)
 
     FIND_PACKAGE(Ceres REQUIRED)
-    INCLUDE_DIRECTORIES(${CERES_INCLUDES})
+    INCLUDE_DIRECTORIES(${CERES_INCLUDE_DIRS})
 
     # helloworld
     ADD_EXECUTABLE(helloworld helloworld.cc)
@@ -374,19 +629,5 @@
 
    FIND_PACKAGE(Ceres REQUIRED PATHS "/some/where/local/")
 
-Note that this can be used to have multiple versions of Ceres installed.
-
-Compiling against static or shared library
-------------------------------------------
-
-.. code-block:: cmake
-
-    TARGET_LINK_LIBRARIES(helloworld ${CERES_LIBRARIES})
-
-will result in a statically linked binary. Changing this line to
-
-.. code-block:: cmake
-
-    TARGET_LINK_LIBRARIES(helloworld ${CERES_LIBRARIES_SHARED})
-
-will result in a dynamically linked binary.
+Note that this can be used to have multiple versions of Ceres
+installed.
diff --git a/docs/source/conf.py b/docs/source/conf.py
index f5ffb6d..478682f 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -41,16 +41,16 @@
 
 # General information about the project.
 project = u'Ceres Solver'
-copyright = u'2013, Google Inc.'
+copyright = u'2014 Google Inc'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = '1.7'
+version = '1.9'
 # The full version, including alpha/beta/rc tags.
-release = '1.7.0'
+release = '1.9.0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -91,7 +91,7 @@
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'armstrong'
+html_theme = 'sphinx_rtd_theme'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
@@ -100,6 +100,8 @@
 
 # Add any paths that contain custom themes here, relative to this directory.
 html_theme_path = ["_themes",]
+import sphinx_rtd_theme
+html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
@@ -120,7 +122,7 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+#html_static_path = ['_static']
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst
index 20fe34d..b169dbf 100644
--- a/docs/source/contributing.rst
+++ b/docs/source/contributing.rst
@@ -1,9 +1,8 @@
 .. _chapter-contributing:
 
-=============
-Contributions
-=============
-
+============
+Contributing
+============
 
 We welcome contributions to Ceres, whether they are new features, bug
 fixes or tests. The Ceres `mailing
@@ -27,8 +26,8 @@
 We now describe how to set up your development environment and submit
 a change list for review via Gerrit.
 
-Setting up your Development Environment
-=======================================
+Setting up your Environment
+===========================
 
 1. Download and configure ``git``.
 
@@ -98,13 +97,16 @@
        name.
 
 
-Submitting a change to Ceres Solver
-===================================
+Submitting a change
+===================
 
 1. Make your changes against master or whatever branch you
    like. Commit your changes as one patch. When you commit, the Gerrit
    hook will add a `Change-Id:` line as the last line of the commit.
 
+   Make sure that your commit message is formatted in the `50/72 style
+   <http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html>`_.
+
 2. Push your changes to the Ceres Gerrit instance:
 
    .. code-block:: bash
@@ -112,8 +114,8 @@
       git push origin HEAD:refs/for/master
 
    When the push succeeds, the console will display a URL showing the
-   address of the review. Go to the URL and add reviewers; typically
-   this is Sameer or Keir at this point.
+   address of the review. Go to the URL and add at least one of the
+   maintainers (Sameer Agarwal, Keir Mierle, or Alex Stewart) as reviewers.
 
 3. Wait for a review.
 
diff --git a/docs/source/faqs.rst b/docs/source/faqs.rst
new file mode 100644
index 0000000..73ad41d
--- /dev/null
+++ b/docs/source/faqs.rst
@@ -0,0 +1,282 @@
+.. _chapter-tricks:
+
+===================
+FAQS, Tips & Tricks
+===================
+
+Answers to frequently asked questions, tricks of the trade and general
+wisdom.
+
+Building
+========
+
+#. Use `google-glog <http://code.google.com/p/google-glog>`_.
+
+   Ceres has extensive support for logging detailed information about
+   memory allocations and time consumed in various parts of the solve,
+   internal error conditions etc. This is done using the
+   `google-glog <http://code.google.com/p/google-glog>`_ library. We
+   use it extensively to observe and analyze Ceres's
+   performance. `google-glog <http://code.google.com/p/google-glog>`_
+   allows you to control its behaviour from the command line `flags
+   <http://google-glog.googlecode.com/svn/trunk/doc/glog.html>`_. Starting
+   with ``-logtostderr`` you can add ``-v=N`` for increasing values
+   of ``N`` to get more and more verbose and detailed information
+   about Ceres internals.
+
+   In an attempt to reduce dependencies, it is tempting to use
+   `miniglog` - a minimal implementation of the ``glog`` interface
+   that ships with Ceres. This is a bad idea. ``miniglog`` was written
+   primarily for building and using Ceres on Android because the
+   current version of `google-glog
+   <http://code.google.com/p/google-glog>`_ does not build using the
+   NDK. It has worse performance than the full-fledged glog library
+   and is much harder to control and use.
+
+
+Modeling
+========
+
+#. Use analytical/automatic derivatives.
+
+   This is the single most important piece of advice we can give to
+   you. It is tempting to take the easy way out and use numeric
+   differentiation. This is a bad idea. Numeric differentiation is
+   slow, ill-behaved, hard to get right, and results in poor
+   convergence behaviour.
+
+   Ceres allows the user to define templated functors which will
+   be automatically differentiated. For most situations this is enough
+   and we recommend using this facility. In some cases the derivatives
+   are simple enough or the performance considerations are such that
+   the overhead of automatic differentiation is too much. In such
+   cases, analytic derivatives are recommended.
+
+   The use of numerical derivatives should be a measure of last
+   resort, where it is simply not possible to write a templated
+   implementation of the cost function.
+
+   In many cases it is not possible to do analytic or automatic
+   differentiation of the entire cost function, but it is generally
+   the case that it is possible to decompose the cost function into
+   parts that need to be numerically differentiated and parts that can
+   be automatically or analytically differentiated.
+
+   To this end, Ceres has extensive support for mixing analytic,
+   automatic and numeric differentiation. See
+   :class:`NumericDiffFunctor` and :class:`CostFunctionToFunctor`.
+
+#. Putting `Inverse Function Theorem
+   <http://en.wikipedia.org/wiki/Inverse_function_theorem>`_ to use.
+
+   Every now and then we have to deal with functions which cannot be
+   evaluated analytically. Computing the Jacobian in such cases is
+   tricky. A particularly interesting case is where the inverse of the
+   function is easy to compute analytically. An example of such a
+   function is the Coordinate transformation between the `ECEF
+   <http://en.wikipedia.org/wiki/ECEF>`_ and the `WGS84
+   <http://en.wikipedia.org/wiki/World_Geodetic_System>`_ where the
+   conversion from WGS84 to ECEF is analytic, but the conversion
+   back to ECEF uses an iterative algorithm. So how do you compute the
+   derivative of the ECEF to WGS84 transformation?
+
+   One obvious approach would be to numerically
+   differentiate the conversion function. This is not a good idea. For
+   one, it will be slow, but it will also be numerically quite
+   bad.
+
+   Turns out you can use the `Inverse Function Theorem
+   <http://en.wikipedia.org/wiki/Inverse_function_theorem>`_ in this
+   case to compute the derivatives more or less analytically.
+
+   The key result here is: if :math:`x = f^{-1}(y)`, and :math:`Df(x)`
+   is the invertible Jacobian of :math:`f` at :math:`x`, then the
+   Jacobian :math:`Df^{-1}(y) = [Df(x)]^{-1}`, i.e., the Jacobian of
+   the :math:`f^{-1}` is the inverse of the Jacobian of :math:`f`.
+
+   Algorithmically this means that given :math:`y`, compute :math:`x =
+   f^{-1}(y)` by whatever means you can. Evaluate the Jacobian of
+   :math:`f` at :math:`x`. If the Jacobian matrix is invertible, then
+   the inverse is the Jacobian of the inverse at :math:`y`.
+
+   One can put this into practice with the following code fragment.
+
+   .. code-block:: c++
+
+      Eigen::Vector3d ecef; // Fill some values
+      // Iterative computation.
+      Eigen::Vector3d lla = ECEFToLLA(ecef);
+      // Analytic derivatives
+      Eigen::Matrix3d lla_to_ecef_jacobian = LLAToECEFJacobian(lla);
+      bool invertible;
+      Eigen::Matrix3d ecef_to_lla_jacobian;
+      lla_to_ecef_jacobian.computeInverseWithCheck(ecef_to_lla_jacobian, invertible);
+
+#. When using Quaternions, use :class:`QuaternionParameterization`.
+
+   TBD
+
+#. How to choose a parameter block size?
+
+   TBD
+
+Solving
+=======
+
+#. Choosing a linear solver.
+
+   When using the ``TRUST_REGION`` minimizer, the choice of linear
+   solver is an important decision. It affects solution quality and
+   runtime. Here is a simple way to reason about it.
+
+   1. For small (a few hundred parameters) or dense problems use
+      ``DENSE_QR``.
+
+   2. For general sparse problems (i.e., the Jacobian matrix has a
+      substantial number of zeros) use
+      ``SPARSE_NORMAL_CHOLESKY``. This requires that you have
+      ``SuiteSparse`` or ``CXSparse`` installed.
+
+   3. For bundle adjustment problems with up to a hundred or so
+      cameras, use ``DENSE_SCHUR``.
+
+   4. For larger bundle adjustment problems with sparse Schur
+      Complement/Reduced camera matrices use ``SPARSE_SCHUR``. This
+      requires that you have ``SuiteSparse`` or ``CXSparse``
+      installed.
+
+   5. For large bundle adjustment problems (a few thousand cameras or
+      more) use the ``ITERATIVE_SCHUR`` solver. There are a number of
+      preconditioner choices here. ``SCHUR_JACOBI`` offers an
+      excellent balance of speed and accuracy. This is also the
+      recommended option if you are solving medium sized problems for
+      which ``DENSE_SCHUR`` is too slow but ``SuiteSparse`` is not
+      available.
+
+      If you are not satisfied with ``SCHUR_JACOBI``'s performance try
+      ``CLUSTER_JACOBI`` and ``CLUSTER_TRIDIAGONAL`` in that
+      order. They require that you have ``SuiteSparse``
+      installed. Both of these preconditioners use a clustering
+      algorithm. Use ``SINGLE_LINKAGE`` before ``CANONICAL_VIEWS``.
+
+#. Use `Solver::Summary::FullReport` to diagnose performance problems.
+
+   When diagnosing Ceres performance issues - runtime and convergence,
+   the first place to start is by looking at the output of
+   ``Solver::Summary::FullReport``. Here is an example
+
+   .. code-block:: bash
+
+     ./bin/bundle_adjuster --input ../data/problem-16-22106-pre.txt
+
+     iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time
+        0  4.185660e+06    0.00e+00    2.16e+07   0.00e+00   0.00e+00  1.00e+04       0    7.50e-02    3.58e-01
+        1  1.980525e+05    3.99e+06    5.34e+06   2.40e+03   9.60e-01  3.00e+04       1    1.84e-01    5.42e-01
+        2  5.086543e+04    1.47e+05    2.11e+06   1.01e+03   8.22e-01  4.09e+04       1    1.53e-01    6.95e-01
+        3  1.859667e+04    3.23e+04    2.87e+05   2.64e+02   9.85e-01  1.23e+05       1    1.71e-01    8.66e-01
+        4  1.803857e+04    5.58e+02    2.69e+04   8.66e+01   9.93e-01  3.69e+05       1    1.61e-01    1.03e+00
+        5  1.803391e+04    4.66e+00    3.11e+02   1.02e+01   1.00e+00  1.11e+06       1    1.49e-01    1.18e+00
+
+     Ceres Solver v1.10.0 Solve Report
+     ----------------------------------
+                                          Original                  Reduced
+     Parameter blocks                        22122                    22122
+     Parameters                              66462                    66462
+     Residual blocks                         83718                    83718
+     Residual                               167436                   167436
+
+     Minimizer                        TRUST_REGION
+
+     Sparse linear algebra library    SUITE_SPARSE
+     Trust region strategy     LEVENBERG_MARQUARDT
+
+                                             Given                     Used
+     Linear solver                    SPARSE_SCHUR             SPARSE_SCHUR
+     Threads                                     1                        1
+     Linear solver threads                       1                        1
+     Linear solver ordering              AUTOMATIC                22106, 16
+
+     Cost:
+     Initial                          4.185660e+06
+     Final                            1.803391e+04
+     Change                           4.167626e+06
+
+     Minimizer iterations                        5
+     Successful steps                            5
+     Unsuccessful steps                          0
+
+     Time (in seconds):
+     Preprocessor                            0.283
+
+       Residual evaluation                   0.061
+       Jacobian evaluation                   0.361
+       Linear solver                         0.382
+     Minimizer                               0.895
+
+     Postprocessor                           0.002
+     Total                                   1.220
+
+     Termination:                   NO_CONVERGENCE (Maximum number of iterations reached.)
+
+   Let us focus on run-time performance. The relevant lines to look at
+   are
+
+
+   .. code-block:: bash
+
+     Time (in seconds):
+     Preprocessor                            0.283
+
+       Residual evaluation                   0.061
+       Jacobian evaluation                   0.361
+       Linear solver                         0.382
+     Minimizer                               0.895
+
+     Postprocessor                           0.002
+     Total                                   1.220
+
+
+   This tells us that of the total 1.2 seconds, about 0.4 seconds was
+   spent in the linear solver and the rest was mostly spent in
+   preprocessing and Jacobian evaluation.
+
+   The preprocessing seems particularly expensive. Looking back at the
+   report, we observe
+
+   .. code-block:: bash
+
+     Linear solver ordering              AUTOMATIC                22106, 16
+
+   This indicates that we are using automatic ordering for the
+   ``SPARSE_SCHUR`` solver. This can be expensive at times. A
+   straightforward way to deal with this is to give the ordering
+   manually. For ``bundle_adjuster`` this can be done by passing the
+   flag ``-ordering=user``. Doing so and looking at the timing block
+   of the full report gives us
+
+   .. code-block:: bash
+
+     Time (in seconds):
+     Preprocessor                            0.051
+
+       Residual evaluation                   0.053
+       Jacobian evaluation                   0.344
+       Linear solver                         0.372
+     Minimizer                               0.854
+
+     Postprocessor                           0.002
+     Total                                   0.935
+
+
+
+   The preprocessor time has gone down by more than 5.5x!
+
+Further Reading
+===============
+
+For a short but informative introduction to the subject we recommend
+the booklet by [Madsen]_ . For a general introduction to non-linear
+optimization we recommend [NocedalWright]_. [Bjorck]_ remains the
+seminal reference on least squares problems. [TrefethenBau]_ book is
+our favorite text on introductory numerical linear algebra. [Triggs]_
+provides a thorough coverage of the bundle adjustment problem.
diff --git a/docs/source/features.rst b/docs/source/features.rst
new file mode 100644
index 0000000..50f22e7
--- /dev/null
+++ b/docs/source/features.rst
@@ -0,0 +1,92 @@
+========
+Features
+========
+.. _chapter-features:
+
+* **Code Quality** - Ceres Solver has been used in production at
+  Google for more than three years now. It is used to solve a wide
+  variety of problems, both in size and complexity. The code runs on
+  Google's data centers, desktops and on cellphones. It is clean,
+  extensively tested and well documented code that is actively
+  developed and supported.
+
+* **Modeling API** - It is rarely the case that one starts with the
+  exact and complete formulation of the problem that one is trying to
+  solve. Ceres's modeling API has been designed so that the user can
+  easily build and modify the objective function, one term at a
+  time. And to do so without worrying about how the solver is going to
+  deal with the resulting changes in the sparsity/structure of the
+  underlying problem. Indeed we take great care to separate the
+  modeling of the optimization problem from solving it. The two can be
+  done more or less completely independently of each other.
+
+  - **Derivatives** Supplying derivatives is perhaps the most tedious
+    and error prone part of using an optimization library.  Ceres
+    ships with `automatic`_ and `numeric`_ differentiation. So you
+    never have to compute derivatives by hand (unless you really want
+    to). Not only this, Ceres allows you to mix automatic, numeric and
+    analytical derivatives in any combination that you want.
+
+  - **Robust Loss Functions** Most non-linear least squares problems
+    involve data. If there is data, there will be outliers. Ceres
+    allows the user to *shape* their residuals using robust loss
+    functions to reduce the influence of outliers.
+
+  - **Local Parameterization** In many cases, some parameters lie on a
+    manifold other than Euclidean space, e.g., rotation matrices. In
+    such cases, the user can specify the geometry of the local tangent
+    space by specifying a LocalParameterization object.
+
+* **Solver Choice** Depending on the size, sparsity structure, time &
+  memory budgets, and solution quality requirements, different
+  optimization algorithms will suit different needs. To this end,
+  Ceres Solver comes with a variety of optimization algorithms, some
+  of them the result of the author's own research.
+
+  - **Trust Region Solvers** - Ceres supports Levenberg-Marquardt,
+    Powell's Dogleg, and Subspace dogleg methods. The key
+    computational cost in all of these methods is the solution of a
+    linear system. To this end Ceres ships with a variety of linear
+    solvers - dense QR and dense Cholesky factorization (using
+    `Eigen`_ or `LAPACK`_) for dense problems, sparse Cholesky
+    factorization (`SuiteSparse`_ or `CXSparse`_) for large sparse
+    problems, and custom Schur complement based dense, sparse, and
+    iterative linear solvers for `bundle adjustment`_ problems.
+
+  - **Line Search Solvers** - When the problem size is so large that
+    storing and factoring the Jacobian is not feasible or a low
+    accuracy solution is required cheaply, Ceres offers a number of
+    line search based algorithms. This includes a number of variants
+    of Non-linear Conjugate Gradients, BFGS and LBFGS.
+
+* **Speed** - Ceres code has been extensively optimized, with C++
+  templating, hand written linear algebra routines and OpenMP based
+  multithreading of the Jacobian evaluation and the linear solvers.
+
+* **Solution Quality** Ceres is the best performing solver on the NIST
+  problem set used by Mondragon and Borchers for benchmarking
+  non-linear least squares solvers.
+
+* **Covariance estimation** - Evaluate the sensitivity/uncertainty of
+  the solution by evaluating all or part of the covariance
+  matrix. Ceres is one of the few solvers that allows you to do
+  this analysis at scale.
+
+* **Community** Since its release as an open source software, Ceres
+  has developed an active developer community that contributes new
+  features, bug fixes and support.
+
+* **Portability** - Runs on *Linux*, *Windows*, *Mac OS X*, *Android*
+  and *iOS*.
+
+* **BSD Licensed** The BSD license offers the flexibility to ship your
+  application without having to release your source code.
+
+.. _solution quality: https://groups.google.com/forum/#!topic/ceres-solver/UcicgMPgbXw
+.. _bundle adjustment: http://en.wikipedia.org/wiki/Bundle_adjustment
+.. _SuiteSparse: http://www.cise.ufl.edu/research/sparse/SuiteSparse/
+.. _Eigen: http://eigen.tuxfamily.org/
+.. _LAPACK: http://www.netlib.org/lapack/
+.. _CXSparse: https://www.cise.ufl.edu/research/sparse/CXSparse/
+.. _automatic: http://en.wikipedia.org/wiki/Automatic_differentiation
+.. _numeric: http://en.wikipedia.org/wiki/Numerical_differentiation
diff --git a/docs/source/history.rst b/docs/source/history.rst
new file mode 100644
index 0000000..b159284
--- /dev/null
+++ b/docs/source/history.rst
@@ -0,0 +1,27 @@
+.. _chapter-history:
+
+=======
+History
+=======
+
+Ceres Solver grew out of the need for general least squares solving at
+Google. In early 2010, Sameer Agarwal and Fredrik Schaffalitzky
+started the development of Ceres Solver. Fredrik left Google shortly
+thereafter and Keir Mierle stepped in to take his place. After two
+years of on-and-off development, Ceres Solver was released as open
+source in May of 2012.
+
+Origin of the name
+------------------
+
+While there is some debate as to who invented the method of Least
+Squares [Stigler]_, there is no debate that it was `Carl Friedrich
+Gauss
+<http://www-groups.dcs.st-and.ac.uk/~history/Biographies/Gauss.html>`_
+who brought it to the attention of the world. Using just 22
+observations of the newly discovered asteroid `Ceres
+<http://en.wikipedia.org/wiki/Ceres_(dwarf_planet)>`_, Gauss used the
+method of least squares to correctly predict when and where the
+asteroid will emerge from behind the Sun [TenenbaumDirector]_. We
+named our solver after Ceres to celebrate this seminal event in the
+history of astronomy, statistics and optimization.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index f20dad4..26b318a 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -7,45 +7,75 @@
 Ceres Solver
 ============
 
-Ceres Solver is a portable C++ library for solving non-linear least
-squares problems.
-
-* Download the latest stable `release
-  <https://ceres-solver.googlecode.com/files/ceres-solver-1.6.0.tar.gz>`_
-  or clone the `repository
-  <https://ceres-solver.googlesource.com/ceres-solver>`_
-
-* Read the :ref:`chapter-tutorial`
-
-* Browse the :ref:`chapter-modeling` API and :ref:`chapter-solving` API.
-
-* Join the `mailing list
-  <https://groups.google.com/forum/?fromgroups#!forum/ceres-solver>`_
-  and ask questions.
-
-* File bugs, feature requests in the `issue tracker
-  <https://code.google.com/p/ceres-solver/issues/list>`_.
-
-* If you use Ceres Solver for a publication, you must cite it as::
-
-    @misc{ceres-solver,
-      author = "Sameer Agarwal and Keir Mierle and Others",
-      title = "Ceres Solver",
-      howpublished = "\url{https://code.google.com/p/ceres-solver/}",
-    }
-
 .. toctree::
-   :maxdepth: 1
+   :maxdepth: 3
    :hidden:
 
-   introduction
+   features
    building
    tutorial
    modeling
    solving
-   reading
+   faqs
    contributing
-   acknowledgements
    version_history
+   history
    bibliography
    license
+
+Ceres Solver is an open source C++ library for modeling and solving
+large complicated `nonlinear least squares`_ problems. It is a feature
+rich, mature and performant library which has been used in production
+since 2010. At Google, Ceres Solver is used to:
+
+* Estimate the pose of `Street View`_ cars, aircraft, and satellites.
+* Build 3D models for `PhotoTours`_.
+* Estimate satellite image sensor characteristics.
+* Stitch `panoramas`_ or apply `Lens Blur`_ on Android.
+* Solve `bundle adjustment`_ and SLAM problems in `Project Tango`_.
+
+Outside Google, Ceres is used for solving problems in computer vision,
+computer graphics, astronomy and physics, e.g., `Willow Garage`_ uses
+it to solve SLAM problems and `Blender`_ uses it for planar
+tracking and bundle adjustment.
+
+.. _nonlinear least squares: http://en.wikipedia.org/wiki/Non-linear_least_squares
+.. _fitting curves: http://en.wikipedia.org/wiki/Nonlinear_regression
+.. _bundle adjustment: http://en.wikipedia.org/wiki/Structure_from_motion
+.. _Street View: http://youtu.be/z00ORu4bU-A
+.. _PhotoTours: http://google-latlong.blogspot.com/2012/04/visit-global-landmarks-with-photo-tours.html
+.. _panoramas: http://www.google.com/maps/about/contribute/photosphere/
+.. _Project Tango: https://www.google.com/atap/projecttango/
+.. _Blender: http://mango.blender.org/development/planar-tracking-preview/
+.. _Willow Garage: https://www.willowgarage.com/blog/2013/08/09/enabling-robots-see-better-through-improved-camera-calibration
+.. _Lens Blur: http://googleresearch.blogspot.com/2014/04/lens-blur-in-new-google-camera-app.html
+
+Getting started
+---------------
+
+* Download the `latest stable release
+  <http://ceres-solver.org/ceres-solver-1.9.0.tar.gz>`_ or clone the
+  Git repository for the latest development version.
+
+  .. code-block:: bash
+
+       git clone https://ceres-solver.googlesource.com/ceres-solver
+
+* Read the :ref:`chapter-tutorial`, browse the chapters on the
+  :ref:`chapter-modeling` API and the :ref:`chapter-solving` API.
+* Join the `mailing list
+  <https://groups.google.com/forum/?fromgroups#!forum/ceres-solver>`_
+  and ask questions.
+* File bugs, feature requests in the `issue tracker
+  <https://code.google.com/p/ceres-solver/issues/list>`_.
+
+
+Cite Us
+-------
+If you use Ceres Solver for a publication, please cite it as::
+
+    @misc{ceres-solver,
+      author = "Sameer Agarwal and Keir Mierle and Others",
+      title = "Ceres Solver",
+      howpublished = "\url{http://ceres-solver.org}",
+    }
diff --git a/docs/source/introduction.rst b/docs/source/introduction.rst
deleted file mode 100644
index 19a6f2e..0000000
--- a/docs/source/introduction.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-.. _chapter-introduction:
-
-============
-Introduction
-============
-
-Solving nonlinear least squares problems [#f1]_ comes up in a broad
-range of areas across science and engineering - from fitting curves in
-statistics, to constructing 3D models from photographs in computer
-vision. Ceres Solver [#f2]_ [#f3]_ is a portable C++ library for
-solving non-linear least squares problems accurately and efficiently.
-
-**Features**
-
-#. A friendly :ref:`chapter-modeling` API.
-
-#. Automatic and numeric differentiation.
-
-#. Robust loss functions and local parameterizations.
-
-#. Multithreading.
-
-#. Trust-Region (Levenberg-Marquardt and Dogleg) and Line Search
-   (Nonlinear CG and L-BFGS) solvers.
-
-#. Variety of linear solvers.
-
-   a. Dense QR and Cholesky factorization (using `Eigen
-      <http://eigen.tuxfamily.org/index.php?title=Main_Page>`_) for
-      small problems.
-
-   b. Sparse Cholesky factorization (using `SuiteSparse
-      <http://www.cise.ufl.edu/research/sparse/SuiteSparse/>`_ and
-      `CXSparse <http://www.cise.ufl.edu/research/sparse/CSparse/>`_) for
-      large sparse problems.
-
-   c. Specialized solvers for bundle adjustment problems in computer
-      vision.
-
-   d. Iterative linear solvers with preconditioners for general sparse
-      and bundle adjustment problems.
-
-#. Portable: Runs on Linux, Windows, Mac OS X and Android.
-
-
-At Google, Ceres Solver has been used for solving a variety of
-problems in computer vision and machine learning. e.g., it is used to
-to estimate the pose of Street View cars, aircrafts, and satellites;
-to build 3D models for PhotoTours; to estimate satellite image sensor
-characteristics, and more.
-
-`Blender <http://www.blender.org>`_ uses Ceres for `motion tracking
-<http://mango.blender.org/development/planar-tracking-preview/>`_ and
-`bundle adjustment
-<http://wiki.blender.org/index.php/Dev:Ref/Release_Notes/2.67/Motion_Tracker>`_.
-
-
-.. rubric:: Footnotes
-
-.. [#f1] For a gentle but brief introduction to non-linear least
-         squares problems, please start by reading the
-         :ref:`chapter-tutorial`.
-
-.. [#f2] While there is some debate as to who invented the method of
-         Least Squares [Stigler]_, there is no debate that it was
-         `Carl Friedrich Gauss
-         <http://en.wikipedia.org/wiki/Carl_Friedrich_Gauss>`_ who
-         brought it to the attention of the world. Using just 22
-         observations of the newly discovered asteroid `Ceres
-         <http://en.wikipedia.org/wiki/Ceres_(dwarf_planet)>`_, Gauss
-         used the method of least squares to correctly predict when
-         and where the asteroid will emerge from behind the Sun
-         [TenenbaumDirector]_. We named our solver after Ceres to
-         celebrate this seminal event in the history of astronomy,
-         statistics and optimization.
-
-.. [#f3] For brevity, in the rest of this document we will just use
-         the term Ceres.
-
-
-
diff --git a/docs/source/license.rst b/docs/source/license.rst
index 58d70df..cfa1d79 100644
--- a/docs/source/license.rst
+++ b/docs/source/license.rst
@@ -4,7 +4,7 @@
 
 Ceres Solver is licensed under the New BSD license, whose terms are as follows.
 
-Copyright (c) 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
+Copyright (c) 2014 Google Inc. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
diff --git a/docs/source/modeling.rst b/docs/source/modeling.rst
index 8e6de12..5bbd441 100644
--- a/docs/source/modeling.rst
+++ b/docs/source/modeling.rst
@@ -8,41 +8,64 @@
 Modeling
 ========
 
-Recall that Ceres solves robustified non-linear least squares problems
-of the form
+Ceres solver consists of two distinct parts. A modeling API which
+provides a rich set of tools to construct an optimization problem one
+term at a time and a solver API that controls the minimization
+algorithm. This chapter is devoted to the task of modeling
+optimization problems using Ceres. :ref:`chapter-solving` discusses
+the various ways in which an optimization problem can be solved using
+Ceres.
 
-.. math:: \frac{1}{2}\sum_{i=1} \rho_i\left(\left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2\right).
-   :label: ceresproblem
+Ceres solves robustified bounds constrained non-linear least squares
+problems of the form:
 
-The expression
+.. math:: :label: ceresproblem
+
+   \min_{\mathbf{x}} &\quad \frac{1}{2}\sum_{i}
+   \rho_i\left(\left\|f_i\left(x_{i_1},
+   ... ,x_{i_k}\right)\right\|^2\right)  \\
+   \text{s.t.} &\quad l_j \le x_j \le u_j
+
+In Ceres parlance, the expression
 :math:`\rho_i\left(\left\|f_i\left(x_{i_1},...,x_{i_k}\right)\right\|^2\right)`
-is known as a ``ResidualBlock``, where :math:`f_i(\cdot)` is a
-:class:`CostFunction` that depends on the parameter blocks
-:math:`\left[x_{i_1},... , x_{i_k}\right]`. In most optimization
-problems small groups of scalars occur together. For example the three
-components of a translation vector and the four components of the
-quaternion that define the pose of a camera. We refer to such a group
-of small scalars as a ``ParameterBlock``. Of course a
-``ParameterBlock`` can just be a single parameter. :math:`\rho_i` is a
-:class:`LossFunction`. A :class:`LossFunction` is a scalar function
-that is used to reduce the influence of outliers on the solution of
-non-linear least squares problems.
+is known as a **residual block**, where :math:`f_i(\cdot)` is a
+:class:`CostFunction` that depends on the **parameter blocks**
+:math:`\left\{x_{i_1},... , x_{i_k}\right\}`.
 
-In this chapter we will describe the various classes that are part of
-Ceres Solver's modeling API, and how they can be used to construct an
-optimization problem. Once a problem has been constructed, various
-methods for solving them will be discussed in
-:ref:`chapter-solving`. It is by design that the modeling and the
-solving APIs are orthogonal to each other. This enables
-switching/tweaking of various solver parameters without having to
-touch the problem once it has been successfully modeled.
+In most optimization problems small groups of scalars occur
+together. For example the three components of a translation vector and
+the four components of the quaternion that define the pose of a
+camera. We refer to such a group of scalars as a **parameter block**. Of
+course a parameter block can be just a single scalar too.
+
+:math:`\rho_i` is a :class:`LossFunction`. A :class:`LossFunction` is
+a scalar valued function that is used to reduce the influence of
+outliers on the solution of non-linear least squares problems.
+
+:math:`l_j` and :math:`u_j` are lower and upper bounds on the
+parameter block :math:`x_j`.
+
+As a special case, when :math:`\rho_i(x) = x`, i.e., the identity
+function, and :math:`l_j = -\infty` and :math:`u_j = \infty` we get
+the more familiar unconstrained `non-linear least squares problem
+<http://en.wikipedia.org/wiki/Non-linear_least_squares>`_.
+
+.. math:: :label: ceresproblemunconstrained
+
+   \frac{1}{2}\sum_{i} \left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2.
 
 :class:`CostFunction`
 ---------------------
 
-The single biggest task when modeling a problem is specifying the
-residuals and their derivatives. This is done using
-:class:`CostFunction` objects.
+For each term in the objective function, a :class:`CostFunction` is
+responsible for computing a vector of residuals and if asked a vector
+of Jacobian matrices, i.e., given :math:`\left[x_{i_1}, ... ,
+x_{i_k}\right]`, compute the vector
+:math:`f_i\left(x_{i_1},...,x_{i_k}\right)` and the matrices
+
+ .. math:: J_{ij} = \frac{\partial}{\partial
+	   x_{i_j}}f_i\left(x_{i_1},...,x_{i_k}\right),\quad \forall j
+	   \in \{1, \ldots, k\}
 
 .. class:: CostFunction
 
@@ -53,30 +76,22 @@
       virtual bool Evaluate(double const* const* parameters,
                             double* residuals,
                             double** jacobians) = 0;
-      const vector<int16>& parameter_block_sizes();
+      const vector<int32>& parameter_block_sizes();
       int num_residuals() const;
 
      protected:
-      vector<int16>* mutable_parameter_block_sizes();
+      vector<int32>* mutable_parameter_block_sizes();
       void set_num_residuals(int num_residuals);
     };
 
-   Given parameter blocks :math:`\left[x_{i_1}, ... , x_{i_k}\right]`,
-   a :class:`CostFunction` is responsible for computing a vector of
-   residuals and if asked a vector of Jacobian matrices, i.e., given
-   :math:`\left[x_{i_1}, ... , x_{i_k}\right]`, compute the vector
-   :math:`f_i\left(x_{i_1},...,x_{i_k}\right)` and the matrices
 
-   .. math:: J_{ij} = \frac{\partial}{\partial x_{i_j}}f_i\left(x_{i_1},...,x_{i_k}\right),\quad \forall j \in \{i_1,..., i_k\}
-
-   The signature of the :class:`CostFunction` (number and sizes of
-   input parameter blocks and number of outputs) is stored in
-   :member:`CostFunction::parameter_block_sizes_` and
-   :member:`CostFunction::num_residuals_` respectively. User code
-   inheriting from this class is expected to set these two members
-   with the corresponding accessors. This information will be verified
-   by the :class:`Problem` when added with
-   :func:`Problem::AddResidualBlock`.
+The signature of the :class:`CostFunction` (number and sizes of input
+parameter blocks and number of outputs) is stored in
+:member:`CostFunction::parameter_block_sizes_` and
+:member:`CostFunction::num_residuals_` respectively. User code
+inheriting from this class is expected to set these two members with
+the corresponding accessors. This information will be verified by the
+:class:`Problem` when added with :func:`Problem::AddResidualBlock`.
 
 .. function:: bool CostFunction::Evaluate(double const* const* parameters, double* residuals, double** jacobians)
 
@@ -114,19 +129,6 @@
    This can be used to communicate numerical failures in Jacobian
    computations for instance.
 
-   A more interesting and common use is to impose constraints on the
-   parameters. If the initial values of the parameter blocks satisfy
-   the constraints, then returning false whenever the constraints are
-   not satisfied will prevent the solver from moving into the
-   infeasible region. This is not a very sophisticated mechanism for
-   enforcing constraints, but is often good enough for things like
-   non-negativity constraints.
-
-   Note that it is important that the initial values of the parameter
-   block must be feasible, otherwise the solver will declare a
-   numerical problem at iteration 0.
-
-
 :class:`SizedCostFunction`
 --------------------------
 
@@ -164,7 +166,7 @@
    .. code-block:: c++
 
      template <typename CostFunctor,
-            int M,        // Number of residuals, or ceres::DYNAMIC.
+            int kNumResiduals,  // Number of residuals, or ceres::DYNAMIC.
             int N0,       // Number of parameters in block 0.
             int N1 = 0,   // Number of parameters in block 1.
             int N2 = 0,   // Number of parameters in block 2.
@@ -176,8 +178,13 @@
             int N8 = 0,   // Number of parameters in block 8.
             int N9 = 0>   // Number of parameters in block 9.
      class AutoDiffCostFunction : public
-     SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> {
-     };
+     SizedCostFunction<kNumResiduals, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> {
+      public:
+       explicit AutoDiffCostFunction(CostFunctor* functor);
+       // Ignore the template parameter kNumResiduals and use
+       // num_residuals instead.
+       AutoDiffCostFunction(CostFunctor* functor, int num_residuals);
+     };
 
    To get an auto differentiated cost function, you must define a
    class with a templated ``operator()`` (a functor) that computes the
@@ -189,9 +196,6 @@
 
    The function must write the computed value in the last argument
    (the only non-``const`` one) and return true to indicate success.
-   Please see :class:`CostFunction` for details on how the return
-   value may be used to impose simple constraints on the parameter
-   block.
 
    For example, consider a scalar error :math:`e = k - x^\top y`,
    where both :math:`x` and :math:`y` are two-dimensional vector
@@ -254,6 +258,22 @@
    computing a 1-dimensional output from two arguments, both
    2-dimensional.
 
+   :class:`AutoDiffCostFunction` also supports cost functions with a
+   runtime-determined number of residuals. For example:
+
+   .. code-block:: c++
+
+     CostFunction* cost_function
+         = new AutoDiffCostFunction<MyScalarCostFunctor, DYNAMIC, 2, 2>(
+             new CostFunctorWithDynamicNumResiduals(1.0),   ^     ^  ^
+             runtime_number_of_residuals); <----+           |     |  |
+                                                |           |     |  |
+                                                |           |     |  |
+               Actual number of residuals ------+           |     |  |
+               Indicate dynamic number of residuals --------+     |  |
+               Dimension of x ------------------------------------+  |
+               Dimension of y ---------------------------------------+
+
    The framework can currently accommodate cost functions of up to 10
    independent variables, and there is no limit on the dimensionality
    of each of them.
@@ -279,10 +299,10 @@
 .. class:: DynamicAutoDiffCostFunction
 
    :class:`AutoDiffCostFunction` requires that the number of parameter
-   blocks and their sizes be known at compile time, e.g., Bezier curve
-   fitting, Neural Network training etc. It also has an upper limit of
-   10 parameter blocks. In a number of applications, this is not
-   enough.
+   blocks and their sizes be known at compile time. It also has an
+   upper limit of 10 parameter blocks. In a number of applications,
+   this is not enough, e.g., Bezier curve fitting, Neural Network
+   training etc.
 
      .. code-block:: c++
 
@@ -342,12 +362,21 @@
 
     .. code-block:: c++
 
-      template <typename CostFunctionNoJacobian,
-                NumericDiffMethod method = CENTRAL, int M = 0,
-                int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
-                int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
-      class NumericDiffCostFunction
-        : public SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> {
+      template <typename CostFunctor,
+                NumericDiffMethod method = CENTRAL,
+                int kNumResiduals,  // Number of residuals, or ceres::DYNAMIC.
+                int N0,       // Number of parameters in block 0.
+                int N1 = 0,   // Number of parameters in block 1.
+                int N2 = 0,   // Number of parameters in block 2.
+                int N3 = 0,   // Number of parameters in block 3.
+                int N4 = 0,   // Number of parameters in block 4.
+                int N5 = 0,   // Number of parameters in block 5.
+                int N6 = 0,   // Number of parameters in block 6.
+                int N7 = 0,   // Number of parameters in block 7.
+                int N8 = 0,   // Number of parameters in block 8.
+                int N9 = 0>   // Number of parameters in block 9.
+      class NumericDiffCostFunction : public
+      SizedCostFunction<kNumResiduals, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> {
       };
 
    To get a numerically differentiated :class:`CostFunction`, you must
@@ -426,6 +455,24 @@
    computing a 1-dimensional output from two arguments, both
    2-dimensional.
 
+   NumericDiffCostFunction also supports cost functions with a
+   runtime-determined number of residuals. For example:
+
+   .. code-block:: c++
+
+     CostFunction* cost_function
+         = new NumericDiffCostFunction<MyScalarCostFunctor, CENTRAL, DYNAMIC, 2, 2>(
+             new CostFunctorWithDynamicNumResiduals(1.0),               ^     ^  ^
+             TAKE_OWNERSHIP,                                            |     |  |
+             runtime_number_of_residuals); <----+                       |     |  |
+                                                |                       |     |  |
+                                                |                       |     |  |
+               Actual number of residuals ------+                       |     |  |
+               Indicate dynamic number of residuals --------------------+     |  |
+               Dimension of x ------------------------------------------------+  |
+               Dimension of y ---------------------------------------------------+
+
+
    The framework can currently accommodate cost functions of up to 10
    independent variables, and there is no limit on the dimensionality
    of each of them.
@@ -475,6 +522,52 @@
    sizes 4 and 8 respectively. Look at the tests for a more detailed
    example.
 
+:class:`DynamicNumericDiffCostFunction`
+---------------------------------------
+
+.. class:: DynamicNumericDiffCostFunction
+
+   Like :class:`AutoDiffCostFunction`, :class:`NumericDiffCostFunction`
+   requires that the number of parameter blocks and their sizes be
+   known at compile time. It also has an upper limit of 10 parameter
+   blocks. In a number of applications, this is not enough.
+
+     .. code-block:: c++
+
+      template <typename CostFunctor, NumericDiffMethod method = CENTRAL>
+      class DynamicNumericDiffCostFunction : public CostFunction {
+      };
+
+   In such cases when numeric differentiation is desired,
+   :class:`DynamicNumericDiffCostFunction` can be used.
+
+   Like :class:`NumericDiffCostFunction` the user must define a
+   functor, but the signature of the functor differs slightly. The
+   expected interface for the cost functors is:
+
+     .. code-block:: c++
+
+       struct MyCostFunctor {
+         bool operator()(double const* const* parameters, double* residuals) const {
+         }
+       };
+
+   Since the sizing of the parameters is done at runtime, you must
+   also specify the sizes after creating the dynamic numeric diff cost
+   function. For example:
+
+     .. code-block:: c++
+
+       DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+           new MyCostFunctor());
+       cost_function.AddParameterBlock(5);
+       cost_function.AddParameterBlock(10);
+       cost_function.SetNumResiduals(21);
+
+   As a rule of thumb, try using :class:`NumericDiffCostFunction` before
+   you use :class:`DynamicNumericDiffCostFunction`.
+
+
 :class:`NumericDiffFunctor`
 ---------------------------
 
@@ -713,6 +806,8 @@
 
 
 
+.. _`section-loss_function`:
+
 :class:`LossFunction`
 ---------------------
 
@@ -1080,8 +1175,8 @@
 
   For example, Quaternions have a three dimensional local
   parameterization. It's plus operation can be implemented as (taken
-  from `internal/ceres/auto_diff_local_parameterization_test.cc
-  <https://ceres-solver.googlesource.com/ceres-solver/+/master/include/ceres/local_parameterization.h>`_
+  from `internal/ceres/autodiff_local_parameterization_test.cc
+  <https://ceres-solver.googlesource.com/ceres-solver/+/master/internal/ceres/autodiff_local_parameterization_test.cc>`_
   )
 
     .. code-block:: c++
@@ -1139,10 +1234,10 @@
 
 .. class:: Problem
 
-   :class:`Problem` holds the robustified non-linear least squares
-   problem :eq:`ceresproblem`. To create a least squares problem, use
-   the :func:`Problem::AddResidualBlock` and
-   :func:`Problem::AddParameterBlock` methods.
+   :class:`Problem` holds the robustified bounds constrained
+   non-linear least squares problem :eq:`ceresproblem`. To create a
+   least squares problem, use the :func:`Problem::AddResidualBlock`
+   and :func:`Problem::AddParameterBlock` methods.
 
    For example a problem containing 3 parameter blocks of sizes 3, 4
    and 5 respectively and two residual blocks of size 2 and 6:
@@ -1274,7 +1369,10 @@
    Remove a residual block from the problem. Any parameters that the residual
    block depends on are not removed. The cost and loss functions for the
    residual block will not get deleted immediately; won't happen until the
-   problem itself is deleted.
+   problem itself is deleted.  If Problem::Options::enable_fast_removal is
+   true, then the removal is fast (almost constant time). Otherwise, removing a
+   residual block will incur a scan of the entire Problem object to verify that
+   the residual_block represents a valid residual in the problem.
 
    **WARNING:** Removing a residual or parameter block will destroy
    the implicit ordering, rendering the jacobian or residuals returned
@@ -1289,7 +1387,7 @@
    of the problem (similar to cost/loss functions in residual block
    removal). Any residual blocks that depend on the parameter are also
    removed, as described above in RemoveResidualBlock().  If
-   Problem::Options::enable_fast_parameter_block_removal is true, then
+   Problem::Options::enable_fast_removal is true, then
    the removal is fast (almost constant time). Otherwise, removing a
    parameter block will incur a scan of the entire Problem object.
 
@@ -1315,6 +1413,24 @@
    parameterizations only once. The local parameterization can only be
    set once per parameter, and cannot be changed once set.
 
+.. function:: LocalParameterization* Problem::GetParameterization(double* values) const
+
+   Get the local parameterization object associated with this
+   parameter block. If there is no parameterization object associated,
+   then `NULL` is returned.
+
+.. function:: void Problem::SetParameterLowerBound(double* values, int index, double lower_bound)
+
+   Set the lower bound for the parameter at position `index` in the
+   parameter block corresponding to `values`. By default the lower
+   bound is :math:`-\infty`.
+
+.. function:: void Problem::SetParameterUpperBound(double* values, int index, double upper_bound)
+
+   Set the upper bound for the parameter at position `index` in the
+   parameter block corresponding to `values`. By default the value is
+   :math:`\infty`.
+
 .. function:: int Problem::NumParameterBlocks() const
 
    Number of parameter blocks in the problem. Always equals
@@ -1335,22 +1451,47 @@
    The size of the residual vector obtained by summing over the sizes
    of all of the residual blocks.
 
-.. function int Problem::ParameterBlockSize(const double* values) const;
+.. function:: int Problem::ParameterBlockSize(const double* values) const
 
    The size of the parameter block.
 
-.. function int Problem::ParameterBlockLocalSize(const double* values) const;
+.. function:: int Problem::ParameterBlockLocalSize(const double* values) const
 
-  The size of local parameterization for the parameter block. If
-  there is no local parameterization associated with this parameter
-  block, then ``ParameterBlockLocalSize`` = ``ParameterBlockSize``.
+   The size of local parameterization for the parameter block. If
+   there is no local parameterization associated with this parameter
+   block, then ``ParameterBlockLocalSize`` = ``ParameterBlockSize``.
 
+.. function:: bool Problem::HasParameterBlock(const double* values) const
 
-.. function void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const;
+   Is the given parameter block present in the problem or not?
 
-  Fills the passed ``parameter_blocks`` vector with pointers to the
-  parameter blocks currently in the problem. After this call,
-  ``parameter_block.size() == NumParameterBlocks``.
+.. function:: void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const
+
+   Fills the passed ``parameter_blocks`` vector with pointers to the
+   parameter blocks currently in the problem. After this call,
+   ``parameter_block.size() == NumParameterBlocks``.
+
+.. function:: void Problem::GetResidualBlocks(vector<ResidualBlockId>* residual_blocks) const
+
+   Fills the passed `residual_blocks` vector with pointers to the
+   residual blocks currently in the problem. After this call,
+   `residual_blocks.size() == NumResidualBlocks`.
+
+.. function:: void Problem::GetParameterBlocksForResidualBlock(const ResidualBlockId residual_block, vector<double*>* parameter_blocks) const
+
+   Get all the parameter blocks that depend on the given residual
+   block.
+
+.. function:: void Problem::GetResidualBlocksForParameterBlock(const double* values, vector<ResidualBlockId>* residual_blocks) const
+
+   Get all the residual blocks that depend on the given parameter
+   block.
+
+   If `Problem::Options::enable_fast_removal` is
+   `true`, then getting the residual blocks is fast and depends only
+   on the number of residual blocks. Otherwise, getting the residual
+   blocks for a parameter block will incur a scan of the entire
+   :class:`Problem` object.
 
 .. function:: bool Problem::Evaluate(const Problem::EvaluateOptions& options, double* cost, vector<double>* residuals, vector<double>* gradient, CRSMatrix* jacobian)
 
diff --git a/docs/source/reading.rst b/docs/source/reading.rst
deleted file mode 100644
index 4e27567..0000000
--- a/docs/source/reading.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-===============
-Further Reading
-===============
-
-For a short but informative introduction to the subject we recommend
-the booklet by [Madsen]_ . For a general introduction to non-linear
-optimization we recommend [NocedalWright]_. [Bjorck]_ remains the
-seminal reference on least squares problems. [TrefethenBau]_ book is
-our favorite text on introductory numerical linear algebra. [Triggs]_
-provides a thorough coverage of the bundle adjustment problem.
diff --git a/docs/source/solving.rst b/docs/source/solving.rst
index f17c695..5f3711a 100644
--- a/docs/source/solving.rst
+++ b/docs/source/solving.rst
@@ -9,7 +9,6 @@
 Solving
 =======
 
-
 Introduction
 ============
 
@@ -24,16 +23,22 @@
 :math:`m`-dimensional function of :math:`x`.  We are interested in
 solving the following optimization problem [#f1]_ .
 
-.. math:: \arg \min_x \frac{1}{2}\|F(x)\|^2\ .
+.. math:: \arg \min_x \frac{1}{2}\|F(x)\|^2\ . \\
+          L \le x \le U
   :label: nonlinsq
 
-Here, the Jacobian :math:`J(x)` of :math:`F(x)` is an :math:`m\times
-n` matrix, where :math:`J_{ij}(x) = \partial_j f_i(x)` and the
-gradient vector :math:`g(x) = \nabla \frac{1}{2}\|F(x)\|^2 = J(x)^\top
-F(x)`. Since the efficient global minimization of :eq:`nonlinsq` for
+Where, :math:`L` and :math:`U` are lower and upper bounds on the
+parameter vector :math:`x`.
+
+Since the efficient global minimization of :eq:`nonlinsq` for
 general :math:`F(x)` is an intractable problem, we will have to settle
 for finding a local minimum.
 
+In the following, the Jacobian :math:`J(x)` of :math:`F(x)` is an
+:math:`m\times n` matrix, where :math:`J_{ij}(x) = \partial_j f_i(x)`
+and the gradient vector is :math:`g(x) = \nabla \frac{1}{2}\|F(x)\|^2
+= J(x)^\top F(x)`.
+
 The general strategy when solving non-linear optimization problems is
 to solve a sequence of approximations to the original problem
 [NocedalWright]_. At each iteration, the approximation is solved to
@@ -81,15 +86,20 @@
 The basic trust region algorithm looks something like this.
 
    1. Given an initial point :math:`x` and a trust region radius :math:`\mu`.
-   2. :math:`\arg \min_{\Delta x} \frac{1}{2}\|J(x)\Delta
-      x + F(x)\|^2` s.t. :math:`\|D(x)\Delta x\|^2 \le \mu`
+   2. Solve
+
+      .. math::
+         \arg \min_{\Delta x}& \frac{1}{2}\|J(x)\Delta x + F(x)\|^2 \\
+         \text{such that} &\|D(x)\Delta x\|^2 \le \mu\\
+         &L \le x + \Delta x \le U.
+
    3. :math:`\rho = \frac{\displaystyle \|F(x + \Delta x)\|^2 -
       \|F(x)\|^2}{\displaystyle \|J(x)\Delta x + F(x)\|^2 -
       \|F(x)\|^2}`
    4. if :math:`\rho > \epsilon` then  :math:`x = x + \Delta x`.
    5. if :math:`\rho > \eta_1` then :math:`\rho = 2  \rho`
    6. else if :math:`\rho < \eta_2` then :math:`\rho = 0.5 * \rho`
-   7. Goto 2.
+   7. Go to 2.
 
 Here, :math:`\mu` is the trust region radius, :math:`D(x)` is some
 matrix used to define a metric on the domain of :math:`F(x)` and
@@ -103,21 +113,27 @@
 The key computational step in a trust-region algorithm is the solution
 of the constrained optimization problem
 
-.. math:: \arg\min_{\Delta x} \frac{1}{2}\|J(x)\Delta x +  F(x)\|^2\quad \text{such that}\quad  \|D(x)\Delta x\|^2 \le \mu
+.. math::
+   \arg \min_{\Delta x}& \frac{1}{2}\|J(x)\Delta x + F(x)\|^2 \\
+   \text{such that} &\|D(x)\Delta x\|^2 \le \mu\\
+    &L \le x + \Delta x \le U.
    :label: trp
 
 There are a number of different ways of solving this problem, each
 giving rise to a different concrete trust-region algorithm. Currently
 Ceres, implements two trust-region algorithms - Levenberg-Marquardt
-and Dogleg. The user can choose between them by setting
-:member:`Solver::Options::trust_region_strategy_type`.
+and Dogleg, each of which is augmented with a line search if bounds
+constraints are present [Kanzow]_. The user can choose between them by
+setting :member:`Solver::Options::trust_region_strategy_type`.
 
 .. rubric:: Footnotes
 
-.. [#f1] At the level of the non-linear solver, the block
-         structure is not relevant, therefore our discussion here is
-         in terms of an optimization problem defined over a state
-         vector of size :math:`n`.
+.. [#f1] At the level of the non-linear solver, the block structure is
+         not relevant, therefore our discussion here is in terms of an
+         optimization problem defined over a state vector of size
+         :math:`n`. Similarly the presence of loss functions is also
+         ignored as the problem is internally converted into a pure
+         non-linear least squares problem.
 
 
 .. _section-levenberg-marquardt:
@@ -291,9 +307,10 @@
 and then use it as the starting point to further optimize just `a_1`
 and `a_2`. For the linear case, this amounts to doing a single linear
 least squares solve. For non-linear problems, any method for solving
-the `a_1` and `a_2` optimization problems will do. The only constraint
-on `a_1` and `a_2` (if they are two different parameter block) is that
-they do not co-occur in a residual block.
+the :math:`a_1` and :math:`a_2` optimization problems will do. The
+only constraint on :math:`a_1` and :math:`a_2` (if they are two
+different parameter block) is that they do not co-occur in a residual
+block.
 
 This idea can be further generalized, by not just optimizing
 :math:`(a_1, a_2)`, but decomposing the graph corresponding to the
@@ -315,7 +332,7 @@
 -------------------
 
 Note that the basic trust-region algorithm described in
-Algorithm~\ref{alg:trust-region} is a descent algorithm in that they
+:ref:`section-trust-region-methods` is a descent algorithm in that it
 only accepts a point if it strictly reduces the value of the objective
 function.
 
@@ -346,10 +363,9 @@
 Line Search Methods
 ===================
 
-**The implementation of line search algorithms in Ceres Solver is
-fairly new and not very well tested, so for now this part of the
-solver should be considered beta quality. We welcome reports of your
-experiences both good and bad on the mailinglist.**
+The line search method in Ceres Solver cannot handle bounds
+constraints right now, so it can only be used for solving
+unconstrained problems.
 
 Line search algorithms
 
@@ -362,7 +378,7 @@
 Here :math:`H(x)` is some approximation to the Hessian of the
 objective function, and :math:`g(x)` is the gradient at
 :math:`x`. Depending on the choice of :math:`H(x)` we get a variety of
-different search directions -`\Delta x`.
+different search directions :math:`\Delta x`.
 
 Step 4, which is a one dimensional optimization or `Line Search` along
 :math:`\Delta x` is what gives this class of methods its name.
@@ -383,7 +399,7 @@
    Gradient method to non-linear functions. The generalization can be
    performed in a number of different ways, resulting in a variety of
    search directions. Ceres Solver currently supports
-   ``FLETCHER_REEVES``, ``POLAK_RIBIRERE`` and ``HESTENES_STIEFEL``
+   ``FLETCHER_REEVES``, ``POLAK_RIBIERE`` and ``HESTENES_STIEFEL``
    directions.
 
 3. ``BFGS`` A generalization of the Secant method to multiple
@@ -474,7 +490,9 @@
 Cholesky factorization of the normal equations. This leads to
 substantial savings in time and memory for large sparse
 problems. Ceres uses the sparse Cholesky factorization routines in
-Professor Tim Davis' ``SuiteSparse`` or ``CXSparse`` packages [Chen]_.
+Professor Tim Davis' ``SuiteSparse`` or ``CXSparse`` packages [Chen]_
+or the sparse Cholesky factorization algorithm in ``Eigen`` (which
+incidentally is a port of the algorithm implemented inside ``CXSparse``).
 
 .. _section-schur:
 
@@ -775,9 +793,14 @@
 
 .. class:: Solver::Options
 
-  :class:`Solver::Options` controls the overall behavior of the
-  solver. We list the various settings and their default values below.
+   :class:`Solver::Options` controls the overall behavior of the
+   solver. We list the various settings and their default values below.
 
+.. function:: bool Solver::Options::IsValid(string* error) const
+
+   Validates the values in the options struct and returns true on
+   success. If there is a problem, the method returns false with
+   ``error`` containing a textual description of the cause.
 
 .. member:: MinimizerType Solver::Options::minimizer_type
 
@@ -807,7 +830,7 @@
 
    Default: ``FLETCHER_REEVES``
 
-   Choices are ``FLETCHER_REEVES``, ``POLAK_RIBIRERE`` and
+   Choices are ``FLETCHER_REEVES``, ``POLAK_RIBIERE`` and
    ``HESTENES_STIEFEL``.
 
 .. member:: int Solver::Options::max_lbfs_rank
@@ -1099,7 +1122,7 @@
 
    Solver terminates if
 
-   .. math:: \frac{|\Delta \text{cost}|}{\text{cost} < \text{function_tolerance}}
+   .. math:: \frac{|\Delta \text{cost}|}{\text{cost}} < \text{function_tolerance}
 
    where, :math:`\Delta \text{cost}` is the change in objective
    function value (up or down) in the current iteration of
@@ -1111,10 +1134,12 @@
 
    Solver terminates if
 
-   .. math:: \frac{\|g(x)\|_\infty}{\|g(x_0)\|_\infty} < \text{gradient_tolerance}
+   .. math:: \|x - \Pi \boxplus(x, -g(x))\|_\infty < \text{gradient_tolerance}
 
-   where :math:`\|\cdot\|_\infty` refers to the max norm, and :math:`x_0` is
-   the vector of initial parameter values.
+   where :math:`\|\cdot\|_\infty` refers to the max norm, :math:`\Pi`
+   is projection onto the bounds constraints and :math:`\boxplus` is
+   Plus operation for the overall local parameterization associated
+   with the parameter vector.
 
 .. member:: double Solver::Options::parameter_tolerance
 
@@ -1133,8 +1158,9 @@
 
    Type of linear solver used to compute the solution to the linear
    least squares problem in each iteration of the Levenberg-Marquardt
-   algorithm. If Ceres is build with ``SuiteSparse`` linked in then
-   the default is ``SPARSE_NORMAL_CHOLESKY``, it is ``DENSE_QR``
+   algorithm. If Ceres is built with support for ``SuiteSparse`` or
+   ``CXSparse`` or ``Eigen``'s sparse Cholesky factorization, the
+   default is ``SPARSE_NORMAL_CHOLESKY``, it is ``DENSE_QR``
    otherwise.
 
 .. member:: PreconditionerType Solver::Options::preconditioner_type
@@ -1147,6 +1173,28 @@
    ``CLUSTER_JACOBI`` and ``CLUSTER_TRIDIAGONAL``. See
    :ref:`section-preconditioner` for more details.
 
+.. member:: VisibilityClusteringType Solver::Options::visibility_clustering_type
+
+   Default: ``CANONICAL_VIEWS``
+
+   Type of clustering algorithm to use when constructing a visibility
+   based preconditioner. The original visibility based preconditioning
+   paper and implementation only used the canonical views algorithm.
+
+   This algorithm gives high quality results but for large dense
+   graphs can be particularly expensive, as its worst case complexity
+   is cubic in the size of the graph.
+
+   Another option is to use ``SINGLE_LINKAGE`` which is a simple
+   thresholded single linkage clustering algorithm that only pays
+   attention to tightly coupled blocks in the Schur complement. This
+   is a fast algorithm that works well.
+
+   The optimal choice of the clustering algorithm depends on the
+   sparsity structure of the problem, but generally speaking we
+   recommend that you try ``CANONICAL_VIEWS`` first and if it is too
+   expensive try ``SINGLE_LINKAGE``.
+
 .. member:: DenseLinearAlgebraLibrary Solver::Options::dense_linear_algebra_library_type
 
    Default:``EIGEN``
@@ -1167,16 +1215,33 @@
 
    Default:``SUITE_SPARSE``
 
-   Ceres supports the use of two sparse linear algebra libraries,
+   Ceres supports the use of three sparse linear algebra libraries,
    ``SuiteSparse``, which is enabled by setting this parameter to
-   ``SUITE_SPARSE`` and ``CXSparse``, which can be selected by setting
-   this parameter to ```CX_SPARSE``. ``SuiteSparse`` is a
-   sophisticated and complex sparse linear algebra library and should
-   be used in general. If your needs/platforms prevent you from using
-   ``SuiteSparse``, consider using ``CXSparse``, which is a much
-   smaller, easier to build library. As can be expected, its
-   performance on large problems is not comparable to that of
-   ``SuiteSparse``.
+   ``SUITE_SPARSE``, ``CXSparse``, which can be selected by setting
+   this parameter to ``CX_SPARSE`` and ``Eigen`` which is enabled by
+   setting this parameter to ``EIGEN_SPARSE``.
+
+   ``SuiteSparse`` is a sophisticated and complex sparse linear
+   algebra library and should be used in general.
+
+   If your needs/platforms prevent you from using ``SuiteSparse``,
+   consider using ``CXSparse``, which is a much smaller, easier to
+   build library. As can be expected, its performance on large
+   problems is not comparable to that of ``SuiteSparse``.
+
+   Last but not least, you can use the sparse linear algebra
+   routines in ``Eigen``. Currently the performance of this library is
+   the poorest of the three. But this should change in the near
+   future.
+
+   Another thing to consider here is that the sparse Cholesky
+   factorization libraries in Eigen are licensed under ``LGPL`` and
+   building Ceres with support for ``EIGEN_SPARSE`` will result in an
+   LGPL licensed library (since the corresponding code from Eigen is
+   compiled into the library).
+
+   The upside is that you do not need to build and link to an external
+   library to use ``EIGEN_SPARSE``.
 
 .. member:: int Solver::Options::num_linear_solver_threads
 
@@ -1184,7 +1249,7 @@
 
    Number of threads used by the linear solver.
 
-.. member:: ParameterBlockOrdering* Solver::Options::linear_solver_ordering
+.. member:: shared_ptr<ParameterBlockOrdering> Solver::Options::linear_solver_ordering
 
    Default: ``NULL``
 
@@ -1221,6 +1286,22 @@
    expense of an extra copy of the Jacobian matrix. Setting
    ``use_postordering`` to ``true`` enables this tradeoff.
 
+.. member:: bool Solver::Options::dynamic_sparsity
+
+   Some non-linear least squares problems are symbolically dense but
+   numerically sparse. i.e. at any given state only a small number of
+   Jacobian entries are non-zero, but the position and number of
+   non-zeros is different depending on the state. For these problems
+   it can be useful to factorize the sparse jacobian at each solver
+   iteration instead of including all of the zero entries in a single
+   general factorization.
+
+   If your problem does not have this property (or you do not know),
+   then it is probably best to keep this false, otherwise it will
+   likely lead to worse performance.
+
+   This setting affects the `SPARSE_NORMAL_CHOLESKY` solver.
+
 .. member:: int Solver::Options::min_linear_solver_iterations
 
    Default: ``1``
@@ -1280,7 +1361,7 @@
    inner iterations in subsequent trust region minimizer iterations is
    disabled.
 
-.. member:: ParameterBlockOrdering*  Solver::Options::inner_iteration_ordering
+.. member:: shared_ptr<ParameterBlockOrdering> Solver::Options::inner_iteration_ordering
 
    Default: ``NULL``
 
@@ -1316,28 +1397,29 @@
 
    .. code-block:: bash
 
-      0: f: 1.250000e+01 d: 0.00e+00 g: 5.00e+00 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li:  0 it: 6.91e-06 tt: 1.91e-03
-      1: f: 1.249750e-07 d: 1.25e+01 g: 5.00e-04 h: 5.00e+00 rho: 1.00e+00 mu: 3.00e+04 li:  1 it: 2.81e-05 tt: 1.99e-03
-      2: f: 1.388518e-16 d: 1.25e-07 g: 1.67e-08 h: 5.00e-04 rho: 1.00e+00 mu: 9.00e+04 li:  1 it: 1.00e-05 tt: 2.01e-03
+      iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time
+         0  4.185660e+06    0.00e+00    1.09e+08   0.00e+00   0.00e+00  1.00e+04       0    7.59e-02    3.37e-01
+         1  1.062590e+05    4.08e+06    8.99e+06   5.36e+02   9.82e-01  3.00e+04       1    1.65e-01    5.03e-01
+         2  4.992817e+04    5.63e+04    8.32e+06   3.19e+02   6.52e-01  3.09e+04       1    1.45e-01    6.48e-01
 
    Here
 
-   #. ``f`` is the value of the objective function.
-   #. ``d`` is the change in the value of the objective function if
-      the step computed in this iteration is accepted.
-   #. ``g`` is the max norm of the gradient.
-   #. ``h`` is the change in the parameter vector.
-   #. ``rho`` is the ratio of the actual change in the objective
+   #. ``cost`` is the value of the objective function.
+   #. ``cost_change`` is the change in the value of the objective
+      function if the step computed in this iteration is accepted.
+   #. ``|gradient|`` is the max norm of the gradient.
+   #. ``|step|`` is the change in the parameter vector.
+   #. ``tr_ratio`` is the ratio of the actual change in the objective
       function value to the change in the the value of the trust
       region model.
-   #. ``mu`` is the size of the trust region radius.
-   #. ``li`` is the number of linear solver iterations used to compute
-      the trust region step. For direct/factorization based solvers it
-      is always 1, for iterative solvers like ``ITERATIVE_SCHUR`` it
-      is the number of iterations of the Conjugate Gradients
-      algorithm.
-   #. ``it`` is the time take by the current iteration.
-   #. ``tt`` is the the total time taken by the minimizer.
+   #. ``tr_radius`` is the size of the trust region radius.
+   #. ``ls_iter`` is the number of linear solver iterations used to
+      compute the trust region step. For direct/factorization based
+      solvers it is always 1, for iterative solvers like
+      ``ITERATIVE_SCHUR`` it is the number of iterations of the
+      Conjugate Gradients algorithm.
+   #. ``iter_time`` is the time take by the current iteration.
+   #. ``total_time`` is the total time taken by the minimizer.
 
    For ``LINE_SEARCH_MINIMIZER`` the progress display looks like
 
@@ -1466,17 +1548,6 @@
    iteration. This setting is useful when building an interactive
    application using Ceres and using an :class:`IterationCallback`.
 
-.. member:: string Solver::Options::solver_log
-
-   Default: ``empty``
-
-   If non-empty, a summary of the execution of the solver is recorded
-   to this file.  This file is used for recording and Ceres'
-   performance. Currently, only the iteration number, total time and
-   the objective function value are logged. The format of this file is
-   expected to change over time as the performance evaluation
-   framework is fleshed out.
-
 :class:`ParameterBlockOrdering`
 -------------------------------
 
@@ -1542,95 +1613,127 @@
 
 .. class:: IterationSummary
 
-   :class:`IterationSummary` describes the state of the optimizer
-   after each iteration of the minimization. Note that all times are
-   wall times.
+   :class:`IterationSummary` describes the state of the minimizer at
+   the end of each iteration.
 
-   .. code-block:: c++
+.. member:: int32 IterationSummary::iteration
 
-     struct IterationSummary {
-       // Current iteration number.
-       int32 iteration;
+   Current iteration number.
 
-       // Step was numerically valid, i.e., all values are finite and the
-       // step reduces the value of the linearized model.
-       //
-       // Note: step_is_valid is false when iteration = 0.
-       bool step_is_valid;
+.. member:: bool IterationSummary::step_is_valid
 
-       // Step did not reduce the value of the objective function
-       // sufficiently, but it was accepted because of the relaxed
-       // acceptance criterion used by the non-monotonic trust region
-       // algorithm.
-       //
-       // Note: step_is_nonmonotonic is false when iteration = 0;
-       bool step_is_nonmonotonic;
+   Step was numerically valid, i.e., all values are finite and the
+   step reduces the value of the linearized model.
 
-       // Whether or not the minimizer accepted this step or not. If the
-       // ordinary trust region algorithm is used, this means that the
-       // relative reduction in the objective function value was greater
-       // than Solver::Options::min_relative_decrease. However, if the
-       // non-monotonic trust region algorithm is used
-       // (Solver::Options:use_nonmonotonic_steps = true), then even if the
-       // relative decrease is not sufficient, the algorithm may accept the
-       // step and the step is declared successful.
-       //
-       // Note: step_is_successful is false when iteration = 0.
-       bool step_is_successful;
+    **Note**: :member:`IterationSummary::step_is_valid` is `false`
+    when :member:`IterationSummary::iteration` = 0.
 
-       // Value of the objective function.
-       double cost;
+.. member::  bool IterationSummary::step_is_nonmonotonic
 
-       // Change in the value of the objective function in this
-       // iteration. This can be positive or negative.
-       double cost_change;
+    Step did not reduce the value of the objective function
+    sufficiently, but it was accepted because of the relaxed
+    acceptance criterion used by the non-monotonic trust region
+    algorithm.
 
-       // Infinity norm of the gradient vector.
-       double gradient_max_norm;
+    **Note**: :member:`IterationSummary::step_is_nonmonotonic` is
+    `false` when :member:`IterationSummary::iteration` = 0.
 
-       // 2-norm of the size of the step computed by the optimization
-       // algorithm.
-       double step_norm;
+.. member:: bool IterationSummary::step_is_successful
 
-       // For trust region algorithms, the ratio of the actual change in
-       // cost and the change in the cost of the linearized approximation.
-       double relative_decrease;
+   Whether or not the minimizer accepted this step.
 
-       // Size of the trust region at the end of the current iteration. For
-       // the Levenberg-Marquardt algorithm, the regularization parameter
-       // mu = 1.0 / trust_region_radius.
-       double trust_region_radius;
+   If the ordinary trust region algorithm is used, this means that the
+   relative reduction in the objective function value was greater than
+   :member:`Solver::Options::min_relative_decrease`. However, if the
+   non-monotonic trust region algorithm is used
+   (:member:`Solver::Options::use_nonmonotonic_steps` = `true`), then
+   even if the relative decrease is not sufficient, the algorithm may
+   accept the step and the step is declared successful.
 
-       // For the inexact step Levenberg-Marquardt algorithm, this is the
-       // relative accuracy with which the Newton(LM) step is solved. This
-       // number affects only the iterative solvers capable of solving
-       // linear systems inexactly. Factorization-based exact solvers
-       // ignore it.
-       double eta;
+   **Note**: :member:`IterationSummary::step_is_successful` is `false`
+   when :member:`IterationSummary::iteration` = 0.
 
-       // Step sized computed by the line search algorithm.
-       double step_size;
+.. member:: double IterationSummary::cost
 
-       // Number of function evaluations used by the line search algorithm.
-       int line_search_function_evaluations;
+   Value of the objective function.
 
-       // Number of iterations taken by the linear solver to solve for the
-       // Newton step.
-       int linear_solver_iterations;
+.. member:: double IterationSummary::cost_change
 
-       // Time (in seconds) spent inside the minimizer loop in the current
-       // iteration.
-       double iteration_time_in_seconds;
+   Change in the value of the objective function in this
+   iteration. This can be positive or negative.
 
-       // Time (in seconds) spent inside the trust region step solver.
-       double step_solver_time_in_seconds;
+.. member:: double IterationSummary::gradient_max_norm
 
-       // Time (in seconds) since the user called Solve().
-       double cumulative_time_in_seconds;
-    };
+   Infinity norm of the gradient vector.
+
+.. member:: double IterationSummary::gradient_norm
+
+   2-norm of the gradient vector.
+
+.. member:: double IterationSummary::step_norm
+
+   2-norm of the size of the step computed in this iteration.
+
+.. member:: double IterationSummary::relative_decrease
+
+   For trust region algorithms, the ratio of the actual change in cost
+   and the change in the cost of the linearized approximation.
+
+   This field is not used when a line search minimizer is used.
+
+.. member:: double IterationSummary::trust_region_radius
+
+   Size of the trust region at the end of the current iteration. For
+   the Levenberg-Marquardt algorithm, the regularization parameter is
+   1.0 / :member:`IterationSummary::trust_region_radius`.
+
+.. member:: double IterationSummary::eta
+
+   For the inexact step Levenberg-Marquardt algorithm, this is the
+   relative accuracy with which the step is solved. This number is
+   only applicable to the iterative solvers capable of solving linear
+   systems inexactly. Factorization-based exact solvers always have an
+   eta of 0.0.
+
+.. member:: double IterationSummary::step_size
+
+   Step size computed by the line search algorithm.
+
+   This field is not used when a trust region minimizer is used.
+
+.. member:: int IterationSummary::line_search_function_evaluations
+
+   Number of function evaluations used by the line search algorithm.
+
+   This field is not used when a trust region minimizer is used.
+
+.. member:: int IterationSummary::linear_solver_iterations
+
+   Number of iterations taken by the linear solver to solve for the
+   trust region step.
+
+   Currently this field is not used when a line search minimizer is
+   used.
+
+.. member:: double IterationSummary::iteration_time_in_seconds
+
+   Time (in seconds) spent inside the minimizer loop in the current
+   iteration.
+
+.. member:: double IterationSummary::step_solver_time_in_seconds
+
+   Time (in seconds) spent inside the trust region step solver.
+
+.. member:: double IterationSummary::cumulative_time_in_seconds
+
+   Time (in seconds) since the user called Solve().
+
 
 .. class:: IterationCallback
 
+   Interface for specifying callbacks that are executed at the end of
+   each iteration of the minimizer.
+
    .. code-block:: c++
 
       class IterationCallback {
@@ -1639,16 +1742,16 @@
         virtual CallbackReturnType operator()(const IterationSummary& summary) = 0;
       };
 
-  Interface for specifying callbacks that are executed at the end of
-  each iteration of the Minimizer. The solver uses the return value of
-  ``operator()`` to decide whether to continue solving or to
-  terminate. The user can return three values.
+
+  The solver uses the return value of ``operator()`` to decide whether
+  to continue solving or to terminate. The user can return three
+  values.
 
   #. ``SOLVER_ABORT`` indicates that the callback detected an abnormal
      situation. The solver returns without updating the parameter
      blocks (unless ``Solver::Options::update_state_every_iteration`` is
      set true). Solver returns with ``Solver::Summary::termination_type``
-     set to ``USER_ABORT``.
+     set to ``USER_FAILURE``.
 
   #. ``SOLVER_TERMINATE_SUCCESSFULLY`` indicates that there is no need
      to optimize anymore (some user specified termination criterion
@@ -1658,8 +1761,8 @@
   #. ``SOLVER_CONTINUE`` indicates that the solver should continue
      optimizing.
 
-  For example, the following ``IterationCallback`` is used internally
-  by Ceres to log the progress of the optimization.
+  For example, the following :class:`IterationCallback` is used
+  internally by Ceres to log the progress of the optimization.
 
   .. code-block:: c++
 
@@ -1703,50 +1806,56 @@
 
 .. class:: CRSMatrix
 
-   .. code-block:: c++
-
-      struct CRSMatrix {
-        int num_rows;
-        int num_cols;
-        vector<int> cols;
-        vector<int> rows;
-        vector<double> values;
-      };
-
    A compressed row sparse matrix used primarily for communicating the
    Jacobian matrix to the user.
 
-   A compressed row matrix stores its contents in three arrays,
-   ``rows``, ``cols`` and ``values``.
+.. member:: int CRSMatrix::num_rows
 
-   ``rows`` is a ``num_rows + 1`` sized array that points into the ``cols`` and
-   ``values`` array. For each row ``i``:
+   Number of rows.
 
-   ``cols[rows[i]]`` ... ``cols[rows[i + 1] - 1]`` are the indices of the
-   non-zero columns of row ``i``.
+.. member:: int CRSMatrix::num_cols
 
-   ``values[rows[i]]`` ... ``values[rows[i + 1] - 1]`` are the values of the
-   corresponding entries.
+   Number of columns.
 
-   ``cols`` and ``values`` contain as many entries as there are
+.. member:: vector<int> CRSMatrix::rows
+
+   :member:`CRSMatrix::rows` is a :member:`CRSMatrix::num_rows` + 1
+   sized array that points into the :member:`CRSMatrix::cols` and
+   :member:`CRSMatrix::values` array.
+
+.. member:: vector<int> CRSMatrix::cols
+
+   :member:`CRSMatrix::cols` contain as many entries as there are
    non-zeros in the matrix.
 
-   e.g, consider the 3x4 sparse matrix
+   For each row ``i``, ``cols[rows[i]]`` ... ``cols[rows[i + 1] - 1]``
+   are the indices of the non-zero columns of row ``i``.
 
-   .. code-block:: c++
+.. member:: vector<double> CRSMatrix::values
 
-     0 10  0  4
-     0  2 -3  2
-     1  2  0  0
+   :member:`CRSMatrix::values` contain as many entries as there are
+   non-zeros in the matrix.
 
-   The three arrays will be:
+   For each row ``i``,
+   ``values[rows[i]]`` ... ``values[rows[i + 1] - 1]`` are the values
+   of the non-zero columns of row ``i``.
 
-   .. code-block:: c++
+e.g., consider the 3x4 sparse matrix
 
-                 -row0-  ---row1---  -row2-
-       rows   = [ 0,      2,          5,     7]
-       cols   = [ 1,  3,  1,  2,  3,  0,  1]
-       values = [10,  4,  2, -3,  2,  1,  2]
+.. code-block:: c++
+
+   0 10  0  4
+   0  2 -3  2
+   1  2  0  0
+
+The three arrays will be:
+
+.. code-block:: c++
+
+            -row0-  ---row1---  -row2-
+   rows   = [ 0,      2,          5,     7]
+   cols   = [ 1,  3,  1,  2,  3,  0,  1]
+   values = [10,  4,  2, -3,  2,  1,  2]
 
 
 :class:`Solver::Summary`
@@ -1754,113 +1863,289 @@
 
 .. class:: Solver::Summary
 
-  Note that all times reported in this struct are wall times.
+   Summary of the various stages of the solver after termination.
 
-  .. code-block:: c++
 
-     struct Summary {
-       // A brief one line description of the state of the solver after
-       // termination.
-       string BriefReport() const;
+.. function:: string Solver::Summary::BriefReport() const
 
-       // A full multiline description of the state of the solver after
-       // termination.
-       string FullReport() const;
+   A brief one line description of the state of the solver after
+   termination.
 
-       // Minimizer summary -------------------------------------------------
-       MinimizerType minimizer_type;
+.. function:: string Solver::Summary::FullReport() const
 
-       SolverTerminationType termination_type;
+   A full multiline description of the state of the solver after
+   termination.
 
-       // If the solver did not run, or there was a failure, a
-       // description of the error.
-       string error;
+.. function:: bool Solver::Summary::IsSolutionUsable() const
 
-       // Cost of the problem before and after the optimization. See
-       // problem.h for definition of the cost of a problem.
-       double initial_cost;
-       double final_cost;
+   Whether the solution returned by the optimization algorithm can be
+   relied on to be numerically sane. This will be the case if
+   `Solver::Summary::termination_type` is set to `CONVERGENCE`,
+   `USER_SUCCESS` or `NO_CONVERGENCE`, i.e., either the solver
+   converged by meeting one of the convergence tolerances or because
+   the user indicated that it had converged or it ran to the maximum
+   number of iterations or time.
 
-       // The part of the total cost that comes from residual blocks that
-       // were held fixed by the preprocessor because all the parameter
-       // blocks that they depend on were fixed.
-       double fixed_cost;
+.. member:: MinimizerType Solver::Summary::minimizer_type
 
-       vector<IterationSummary> iterations;
+   Type of minimization algorithm used.
 
-       int num_successful_steps;
-       int num_unsuccessful_steps;
-       int num_inner_iteration_steps;
+.. member:: TerminationType Solver::Summary::termination_type
 
-       // When the user calls Solve, before the actual optimization
-       // occurs, Ceres performs a number of preprocessing steps. These
-       // include error checks, memory allocations, and reorderings. This
-       // time is accounted for as preprocessing time.
-       double preprocessor_time_in_seconds;
+   The cause of the minimizer terminating.
 
-       // Time spent in the TrustRegionMinimizer.
-       double minimizer_time_in_seconds;
+.. member:: string Solver::Summary::message
 
-       // After the Minimizer is finished, some time is spent in
-       // re-evaluating residuals etc. This time is accounted for in the
-       // postprocessor time.
-       double postprocessor_time_in_seconds;
+   Reason why the solver terminated.
 
-       // Some total of all time spent inside Ceres when Solve is called.
-       double total_time_in_seconds;
+.. member:: double Solver::Summary::initial_cost
 
-       double linear_solver_time_in_seconds;
-       double residual_evaluation_time_in_seconds;
-       double jacobian_evaluation_time_in_seconds;
-       double inner_iteration_time_in_seconds;
+   Cost of the problem (value of the objective function) before the
+   optimization.
 
-       // Preprocessor summary.
-       int num_parameter_blocks;
-       int num_parameters;
-       int num_effective_parameters;
-       int num_residual_blocks;
-       int num_residuals;
+.. member:: double Solver::Summary::final_cost
 
-       int num_parameter_blocks_reduced;
-       int num_parameters_reduced;
-       int num_effective_parameters_reduced;
-       int num_residual_blocks_reduced;
-       int num_residuals_reduced;
+   Cost of the problem (value of the objective function) after the
+   optimization.
 
-       int num_eliminate_blocks_given;
-       int num_eliminate_blocks_used;
+.. member:: double Solver::Summary::fixed_cost
 
-       int num_threads_given;
-       int num_threads_used;
+   The part of the total cost that comes from residual blocks that
+   were held fixed by the preprocessor because all the parameter
+   blocks that they depend on were fixed.
 
-       int num_linear_solver_threads_given;
-       int num_linear_solver_threads_used;
+.. member:: vector<IterationSummary> Solver::Summary::iterations
 
-       LinearSolverType linear_solver_type_given;
-       LinearSolverType linear_solver_type_used;
+   :class:`IterationSummary` for each minimizer iteration in order.
 
-       vector<int> linear_solver_ordering_given;
-       vector<int> linear_solver_ordering_used;
+.. member:: int Solver::Summary::num_successful_steps
 
-       bool inner_iterations_given;
-       bool inner_iterations_used;
+   Number of minimizer iterations in which the step was
+   accepted. Unless :member:`Solver::Options::use_nonmonotonic_steps`
+   is `true` this is also the number of steps in which the objective
+   function value/cost went down.
 
-       vector<int> inner_iteration_ordering_given;
-       vector<int> inner_iteration_ordering_used;
+.. member:: int Solver::Summary::num_unsuccessful_steps
 
-       PreconditionerType preconditioner_type;
+   Number of minimizer iterations in which the step was rejected
+   either because it did not reduce the cost enough or the step was
+   not numerically valid.
 
-       TrustRegionStrategyType trust_region_strategy_type;
-       DoglegType dogleg_type;
+.. member:: int Solver::Summary::num_inner_iteration_steps
 
-       DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
-       SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
+   Number of times inner iterations were performed.
 
-       LineSearchDirectionType line_search_direction_type;
-       LineSearchType line_search_type;
-       int max_lbfgs_rank;
-    };
+.. member:: double Solver::Summary::preprocessor_time_in_seconds
 
+   Time (in seconds) spent in the preprocessor.
+
+.. member:: double Solver::Summary::minimizer_time_in_seconds
+
+   Time (in seconds) spent in the Minimizer.
+
+.. member:: double Solver::Summary::postprocessor_time_in_seconds
+
+   Time (in seconds) spent in the post processor.
+
+.. member:: double Solver::Summary::total_time_in_seconds
+
+   Time (in seconds) spent in the solver.
+
+.. member:: double Solver::Summary::linear_solver_time_in_seconds
+
+   Time (in seconds) spent in the linear solver computing the trust
+   region step.
+
+.. member:: double Solver::Summary::residual_evaluation_time_in_seconds
+
+   Time (in seconds) spent evaluating the residual vector.
+
+.. member:: double Solver::Summary::jacobian_evaluation_time_in_seconds
+
+   Time (in seconds) spent evaluating the Jacobian matrix.
+
+.. member:: double Solver::Summary::inner_iteration_time_in_seconds
+
+   Time (in seconds) spent doing inner iterations.
+
+.. member:: int Solver::Summary::num_parameter_blocks
+
+   Number of parameter blocks in the problem.
+
+.. member:: int Solver::Summary::num_parameters
+
+   Number of parameters in the problem.
+
+.. member:: int Solver::Summary::num_effective_parameters
+
+   Dimension of the tangent space of the problem (or the number of
+   columns in the Jacobian for the problem). This is different from
+   :member:`Solver::Summary::num_parameters` if a parameter block is
+   associated with a :class:`LocalParameterization`.
+
+.. member:: int Solver::Summary::num_residual_blocks
+
+   Number of residual blocks in the problem.
+
+.. member:: int Solver::Summary::num_residuals
+
+   Number of residuals in the problem.
+
+.. member:: int Solver::Summary::num_parameter_blocks_reduced
+
+   Number of parameter blocks in the problem after the inactive and
+   constant parameter blocks have been removed. A parameter block is
+   inactive if no residual block refers to it.
+
+.. member:: int Solver::Summary::num_parameters_reduced
+
+   Number of parameters in the reduced problem.
+
+.. member:: int Solver::Summary::num_effective_parameters_reduced
+
+   Dimension of the tangent space of the reduced problem (or the
+   number of columns in the Jacobian for the reduced problem). This is
+   different from :member:`Solver::Summary::num_parameters_reduced` if
+   a parameter block in the reduced problem is associated with a
+   :class:`LocalParameterization`.
+
+.. member:: int Solver::Summary::num_residual_blocks_reduced
+
+   Number of residual blocks in the reduced problem.
+
+.. member:: int Solver::Summary::num_residuals_reduced
+
+   Number of residuals in the reduced problem.
+
+.. member:: int Solver::Summary::num_threads_given
+
+   Number of threads specified by the user for Jacobian and residual
+   evaluation.
+
+.. member:: int Solver::Summary::num_threads_used
+
+   Number of threads actually used by the solver for Jacobian and
+   residual evaluation. This number is not equal to
+   :member:`Solver::Summary::num_threads_given` if `OpenMP` is not
+   available.
+
+.. member:: int Solver::Summary::num_linear_solver_threads_given
+
+   Number of threads specified by the user for solving the trust
+   region problem.
+
+.. member:: int Solver::Summary::num_linear_solver_threads_used
+
+   Number of threads actually used by the solver for solving the trust
+   region problem. This number is not equal to
+   :member:`Solver::Summary::num_linear_solver_threads_given` if
+   `OpenMP` is not available.
+
+.. member:: LinearSolverType Solver::Summary::linear_solver_type_given
+
+   Type of the linear solver requested by the user.
+
+.. member:: LinearSolverType Solver::Summary::linear_solver_type_used
+
+   Type of the linear solver actually used. This may be different from
+   :member:`Solver::Summary::linear_solver_type_given` if Ceres
+   determines that the problem structure is not compatible with the
+   linear solver requested or if the linear solver requested by the
+   user is not available, e.g. The user requested
+   `SPARSE_NORMAL_CHOLESKY` but no sparse linear algebra library was
+   available.
+
+.. member:: vector<int> Solver::Summary::linear_solver_ordering_given
+
+   Size of the elimination groups given by the user as hints to the
+   linear solver.
+
+.. member:: vector<int> Solver::Summary::linear_solver_ordering_used
+
+   Size of the parameter groups used by the solver when ordering the
+   columns of the Jacobian.  This may be different from
+   :member:`Solver::Summary::linear_solver_ordering_given` if the user
+   left :member:`Solver::Summary::linear_solver_ordering_given` blank
+   and asked for an automatic ordering, or if the problem contains
+   some constant or inactive parameter blocks.
+
+.. member:: bool Solver::Summary::inner_iterations_given
+
+   `True` if the user asked for inner iterations to be used as part of
+   the optimization.
+
+.. member:: bool Solver::Summary::inner_iterations_used
+
+   `True` if the user asked for inner iterations to be used as part of
+   the optimization and the problem structure was such that they were
+   actually performed. e.g., in a problem with just one parameter
+   block, inner iterations are not performed.
+
+.. member:: vector<int> Solver::Summary::inner_iteration_ordering_given
+
+   Size of the parameter groups given by the user for performing inner
+   iterations.
+
+.. member:: vector<int> Solver::Summary::inner_iteration_ordering_used
+
+   Size of the parameter groups used by the solver for
+   performing inner iterations. This may be different from
+   :member:`Solver::Summary::inner_iteration_ordering_given` if the
+   user left :member:`Solver::Summary::inner_iteration_ordering_given`
+   blank and asked for an automatic ordering, or if the problem
+   contains some constant or inactive parameter blocks.
+
+.. member:: PreconditionerType Solver::Summary::preconditioner_type
+
+   Type of preconditioner used for solving the trust region step. Only
+   meaningful when an iterative linear solver is used.
+
+.. member:: VisibilityClusteringType Solver::Summary::visibility_clustering_type
+
+   Type of clustering algorithm used for visibility based
+   preconditioning. Only meaningful when the
+   :member:`Solver::Summary::preconditioner_type` is
+   ``CLUSTER_JACOBI`` or ``CLUSTER_TRIDIAGONAL``.
+
+.. member:: TrustRegionStrategyType Solver::Summary::trust_region_strategy_type
+
+   Type of trust region strategy.
+
+.. member:: DoglegType Solver::Summary::dogleg_type
+
+   Type of dogleg strategy used for solving the trust region problem.
+
+.. member:: DenseLinearAlgebraLibraryType Solver::Summary::dense_linear_algebra_library_type
+
+   Type of the dense linear algebra library used.
+
+.. member:: SparseLinearAlgebraLibraryType Solver::Summary::sparse_linear_algebra_library_type
+
+   Type of the sparse linear algebra library used.
+
+.. member:: LineSearchDirectionType Solver::Summary::line_search_direction_type
+
+   Type of line search direction used.
+
+.. member:: LineSearchType Solver::Summary::line_search_type
+
+   Type of the line search algorithm used.
+
+.. member:: LineSearchInterpolationType Solver::Summary::line_search_interpolation_type
+
+   When performing line search, the degree of the polynomial used to
+   approximate the objective function.
+
+.. member:: NonlinearConjugateGradientType Solver::Summary::nonlinear_conjugate_gradient_type
+
+   If the line search direction is `NONLINEAR_CONJUGATE_GRADIENT`,
+   then this indicates the particular variant of non-linear conjugate
+   gradient used.
+
+.. member:: int Solver::Summary::max_lbfgs_rank
+
+   If the type of the line search direction is `LBFGS`, then this
+   indicates the rank of the Hessian approximation.
 
 Covariance Estimation
 =====================
@@ -1997,7 +2282,8 @@
 
 .. member:: CovarianceAlgorithmType Covariance::Options::algorithm_type
 
-   Default: ``SPARSE_QR`` or ``DENSE_SVD``
+   Default: ``SUITE_SPARSE_QR`` if ``SuiteSparseQR`` is installed and
+   ``EIGEN_SPARSE_QR`` otherwise.
 
    Ceres supports three different algorithms for covariance
    estimation, which represent different tradeoffs in speed, accuracy
@@ -2016,47 +2302,23 @@
       small to moderate sized problems. It can handle full-rank as
       well as rank deficient Jacobians.
 
-   2. ``SPARSE_CHOLESKY`` uses the ``CHOLMOD`` sparse Cholesky
-      factorization library to compute the decomposition :
-
-      .. math::   R^\top R = J^\top J
-
-      and then
-
-      .. math::   \left(J^\top J\right)^{-1}  = \left(R^\top R\right)^{-1}
-
-      It a fast algorithm for sparse matrices that should be used when
-      the Jacobian matrix J is well conditioned. For ill-conditioned
-      matrices, this algorithm can fail unpredictabily. This is
-      because Cholesky factorization is not a rank-revealing
-      factorization, i.e., it cannot reliably detect when the matrix
-      being factorized is not of full
-      rank. ``SuiteSparse``/``CHOLMOD`` supplies a heuristic for
-      checking if the matrix is rank deficient (cholmod_rcond), but it
-      is only a heuristic and can have both false positive and false
-      negatives.
-
-      Recent versions of ``SuiteSparse`` (>= 4.2.0) provide a much more
-      efficient method for solving for rows of the covariance
-      matrix. Therefore, if you are doing ``SPARSE_CHOLESKY``, we strongly
-      recommend using a recent version of ``SuiteSparse``.
-
-   3. ``SPARSE_QR`` uses the ``SuiteSparseQR`` sparse QR factorization
-      library to compute the decomposition
+   2. ``EIGEN_SPARSE_QR`` uses the sparse QR factorization algorithm
+      in ``Eigen`` to compute the decomposition
 
        .. math::
 
           QR &= J\\
           \left(J^\top J\right)^{-1} &= \left(R^\top R\right)^{-1}
 
-      It is a moderately fast algorithm for sparse matrices, which at
-      the price of more time and memory than the ``SPARSE_CHOLESKY``
-      algorithm is numerically better behaved and is rank revealing,
-      i.e., it can reliably detect when the Jacobian matrix is rank
-      deficient.
+      It is a moderately fast algorithm for sparse matrices.
 
-   Neither ``SPARSE_CHOLESKY`` or ``SPARSE_QR`` are capable of computing
-   the covariance if the Jacobian is rank deficient.
+   3. ``SUITE_SPARSE_QR`` uses the sparse QR factorization algorithm
+      in ``SuiteSparse``. It uses dense linear algebra and is multi
+      threaded, so for large sparse matrices it is
+      significantly faster than ``EIGEN_SPARSE_QR``.
+
+   Neither ``EIGEN_SPARSE_QR`` nor ``SUITE_SPARSE_QR`` are capable of
+   computing the covariance if the Jacobian is rank deficient.
 
 .. member:: int Covariance::Options::min_reciprocal_condition_number
 
@@ -2095,29 +2357,14 @@
       :math:`\sigma_{\text{max}}` are the minimum and maxiumum
       singular values of :math:`J` respectively.
 
-    2. ``SPARSE_CHOLESKY``
-
-       .. math::  \text{cholmod_rcond} < \text{min_reciprocal_conditioner_number}
-
-      Here cholmod_rcond is a crude estimate of the reciprocal
-      condition number of :math:`J^\top J` by using the maximum and
-      minimum diagonal entries of the Cholesky factor :math:`R`. There
-      are no theoretical guarantees associated with this test. It can
-      give false positives and negatives. Use at your own risk. The
-      default value of ``min_reciprocal_condition_number`` has been
-      set to a conservative value, and sometimes the
-      :func:`Covariance::Compute` may return false even if it is
-      possible to estimate the covariance reliably. In such cases, the
-      user should exercise their judgement before lowering the value
-      of ``min_reciprocal_condition_number``.
-
-    3. ``SPARSE_QR``
+   2. ``EIGEN_SPARSE_QR`` and ``SUITE_SPARSE_QR``
 
        .. math:: \operatorname{rank}(J) < \operatorname{num\_col}(J)
 
        Here :\math:`\operatorname{rank}(J)` is the estimate of the
-       rank of `J` returned by the ``SuiteSparseQR`` algorithm. It is
-       a fairly reliable indication of rank deficiency.
+       rank of `J` returned by the sparse QR factorization
+       algorithm. It is a fairly reliable indication of rank
+       deficiency.
 
 .. member:: int Covariance::Options::null_space_rank
 
@@ -2152,8 +2399,8 @@
 
     .. math::  \frac{\lambda_i}{\lambda_{\textrm{max}}} < \textrm{min_reciprocal_condition_number}
 
-    This option has no effect on ``SPARSE_QR`` and ``SPARSE_CHOLESKY``
-      algorithms.
+    This option has no effect on ``EIGEN_SPARSE_QR`` and
+    ``SUITE_SPARSE_QR``.
 
 .. member:: bool Covariance::Options::apply_loss_function
 
@@ -2243,4 +2490,3 @@
  covariance.GetCovarianceBlock(x, x, covariance_xx)
  covariance.GetCovarianceBlock(y, y, covariance_yy)
  covariance.GetCovarianceBlock(x, y, covariance_xy)
-
diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst
index 1e5756a..79714f6 100644
--- a/docs/source/tutorial.rst
+++ b/docs/source/tutorial.rst
@@ -7,10 +7,27 @@
 ========
 Tutorial
 ========
-Ceres solves robustified non-linear least squares problems of the form
 
-.. math:: \frac{1}{2}\sum_{i=1} \rho_i\left(\left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2\right).
-   :label: ceresproblem
+Ceres solves robustified non-linear bounds constrained least squares
+problems of the form
+
+.. math:: :label: ceresproblem
+
+   \min_{\mathbf{x}} &\quad \frac{1}{2}\sum_{i} \rho_i\left(\left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2\right) \\
+   \text{s.t.} &\quad l_j \le x_j \le u_j
+
+Problems of this form come up in a broad range of areas across
+science and engineering - from `fitting curves`_ in statistics, to
+constructing `3D models from photographs`_ in computer vision.
+
+.. _fitting curves: http://en.wikipedia.org/wiki/Nonlinear_regression
+.. _3D models from photographs: http://en.wikipedia.org/wiki/Bundle_adjustment
+
+In this chapter we will learn how to solve :eq:`ceresproblem` using
+Ceres Solver. Full working code for all the examples described in this
+chapter and more can be found in the `examples
+<https://ceres-solver.googlesource.com/ceres-solver/+/master/examples/>`_
+directory.
 
 The expression
 :math:`\rho_i\left(\left\|f_i\left(x_{i_1},...,x_{i_k}\right)\right\|^2\right)`
@@ -21,24 +38,21 @@
 components of a translation vector and the four components of the
 quaternion that define the pose of a camera. We refer to such a group
 of small scalars as a ``ParameterBlock``. Of course a
-``ParameterBlock`` can just be a single parameter.
+``ParameterBlock`` can just be a single parameter. :math:`l_j` and
+:math:`u_j` are bounds on the parameter block :math:`x_j`.
 
 :math:`\rho_i` is a :class:`LossFunction`. A :class:`LossFunction` is
 a scalar function that is used to reduce the influence of outliers on
-the solution of non-linear least squares problems. As a special case,
-when :math:`\rho_i(x) = x`, i.e., the identity function, we get the
-more familiar `non-linear least squares problem
+the solution of non-linear least squares problems.
+
+As a special case, when :math:`\rho_i(x) = x`, i.e., the identity
+function, and :math:`l_j = -\infty` and :math:`u_j = \infty` we get
+the more familiar `non-linear least squares problem
 <http://en.wikipedia.org/wiki/Non-linear_least_squares>`_.
 
-.. math:: \frac{1}{2}\sum_{i=1} \left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2.
+.. math:: \frac{1}{2}\sum_{i} \left\|f_i\left(x_{i_1}, ... ,x_{i_k}\right)\right\|^2.
    :label: ceresproblem2
 
-In this chapter we will learn how to solve :eq:`ceresproblem` using
-Ceres Solver. Full working code for all the examples described in this
-chapter and more can be found in the `examples
-<https://ceres-solver.googlesource.com/ceres-solver/+/master/examples/>`_
-directory.
-
 .. _section-hello-world:
 
 Hello World!
@@ -68,10 +82,10 @@
 
 The important thing to note here is that ``operator()`` is a templated
 method, which assumes that all its inputs and outputs are of some type
-``T``. The reason for using templates here is because Ceres will call
-``CostFunctor::operator<T>()``, with ``T=double`` when just the
-residual is needed, and with a special type ``T=Jet`` when the
-Jacobians are needed. In :ref:`section-derivatives` we discuss the
+``T``. The use of templating here allows Ceres to call
+``CostFunctor::operator<T>()``, with ``T=double`` when just the value
+of the residual is needed, and with a special type ``T=Jet`` when the
+Jacobians are needed. In :ref:`section-derivatives` we will discuss the
 various ways of supplying derivatives to Ceres in more detail.
 
 Once we have a way of computing the residual function, it is now time
@@ -119,11 +133,12 @@
 
 .. code-block:: bash
 
-      0: f: 1.250000e+01 d: 0.00e+00 g: 5.00e+00 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li:  0 it: 6.91e-06 tt: 1.91e-03
-      1: f: 1.249750e-07 d: 1.25e+01 g: 5.00e-04 h: 5.00e+00 rho: 1.00e+00 mu: 3.00e+04 li:  1 it: 2.81e-05 tt: 1.99e-03
-      2: f: 1.388518e-16 d: 1.25e-07 g: 1.67e-08 h: 5.00e-04 rho: 1.00e+00 mu: 9.00e+04 li:  1 it: 1.00e-05 tt: 2.01e-03
-   Ceres Solver Report: Iterations: 2, Initial cost: 1.250000e+01, Final cost: 1.388518e-16, Termination: PARAMETER_TOLERANCE.
-   x : 5 -> 10
+   iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time
+      0  4.512500e+01    0.00e+00    9.50e+00   0.00e+00   0.00e+00  1.00e+04       0    5.33e-04    3.46e-03
+      1  4.511598e-07    4.51e+01    9.50e-04   9.50e+00   1.00e+00  3.00e+04       1    5.00e-04    4.05e-03
+      2  5.012552e-16    4.51e-07    3.17e-08   9.50e-04   1.00e+00  9.00e+04       1    1.60e-05    4.09e-03
+   Ceres Solver Report: Iterations: 2, Initial cost: 4.512500e+01, Final cost: 5.012552e-16, Termination: CONVERGENCE
+   x : 0.5 -> 10
 
 Starting from a :math:`x=5`, the solver in two iterations goes to 10
 [#f2]_. The careful reader will note that this is a linear problem and
@@ -359,21 +374,64 @@
 
 .. code-block:: bash
 
- Initial x1 = 3, x2 = -1, x3 = 0, x4 = 1
-    0: f: 1.075000e+02 d: 0.00e+00 g: 1.55e+02 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li:  0 it: 0.00e+00 tt: 0.00e+00
-    1: f: 5.036190e+00 d: 1.02e+02 g: 2.00e+01 h: 2.16e+00 rho: 9.53e-01 mu: 3.00e+04 li:  1 it: 0.00e+00 tt: 0.00e+00
-    2: f: 3.148168e-01 d: 4.72e+00 g: 2.50e+00 h: 6.23e-01 rho: 9.37e-01 mu: 9.00e+04 li:  1 it: 0.00e+00 tt: 0.00e+00
-    3: f: 1.967760e-02 d: 2.95e-01 g: 3.13e-01 h: 3.08e-01 rho: 9.37e-01 mu: 2.70e+05 li:  1 it: 0.00e+00 tt: 0.00e+00
-    4: f: 1.229900e-03 d: 1.84e-02 g: 3.91e-02 h: 1.54e-01 rho: 9.37e-01 mu: 8.10e+05 li:  1 it: 0.00e+00 tt: 0.00e+00
-    5: f: 7.687123e-05 d: 1.15e-03 g: 4.89e-03 h: 7.69e-02 rho: 9.37e-01 mu: 2.43e+06 li:  1 it: 0.00e+00 tt: 0.00e+00
-    6: f: 4.804625e-06 d: 7.21e-05 g: 6.11e-04 h: 3.85e-02 rho: 9.37e-01 mu: 7.29e+06 li:  1 it: 0.00e+00 tt: 0.00e+00
-    7: f: 3.003028e-07 d: 4.50e-06 g: 7.64e-05 h: 1.92e-02 rho: 9.37e-01 mu: 2.19e+07 li:  1 it: 0.00e+00 tt: 0.00e+00
-    8: f: 1.877006e-08 d: 2.82e-07 g: 9.54e-06 h: 9.62e-03 rho: 9.37e-01 mu: 6.56e+07 li:  1 it: 0.00e+00 tt: 0.00e+00
-    9: f: 1.173223e-09 d: 1.76e-08 g: 1.19e-06 h: 4.81e-03 rho: 9.37e-01 mu: 1.97e+08 li:  1 it: 0.00e+00 tt: 0.00e+00
-   10: f: 7.333425e-11 d: 1.10e-09 g: 1.49e-07 h: 2.40e-03 rho: 9.37e-01 mu: 5.90e+08 li:  1 it: 0.00e+00 tt: 0.00e+00
-   11: f: 4.584044e-12 d: 6.88e-11 g: 1.86e-08 h: 1.20e-03 rho: 9.37e-01 mu: 1.77e+09 li:  1 it: 0.00e+00 tt: 0.00e+00
- Ceres Solver Report: Iterations: 12, Initial cost: 1.075000e+02, Final cost: 4.584044e-12, Termination: GRADIENT_TOLERANCE.
- Final x1 = 0.00116741, x2 = -0.000116741, x3 = 0.000190535, x4 = 0.000190535
+    Initial x1 = 3, x2 = -1, x3 = 0, x4 = 1
+    iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time
+       0  1.075000e+02    0.00e+00    1.55e+02   0.00e+00   0.00e+00  1.00e+04       0    4.95e-04    2.30e-03
+       1  5.036190e+00    1.02e+02    2.00e+01   2.16e+00   9.53e-01  3.00e+04       1    4.39e-05    2.40e-03
+       2  3.148168e-01    4.72e+00    2.50e+00   6.23e-01   9.37e-01  9.00e+04       1    9.06e-06    2.43e-03
+       3  1.967760e-02    2.95e-01    3.13e-01   3.08e-01   9.37e-01  2.70e+05       1    8.11e-06    2.45e-03
+       4  1.229900e-03    1.84e-02    3.91e-02   1.54e-01   9.37e-01  8.10e+05       1    6.91e-06    2.48e-03
+       5  7.687123e-05    1.15e-03    4.89e-03   7.69e-02   9.37e-01  2.43e+06       1    7.87e-06    2.50e-03
+       6  4.804625e-06    7.21e-05    6.11e-04   3.85e-02   9.37e-01  7.29e+06       1    5.96e-06    2.52e-03
+       7  3.003028e-07    4.50e-06    7.64e-05   1.92e-02   9.37e-01  2.19e+07       1    5.96e-06    2.55e-03
+       8  1.877006e-08    2.82e-07    9.54e-06   9.62e-03   9.37e-01  6.56e+07       1    5.96e-06    2.57e-03
+       9  1.173223e-09    1.76e-08    1.19e-06   4.81e-03   9.37e-01  1.97e+08       1    7.87e-06    2.60e-03
+      10  7.333425e-11    1.10e-09    1.49e-07   2.40e-03   9.37e-01  5.90e+08       1    6.20e-06    2.63e-03
+      11  4.584044e-12    6.88e-11    1.86e-08   1.20e-03   9.37e-01  1.77e+09       1    6.91e-06    2.65e-03
+      12  2.865573e-13    4.30e-12    2.33e-09   6.02e-04   9.37e-01  5.31e+09       1    5.96e-06    2.67e-03
+      13  1.791438e-14    2.69e-13    2.91e-10   3.01e-04   9.37e-01  1.59e+10       1    7.15e-06    2.69e-03
+
+    Ceres Solver v1.10.0 Solve Report
+    ----------------------------------
+                                         Original                  Reduced
+    Parameter blocks                            4                        4
+    Parameters                                  4                        4
+    Residual blocks                             4                        4
+    Residual                                    4                        4
+
+    Minimizer                        TRUST_REGION
+
+    Dense linear algebra library            EIGEN
+    Trust region strategy     LEVENBERG_MARQUARDT
+
+                                            Given                     Used
+    Linear solver                        DENSE_QR                 DENSE_QR
+    Threads                                     1                        1
+    Linear solver threads                       1                        1
+
+    Cost:
+    Initial                          1.075000e+02
+    Final                            1.791438e-14
+    Change                           1.075000e+02
+
+    Minimizer iterations                       14
+    Successful steps                           14
+    Unsuccessful steps                          0
+
+    Time (in seconds):
+    Preprocessor                            0.002
+
+      Residual evaluation                   0.000
+      Jacobian evaluation                   0.000
+      Linear solver                         0.000
+    Minimizer                               0.001
+
+    Postprocessor                           0.000
+    Total                                   0.005
+
+    Termination:                      CONVERGENCE (Gradient tolerance reached. Gradient max norm: 3.642190e-11 <= 1.000000e-10)
+
+    Final x1 = 0.000292189, x2 = -2.92189e-05, x3 = 4.79511e-05, x4 = 4.79511e-05
 
 It is easy to see that the optimal solution to this problem is at
 :math:`x_1=0, x_2=0, x_3=0, x_4=0` with an objective function value of
@@ -447,24 +505,24 @@
 
 .. code-block:: bash
 
-    0: f: 1.211734e+02 d: 0.00e+00 g: 3.61e+02 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li:  0 it: 0.00e+00 tt: 0.00e+00
-    1: f: 1.211734e+02 d:-2.21e+03 g: 3.61e+02 h: 7.52e-01 rho:-1.87e+01 mu: 5.00e+03 li:  1 it: 0.00e+00 tt: 0.00e+00
-    2: f: 1.211734e+02 d:-2.21e+03 g: 3.61e+02 h: 7.51e-01 rho:-1.86e+01 mu: 1.25e+03 li:  1 it: 0.00e+00 tt: 0.00e+00
-    3: f: 1.211734e+02 d:-2.19e+03 g: 3.61e+02 h: 7.48e-01 rho:-1.85e+01 mu: 1.56e+02 li:  1 it: 0.00e+00 tt: 0.00e+00
-    4: f: 1.211734e+02 d:-2.02e+03 g: 3.61e+02 h: 7.22e-01 rho:-1.70e+01 mu: 9.77e+00 li:  1 it: 0.00e+00 tt: 0.00e+00
-    5: f: 1.211734e+02 d:-7.34e+02 g: 3.61e+02 h: 5.78e-01 rho:-6.32e+00 mu: 3.05e-01 li:  1 it: 0.00e+00 tt: 0.00e+00
-    6: f: 3.306595e+01 d: 8.81e+01 g: 4.10e+02 h: 3.18e-01 rho: 1.37e+00 mu: 9.16e-01 li:  1 it: 0.00e+00 tt: 0.00e+00
-    7: f: 6.426770e+00 d: 2.66e+01 g: 1.81e+02 h: 1.29e-01 rho: 1.10e+00 mu: 2.75e+00 li:  1 it: 0.00e+00 tt: 0.00e+00
-    8: f: 3.344546e+00 d: 3.08e+00 g: 5.51e+01 h: 3.05e-02 rho: 1.03e+00 mu: 8.24e+00 li:  1 it: 0.00e+00 tt: 0.00e+00
-    9: f: 1.987485e+00 d: 1.36e+00 g: 2.33e+01 h: 8.87e-02 rho: 9.94e-01 mu: 2.47e+01 li:  1 it: 0.00e+00 tt: 0.00e+00
-   10: f: 1.211585e+00 d: 7.76e-01 g: 8.22e+00 h: 1.05e-01 rho: 9.89e-01 mu: 7.42e+01 li:  1 it: 0.00e+00 tt: 0.00e+00
-   11: f: 1.063265e+00 d: 1.48e-01 g: 1.44e+00 h: 6.06e-02 rho: 9.97e-01 mu: 2.22e+02 li:  1 it: 0.00e+00 tt: 0.00e+00
-   12: f: 1.056795e+00 d: 6.47e-03 g: 1.18e-01 h: 1.47e-02 rho: 1.00e+00 mu: 6.67e+02 li:  1 it: 0.00e+00 tt: 0.00e+00
-   13: f: 1.056751e+00 d: 4.39e-05 g: 3.79e-03 h: 1.28e-03 rho: 1.00e+00 mu: 2.00e+03 li:  1 it: 0.00e+00 tt: 0.00e+00
- Ceres Solver Report: Iterations: 13, Initial cost: 1.211734e+02, Final cost: 1.056751e+00, Termination: FUNCTION_TOLERANCE.
- Initial m: 0 c: 0
- Final   m: 0.291861 c: 0.131439
-
+    iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time
+       0  1.211734e+02    0.00e+00    3.61e+02   0.00e+00   0.00e+00  1.00e+04       0    5.34e-04    2.56e-03
+       1  1.211734e+02   -2.21e+03    0.00e+00   7.52e-01  -1.87e+01  5.00e+03       1    4.29e-05    3.25e-03
+       2  1.211734e+02   -2.21e+03    0.00e+00   7.51e-01  -1.86e+01  1.25e+03       1    1.10e-05    3.28e-03
+       3  1.211734e+02   -2.19e+03    0.00e+00   7.48e-01  -1.85e+01  1.56e+02       1    1.41e-05    3.31e-03
+       4  1.211734e+02   -2.02e+03    0.00e+00   7.22e-01  -1.70e+01  9.77e+00       1    1.00e-05    3.34e-03
+       5  1.211734e+02   -7.34e+02    0.00e+00   5.78e-01  -6.32e+00  3.05e-01       1    1.00e-05    3.36e-03
+       6  3.306595e+01    8.81e+01    4.10e+02   3.18e-01   1.37e+00  9.16e-01       1    2.79e-05    3.41e-03
+       7  6.426770e+00    2.66e+01    1.81e+02   1.29e-01   1.10e+00  2.75e+00       1    2.10e-05    3.45e-03
+       8  3.344546e+00    3.08e+00    5.51e+01   3.05e-02   1.03e+00  8.24e+00       1    2.10e-05    3.48e-03
+       9  1.987485e+00    1.36e+00    2.33e+01   8.87e-02   9.94e-01  2.47e+01       1    2.10e-05    3.52e-03
+      10  1.211585e+00    7.76e-01    8.22e+00   1.05e-01   9.89e-01  7.42e+01       1    2.10e-05    3.56e-03
+      11  1.063265e+00    1.48e-01    1.44e+00   6.06e-02   9.97e-01  2.22e+02       1    2.60e-05    3.61e-03
+      12  1.056795e+00    6.47e-03    1.18e-01   1.47e-02   1.00e+00  6.67e+02       1    2.10e-05    3.64e-03
+      13  1.056751e+00    4.39e-05    3.79e-03   1.28e-03   1.00e+00  2.00e+03       1    2.10e-05    3.68e-03
+    Ceres Solver Report: Iterations: 13, Initial cost: 1.211734e+02, Final cost: 1.056751e+00, Termination: CONVERGENCE
+    Initial m: 0 c: 0
+    Final   m: 0.291861 c: 0.131439
 
 Starting from parameter values :math:`m = 0, c=0` with an initial
 objective function value of :math:`121.173` Ceres finds a solution
@@ -635,10 +693,9 @@
  ceres::Problem problem;
  for (int i = 0; i < bal_problem.num_observations(); ++i) {
    ceres::CostFunction* cost_function =
-       new ceres::AutoDiffCostFunction<SnavelyReprojectionError, 2, 9, 3>(
-           new SnavelyReprojectionError(
-               bal_problem.observations()[2 * i + 0],
-               bal_problem.observations()[2 * i + 1]));
+       SnavelyReprojectionError::Create(
+            bal_problem.observations()[2 * i + 0],
+            bal_problem.observations()[2 * i + 1]);
    problem.AddResidualBlock(cost_function,
                             NULL /* squared loss */,
                             bal_problem.mutable_camera_for_observation(i),
@@ -713,5 +770,3 @@
 #. `libmv_bundle_adjuster.cc
    <https://ceres-solver.googlesource.com/ceres-solver/+/master/examples/libmv_bundle_adjuster.cc>`_
    is the bundle adjustment algorithm used by `Blender <www.blender.org>`_/libmv.
-
-
diff --git a/docs/source/version_history.rst b/docs/source/version_history.rst
index f9bc273..a52ab30 100644
--- a/docs/source/version_history.rst
+++ b/docs/source/version_history.rst
@@ -1,8 +1,217 @@
 .. _chapter-version-history:
 
-===============
-Version History
-===============
+========
+Releases
+========
+
+HEAD
+====
+
+#. Added ``Solver::Options::IsValid`` which allows users to validate
+   their solver configuration before calling ``Solve``.
+
+#. Added ``EIGEN_SPARSE_QR`` algorithm for covariance estimation using
+   ``Eigen``'s sparse QR factorization. (Michael Vitus)
+
+Backward Incompatible API Changes
+---------------------------------
+
+#. ``Solver::Options::solver_log`` has been removed. If needed this
+   iteration callback can easily be implemented in user code.
+
+#. The ``SPARSE_CHOLESKY`` algorithm for covariance estimation has
+   been removed. It is not rank revealing and numerically poorly
+   behaved. Sparse QR factorization is a much better way to do this.
+
+#. The ``SPARSE_QR`` algorithm for covariance estimation has been
+   renamed to ``SUITE_SPARSE_QR`` to be consistent with
+   ``EIGEN_SPARSE_QR``.
+
+
+1.9.0
+=====
+
+New Features
+------------
+
+#. Bounds constraints: Support for upper and/or lower bounds on
+   parameters when using the trust region minimizer.
+#. Dynamic Sparsity: Problems in which the sparsity structure of the
+   Jacobian changes over the course of the optimization can now be
+   solved much more efficiently. (Richard Stebbing)
+#. Improved support for Microsoft Visual C++ including the ability to
+   build and ship DLLs. (Björn Piltz, Alex Stewart and Sergey
+   Sharybin)
+#. Support for building on iOS 6.0 or higher (Jack Feng).
+#. Autogeneration of config.h that captures all the defines used to
+   build and use Ceres Solver.
+#. Simpler and more informative solver termination type
+   reporting. (See below for more details)
+#. New `website <http://www.ceres-solver.org>`_ based entirely on
+   Sphinx.
+#. ``AutoDiffLocalParameterization`` allows the use of automatic
+   differentiation for defining ``LocalParameterization`` objects
+   (Alex Stewart)
+#. LBFGS is faster due to fewer memory copies.
+#. Parameter blocks are not restricted to be less than 32k in size,
+   they can be up to 2G in size.
+#. Faster ``SPARSE_NORMAL_CHOLESKY`` solver when using ``CX_SPARSE``
+   as the sparse linear algebra library.
+#. Added ``Problem::IsParameterBlockPresent`` and
+   ``Problem::GetParameterization``.
+#. Added the (2,4,9) and (2,4,8) template specializations.
+#. An example demonstrating the use of
+   DynamicAutoDiffCostFunction. (Joydeep Biswas)
+#. Homography estimation example from Blender demonstrating the use of
+   a custom ``IterationCallback``. (Sergey Sharybin)
+#. Support user passing a custom CMAKE_MODULE_PATH (for BLAS /
+   LAPACK).
+
+Backward Incompatible API Changes
+---------------------------------
+
+#. ``Solver::Options::linear_solver_ordering`` used to be a naked
+   pointer that Ceres took ownership of. This is error prone behaviour
+   which leads to problems when copying the ``Solver::Options`` struct
+   around. This has been replaced with a ``shared_ptr`` to handle
+   ownership correctly across copies.
+
+#. The enum used for reporting the termination/convergence status of
+   the solver has been renamed from ``SolverTerminationType`` to
+   ``TerminationType``.
+
+   The enum values have also changed. ``FUNCTION_TOLERANCE``,
+   ``GRADIENT_TOLERANCE`` and ``PARAMETER_TOLERANCE`` have all been
+   replaced by ``CONVERGENCE``.
+
+   ``NUMERICAL_FAILURE`` has been replaed by ``FAILURE``.
+
+   ``USER_ABORT`` has been renamed to ``USER_FAILURE``.
+
+   Further ``Solver::Summary::error`` has been renamed to
+   ``Solver::Summary::message``. It contains a more detailed
+   explanation for why the solver terminated.
+
+#. ``Solver::Options::gradient_tolerance`` used to be a relative
+   gradient tolerance. i.e., The solver converged when
+
+   .. math::
+      \|g(x)\|_\infty < \text{gradient_tolerance} * \|g(x_0)\|_\infty
+
+   where :math:`g(x)` is the gradient of the objective function at
+   :math:`x` and :math:`x_0` is the parmeter vector at the start of
+   the optimization.
+
+   This has changed to an absolute tolerance, i.e. the solver
+   converges when
+
+   .. math::
+      \|g(x)\|_\infty < \text{gradient_tolerance}
+
+#. Ceres cannot be built without the line search minimizer
+   anymore. Thus the preprocessor define
+   ``CERES_NO_LINE_SEARCH_MINIMIZER`` has been removed.
+
+Bug Fixes
+---------
+
+#. Disabled warning C4251. (Björn Piltz)
+#. Do not propagate 3d party libs through
+   `IMPORTED_LINK_INTERFACE_LIBRARIES_[DEBUG/RELEASE]` mechanism when
+   building shared libraries. (Björn Piltz)
+#. Fixed errant verbose levels (Björn Piltz)
+#. Variety of code cleanups, optimizations and bug fixes to the line
+   search minimizer code (Alex Stewart)
+#. Fixed ``BlockSparseMatrix::Transpose`` when the matrix has row and
+   column blocks. (Richard Bowen)
+#. Better error checking when ``Problem::RemoveResidualBlock`` is
+   called. (Alex Stewart)
+#. Fixed a memory leak in ``SchurComplementSolver``.
+#. Added ``epsilon()`` method to ``NumTraits<ceres::Jet<T, N> >``. (Filippo
+   Basso)
+#. Fixed a bug in `CompressedRowSparseMatrix::AppendRows`` and
+   ``DeleteRows``.q
+#. Handle empty problems consistently.
+#. Restore the state of the ``Problem`` after a call to
+   ``Problem::Evaluate``. (Stefan Leutenegger)
+#. Better error checking and reporting for linear solvers.
+#. Use explicit formula to solve quadratic polynomials instead of the
+   eigenvalue solver.
+#. Fix constant parameter handling in inner iterations (Mikael
+   Persson).
+#. SuiteSparse errors do not cause a fatal crash anymore.
+#. Fix ``corrector_test.cc``.
+#. Relax the requirements on loss function derivatives.
+#. Minor bugfix to logging.h (Scott Ettinger)
+#. Updated ``gmock`` and ``gtest`` to the latest upstream version.
+#. Fix build breakage on old versions of SuiteSparse.
+#. Fixed build issues related to Clang / LLVM 3.4 (Johannes
+   Schönberger)
+#. METIS_FOUND is never set. Changed the commit to fit the setting of
+   the other #._FOUND definitions. (Andreas Franek)
+#. Variety of bug fixes and cleanups to the ``CMake`` build system
+   (Alex Stewart)
+#. Removed fictious shared library target from the NDK build.
+#. Solver::Options now uses ``shared_ptr`` to handle ownership of
+   ``Solver::Options::linear_solver_ordering`` and
+   ``Solver::Options::inner_iteration_ordering``. As a consequence the
+   ``NDK`` build now depends on ``libc++`` from the ``LLVM`` project.
+#. Variety of lint cleanups (William Rucklidge & Jim Roseborough)
+#. Various internal cleanups including dead code removal.
+
+
+1.8.0
+=====
+
+New Features
+------------
+#. Significant improved ``CMake`` files with better robustness,
+   dependency checking and GUI support. (Alex Stewart)
+#. Added ``DynamicNumericDiffCostFunction`` for numerically
+   differentiated cost functions whose sizing is determined at run
+   time.
+#. ``NumericDiffCostFunction`` now supports a dynamic number of
+   residuals just like ``AutoDiffCostFunction``.
+#. ``Problem`` exposes more of its structure in its API.
+#. Faster automatic differentiation (Tim Langlois)
+#. Added the commonly occuring ``2_d_d`` template specialization for
+   the Schur Eliminator.
+#. Faster ``ITERATIVE_SCHUR`` solver using template specializations.
+#. Faster ``SCHUR_JACOBI`` preconditioner construction.
+#. Faster ``AngleAxisRotatePoint``.
+#. Faster Jacobian evaluation when a loss function is used.
+#. Added support for multiple clustering algorithms in visibility
+   based preconditioning, including a new fast single linkage
+   clustering algorithm.
+
+Bug Fixes
+---------
+#. Fix ordering of ParseCommandLineFlags() & InitGoogleTest() for
+   Windows. (Alex Stewart)
+#. Remove DCHECK_GE checks from fixed_array.h.
+#. Fix build on MSVC 2013 (Petter Strandmark)
+#. Fixed ``AngleAxisToRotationMatrix`` near zero.
+#. Move ``CERES_HASH_NAMESPACE`` macros to ``collections_port.h``.
+#. Fix handling of unordered_map/unordered_set on OSX 10.9.0.
+#. Explicitly link to libm for ``curve_fitting_c.c``. (Alex Stewart)
+#. Minor type conversion fix to autodiff.h
+#. Remove RuntimeNumericDiffCostFunction.
+#. Fix operator= ambiguity on some versions of Clang. (Alex Stewart)
+#. Various Lint cleanups (William Rucklidge & Jim Roseborough)
+#. Modified installation folders for Windows. (Pablo Speciale)
+#. Added librt to link libraries for SuiteSparse_config on Linux. (Alex Stewart)
+#. Check for presence of return-type-c-linkage option with
+   Clang. (Alex Stewart)
+#. Fix Problem::RemoveParameterBlock after calling solve. (Simon Lynen)
+#. Fix a free/delete bug in covariance_impl.cc
+#. Fix two build errors. (Dustin Lang)
+#. Add RequireInitialization = 1 to NumTraits::Jet.
+#. Update gmock/gtest to 1.7.0
+#. Added IterationSummary::gradient_norm.
+#. Reduced verbosity of the inner iteration minimizer.
+#. Fixed a bug in TrustRegionMinimizer. (Michael Vitus)
+#. Removed android/build_android.sh.
+
 
 1.7.0
 =====
@@ -35,7 +244,10 @@
 #. Add BlockRandomAccessCRSMatrix.
 #. Speeded up automatic differentiation by 7\%.
 #. Bundle adjustment example from libmv/Blender (Sergey Sharybin)
-#. Add the ability to turn shared library compilation on and off
+#. Shared library building is now controlled by CMake, rather than a custom
+   solution. Previously, Ceres had a custom option, but this is now deprecated
+   in favor of CMake's built in support for switching between static and
+   shared. Turn on BUILD_SHARED_LIBS to get shared Ceres libraries.
 #. No more dependence on Protocol Buffers.
 #. Incomplete LQ factorization.
 #. Ability to write trust region problems to disk.
@@ -96,6 +308,7 @@
 #. Fix a reallocation bug in
    ``CreateJacobianBlockSparsityTranspose``. (Yuliy Schwartzburg)
 #. Add a define for O_BINARY.
+#. Fix miniglog-based Android NDK build; now works with NDK r9. (Scott Ettinger)
 
 
 1.6.0
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 9dfc80b..dbbcb81 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -28,6 +28,13 @@
 #
 # Author: keir@google.com (Keir Mierle)
 
+# Only Ceres itself should be compiled with CERES_BUILDING_SHARED_LIBRARY
+# defined, any users of Ceres will have CERES_USING_SHARED_LIBRARY defined
+# for them in Ceres' config.h if appropriate.
+IF (BUILD_SHARED_LIBS)
+  REMOVE_DEFINITIONS(-DCERES_BUILDING_SHARED_LIBRARY)
+ENDIF()
+
 ADD_EXECUTABLE(helloworld helloworld.cc)
 TARGET_LINK_LIBRARIES(helloworld ceres)
 
@@ -42,7 +49,15 @@
 
 ADD_EXECUTABLE(curve_fitting_c curve_fitting.c)
 TARGET_LINK_LIBRARIES(curve_fitting_c ceres)
+# As this is a C file #including <math.h> we have to explicitly add the math
+# library (libm). Although some compilers (dependent upon options) will accept
+# the indirect link to libm via Ceres, at least GCC 4.8 on pure Debian won't.
+IF (NOT MSVC)
+  TARGET_LINK_LIBRARIES(curve_fitting_c m)
+ENDIF (NOT MSVC)
 
+ADD_EXECUTABLE(ellipse_approximation ellipse_approximation.cc)
+TARGET_LINK_LIBRARIES(ellipse_approximation ceres)
 
 ADD_EXECUTABLE(robust_curve_fitting robust_curve_fitting.cc)
 TARGET_LINK_LIBRARIES(robust_curve_fitting ceres)
@@ -53,26 +68,37 @@
 
 IF (GFLAGS)
   ADD_EXECUTABLE(powell powell.cc)
-  TARGET_LINK_LIBRARIES(powell ceres)
+  TARGET_LINK_LIBRARIES(powell ceres ${GFLAGS_LIBRARIES})
 
   ADD_EXECUTABLE(nist nist.cc)
-  TARGET_LINK_LIBRARIES(nist ceres)
+  TARGET_LINK_LIBRARIES(nist ceres ${GFLAGS_LIBRARIES})
+
+  ADD_EXECUTABLE(more_garbow_hillstrom more_garbow_hillstrom.cc)
+  TARGET_LINK_LIBRARIES(more_garbow_hillstrom ceres ${GFLAGS_LIBRARIES})
 
   ADD_EXECUTABLE(circle_fit circle_fit.cc)
-  TARGET_LINK_LIBRARIES(circle_fit ceres)
+  TARGET_LINK_LIBRARIES(circle_fit ceres ${GFLAGS_LIBRARIES})
 
   ADD_EXECUTABLE(bundle_adjuster
                  bundle_adjuster.cc
                  bal_problem.cc)
-  TARGET_LINK_LIBRARIES(bundle_adjuster ceres)
+  TARGET_LINK_LIBRARIES(bundle_adjuster ceres ${GFLAGS_LIBRARIES})
 
   ADD_EXECUTABLE(libmv_bundle_adjuster
                  libmv_bundle_adjuster.cc)
-  TARGET_LINK_LIBRARIES(libmv_bundle_adjuster ceres)
+  TARGET_LINK_LIBRARIES(libmv_bundle_adjuster ceres ${GFLAGS_LIBRARIES})
+
+  ADD_EXECUTABLE(libmv_homography
+                 libmv_homography.cc)
+  TARGET_LINK_LIBRARIES(libmv_homography ceres ${GFLAGS_LIBRARIES})
 
   ADD_EXECUTABLE(denoising
                  denoising.cc
                  fields_of_experts.cc)
-  TARGET_LINK_LIBRARIES(denoising ceres)
-ENDIF (GFLAGS)
+  TARGET_LINK_LIBRARIES(denoising ceres ${GFLAGS_LIBRARIES})
 
+  ADD_EXECUTABLE(robot_pose_mle
+                 robot_pose_mle.cc)
+  TARGET_LINK_LIBRARIES(robot_pose_mle ceres ${GFLAGS_LIBRARIES})
+
+ENDIF (GFLAGS)
diff --git a/examples/bal_problem.cc b/examples/bal_problem.cc
index c118f88..40d0fdf 100644
--- a/examples/bal_problem.cc
+++ b/examples/bal_problem.cc
@@ -37,6 +37,7 @@
 #include "Eigen/Core"
 #include "ceres/rotation.h"
 #include "glog/logging.h"
+#include "random.h"
 
 namespace ceres {
 namespace examples {
@@ -44,25 +45,6 @@
 typedef Eigen::Map<Eigen::VectorXd> VectorRef;
 typedef Eigen::Map<const Eigen::VectorXd> ConstVectorRef;
 
-inline double RandDouble() {
-  double r = static_cast<double>(rand());
-  return r / RAND_MAX;
-}
-
-// Box-Muller algorithm for normal random number generation.
-// http://en.wikipedia.org/wiki/Box-Muller_transform
-inline double RandNormal() {
-  double x1, x2, w;
-  do {
-    x1 = 2.0 * RandDouble() - 1.0;
-    x2 = 2.0 * RandDouble() - 1.0;
-    w = x1 * x1 + x2 * x2;
-  } while ( w >= 1.0 || w == 0.0 );
-
-  w = sqrt((-2.0 * log(w)) / w);
-  return x1 * w;
-}
-
 template<typename T>
 void FscanfOrDie(FILE* fptr, const char* format, T* value) {
   int num_scanned = fscanf(fptr, format, value);
diff --git a/examples/bundle_adjuster.cc b/examples/bundle_adjuster.cc
index 224ad74..296611f 100644
--- a/examples/bundle_adjuster.cc
+++ b/examples/bundle_adjuster.cc
@@ -82,6 +82,9 @@
 DEFINE_string(preconditioner, "jacobi", "Options are: "
               "identity, jacobi, schur_jacobi, cluster_jacobi, "
               "cluster_tridiagonal.");
+DEFINE_string(visibility_clustering, "canonical_views",
+              "single_linkage, canonical_views");
+
 DEFINE_string(sparse_linear_algebra_library, "suite_sparse",
               "Options are: suite_sparse and cx_sparse.");
 DEFINE_string(dense_linear_algebra_library, "eigen",
@@ -113,7 +116,6 @@
 DEFINE_int32(random_seed, 38401, "Random seed used to set the state "
              "of the pseudo random number generator used to generate "
              "the pertubations.");
-DEFINE_string(solver_log, "", "File to record the solver execution to.");
 DEFINE_bool(line_search, false, "Use a line search instead of trust region "
             "algorithm.");
 
@@ -125,6 +127,8 @@
                                  &options->linear_solver_type));
   CHECK(StringToPreconditionerType(FLAGS_preconditioner,
                                    &options->preconditioner_type));
+  CHECK(StringToVisibilityClusteringType(FLAGS_visibility_clustering,
+                                         &options->visibility_clustering_type));
   CHECK(StringToSparseLinearAlgebraLibraryType(
             FLAGS_sparse_linear_algebra_library,
             &options->sparse_linear_algebra_library_type));
@@ -146,19 +150,19 @@
   if (options->use_inner_iterations) {
     if (FLAGS_blocks_for_inner_iterations == "cameras") {
       LOG(INFO) << "Camera blocks for inner iterations";
-      options->inner_iteration_ordering = new ParameterBlockOrdering;
+      options->inner_iteration_ordering.reset(new ParameterBlockOrdering);
       for (int i = 0; i < num_cameras; ++i) {
         options->inner_iteration_ordering->AddElementToGroup(cameras + camera_block_size * i, 0);
       }
     } else if (FLAGS_blocks_for_inner_iterations == "points") {
       LOG(INFO) << "Point blocks for inner iterations";
-      options->inner_iteration_ordering = new ParameterBlockOrdering;
+      options->inner_iteration_ordering.reset(new ParameterBlockOrdering);
       for (int i = 0; i < num_points; ++i) {
         options->inner_iteration_ordering->AddElementToGroup(points + point_block_size * i, 0);
       }
     } else if (FLAGS_blocks_for_inner_iterations == "cameras,points") {
       LOG(INFO) << "Camera followed by point blocks for inner iterations";
-      options->inner_iteration_ordering = new ParameterBlockOrdering;
+      options->inner_iteration_ordering.reset(new ParameterBlockOrdering);
       for (int i = 0; i < num_cameras; ++i) {
         options->inner_iteration_ordering->AddElementToGroup(cameras + camera_block_size * i, 0);
       }
@@ -167,7 +171,7 @@
       }
     } else if (FLAGS_blocks_for_inner_iterations == "points,cameras") {
       LOG(INFO) << "Point followed by camera blocks for inner iterations";
-      options->inner_iteration_ordering = new ParameterBlockOrdering;
+      options->inner_iteration_ordering.reset(new ParameterBlockOrdering);
       for (int i = 0; i < num_cameras; ++i) {
         options->inner_iteration_ordering->AddElementToGroup(cameras + camera_block_size * i, 1);
       }
@@ -216,7 +220,7 @@
     }
   }
 
-  options->linear_solver_ordering = ordering;
+  options->linear_solver_ordering.reset(ordering);
 }
 
 void SetMinimizerOptions(Solver::Options* options) {
@@ -258,18 +262,14 @@
     CostFunction* cost_function;
     // Each Residual block takes a point and a camera as input and
     // outputs a 2 dimensional residual.
-    if (FLAGS_use_quaternions) {
-      cost_function = new AutoDiffCostFunction<
-          SnavelyReprojectionErrorWithQuaternions, 2, 4, 6, 3>(
-              new SnavelyReprojectionErrorWithQuaternions(
-                  observations[2 * i + 0],
-                  observations[2 * i + 1]));
-    } else {
-      cost_function =
-          new AutoDiffCostFunction<SnavelyReprojectionError, 2, 9, 3>(
-              new SnavelyReprojectionError(observations[2 * i + 0],
-                                           observations[2 * i + 1]));
-    }
+    cost_function =
+        (FLAGS_use_quaternions)
+        ? SnavelyReprojectionErrorWithQuaternions::Create(
+            observations[2 * i + 0],
+            observations[2 * i + 1])
+        : SnavelyReprojectionError::Create(
+            observations[2 * i + 0],
+            observations[2 * i + 1]);
 
     // If enabled use Huber's loss function.
     LossFunction* loss_function = FLAGS_robustify ? new HuberLoss(1.0) : NULL;
@@ -319,7 +319,6 @@
   BuildProblem(&bal_problem, &problem);
   Solver::Options options;
   SetSolverOptionsFromFlags(&bal_problem, &options);
-  options.solver_log = FLAGS_solver_log;
   options.gradient_tolerance = 1e-16;
   options.function_tolerance = 1e-16;
   Solver::Summary summary;
diff --git a/examples/data_fitting.cc b/examples/data_fitting.cc
deleted file mode 100644
index 5d54123..0000000
--- a/examples/data_fitting.cc
+++ /dev/null
@@ -1,165 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-
-#include "ceres/ceres.h"
-#include "gflags/gflags.h"
-
-using ceres::AutoDiffCostFunction;
-using ceres::CostFunction;
-using ceres::Problem;
-using ceres::Solver;
-using ceres::Solve;
-
-// Data generated using the following octave code.
-//   randn('seed', 23497);
-//   m = 0.3;
-//   c = 0.1;
-//   x=[0:0.075:5];
-//   y = exp(m * x + c);
-//   noise = randn(size(x)) * 0.2;
-//   y_observed = y + noise;
-//   data = [x', y_observed'];
-
-const int kNumObservations = 67;
-const double data[] = {
-  0.000000e+00, 1.133898e+00,
-  7.500000e-02, 1.334902e+00,
-  1.500000e-01, 1.213546e+00,
-  2.250000e-01, 1.252016e+00,
-  3.000000e-01, 1.392265e+00,
-  3.750000e-01, 1.314458e+00,
-  4.500000e-01, 1.472541e+00,
-  5.250000e-01, 1.536218e+00,
-  6.000000e-01, 1.355679e+00,
-  6.750000e-01, 1.463566e+00,
-  7.500000e-01, 1.490201e+00,
-  8.250000e-01, 1.658699e+00,
-  9.000000e-01, 1.067574e+00,
-  9.750000e-01, 1.464629e+00,
-  1.050000e+00, 1.402653e+00,
-  1.125000e+00, 1.713141e+00,
-  1.200000e+00, 1.527021e+00,
-  1.275000e+00, 1.702632e+00,
-  1.350000e+00, 1.423899e+00,
-  1.425000e+00, 1.543078e+00,
-  1.500000e+00, 1.664015e+00,
-  1.575000e+00, 1.732484e+00,
-  1.650000e+00, 1.543296e+00,
-  1.725000e+00, 1.959523e+00,
-  1.800000e+00, 1.685132e+00,
-  1.875000e+00, 1.951791e+00,
-  1.950000e+00, 2.095346e+00,
-  2.025000e+00, 2.361460e+00,
-  2.100000e+00, 2.169119e+00,
-  2.175000e+00, 2.061745e+00,
-  2.250000e+00, 2.178641e+00,
-  2.325000e+00, 2.104346e+00,
-  2.400000e+00, 2.584470e+00,
-  2.475000e+00, 1.914158e+00,
-  2.550000e+00, 2.368375e+00,
-  2.625000e+00, 2.686125e+00,
-  2.700000e+00, 2.712395e+00,
-  2.775000e+00, 2.499511e+00,
-  2.850000e+00, 2.558897e+00,
-  2.925000e+00, 2.309154e+00,
-  3.000000e+00, 2.869503e+00,
-  3.075000e+00, 3.116645e+00,
-  3.150000e+00, 3.094907e+00,
-  3.225000e+00, 2.471759e+00,
-  3.300000e+00, 3.017131e+00,
-  3.375000e+00, 3.232381e+00,
-  3.450000e+00, 2.944596e+00,
-  3.525000e+00, 3.385343e+00,
-  3.600000e+00, 3.199826e+00,
-  3.675000e+00, 3.423039e+00,
-  3.750000e+00, 3.621552e+00,
-  3.825000e+00, 3.559255e+00,
-  3.900000e+00, 3.530713e+00,
-  3.975000e+00, 3.561766e+00,
-  4.050000e+00, 3.544574e+00,
-  4.125000e+00, 3.867945e+00,
-  4.200000e+00, 4.049776e+00,
-  4.275000e+00, 3.885601e+00,
-  4.350000e+00, 4.110505e+00,
-  4.425000e+00, 4.345320e+00,
-  4.500000e+00, 4.161241e+00,
-  4.575000e+00, 4.363407e+00,
-  4.650000e+00, 4.161576e+00,
-  4.725000e+00, 4.619728e+00,
-  4.800000e+00, 4.737410e+00,
-  4.875000e+00, 4.727863e+00,
-  4.950000e+00, 4.669206e+00,
-};
-
-class ExponentialResidual {
- public:
-  ExponentialResidual(double x, double y)
-      : x_(x), y_(y) {}
-
-  template <typename T> bool operator()(const T* const m,
-                                        const T* const c,
-                                        T* residual) const {
-    residual[0] = T(y_) - exp(m[0] * T(x_) + c[0]);
-    return true;
-  }
-
- private:
-  const double x_;
-  const double y_;
-};
-
-int main(int argc, char** argv) {
-  google::ParseCommandLineFlags(&argc, &argv, true);
-  google::InitGoogleLogging(argv[0]);
-
-  double m = 0.0;
-  double c = 0.0;
-
-  Problem problem;
-  for (int i = 0; i < kNumObservations; ++i) {
-    problem.AddResidualBlock(
-        new AutoDiffCostFunction<ExponentialResidual, 1, 1, 1>(
-            new ExponentialResidual(data[2 * i], data[2 * i + 1])),
-        NULL,
-        &m, &c);
-  }
-
-  Solver::Options options;
-  options.max_num_iterations = 25;
-  options.linear_solver_type = ceres::DENSE_QR;
-  options.minimizer_progress_to_stdout = true;
-
-  Solver::Summary summary;
-  Solve(options, &problem, &summary);
-  std::cout << summary.BriefReport() << "\n";
-  std::cout << "Initial m: " << 0.0 << " c: " << 0.0 << "\n";
-  std::cout << "Final   m: " << m << " c: " << c << "\n";
-  return 0;
-}
diff --git a/examples/ellipse_approximation.cc b/examples/ellipse_approximation.cc
new file mode 100644
index 0000000..a5bbe02
--- /dev/null
+++ b/examples/ellipse_approximation.cc
@@ -0,0 +1,451 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+//
+// This fits points randomly distributed on an ellipse with an approximate
+// line segment contour. This is done by jointly optimizing the control points
+// of the line segment contour along with the preimage positions for the data
+// points. The purpose of this example is to show an example use case for
+// dynamic_sparsity, and how it can benefit problems which are numerically
+// dense but dynamically sparse.
+
+#include <cmath>
+#include <vector>
+#include "ceres/ceres.h"
+#include "glog/logging.h"
+
+// Data generated with the following Python code.
+//   import numpy as np
+//   np.random.seed(1337)
+//   t = np.linspace(0.0, 2.0 * np.pi, 212, endpoint=False)
+//   t += 2.0 * np.pi * 0.01 * np.random.randn(t.size)
+//   theta = np.deg2rad(15)
+//   a, b = np.cos(theta), np.sin(theta)
+//   R = np.array([[a, -b],
+//                 [b, a]])
+//   Y = np.dot(np.c_[4.0 * np.cos(t), np.sin(t)], R.T)
+
+const int kYRows = 212;
+const int kYCols = 2;
+const double kYData[kYRows * kYCols] = {
+  +3.871364e+00, +9.916027e-01,
+  +3.864003e+00, +1.034148e+00,
+  +3.850651e+00, +1.072202e+00,
+  +3.868350e+00, +1.014408e+00,
+  +3.796381e+00, +1.153021e+00,
+  +3.857138e+00, +1.056102e+00,
+  +3.787532e+00, +1.162215e+00,
+  +3.704477e+00, +1.227272e+00,
+  +3.564711e+00, +1.294959e+00,
+  +3.754363e+00, +1.191948e+00,
+  +3.482098e+00, +1.322725e+00,
+  +3.602777e+00, +1.279658e+00,
+  +3.585433e+00, +1.286858e+00,
+  +3.347505e+00, +1.356415e+00,
+  +3.220855e+00, +1.378914e+00,
+  +3.558808e+00, +1.297174e+00,
+  +3.403618e+00, +1.343809e+00,
+  +3.179828e+00, +1.384721e+00,
+  +3.054789e+00, +1.398759e+00,
+  +3.294153e+00, +1.366808e+00,
+  +3.247312e+00, +1.374813e+00,
+  +2.988547e+00, +1.404247e+00,
+  +3.114508e+00, +1.392698e+00,
+  +2.899226e+00, +1.409802e+00,
+  +2.533256e+00, +1.414778e+00,
+  +2.654773e+00, +1.415909e+00,
+  +2.565100e+00, +1.415313e+00,
+  +2.976456e+00, +1.405118e+00,
+  +2.484200e+00, +1.413640e+00,
+  +2.324751e+00, +1.407476e+00,
+  +1.930468e+00, +1.378221e+00,
+  +2.329017e+00, +1.407688e+00,
+  +1.760640e+00, +1.360319e+00,
+  +2.147375e+00, +1.396603e+00,
+  +1.741989e+00, +1.358178e+00,
+  +1.743859e+00, +1.358394e+00,
+  +1.557372e+00, +1.335208e+00,
+  +1.280551e+00, +1.295087e+00,
+  +1.429880e+00, +1.317546e+00,
+  +1.213485e+00, +1.284400e+00,
+  +9.168172e-01, +1.232870e+00,
+  +1.311141e+00, +1.299839e+00,
+  +1.231969e+00, +1.287382e+00,
+  +7.453773e-01, +1.200049e+00,
+  +6.151587e-01, +1.173683e+00,
+  +5.935666e-01, +1.169193e+00,
+  +2.538707e-01, +1.094227e+00,
+  +6.806136e-01, +1.187089e+00,
+  +2.805447e-01, +1.100405e+00,
+  +6.184807e-01, +1.174371e+00,
+  +1.170550e-01, +1.061762e+00,
+  +2.890507e-01, +1.102365e+00,
+  +3.834234e-01, +1.123772e+00,
+  +3.980161e-04, +1.033061e+00,
+  -3.651680e-01, +9.370367e-01,
+  -8.386351e-01, +7.987201e-01,
+  -8.105704e-01, +8.073702e-01,
+  -8.735139e-01, +7.878886e-01,
+  -9.913836e-01, +7.506100e-01,
+  -8.784011e-01, +7.863636e-01,
+  -1.181440e+00, +6.882566e-01,
+  -1.229556e+00, +6.720191e-01,
+  -1.035839e+00, +7.362765e-01,
+  -8.031520e-01, +8.096470e-01,
+  -1.539136e+00, +5.629549e-01,
+  -1.755423e+00, +4.817306e-01,
+  -1.337589e+00, +6.348763e-01,
+  -1.836966e+00, +4.499485e-01,
+  -1.913367e+00, +4.195617e-01,
+  -2.126467e+00, +3.314900e-01,
+  -1.927625e+00, +4.138238e-01,
+  -2.339862e+00, +2.379074e-01,
+  -1.881736e+00, +4.322152e-01,
+  -2.116753e+00, +3.356163e-01,
+  -2.255733e+00, +2.754930e-01,
+  -2.555834e+00, +1.368473e-01,
+  -2.770277e+00, +2.895711e-02,
+  -2.563376e+00, +1.331890e-01,
+  -2.826715e+00, -9.000818e-04,
+  -2.978191e+00, -8.457804e-02,
+  -3.115855e+00, -1.658786e-01,
+  -2.982049e+00, -8.678322e-02,
+  -3.307892e+00, -2.902083e-01,
+  -3.038346e+00, -1.194222e-01,
+  -3.190057e+00, -2.122060e-01,
+  -3.279086e+00, -2.705777e-01,
+  -3.322028e+00, -2.999889e-01,
+  -3.122576e+00, -1.699965e-01,
+  -3.551973e+00, -4.768674e-01,
+  -3.581866e+00, -5.032175e-01,
+  -3.497799e+00, -4.315203e-01,
+  -3.565384e+00, -4.885602e-01,
+  -3.699493e+00, -6.199815e-01,
+  -3.585166e+00, -5.061925e-01,
+  -3.758914e+00, -6.918275e-01,
+  -3.741104e+00, -6.689131e-01,
+  -3.688331e+00, -6.077239e-01,
+  -3.810425e+00, -7.689015e-01,
+  -3.791829e+00, -7.386911e-01,
+  -3.789951e+00, -7.358189e-01,
+  -3.823100e+00, -7.918398e-01,
+  -3.857021e+00, -8.727074e-01,
+  -3.858250e+00, -8.767645e-01,
+  -3.872100e+00, -9.563174e-01,
+  -3.864397e+00, -1.032630e+00,
+  -3.846230e+00, -1.081669e+00,
+  -3.834799e+00, -1.102536e+00,
+  -3.866684e+00, -1.022901e+00,
+  -3.808643e+00, -1.139084e+00,
+  -3.868840e+00, -1.011569e+00,
+  -3.791071e+00, -1.158615e+00,
+  -3.797999e+00, -1.151267e+00,
+  -3.696278e+00, -1.232314e+00,
+  -3.779007e+00, -1.170504e+00,
+  -3.622855e+00, -1.270793e+00,
+  -3.647249e+00, -1.259166e+00,
+  -3.655412e+00, -1.255042e+00,
+  -3.573218e+00, -1.291696e+00,
+  -3.638019e+00, -1.263684e+00,
+  -3.498409e+00, -1.317750e+00,
+  -3.304143e+00, -1.364970e+00,
+  -3.183001e+00, -1.384295e+00,
+  -3.202456e+00, -1.381599e+00,
+  -3.244063e+00, -1.375332e+00,
+  -3.233308e+00, -1.377019e+00,
+  -3.060112e+00, -1.398264e+00,
+  -3.078187e+00, -1.396517e+00,
+  -2.689594e+00, -1.415761e+00,
+  -2.947662e+00, -1.407039e+00,
+  -2.854490e+00, -1.411860e+00,
+  -2.660499e+00, -1.415900e+00,
+  -2.875955e+00, -1.410930e+00,
+  -2.675385e+00, -1.415848e+00,
+  -2.813155e+00, -1.413363e+00,
+  -2.417673e+00, -1.411512e+00,
+  -2.725461e+00, -1.415373e+00,
+  -2.148334e+00, -1.396672e+00,
+  -2.108972e+00, -1.393738e+00,
+  -2.029905e+00, -1.387302e+00,
+  -2.046214e+00, -1.388687e+00,
+  -2.057402e+00, -1.389621e+00,
+  -1.650250e+00, -1.347160e+00,
+  -1.806764e+00, -1.365469e+00,
+  -1.206973e+00, -1.283343e+00,
+  -8.029259e-01, -1.211308e+00,
+  -1.229551e+00, -1.286993e+00,
+  -1.101507e+00, -1.265754e+00,
+  -9.110645e-01, -1.231804e+00,
+  -1.110046e+00, -1.267211e+00,
+  -8.465274e-01, -1.219677e+00,
+  -7.594163e-01, -1.202818e+00,
+  -8.023823e-01, -1.211203e+00,
+  -3.732519e-01, -1.121494e+00,
+  -1.918373e-01, -1.079668e+00,
+  -4.671988e-01, -1.142253e+00,
+  -4.033645e-01, -1.128215e+00,
+  -1.920740e-01, -1.079724e+00,
+  -3.022157e-01, -1.105389e+00,
+  -1.652831e-01, -1.073354e+00,
+  +4.671625e-01, -9.085886e-01,
+  +5.940178e-01, -8.721832e-01,
+  +3.147557e-01, -9.508290e-01,
+  +6.383631e-01, -8.591867e-01,
+  +9.888923e-01, -7.514088e-01,
+  +7.076339e-01, -8.386023e-01,
+  +1.326682e+00, -6.386698e-01,
+  +1.149834e+00, -6.988221e-01,
+  +1.257742e+00, -6.624207e-01,
+  +1.492352e+00, -5.799632e-01,
+  +1.595574e+00, -5.421766e-01,
+  +1.240173e+00, -6.684113e-01,
+  +1.706612e+00, -5.004442e-01,
+  +1.873984e+00, -4.353002e-01,
+  +1.985633e+00, -3.902561e-01,
+  +1.722880e+00, -4.942329e-01,
+  +2.095182e+00, -3.447402e-01,
+  +2.018118e+00, -3.768991e-01,
+  +2.422702e+00, -1.999563e-01,
+  +2.370611e+00, -2.239326e-01,
+  +2.152154e+00, -3.205250e-01,
+  +2.525121e+00, -1.516499e-01,
+  +2.422116e+00, -2.002280e-01,
+  +2.842806e+00, +9.536372e-03,
+  +3.030128e+00, +1.146027e-01,
+  +2.888424e+00, +3.433444e-02,
+  +2.991609e+00, +9.226409e-02,
+  +2.924807e+00, +5.445844e-02,
+  +3.007772e+00, +1.015875e-01,
+  +2.781973e+00, -2.282382e-02,
+  +3.164737e+00, +1.961781e-01,
+  +3.237671e+00, +2.430139e-01,
+  +3.046123e+00, +1.240014e-01,
+  +3.414834e+00, +3.669060e-01,
+  +3.436591e+00, +3.833600e-01,
+  +3.626207e+00, +5.444311e-01,
+  +3.223325e+00, +2.336361e-01,
+  +3.511963e+00, +4.431060e-01,
+  +3.698380e+00, +6.187442e-01,
+  +3.670244e+00, +5.884943e-01,
+  +3.558833e+00, +4.828230e-01,
+  +3.661807e+00, +5.797689e-01,
+  +3.767261e+00, +7.030893e-01,
+  +3.801065e+00, +7.532650e-01,
+  +3.828523e+00, +8.024454e-01,
+  +3.840719e+00, +8.287032e-01,
+  +3.848748e+00, +8.485921e-01,
+  +3.865801e+00, +9.066551e-01,
+  +3.870983e+00, +9.404873e-01,
+  +3.870263e+00, +1.001884e+00,
+  +3.864462e+00, +1.032374e+00,
+  +3.870542e+00, +9.996121e-01,
+  +3.865424e+00, +1.028474e+00
+};
+ceres::ConstMatrixRef kY(kYData, kYRows, kYCols);
+
+class PointToLineSegmentContourCostFunction : public ceres::CostFunction {
+ public:
+  PointToLineSegmentContourCostFunction(const int num_segments,
+                                        const Eigen::Vector2d y)
+      : num_segments_(num_segments), y_(y) {
+    // The first parameter is the preimage position.
+    mutable_parameter_block_sizes()->push_back(1);
+    // The next parameters are the control points for the line segment contour.
+    for (int i = 0; i < num_segments_; ++i) {
+      mutable_parameter_block_sizes()->push_back(2);
+    }
+    set_num_residuals(2);
+  }
+
+  virtual bool Evaluate(const double* const* x,
+                        double* residuals,
+                        double** jacobians) const {
+    // Convert the preimage position `t` into a segment index `i0` and the
+    // line segment interpolation parameter `u`. `i1` is the index of the next
+    // control point.
+    const double t = ModuloNumSegments(*x[0]);
+    CHECK_GE(t, 0.0);
+    CHECK_LT(t, num_segments_);
+    const int i0 = floor(t), i1 = (i0 + 1) % num_segments_;
+    const double u = t - i0;
+
+    // Linearly interpolate between control points `i0` and `i1`.
+    residuals[0] = y_[0] - ((1.0 - u) * x[1 + i0][0] + u * x[1 + i1][0]);
+    residuals[1] = y_[1] - ((1.0 - u) * x[1 + i0][1] + u * x[1 + i1][1]);
+
+    if (jacobians == NULL) {
+      return true;
+    }
+
+    if (jacobians[0] != NULL) {
+      jacobians[0][0] = x[1 + i0][0] - x[1 + i1][0];
+      jacobians[0][1] = x[1 + i0][1] - x[1 + i1][1];
+    }
+    for (int i = 0; i < num_segments_; ++i) {
+      if (jacobians[i + 1] != NULL) {
+        ceres::MatrixRef(jacobians[i + 1], 2, 2).setZero();
+        if (i == i0) {
+          jacobians[i + 1][0] = -(1.0 - u);
+          jacobians[i + 1][3] = -(1.0 - u);
+        } else if (i == i1) {
+          jacobians[i + 1][0] = -u;
+          jacobians[i + 1][3] = -u;
+        }
+      }
+    }
+    return true;
+  }
+
+  static ceres::CostFunction* Create(const int num_segments,
+                                     const Eigen::Vector2d y) {
+    return new PointToLineSegmentContourCostFunction(num_segments, y);
+  }
+
+ private:
+  inline double ModuloNumSegments(const double& t) const {
+    return t - num_segments_ * floor(t / num_segments_);
+  }
+
+  const int num_segments_;
+  const Eigen::Vector2d y_;
+};
+
+struct EuclideanDistanceFunctor {
+  EuclideanDistanceFunctor(const double& sqrt_weight)
+      : sqrt_weight_(sqrt_weight) {}
+
+  template <typename T>
+  bool operator()(const T* x0, const T* x1, T* residuals) const {
+    residuals[0] = T(sqrt_weight_) * (x0[0] - x1[0]);
+    residuals[1] = T(sqrt_weight_) * (x0[1] - x1[1]);
+    return true;
+  }
+
+  static ceres::CostFunction* Create(const double& sqrt_weight) {
+    return new ceres::AutoDiffCostFunction<EuclideanDistanceFunctor, 2, 2, 2>(
+        new EuclideanDistanceFunctor(sqrt_weight));
+  }
+
+ private:
+  const double sqrt_weight_;
+};
+
+bool SolveWithFullReport(ceres::Solver::Options options,
+                         ceres::Problem* problem,
+                         bool dynamic_sparsity) {
+  options.dynamic_sparsity = dynamic_sparsity;
+
+  ceres::Solver::Summary summary;
+  ceres::Solve(options, problem, &summary);
+
+  std::cout << "####################" << std::endl;
+  std::cout << "dynamic_sparsity = " << dynamic_sparsity << std::endl;
+  std::cout << "####################" << std::endl;
+  std::cout << summary.FullReport() << std::endl;
+
+  return summary.termination_type == ceres::CONVERGENCE;
+}
+
+int main(int argc, char** argv) {
+  google::InitGoogleLogging(argv[0]);
+
+  // Problem configuration.
+  const int num_segments = 151;
+  const double regularization_weight = 1e-2;
+
+  // Eigen::MatrixXd is column major so we define our own MatrixXd which is
+  // row major. Eigen::VectorXd can be used directly.
+  typedef Eigen::Matrix<double,
+                        Eigen::Dynamic, Eigen::Dynamic,
+                        Eigen::RowMajor> MatrixXd;
+  using Eigen::VectorXd;
+
+  // `X` is the matrix of control points which make up the contour of line
+  // segments. The number of control points is equal to the number of line
+  // segments because the contour is closed.
+  //
+  // Initialize `X` to points on the unit circle.
+  VectorXd w(num_segments + 1);
+  w.setLinSpaced(num_segments + 1, 0.0, 2.0 * M_PI);
+  w.conservativeResize(num_segments);
+  MatrixXd X(num_segments, 2);
+  X.col(0) = w.array().cos();
+  X.col(1) = w.array().sin();
+
+  // Each data point has an associated preimage position on the line segment
+  // contour. For each data point we initialize the preimage positions to
+  // the index of the closest control point.
+  const int num_observations = kY.rows();
+  VectorXd t(num_observations);
+  for (int i = 0; i < num_observations; ++i) {
+    (X.rowwise() - kY.row(i)).rowwise().squaredNorm().minCoeff(&t[i]);
+  }
+
+  ceres::Problem problem;
+
+  // For each data point add a residual which measures its distance to its
+  // corresponding position on the line segment contour.
+  std::vector<double*> parameter_blocks(1 + num_segments);
+  parameter_blocks[0] = NULL;
+  for (int i = 0; i < num_segments; ++i) {
+    parameter_blocks[i + 1] = X.data() + 2 * i;
+  }
+  for (int i = 0; i < num_observations; ++i) {
+    parameter_blocks[0] = &t[i];
+    problem.AddResidualBlock(
+      PointToLineSegmentContourCostFunction::Create(num_segments, kY.row(i)),
+      NULL,
+      parameter_blocks);
+  }
+
+  // Add regularization to minimize the length of the line segment contour.
+  for (int i = 0; i < num_segments; ++i) {
+    problem.AddResidualBlock(
+      EuclideanDistanceFunctor::Create(sqrt(regularization_weight)),
+      NULL,
+      X.data() + 2 * i,
+      X.data() + 2 * ((i + 1) % num_segments));
+  }
+
+  ceres::Solver::Options options;
+  options.max_num_iterations = 100;
+  options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
+
+  // First, solve `X` and `t` jointly with dynamic_sparsity = true.
+  MatrixXd X0 = X;
+  VectorXd t0 = t;
+  CHECK(SolveWithFullReport(options, &problem, true));
+
+  // Second, solve with dynamic_sparsity = false.
+  X = X0;
+  t = t0;
+  CHECK(SolveWithFullReport(options, &problem, false));
+
+  return 0;
+}
diff --git a/examples/libmv_homography.cc b/examples/libmv_homography.cc
new file mode 100644
index 0000000..8bc7136
--- /dev/null
+++ b/examples/libmv_homography.cc
@@ -0,0 +1,414 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: sergey.vfx@gmail.com (Sergey Sharybin)
+//
+// This file demonstrates solving for a homography between two sets of points.
+// A homography describes a transformation between two sets of points on a plane,
+// perspectively projected into two images. The first step is to solve a
+// homogeneous system of equations via singular value decomposition, giving an
+// algebraic solution for the homography, then solving for a final solution by
+// minimizing the symmetric transfer error in image space with Ceres (called the
+// Gold Standard Solution in "Multiple View Geometry"). The routines are based on
+// the routines from the Libmv library.
+//
+// This example demonstrates custom exit criterion by having a callback check
+// for image-space error.
+
+#include "ceres/ceres.h"
+#include "glog/logging.h"
+
+typedef Eigen::NumTraits<double> EigenDouble;
+
+typedef Eigen::MatrixXd Mat;
+typedef Eigen::VectorXd Vec;
+typedef Eigen::Matrix<double, 3, 3> Mat3;
+typedef Eigen::Matrix<double, 2, 1> Vec2;
+typedef Eigen::Matrix<double, Eigen::Dynamic,  8> MatX8;
+typedef Eigen::Vector3d Vec3;
+
+namespace {
+
+// This structure contains options that controls how the homography
+// estimation operates.
+//
+// Defaults should be suitable for a wide range of use cases, but
+// better performance and accuracy might require tweaking.
+struct EstimateHomographyOptions {
+  // Default settings for homography estimation which should be suitable
+  // for a wide range of use cases.
+  EstimateHomographyOptions()
+    :  max_num_iterations(50),
+       expected_average_symmetric_distance(1e-16) {}
+
+  // Maximal number of iterations for the refinement step.
+  int max_num_iterations;
+
+  // Expected average of symmetric geometric distance between
+  // actual destination points and original ones transformed by
+  // estimated homography matrix.
+  //
+  // Refinement will finish as soon as average of symmetric
+  // geometric distance is less or equal to this value.
+  //
+  // This distance is measured in the same units as input points are.
+  double expected_average_symmetric_distance;
+};
+
+// Calculate symmetric geometric cost terms:
+//
+// forward_error = D(H * x1, x2)
+// backward_error = D(H^-1 * x2, x1)
+//
+// Templated to be used with autodifferentiation.
+template <typename T>
+void SymmetricGeometricDistanceTerms(const Eigen::Matrix<T, 3, 3> &H,
+                                     const Eigen::Matrix<T, 2, 1> &x1,
+                                     const Eigen::Matrix<T, 2, 1> &x2,
+                                     T forward_error[2],
+                                     T backward_error[2]) {
+  typedef Eigen::Matrix<T, 3, 1> Vec3;
+  Vec3 x(x1(0), x1(1), T(1.0));
+  Vec3 y(x2(0), x2(1), T(1.0));
+
+  Vec3 H_x = H * x;
+  Vec3 Hinv_y = H.inverse() * y;
+
+  H_x /= H_x(2);
+  Hinv_y /= Hinv_y(2);
+
+  forward_error[0] = H_x(0) - y(0);
+  forward_error[1] = H_x(1) - y(1);
+  backward_error[0] = Hinv_y(0) - x(0);
+  backward_error[1] = Hinv_y(1) - x(1);
+}
+
+// Calculate symmetric geometric cost:
+//
+//   D(H * x1, x2)^2 + D(H^-1 * x2, x1)^2
+//
+double SymmetricGeometricDistance(const Mat3 &H,
+                                  const Vec2 &x1,
+                                  const Vec2 &x2) {
+  Vec2 forward_error, backward_error;
+  SymmetricGeometricDistanceTerms<double>(H,
+                                          x1,
+                                          x2,
+                                          forward_error.data(),
+                                          backward_error.data());
+  return forward_error.squaredNorm() +
+         backward_error.squaredNorm();
+}
+
+// A parameterization of the 2D homography matrix that uses 8 parameters so
+// that the matrix is normalized (H(2,2) == 1).
+// The homography matrix H is built from a list of 8 parameters (a, b,...g, h)
+// as follows
+//
+//         |a b c|
+//     H = |d e f|
+//         |g h 1|
+//
+template<typename T = double>
+class Homography2DNormalizedParameterization {
+ public:
+  typedef Eigen::Matrix<T, 8, 1> Parameters;     // a, b, ... g, h
+  typedef Eigen::Matrix<T, 3, 3> Parameterized;  // H
+
+  // Convert from the 8 parameters to a H matrix.
+  static void To(const Parameters &p, Parameterized *h) {
+    *h << p(0), p(1), p(2),
+          p(3), p(4), p(5),
+          p(6), p(7), 1.0;
+  }
+
+  // Convert from a H matrix to the 8 parameters.
+  static void From(const Parameterized &h, Parameters *p) {
+    *p << h(0, 0), h(0, 1), h(0, 2),
+          h(1, 0), h(1, 1), h(1, 2),
+          h(2, 0), h(2, 1);
+  }
+};
+
+// 2D Homography transformation estimation in the case that points are in
+// euclidean coordinates.
+//
+//   x = H y
+//
+// x and y vector must have the same direction, we could write
+//
+//   crossproduct(|x|, * H * |y| ) = |0|
+//
+//   | 0 -1  x2|   |a b c|   |y1|    |0|
+//   | 1  0 -x1| * |d e f| * |y2| =  |0|
+//   |-x2  x1 0|   |g h 1|   |1 |    |0|
+//
+// That gives:
+//
+//   (-d+x2*g)*y1    + (-e+x2*h)*y2 + -f+x2          |0|
+//   (a-x1*g)*y1     + (b-x1*h)*y2  + c-x1         = |0|
+//   (-x2*a+x1*d)*y1 + (-x2*b+x1*e)*y2 + -x2*c+x1*f  |0|
+//
+bool Homography2DFromCorrespondencesLinearEuc(
+    const Mat &x1,
+    const Mat &x2,
+    Mat3 *H,
+    double expected_precision) {
+  assert(2 == x1.rows());
+  assert(4 <= x1.cols());
+  assert(x1.rows() == x2.rows());
+  assert(x1.cols() == x2.cols());
+
+  int n = x1.cols();
+  MatX8 L = Mat::Zero(n * 3, 8);
+  Mat b = Mat::Zero(n * 3, 1);
+  for (int i = 0; i < n; ++i) {
+    int j = 3 * i;
+    L(j, 0) =  x1(0, i);             // a
+    L(j, 1) =  x1(1, i);             // b
+    L(j, 2) =  1.0;                  // c
+    L(j, 6) = -x2(0, i) * x1(0, i);  // g
+    L(j, 7) = -x2(0, i) * x1(1, i);  // h
+    b(j, 0) =  x2(0, i);             // i
+
+    ++j;
+    L(j, 3) =  x1(0, i);             // d
+    L(j, 4) =  x1(1, i);             // e
+    L(j, 5) =  1.0;                  // f
+    L(j, 6) = -x2(1, i) * x1(0, i);  // g
+    L(j, 7) = -x2(1, i) * x1(1, i);  // h
+    b(j, 0) =  x2(1, i);             // i
+
+    // This ensures better stability
+    // TODO(julien) make a lite version without this 3rd set
+    ++j;
+    L(j, 0) =  x2(1, i) * x1(0, i);  // a
+    L(j, 1) =  x2(1, i) * x1(1, i);  // b
+    L(j, 2) =  x2(1, i);             // c
+    L(j, 3) = -x2(0, i) * x1(0, i);  // d
+    L(j, 4) = -x2(0, i) * x1(1, i);  // e
+    L(j, 5) = -x2(0, i);             // f
+  }
+  // Solve Lx=B
+  const Vec h = L.fullPivLu().solve(b);
+  Homography2DNormalizedParameterization<double>::To(h, H);
+  return (L * h).isApprox(b, expected_precision);
+}
+
+// Cost functor which computes symmetric geometric distance
+// used for homography matrix refinement.
+class HomographySymmetricGeometricCostFunctor {
+ public:
+  HomographySymmetricGeometricCostFunctor(const Vec2 &x,
+                                          const Vec2 &y)
+      : x_(x), y_(y) { }
+
+  template<typename T>
+  bool operator()(const T* homography_parameters, T* residuals) const {
+    typedef Eigen::Matrix<T, 3, 3> Mat3;
+    typedef Eigen::Matrix<T, 2, 1> Vec2;
+
+    Mat3 H(homography_parameters);
+    Vec2 x(T(x_(0)), T(x_(1)));
+    Vec2 y(T(y_(0)), T(y_(1)));
+
+    SymmetricGeometricDistanceTerms<T>(H,
+                                       x,
+                                       y,
+                                       &residuals[0],
+                                       &residuals[2]);
+    return true;
+  }
+
+  const Vec2 x_;
+  const Vec2 y_;
+};
+
+// Termination checking callback. This is needed to finish the
+// optimization when an absolute error threshold is met, as opposed
+// to Ceres's function_tolerance, which provides for finishing when
+// successful steps reduce the cost function by a fractional amount.
+// In this case, the callback checks for the absolute average reprojection
+// error and terminates when it's below a threshold (for example all
+// points < 0.5px error).
+class TerminationCheckingCallback : public ceres::IterationCallback {
+ public:
+  TerminationCheckingCallback(const Mat &x1, const Mat &x2,
+                              const EstimateHomographyOptions &options,
+                              Mat3 *H)
+      : options_(options), x1_(x1), x2_(x2), H_(H) {}
+
+  virtual ceres::CallbackReturnType operator()(
+      const ceres::IterationSummary& summary) {
+    // If the step wasn't successful, there's nothing to do.
+    if (!summary.step_is_successful) {
+      return ceres::SOLVER_CONTINUE;
+    }
+
+    // Calculate average of symmetric geometric distance.
+    double average_distance = 0.0;
+    for (int i = 0; i < x1_.cols(); i++) {
+      average_distance += SymmetricGeometricDistance(*H_,
+                                                     x1_.col(i),
+                                                     x2_.col(i));
+    }
+    average_distance /= x1_.cols();
+
+    if (average_distance <= options_.expected_average_symmetric_distance) {
+      return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
+    }
+
+    return ceres::SOLVER_CONTINUE;
+  }
+
+ private:
+  const EstimateHomographyOptions &options_;
+  const Mat &x1_;
+  const Mat &x2_;
+  Mat3 *H_;
+};
+
+bool EstimateHomography2DFromCorrespondences(
+    const Mat &x1,
+    const Mat &x2,
+    const EstimateHomographyOptions &options,
+    Mat3 *H) {
+  assert(2 == x1.rows());
+  assert(4 <= x1.cols());
+  assert(x1.rows() == x2.rows());
+  assert(x1.cols() == x2.cols());
+
+  // Step 1: Algebraic homography estimation.
+  // Assume algebraic estimation always succeeds.
+  Homography2DFromCorrespondencesLinearEuc(x1,
+                                           x2,
+                                           H,
+                                           EigenDouble::dummy_precision());
+
+  LOG(INFO) << "Estimated matrix after algebraic estimation:\n" << *H;
+
+  // Step 2: Refine matrix using Ceres minimizer.
+  ceres::Problem problem;
+  for (int i = 0; i < x1.cols(); i++) {
+    HomographySymmetricGeometricCostFunctor
+        *homography_symmetric_geometric_cost_function =
+            new HomographySymmetricGeometricCostFunctor(x1.col(i),
+                                                        x2.col(i));
+
+    problem.AddResidualBlock(
+        new ceres::AutoDiffCostFunction<
+            HomographySymmetricGeometricCostFunctor,
+            4,  // num_residuals
+            9>(homography_symmetric_geometric_cost_function),
+        NULL,
+        H->data());
+  }
+
+  // Configure the solve.
+  ceres::Solver::Options solver_options;
+  solver_options.linear_solver_type = ceres::DENSE_QR;
+  solver_options.max_num_iterations = options.max_num_iterations;
+  solver_options.update_state_every_iteration = true;
+
+  // Terminate if the average symmetric distance is good enough.
+  TerminationCheckingCallback callback(x1, x2, options, H);
+  solver_options.callbacks.push_back(&callback);
+
+  // Run the solve.
+  ceres::Solver::Summary summary;
+  ceres::Solve(solver_options, &problem, &summary);
+
+  LOG(INFO) << "Summary:\n" << summary.FullReport();
+  LOG(INFO) << "Final refined matrix:\n" << *H;
+
+  return summary.IsSolutionUsable();
+}
+
+}  // namespace libmv
+
+int main(int argc, char **argv) {
+  google::InitGoogleLogging(argv[0]);
+
+  Mat x1(2, 100);
+  for (int i = 0; i < x1.cols(); ++i) {
+    x1(0, i) = rand() % 1024;
+    x1(1, i) = rand() % 1024;
+  }
+
+  Mat3 homography_matrix;
+  // This matrix has been dumped from a Blender test file of plane tracking.
+  homography_matrix << 1.243715, -0.461057, -111.964454,
+                       0.0,       0.617589, -192.379252,
+                       0.0,      -0.000983,    1.0;
+
+  Mat x2 = x1;
+  for (int i = 0; i < x2.cols(); ++i) {
+    Vec3 homogenous_x1 = Vec3(x1(0, i), x1(1, i), 1.0);
+    Vec3 homogenous_x2 = homography_matrix * homogenous_x1;
+    x2(0, i) = homogenous_x2(0) / homogenous_x2(2);
+    x2(1, i) = homogenous_x2(1) / homogenous_x2(2);
+
+    // Apply some noise so algebraic estimation is not good enough.
+    x2(0, i) += static_cast<double>(rand() % 1000) / 5000.0;
+    x2(1, i) += static_cast<double>(rand() % 1000) / 5000.0;
+  }
+
+  Mat3 estimated_matrix;
+
+  EstimateHomographyOptions options;
+  options.expected_average_symmetric_distance = 0.02;
+  EstimateHomography2DFromCorrespondences(x1, x2, options, &estimated_matrix);
+
+  // Normalize the matrix for easier comparison.
+  estimated_matrix /= estimated_matrix(2 ,2);
+
+  std::cout << "Original matrix:\n" << homography_matrix << "\n";
+  std::cout << "Estimated matrix:\n" << estimated_matrix << "\n";
+
+  return EXIT_SUCCESS;
+}
diff --git a/examples/more_garbow_hillstrom.cc b/examples/more_garbow_hillstrom.cc
new file mode 100644
index 0000000..d98e57c
--- /dev/null
+++ b/examples/more_garbow_hillstrom.cc
@@ -0,0 +1,374 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Bounds constrained test problems from the paper
+//
+// Testing Unconstrained Optimization Software
+// Jorge J. More, Burton S. Garbow and Kenneth E. Hillstrom
+// ACM Transactions on Mathematical Software, 7(1), pp. 17-41, 1981
+//
+// A subset of these problems was augmented with bounds and used for
+// testing bounds constrained optimization algorithms by
+//
+// A Trust Region Approach to Linearly Constrained Optimization
+// David M. Gay
+// Numerical Analysis (Griffiths, D.F., ed.), pp. 72-105
+// Lecture Notes in Mathematics 1066, Springer Verlag, 1984.
+//
+// The latter paper is behind a paywall. We obtained the bounds on the
+// variables and the function values at the global minimums from
+//
+// http://www.mat.univie.ac.at/~neum/glopt/bounds.html
+//
+// A problem is considered solved if the log relative error of its
+// objective function is at least 5.
+
+
+#include <cmath>
+#include <iostream>  // NOLINT
+#include "ceres/ceres.h"
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace examples {
+
+const double kDoubleMax = std::numeric_limits<double>::max();
+
+#define BEGIN_MGH_PROBLEM(name, num_parameters, num_residuals)          \
+  struct name {                                                         \
+    static const int kNumParameters = num_parameters;                   \
+    static const double initial_x[kNumParameters];                      \
+    static const double lower_bounds[kNumParameters];                   \
+    static const double upper_bounds[kNumParameters];                   \
+    static const double constrained_optimal_cost;                       \
+    static const double unconstrained_optimal_cost;                     \
+    static CostFunction* Create() {                                     \
+      return new AutoDiffCostFunction<name,                             \
+                                      num_residuals,                    \
+                                      num_parameters>(new name);        \
+    }                                                                   \
+    template <typename T>                                               \
+    bool operator()(const T* const x, T* residual) const {
+
+#define END_MGH_PROBLEM return true; } };  // NOLINT
+
+// Rosenbrock function.
+BEGIN_MGH_PROBLEM(TestProblem1, 2, 2)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  residual[0] = T(10.0) * (x2 - x1 * x1);
+  residual[1] = T(1.0) - x1;
+END_MGH_PROBLEM;
+
+const double TestProblem1::initial_x[] = {-1.2, 1.0};
+const double TestProblem1::lower_bounds[] = {-kDoubleMax, -kDoubleMax};
+const double TestProblem1::upper_bounds[] = {kDoubleMax, kDoubleMax};
+const double TestProblem1::constrained_optimal_cost =
+    std::numeric_limits<double>::quiet_NaN();
+const double TestProblem1::unconstrained_optimal_cost = 0.0;
+
+// Freudenstein and Roth function.
+BEGIN_MGH_PROBLEM(TestProblem2, 2, 2)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  residual[0] = T(-13.0) + x1 + ((T(5.0) - x2) * x2 - T(2.0)) * x2;
+  residual[1] = T(-29.0) + x1 + ((x2 + T(1.0)) * x2 - T(14.0)) * x2;
+END_MGH_PROBLEM;
+
+const double TestProblem2::initial_x[] = {0.5, -2.0};
+const double TestProblem2::lower_bounds[] = {-kDoubleMax, -kDoubleMax};
+const double TestProblem2::upper_bounds[] = {kDoubleMax, kDoubleMax};
+const double TestProblem2::constrained_optimal_cost =
+    std::numeric_limits<double>::quiet_NaN();
+const double TestProblem2::unconstrained_optimal_cost = 0.0;
+
+// Powell badly scaled function.
+BEGIN_MGH_PROBLEM(TestProblem3, 2, 2)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  residual[0] = T(10000.0) * x1 * x2 - T(1.0);
+  residual[1] = exp(-x1) + exp(-x2) - T(1.0001);
+END_MGH_PROBLEM;
+
+const double TestProblem3::initial_x[] = {0.0, 1.0};
+const double TestProblem3::lower_bounds[] = {0.0, 1.0};
+const double TestProblem3::upper_bounds[] = {1.0, 9.0};
+const double TestProblem3::constrained_optimal_cost = 0.15125900e-9;
+const double TestProblem3::unconstrained_optimal_cost = 0.0;
+
+// Brown badly scaled function.
+BEGIN_MGH_PROBLEM(TestProblem4, 2, 3)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  residual[0] = x1  - T(1000000.0);
+  residual[1] = x2 - T(0.000002);
+  residual[2] = x1 * x2 - T(2.0);
+END_MGH_PROBLEM;
+
+const double TestProblem4::initial_x[] = {1.0, 1.0};
+const double TestProblem4::lower_bounds[] = {0.0, 0.00003};
+const double TestProblem4::upper_bounds[] = {1000000.0, 100.0};
+const double TestProblem4::constrained_optimal_cost = 0.78400000e3;
+const double TestProblem4::unconstrained_optimal_cost = 0.0;
+
+// Beale function.
+BEGIN_MGH_PROBLEM(TestProblem5, 2, 3)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  residual[0] = T(1.5) - x1 * (T(1.0) - x2);
+  residual[1] = T(2.25) - x1 * (T(1.0) - x2 * x2);
+  residual[2] = T(2.625) - x1 * (T(1.0) - x2 * x2 * x2);
+END_MGH_PROBLEM;
+
+const double TestProblem5::initial_x[] = {1.0, 1.0};
+const double TestProblem5::lower_bounds[] = {0.6, 0.5};
+const double TestProblem5::upper_bounds[] = {10.0, 100.0};
+const double TestProblem5::constrained_optimal_cost = 0.0;
+const double TestProblem5::unconstrained_optimal_cost = 0.0;
+
+// Jennrich and Sampson function.
+BEGIN_MGH_PROBLEM(TestProblem6, 2, 10)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  for (int i = 1; i <= 10; ++i) {
+    residual[i - 1] = T(2.0) + T(2.0 * i) -
+        exp(T(static_cast<double>(i)) * x1) -
+        exp(T(static_cast<double>(i) * x2));
+  }
+END_MGH_PROBLEM;
+
+const double TestProblem6::initial_x[] = {1.0, 1.0};
+const double TestProblem6::lower_bounds[] = {-kDoubleMax, -kDoubleMax};
+const double TestProblem6::upper_bounds[] = {kDoubleMax, kDoubleMax};
+const double TestProblem6::constrained_optimal_cost =
+    std::numeric_limits<double>::quiet_NaN();
+const double TestProblem6::unconstrained_optimal_cost = 124.362;
+
+// Helical valley function.
+BEGIN_MGH_PROBLEM(TestProblem7, 3, 3)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  const T x3 = x[2];
+  const T theta = T(0.5 / M_PI)  * atan(x2 / x1) + (x1 > 0.0 ? T(0.0) : T(0.5));
+
+  residual[0] = T(10.0) * (x3 - T(10.0) * theta);
+  residual[1] = T(10.0) * (sqrt(x1 * x1 + x2 * x2) - T(1.0));
+  residual[2] = x3;
+END_MGH_PROBLEM;
+
+const double TestProblem7::initial_x[] = {-1.0, 0.0, 0.0};
+const double TestProblem7::lower_bounds[] = {-100.0, -1.0, -1.0};
+const double TestProblem7::upper_bounds[] = {0.8, 1.0, 1.0};
+const double TestProblem7::constrained_optimal_cost = 0.99042212;
+const double TestProblem7::unconstrained_optimal_cost = 0.0;
+
+// Bard function
+BEGIN_MGH_PROBLEM(TestProblem8, 3, 15)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  const T x3 = x[2];
+
+  double y[] = {0.14, 0.18, 0.22, 0.25,
+                0.29, 0.32, 0.35, 0.39, 0.37, 0.58,
+                0.73, 0.96, 1.34, 2.10, 4.39};
+
+  for (int i = 1; i <=15; ++i) {
+    const T u = T(static_cast<double>(i));
+    const T v = T(static_cast<double>(16 - i));
+    const T w = T(static_cast<double>(std::min(i, 16 - i)));
+    residual[i - 1] = T(y[i - 1]) - x1 + u / (v * x2 + w * x3);
+  }
+END_MGH_PROBLEM;
+
+const double TestProblem8::initial_x[] = {1.0, 1.0, 1.0};
+const double TestProblem8::lower_bounds[] = {
+  -kDoubleMax, -kDoubleMax, -kDoubleMax};
+const double TestProblem8::upper_bounds[] = {
+  kDoubleMax, kDoubleMax, kDoubleMax};
+const double TestProblem8::constrained_optimal_cost =
+    std::numeric_limits<double>::quiet_NaN();
+const double TestProblem8::unconstrained_optimal_cost = 8.21487e-3;
+
+// Gaussian function.
+BEGIN_MGH_PROBLEM(TestProblem9, 3, 15)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  const T x3 = x[2];
+
+  const double y[] = {0.0009, 0.0044, 0.0175, 0.0540, 0.1295, 0.2420, 0.3521,
+                      0.3989,
+                      0.3521, 0.2420, 0.1295, 0.0540, 0.0175, 0.0044, 0.0009};
+  for (int i = 0; i < 15; ++i) {
+    const T t_i = T((8.0 - i - 1.0) / 2.0);
+    const T y_i = T(y[i]);
+    residual[i] = x1 * exp(-x2 * (t_i - x3) * (t_i - x3) / T(2.0)) - y_i;
+  }
+END_MGH_PROBLEM;
+
+const double TestProblem9::initial_x[] = {0.4, 1.0, 0.0};
+const double TestProblem9::lower_bounds[] = {0.398, 1.0, -0.5};
+const double TestProblem9::upper_bounds[] = {4.2, 2.0, 0.1};
+const double TestProblem9::constrained_optimal_cost = 0.11279300e-7;
+const double TestProblem9::unconstrained_optimal_cost = 0.112793e-7;
+
+// Meyer function.
+BEGIN_MGH_PROBLEM(TestProblem10, 3, 16)
+  const T x1 = x[0];
+  const T x2 = x[1];
+  const T x3 = x[2];
+
+  const double y[] = {34780, 28610, 23650, 19630, 16370, 13720, 11540, 9744,
+                      8261, 7030, 6005, 5147, 4427, 3820, 3307, 2872};
+
+  for (int i = 0; i < 16; ++i) {
+    T t = T(45 + 5.0 * (i + 1));
+    residual[i] = x1 * exp(x2 / (t + x3)) - y[i];
+  }
+END_MGH_PROBLEM
+
+
+const double TestProblem10::initial_x[] = {0.02, 4000, 250};
+const double TestProblem10::lower_bounds[] ={
+  -kDoubleMax, -kDoubleMax, -kDoubleMax};
+const double TestProblem10::upper_bounds[] ={
+  kDoubleMax, kDoubleMax, kDoubleMax};
+const double TestProblem10::constrained_optimal_cost =
+    std::numeric_limits<double>::quiet_NaN();
+const double TestProblem10::unconstrained_optimal_cost = 87.9458;
+
+#undef BEGIN_MGH_PROBLEM
+#undef END_MGH_PROBLEM
+
+template<typename TestProblem> string ConstrainedSolve() {
+  double x[TestProblem::kNumParameters];
+  std::copy(TestProblem::initial_x,
+            TestProblem::initial_x + TestProblem::kNumParameters,
+            x);
+
+  Problem problem;
+  problem.AddResidualBlock(TestProblem::Create(), NULL, x);
+  for (int i = 0; i < TestProblem::kNumParameters; ++i) {
+    problem.SetParameterLowerBound(x, i, TestProblem::lower_bounds[i]);
+    problem.SetParameterUpperBound(x, i, TestProblem::upper_bounds[i]);
+  }
+
+  Solver::Options options;
+  options.parameter_tolerance = 1e-18;
+  options.function_tolerance = 1e-18;
+  options.gradient_tolerance = 1e-18;
+  options.max_num_iterations = 1000;
+  options.linear_solver_type = DENSE_QR;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+
+  const double kMinLogRelativeError = 5.0;
+  const double log_relative_error = -std::log10(
+      std::abs(2.0 * summary.final_cost -
+               TestProblem::constrained_optimal_cost) /
+      (TestProblem::constrained_optimal_cost > 0.0
+       ? TestProblem::constrained_optimal_cost
+       : 1.0));
+
+  return (log_relative_error >= kMinLogRelativeError
+          ? "Success\n"
+          : "Failure\n");
+}
+
+template<typename TestProblem> string UnconstrainedSolve() {
+  double x[TestProblem::kNumParameters];
+  std::copy(TestProblem::initial_x,
+            TestProblem::initial_x + TestProblem::kNumParameters,
+            x);
+
+  Problem problem;
+  problem.AddResidualBlock(TestProblem::Create(), NULL, x);
+
+  Solver::Options options;
+  options.parameter_tolerance = 1e-18;
+  options.function_tolerance = 0.0;
+  options.gradient_tolerance = 1e-18;
+  options.max_num_iterations = 1000;
+  options.linear_solver_type = DENSE_QR;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+
+  const double kMinLogRelativeError = 5.0;
+  const double log_relative_error = -std::log10(
+      std::abs(2.0 * summary.final_cost -
+               TestProblem::unconstrained_optimal_cost) /
+      (TestProblem::unconstrained_optimal_cost > 0.0
+       ? TestProblem::unconstrained_optimal_cost
+       : 1.0));
+
+  return (log_relative_error >= kMinLogRelativeError
+          ? "Success\n"
+          : "Failure\n");
+}
+
+}  // namespace examples
+}  // namespace ceres
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  google::InitGoogleLogging(argv[0]);
+
+  using ceres::examples::UnconstrainedSolve;
+  using ceres::examples::ConstrainedSolve;
+
+#define UNCONSTRAINED_SOLVE(n)                                          \
+  std::cout << "Problem " << n << " : "                                 \
+            << UnconstrainedSolve<ceres::examples::TestProblem##n>();
+
+#define CONSTRAINED_SOLVE(n)                                            \
+  std::cout << "Problem " << n << " : "                                 \
+            << ConstrainedSolve<ceres::examples::TestProblem##n>();
+
+  std::cout << "Unconstrained problems\n";
+  UNCONSTRAINED_SOLVE(1);
+  UNCONSTRAINED_SOLVE(2);
+  UNCONSTRAINED_SOLVE(3);
+  UNCONSTRAINED_SOLVE(4);
+  UNCONSTRAINED_SOLVE(5);
+  UNCONSTRAINED_SOLVE(6);
+  UNCONSTRAINED_SOLVE(7);
+  UNCONSTRAINED_SOLVE(8);
+  UNCONSTRAINED_SOLVE(9);
+  UNCONSTRAINED_SOLVE(10);
+
+  std::cout << "\nConstrained problems\n";
+  CONSTRAINED_SOLVE(3);
+  CONSTRAINED_SOLVE(4);
+  CONSTRAINED_SOLVE(5);
+  CONSTRAINED_SOLVE(7);
+  CONSTRAINED_SOLVE(9);
+
+  return 0;
+}
diff --git a/examples/nist.cc b/examples/nist.cc
index 1773a0f..b29b285 100644
--- a/examples/nist.cc
+++ b/examples/nist.cc
@@ -159,14 +159,6 @@
   }
 }
 
-bool IsSuccessfulTermination(ceres::SolverTerminationType status) {
-  return
-      (status == ceres::FUNCTION_TOLERANCE) ||
-      (status == ceres::GRADIENT_TOLERANCE) ||
-      (status == ceres::PARAMETER_TOLERANCE) ||
-      (status == ceres::USER_SUCCESS);
-}
-
 class NISTProblem {
  public:
   explicit NISTProblem(const std::string& filename) {
diff --git a/examples/pgm_image.h b/examples/pgm_image.h
index 15e99e4..1328d75 100644
--- a/examples/pgm_image.h
+++ b/examples/pgm_image.h
@@ -197,7 +197,7 @@
     outputfile << static_cast<int>(data_[i] + 0.5) << ' ';
   }
 
-  return outputfile;  // Returns true/false
+  return bool(outputfile);  // Returns true/false
 }
 
 namespace  {
diff --git a/examples/quadratic.cc b/examples/quadratic.cc
deleted file mode 100644
index 8527af3..0000000
--- a/examples/quadratic.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// A simple example of using the Ceres minimizer.
-//
-// Minimize 0.5 (10 - x)^2 using analytic jacobian matrix.
-
-#include <vector>
-#include "ceres/ceres.h"
-#include "gflags/gflags.h"
-#include "glog/logging.h"
-
-using ceres::SizedCostFunction;
-using ceres::Problem;
-using ceres::Solver;
-using ceres::Solve;
-
-class SimpleCostFunction
-  : public SizedCostFunction<1 /* number of residuals */,
-                             1 /* size of first parameter */> {
- public:
-  virtual ~SimpleCostFunction() {}
-  virtual bool Evaluate(double const* const* parameters,
-                        double* residuals,
-                        double** jacobians) const {
-    double x = parameters[0][0];
-
-    // f(x) = 10 - x.
-    residuals[0] = 10 - x;
-
-    // f'(x) = -1. Since there's only 1 parameter and that parameter
-    // has 1 dimension, there is only 1 element to fill in the
-    // jacobians.
-    if (jacobians != NULL && jacobians[0] != NULL) {
-      jacobians[0][0] = -1;
-    }
-    return true;
-  }
-};
-
-int main(int argc, char** argv) {
-  google::ParseCommandLineFlags(&argc, &argv, true);
-  google::InitGoogleLogging(argv[0]);
-
-  // The variable with its initial value that we will be solving for.
-  double x = 5.0;
-
-  // Build the problem.
-  Problem problem;
-  // Set up the only cost function (also known as residual).
-  problem.AddResidualBlock(new SimpleCostFunction, NULL, &x);
-
-  // Run the solver!
-  Solver::Options options;
-  options.max_num_iterations = 10;
-  options.linear_solver_type = ceres::DENSE_QR;
-  options.minimizer_progress_to_stdout = true;
-  Solver::Summary summary;
-  Solve(options, &problem, &summary);
-  std::cout << summary.BriefReport() << "\n";
-  std::cout << "x : 5.0 -> " << x << "\n";
-  return 0;
-}
diff --git a/examples/quadratic_auto_diff.cc b/examples/quadratic_auto_diff.cc
deleted file mode 100644
index 1e2f3ef..0000000
--- a/examples/quadratic_auto_diff.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// A simple example of using the Ceres minimizer.
-//
-// Minimize 0.5 (10 - x)^2 using jacobian matrix computed using
-// automatic differentiation.
-
-#include <vector>
-#include "ceres/ceres.h"
-#include "gflags/gflags.h"
-#include "glog/logging.h"
-
-using ceres::AutoDiffCostFunction;
-using ceres::CostFunction;
-using ceres::Problem;
-using ceres::Solver;
-using ceres::Solve;
-
-// A templated cost functor that implements the residual r = 10 -
-// x. The method operator() is templated so that we can then use an
-// automatic differentiation wrapper around it to generate its
-// derivatives.
-class QuadraticCostFunctor {
- public:
-  template <typename T> bool operator()(const T* const x, T* residual) const {
-    residual[0] = T(10.0) - x[0];
-    return true;
-  }
-};
-
-int main(int argc, char** argv) {
-  google::ParseCommandLineFlags(&argc, &argv, true);
-  google::InitGoogleLogging(argv[0]);
-
-  // The variable to solve for with its initial value.
-  double initial_x = 5.0;
-  double x = initial_x;
-
-  // Build the problem.
-  Problem problem;
-
-  // Set up the only cost function (also known as residual). This uses
-  // auto-differentiation to obtain the derivative (jacobian).
-  problem.AddResidualBlock(
-      new AutoDiffCostFunction<QuadraticCostFunctor, 1, 1>(
-          new QuadraticCostFunctor),
-      NULL,
-      &x);
-
-  // Run the solver!
-  Solver::Options options;
-  options.max_num_iterations = 10;
-  options.linear_solver_type = ceres::DENSE_QR;
-  options.minimizer_progress_to_stdout = true;
-  Solver::Summary summary;
-  Solve(options, &problem, &summary);
-  std::cout << summary.BriefReport() << "\n";
-  std::cout << "x : " << initial_x
-            << " -> " << x << "\n";
-  return 0;
-}
diff --git a/examples/quadratic_numeric_diff.cc b/examples/quadratic_numeric_diff.cc
deleted file mode 100644
index 1082616..0000000
--- a/examples/quadratic_numeric_diff.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Minimize 0.5 (10 - x)^2 using jacobian matrix computed using
-// numeric differentiation.
-
-#include <vector>
-#include "ceres/ceres.h"
-#include "gflags/gflags.h"
-#include "glog/logging.h"
-
-using ceres::NumericDiffCostFunction;
-using ceres::CENTRAL;
-using ceres::CostFunction;
-using ceres::Problem;
-using ceres::Solver;
-using ceres::Solve;
-
-// A cost functor that implements the residual r = 10 - x.
-class QuadraticCostFunctor {
- public:
-  bool operator()(const double* const x, double* residual) const {
-    residual[0] = 10.0 - x[0];
-    return true;
-  }
-};
-
-int main(int argc, char** argv) {
-  google::ParseCommandLineFlags(&argc, &argv, true);
-  google::InitGoogleLogging(argv[0]);
-
-  // The variable to solve for with its initial value.
-  double initial_x = 5.0;
-  double x = initial_x;
-
-  // Set up the only cost function (also known as residual). This uses
-  // numeric differentiation to obtain the derivative (jacobian).
-  CostFunction* cost =
-      new NumericDiffCostFunction<QuadraticCostFunctor, CENTRAL, 1, 1> (
-          new QuadraticCostFunctor);
-
-  // Build the problem.
-  Problem problem;
-  problem.AddResidualBlock(cost, NULL, &x);
-
-  // Run the solver!
-  Solver::Options options;
-  options.max_num_iterations = 10;
-  options.linear_solver_type = ceres::DENSE_QR;
-  options.minimizer_progress_to_stdout = true;
-  Solver::Summary summary;
-  Solve(options, &problem, &summary);
-  std::cout << summary.BriefReport() << "\n";
-  std::cout << "x : " << initial_x
-            << " -> " << x << "\n";
-  return 0;
-}
diff --git a/examples/random.h b/examples/random.h
new file mode 100644
index 0000000..0d55b49
--- /dev/null
+++ b/examples/random.h
@@ -0,0 +1,64 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_EXAMPLES_RANDOM_H_
+#define CERES_EXAMPLES_RANDOM_H_
+
+#include <math.h>
+#include <stdlib.h>
+
+namespace ceres {
+namespace examples {
+
+// Return a random number sampled from a uniform distribution in the range
+// [0,1].
+inline double RandDouble() {
+  double r = static_cast<double>(rand());
+  return r / RAND_MAX;
+}
+
+// Marsaglia Polar method for generating standard normal (pseudo)
+// random numbers http://en.wikipedia.org/wiki/Marsaglia_polar_method
+inline double RandNormal() {
+  double x1, x2, w;
+  do {
+    x1 = 2.0 * RandDouble() - 1.0;
+    x2 = 2.0 * RandDouble() - 1.0;
+    w = x1 * x1 + x2 * x2;
+  } while ( w >= 1.0 || w == 0.0 );
+
+  w = sqrt((-2.0 * log(w)) / w);
+  return x1 * w;
+}
+
+}  // namespace examples
+}  // namespace ceres
+
+#endif  // CERES_EXAMPLES_RANDOM_H_
diff --git a/examples/robot_pose_mle.cc b/examples/robot_pose_mle.cc
new file mode 100644
index 0000000..e1a1dd0
--- /dev/null
+++ b/examples/robot_pose_mle.cc
@@ -0,0 +1,316 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: joydeepb@ri.cmu.edu (Joydeep Biswas)
+//
+// This example demonstrates how to use the DynamicAutoDiffCostFunction
+// variant of CostFunction. The DynamicAutoDiffCostFunction is meant to
+// be used in cases where the number of parameter blocks or the sizes are not
+// known at compile time.
+//
+// This example simulates a robot traversing down a 1-dimension hallway with
+// noisy odometry readings and noisy range readings of the end of the hallway.
+// By fusing the noisy odometry and sensor readings this example demonstrates
+// how to compute the maximum likelihood estimate (MLE) of the robot's pose at
+// each timestep.
+//
+// The robot starts at the origin, and it travels to the end of a corridor of
+// fixed length specified by the "--corridor_length" flag. It executes a series
+// of motion commands to move forward a fixed length, specified by the
+// "--pose_separation" flag, at which pose it receives relative odometry
+// measurements as well as a range reading of the distance to the end of the
+// hallway. The odometry readings are drawn with Gaussian noise and standard
+// deviation specified by the "--odometry_stddev" flag, and the range readings
+// similarly with standard deviation specified by the "--range_stddev" flag.
+//
+// There are two types of residuals in this problem:
+// 1) The OdometryConstraint residual, that accounts for the odometry readings
+//    between successive pose estimates of the robot.
+// 2) The RangeConstraint residual, that accounts for the errors in the observed
+//    range readings from each pose.
+//
+// The OdometryConstraint residual is modeled as an AutoDiffCostFunction with
+// a fixed parameter block size of 1, which is the relative odometry being
+// solved for, between a pair of successive poses of the robot. Differences
+// between observed and computed relative odometry values are penalized weighted
+// by the known standard deviation of the odometry readings.
+//
+// The RangeConstraint residual is modeled as a DynamicAutoDiffCostFunction
+// which sums up the relative odometry estimates to compute the estimated
+// global pose of the robot, and then computes the expected range reading.
+// Differences between the observed and expected range readings are then
+// penalized weighted by the standard deviation of readings of the sensor.
+// Since the number of poses of the robot is not known at compile time, this
+// cost function is implemented as a DynamicAutoDiffCostFunction.
+//
+// The outputs of the example are the initial values of the odometry and range
+// readings, and the range and odometry errors for every pose of the robot.
+// After computing the MLE, the computed poses and corrected odometry values
+// are printed out, along with the corresponding range and odometry errors. Note
+// that as an MLE of a noisy system the errors will not be reduced to zero, but
+// the odometry estimates will be updated to maximize the joint likelihood of
+// all odometry and range readings of the robot.
+//
+// Mathematical Formulation
+// ======================================================
+//
+// Let p_0, .., p_N be (N+1) robot poses, where the robot moves down the
+// corridor starting from p_0 and ending at p_N. We assume that p_0 is the
+// origin of the coordinate system.
+// Odometry u_i is the observed relative odometry between pose p_(i-1) and p_i,
+// and range reading y_i is the range reading of the end of the corridor from
+// pose p_i. Both odometry as well as range readings are noisy, but we wish to
+// compute the maximum likelihood estimate (MLE) of corrected odometry values
+// u*_0 to u*_(N-1), such that the Belief is optimized:
+//
+// Belief(u*_(0:N-1) | u_(0:N-1), y_(0:N-1))                                  1.
+//   =        P(u*_(0:N-1) | u_(0:N-1), y_(0:N-1))                            2.
+//   \propto  P(y_(0:N-1) | u*_(0:N-1), u_(0:N-1)) P(u*_(0:N-1) | u_(0:N-1))  3.
+//   =       \prod_i{ P(y_i | u*_(0:i)) P(u*_i | u_i) }                       4.
+//
+// Here, the subscript "(0:i)" is used as shorthand to indicate entries from all
+// timesteps 0 to i for that variable, both inclusive.
+//
+// Bayes' rule is used to derive eq. 3 from 2, and the independence of
+// odometry observations and range readings is exploited to derive 4 from 3.
+//
+// Thus, the Belief, up to scale, is factored as a product of a number of
+// terms, two for each pose, where for each pose term there is one term for the
+// range reading, P(y_i | u*_(0:i) and one term for the odometry reading,
+// P(u*_i | u_i) . Note that the term for the range reading is dependent on all
+// odometry values u*_(0:i), while the odometry term, P(u*_i | u_i) depends only
+// on a single value, u_i. Both the range reading as well as odometry
+// probability terms are modeled as the Normal distribution, and have the form:
+//
+// p(x) \propto \exp{-((x - x_mean) / x_stddev)^2}
+//
+// where x refers to either the MLE odometry u* or range reading y, and x_mean
+// is the corresponding mean value, u for the odometry terms, and y_expected,
+// the expected range reading based on all the previous odometry terms.
+// The MLE is thus found by finding those values x* which minimize:
+//
+// x* = \arg\min{((x - x_mean) / x_stddev)^2}
+//
+// which is in the nonlinear least-square form, suited to being solved by Ceres.
+// The non-linear component arises from the computation of x_mean. The residuals
+// ((x - x_mean) / x_stddev) form the residuals that Ceres will optimize. As
+// mentioned earlier, the odometry term for each pose depends only on one
+// variable, and will be computed by an AutoDiffCostFunction, while the term
+// for the range reading will depend on all previous odometry observations, and
+// will be computed by a DynamicAutoDiffCostFunction since the number of
+// odometry observations will only be known at run time.
+
+#include <cstdio>
+#include <math.h>
+#include <vector>
+
+#include "ceres/ceres.h"
+#include "ceres/dynamic_autodiff_cost_function.h"
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+#include "random.h"
+
+using ceres::AutoDiffCostFunction;
+using ceres::DynamicAutoDiffCostFunction;
+using ceres::CauchyLoss;
+using ceres::CostFunction;
+using ceres::LossFunction;
+using ceres::Problem;
+using ceres::Solve;
+using ceres::Solver;
+using ceres::examples::RandNormal;
+using std::min;
+using std::vector;
+
+DEFINE_double(corridor_length, 30.0, "Length of the corridor that the robot is "
+              "travelling down.");
+
+DEFINE_double(pose_separation, 0.5, "The distance that the robot traverses "
+              "between successive odometry updates.");
+
+DEFINE_double(odometry_stddev, 0.1, "The standard deviation of "
+              "odometry error of the robot.");
+
+DEFINE_double(range_stddev, 0.01, "The standard deviation of range readings of "
+              "the robot.");
+
+// The stride length of the dynamic_autodiff_cost_function evaluator.
+static const int kStride = 10;
+
+struct OdometryConstraint {
+  typedef AutoDiffCostFunction<OdometryConstraint, 1, 1> OdometryCostFunction;
+
+  OdometryConstraint(double odometry_mean, double odometry_stddev) :
+      odometry_mean(odometry_mean), odometry_stddev(odometry_stddev) {}
+
+  template <typename T>
+  bool operator()(const T* const odometry, T* residual) const {
+    *residual = (*odometry - T(odometry_mean)) / T(odometry_stddev);
+    return true;
+  }
+
+  static OdometryCostFunction* Create(const double odometry_value) {
+    return new OdometryCostFunction(
+        new OdometryConstraint(odometry_value, FLAGS_odometry_stddev));
+  }
+
+  const double odometry_mean;
+  const double odometry_stddev;
+};
+
+struct RangeConstraint {
+  typedef DynamicAutoDiffCostFunction<RangeConstraint, kStride>
+      RangeCostFunction;
+
+  RangeConstraint(
+      int pose_index,
+      double range_reading,
+      double range_stddev,
+      double corridor_length) :
+      pose_index(pose_index), range_reading(range_reading),
+      range_stddev(range_stddev), corridor_length(corridor_length) {}
+
+  template <typename T>
+  bool operator()(T const* const* relative_poses, T* residuals) const {
+    T global_pose(0);
+    for (int i = 0; i <= pose_index; ++i) {
+      global_pose += relative_poses[i][0];
+    }
+    residuals[0] = (global_pose + T(range_reading) - T(corridor_length)) /
+        T(range_stddev);
+    return true;
+  }
+
+  // Factory method to create a CostFunction from a RangeConstraint to
+  // conveniently add to a ceres problem.
+  static RangeCostFunction* Create(const int pose_index,
+                                   const double range_reading,
+                                   vector<double>* odometry_values,
+                                   vector<double*>* parameter_blocks) {
+    RangeConstraint* constraint = new RangeConstraint(
+        pose_index, range_reading, FLAGS_range_stddev, FLAGS_corridor_length);
+    RangeCostFunction* cost_function = new RangeCostFunction(constraint);
+    // Add all the parameter blocks that affect this constraint.
+    parameter_blocks->clear();
+    for (int i = 0; i <= pose_index; ++i) {
+      parameter_blocks->push_back(&((*odometry_values)[i]));
+      cost_function->AddParameterBlock(1);
+    }
+    cost_function->SetNumResiduals(1);
+    return (cost_function);
+  }
+
+  const int pose_index;
+  const double range_reading;
+  const double range_stddev;
+  const double corridor_length;
+};
+
+void SimulateRobot(vector<double>* odometry_values,
+                   vector<double>* range_readings) {
+  const int num_steps = static_cast<int>(
+      ceil(FLAGS_corridor_length / FLAGS_pose_separation));
+
+  // The robot starts out at the origin.
+  double robot_location = 0.0;
+  for (int i = 0; i < num_steps; ++i) {
+    const double actual_odometry_value = min(
+        FLAGS_pose_separation, FLAGS_corridor_length - robot_location);
+    robot_location += actual_odometry_value;
+    const double actual_range = FLAGS_corridor_length - robot_location;
+    const double observed_odometry =
+        RandNormal() * FLAGS_odometry_stddev + actual_odometry_value;
+    const double observed_range =
+        RandNormal() * FLAGS_range_stddev + actual_range;
+    odometry_values->push_back(observed_odometry);
+    range_readings->push_back(observed_range);
+  }
+}
+
+void PrintState(const vector<double>& odometry_readings,
+                const vector<double>& range_readings) {
+  CHECK_EQ(odometry_readings.size(), range_readings.size());
+  double robot_location = 0.0;
+  printf("pose: location     odom    range  r.error  o.error\n");
+  for (int i = 0; i < odometry_readings.size(); ++i) {
+    robot_location += odometry_readings[i];
+    const double range_error =
+        robot_location + range_readings[i] - FLAGS_corridor_length;
+    const double odometry_error =
+        FLAGS_pose_separation - odometry_readings[i];
+    printf("%4d: %8.3f %8.3f %8.3f %8.3f %8.3f\n",
+           static_cast<int>(i), robot_location, odometry_readings[i],
+           range_readings[i], range_error, odometry_error);
+  }
+}
+
+int main(int argc, char** argv) {
+  google::InitGoogleLogging(argv[0]);
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  // Make sure that the arguments parsed are all positive.
+  CHECK_GT(FLAGS_corridor_length, 0.0);
+  CHECK_GT(FLAGS_pose_separation, 0.0);
+  CHECK_GT(FLAGS_odometry_stddev, 0.0);
+  CHECK_GT(FLAGS_range_stddev, 0.0);
+
+  vector<double> odometry_values;
+  vector<double> range_readings;
+  SimulateRobot(&odometry_values, &range_readings);
+
+  printf("Initial values:\n");
+  PrintState(odometry_values, range_readings);
+  ceres::Problem problem;
+
+  for (int i = 0; i < odometry_values.size(); ++i) {
+    // Create and add a DynamicAutoDiffCostFunction for the RangeConstraint from
+    // pose i.
+    vector<double*> parameter_blocks;
+    RangeConstraint::RangeCostFunction* range_cost_function =
+        RangeConstraint::Create(
+            i, range_readings[i], &odometry_values, &parameter_blocks);
+    problem.AddResidualBlock(range_cost_function, NULL, parameter_blocks);
+
+    // Create and add an AutoDiffCostFunction for the OdometryConstraint for
+    // pose i.
+    problem.AddResidualBlock(OdometryConstraint::Create(odometry_values[i]),
+                             NULL,
+                             &(odometry_values[i]));
+  }
+
+  ceres::Solver::Options solver_options;
+  solver_options.minimizer_progress_to_stdout = true;
+
+  Solver::Summary summary;
+  printf("Solving...\n");
+  Solve(solver_options, &problem, &summary);
+  printf("Done.\n");
+  std::cout << summary.FullReport() << "\n";
+  printf("Final values:\n");
+  PrintState(odometry_values, range_readings);
+  return 0;
+}
diff --git a/examples/snavely_reprojection_error.h b/examples/snavely_reprojection_error.h
index 0704217..d3263f3 100644
--- a/examples/snavely_reprojection_error.h
+++ b/examples/snavely_reprojection_error.h
@@ -91,6 +91,14 @@
     return true;
   }
 
+  // Factory to hide the construction of the CostFunction object from
+  // the client code.
+  static ceres::CostFunction* Create(const double observed_x,
+                                     const double observed_y) {
+    return (new ceres::AutoDiffCostFunction<SnavelyReprojectionError, 2, 9, 3>(
+                new SnavelyReprojectionError(observed_x, observed_y)));
+  }
+
   double observed_x;
   double observed_y;
 };
@@ -146,6 +154,16 @@
     return true;
   }
 
+  // Factory to hide the construction of the CostFunction object from
+  // the client code.
+  static ceres::CostFunction* Create(const double observed_x,
+                                     const double observed_y) {
+    return (new ceres::AutoDiffCostFunction<
+            SnavelyReprojectionErrorWithQuaternions, 2, 4, 6, 3>(
+                new SnavelyReprojectionErrorWithQuaternions(observed_x,
+                                                            observed_y)));
+  }
+
   double observed_x;
   double observed_y;
 };
diff --git a/include/ceres/autodiff_cost_function.h b/include/ceres/autodiff_cost_function.h
index 371a11f..7c0fa79 100644
--- a/include/ceres/autodiff_cost_function.h
+++ b/include/ceres/autodiff_cost_function.h
@@ -96,7 +96,7 @@
 // "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing a
 // 1-dimensional output from two arguments, both 2-dimensional.
 //
-// The autodiff cost function also supports cost functions with a
+// AutoDiffCostFunction also supports cost functions with a
 // runtime-determined number of residuals. For example:
 //
 //   CostFunction* cost_function
@@ -110,8 +110,9 @@
 //             Dimension of x ------------------------------------+  |
 //             Dimension of y ---------------------------------------+
 //
-// The framework can currently accommodate cost functions of up to 6 independent
-// variables, and there is no limit on the dimensionality of each of them.
+// The framework can currently accommodate cost functions of up to 10
+// independent variables, and there is no limit on the dimensionality
+// of each of them.
 //
 // WARNING #1: Since the functor will get instantiated with different types for
 // T, you must to convert from other numeric types to T before mixing
@@ -145,13 +146,13 @@
 //
 // The constructors take ownership of the cost functor.
 //
-// If the number of residuals (argument "M" below) is ceres::DYNAMIC, then the
-// two-argument constructor must be used. The second constructor takes a number
-// of residuals (in addition to the templated number of residuals). This allows
-// for varying the number of residuals for a single autodiff cost function at
-// runtime.
+// If the number of residuals (argument kNumResiduals below) is
+// ceres::DYNAMIC, then the two-argument constructor must be used. The
+// second constructor takes a number of residuals (in addition to the
+// templated number of residuals). This allows for varying the number
+// of residuals for a single autodiff cost function at runtime.
 template <typename CostFunctor,
-          int M,        // Number of residuals, or ceres::DYNAMIC.
+          int kNumResiduals,  // Number of residuals, or ceres::DYNAMIC.
           int N0,       // Number of parameters in block 0.
           int N1 = 0,   // Number of parameters in block 1.
           int N2 = 0,   // Number of parameters in block 2.
@@ -162,28 +163,32 @@
           int N7 = 0,   // Number of parameters in block 7.
           int N8 = 0,   // Number of parameters in block 8.
           int N9 = 0>   // Number of parameters in block 9.
-class AutoDiffCostFunction : public SizedCostFunction<M,
+class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals,
                                                       N0, N1, N2, N3, N4,
                                                       N5, N6, N7, N8, N9> {
  public:
   // Takes ownership of functor. Uses the template-provided value for the
-  // number of residuals ("M").
+  // number of residuals ("kNumResiduals").
   explicit AutoDiffCostFunction(CostFunctor* functor)
       : functor_(functor) {
-    CHECK_NE(M, DYNAMIC) << "Can't run the fixed-size constructor if the "
-                         << "number of residuals is set to ceres::DYNAMIC.";
+    CHECK_NE(kNumResiduals, DYNAMIC)
+        << "Can't run the fixed-size constructor if the "
+        << "number of residuals is set to ceres::DYNAMIC.";
   }
 
-  // Takes ownership of functor. Ignores the template-provided number of
-  // residuals ("M") in favor of the "num_residuals" argument provided.
+  // Takes ownership of functor. Ignores the template-provided
+  // kNumResiduals in favor of the "num_residuals" argument provided.
   //
   // This allows for having autodiff cost functions which return varying
   // numbers of residuals at runtime.
   AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
       : functor_(functor) {
-    CHECK_EQ(M, DYNAMIC) << "Can't run the dynamic-size constructor if the "
-                         << "number of residuals is not ceres::DYNAMIC.";
-    SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
+    CHECK_EQ(kNumResiduals, DYNAMIC)
+        << "Can't run the dynamic-size constructor if the "
+        << "number of residuals is not ceres::DYNAMIC.";
+    SizedCostFunction<kNumResiduals,
+                      N0, N1, N2, N3, N4,
+                      N5, N6, N7, N8, N9>
         ::set_num_residuals(num_residuals);
   }
 
@@ -206,8 +211,9 @@
            N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Differentiate(
                *functor_,
                parameters,
-               SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
-                   ::num_residuals(),
+               SizedCostFunction<kNumResiduals,
+                                 N0, N1, N2, N3, N4,
+                                 N5, N6, N7, N8, N9>::num_residuals(),
                residuals,
                jacobians);
   }
diff --git a/include/ceres/autodiff_local_parameterization.h b/include/ceres/autodiff_local_parameterization.h
index 0aae6c7..c100d48 100644
--- a/include/ceres/autodiff_local_parameterization.h
+++ b/include/ceres/autodiff_local_parameterization.h
@@ -107,11 +107,18 @@
 template <typename Functor, int kGlobalSize, int kLocalSize>
 class AutoDiffLocalParameterization : public LocalParameterization {
  public:
+  AutoDiffLocalParameterization() :
+      functor_(new Functor()) {}
+
+  // Takes ownership of functor.
+  explicit AutoDiffLocalParameterization(Functor* functor) :
+      functor_(functor) {}
+
   virtual ~AutoDiffLocalParameterization() {}
   virtual bool Plus(const double* x,
                     const double* delta,
                     double* x_plus_delta) const {
-    return Functor()(x, delta, x_plus_delta);
+    return (*functor_)(x, delta, x_plus_delta);
   }
 
   virtual bool ComputeJacobian(const double* x, double* jacobian) const {
@@ -128,7 +135,7 @@
     const double* parameter_ptrs[2] = {x, zero_delta};
     double* jacobian_ptrs[2] = { NULL, jacobian };
     return internal::AutoDiff<Functor, double, kGlobalSize, kLocalSize>
-        ::Differentiate(Functor(),
+        ::Differentiate(*functor_,
                         parameter_ptrs,
                         kGlobalSize,
                         x_plus_delta,
@@ -137,6 +144,9 @@
 
   virtual int GlobalSize() const { return kGlobalSize; }
   virtual int LocalSize() const { return kLocalSize; }
+
+ private:
+  internal::scoped_ptr<Functor> functor_;
 };
 
 }  // namespace ceres
diff --git a/include/ceres/c_api.h b/include/ceres/c_api.h
index add68de..71f41fd 100644
--- a/include/ceres/c_api.h
+++ b/include/ceres/c_api.h
@@ -38,12 +38,15 @@
 #ifndef CERES_PUBLIC_C_API_H_
 #define CERES_PUBLIC_C_API_H_
 
+#include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /* Init the Ceres private data. Must be called before anything else. */
-void ceres_init();
+CERES_EXPORT void ceres_init();
 
 /* Equivalent to CostFunction::Evaluate() in the C++ API.
  *
@@ -88,23 +91,23 @@
  *
  * See loss_function.h for the details of each loss function.
  */
-void* ceres_create_huber_loss_function_data(double a);
-void* ceres_create_softl1_loss_function_data(double a);
-void* ceres_create_cauchy_loss_function_data(double a);
-void* ceres_create_arctan_loss_function_data(double a);
-void* ceres_create_tolerant_loss_function_data(double a, double b);
+CERES_EXPORT void* ceres_create_huber_loss_function_data(double a);
+CERES_EXPORT void* ceres_create_softl1_loss_function_data(double a);
+CERES_EXPORT void* ceres_create_cauchy_loss_function_data(double a);
+CERES_EXPORT void* ceres_create_arctan_loss_function_data(double a);
+CERES_EXPORT void* ceres_create_tolerant_loss_function_data(double a, double b);
 
 /* Free the given stock loss function data. */
-void ceres_free_stock_loss_function_data(void* loss_function_data);
+CERES_EXPORT void ceres_free_stock_loss_function_data(void* loss_function_data);
 
 /* This is an implementation of ceres_loss_function_t contained within Ceres
  * itself, intended as a way to access the various stock Ceres loss functions
  * from the C API. This should be passed to ceres_add_residual() below, in
  * combination with a user_data pointer generated by
  * ceres_create_stock_loss_function() above. */
-void ceres_stock_loss_function(void* user_data,
-                               double squared_norm,
-                               double out[3]);
+CERES_EXPORT void ceres_stock_loss_function(void* user_data,
+                                            double squared_norm,
+                                            double out[3]);
 
 /* Equivalent to Problem from the C++ API. */
 struct ceres_problem_s;
@@ -115,11 +118,11 @@
 
 /* Create and destroy a problem */
 /* TODO(keir): Add options for the problem. */
-ceres_problem_t* ceres_create_problem();
-void ceres_free_problem(ceres_problem_t* problem);
+CERES_EXPORT ceres_problem_t* ceres_create_problem();
+CERES_EXPORT void ceres_free_problem(ceres_problem_t* problem);
 
 /* Add a residual block. */
-ceres_residual_block_id_t* ceres_problem_add_residual_block(
+CERES_EXPORT ceres_residual_block_id_t* ceres_problem_add_residual_block(
     ceres_problem_t* problem,
     ceres_cost_function_t cost_function,
     void* cost_function_data,
@@ -130,7 +133,7 @@
     int* parameter_block_sizes,
     double** parameters);
 
-void ceres_solve(ceres_problem_t* problem);
+CERES_EXPORT void ceres_solve(ceres_problem_t* problem);
 
 /* TODO(keir): Figure out a way to pass a config in. */
 
@@ -138,4 +141,6 @@
 }
 #endif
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  /* CERES_PUBLIC_C_API_H_ */
diff --git a/include/ceres/ceres.h b/include/ceres/ceres.h
index 61b8b94..41bd649 100644
--- a/include/ceres/ceres.h
+++ b/include/ceres/ceres.h
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -34,15 +34,14 @@
 #ifndef CERES_PUBLIC_CERES_H_
 #define CERES_PUBLIC_CERES_H_
 
-#define CERES_VERSION 1.7.0
-#define CERES_ABI_VERSION 1.7.0
-
 #include "ceres/autodiff_cost_function.h"
 #include "ceres/autodiff_local_parameterization.h"
 #include "ceres/cost_function.h"
 #include "ceres/cost_function_to_functor.h"
 #include "ceres/covariance.h"
 #include "ceres/crs_matrix.h"
+#include "ceres/dynamic_autodiff_cost_function.h"
+#include "ceres/dynamic_numeric_diff_cost_function.h"
 #include "ceres/iteration_callback.h"
 #include "ceres/jet.h"
 #include "ceres/local_parameterization.h"
@@ -54,5 +53,6 @@
 #include "ceres/sized_cost_function.h"
 #include "ceres/solver.h"
 #include "ceres/types.h"
+#include "ceres/version.h"
 
 #endif  // CERES_PUBLIC_CERES_H_
diff --git a/include/ceres/conditioned_cost_function.h b/include/ceres/conditioned_cost_function.h
index 498d36e..3f0087c 100644
--- a/include/ceres/conditioned_cost_function.h
+++ b/include/ceres/conditioned_cost_function.h
@@ -39,6 +39,7 @@
 #include "ceres/cost_function.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
@@ -70,7 +71,7 @@
 //   ccf_residual[i] = f_i(my_cost_function_residual[i])
 //
 // and the Jacobian will be affected appropriately.
-class ConditionedCostFunction : public CostFunction {
+class CERES_EXPORT ConditionedCostFunction : public CostFunction {
  public:
   // Builds a cost function based on a wrapped cost function, and a
   // per-residual conditioner. Takes ownership of all of the wrapped cost
@@ -93,5 +94,6 @@
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
 
 #endif  // CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_
diff --git a/include/ceres/cost_function.h b/include/ceres/cost_function.h
index 8013e96..45292ec 100644
--- a/include/ceres/cost_function.h
+++ b/include/ceres/cost_function.h
@@ -48,6 +48,7 @@
 #include "ceres/internal/macros.h"
 #include "ceres/internal/port.h"
 #include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
@@ -60,7 +61,7 @@
 // code inheriting from this class is expected to set these two members with the
 // corresponding accessors. This information will be verified by the Problem
 // when added with AddResidualBlock().
-class CostFunction {
+class CERES_EXPORT CostFunction {
  public:
   CostFunction() : num_residuals_(0) {}
 
@@ -115,7 +116,7 @@
                         double* residuals,
                         double** jacobians) const = 0;
 
-  const vector<int16>& parameter_block_sizes() const {
+  const vector<int32>& parameter_block_sizes() const {
     return parameter_block_sizes_;
   }
 
@@ -124,7 +125,7 @@
   }
 
  protected:
-  vector<int16>* mutable_parameter_block_sizes() {
+  vector<int32>* mutable_parameter_block_sizes() {
     return &parameter_block_sizes_;
   }
 
@@ -135,11 +136,13 @@
  private:
   // Cost function signature metadata: number of inputs & their sizes,
   // number of outputs (residuals).
-  vector<int16> parameter_block_sizes_;
+  vector<int32> parameter_block_sizes_;
   int num_residuals_;
   CERES_DISALLOW_COPY_AND_ASSIGN(CostFunction);
 };
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_COST_FUNCTION_H_
diff --git a/include/ceres/cost_function_to_functor.h b/include/ceres/cost_function_to_functor.h
index fa1012d..0d01f77 100644
--- a/include/ceres/cost_function_to_functor.h
+++ b/include/ceres/cost_function_to_functor.h
@@ -127,7 +127,7 @@
         << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
         << N8 << ", " << N9;
 
-    const vector<int16>& parameter_block_sizes =
+    const vector<int32>& parameter_block_sizes =
         cost_function->parameter_block_sizes();
     const int num_parameter_blocks =
         (N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
@@ -679,7 +679,7 @@
   template <typename JetT>
   bool EvaluateWithJets(const JetT** inputs, JetT* output) const {
     const int kNumParameters =  N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
-    const vector<int16>& parameter_block_sizes =
+    const vector<int32>& parameter_block_sizes =
         cost_function_->parameter_block_sizes();
     const int num_parameter_blocks = parameter_block_sizes.size();
     const int num_residuals = cost_function_->num_residuals();
@@ -732,7 +732,7 @@
       output[i].v.setZero();
 
       for (int j = 0; j < num_parameter_blocks; ++j) {
-        const int16 block_size = parameter_block_sizes[j];
+        const int32 block_size = parameter_block_sizes[j];
         for (int k = 0; k < parameter_block_sizes[j]; ++k) {
           output[i].v +=
               jacobian_blocks[j][i * block_size + k] * inputs[j][k].v;
diff --git a/include/ceres/covariance.h b/include/ceres/covariance.h
index 83126b5..35fde4d 100644
--- a/include/ceres/covariance.h
+++ b/include/ceres/covariance.h
@@ -36,6 +36,7 @@
 #include "ceres/internal/port.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
@@ -196,14 +197,14 @@
 //  covariance.GetCovarianceBlock(y, y, covariance_yy)
 //  covariance.GetCovarianceBlock(x, y, covariance_xy)
 //
-class Covariance {
+class CERES_EXPORT Covariance {
  public:
-  struct Options {
+  struct CERES_EXPORT Options {
     Options()
 #ifndef CERES_NO_SUITESPARSE
-        : algorithm_type(SPARSE_QR),
+        : algorithm_type(SUITE_SPARSE_QR),
 #else
-        : algorithm_type(DENSE_SVD),
+        : algorithm_type(EIGEN_SPARSE_QR),
 #endif
           min_reciprocal_condition_number(1e-14),
           null_space_rank(0),
@@ -228,47 +229,22 @@
     //    for small to moderate sized problems. It can handle
     //    full-rank as well as rank deficient Jacobians.
     //
-    // 2. SPARSE_CHOLESKY uses the CHOLMOD sparse Cholesky
-    //    factorization library to compute the decomposition :
-    //
-    //      R'R = J'J
-    //
-    //    and then
-    //
-    //      [J'J]^-1  = [R'R]^-1
-    //
-    //    It a fast algorithm for sparse matrices that should be used
-    //    when the Jacobian matrix J is well conditioned. For
-    //    ill-conditioned matrices, this algorithm can fail
-    //    unpredictabily. This is because Cholesky factorization is
-    //    not a rank-revealing factorization, i.e., it cannot reliably
-    //    detect when the matrix being factorized is not of full
-    //    rank. SuiteSparse/CHOLMOD supplies a heuristic for checking
-    //    if the matrix is rank deficient (cholmod_rcond), but it is
-    //    only a heuristic and can have both false positive and false
-    //    negatives.
-    //
-    //    Recent versions of SuiteSparse (>= 4.2.0) provide a much
-    //    more efficient method for solving for rows of the covariance
-    //    matrix. Therefore, if you are doing SPARSE_CHOLESKY, we
-    //    strongly recommend using a recent version of SuiteSparse.
-    //
-    // 3. SPARSE_QR uses the SuiteSparseQR sparse QR factorization
-    //    library to compute the decomposition
+    // 2. EIGEN_SPARSE_QR uses the sparse QR factorization algorithm
+    //    in Eigen to compute the decomposition
     //
     //      Q * R = J
     //
     //    [J'J]^-1 = [R*R']^-1
     //
-    //    It is a moderately fast algorithm for sparse matrices, which
-    //    at the price of more time and memory than the
-    //    SPARSE_CHOLESKY algorithm is numerically better behaved and
-    //    is rank revealing, i.e., it can reliably detect when the
-    //    Jacobian matrix is rank deficient.
+    //    It is a moderately fast algorithm for sparse matrices.
     //
-    // Neither SPARSE_CHOLESKY or SPARSE_QR are capable of computing
-    // the covariance if the Jacobian is rank deficient.
-
+    // 3. SUITE_SPARSE_QR uses the SuiteSparseQR sparse QR
+    //    factorization algorithm. It uses dense linear algebra and is
+    //    multi-threaded, so for large sparse matrices it is
+    //    significantly faster than EIGEN_SPARSE_QR.
+    //
+    // Neither EIGEN_SPARSE_QR nor SUITE_SPARSE_QR is capable of
+    // computing the covariance if the Jacobian is rank deficient.
     CovarianceAlgorithmType algorithm_type;
 
     // If the Jacobian matrix is near singular, then inverting J'J
@@ -294,29 +270,13 @@
     //    where min_sigma and max_sigma are the minimum and maxiumum
     //    singular values of J respectively.
     //
-    // 2. SPARSE_CHOLESKY
-    //
-    //      cholmod_rcond < min_reciprocal_conditioner_number
-    //
-    //    Here cholmod_rcond is a crude estimate of the reciprocal
-    //    condition number of J'J by using the maximum and minimum
-    //    diagonal entries of the Cholesky factor R. There are no
-    //    theoretical guarantees associated with this test. It can
-    //    give false positives and negatives. Use at your own
-    //    risk. The default value of min_reciprocal_condition_number
-    //    has been set to a conservative value, and sometimes the
-    //    Covariance::Compute may return false even if it is possible
-    //    to estimate the covariance reliably. In such cases, the user
-    //    should exercise their judgement before lowering the value of
-    //    min_reciprocal_condition_number.
-    //
-    // 3. SPARSE_QR
+    // 2. SUITE_SPARSE_QR and EIGEN_SPARSE_QR
     //
     //      rank(J) < num_col(J)
     //
     //   Here rank(J) is the estimate of the rank of J returned by the
-    //   SuiteSparseQR algorithm. It is a fairly reliable indication
-    //   of rank deficiency.
+    //   sparse QR factorization algorithm. It is a fairly reliable
+    //   indication of rank deficiency.
     //
     double min_reciprocal_condition_number;
 
@@ -351,8 +311,8 @@
     //
     //   lambda_i / lambda_max < min_reciprocal_condition_number.
     //
-    // This option has no effect on the SPARSE_CHOLESKY or SPARSE_QR
-    // algorithms.
+    // This option has no effect on the SUITE_SPARSE_QR and
+    // EIGEN_SPARSE_QR algorithms.
     int null_space_rank;
 
     int num_threads;
@@ -419,4 +379,6 @@
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_COVARIANCE_H_
diff --git a/include/ceres/crs_matrix.h b/include/ceres/crs_matrix.h
index 8c470cd..d2d6289 100644
--- a/include/ceres/crs_matrix.h
+++ b/include/ceres/crs_matrix.h
@@ -33,12 +33,13 @@
 
 #include <vector>
 #include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
 // A compressed row sparse matrix used primarily for communicating the
 // Jacobian matrix to the user.
-struct CRSMatrix {
+struct CERES_EXPORT CRSMatrix {
   CRSMatrix() : num_rows(0), num_cols(0) {}
 
   int num_rows;
@@ -80,4 +81,6 @@
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_CRS_MATRIX_H_
diff --git a/include/ceres/dynamic_autodiff_cost_function.h b/include/ceres/dynamic_autodiff_cost_function.h
index 5d8f188..f9342cd 100644
--- a/include/ceres/dynamic_autodiff_cost_function.h
+++ b/include/ceres/dynamic_autodiff_cost_function.h
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -26,18 +26,17 @@
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 // POSSIBILITY OF SUCH DAMAGE.
 //
-// Author: mierle@gmail.com (Keir Mierle)
-//         sameeragarwal@google.com (Sameer Agarwal)
-//         thadh@gmail.com (Thad Hughes)
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         mierle@gmail.com (Keir Mierle)
 //
 // This autodiff implementation differs from the one found in
-// autodiff_cost_function.h by supporting autodiff on cost functions with
-// variable numbers of parameters with variable sizes. With the other
-// implementation, all the sizes (both the number of parameter blocks and the
-// size of each block) must be fixed at compile time.
+// autodiff_cost_function.h by supporting autodiff on cost functions
+// with variable numbers of parameters with variable sizes. With the
+// other implementation, all the sizes (both the number of parameter
+// blocks and the size of each block) must be fixed at compile time.
 //
-// The functor API differs slightly from the API for fixed size autodiff; the
-// expected interface for the cost functors is:
+// The functor API differs slightly from the API for fixed size
+// autodiff; the expected interface for the cost functors is:
 //
 //   struct MyCostFunctor {
 //     template<typename T>
@@ -46,8 +45,9 @@
 //     }
 //   }
 //
-// Since the sizing of the parameters is done at runtime, you must also specify
-// the sizes after creating the dynamic autodiff cost function. For example:
+// Since the sizing of the parameters is done at runtime, you must
+// also specify the sizes after creating the dynamic autodiff cost
+// function. For example:
 //
 //   DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function(
 //       new MyCostFunctor());
@@ -55,10 +55,11 @@
 //   cost_function.AddParameterBlock(10);
 //   cost_function.SetNumResiduals(21);
 //
-// Under the hood, the implementation evaluates the cost function multiple
-// times, computing a small set of the derivatives (four by default, controlled
-// by the Stride template parameter) with each pass. There is a tradeoff with
-// the size of the passes; you may want to experiment with the stride.
+// Under the hood, the implementation evaluates the cost function
+// multiple times, computing a small set of the derivatives (four by
+// default, controlled by the Stride template parameter) with each
+// pass. There is a tradeoff with the size of the passes; you may want
+// to experiment with the stride.
 
 #ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
 #define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
diff --git a/include/ceres/dynamic_numeric_diff_cost_function.h b/include/ceres/dynamic_numeric_diff_cost_function.h
new file mode 100644
index 0000000..2b6e826
--- /dev/null
+++ b/include/ceres/dynamic_numeric_diff_cost_function.h
@@ -0,0 +1,265 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+//         sameeragarwal@google.com (Sameer Agarwal)
+//         thadh@gmail.com (Thad Hughes)
+//
+// This numeric diff implementation differs from the one found in
+// numeric_diff_cost_function.h by supporting numeric diff on cost
+// functions with variable numbers of parameters with variable
+// sizes. With the other implementation, all the sizes (both the
+// number of parameter blocks and the size of each block) must be
+// fixed at compile time.
+//
+// The functor API differs slightly from the API for fixed size
+// numeric diff; the expected interface for the cost functors is:
+//
+//   struct MyCostFunctor {
+//     template<typename T>
+//     bool operator()(double const* const* parameters, double* residuals) const {
+//       // Use parameters[i] to access the i'th parameter block.
+//     }
+//   }
+//
+// Since the sizing of the parameters is done at runtime, you must
+// also specify the sizes after creating the
+// DynamicNumericDiffCostFunction. For example:
+//
+//   DynamicNumericDiffCostFunction<MyCostFunctor, CENTRAL> cost_function(
+//       new MyCostFunctor());
+//   cost_function.AddParameterBlock(5);
+//   cost_function.AddParameterBlock(10);
+//   cost_function.SetNumResiduals(21);
+
+#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
+#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
+
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/numeric_diff.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+template <typename CostFunctor, NumericDiffMethod method = CENTRAL>
+class DynamicNumericDiffCostFunction : public CostFunction {
+ public:
+  explicit DynamicNumericDiffCostFunction(const CostFunctor* functor,
+                                          Ownership ownership = TAKE_OWNERSHIP,
+                                          double relative_step_size = 1e-6)
+      : functor_(functor),
+        ownership_(ownership),
+        relative_step_size_(relative_step_size) {
+  }
+
+  virtual ~DynamicNumericDiffCostFunction() {
+    if (ownership_ != TAKE_OWNERSHIP) {
+      functor_.release();
+    }
+  }
+
+  void AddParameterBlock(int size) {
+    mutable_parameter_block_sizes()->push_back(size);
+  }
+
+  void SetNumResiduals(int num_residuals) {
+    set_num_residuals(num_residuals);
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    CHECK_GT(num_residuals(), 0)
+        << "You must call DynamicNumericDiffCostFunction::SetNumResiduals() "
+        << "before DynamicNumericDiffCostFunction::Evaluate().";
+
+    const vector<int32>& block_sizes = parameter_block_sizes();
+    CHECK(!block_sizes.empty())
+        << "You must call DynamicNumericDiffCostFunction::AddParameterBlock() "
+        << "before DynamicNumericDiffCostFunction::Evaluate().";
+
+    const bool status = EvaluateCostFunctor(parameters, residuals);
+    if (jacobians == NULL || !status) {
+      return status;
+    }
+
+    // Create local space for a copy of the parameters which will get mutated.
+    int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0);
+    vector<double> parameters_copy(parameters_size);
+    vector<double*> parameters_references_copy(block_sizes.size());
+    parameters_references_copy[0] = &parameters_copy[0];
+    for (int block = 1; block < block_sizes.size(); ++block) {
+      parameters_references_copy[block] = parameters_references_copy[block - 1]
+          + block_sizes[block - 1];
+    }
+
+    // Copy the parameters into the local temp space.
+    for (int block = 0; block < block_sizes.size(); ++block) {
+      memcpy(parameters_references_copy[block],
+             parameters[block],
+             block_sizes[block] * sizeof(*parameters[block]));
+    }
+
+    for (int block = 0; block < block_sizes.size(); ++block) {
+      if (jacobians[block] != NULL &&
+          !EvaluateJacobianForParameterBlock(block_sizes[block],
+                                             block,
+                                             relative_step_size_,
+                                             residuals,
+                                             &parameters_references_copy[0],
+                                             jacobians)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+ private:
+  bool EvaluateJacobianForParameterBlock(const int parameter_block_size,
+                                         const int parameter_block,
+                                         const double relative_step_size,
+                                         double const* residuals_at_eval_point,
+                                         double** parameters,
+                                         double** jacobians) const {
+    using Eigen::Map;
+    using Eigen::Matrix;
+    using Eigen::Dynamic;
+    using Eigen::RowMajor;
+
+    typedef Matrix<double, Dynamic, 1> ResidualVector;
+    typedef Matrix<double, Dynamic, 1> ParameterVector;
+    typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix;
+
+    int num_residuals = this->num_residuals();
+
+    Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
+                                           num_residuals,
+                                           parameter_block_size);
+
+    // Mutate one element at a time and then restore.
+    Map<ParameterVector> x_plus_delta(parameters[parameter_block],
+                                      parameter_block_size);
+    ParameterVector x(x_plus_delta);
+    ParameterVector step_size = x.array().abs() * relative_step_size;
+
+    // To handle cases where a parameter is exactly zero, instead use
+    // the mean step_size for the other dimensions.
+    double fallback_step_size = step_size.sum() / step_size.rows();
+    if (fallback_step_size == 0.0) {
+      // If all the parameters are zero, there's no good answer. Use the given
+      // relative step_size as absolute step_size and hope for the best.
+      fallback_step_size = relative_step_size;
+    }
+
+    // For each parameter in the parameter block, use finite
+    // differences to compute the derivative for that parameter.
+    for (int j = 0; j < parameter_block_size; ++j) {
+      if (step_size(j) == 0.0) {
+        // The parameter is exactly zero, so compromise and use the
+        // mean step_size from the other parameters. This can break in
+        // many cases, but it's hard to pick a good number without
+        // problem specific knowledge.
+        step_size(j) = fallback_step_size;
+      }
+      x_plus_delta(j) = x(j) + step_size(j);
+
+      ResidualVector residuals(num_residuals);
+      if (!EvaluateCostFunctor(parameters, &residuals[0])) {
+        // Something went wrong; bail.
+        return false;
+      }
+
+      // Compute this column of the jacobian in 3 steps:
+      // 1. Store residuals for the forward part.
+      // 2. Subtract residuals for the backward (or 0) part.
+      // 3. Divide out the run.
+      parameter_jacobian.col(j).matrix() = residuals;
+
+      double one_over_h = 1 / step_size(j);
+      if (method == CENTRAL) {
+        // Compute the function on the other side of x(j).
+        x_plus_delta(j) = x(j) - step_size(j);
+
+        if (!EvaluateCostFunctor(parameters, &residuals[0])) {
+          // Something went wrong; bail.
+          return false;
+        }
+
+        parameter_jacobian.col(j) -= residuals;
+        one_over_h /= 2;
+      } else {
+        // Forward difference only; reuse existing residuals evaluation.
+        parameter_jacobian.col(j) -=
+            Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
+      }
+      x_plus_delta(j) = x(j);  // Restore x_plus_delta.
+
+      // Divide out the run to get slope.
+      parameter_jacobian.col(j) *= one_over_h;
+    }
+    return true;
+  }
+
+  bool EvaluateCostFunctor(double const* const* parameters,
+                           double* residuals) const {
+    return EvaluateCostFunctorImpl(functor_.get(),
+                                   parameters,
+                                   residuals,
+                                   functor_.get());
+  }
+
+  // Helper templates to allow evaluation of a functor or a
+  // CostFunction.
+  bool EvaluateCostFunctorImpl(const CostFunctor* functor,
+                               double const* const* parameters,
+                               double* residuals,
+                               const void* /* NOT USED */) const {
+    return (*functor)(parameters, residuals);
+  }
+
+  bool EvaluateCostFunctorImpl(const CostFunctor* functor,
+                               double const* const* parameters,
+                               double* residuals,
+                               const CostFunction* /* NOT USED */) const {
+    return functor->Evaluate(parameters, residuals, NULL);
+  }
+
+  internal::scoped_ptr<const CostFunctor> functor_;
+  Ownership ownership_;
+  const double relative_step_size_;
+};
+
+}  // namespace ceres
+
+#endif  // CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
diff --git a/include/ceres/fpclassify.h b/include/ceres/fpclassify.h
index b730832..da8a4d0 100644
--- a/include/ceres/fpclassify.h
+++ b/include/ceres/fpclassify.h
@@ -46,25 +46,24 @@
 namespace ceres {
 
 #if defined(_MSC_VER)
-inline bool IsFinite  (double x) { return _finite(x);                }
-inline bool IsInfinite(double x) { return !_finite(x) && !_isnan(x); }
-inline bool IsNaN     (double x) { return _isnan(x);                 }
+
+inline bool IsFinite  (double x) { return _finite(x) != 0;                   }
+inline bool IsInfinite(double x) { return _finite(x) == 0 && _isnan(x) == 0; }
+inline bool IsNaN     (double x) { return _isnan(x) != 0;                    }
 inline bool IsNormal  (double x) {
   int classification = _fpclass(x);
   return classification == _FPCLASS_NN ||
          classification == _FPCLASS_PN;
 }
-#elif defined(ANDROID)
 
-// On Android when using the GNU STL, the C++ fpclassify functions are not
-// available. Strictly speaking, the std functions are are not standard until
-// C++11. Instead use the C99 macros on Android.
+#elif defined(ANDROID) && defined(_STLPORT_VERSION)
+
+// On Android, when using the STLPort, the C++ isnan and isnormal functions
+// are defined as macros.
 inline bool IsNaN     (double x) { return isnan(x);    }
 inline bool IsNormal  (double x) { return isnormal(x); }
-
 // On Android NDK r6, when using STLPort, the isinf and isfinite functions are
 // not available, so reimplement them.
-#  if defined(_STLPORT_VERSION)
 inline bool IsInfinite(double x) {
   return x ==  std::numeric_limits<double>::infinity() ||
          x == -std::numeric_limits<double>::infinity();
@@ -72,17 +71,15 @@
 inline bool IsFinite(double x) {
   return !isnan(x) && !IsInfinite(x);
 }
-#  else
-inline bool IsFinite  (double x) { return isfinite(x); }
-inline bool IsInfinite(double x) { return isinf(x);    }
-#  endif  // defined(_STLPORT_VERSION)
-#else
+
+# else
+
 // These definitions are for the normal Unix suspects.
-// TODO(keir): Test the "else" with more platforms.
 inline bool IsFinite  (double x) { return std::isfinite(x); }
 inline bool IsInfinite(double x) { return std::isinf(x);    }
 inline bool IsNaN     (double x) { return std::isnan(x);    }
 inline bool IsNormal  (double x) { return std::isnormal(x); }
+
 #endif
 
 }  // namespace ceres
diff --git a/include/ceres/gradient_checker.h b/include/ceres/gradient_checker.h
index 3ec8056..79ebae5 100644
--- a/include/ceres/gradient_checker.h
+++ b/include/ceres/gradient_checker.h
@@ -119,7 +119,7 @@
     // Do a consistency check between the term and the template parameters.
     CHECK_EQ(M, term->num_residuals());
     const int num_residuals = M;
-    const vector<int16>& block_sizes = term->parameter_block_sizes();
+    const vector<int32>& block_sizes = term->parameter_block_sizes();
     const int num_blocks = block_sizes.size();
 
     CHECK_LE(num_blocks, 5) << "Unable to test functions that take more "
diff --git a/include/ceres/internal/autodiff.h b/include/ceres/internal/autodiff.h
index cf21d7a..3a96625 100644
--- a/include/ceres/internal/autodiff.h
+++ b/include/ceres/internal/autodiff.h
@@ -172,7 +172,7 @@
   for (int j = 0; j < N; ++j) {
     dst[j].a = src[j];
     dst[j].v.setZero();
-    dst[j].v[offset + j] = 1.0;
+    dst[j].v[offset + j] = T(1.0);
   }
 }
 
diff --git a/include/ceres/internal/disable_warnings.h b/include/ceres/internal/disable_warnings.h
new file mode 100644
index 0000000..78924de
--- /dev/null
+++ b/include/ceres/internal/disable_warnings.h
@@ -0,0 +1,44 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// This file has the sole purpose to silence warnings when including Ceres.
+
+// This is not your usual header guard. The macro CERES_WARNINGS_DISABLED
+// shows up again in reenable_warnings.h.
+#ifndef CERES_WARNINGS_DISABLED
+#define CERES_WARNINGS_DISABLED
+
+#ifdef _MSC_VER
+#pragma warning( push )
+// Disable the warning C4251 which is triggered by stl classes in
+// Ceres' public interface. To quote MSDN: "C4251 can be ignored "
+// "if you are deriving from a type in the Standard C++ Library"
+#pragma warning( disable : 4251 )
+#endif
+
+#endif  // CERES_WARNINGS_DISABLED
diff --git a/include/ceres/internal/fixed_array.h b/include/ceres/internal/fixed_array.h
index ee264d1..694070b 100644
--- a/include/ceres/internal/fixed_array.h
+++ b/include/ceres/internal/fixed_array.h
@@ -113,7 +113,6 @@
   // REQUIRES: 0 <= i < size()
   // Returns a reference to the "i"th element.
   inline T& operator[](size_type i) {
-    DCHECK_GE(i, 0);
     DCHECK_LT(i, size_);
     return array_[i].element;
   }
@@ -121,7 +120,6 @@
   // REQUIRES: 0 <= i < size()
   // Returns a reference to the "i"th element.
   inline const T& operator[](size_type i) const {
-    DCHECK_GE(i, 0);
     DCHECK_LT(i, size_);
     return array_[i].element;
   }
@@ -168,8 +166,6 @@
       array_((n <= kInlineElements
               ? reinterpret_cast<InnerContainer*>(inline_space_)
               : new InnerContainer[n])) {
-  DCHECK_GE(n, size_t(0));
-
   // Construct only the elements actually used.
   if (array_ == reinterpret_cast<InnerContainer*>(inline_space_)) {
     for (size_t i = 0; i != size_; ++i) {
diff --git a/include/ceres/internal/macros.h b/include/ceres/internal/macros.h
index 388cf30..1ed55be 100644
--- a/include/ceres/internal/macros.h
+++ b/include/ceres/internal/macros.h
@@ -145,12 +145,11 @@
 //
 //   Sprocket* AllocateSprocket() MUST_USE_RESULT;
 //
-#undef MUST_USE_RESULT
 #if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \
   && !defined(COMPILER_ICC)
-#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
+#define CERES_MUST_USE_RESULT __attribute__ ((warn_unused_result))
 #else
-#define MUST_USE_RESULT
+#define CERES_MUST_USE_RESULT
 #endif
 
 // Platform independent macros to get aligned memory allocations.
diff --git a/include/ceres/internal/numeric_diff.h b/include/ceres/internal/numeric_diff.h
index 4058366..5048348 100644
--- a/include/ceres/internal/numeric_diff.h
+++ b/include/ceres/internal/numeric_diff.h
@@ -90,6 +90,7 @@
       const CostFunctor* functor,
       double const* residuals_at_eval_point,
       const double relative_step_size,
+      int num_residuals,
       double **parameters,
       double *jacobian) {
     using Eigen::Map;
@@ -97,15 +98,21 @@
     using Eigen::RowMajor;
     using Eigen::ColMajor;
 
+    const int NUM_RESIDUALS =
+        (kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);
+
     typedef Matrix<double, kNumResiduals, 1> ResidualVector;
     typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
-    typedef Matrix<double, kNumResiduals, kParameterBlockSize,
+    typedef Matrix<double,
+                   kNumResiduals,
+                   kParameterBlockSize,
                    (kParameterBlockSize == 1 &&
-                    kNumResiduals > 1) ? ColMajor : RowMajor> JacobianMatrix;
+                    kNumResiduals > 1) ? ColMajor : RowMajor>
+        JacobianMatrix;
 
 
     Map<JacobianMatrix> parameter_jacobian(jacobian,
-                                           kNumResiduals,
+                                           NUM_RESIDUALS,
                                            kParameterBlockSize);
 
     // Mutate 1 element at a time and then restore.
@@ -125,16 +132,16 @@
 
     // For each parameter in the parameter block, use finite differences to
     // compute the derivative for that parameter.
+
+    ResidualVector residuals(NUM_RESIDUALS);
     for (int j = 0; j < kParameterBlockSize; ++j) {
       const double delta =
           (step_size(j) == 0.0) ? fallback_step_size : step_size(j);
 
       x_plus_delta(j) = x(j) + delta;
 
-      double residuals[kNumResiduals];  // NOLINT
-
       if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
-              functor, parameters, residuals, functor)) {
+              functor, parameters, residuals.data(), functor)) {
         return false;
       }
 
@@ -142,8 +149,7 @@
       // 1. Store residuals for the forward part.
       // 2. Subtract residuals for the backward (or 0) part.
       // 3. Divide out the run.
-      parameter_jacobian.col(j) =
-          Map<const ResidualVector>(residuals, kNumResiduals);
+      parameter_jacobian.col(j) = residuals;
 
       double one_over_delta = 1.0 / delta;
       if (kMethod == CENTRAL) {
@@ -151,17 +157,16 @@
         x_plus_delta(j) = x(j) - delta;
 
         if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
-                functor, parameters, residuals, functor)) {
+                functor, parameters, residuals.data(), functor)) {
           return false;
         }
 
-        parameter_jacobian.col(j) -=
-            Map<ResidualVector>(residuals, kNumResiduals, 1);
+        parameter_jacobian.col(j) -= residuals;
         one_over_delta /= 2;
       } else {
         // Forward difference only; reuse existing residuals evaluation.
         parameter_jacobian.col(j) -=
-            Map<const ResidualVector>(residuals_at_eval_point, kNumResiduals);
+            Map<const ResidualVector>(residuals_at_eval_point, NUM_RESIDUALS);
       }
       x_plus_delta(j) = x(j);  // Restore x_plus_delta.
 
@@ -186,6 +191,7 @@
       const CostFunctor* functor,
       double const* residuals_at_eval_point,
       const double relative_step_size,
+      const int num_residuals,
       double **parameters,
       double *jacobian) {
     LOG(FATAL) << "Control should never reach here.";
diff --git a/include/ceres/internal/port.h b/include/ceres/internal/port.h
index a9fe247..e38eb71 100644
--- a/include/ceres/internal/port.h
+++ b/include/ceres/internal/port.h
@@ -31,8 +31,19 @@
 #ifndef CERES_PUBLIC_INTERNAL_PORT_H_
 #define CERES_PUBLIC_INTERNAL_PORT_H_
 
+// This file needs to compile as c code.
+#ifdef __cplusplus
+
 #include <string>
 
+#include "ceres/internal/config.h"
+
+#if defined(CERES_TR1_MEMORY_HEADER)
+#include <tr1/memory>
+#else
+#include <memory>
+#endif
+
 namespace ceres {
 
 // It is unfortunate that this import of the entire standard namespace is
@@ -45,6 +56,33 @@
 // "string" implementation in the global namespace.
 using std::string;
 
+#if defined(CERES_TR1_SHARED_PTR)
+using std::tr1::shared_ptr;
+#else
+using std::shared_ptr;
+#endif
+
 }  // namespace ceres
 
+#endif  // __cplusplus
+
+// A macro to signal which functions and classes are exported when
+// building a DLL with MSVC.
+//
+// Note that the ordering here is important, CERES_BUILDING_SHARED_LIBRARY
+// is only defined locally when Ceres is compiled, it is never exported to
+// users.  However, in order that we do not have to configure config.h
+// separately for building vs installing, if we are using MSVC and building
+// a shared library, then both CERES_BUILDING_SHARED_LIBRARY and
+// CERES_USING_SHARED_LIBRARY will be defined when Ceres is compiled.
+// Hence it is important that the check for CERES_BUILDING_SHARED_LIBRARY
+// happens first.
+#if defined(_MSC_VER) && defined(CERES_BUILDING_SHARED_LIBRARY)
+# define CERES_EXPORT __declspec(dllexport)
+#elif defined(_MSC_VER) && defined(CERES_USING_SHARED_LIBRARY)
+# define CERES_EXPORT __declspec(dllimport)
+#else
+# define CERES_EXPORT
+#endif
+
 #endif  // CERES_PUBLIC_INTERNAL_PORT_H_
diff --git a/include/ceres/internal/reenable_warnings.h b/include/ceres/internal/reenable_warnings.h
new file mode 100644
index 0000000..1f477d8
--- /dev/null
+++ b/include/ceres/internal/reenable_warnings.h
@@ -0,0 +1,38 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+// This is not your usual header guard. See disable_warnings.h
+#ifdef CERES_WARNINGS_DISABLED
+#undef CERES_WARNINGS_DISABLED
+
+#ifdef _MSC_VER
+#pragma warning( pop )
+#endif
+
+#endif  // CERES_WARNINGS_DISABLED
diff --git a/include/ceres/iteration_callback.h b/include/ceres/iteration_callback.h
index 987c2d9..237ada6 100644
--- a/include/ceres/iteration_callback.h
+++ b/include/ceres/iteration_callback.h
@@ -36,12 +36,13 @@
 #define CERES_PUBLIC_ITERATION_CALLBACK_H_
 
 #include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
 // This struct describes the state of the optimizer after each
 // iteration of the minimization.
-struct IterationSummary {
+struct CERES_EXPORT IterationSummary {
   IterationSummary()
       : iteration(0),
         step_is_valid(false),
@@ -50,6 +51,7 @@
         cost(0.0),
         cost_change(0.0),
         gradient_max_norm(0.0),
+        gradient_norm(0.0),
         step_norm(0.0),
         eta(0.0),
         step_size(0.0),
@@ -100,6 +102,9 @@
   // Infinity norm of the gradient vector.
   double gradient_max_norm;
 
+  // 2-norm of the gradient vector.
+  double gradient_norm;
+
   // 2-norm of the size of the step computed by the optimization
   // algorithm.
   double step_norm;
@@ -207,7 +212,7 @@
 //     const bool log_to_stdout_;
 //   };
 //
-class IterationCallback {
+class CERES_EXPORT IterationCallback {
  public:
   virtual ~IterationCallback() {}
   virtual CallbackReturnType operator()(const IterationSummary& summary) = 0;
@@ -215,4 +220,6 @@
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_ITERATION_CALLBACK_H_
diff --git a/include/ceres/jet.h b/include/ceres/jet.h
index 4d2a857..81f96c7 100644
--- a/include/ceres/jet.h
+++ b/include/ceres/jet.h
@@ -106,8 +106,8 @@
 //   Jet<double, 2> y(1);  // Pick the 1st dual number for y.
 //   Jet<double, 2> z = f(x, y);
 //
-//   LG << "df/dx = " << z.a[0]
-//      << "df/dy = " << z.a[1];
+//   LOG(INFO) << "df/dx = " << z.a[0]
+//             << "df/dy = " << z.a[1];
 //
 // Most users should not use Jet objects directly; a wrapper around Jet objects,
 // which makes computing the derivative, gradient, or jacobian of templated
@@ -192,6 +192,17 @@
     v[k] = T(1.0);
   }
 
+  // Constructor from scalar and vector part
+  // The use of Eigen::DenseBase allows Eigen expressions
+  // to be passed in without being fully evaluated until
+  // they are assigned to v
+  template<typename Derived>
+  Jet(const T& value, const Eigen::DenseBase<Derived> &vIn)
+    : a(value),
+      v(vIn)
+  {
+  }
+
   // Compound operators
   Jet<T, N>& operator+=(const Jet<T, N> &y) {
     *this = *this + y;
@@ -246,101 +257,70 @@
 // Unary -
 template<typename T, int N> inline
 Jet<T, N> operator-(const Jet<T, N>&f) {
-  Jet<T, N> g;
-  g.a = -f.a;
-  g.v = -f.v;
-  return g;
+  return Jet<T, N>(-f.a, -f.v);
 }
 
 // Binary +
 template<typename T, int N> inline
 Jet<T, N> operator+(const Jet<T, N>& f,
                     const Jet<T, N>& g) {
-  Jet<T, N> h;
-  h.a = f.a + g.a;
-  h.v = f.v + g.v;
-  return h;
+  return Jet<T, N>(f.a + g.a, f.v + g.v);
 }
 
 // Binary + with a scalar: x + s
 template<typename T, int N> inline
 Jet<T, N> operator+(const Jet<T, N>& f, T s) {
-  Jet<T, N> h;
-  h.a = f.a + s;
-  h.v = f.v;
-  return h;
+  return Jet<T, N>(f.a + s, f.v);
 }
 
 // Binary + with a scalar: s + x
 template<typename T, int N> inline
 Jet<T, N> operator+(T s, const Jet<T, N>& f) {
-  Jet<T, N> h;
-  h.a = f.a + s;
-  h.v = f.v;
-  return h;
+  return Jet<T, N>(f.a + s, f.v);
 }
 
 // Binary -
 template<typename T, int N> inline
 Jet<T, N> operator-(const Jet<T, N>& f,
                     const Jet<T, N>& g) {
-  Jet<T, N> h;
-  h.a = f.a - g.a;
-  h.v = f.v - g.v;
-  return h;
+  return Jet<T, N>(f.a - g.a, f.v - g.v);
 }
 
 // Binary - with a scalar: x - s
 template<typename T, int N> inline
 Jet<T, N> operator-(const Jet<T, N>& f, T s) {
-  Jet<T, N> h;
-  h.a = f.a - s;
-  h.v = f.v;
-  return h;
+  return Jet<T, N>(f.a - s, f.v);
 }
 
 // Binary - with a scalar: s - x
 template<typename T, int N> inline
 Jet<T, N> operator-(T s, const Jet<T, N>& f) {
-  Jet<T, N> h;
-  h.a = s - f.a;
-  h.v = -f.v;
-  return h;
+  return Jet<T, N>(s - f.a, -f.v);
 }
 
 // Binary *
 template<typename T, int N> inline
 Jet<T, N> operator*(const Jet<T, N>& f,
                     const Jet<T, N>& g) {
-  Jet<T, N> h;
-  h.a = f.a * g.a;
-  h.v = f.a * g.v + f.v * g.a;
-  return h;
+  return Jet<T, N>(f.a * g.a, f.a * g.v + f.v * g.a);
 }
 
 // Binary * with a scalar: x * s
 template<typename T, int N> inline
 Jet<T, N> operator*(const Jet<T, N>& f, T s) {
-  Jet<T, N> h;
-  h.a = f.a * s;
-  h.v = f.v * s;
-  return h;
+  return Jet<T, N>(f.a * s, f.v * s);
 }
 
 // Binary * with a scalar: s * x
 template<typename T, int N> inline
 Jet<T, N> operator*(T s, const Jet<T, N>& f) {
-  Jet<T, N> h;
-  h.a = f.a * s;
-  h.v = f.v * s;
-  return h;
+  return Jet<T, N>(f.a * s, f.v * s);
 }
 
 // Binary /
 template<typename T, int N> inline
 Jet<T, N> operator/(const Jet<T, N>& f,
                     const Jet<T, N>& g) {
-  Jet<T, N> h;
   // This uses:
   //
   //   a + u   (a + u)(b - v)   (a + u)(b - v)
@@ -349,32 +329,22 @@
   //
   // which holds because v*v = 0.
   const T g_a_inverse = T(1.0) / g.a;
-  h.a = f.a * g_a_inverse;
   const T f_a_by_g_a = f.a * g_a_inverse;
-  for (int i = 0; i < N; ++i) {
-    h.v[i] = (f.v[i] - f_a_by_g_a * g.v[i]) * g_a_inverse;
-  }
-  return h;
+  return Jet<T, N>(f.a * g_a_inverse, (f.v - f_a_by_g_a * g.v) * g_a_inverse);
 }
 
 // Binary / with a scalar: s / x
 template<typename T, int N> inline
 Jet<T, N> operator/(T s, const Jet<T, N>& g) {
-  Jet<T, N> h;
-  h.a = s / g.a;
   const T minus_s_g_a_inverse2 = -s / (g.a * g.a);
-  h.v = g.v * minus_s_g_a_inverse2;
-  return h;
+  return Jet<T, N>(s / g.a, g.v * minus_s_g_a_inverse2);
 }
 
 // Binary / with a scalar: x / s
 template<typename T, int N> inline
 Jet<T, N> operator/(const Jet<T, N>& f, T s) {
-  Jet<T, N> h;
   const T s_inverse = 1.0 / s;
-  h.a = f.a * s_inverse;
-  h.v = f.v * s_inverse;
-  return h;
+  return Jet<T, N>(f.a * s_inverse, f.v * s_inverse);
 }
 
 // Binary comparison operators for both scalars and jets.
@@ -433,122 +403,84 @@
 // log(a + h) ~= log(a) + h / a
 template <typename T, int N> inline
 Jet<T, N> log(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = log(f.a);
   const T a_inverse = T(1.0) / f.a;
-  g.v = f.v * a_inverse;
-  return g;
+  return Jet<T, N>(log(f.a), f.v * a_inverse);
 }
 
 // exp(a + h) ~= exp(a) + exp(a) h
 template <typename T, int N> inline
 Jet<T, N> exp(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = exp(f.a);
-  g.v = g.a * f.v;
-  return g;
+  const T tmp = exp(f.a);
+  return Jet<T, N>(tmp, tmp * f.v);
 }
 
 // sqrt(a + h) ~= sqrt(a) + h / (2 sqrt(a))
 template <typename T, int N> inline
 Jet<T, N> sqrt(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = sqrt(f.a);
-  const T two_a_inverse = T(1.0) / (T(2.0) * g.a);
-  g.v = f.v * two_a_inverse;
-  return g;
+  const T tmp = sqrt(f.a);
+  const T two_a_inverse = T(1.0) / (T(2.0) * tmp);
+  return Jet<T, N>(tmp, f.v * two_a_inverse);
 }
 
 // cos(a + h) ~= cos(a) - sin(a) h
 template <typename T, int N> inline
 Jet<T, N> cos(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = cos(f.a);
-  const T sin_a = sin(f.a);
-  g.v = - sin_a * f.v;
-  return g;
+  return Jet<T, N>(cos(f.a), - sin(f.a) * f.v);
 }
 
 // acos(a + h) ~= acos(a) - 1 / sqrt(1 - a^2) h
 template <typename T, int N> inline
 Jet<T, N> acos(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = acos(f.a);
   const T tmp = - T(1.0) / sqrt(T(1.0) - f.a * f.a);
-  g.v = tmp * f.v;
-  return g;
+  return Jet<T, N>(acos(f.a), tmp * f.v);
 }
 
 // sin(a + h) ~= sin(a) + cos(a) h
 template <typename T, int N> inline
 Jet<T, N> sin(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = sin(f.a);
-  const T cos_a = cos(f.a);
-  g.v = cos_a * f.v;
-  return g;
+  return Jet<T, N>(sin(f.a), cos(f.a) * f.v);
 }
 
 // asin(a + h) ~= asin(a) + 1 / sqrt(1 - a^2) h
 template <typename T, int N> inline
 Jet<T, N> asin(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = asin(f.a);
   const T tmp = T(1.0) / sqrt(T(1.0) - f.a * f.a);
-  g.v = tmp * f.v;
-  return g;
+  return Jet<T, N>(asin(f.a), tmp * f.v);
 }
 
 // tan(a + h) ~= tan(a) + (1 + tan(a)^2) h
 template <typename T, int N> inline
 Jet<T, N> tan(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = tan(f.a);
-  double tan_a = tan(f.a);
+  const T tan_a = tan(f.a);
   const T tmp = T(1.0) + tan_a * tan_a;
-  g.v = tmp * f.v;
-  return g;
+  return Jet<T, N>(tan_a, tmp * f.v);
 }
 
 // atan(a + h) ~= atan(a) + 1 / (1 + a^2) h
 template <typename T, int N> inline
 Jet<T, N> atan(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = atan(f.a);
   const T tmp = T(1.0) / (T(1.0) + f.a * f.a);
-  g.v = tmp * f.v;
-  return g;
+  return Jet<T, N>(atan(f.a), tmp * f.v);
 }
 
 // sinh(a + h) ~= sinh(a) + cosh(a) h
 template <typename T, int N> inline
 Jet<T, N> sinh(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = sinh(f.a);
-  const T cosh_a = cosh(f.a);
-  g.v = cosh_a * f.v;
-  return g;
+  return Jet<T, N>(sinh(f.a), cosh(f.a) * f.v);
 }
 
 // cosh(a + h) ~= cosh(a) + sinh(a) h
 template <typename T, int N> inline
 Jet<T, N> cosh(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = cosh(f.a);
-  const T sinh_a = sinh(f.a);
-  g.v = sinh_a * f.v;
-  return g;
+  return Jet<T, N>(cosh(f.a), sinh(f.a) * f.v);
 }
 
 // tanh(a + h) ~= tanh(a) + (1 - tanh(a)^2) h
 template <typename T, int N> inline
 Jet<T, N> tanh(const Jet<T, N>& f) {
-  Jet<T, N> g;
-  g.a = tanh(f.a);
-  double tanh_a = tanh(f.a);
+  const T tanh_a = tanh(f.a);
   const T tmp = T(1.0) - tanh_a * tanh_a;
-  g.v = tmp * f.v;
-  return g;
+  return Jet<T, N>(tanh_a, tmp * f.v);
 }
 
 // Jet Classification. It is not clear what the appropriate semantics are for
@@ -628,36 +560,25 @@
   //   f = a + da
   //   g = b + db
 
-  Jet<T, N> out;
-
-  out.a = atan2(g.a, f.a);
-
-  T const temp = T(1.0) / (f.a * f.a + g.a * g.a);
-  out.v = temp * (- g.a * f.v + f.a * g.v);
-  return out;
+  T const tmp = T(1.0) / (f.a * f.a + g.a * g.a);
+  return Jet<T, N>(atan2(g.a, f.a), tmp * (- g.a * f.v + f.a * g.v));
 }
 
 
-// pow -- base is a differentiatble function, exponent is a constant.
+// pow -- base is a differentiable function, exponent is a constant.
 // (a+da)^p ~= a^p + p*a^(p-1) da
 template <typename T, int N> inline
 Jet<T, N> pow(const Jet<T, N>& f, double g) {
-  Jet<T, N> out;
-  out.a = pow(f.a, g);
-  T const temp = g * pow(f.a, g - T(1.0));
-  out.v = temp * f.v;
-  return out;
+  T const tmp = g * pow(f.a, g - T(1.0));
+  return Jet<T, N>(pow(f.a, g), tmp * f.v);
 }
 
 // pow -- base is a constant, exponent is a differentiable function.
 // (a)^(p+dp) ~= a^p + a^p log(a) dp
 template <typename T, int N> inline
 Jet<T, N> pow(double f, const Jet<T, N>& g) {
-  Jet<T, N> out;
-  out.a = pow(f, g.a);
-  T const temp = log(f) * out.a;
-  out.v = temp * g.v;
-  return out;
+  T const tmp = pow(f, g.a);
+  return Jet<T, N>(tmp, log(f) * tmp * g.v);
 }
 
 
@@ -665,15 +586,11 @@
 // (a+da)^(b+db) ~= a^b + b * a^(b-1) da + a^b log(a) * db
 template <typename T, int N> inline
 Jet<T, N> pow(const Jet<T, N>& f, const Jet<T, N>& g) {
-  Jet<T, N> out;
+  T const tmp1 = pow(f.a, g.a);
+  T const tmp2 = g.a * pow(f.a, g.a - T(1.0));
+  T const tmp3 = tmp1 * log(f.a);
 
-  T const temp1 = pow(f.a, g.a);
-  T const temp2 = g.a * pow(f.a, g.a - T(1.0));
-  T const temp3 = temp1 * log(f.a);
-
-  out.a = temp1;
-  out.v = temp2 * f.v + temp3 * g.v;
-  return out;
+  return Jet<T, N>(tmp1, tmp2 * f.v + tmp3 * g.v);
 }
 
 // Define the helper functions Eigen needs to embed Jet types.
@@ -732,6 +649,8 @@
     return ceres::Jet<T, N>(1e-12);
   }
 
+  static inline Real epsilon() { return Real(std::numeric_limits<T>::epsilon()); }
+
   enum {
     IsComplex = 0,
     IsInteger = 0,
@@ -740,7 +659,8 @@
     AddCost = 1,
     // For Jet types, multiplication is more expensive than addition.
     MulCost = 3,
-    HasFloatingPoint = 1
+    HasFloatingPoint = 1,
+    RequireInitialization = 1
   };
 };
 
diff --git a/include/ceres/local_parameterization.h b/include/ceres/local_parameterization.h
index c0f7dc7..3ab21fc 100644
--- a/include/ceres/local_parameterization.h
+++ b/include/ceres/local_parameterization.h
@@ -34,6 +34,7 @@
 
 #include <vector>
 #include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
@@ -107,7 +108,7 @@
 //
 // The class LocalParameterization defines the function Plus and its
 // Jacobian which is needed to compute the Jacobian of f w.r.t delta.
-class LocalParameterization {
+class CERES_EXPORT LocalParameterization {
  public:
   virtual ~LocalParameterization() {}
 
@@ -133,7 +134,7 @@
 // Some basic parameterizations
 
 // Identity Parameterization: Plus(x, delta) = x + delta
-class IdentityParameterization : public LocalParameterization {
+class CERES_EXPORT IdentityParameterization : public LocalParameterization {
  public:
   explicit IdentityParameterization(int size);
   virtual ~IdentityParameterization() {}
@@ -150,7 +151,7 @@
 };
 
 // Hold a subset of the parameters inside a parameter block constant.
-class SubsetParameterization : public LocalParameterization {
+class CERES_EXPORT SubsetParameterization : public LocalParameterization {
  public:
   explicit SubsetParameterization(int size,
                                   const vector<int>& constant_parameters);
@@ -160,7 +161,9 @@
                     double* x_plus_delta) const;
   virtual bool ComputeJacobian(const double* x,
                                double* jacobian) const;
-  virtual int GlobalSize() const { return constancy_mask_.size(); }
+  virtual int GlobalSize() const {
+    return static_cast<int>(constancy_mask_.size());
+  }
   virtual int LocalSize() const { return local_size_; }
 
  private:
@@ -172,7 +175,7 @@
 // with * being the quaternion multiplication operator. Here we assume
 // that the first element of the quaternion vector is the real (cos
 // theta) part.
-class QuaternionParameterization : public LocalParameterization {
+class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
  public:
   virtual ~QuaternionParameterization() {}
   virtual bool Plus(const double* x,
@@ -186,4 +189,6 @@
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
diff --git a/include/ceres/loss_function.h b/include/ceres/loss_function.h
index b99c184..2c58500 100644
--- a/include/ceres/loss_function.h
+++ b/include/ceres/loss_function.h
@@ -75,14 +75,15 @@
 #ifndef CERES_PUBLIC_LOSS_FUNCTION_H_
 #define CERES_PUBLIC_LOSS_FUNCTION_H_
 
+#include "glog/logging.h"
 #include "ceres/internal/macros.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/types.h"
-#include "glog/logging.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
-class LossFunction {
+class CERES_EXPORT LossFunction {
  public:
   virtual ~LossFunction() {}
 
@@ -128,7 +129,7 @@
 // It is not normally necessary to use this, as passing NULL for the
 // loss function when building the problem accomplishes the same
 // thing.
-class TrivialLoss : public LossFunction {
+class CERES_EXPORT TrivialLoss : public LossFunction {
  public:
   virtual void Evaluate(double, double*) const;
 };
@@ -171,7 +172,7 @@
 //
 // The scaling parameter 'a' corresponds to 'delta' on this page:
 //   http://en.wikipedia.org/wiki/Huber_Loss_Function
-class HuberLoss : public LossFunction {
+class CERES_EXPORT HuberLoss : public LossFunction {
  public:
   explicit HuberLoss(double a) : a_(a), b_(a * a) { }
   virtual void Evaluate(double, double*) const;
@@ -187,7 +188,7 @@
 //   rho(s) = 2 (sqrt(1 + s) - 1).
 //
 // At s = 0: rho = [0, 1, -1/2].
-class SoftLOneLoss : public LossFunction {
+class CERES_EXPORT SoftLOneLoss : public LossFunction {
  public:
   explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) { }
   virtual void Evaluate(double, double*) const;
@@ -204,7 +205,7 @@
 //   rho(s) = log(1 + s).
 //
 // At s = 0: rho = [0, 1, -1].
-class CauchyLoss : public LossFunction {
+class CERES_EXPORT CauchyLoss : public LossFunction {
  public:
   explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) { }
   virtual void Evaluate(double, double*) const;
@@ -225,7 +226,7 @@
 //   rho(s) = a atan(s / a).
 //
 // At s = 0: rho = [0, 1, 0].
-class ArctanLoss : public LossFunction {
+class CERES_EXPORT ArctanLoss : public LossFunction {
  public:
   explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) { }
   virtual void Evaluate(double, double*) const;
@@ -264,7 +265,7 @@
 // concentrated in the range a - b to a + b.
 //
 // At s = 0: rho = [0, ~0, ~0].
-class TolerantLoss : public LossFunction {
+class CERES_EXPORT TolerantLoss : public LossFunction {
  public:
   explicit TolerantLoss(double a, double b);
   virtual void Evaluate(double, double*) const;
@@ -305,7 +306,7 @@
 // function, rho = NULL is a valid input and will result in the input
 // being scaled by a. This provides a simple way of implementing a
 // scaled ResidualBlock.
-class ScaledLoss : public LossFunction {
+class CERES_EXPORT ScaledLoss : public LossFunction {
  public:
   // Constructs a ScaledLoss wrapping another loss function. Takes
   // ownership of the wrapped loss function or not depending on the
@@ -362,7 +363,7 @@
 //
 //  Solve(options, &problem, &summary)
 //
-class LossFunctionWrapper : public LossFunction {
+class CERES_EXPORT LossFunctionWrapper : public LossFunction {
  public:
   LossFunctionWrapper(LossFunction* rho, Ownership ownership)
       : rho_(rho), ownership_(ownership) {
@@ -395,4 +396,6 @@
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_LOSS_FUNCTION_H_
diff --git a/include/ceres/normal_prior.h b/include/ceres/normal_prior.h
index 480a074..df66505 100644
--- a/include/ceres/normal_prior.h
+++ b/include/ceres/normal_prior.h
@@ -36,6 +36,7 @@
 
 #include "ceres/cost_function.h"
 #include "ceres/internal/eigen.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
@@ -56,7 +57,7 @@
 // which would be the case if the covariance matrix S is rank
 // deficient.
 
-class NormalPrior: public CostFunction {
+class CERES_EXPORT NormalPrior: public CostFunction {
  public:
   // Check that the number of rows in the vector b are the same as the
   // number of columns in the matrix A, crash otherwise.
@@ -72,4 +73,6 @@
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_NORMAL_PRIOR_H_
diff --git a/include/ceres/numeric_diff_cost_function.h b/include/ceres/numeric_diff_cost_function.h
index a47a66d..de6b74a 100644
--- a/include/ceres/numeric_diff_cost_function.h
+++ b/include/ceres/numeric_diff_cost_function.h
@@ -95,6 +95,21 @@
 // "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing
 // a 1-dimensional output from two arguments, both 2-dimensional.
 //
+// NumericDiffCostFunction also supports cost functions with a
+// runtime-determined number of residuals. For example:
+//
+//   CostFunction* cost_function
+//       = new NumericDiffCostFunction<MyScalarCostFunctor, CENTRAL, DYNAMIC, 2, 2>(
+//           new CostFunctorWithDynamicNumResiduals(1.0),               ^     ^  ^
+//           TAKE_OWNERSHIP,                                            |     |  |
+//           runtime_number_of_residuals); <----+                       |     |  |
+//                                              |                       |     |  |
+//                                              |                       |     |  |
+//             Actual number of residuals ------+                       |     |  |
+//             Indicate dynamic number of residuals --------------------+     |  |
+//             Dimension of x ------------------------------------------------+  |
+//             Dimension of y ---------------------------------------------------+
+//
 // The framework can currently accommodate cost functions of up to 10
 // independent variables, and there is no limit on the dimensionality
 // of each of them.
@@ -104,8 +119,6 @@
 // central differences begin with, and only after that works, trying forward
 // difference to improve performance.
 //
-// TODO(sameeragarwal): Add support for dynamic number of residuals.
-//
 // WARNING #1: A common beginner's error when first using
 // NumericDiffCostFunction is to get the sizing wrong. In particular,
 // there is a tendency to set the template parameters to (dimension of
@@ -177,17 +190,19 @@
                                N5, N6, N7, N8, N9> {
  public:
   NumericDiffCostFunction(CostFunctor* functor,
+                          Ownership ownership = TAKE_OWNERSHIP,
+                          int num_residuals = kNumResiduals,
                           const double relative_step_size = 1e-6)
       :functor_(functor),
-       ownership_(TAKE_OWNERSHIP),
-       relative_step_size_(relative_step_size) {}
-
-  NumericDiffCostFunction(CostFunctor* functor,
-                          Ownership ownership,
-                          const double relative_step_size = 1e-6)
-      : functor_(functor),
-        ownership_(ownership),
-        relative_step_size_(relative_step_size) {}
+       ownership_(ownership),
+       relative_step_size_(relative_step_size) {
+    if (kNumResiduals == DYNAMIC) {
+      SizedCostFunction<kNumResiduals,
+                        N0, N1, N2, N3, N4,
+                        N5, N6, N7, N8, N9>
+          ::set_num_residuals(num_residuals);
+    }
+  }
 
   ~NumericDiffCostFunction() {
     if (ownership_ != TAKE_OWNERSHIP) {
@@ -216,7 +231,7 @@
       return false;
     }
 
-    if (!jacobians) {
+    if (jacobians == NULL) {
       return true;
     }
 
@@ -264,6 +279,9 @@
                            functor_.get(),                              \
                            residuals,                                   \
                            relative_step_size_,                         \
+                          SizedCostFunction<kNumResiduals,              \
+                           N0, N1, N2, N3, N4,                          \
+                           N5, N6, N7, N8, N9>::num_residuals(),        \
                            parameters_reference_copy.get(),             \
                            jacobians[block])) {                         \
         return false;                                                   \
diff --git a/include/ceres/numeric_diff_functor.h b/include/ceres/numeric_diff_functor.h
index 039e1a1..a29eb97 100644
--- a/include/ceres/numeric_diff_functor.h
+++ b/include/ceres/numeric_diff_functor.h
@@ -124,6 +124,8 @@
                                       kNumResiduals,
                                       N0, N1, N2, N3, N4,
                                       N5, N6, N7, N8, N9>(new Functor,
+                                                          TAKE_OWNERSHIP,
+                                                          kNumResiduals,
                                                           relative_step_size)) {
   }
 
@@ -133,7 +135,10 @@
                                              kNumResiduals,
                                              N0, N1, N2, N3, N4,
                                              N5, N6, N7, N8, N9>(
-                                                 functor, relative_step_size)) {
+                                                 functor,
+                                                 TAKE_OWNERSHIP,
+                                                 kNumResiduals,
+                                                 relative_step_size)) {
   }
 
   bool operator()(const double* x0, double* residuals) const {
diff --git a/include/ceres/ordered_groups.h b/include/ceres/ordered_groups.h
index e373d35..c316d71 100644
--- a/include/ceres/ordered_groups.h
+++ b/include/ceres/ordered_groups.h
@@ -33,7 +33,9 @@
 
 #include <map>
 #include <set>
+#include <vector>
 #include "ceres/internal/port.h"
+#include "glog/logging.h"
 
 namespace ceres {
 
@@ -84,11 +86,8 @@
     element_to_group_.clear();
   }
 
-  // Remove the element, no matter what group it is in. If the element
-  // is not a member of any group, calling this method will result in
-  // a crash.
-  //
-  // Return value indicates if the element was actually removed.
+  // Remove the element, no matter what group it is in. Return value
+  // indicates if the element was actually removed.
   bool Remove(const T element) {
     const int current_group = GroupId(element);
     if (current_group < 0) {
@@ -106,6 +105,20 @@
     return true;
   }
 
+  // Bulk remove elements. The return value indicates the number of
+  // elements successfully removed.
+  int Remove(const vector<T>& elements) {
+    if (NumElements() == 0 || elements.size() == 0) {
+      return 0;
+    }
+
+    int num_removed = 0;
+    for (int i = 0; i < elements.size(); ++i) {
+      num_removed += Remove(elements[i]);
+    }
+    return num_removed;
+  }
+
   // Reverse the order of the groups in place.
   void Reverse() {
     typename map<int, set<T> >::reverse_iterator it =
@@ -159,10 +172,22 @@
     return group_to_elements_.size();
   }
 
+  // The first group with one or more elements. Calling this when
+  // there are no groups with non-zero elements will result in a
+  // crash.
+  int MinNonZeroGroup() const {
+    CHECK_NE(NumGroups(), 0);
+    return group_to_elements_.begin()->first;
+  }
+
   const map<int, set<T> >& group_to_elements() const {
     return group_to_elements_;
   }
 
+  const map<T, int>& element_to_group() const {
+    return element_to_group_;
+  }
+
  private:
   map<int, set<T> > group_to_elements_;
   map<T, int> element_to_group_;
diff --git a/include/ceres/problem.h b/include/ceres/problem.h
index 663616d..b1cb99a 100644
--- a/include/ceres/problem.h
+++ b/include/ceres/problem.h
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -39,11 +39,12 @@
 #include <set>
 #include <vector>
 
+#include "glog/logging.h"
 #include "ceres/internal/macros.h"
 #include "ceres/internal/port.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/types.h"
-#include "glog/logging.h"
+#include "ceres/internal/disable_warnings.h"
 
 
 namespace ceres {
@@ -117,14 +118,14 @@
 //   problem.AddResidualBlock(new MyBinaryCostFunction(...), x2, x3);
 //
 // Please see cost_function.h for details of the CostFunction object.
-class Problem {
+class CERES_EXPORT Problem {
  public:
-  struct Options {
+  struct CERES_EXPORT Options {
     Options()
         : cost_function_ownership(TAKE_OWNERSHIP),
           loss_function_ownership(TAKE_OWNERSHIP),
           local_parameterization_ownership(TAKE_OWNERSHIP),
-          enable_fast_parameter_block_removal(false),
+          enable_fast_removal(false),
           disable_all_safety_checks(false) {}
 
     // These flags control whether the Problem object owns the cost
@@ -138,17 +139,21 @@
     Ownership loss_function_ownership;
     Ownership local_parameterization_ownership;
 
-    // If true, trades memory for a faster RemoveParameterBlock() operation.
+    // If true, trades memory for faster RemoveResidualBlock() and
+    // RemoveParameterBlock() operations.
     //
-    // RemoveParameterBlock() takes time proportional to the size of the entire
-    // Problem. If you only remove parameter blocks from the Problem
-    // occassionaly, this may be acceptable. However, if you are modifying the
-    // Problem frequently, and have memory to spare, then flip this switch to
+    // By default, RemoveParameterBlock() and RemoveResidualBlock() take time
+    // proportional to the size of the entire problem.  If you only ever remove
+    // parameters or residuals from the problem occassionally, this might be
+    // acceptable.  However, if you have memory to spare, enable this option to
     // make RemoveParameterBlock() take time proportional to the number of
-    // residual blocks that depend on it.  The increase in memory usage is an
-    // additonal hash set per parameter block containing all the residuals that
-    // depend on the parameter block.
-    bool enable_fast_parameter_block_removal;
+    // residual blocks that depend on it, and RemoveResidualBlock() take (on
+    // average) constant time.
+    //
+    // The increase in memory usage is twofold: an additonal hash set per
+    // parameter block containing all the residuals that depend on the parameter
+    // block; and a hash set in the problem containing all residuals.
+    bool enable_fast_removal;
 
     // By default, Ceres performs a variety of safety checks when constructing
     // the problem. There is a small but measurable performance penalty to
@@ -276,7 +281,7 @@
   // residual blocks that depend on the parameter are also removed, as
   // described above in RemoveResidualBlock().
   //
-  // If Problem::Options::enable_fast_parameter_block_removal is true, then the
+  // If Problem::Options::enable_fast_removal is true, then the
   // removal is fast (almost constant time). Otherwise, removing a parameter
   // block will incur a scan of the entire Problem object.
   //
@@ -300,7 +305,7 @@
   // Hold the indicated parameter block constant during optimization.
   void SetParameterBlockConstant(double* values);
 
-  // Allow the indicated parameter to vary during optimization.
+  // Allow the indicated parameter block to vary during optimization.
   void SetParameterBlockVariable(double* values);
 
   // Set the local parameterization for one of the parameter blocks.
@@ -312,6 +317,15 @@
   void SetParameterization(double* values,
                            LocalParameterization* local_parameterization);
 
+  // Get the local parameterization object associated with this
+  // parameter block. If there is no parameterization object
+  // associated then NULL is returned.
+  const LocalParameterization* GetParameterization(double* values) const;
+
+  // Set the lower/upper bound for the parameter with position "index".
+  void SetParameterLowerBound(double* values, int index, double lower_bound);
+  void SetParameterUpperBound(double* values, int index, double upper_bound);
+
   // Number of parameter blocks in the problem. Always equals
   // parameter_blocks().size() and parameter_block_sizes().size().
   int NumParameterBlocks() const;
@@ -336,11 +350,34 @@
   // block, then ParameterBlockLocalSize = ParameterBlockSize.
   int ParameterBlockLocalSize(const double* values) const;
 
+  // Is the given parameter block present in this problem or not?
+  bool HasParameterBlock(const double* values) const;
+
   // Fills the passed parameter_blocks vector with pointers to the
   // parameter blocks currently in the problem. After this call,
   // parameter_block.size() == NumParameterBlocks.
   void GetParameterBlocks(vector<double*>* parameter_blocks) const;
 
+  // Fills the passed residual_blocks vector with pointers to the
+  // residual blocks currently in the problem. After this call,
+  // residual_blocks.size() == NumResidualBlocks.
+  void GetResidualBlocks(vector<ResidualBlockId>* residual_blocks) const;
+
+  // Get all the parameter blocks that depend on the given residual block.
+  void GetParameterBlocksForResidualBlock(
+      const ResidualBlockId residual_block,
+      vector<double*>* parameter_blocks) const;
+
+  // Get all the residual blocks that depend on the given parameter block.
+  //
+  // If Problem::Options::enable_fast_removal is true, then
+  // getting the residual blocks is fast and depends only on the number of
+  // residual blocks. Otherwise, getting the residual blocks for a parameter
+  // block will incur a scan of the entire Problem object.
+  void GetResidualBlocksForParameterBlock(
+      const double* values,
+      vector<ResidualBlockId>* residual_blocks) const;
+
   // Options struct to control Problem::Evaluate.
   struct EvaluateOptions {
     EvaluateOptions()
@@ -430,4 +467,6 @@
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_PROBLEM_H_
diff --git a/include/ceres/rotation.h b/include/ceres/rotation.h
index ea0b769..e3dbfe8 100644
--- a/include/ceres/rotation.h
+++ b/include/ceres/rotation.h
@@ -395,7 +395,7 @@
     const MatrixAdapter<T, row_stride, col_stride>& R) {
   static const T kOne = T(1.0);
   const T theta2 = DotProduct(angle_axis, angle_axis);
-  if (theta2 > 0.0) {
+  if (theta2 > T(std::numeric_limits<double>::epsilon())) {
     // We want to be careful to only evaluate the square root if the
     // norm of the angle_axis vector is greater than zero. Otherwise
     // we get a division by zero.
@@ -417,15 +417,15 @@
     R(1, 2) = -wx*sintheta   + wy*wz*(kOne -    costheta);
     R(2, 2) =     costheta   + wz*wz*(kOne -    costheta);
   } else {
-    // At zero, we switch to using the first order Taylor expansion.
+    // Near zero, we switch to using the first order Taylor expansion.
     R(0, 0) =  kOne;
-    R(1, 0) = -angle_axis[2];
-    R(2, 0) =  angle_axis[1];
-    R(0, 1) =  angle_axis[2];
+    R(1, 0) =  angle_axis[2];
+    R(2, 0) = -angle_axis[1];
+    R(0, 1) = -angle_axis[2];
     R(1, 1) =  kOne;
-    R(2, 1) = -angle_axis[0];
-    R(0, 2) = -angle_axis[1];
-    R(1, 2) =  angle_axis[0];
+    R(2, 1) =  angle_axis[0];
+    R(0, 2) =  angle_axis[1];
+    R(1, 2) = -angle_axis[0];
     R(2, 2) = kOne;
   }
 }
@@ -580,12 +580,8 @@
 
 template<typename T> inline
 void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
-  T w[3];
-  T sintheta;
-  T costheta;
-
   const T theta2 = DotProduct(angle_axis, angle_axis);
-  if (theta2 > 0.0) {
+  if (theta2 > T(std::numeric_limits<double>::epsilon())) {
     // Away from zero, use the rodriguez formula
     //
     //   result = pt costheta +
@@ -597,19 +593,25 @@
     // we get a division by zero.
     //
     const T theta = sqrt(theta2);
-    w[0] = angle_axis[0] / theta;
-    w[1] = angle_axis[1] / theta;
-    w[2] = angle_axis[2] / theta;
-    costheta = cos(theta);
-    sintheta = sin(theta);
-    T w_cross_pt[3];
-    CrossProduct(w, pt, w_cross_pt);
-    T w_dot_pt = DotProduct(w, pt);
-    for (int i = 0; i < 3; ++i) {
-      result[i] = pt[i] * costheta +
-          w_cross_pt[i] * sintheta +
-          w[i] * (T(1.0) - costheta) * w_dot_pt;
-    }
+    const T costheta = cos(theta);
+    const T sintheta = sin(theta);
+    const T theta_inverse = 1.0 / theta;
+
+    const T w[3] = { angle_axis[0] * theta_inverse,
+                     angle_axis[1] * theta_inverse,
+                     angle_axis[2] * theta_inverse };
+
+    // Explicitly inlined evaluation of the cross product for
+    // performance reasons.
+    const T w_cross_pt[3] = { w[1] * pt[2] - w[2] * pt[1],
+                              w[2] * pt[0] - w[0] * pt[2],
+                              w[0] * pt[1] - w[1] * pt[0] };
+    const T tmp =
+        (w[0] * pt[0] + w[1] * pt[1] + w[2] * pt[2]) * (T(1.0) - costheta);
+
+    result[0] = pt[0] * costheta + w_cross_pt[0] * sintheta + w[0] * tmp;
+    result[1] = pt[1] * costheta + w_cross_pt[1] * sintheta + w[1] * tmp;
+    result[2] = pt[2] * costheta + w_cross_pt[2] * sintheta + w[2] * tmp;
   } else {
     // Near zero, the first order Taylor approximation of the rotation
     // matrix R corresponding to a vector w and angle w is
@@ -623,13 +625,18 @@
     // and actually performing multiplication with the point pt, gives us
     // R * pt = pt + w x pt.
     //
-    // Switching to the Taylor expansion at zero helps avoid all sorts
-    // of numerical nastiness.
-    T w_cross_pt[3];
-    CrossProduct(angle_axis, pt, w_cross_pt);
-    for (int i = 0; i < 3; ++i) {
-      result[i] = pt[i] + w_cross_pt[i];
-    }
+    // Switching to the Taylor expansion near zero provides meaningful
+    // derivatives when evaluated using Jets.
+    //
+    // Explicitly inlined evaluation of the cross product for
+    // performance reasons.
+    const T w_cross_pt[3] = { angle_axis[1] * pt[2] - angle_axis[2] * pt[1],
+                              angle_axis[2] * pt[0] - angle_axis[0] * pt[2],
+                              angle_axis[0] * pt[1] - angle_axis[1] * pt[0] };
+
+    result[0] = pt[0] + w_cross_pt[0];
+    result[1] = pt[1] + w_cross_pt[1];
+    result[2] = pt[2] + w_cross_pt[2];
   }
 }
 
diff --git a/include/ceres/solver.h b/include/ceres/solver.h
index 25b762a..fdc5457 100644
--- a/include/ceres/solver.h
+++ b/include/ceres/solver.h
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -40,13 +40,14 @@
 #include "ceres/iteration_callback.h"
 #include "ceres/ordered_groups.h"
 #include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
 class Problem;
 
 // Interface for non-linear least squares solvers.
-class Solver {
+class CERES_EXPORT Solver {
  public:
   virtual ~Solver();
 
@@ -55,7 +56,7 @@
   // problems; however, better performance is often obtainable with tweaking.
   //
   // The constants are defined inside types.h
-  struct Options {
+  struct CERES_EXPORT Options {
     // Default constructor that sets up a generic sparse problem.
     Options() {
       minimizer_type = TRUST_REGION;
@@ -91,31 +92,40 @@
       gradient_tolerance = 1e-10;
       parameter_tolerance = 1e-8;
 
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE) && !defined(CERES_ENABLE_LGPL_CODE)
       linear_solver_type = DENSE_QR;
 #else
       linear_solver_type = SPARSE_NORMAL_CHOLESKY;
 #endif
 
       preconditioner_type = JACOBI;
-
+      visibility_clustering_type = CANONICAL_VIEWS;
       dense_linear_algebra_library_type = EIGEN;
+
+      // Choose a default sparse linear algebra library in the order:
+      //
+      //   SUITE_SPARSE > CX_SPARSE > EIGEN_SPARSE
+#if !defined(CERES_NO_SUITESPARSE)
       sparse_linear_algebra_library_type = SUITE_SPARSE;
-#if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE)
+#else
+  #if !defined(CERES_NO_CXSPARSE)
       sparse_linear_algebra_library_type = CX_SPARSE;
+  #else
+    #if defined(CERES_USE_EIGEN_SPARSE)
+      sparse_linear_algebra_library_type = EIGEN_SPARSE;
+    #endif
+  #endif
 #endif
 
-
       num_linear_solver_threads = 1;
-      linear_solver_ordering = NULL;
       use_postordering = false;
+      dynamic_sparsity = false;
       min_linear_solver_iterations = 1;
       max_linear_solver_iterations = 500;
       eta = 1e-1;
       jacobi_scaling = true;
       use_inner_iterations = false;
       inner_iteration_tolerance = 1e-3;
-      inner_iteration_ordering = NULL;
       logging_type = PER_MINIMIZER_ITERATION;
       minimizer_progress_to_stdout = false;
       trust_region_problem_dump_directory = "/tmp";
@@ -126,7 +136,11 @@
       update_state_every_iteration = false;
     }
 
-    ~Options();
+    // Returns true if the options struct has a valid
+    // configuration. Returns false otherwise, and fills in *error
+    // with a message describing the problem.
+    bool IsValid(string* error) const;
+
     // Minimizer options ----------------------------------------
 
     // Ceres supports the two major families of optimization strategies -
@@ -367,7 +381,7 @@
 
     // Minimizer terminates when
     //
-    //   max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
+    //   max_i |x - Project(Plus(x, -g(x))| < gradient_tolerance
     //
     // This value should typically be 1e-4 * function_tolerance.
     double gradient_tolerance;
@@ -385,6 +399,11 @@
     // Type of preconditioner to use with the iterative linear solvers.
     PreconditionerType preconditioner_type;
 
+    // Type of clustering algorithm to use for visibility based
+    // preconditioning. This option is used only when the
+    // preconditioner_type is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL.
+    VisibilityClusteringType visibility_clustering_type;
+
     // Ceres supports using multiple dense linear algebra libraries
     // for dense matrix factorizations. Currently EIGEN and LAPACK are
     // the valid choices. EIGEN is always available, LAPACK refers to
@@ -475,10 +494,7 @@
     // the parameter blocks into two groups, one for the points and one
     // for the cameras, where the group containing the points has an id
     // smaller than the group containing cameras.
-    //
-    // Once assigned, Solver::Options owns this pointer and will
-    // deallocate the memory when destroyed.
-    ParameterBlockOrdering* linear_solver_ordering;
+    shared_ptr<ParameterBlockOrdering> linear_solver_ordering;
 
     // Sparse Cholesky factorization algorithms use a fill-reducing
     // ordering to permute the columns of the Jacobian matrix. There
@@ -501,6 +517,21 @@
     // matrix. Setting use_postordering to true enables this tradeoff.
     bool use_postordering;
 
+    // Some non-linear least squares problems are symbolically dense but
+    // numerically sparse. i.e. at any given state only a small number
+    // of jacobian entries are non-zero, but the position and number of
+    // non-zeros is different depending on the state. For these problems
+    // it can be useful to factorize the sparse jacobian at each solver
+    // iteration instead of including all of the zero entries in a single
+    // general factorization.
+    //
+    // If your problem does not have this property (or you do not know),
+    // then it is probably best to keep this false, otherwise it will
+    // likely lead to worse performance.
+
+    // This settings affects the SPARSE_NORMAL_CHOLESKY solver.
+    bool dynamic_sparsity;
+
     // Some non-linear least squares problems have additional
     // structure in the way the parameter blocks interact that it is
     // beneficial to modify the way the trust region step is computed.
@@ -571,7 +602,7 @@
     //    the lower numbered groups are optimized before the higher
     //    number groups. Each group must be an independent set. Not
     //    all parameter blocks need to be present in the ordering.
-    ParameterBlockOrdering* inner_iteration_ordering;
+    shared_ptr<ParameterBlockOrdering> inner_iteration_ordering;
 
     // Generally speaking, inner iterations make significant progress
     // in the early stages of the solve and then their contribution
@@ -692,13 +723,9 @@
     //
     // The solver does NOT take ownership of these pointers.
     vector<IterationCallback*> callbacks;
-
-    // If non-empty, a summary of the execution of the solver is
-    // recorded to this file.
-    string solver_log;
   };
 
-  struct Summary {
+  struct CERES_EXPORT Summary {
     Summary();
 
     // A brief one line description of the state of the solver after
@@ -709,18 +736,22 @@
     // termination.
     string FullReport() const;
 
+    bool IsSolutionUsable() const;
+
     // Minimizer summary -------------------------------------------------
     MinimizerType minimizer_type;
 
-    SolverTerminationType termination_type;
+    TerminationType termination_type;
 
-    // If the solver did not run, or there was a failure, a
-    // description of the error.
-    string error;
+    // Reason why the solver terminated.
+    string message;
 
-    // Cost of the problem before and after the optimization. See
-    // problem.h for definition of the cost of a problem.
+    // Cost of the problem (value of the objective function) before
+    // the optimization.
     double initial_cost;
+
+    // Cost of the problem (value of the objective function) after the
+    // optimization.
     double final_cost;
 
     // The part of the total cost that comes from residual blocks that
@@ -728,10 +759,21 @@
     // blocks that they depend on were fixed.
     double fixed_cost;
 
+    // IterationSummary for each minimizer iteration in order.
     vector<IterationSummary> iterations;
 
+    // Number of minimizer iterations in which the step was
+    // accepted. Unless use_non_monotonic_steps is true this is also
+    // the number of steps in which the objective function value/cost
+    // went down.
     int num_successful_steps;
+
+    // Number of minimizer iterations in which the step was rejected
+    // either because it did not reduce the cost enough or the step
+    // was not numerically valid.
     int num_unsuccessful_steps;
+
+    // Number of times inner iterations were performed.
     int num_inner_iteration_steps;
 
     // All times reported below are wall times.
@@ -753,58 +795,160 @@
     // Some total of all time spent inside Ceres when Solve is called.
     double total_time_in_seconds;
 
+    // Time (in seconds) spent in the linear solver computing the
+    // trust region step.
     double linear_solver_time_in_seconds;
+
+    // Time (in seconds) spent evaluating the residual vector.
     double residual_evaluation_time_in_seconds;
+
+    // Time (in seconds) spent evaluating the jacobian matrix.
     double jacobian_evaluation_time_in_seconds;
+
+    // Time (in seconds) spent doing inner iterations.
     double inner_iteration_time_in_seconds;
 
-    // Preprocessor summary.
+    // Number of parameter blocks in the problem.
     int num_parameter_blocks;
+
+    // Number of parameters in the probem.
     int num_parameters;
+
+    // Dimension of the tangent space of the problem (or the number of
+    // columns in the Jacobian for the problem). This is different
+    // from num_parameters if a parameter block is associated with a
+    // LocalParameterization
     int num_effective_parameters;
+
+    // Number of residual blocks in the problem.
     int num_residual_blocks;
+
+    // Number of residuals in the problem.
     int num_residuals;
 
+    // Number of parameter blocks in the problem after the inactive
+    // and constant parameter blocks have been removed. A parameter
+    // block is inactive if no residual block refers to it.
     int num_parameter_blocks_reduced;
+
+    // Number of parameters in the reduced problem.
     int num_parameters_reduced;
+
+    // Dimension of the tangent space of the reduced problem (or the
+    // number of columns in the Jacobian for the reduced
+    // problem). This is different from num_parameters_reduced if a
+    // parameter block in the reduced problem is associated with a
+    // LocalParameterization.
     int num_effective_parameters_reduced;
+
+    // Number of residual blocks in the reduced problem.
     int num_residual_blocks_reduced;
+
+    //  Number of residuals in the reduced problem.
     int num_residuals_reduced;
 
-    int num_eliminate_blocks_given;
-    int num_eliminate_blocks_used;
-
+    //  Number of threads specified by the user for Jacobian and
+    //  residual evaluation.
     int num_threads_given;
+
+    // Number of threads actually used by the solver for Jacobian and
+    // residual evaluation. This number is not equal to
+    // num_threads_given if OpenMP is not available.
     int num_threads_used;
 
+    //  Number of threads specified by the user for solving the trust
+    // region problem.
     int num_linear_solver_threads_given;
+
+    // Number of threads actually used by the solver for solving the
+    // trust region problem. This number is not equal to
+    // num_threads_given if OpenMP is not available.
     int num_linear_solver_threads_used;
 
+    // Type of the linear solver requested by the user.
     LinearSolverType linear_solver_type_given;
+
+    // Type of the linear solver actually used. This may be different
+    // from linear_solver_type_given if Ceres determines that the
+    // problem structure is not compatible with the linear solver
+    // requested or if the linear solver requested by the user is not
+    // available, e.g. The user requested SPARSE_NORMAL_CHOLESKY but
+    // no sparse linear algebra library was available.
     LinearSolverType linear_solver_type_used;
 
+    // Size of the elimination groups given by the user as hints to
+    // the linear solver.
     vector<int> linear_solver_ordering_given;
+
+    // Size of the parameter groups used by the solver when ordering
+    // the columns of the Jacobian.  This maybe different from
+    // linear_solver_ordering_given if the user left
+    // linear_solver_ordering_given blank and asked for an automatic
+    // ordering, or if the problem contains some constant or inactive
+    // parameter blocks.
     vector<int> linear_solver_ordering_used;
 
+    // True if the user asked for inner iterations to be used as part
+    // of the optimization.
     bool inner_iterations_given;
+
+    // True if the user asked for inner iterations to be used as part
+    // of the optimization and the problem structure was such that
+    // they were actually performed. e.g., in a problem with just one
+    // parameter block, inner iterations are not performed.
     bool inner_iterations_used;
 
+    // Size of the parameter groups given by the user for performing
+    // inner iterations.
     vector<int> inner_iteration_ordering_given;
+
+    // Size of the parameter groups given used by the solver for
+    // performing inner iterations. This maybe different from
+    // inner_iteration_ordering_given if the user left
+    // inner_iteration_ordering_given blank and asked for an automatic
+    // ordering, or if the problem contains some constant or inactive
+    // parameter blocks.
     vector<int> inner_iteration_ordering_used;
 
+    //  Type of preconditioner used for solving the trust region
+    //  step. Only meaningful when an iterative linear solver is used.
     PreconditionerType preconditioner_type;
 
+    // Type of clustering algorithm used for visibility based
+    // preconditioning. Only meaningful when the preconditioner_type
+    // is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL.
+    VisibilityClusteringType visibility_clustering_type;
+
+    //  Type of trust region strategy.
     TrustRegionStrategyType trust_region_strategy_type;
+
+    //  Type of dogleg strategy used for solving the trust region
+    //  problem.
     DoglegType dogleg_type;
 
+    //  Type of the dense linear algebra library used.
     DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
+
+    // Type of the sparse linear algebra library used.
     SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
 
+    // Type of line search direction used.
     LineSearchDirectionType line_search_direction_type;
+
+    // Type of the line search algorithm used.
     LineSearchType line_search_type;
+
+    //  When performing line search, the degree of the polynomial used
+    //  to approximate the objective function.
     LineSearchInterpolationType line_search_interpolation_type;
+
+    // If the line search direction is NONLINEAR_CONJUGATE_GRADIENT,
+    // then this indicates the particular variant of non-linear
+    // conjugate gradient used.
     NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
 
+    // If the type of the line search direction is LBFGS, then this
+    // indicates the rank of the Hessian approximation.
     int max_lbfgs_rank;
   };
 
@@ -819,10 +963,12 @@
 };
 
 // Helper function which avoids going through the interface.
-void Solve(const Solver::Options& options,
+CERES_EXPORT void Solve(const Solver::Options& options,
            Problem* problem,
            Solver::Summary* summary);
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_SOLVER_H_
diff --git a/include/ceres/types.h b/include/ceres/types.h
index ffa743a..a07c893 100644
--- a/include/ceres/types.h
+++ b/include/ceres/types.h
@@ -40,12 +40,12 @@
 #include <string>
 
 #include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
 
 namespace ceres {
 
 // Basic integer types. These typedefs are in the Ceres namespace to avoid
 // conflicts with other packages having similar typedefs.
-typedef short int16;
 typedef int   int32;
 
 // Argument type used in interfaces that can optionally take ownership
@@ -102,28 +102,62 @@
   // Block diagonal of the Gauss-Newton Hessian.
   JACOBI,
 
+  // Note: The following three preconditioners can only be used with
+  // the ITERATIVE_SCHUR solver. They are well suited for Structure
+  // from Motion problems.
+
   // Block diagonal of the Schur complement. This preconditioner may
   // only be used with the ITERATIVE_SCHUR solver.
   SCHUR_JACOBI,
 
   // Visibility clustering based preconditioners.
   //
-  // These preconditioners are well suited for Structure from Motion
-  // problems, particularly problems arising from community photo
-  // collections. These preconditioners use the visibility structure
-  // of the scene to determine the sparsity structure of the
-  // preconditioner. Requires SuiteSparse/CHOLMOD.
+  // The following two preconditioners use the visibility structure of
+  // the scene to determine the sparsity structure of the
+  // preconditioner. This is done using a clustering algorithm. The
+  // available visibility clustering algorithms are described below.
+  //
+  // Note: Requires SuiteSparse.
   CLUSTER_JACOBI,
   CLUSTER_TRIDIAGONAL
 };
 
+enum VisibilityClusteringType {
+  // Canonical views algorithm as described in
+  //
+  // "Scene Summarization for Online Image Collections", Ian Simon, Noah
+  // Snavely, Steven M. Seitz, ICCV 2007.
+  //
+  // This clustering algorithm can be quite slow, but gives high
+  // quality clusters. The original visibility based clustering paper
+  // used this algorithm.
+  CANONICAL_VIEWS,
+
+  // The classic single linkage algorithm. It is extremely fast as
+  // compared to CANONICAL_VIEWS, but can give slightly poorer
+  // results. For problems with large number of cameras though, this
+  // is generally a pretty good option.
+  //
+  // If you are using SCHUR_JACOBI preconditioner and have SuiteSparse
+  // available, CLUSTER_JACOBI and CLUSTER_TRIDIAGONAL in combination
+  // with the SINGLE_LINKAGE algorithm will generally give better
+  // results.
+  SINGLE_LINKAGE
+};
+
 enum SparseLinearAlgebraLibraryType {
   // High performance sparse Cholesky factorization and approximate
   // minimum degree ordering.
   SUITE_SPARSE,
 
-  // A lightweight replacment for SuiteSparse.
-  CX_SPARSE
+  // A lightweight replacment for SuiteSparse, which does not require
+  // a LAPACK/BLAS implementation. Consequently, its performance is
+  // also a bit lower than SuiteSparse.
+  CX_SPARSE,
+
+  // Eigen's sparse linear algebra routines. In particular Ceres uses
+  // the Simplicial LDLT routines.
+  EIGEN_SPARSE
 };
 
 enum DenseLinearAlgebraLibraryType {
@@ -131,26 +165,6 @@
   LAPACK
 };
 
-enum LinearSolverTerminationType {
-  // Termination criterion was met. For factorization based solvers
-  // the tolerance is assumed to be zero. Any user provided values are
-  // ignored.
-  TOLERANCE,
-
-  // Solver ran for max_num_iterations and terminated before the
-  // termination tolerance could be satified.
-  MAX_ITERATIONS,
-
-  // Solver is stuck and further iterations will not result in any
-  // measurable progress.
-  STAGNATION,
-
-  // Solver failed. Solver was terminated due to numerical errors. The
-  // exact cause of failure depends on the particular solver being
-  // used.
-  FAILURE
-};
-
 // Logging options
 // The options get progressively noisier.
 enum LoggingType {
@@ -239,7 +253,7 @@
 // details see Numerical Optimization by Nocedal & Wright.
 enum NonlinearConjugateGradientType {
   FLETCHER_REEVES,
-  POLAK_RIBIRERE,
+  POLAK_RIBIERE,
   HESTENES_STIEFEL,
 };
 
@@ -293,41 +307,42 @@
   SUBSPACE_DOGLEG
 };
 
-enum SolverTerminationType {
-  // The minimizer did not run at all; usually due to errors in the user's
-  // Problem or the solver options.
-  DID_NOT_RUN,
+enum TerminationType {
+  // Minimizer terminated because one of the convergence criterion set
+  // by the user was satisfied.
+  //
+  // 1.  (new_cost - old_cost) < function_tolerance * old_cost;
+  // 2.  max_i |gradient_i| < gradient_tolerance
+  // 3.  |step|_2 <= parameter_tolerance * ( |x|_2 +  parameter_tolerance)
+  //
+  // The user's parameter blocks will be updated with the solution.
+  CONVERGENCE,
 
-  // The solver ran for maximum number of iterations specified by the
-  // user, but none of the convergence criterion specified by the user
-  // were met.
+  // The solver ran for maximum number of iterations or maximum amount
+  // of time specified by the user, but none of the convergence
+  // criterion specified by the user were met. The user's parameter
+  // blocks will be updated with the solution found so far.
   NO_CONVERGENCE,
 
-  // Minimizer terminated because
-  //  (new_cost - old_cost) < function_tolerance * old_cost;
-  FUNCTION_TOLERANCE,
-
-  // Minimizer terminated because
-  // max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
-  GRADIENT_TOLERANCE,
-
-  // Minimized terminated because
-  //  |step|_2 <= parameter_tolerance * ( |x|_2 +  parameter_tolerance)
-  PARAMETER_TOLERANCE,
-
-  // The minimizer terminated because it encountered a numerical error
-  // that it could not recover from.
-  NUMERICAL_FAILURE,
+  // The minimizer terminated because of an error.  The user's
+  // parameter blocks will not be updated.
+  FAILURE,
 
   // Using an IterationCallback object, user code can control the
   // minimizer. The following enums indicate that the user code was
   // responsible for termination.
+  //
+  // Minimizer terminated successfully because a user
+  // IterationCallback returned SOLVER_TERMINATE_SUCCESSFULLY.
+  //
+  // The user's parameter blocks will be updated with the solution.
+  USER_SUCCESS,
 
-  // User's IterationCallback returned SOLVER_ABORT.
-  USER_ABORT,
-
-  // User's IterationCallback returned SOLVER_TERMINATE_SUCCESSFULLY
-  USER_SUCCESS
+  // Minimizer terminated because because a user IterationCallback
+  // returned SOLVER_ABORT.
+  //
+  // The user's parameter blocks will not be updated.
+  USER_FAILURE
 };
 
 // Enums used by the IterationCallback instances to indicate to the
@@ -370,9 +385,9 @@
   TEXTFILE
 };
 
-// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be specified for
-// the number of residuals. If specified, then the number of residuas for that
-// cost function can vary at runtime.
+// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be
+// specified for the number of residuals. If specified, then the
+// number of residuas for that cost function can vary at runtime.
 enum DimensionType {
   DYNAMIC = -1
 };
@@ -390,74 +405,83 @@
 
 enum CovarianceAlgorithmType {
   DENSE_SVD,
-  SPARSE_CHOLESKY,
-  SPARSE_QR
+  SUITE_SPARSE_QR,
+  EIGEN_SPARSE_QR
 };
 
-const char* LinearSolverTypeToString(LinearSolverType type);
-bool StringToLinearSolverType(string value, LinearSolverType* type);
+CERES_EXPORT const char* LinearSolverTypeToString(
+    LinearSolverType type);
+CERES_EXPORT bool StringToLinearSolverType(string value,
+                                           LinearSolverType* type);
 
-const char* PreconditionerTypeToString(PreconditionerType type);
-bool StringToPreconditionerType(string value, PreconditionerType* type);
+CERES_EXPORT const char* PreconditionerTypeToString(PreconditionerType type);
+CERES_EXPORT bool StringToPreconditionerType(string value,
+                                             PreconditionerType* type);
 
-const char* SparseLinearAlgebraLibraryTypeToString(
+CERES_EXPORT const char* VisibilityClusteringTypeToString(
+    VisibilityClusteringType type);
+CERES_EXPORT bool StringToVisibilityClusteringType(string value,
+                                      VisibilityClusteringType* type);
+
+CERES_EXPORT const char* SparseLinearAlgebraLibraryTypeToString(
     SparseLinearAlgebraLibraryType type);
-bool StringToSparseLinearAlgebraLibraryType(
+CERES_EXPORT bool StringToSparseLinearAlgebraLibraryType(
     string value,
     SparseLinearAlgebraLibraryType* type);
 
-const char* DenseLinearAlgebraLibraryTypeToString(
+CERES_EXPORT const char* DenseLinearAlgebraLibraryTypeToString(
     DenseLinearAlgebraLibraryType type);
-bool StringToDenseLinearAlgebraLibraryType(
+CERES_EXPORT bool StringToDenseLinearAlgebraLibraryType(
     string value,
     DenseLinearAlgebraLibraryType* type);
 
-const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type);
-bool StringToTrustRegionStrategyType(string value,
+CERES_EXPORT const char* TrustRegionStrategyTypeToString(
+    TrustRegionStrategyType type);
+CERES_EXPORT bool StringToTrustRegionStrategyType(string value,
                                      TrustRegionStrategyType* type);
 
-const char* DoglegTypeToString(DoglegType type);
-bool StringToDoglegType(string value, DoglegType* type);
+CERES_EXPORT const char* DoglegTypeToString(DoglegType type);
+CERES_EXPORT bool StringToDoglegType(string value, DoglegType* type);
 
-const char* MinimizerTypeToString(MinimizerType type);
-bool StringToMinimizerType(string value, MinimizerType* type);
+CERES_EXPORT const char* MinimizerTypeToString(MinimizerType type);
+CERES_EXPORT bool StringToMinimizerType(string value, MinimizerType* type);
 
-const char* LineSearchDirectionTypeToString(LineSearchDirectionType type);
-bool StringToLineSearchDirectionType(string value,
+CERES_EXPORT const char* LineSearchDirectionTypeToString(
+    LineSearchDirectionType type);
+CERES_EXPORT bool StringToLineSearchDirectionType(string value,
                                      LineSearchDirectionType* type);
 
-const char* LineSearchTypeToString(LineSearchType type);
-bool StringToLineSearchType(string value, LineSearchType* type);
+CERES_EXPORT const char* LineSearchTypeToString(LineSearchType type);
+CERES_EXPORT bool StringToLineSearchType(string value, LineSearchType* type);
 
-const char* NonlinearConjugateGradientTypeToString(
+CERES_EXPORT const char* NonlinearConjugateGradientTypeToString(
     NonlinearConjugateGradientType type);
-bool StringToNonlinearConjugateGradientType(
+CERES_EXPORT bool StringToNonlinearConjugateGradientType(
     string value,
     NonlinearConjugateGradientType* type);
 
-const char* LineSearchInterpolationTypeToString(
+CERES_EXPORT const char* LineSearchInterpolationTypeToString(
     LineSearchInterpolationType type);
-bool StringToLineSearchInterpolationType(
+CERES_EXPORT bool StringToLineSearchInterpolationType(
     string value,
     LineSearchInterpolationType* type);
 
-const char* CovarianceAlgorithmTypeToString(
+CERES_EXPORT const char* CovarianceAlgorithmTypeToString(
     CovarianceAlgorithmType type);
-bool StringToCovarianceAlgorithmType(
+CERES_EXPORT bool StringToCovarianceAlgorithmType(
     string value,
     CovarianceAlgorithmType* type);
 
-const char* LinearSolverTerminationTypeToString(
-    LinearSolverTerminationType type);
+CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
 
-const char* SolverTerminationTypeToString(SolverTerminationType type);
-
-bool IsSchurType(LinearSolverType type);
-bool IsSparseLinearAlgebraLibraryTypeAvailable(
+CERES_EXPORT bool IsSchurType(LinearSolverType type);
+CERES_EXPORT bool IsSparseLinearAlgebraLibraryTypeAvailable(
     SparseLinearAlgebraLibraryType type);
-bool IsDenseLinearAlgebraLibraryTypeAvailable(
+CERES_EXPORT bool IsDenseLinearAlgebraLibraryTypeAvailable(
     DenseLinearAlgebraLibraryType type);
 
 }  // namespace ceres
 
+#include "ceres/internal/reenable_warnings.h"
+
 #endif  // CERES_PUBLIC_TYPES_H_
diff --git a/include/ceres/version.h b/include/ceres/version.h
new file mode 100644
index 0000000..370b08a
--- /dev/null
+++ b/include/ceres/version.h
@@ -0,0 +1,49 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef CERES_PUBLIC_VERSION_H_
+#define CERES_PUBLIC_VERSION_H_
+
+#define CERES_VERSION_MAJOR 1
+#define CERES_VERSION_MINOR 10
+#define CERES_VERSION_REVISION 0
+#define CERES_VERSION_ABI 1
+
+// Classic CPP stringifcation; the extra level of indirection allows the
+// preprocessor to expand the macro before being converted to a string.
+#define CERES_TO_STRING_HELPER(x) #x
+#define CERES_TO_STRING(x) CERES_TO_STRING_HELPER(x)
+
+// The Ceres version as a string; for example "1.9.0".
+#define CERES_VERSION_STRING CERES_TO_STRING(CERES_VERSION_MAJOR) "." \
+                             CERES_TO_STRING(CERES_VERSION_MINOR) "." \
+                             CERES_TO_STRING(CERES_VERSION_REVISION)
+
+#endif  // CERES_PUBLIC_VERSION_H_
diff --git a/internal/ceres/CMakeLists.txt b/internal/ceres/CMakeLists.txt
index 610e816..4d4f873 100644
--- a/internal/ceres/CMakeLists.txt
+++ b/internal/ceres/CMakeLists.txt
@@ -34,8 +34,8 @@
     block_evaluate_preparer.cc
     block_jacobi_preconditioner.cc
     block_jacobian_writer.cc
-    block_random_access_crs_matrix.cc
     block_random_access_dense_matrix.cc
+    block_random_access_diagonal_matrix.cc
     block_random_access_matrix.cc
     block_random_access_sparse_matrix.cc
     block_sparse_matrix.cc
@@ -43,6 +43,7 @@
     c_api.cc
     canonical_views_clustering.cc
     cgnr_solver.cc
+    callbacks.cc
     compressed_col_sparse_matrix_utils.cc
     compressed_row_jacobian_writer.cc
     compressed_row_sparse_matrix.cc
@@ -58,6 +59,8 @@
     dense_sparse_matrix.cc
     detect_structure.cc
     dogleg_strategy.cc
+    dynamic_compressed_row_jacobian_writer.cc
+    dynamic_compressed_row_sparse_matrix.cc
     evaluator.cc
     file.cc
     gradient_checking_cost_function.cc
@@ -84,13 +87,14 @@
     problem.cc
     problem_impl.cc
     program.cc
+    reorder_program.cc
     residual_block.cc
     residual_block_utils.cc
-    runtime_numeric_diff_cost_function.cc
     schur_complement_solver.cc
     schur_eliminator.cc
     schur_jacobi_preconditioner.cc
     scratch_evaluate_preparer.cc
+    single_linkage_clustering.cc
     solver.cc
     solver_impl.cc
     sparse_matrix.cc
@@ -98,6 +102,7 @@
     split.cc
     stringprintf.cc
     suitesparse.cc
+    summary_utils.cc
     triplet_sparse_matrix.cc
     trust_region_minimizer.cc
     trust_region_strategy.cc
@@ -128,62 +133,31 @@
   FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/*.cc)
 ELSE (SCHUR_SPECIALIZATIONS)
   # Only the fully dynamic solver. The build is much faster this way.
-  FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/schur_eliminator_d_d_d.cc)
+  FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/*_d_d_d.cc)
 ENDIF (SCHUR_SPECIALIZATIONS)
 
-# For Android, use the internal Glog implementation.
-IF (MINIGLOG)
-  ADD_LIBRARY(miniglog STATIC miniglog/glog/logging.cc)
-  INSTALL(TARGETS miniglog
-          EXPORT  CeresExport
-          RUNTIME DESTINATION bin
-          LIBRARY DESTINATION lib${LIB_SUFFIX}
-          ARCHIVE DESTINATION lib${LIB_SUFFIX})
-ENDIF (MINIGLOG)
+# Build the list of dependencies for Ceres based on the current configuration.
+IF (NOT MINIGLOG AND GLOG_FOUND)
+  LIST(APPEND CERES_LIBRARY_PUBLIC_DEPENDENCIES ${GLOG_LIBRARIES})
+ENDIF (NOT MINIGLOG AND GLOG_FOUND)
 
-SET(CERES_LIBRARY_DEPENDENCIES ${GLOG_LIB})
+IF (SUITESPARSE AND SUITESPARSE_FOUND)
+  LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${SUITESPARSE_LIBRARIES})
+ENDIF (SUITESPARSE AND SUITESPARSE_FOUND)
 
-IF (GFLAGS)
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${GFLAGS_LIB})
-ENDIF (GFLAGS)
+IF (CXSPARSE AND CXSPARSE_FOUND)
+  LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${CXSPARSE_LIBRARIES})
+ENDIF (CXSPARSE AND CXSPARSE_FOUND)
 
-IF (SUITESPARSE_FOUND)
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${SUITESPARSEQR_LIB})
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CHOLMOD_LIB})
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CCOLAMD_LIB})
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CAMD_LIB})
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${COLAMD_LIB})
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${AMD_LIB})
-  IF (EXISTS ${SUITESPARSE_CONFIG_LIB})
-    LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${SUITESPARSE_CONFIG_LIB})
-  ENDIF (EXISTS ${SUITESPARSE_CONFIG_LIB})
-
-  IF (EXISTS ${METIS_LIB})
-    LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${METIS_LIB})
-  ENDIF (EXISTS ${METIS_LIB})
-
-  IF (TBB_FOUND)
-    LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${TBB_LIB})
-    LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${TBB_MALLOC_LIB})
-  ENDIF (TBB_FOUND)
-ENDIF (SUITESPARSE_FOUND)
-
-IF (CXSPARSE_FOUND)
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CXSPARSE_LIB})
-ENDIF (CXSPARSE_FOUND)
-
-IF (BLAS_AND_LAPACK_FOUND)
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${LAPACK_LIBRARIES})
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${BLAS_LIBRARIES})
-ENDIF (BLAS_AND_LAPACK_FOUND)
-
-IF (CXSPARSE_FOUND)
-  LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CXSPARSE_LIB})
-ENDIF (CXSPARSE_FOUND)
+IF (BLAS_FOUND AND LAPACK_FOUND)
+  LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${LAPACK_LIBRARIES})
+  LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${BLAS_LIBRARIES})
+ENDIF (BLAS_FOUND AND LAPACK_FOUND)
 
 IF (OPENMP_FOUND)
   IF (NOT MSVC)
-    LIST(APPEND CERES_LIBRARY_DEPENDENCIES gomp)
+    LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES gomp)
+    LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${CMAKE_THREAD_LIBS_INIT})
   ENDIF (NOT MSVC)
 ENDIF (OPENMP_FOUND)
 
@@ -192,12 +166,33 @@
     ${CERES_INTERNAL_HDRS}
     ${CERES_INTERNAL_SCHUR_FILES})
 
+# Primarily for Android, but optionally for others, compile the minimal
+# glog implementation into Ceres.
+IF (MINIGLOG)
+  LIST(APPEND CERES_LIBRARY_SOURCE miniglog/glog/logging.cc)
+ENDIF (MINIGLOG)
+
 ADD_LIBRARY(ceres ${CERES_LIBRARY_SOURCE})
 SET_TARGET_PROPERTIES(ceres PROPERTIES
   VERSION ${CERES_VERSION}
   SOVERSION ${CERES_VERSION_MAJOR}
 )
-TARGET_LINK_LIBRARIES(ceres ${CERES_LIBRARY_DEPENDENCIES})
+
+IF (BUILD_SHARED_LIBS)
+  # When building a shared library, mark all external libraries as
+  # PRIVATE so they don't show up as a dependency.
+  TARGET_LINK_LIBRARIES(ceres
+        LINK_PUBLIC ${CERES_LIBRARY_PUBLIC_DEPENDENCIES}
+        LINK_PRIVATE ${CERES_LIBRARY_PRIVATE_DEPENDENCIES})
+ELSE (BUILD_SHARED_LIBS)
+  # When building a static library, all external libraries are
+  # PUBLIC(default) since the user needs to link to them.
+  # They will be listed in CeresTargets.cmake.
+  SET(CERES_LIBRARY_DEPENDENCIES
+        ${CERES_LIBRARY_PUBLIC_DEPENDENCIES}
+        ${CERES_LIBRARY_PRIVATE_DEPENDENCIES})
+  TARGET_LINK_LIBRARIES(ceres ${CERES_LIBRARY_DEPENDENCIES})
+ENDIF (BUILD_SHARED_LIBS)
 
 INSTALL(TARGETS ceres
         EXPORT  CeresExport
@@ -212,8 +207,15 @@
               numeric_diff_test_utils.cc
               test_util.cc)
 
-  TARGET_LINK_LIBRARIES(gtest ${GFLAGS_LIB} ${GLOG_LIB})
-  TARGET_LINK_LIBRARIES(test_util ceres gtest ${GLOG_LIB})
+  IF (MINIGLOG)
+    # When using miniglog, it is compiled into Ceres, thus Ceres becomes
+    # the library against which other libraries should link for logging.
+    TARGET_LINK_LIBRARIES(gtest ${GFLAGS_LIBRARIES} ceres)
+    TARGET_LINK_LIBRARIES(test_util ceres gtest)
+  ELSE (MINIGLOG)
+    TARGET_LINK_LIBRARIES(gtest ${GFLAGS_LIBRARIES} ${GLOG_LIBRARIES})
+    TARGET_LINK_LIBRARIES(test_util ceres gtest ${GLOG_LIBRARIES})
+  ENDIF (MINIGLOG)
 
   MACRO (CERES_TEST NAME)
     ADD_EXECUTABLE(${NAME}_test ${NAME}_test.cc)
@@ -228,8 +230,8 @@
   CERES_TEST(autodiff)
   CERES_TEST(autodiff_cost_function)
   CERES_TEST(autodiff_local_parameterization)
-  CERES_TEST(block_random_access_crs_matrix)
   CERES_TEST(block_random_access_dense_matrix)
+  CERES_TEST(block_random_access_diagonal_matrix)
   CERES_TEST(block_random_access_sparse_matrix)
   CERES_TEST(block_sparse_matrix)
   CERES_TEST(c_api)
@@ -241,6 +243,8 @@
   CERES_TEST(covariance)
   CERES_TEST(dense_sparse_matrix)
   CERES_TEST(dynamic_autodiff_cost_function)
+  CERES_TEST(dynamic_compressed_row_sparse_matrix)
+  CERES_TEST(dynamic_numeric_diff_cost_function)
   CERES_TEST(evaluator)
   CERES_TEST(gradient_checker)
   CERES_TEST(gradient_checking_cost_function)
@@ -264,20 +268,23 @@
   CERES_TEST(partitioned_matrix_view)
   CERES_TEST(polynomial)
   CERES_TEST(problem)
+  CERES_TEST(program)
+  CERES_TEST(reorder_program)
   CERES_TEST(residual_block)
   CERES_TEST(residual_block_utils)
   CERES_TEST(rotation)
-  CERES_TEST(runtime_numeric_diff_cost_function)
   CERES_TEST(schur_complement_solver)
   CERES_TEST(schur_eliminator)
+  CERES_TEST(single_linkage_clustering)
   CERES_TEST(small_blas)
+  CERES_TEST(solver)
   CERES_TEST(solver_impl)
 
   # TODO(sameeragarwal): This test should ultimately be made
   # independent of SuiteSparse.
-  IF (SUITESPARSE_FOUND)
+  IF (SUITESPARSE AND SUITESPARSE_FOUND)
     CERES_TEST(compressed_col_sparse_matrix_utils)
-  ENDIF (SUITESPARSE_FOUND)
+  ENDIF (SUITESPARSE AND SUITESPARSE_FOUND)
 
   CERES_TEST(symmetric_linear_solver)
   CERES_TEST(triplet_sparse_matrix)
diff --git a/internal/ceres/array_utils.cc b/internal/ceres/array_utils.cc
index 673baa4..205ddaf 100644
--- a/internal/ceres/array_utils.cc
+++ b/internal/ceres/array_utils.cc
@@ -30,9 +30,13 @@
 
 #include "ceres/array_utils.h"
 
+#include <algorithm>
 #include <cmath>
 #include <cstddef>
+#include <string>
+#include <vector>
 #include "ceres/fpclassify.h"
+#include "ceres/stringprintf.h"
 
 namespace ceres {
 namespace internal {
@@ -55,6 +59,20 @@
   return true;
 }
 
+int FindInvalidValue(const int size, const double* x) {
+  if (x == NULL) {
+    return size;
+  }
+
+  for (int i = 0; i < size; ++i) {
+    if (!IsFinite(x[i]) || (x[i] == kImpossibleValue))  {
+      return i;
+    }
+  }
+
+  return size;
+};
+
 void InvalidateArray(const int size, double* x) {
   if (x != NULL) {
     for (int i = 0; i < size; ++i) {
@@ -63,5 +81,33 @@
   }
 }
 
+void AppendArrayToString(const int size, const double* x, string* result) {
+  for (int i = 0; i < size; ++i) {
+    if (x == NULL) {
+      StringAppendF(result, "Not Computed  ");
+    } else {
+      if (x[i] == kImpossibleValue) {
+        StringAppendF(result, "Uninitialized ");
+      } else {
+        StringAppendF(result, "%12g ", x[i]);
+      }
+    }
+  }
+}
+
+void MapValuesToContiguousRange(const int size, int* array) {
+  std::vector<int> unique_values(array, array + size);
+  std::sort(unique_values.begin(), unique_values.end());
+  unique_values.erase(std::unique(unique_values.begin(),
+                                  unique_values.end()),
+                      unique_values.end());
+
+  for (int i = 0; i < size; ++i) {
+    array[i] = std::lower_bound(unique_values.begin(),
+                                unique_values.end(),
+                                array[i]) - unique_values.begin();
+  }
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/array_utils.h b/internal/ceres/array_utils.h
index 742f439..7f56947 100644
--- a/internal/ceres/array_utils.h
+++ b/internal/ceres/array_utils.h
@@ -57,8 +57,31 @@
 // equal to the "impossible" value used by InvalidateArray.
 bool IsArrayValid(int size, const double* x);
 
+// If the array contains an invalid value, return the index for it,
+// otherwise return size.
+int FindInvalidValue(const int size, const double* x);
+
+// Utility routine to print an array of doubles to a string. If the
+// array pointer is NULL, it is treated as an array of zeros.
+void AppendArrayToString(const int size, const double* x, string* result);
+
 extern const double kImpossibleValue;
 
+// This routine takes an array of integer values, sorts and uniques
+// them and then maps each value in the array to its position in the
+// sorted+uniqued array. By doing this, if there are are k unique
+// values in the array, each value is replaced by an integer in the
+// range [0, k-1], while preserving their relative order.
+//
+// For example
+//
+// [1 0 3 5 0 1 5]
+//
+// gets mapped to
+//
+// [1 0 2 3 0 1 3]
+void MapValuesToContiguousRange(int size, int* array);
+
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/array_utils_test.cc b/internal/ceres/array_utils_test.cc
index c19a44a..203a301 100644
--- a/internal/ceres/array_utils_test.cc
+++ b/internal/ceres/array_utils_test.cc
@@ -32,6 +32,7 @@
 
 #include <limits>
 #include <cmath>
+#include <vector>
 #include "gtest/gtest.h"
 
 namespace ceres {
@@ -54,5 +55,68 @@
   EXPECT_FALSE(IsArrayValid(3, x));
 }
 
+TEST(ArrayUtils, FindInvalidIndex) {
+  double x[3];
+  x[0] = 0.0;
+  x[1] = 1.0;
+  x[2] = 2.0;
+  EXPECT_EQ(FindInvalidValue(3, x), 3);
+  x[1] = std::numeric_limits<double>::infinity();
+  EXPECT_EQ(FindInvalidValue(3, x), 1);
+  x[1] = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_EQ(FindInvalidValue(3, x), 1);
+  x[1] = std::numeric_limits<double>::signaling_NaN();
+  EXPECT_EQ(FindInvalidValue(3, x), 1);
+  EXPECT_EQ(FindInvalidValue(1, NULL), 1);
+  InvalidateArray(3, x);
+  EXPECT_EQ(FindInvalidValue(3, x), 0);
+}
+
+TEST(MapValuesToContiguousRange, ContiguousEntries) {
+  vector<int> array;
+  array.push_back(0);
+  array.push_back(1);
+  vector<int> expected = array;
+  MapValuesToContiguousRange(array.size(), &array[0]);
+  EXPECT_EQ(array, expected);
+  array.clear();
+
+  array.push_back(1);
+  array.push_back(0);
+  expected = array;
+  MapValuesToContiguousRange(array.size(), &array[0]);
+  EXPECT_EQ(array, expected);
+}
+
+TEST(MapValuesToContiguousRange, NonContiguousEntries) {
+  vector<int> array;
+  array.push_back(0);
+  array.push_back(2);
+  vector<int> expected;
+  expected.push_back(0);
+  expected.push_back(1);
+  MapValuesToContiguousRange(array.size(), &array[0]);
+  EXPECT_EQ(array, expected);
+}
+
+TEST(MapValuesToContiguousRange, NonContiguousRepeatingEntries) {
+  vector<int> array;
+  array.push_back(3);
+  array.push_back(1);
+  array.push_back(0);
+  array.push_back(0);
+  array.push_back(0);
+  array.push_back(5);
+  vector<int> expected;
+  expected.push_back(2);
+  expected.push_back(1);
+  expected.push_back(0);
+  expected.push_back(0);
+  expected.push_back(0);
+  expected.push_back(3);
+  MapValuesToContiguousRange(array.size(), &array[0]);
+  EXPECT_EQ(array, expected);
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/autodiff_local_parameterization_test.cc b/internal/ceres/autodiff_local_parameterization_test.cc
index 7e90177..a0f705d 100644
--- a/internal/ceres/autodiff_local_parameterization_test.cc
+++ b/internal/ceres/autodiff_local_parameterization_test.cc
@@ -48,7 +48,6 @@
   }
 };
 
-
 TEST(AutoDiffLocalParameterizationTest, IdentityParameterization) {
   AutoDiffLocalParameterization<IdentityPlus, 3, 3>
       parameterization;
@@ -72,6 +71,47 @@
   }
 }
 
+struct ScaledPlus {
+  ScaledPlus(const double &scale_factor)
+     : scale_factor_(scale_factor)
+  {}
+
+  template <typename T>
+  bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
+    for (int i = 0; i < 3; ++i) {
+      x_plus_delta[i] = x[i] + T(scale_factor_) * delta[i];
+    }
+    return true;
+  }
+
+  const double scale_factor_;
+};
+
+TEST(AutoDiffLocalParameterizationTest, ScaledParameterization) {
+  const double kTolerance = 1e-14;
+
+  AutoDiffLocalParameterization<ScaledPlus, 3, 3>
+      parameterization(new ScaledPlus(1.2345));
+
+  double x[3] = {1.0, 2.0, 3.0};
+  double delta[3] = {0.0, 1.0, 2.0};
+  double x_plus_delta[3] = {0.0, 0.0, 0.0};
+  parameterization.Plus(x, delta, x_plus_delta);
+
+  EXPECT_NEAR(x_plus_delta[0], 1.0, kTolerance);
+  EXPECT_NEAR(x_plus_delta[1], 3.2345, kTolerance);
+  EXPECT_NEAR(x_plus_delta[2], 5.469, kTolerance);
+
+  double jacobian[9];
+  parameterization.ComputeJacobian(x, jacobian);
+  int k = 0;
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j, ++k) {
+      EXPECT_NEAR(jacobian[k], (i == j) ? 1.2345 : 0.0, kTolerance);
+    }
+  }
+}
+
 struct QuaternionPlus {
   template<typename T>
   bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
diff --git a/internal/ceres/blas.cc b/internal/ceres/blas.cc
index f79b1eb..b919e13 100644
--- a/internal/ceres/blas.cc
+++ b/internal/ceres/blas.cc
@@ -29,6 +29,7 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include "ceres/blas.h"
+#include "ceres/internal/port.h"
 #include "glog/logging.h"
 
 extern "C" void dsyrk_(char* uplo,
diff --git a/internal/ceres/blas_test.cc b/internal/ceres/blas_test.cc
deleted file mode 100644
index efa7e7b..0000000
--- a/internal/ceres/blas_test.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2013 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-
-#include "ceres/blas.h"
-
-#include "gtest/gtest.h"
-#include "ceres/internal/eigen.h"
-
-namespace ceres {
-namespace internal {
-
-TEST(BLAS, MatrixMatrixMultiply) {
-  const double kTolerance = 1e-16;
-  const int kRowA = 3;
-  const int kColA = 5;
-  Matrix A(kRowA, kColA);
-  A.setOnes();
-
-  const int kRowB = 5;
-  const int kColB = 7;
-  Matrix B(kRowB, kColB);
-  B.setOnes();
-
-  for (int row_stride_c = kRowA; row_stride_c < 3 * kRowA; ++row_stride_c) {
-    for (int col_stride_c = kColB; col_stride_c < 3 * kColB; ++col_stride_c) {
-      Matrix C(row_stride_c, col_stride_c);
-      C.setOnes();
-
-      Matrix C_plus = C;
-      Matrix C_minus = C;
-      Matrix C_assign = C;
-
-      Matrix C_plus_ref = C;
-      Matrix C_minus_ref = C;
-      Matrix C_assign_ref = C;
-      for (int start_row_c = 0; start_row_c + kRowA < row_stride_c; ++start_row_c) {
-        for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
-          C_plus_ref.block(start_row_c, start_col_c, kRowA, kColB) +=
-              A * B;
-
-          MatrixMatrixMultiply<kRowA, kColA, kRowB, kColB, 1>(
-              A.data(), kRowA, kColA,
-              B.data(), kRowB, kColB,
-              C_plus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
-          EXPECT_NEAR((C_plus_ref - C_plus).norm(), 0.0, kTolerance)
-              << "C += A * B \n"
-              << "row_stride_c : " << row_stride_c << "\n"
-              << "col_stride_c : " << col_stride_c << "\n"
-              << "start_row_c  : " << start_row_c << "\n"
-              << "start_col_c  : " << start_col_c << "\n"
-              << "Cref : \n" << C_plus_ref << "\n"
-              << "C: \n" << C_plus;
-
-
-          C_minus_ref.block(start_row_c, start_col_c, kRowA, kColB) -=
-              A * B;
-
-          MatrixMatrixMultiply<kRowA, kColA, kRowB, kColB, -1>(
-              A.data(), kRowA, kColA,
-              B.data(), kRowB, kColB,
-              C_minus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
-           EXPECT_NEAR((C_minus_ref - C_minus).norm(), 0.0, kTolerance)
-              << "C -= A * B \n"
-              << "row_stride_c : " << row_stride_c << "\n"
-              << "col_stride_c : " << col_stride_c << "\n"
-              << "start_row_c  : " << start_row_c << "\n"
-              << "start_col_c  : " << start_col_c << "\n"
-              << "Cref : \n" << C_minus_ref << "\n"
-              << "C: \n" << C_minus;
-
-          C_assign_ref.block(start_row_c, start_col_c, kRowA, kColB) =
-              A * B;
-
-          MatrixMatrixMultiply<kRowA, kColA, kRowB, kColB, 0>(
-              A.data(), kRowA, kColA,
-              B.data(), kRowB, kColB,
-              C_assign.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
-          EXPECT_NEAR((C_assign_ref - C_assign).norm(), 0.0, kTolerance)
-              << "C = A * B \n"
-              << "row_stride_c : " << row_stride_c << "\n"
-              << "col_stride_c : " << col_stride_c << "\n"
-              << "start_row_c  : " << start_row_c << "\n"
-              << "start_col_c  : " << start_col_c << "\n"
-              << "Cref : \n" << C_assign_ref << "\n"
-              << "C: \n" << C_assign;
-        }
-      }
-    }
-  }
-}
-
-TEST(BLAS, MatrixTransposeMatrixMultiply) {
-  const double kTolerance = 1e-16;
-  const int kRowA = 5;
-  const int kColA = 3;
-  Matrix A(kRowA, kColA);
-  A.setOnes();
-
-  const int kRowB = 5;
-  const int kColB = 7;
-  Matrix B(kRowB, kColB);
-  B.setOnes();
-
-  for (int row_stride_c = kColA; row_stride_c < 3 * kColA; ++row_stride_c) {
-    for (int col_stride_c = kColB; col_stride_c <  3 * kColB; ++col_stride_c) {
-      Matrix C(row_stride_c, col_stride_c);
-      C.setOnes();
-
-      Matrix C_plus = C;
-      Matrix C_minus = C;
-      Matrix C_assign = C;
-
-      Matrix C_plus_ref = C;
-      Matrix C_minus_ref = C;
-      Matrix C_assign_ref = C;
-      for (int start_row_c = 0; start_row_c + kColA < row_stride_c; ++start_row_c) {
-        for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
-          C_plus_ref.block(start_row_c, start_col_c, kColA, kColB) +=
-              A.transpose() * B;
-
-          MatrixTransposeMatrixMultiply<kRowA, kColA, kRowB, kColB, 1>(
-              A.data(), kRowA, kColA,
-              B.data(), kRowB, kColB,
-              C_plus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
-          EXPECT_NEAR((C_plus_ref - C_plus).norm(), 0.0, kTolerance)
-              << "C += A' * B \n"
-              << "row_stride_c : " << row_stride_c << "\n"
-              << "col_stride_c : " << col_stride_c << "\n"
-              << "start_row_c  : " << start_row_c << "\n"
-              << "start_col_c  : " << start_col_c << "\n"
-              << "Cref : \n" << C_plus_ref << "\n"
-              << "C: \n" << C_plus;
-
-          C_minus_ref.block(start_row_c, start_col_c, kColA, kColB) -=
-              A.transpose() * B;
-
-          MatrixTransposeMatrixMultiply<kRowA, kColA, kRowB, kColB, -1>(
-              A.data(), kRowA, kColA,
-              B.data(), kRowB, kColB,
-              C_minus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
-          EXPECT_NEAR((C_minus_ref - C_minus).norm(), 0.0, kTolerance)
-              << "C -= A' * B \n"
-              << "row_stride_c : " << row_stride_c << "\n"
-              << "col_stride_c : " << col_stride_c << "\n"
-              << "start_row_c  : " << start_row_c << "\n"
-              << "start_col_c  : " << start_col_c << "\n"
-              << "Cref : \n" << C_minus_ref << "\n"
-              << "C: \n" << C_minus;
-
-          C_assign_ref.block(start_row_c, start_col_c, kColA, kColB) =
-              A.transpose() * B;
-
-          MatrixTransposeMatrixMultiply<kRowA, kColA, kRowB, kColB, 0>(
-              A.data(), kRowA, kColA,
-              B.data(), kRowB, kColB,
-              C_assign.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
-          EXPECT_NEAR((C_assign_ref - C_assign).norm(), 0.0, kTolerance)
-              << "C = A' * B \n"
-              << "row_stride_c : " << row_stride_c << "\n"
-              << "col_stride_c : " << col_stride_c << "\n"
-              << "start_row_c  : " << start_row_c << "\n"
-              << "start_col_c  : " << start_col_c << "\n"
-              << "Cref : \n" << C_assign_ref << "\n"
-              << "C: \n" << C_assign;
-        }
-      }
-    }
-  }
-}
-
-TEST(BLAS, MatrixVectorMultiply) {
-  const double kTolerance = 1e-16;
-  const int kRowA = 5;
-  const int kColA = 3;
-  Matrix A(kRowA, kColA);
-  A.setOnes();
-
-  Vector b(kColA);
-  b.setOnes();
-
-  Vector c(kRowA);
-  c.setOnes();
-
-  Vector c_plus = c;
-  Vector c_minus = c;
-  Vector c_assign = c;
-
-  Vector c_plus_ref = c;
-  Vector c_minus_ref = c;
-  Vector c_assign_ref = c;
-
-  c_plus_ref += A * b;
-  MatrixVectorMultiply<kRowA, kColA, 1>(A.data(), kRowA, kColA,
-                                        b.data(),
-                                        c_plus.data());
-  EXPECT_NEAR((c_plus_ref - c_plus).norm(), 0.0, kTolerance)
-      << "c += A * b \n"
-      << "c_ref : \n" << c_plus_ref << "\n"
-      << "c: \n" << c_plus;
-
-  c_minus_ref -= A * b;
-  MatrixVectorMultiply<kRowA, kColA, -1>(A.data(), kRowA, kColA,
-                                                 b.data(),
-                                                 c_minus.data());
-  EXPECT_NEAR((c_minus_ref - c_minus).norm(), 0.0, kTolerance)
-      << "c += A * b \n"
-      << "c_ref : \n" << c_minus_ref << "\n"
-      << "c: \n" << c_minus;
-
-  c_assign_ref = A * b;
-  MatrixVectorMultiply<kRowA, kColA, 0>(A.data(), kRowA, kColA,
-                                                  b.data(),
-                                                  c_assign.data());
-  EXPECT_NEAR((c_assign_ref - c_assign).norm(), 0.0, kTolerance)
-      << "c += A * b \n"
-      << "c_ref : \n" << c_assign_ref << "\n"
-      << "c: \n" << c_assign;
-}
-
-TEST(BLAS, MatrixTransposeVectorMultiply) {
-  const double kTolerance = 1e-16;
-  const int kRowA = 5;
-  const int kColA = 3;
-  Matrix A(kRowA, kColA);
-  A.setRandom();
-
-  Vector b(kRowA);
-  b.setRandom();
-
-  Vector c(kColA);
-  c.setOnes();
-
-  Vector c_plus = c;
-  Vector c_minus = c;
-  Vector c_assign = c;
-
-  Vector c_plus_ref = c;
-  Vector c_minus_ref = c;
-  Vector c_assign_ref = c;
-
-  c_plus_ref += A.transpose() * b;
-  MatrixTransposeVectorMultiply<kRowA, kColA, 1>(A.data(), kRowA, kColA,
-                                                 b.data(),
-                                                 c_plus.data());
-  EXPECT_NEAR((c_plus_ref - c_plus).norm(), 0.0, kTolerance)
-      << "c += A' * b \n"
-      << "c_ref : \n" << c_plus_ref << "\n"
-      << "c: \n" << c_plus;
-
-  c_minus_ref -= A.transpose() * b;
-  MatrixTransposeVectorMultiply<kRowA, kColA, -1>(A.data(), kRowA, kColA,
-                                                 b.data(),
-                                                 c_minus.data());
-  EXPECT_NEAR((c_minus_ref - c_minus).norm(), 0.0, kTolerance)
-      << "c += A' * b \n"
-      << "c_ref : \n" << c_minus_ref << "\n"
-      << "c: \n" << c_minus;
-
-  c_assign_ref = A.transpose() * b;
-  MatrixTransposeVectorMultiply<kRowA, kColA, 0>(A.data(), kRowA, kColA,
-                                                  b.data(),
-                                                  c_assign.data());
-  EXPECT_NEAR((c_assign_ref - c_assign).norm(), 0.0, kTolerance)
-      << "c += A' * b \n"
-      << "c_ref : \n" << c_assign_ref << "\n"
-      << "c: \n" << c_assign;
-}
-
-}  // namespace internal
-}  // namespace ceres
diff --git a/internal/ceres/block_jacobi_preconditioner.cc b/internal/ceres/block_jacobi_preconditioner.cc
index 29974d4..19b749b 100644
--- a/internal/ceres/block_jacobi_preconditioner.cc
+++ b/internal/ceres/block_jacobi_preconditioner.cc
@@ -94,7 +94,9 @@
       //
       //   MatrixRef(blocks_[cells[c].block_id],
       //             col_block_size,
-      //             col_block_size).selfadjointView<Eigen::Upper>().rankUpdate(m);
+      //             col_block_size)
+      //      .selfadjointView<Eigen::Upper>()
+      //      .rankUpdate(m);
       //
     }
   }
diff --git a/internal/ceres/block_random_access_crs_matrix.cc b/internal/ceres/block_random_access_crs_matrix.cc
deleted file mode 100644
index 5b008e2..0000000
--- a/internal/ceres/block_random_access_crs_matrix.cc
+++ /dev/null
@@ -1,170 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2013 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-
-#include "ceres/block_random_access_crs_matrix.h"
-
-#include <algorithm>
-#include <set>
-#include <utility>
-#include <vector>
-#include "ceres/compressed_row_sparse_matrix.h"
-#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/mutex.h"
-#include "ceres/triplet_sparse_matrix.h"
-#include "ceres/types.h"
-#include "glog/logging.h"
-
-namespace ceres {
-namespace internal {
-
-BlockRandomAccessCRSMatrix::BlockRandomAccessCRSMatrix(
-    const vector<int>& blocks,
-    const set<pair<int, int> >& block_pairs)
-    : kMaxRowBlocks(10 * 1000 * 1000),
-      blocks_(blocks) {
-  CHECK_LT(blocks.size(), kMaxRowBlocks);
-
-  col_layout_.resize(blocks_.size(), 0);
-  row_strides_.resize(blocks_.size(), 0);
-
-  // Build the row/column layout vector and count the number of scalar
-  // rows/columns.
-  int num_cols = 0;
-  for (int i = 0; i < blocks_.size(); ++i) {
-    col_layout_[i] = num_cols;
-    num_cols += blocks_[i];
-  }
-
-  // Walk the sparsity pattern and count the number of non-zeros.
-  int num_nonzeros = 0;
-  for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
-       it != block_pairs.end();
-       ++it) {
-    const int row_block_size = blocks_[it->first];
-    const int col_block_size = blocks_[it->second];
-    num_nonzeros += row_block_size * col_block_size;
-  }
-
-  VLOG(2) << "Matrix Size [" << num_cols
-          << "," << num_cols
-          << "] " << num_nonzeros;
-
-  crsm_.reset(new CompressedRowSparseMatrix(num_cols, num_cols, num_nonzeros));
-  int* rows = crsm_->mutable_rows();
-  int* cols = crsm_->mutable_cols();
-  double* values = crsm_->mutable_values();
-
-  // Iterate over the sparsity pattern and fill the scalar sparsity
-  // pattern of the underlying compressed sparse row matrix. Along the
-  // way also fill out the Layout object which will allow random
-  // access into the CRS Matrix.
-  set<pair<int, int> >::const_iterator it = block_pairs.begin();
-  vector<int> col_blocks;
-  int row_pos = 0;
-  rows[0] = 0;
-  while (it != block_pairs.end()) {
-    // Add entries to layout_ for all the blocks for this row.
-    col_blocks.clear();
-    const int row_block_id = it->first;
-    const int row_block_size = blocks_[row_block_id];
-    int num_cols = 0;
-    while ((it != block_pairs.end()) && (it->first == row_block_id)) {
-      layout_[IntPairToLong(it->first, it->second)] =
-          new CellInfo(values + num_cols);
-      col_blocks.push_back(it->second);
-      num_cols += blocks_[it->second];
-      ++it;
-    };
-
-    // Count the number of non-zeros in the row block.
-    for (int j = 0; j < row_block_size; ++j) {
-      rows[row_pos + j + 1] = rows[row_pos + j] + num_cols;
-    }
-
-    // Fill out the sparsity pattern for each row.
-    int col_pos = 0;
-    for (int j = 0; j < col_blocks.size(); ++j) {
-      const int col_block_id = col_blocks[j];
-      const int col_block_size = blocks_[col_block_id];
-      for (int r = 0; r < row_block_size; ++r) {
-        const int column_block_begin = rows[row_pos + r] + col_pos;
-        for (int c = 0; c < col_block_size; ++c) {
-          cols[column_block_begin + c] = col_layout_[col_block_id] + c;
-        }
-      }
-      col_pos += col_block_size;
-    }
-
-    row_pos += row_block_size;
-    values += row_block_size * num_cols;
-    row_strides_[row_block_id] = num_cols;
-  }
-}
-
-// Assume that the user does not hold any locks on any cell blocks
-// when they are calling SetZero.
-BlockRandomAccessCRSMatrix::~BlockRandomAccessCRSMatrix() {
-  // TODO(sameeragarwal) this should be rationalized going forward and
-  // perhaps moved into BlockRandomAccessMatrix.
-  for (LayoutType::iterator it = layout_.begin();
-       it != layout_.end();
-       ++it) {
-    delete it->second;
-  }
-}
-
-CellInfo* BlockRandomAccessCRSMatrix::GetCell(int row_block_id,
-                                              int col_block_id,
-                                              int* row,
-                                              int* col,
-                                              int* row_stride,
-                                              int* col_stride) {
-  const LayoutType::iterator it  =
-      layout_.find(IntPairToLong(row_block_id, col_block_id));
-  if (it == layout_.end()) {
-    return NULL;
-  }
-
-  *row = 0;
-  *col = 0;
-  *row_stride = blocks_[row_block_id];
-  *col_stride = row_strides_[row_block_id];
-  return it->second;
-}
-
-// Assume that the user does not hold any locks on any cell blocks
-// when they are calling SetZero.
-void BlockRandomAccessCRSMatrix::SetZero() {
-  crsm_->SetZero();
-}
-
-}  // namespace internal
-}  // namespace ceres
diff --git a/internal/ceres/block_random_access_crs_matrix_test.cc b/internal/ceres/block_random_access_crs_matrix_test.cc
deleted file mode 100644
index 1266c4f..0000000
--- a/internal/ceres/block_random_access_crs_matrix_test.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2013 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-
-#include <limits>
-#include <vector>
-#include "ceres/block_random_access_crs_matrix.h"
-#include "ceres/internal/eigen.h"
-#include "glog/logging.h"
-#include "gtest/gtest.h"
-
-namespace ceres {
-namespace internal {
-
-TEST(BlockRandomAccessCRSMatrix, GetCell) {
-  vector<int> blocks;
-  blocks.push_back(3);
-  blocks.push_back(4);
-  blocks.push_back(5);
-  const int num_rows = 3 + 4 + 5;
-
-  set< pair<int, int> > block_pairs;
-  int num_nonzeros = 0;
-  block_pairs.insert(make_pair(0, 0));
-  num_nonzeros += blocks[0] * blocks[0];
-
-  block_pairs.insert(make_pair(1, 1));
-  num_nonzeros += blocks[1] * blocks[1];
-
-  block_pairs.insert(make_pair(1, 2));
-  num_nonzeros += blocks[1] * blocks[2];
-
-  block_pairs.insert(make_pair(2, 0));
-  num_nonzeros += blocks[2] * blocks[0];
-
-  BlockRandomAccessCRSMatrix m(blocks, block_pairs);
-  EXPECT_EQ(m.num_rows(), num_rows);
-  EXPECT_EQ(m.num_cols(), num_rows);
-
-  for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
-       it != block_pairs.end();
-       ++it) {
-    const int row_block_id = it->first;
-    const int col_block_id = it->second;
-    int row;
-    int col;
-    int row_stride;
-    int col_stride;
-    CellInfo* cell =  m.GetCell(row_block_id, col_block_id,
-                                &row, &col,
-                                &row_stride, &col_stride);
-    EXPECT_TRUE(cell != NULL);
-    EXPECT_EQ(row, 0);
-    EXPECT_EQ(col, 0);
-
-    // Write into the block.
-    MatrixRef(cell->values, row_stride, col_stride).block(
-        row, col, blocks[row_block_id], blocks[col_block_id]) =
-        (row_block_id + 1) * (col_block_id +1) *
-        Matrix::Ones(blocks[row_block_id], blocks[col_block_id]);
-  }
-
-  const CompressedRowSparseMatrix* crsm = m.matrix();
-  EXPECT_EQ(crsm->num_nonzeros(), num_nonzeros);
-
-  Matrix dense;
-  crsm->ToDenseMatrix(&dense);
-
-  double kTolerance = 1e-14;
-
-  // (0,0)
-  EXPECT_NEAR((dense.block(0, 0, 3, 3) - Matrix::Ones(3, 3)).norm(),
-              0.0,
-              kTolerance);
-  // (1,1)
-  EXPECT_NEAR((dense.block(3, 3, 4, 4) - 2 * 2 * Matrix::Ones(4, 4)).norm(),
-              0.0,
-              kTolerance);
-  // (1,2)
-  EXPECT_NEAR((dense.block(3, 3 + 4, 4, 5) - 2 * 3 * Matrix::Ones(4, 5)).norm(),
-              0.0,
-              kTolerance);
-  // (2,0)
-  EXPECT_NEAR((dense.block(3 + 4, 0, 5, 3) - 3 * 1 * Matrix::Ones(5, 3)).norm(),
-              0.0,
-              kTolerance);
-
-  // There is nothing else in the matrix besides these four blocks.
-  EXPECT_NEAR(dense.norm(), sqrt(9. + 16. * 16. + 36. * 20. + 9. * 15.),
-              kTolerance);
-}
-
-// IntPairToLong is private, thus this fixture is needed to access and
-// test it.
-class BlockRandomAccessCRSMatrixTest : public ::testing::Test {
- public:
-  virtual void SetUp() {
-    vector<int> blocks;
-    blocks.push_back(1);
-    set< pair<int, int> > block_pairs;
-    block_pairs.insert(make_pair(0, 0));
-    m_.reset(new BlockRandomAccessCRSMatrix(blocks, block_pairs));
-  }
-
-  void CheckIntPair(int a, int b) {
-    int64 value = m_->IntPairToLong(a, b);
-    EXPECT_GT(value, 0) << "Overflow a = " << a << " b = " << b;
-    EXPECT_GT(value, a) << "Overflow a = " << a << " b = " << b;
-    EXPECT_GT(value, b) << "Overflow a = " << a << " b = " << b;
-  }
-
- private:
-  scoped_ptr<BlockRandomAccessCRSMatrix> m_;
-};
-
-TEST_F(BlockRandomAccessCRSMatrixTest, IntPairToLongOverflow) {
-  CheckIntPair(numeric_limits<int>::max(), numeric_limits<int>::max());
-}
-
-
-}  // namespace internal
-}  // namespace ceres
diff --git a/internal/ceres/block_random_access_diagonal_matrix.cc b/internal/ceres/block_random_access_diagonal_matrix.cc
new file mode 100644
index 0000000..d8bf4ef
--- /dev/null
+++ b/internal/ceres/block_random_access_diagonal_matrix.cc
@@ -0,0 +1,120 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_random_access_diagonal_matrix.h"
+
+#include <algorithm>
+#include <set>
+#include <utility>
+#include <vector>
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "ceres/stl_util.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+BlockRandomAccessDiagonalMatrix::BlockRandomAccessDiagonalMatrix(
+    const vector<int>& blocks)
+    : blocks_(blocks) {
+  // Build the row/column layout vector and count the number of scalar
+  // rows/columns.
+  int num_cols = 0;
+  int num_nonzeros = 0;
+  vector<int> col_layout;
+  for (int i = 0; i < blocks_.size(); ++i) {
+    col_layout.push_back(num_cols);
+    num_cols += blocks_[i];
+    num_nonzeros += blocks_[i] * blocks_[i];
+  }
+
+  VLOG(1) << "Matrix Size [" << num_cols
+          << "," << num_cols
+          << "] " << num_nonzeros;
+
+  tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
+  tsm_->set_num_nonzeros(num_nonzeros);
+  int* rows = tsm_->mutable_rows();
+  int* cols = tsm_->mutable_cols();
+  double* values = tsm_->mutable_values();
+
+  int pos = 0;
+  for (int i = 0; i < blocks_.size(); ++i) {
+    const int block_size = blocks_[i];
+    layout_.push_back(new CellInfo(values + pos));
+    const int block_begin = col_layout[i];
+    for (int r = 0; r < block_size; ++r) {
+      for (int c = 0; c < block_size; ++c, ++pos) {
+        rows[pos] = block_begin + r;
+        cols[pos] = block_begin + c;
+      }
+    }
+  }
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when they are calling SetZero.
+BlockRandomAccessDiagonalMatrix::~BlockRandomAccessDiagonalMatrix() {
+  STLDeleteContainerPointers(layout_.begin(), layout_.end());
+}
+
+CellInfo* BlockRandomAccessDiagonalMatrix::GetCell(int row_block_id,
+                                                   int col_block_id,
+                                                   int* row,
+                                                   int* col,
+                                                   int* row_stride,
+                                                   int* col_stride) {
+  if (row_block_id != col_block_id) {
+    return NULL;
+  }
+  const int stride = blocks_[row_block_id];
+
+  // Each cell is stored contiguously as its own little dense matrix.
+  *row = 0;
+  *col = 0;
+  *row_stride = stride;
+  *col_stride = stride;
+  return layout_[row_block_id];
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when they are calling SetZero.
+void BlockRandomAccessDiagonalMatrix::SetZero() {
+  if (tsm_->num_nonzeros()) {
+    VectorRef(tsm_->mutable_values(),
+              tsm_->num_nonzeros()).setZero();
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_random_access_crs_matrix.h b/internal/ceres/block_random_access_diagonal_matrix.h
similarity index 63%
rename from internal/ceres/block_random_access_crs_matrix.h
rename to internal/ceres/block_random_access_diagonal_matrix.h
index 11a203b..6b3cff2 100644
--- a/internal/ceres/block_random_access_crs_matrix.h
+++ b/internal/ceres/block_random_access_diagonal_matrix.h
@@ -28,16 +28,16 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_CRS_MATRIX_H_
-#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_CRS_MATRIX_H_
+#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
 
 #include <set>
 #include <vector>
 #include <utility>
 #include "ceres/mutex.h"
 #include "ceres/block_random_access_matrix.h"
-#include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/collections_port.h"
+#include "ceres/triplet_sparse_matrix.h"
 #include "ceres/integral_types.h"
 #include "ceres/internal/macros.h"
 #include "ceres/internal/port.h"
@@ -47,19 +47,16 @@
 namespace ceres {
 namespace internal {
 
-// A square BlockRandomAccessMatrix where the underlying storage is a
-// compressed row sparse matrix. The matrix need not be symmetric.
-class BlockRandomAccessCRSMatrix : public BlockRandomAccessMatrix {
+// A thread safe block diagonal matrix implementation of
+// BlockRandomAccessMatrix.
+class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
  public:
-  // blocks is an array of block sizes. block_pairs is a set of
-  // <row_block_id, col_block_id> pairs to identify the non-zero cells
-  // of this matrix.
-  BlockRandomAccessCRSMatrix(const vector<int>& blocks,
-                             const set<pair<int, int> >& block_pairs);
+  // blocks is an array of block sizes.
+  BlockRandomAccessDiagonalMatrix(const vector<int>& blocks);
 
   // The destructor is not thread safe. It assumes that no one is
   // modifying any cells when the matrix is being destroyed.
-  virtual ~BlockRandomAccessCRSMatrix();
+  virtual ~BlockRandomAccessDiagonalMatrix();
 
   // BlockRandomAccessMatrix Interface.
   virtual CellInfo* GetCell(int row_block_id,
@@ -74,35 +71,26 @@
   virtual void SetZero();
 
   // Since the matrix is square, num_rows() == num_cols().
-  virtual int num_rows() const { return crsm_->num_rows(); }
-  virtual int num_cols() const { return crsm_->num_cols(); }
+  virtual int num_rows() const { return tsm_->num_rows(); }
+  virtual int num_cols() const { return tsm_->num_cols(); }
 
-    // Access to the underlying matrix object.
-  const CompressedRowSparseMatrix* matrix() const { return crsm_.get(); }
-  CompressedRowSparseMatrix* mutable_matrix() { return crsm_.get(); }
+  // Access to the underlying matrix object.
+  const TripletSparseMatrix* matrix() const { return tsm_.get(); }
+  TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
 
  private:
-  int64 IntPairToLong(int a, int b) {
-    return a * kMaxRowBlocks + b;
-  }
-
-  const int64 kMaxRowBlocks;
   // row/column block sizes.
   const vector<int> blocks_;
-  vector<int> col_layout_;
-  vector<int> row_strides_;
+  vector<CellInfo*> layout_;
 
-  // A mapping from <row_block_id, col_block_id> to the position in
-  // the values array of tsm_ where the block is stored.
-  typedef HashMap<long int, CellInfo* > LayoutType;
-  LayoutType layout_;
+  // The underlying matrix object which actually stores the cells.
+  scoped_ptr<TripletSparseMatrix> tsm_;
 
-  scoped_ptr<CompressedRowSparseMatrix> crsm_;
-  friend class BlockRandomAccessCRSMatrixTest;
-  CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessCRSMatrix);
+  friend class BlockRandomAccessDiagonalMatrixTest;
+  CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDiagonalMatrix);
 };
 
 }  // namespace internal
 }  // namespace ceres
 
-#endif  // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_CRS_MATRIX_H_
+#endif  // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
diff --git a/internal/ceres/block_random_access_diagonal_matrix_test.cc b/internal/ceres/block_random_access_diagonal_matrix_test.cc
new file mode 100644
index 0000000..e19268b
--- /dev/null
+++ b/internal/ceres/block_random_access_diagonal_matrix_test.cc
@@ -0,0 +1,116 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <limits>
+#include <vector>
+
+#include "ceres/block_random_access_diagonal_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(BlockRandomAccessDiagonalMatrix, GetCell) {
+  vector<int> blocks;
+  blocks.push_back(3);
+  blocks.push_back(4);
+  blocks.push_back(5);
+  const int num_rows = 3 + 4 + 5;
+  const int num_nonzeros =  3 * 3 + 4 * 4 + 5 * 5;
+
+  BlockRandomAccessDiagonalMatrix m(blocks);
+  EXPECT_EQ(m.num_rows(), num_rows);
+  EXPECT_EQ(m.num_cols(), num_rows);
+
+  for (int i = 0; i < blocks.size(); ++i) {
+    const int row_block_id = i;
+    int col_block_id;
+    int row;
+    int col;
+    int row_stride;
+    int col_stride;
+
+    for (int j = 0; j < blocks.size(); ++j) {
+      col_block_id = j;
+      CellInfo* cell =  m.GetCell(row_block_id, col_block_id,
+                                  &row, &col,
+                                  &row_stride, &col_stride);
+      // Off diagonal entries are not present.
+      if (i != j) {
+        EXPECT_TRUE(cell == NULL);
+        continue;
+      }
+
+      EXPECT_TRUE(cell != NULL);
+      EXPECT_EQ(row, 0);
+      EXPECT_EQ(col, 0);
+      EXPECT_EQ(row_stride, blocks[row_block_id]);
+      EXPECT_EQ(col_stride, blocks[col_block_id]);
+
+      // Write into the block
+      MatrixRef(cell->values, row_stride, col_stride).block(
+          row, col, blocks[row_block_id], blocks[col_block_id]) =
+          (row_block_id + 1) * (col_block_id +1) *
+          Matrix::Ones(blocks[row_block_id], blocks[col_block_id]);
+    }
+  }
+
+  const TripletSparseMatrix* tsm = m.matrix();
+  EXPECT_EQ(tsm->num_nonzeros(), num_nonzeros);
+  EXPECT_EQ(tsm->max_num_nonzeros(), num_nonzeros);
+
+  Matrix dense;
+  tsm->ToDenseMatrix(&dense);
+
+  double kTolerance = 1e-14;
+
+  // (0,0)
+  EXPECT_NEAR((dense.block(0, 0, 3, 3) - Matrix::Ones(3, 3)).norm(),
+              0.0,
+              kTolerance);
+
+  // (1,1)
+  EXPECT_NEAR((dense.block(3, 3, 4, 4) - 2 * 2 * Matrix::Ones(4, 4)).norm(),
+              0.0,
+              kTolerance);
+
+  // (2,2)
+  EXPECT_NEAR((dense.block(7, 7, 5, 5) - 3 * 3 * Matrix::Ones(5, 5)).norm(),
+              0.0,
+              kTolerance);
+
+  // There is nothing else in the matrix besides these three blocks.
+  EXPECT_NEAR(dense.norm(), sqrt(9.0 + 16. * 16. + 81.0 * 25.), kTolerance);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_random_access_sparse_matrix.h b/internal/ceres/block_random_access_sparse_matrix.h
index a6b5f39..27b1029 100644
--- a/internal/ceres/block_random_access_sparse_matrix.h
+++ b/internal/ceres/block_random_access_sparse_matrix.h
@@ -47,7 +47,7 @@
 namespace ceres {
 namespace internal {
 
-// A threaf safe square block sparse implementation of
+// A thread safe square block sparse implementation of
 // BlockRandomAccessMatrix. Internally a TripletSparseMatrix is used
 // for doing the actual storage. This class augments this matrix with
 // an unordered_map that allows random read/write access.
diff --git a/internal/ceres/block_structure.cc b/internal/ceres/block_structure.cc
index 5a1a5e1..00c4ce2 100644
--- a/internal/ceres/block_structure.cc
+++ b/internal/ceres/block_structure.cc
@@ -34,6 +34,9 @@
 namespace internal {
 
 bool CellLessThan(const Cell& lhs, const Cell& rhs) {
+  if (lhs.block_id == rhs.block_id) {
+    return (lhs.position  < rhs.position);
+  }
   return (lhs.block_id < rhs.block_id);
 }
 
diff --git a/internal/ceres/block_structure.h b/internal/ceres/block_structure.h
index f509067..656716e 100644
--- a/internal/ceres/block_structure.h
+++ b/internal/ceres/block_structure.h
@@ -45,9 +45,7 @@
 namespace ceres {
 namespace internal {
 
-class BlockStructureProto;
-
-typedef int16 BlockSize;
+typedef int32 BlockSize;
 
 struct Block {
   Block() : size(-1), position(-1) {}
@@ -89,16 +87,6 @@
   vector<CompressedColumn> cols;
 };
 
-// Deserialize the given block structure proto to the given block structure.
-// Destroys previous contents of block_structure.
-void ProtoToBlockStructure(const BlockStructureProto &proto,
-                           CompressedRowBlockStructure *block_structure);
-
-// Serialize the given block structure to the given proto. Destroys previous
-// contents of proto.
-void BlockStructureToProto(const CompressedRowBlockStructure &block_structure,
-                           BlockStructureProto *proto);
-
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/callbacks.cc b/internal/ceres/callbacks.cc
new file mode 100644
index 0000000..d223633
--- /dev/null
+++ b/internal/ceres/callbacks.cc
@@ -0,0 +1,109 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <iostream>  // NOLINT
+#include "ceres/callbacks.h"
+#include "ceres/program.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+StateUpdatingCallback::StateUpdatingCallback(Program* program,
+                                             double* parameters)
+    : program_(program), parameters_(parameters) {}
+
+StateUpdatingCallback::~StateUpdatingCallback() {}
+
+CallbackReturnType StateUpdatingCallback::operator()(
+    const IterationSummary& summary) {
+  if (summary.step_is_successful) {
+    program_->StateVectorToParameterBlocks(parameters_);
+    program_->CopyParameterBlockStateToUserState();
+  }
+  return SOLVER_CONTINUE;
+}
+
+LoggingCallback::LoggingCallback(const MinimizerType minimizer_type,
+                                 const bool log_to_stdout)
+    : minimizer_type(minimizer_type),
+      log_to_stdout_(log_to_stdout) {}
+
+LoggingCallback::~LoggingCallback() {}
+
+CallbackReturnType LoggingCallback::operator()(
+    const IterationSummary& summary) {
+  string output;
+  if (minimizer_type == LINE_SEARCH) {
+    const char* kReportRowFormat =
+        "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
+        "s:% 3.2e e:% 3d it:% 3.2e tt:% 3.2e";
+    output = StringPrintf(kReportRowFormat,
+                          summary.iteration,
+                          summary.cost,
+                          summary.cost_change,
+                          summary.gradient_max_norm,
+                          summary.step_norm,
+                          summary.step_size,
+                          summary.line_search_function_evaluations,
+                          summary.iteration_time_in_seconds,
+                          summary.cumulative_time_in_seconds);
+  } else if (minimizer_type == TRUST_REGION) {
+    if (summary.iteration == 0) {
+      output = "iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time\n";
+    }
+    const char* kReportRowFormat =
+        "% 4d % 8e   % 3.2e   % 3.2e  % 3.2e  % 3.2e % 3.2e     % 3d   % 3.2e   % 3.2e";
+    output += StringPrintf(kReportRowFormat,
+                          summary.iteration,
+                          summary.cost,
+                          summary.cost_change,
+                          summary.gradient_max_norm,
+                          summary.step_norm,
+                          summary.relative_decrease,
+                          summary.trust_region_radius,
+                          summary.linear_solver_iterations,
+                          summary.iteration_time_in_seconds,
+                          summary.cumulative_time_in_seconds);
+  } else {
+    LOG(FATAL) << "Unknown minimizer type.";
+  }
+
+  if (log_to_stdout_) {
+    cout << output << endl;
+  } else {
+    VLOG(1) << output;
+  }
+  return SOLVER_CONTINUE;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/callbacks.h b/internal/ceres/callbacks.h
new file mode 100644
index 0000000..93704df
--- /dev/null
+++ b/internal/ceres/callbacks.h
@@ -0,0 +1,71 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_CALLBACKS_H_
+#define CERES_INTERNAL_CALLBACKS_H_
+
+#include <string>
+#include "ceres/iteration_callback.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+
+// Callback for updating the externally visible state of parameter
+// blocks.
+class StateUpdatingCallback : public IterationCallback {
+ public:
+  StateUpdatingCallback(Program* program, double* parameters);
+  virtual ~StateUpdatingCallback();
+  virtual CallbackReturnType operator()(const IterationSummary& summary);
+ private:
+  Program* program_;
+  double* parameters_;
+};
+
+// Callback for logging the state of the minimizer to STDERR or
+// STDOUT depending on the user's preferences and logging level.
+class LoggingCallback : public IterationCallback {
+ public:
+  LoggingCallback(MinimizerType minimizer_type, bool log_to_stdout);
+  virtual ~LoggingCallback();
+  virtual CallbackReturnType operator()(const IterationSummary& summary);
+
+ private:
+  const MinimizerType minimizer_type;
+  const bool log_to_stdout_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CALLBACKS_H_
diff --git a/internal/ceres/canonical_views_clustering.cc b/internal/ceres/canonical_views_clustering.cc
index 6531945..2f032e6 100644
--- a/internal/ceres/canonical_views_clustering.cc
+++ b/internal/ceres/canonical_views_clustering.cc
@@ -29,6 +29,9 @@
 // Author: David Gallup (dgallup@google.com)
 //         Sameer Agarwal (sameeragarwal@google.com)
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_SUITESPARSE
 
 #include "ceres/canonical_views_clustering.h"
@@ -57,8 +60,8 @@
   // configuration of the clustering algorithm that some of the
   // vertices may not be assigned to any cluster. In this case they
   // are assigned to a cluster with id = kInvalidClusterId.
-  void ComputeClustering(const Graph<int>& graph,
-                         const CanonicalViewsClusteringOptions& options,
+  void ComputeClustering(const CanonicalViewsClusteringOptions& options,
+                         const Graph<int>& graph,
                          vector<int>* centers,
                          IntMap* membership);
 
@@ -81,21 +84,21 @@
 };
 
 void ComputeCanonicalViewsClustering(
-    const Graph<int>& graph,
     const CanonicalViewsClusteringOptions& options,
+    const Graph<int>& graph,
     vector<int>* centers,
     IntMap* membership) {
   time_t start_time = time(NULL);
   CanonicalViewsClustering cv;
-  cv.ComputeClustering(graph, options, centers, membership);
+  cv.ComputeClustering(options, graph, centers, membership);
   VLOG(2) << "Canonical views clustering time (secs): "
           << time(NULL) - start_time;
 }
 
 // Implementation of CanonicalViewsClustering
 void CanonicalViewsClustering::ComputeClustering(
-    const Graph<int>& graph,
     const CanonicalViewsClusteringOptions& options,
+    const Graph<int>& graph,
     vector<int>* centers,
     IntMap* membership) {
   options_ = options;
diff --git a/internal/ceres/canonical_views_clustering.h b/internal/ceres/canonical_views_clustering.h
index 48d1ed2..1b4c4ee 100644
--- a/internal/ceres/canonical_views_clustering.h
+++ b/internal/ceres/canonical_views_clustering.h
@@ -41,15 +41,15 @@
 #ifndef CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
 #define CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_SUITESPARSE
 
 #include <vector>
 
 #include "ceres/collections_port.h"
 #include "ceres/graph.h"
-#include "ceres/internal/macros.h"
-#include "ceres/map_util.h"
-#include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
@@ -100,8 +100,8 @@
 // algorithm that some of the vertices may not be assigned to any
 // cluster. In this case they are assigned to a cluster with id = -1;
 void ComputeCanonicalViewsClustering(
-    const Graph<int>& graph,
     const CanonicalViewsClusteringOptions& options,
+    const Graph<int>& graph,
     vector<int>* centers,
     HashMap<int, int>* membership);
 
diff --git a/internal/ceres/canonical_views_clustering_test.cc b/internal/ceres/canonical_views_clustering_test.cc
index 78d5635..f86084a 100644
--- a/internal/ceres/canonical_views_clustering_test.cc
+++ b/internal/ceres/canonical_views_clustering_test.cc
@@ -29,6 +29,9 @@
 // Author: Sameer Agarwal (sameeragarwal@google.com)
 //         David Gallup (dgallup@google.com)
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_SUITESPARSE
 
 #include "ceres/canonical_views_clustering.h"
@@ -69,7 +72,7 @@
   }
 
   void ComputeClustering() {
-    ComputeCanonicalViewsClustering(graph_, options_, &centers_, &membership_);
+    ComputeCanonicalViewsClustering(options_, graph_, &centers_, &membership_);
   }
 
   Graph<int> graph_;
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
index 9b8f980..88e61d9 100644
--- a/internal/ceres/cgnr_solver.cc
+++ b/internal/ceres/cgnr_solver.cc
@@ -33,6 +33,7 @@
 #include "ceres/block_jacobi_preconditioner.h"
 #include "ceres/cgnr_linear_operator.h"
 #include "ceres/conjugate_gradients_solver.h"
+#include "ceres/internal/eigen.h"
 #include "ceres/linear_solver.h"
 #include "ceres/wall_time.h"
 #include "glog/logging.h"
@@ -43,6 +44,10 @@
 CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
   : options_(options),
     preconditioner_(NULL) {
+  if (options_.preconditioner_type != JACOBI &&
+      options_.preconditioner_type != IDENTITY) {
+    LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners.";
+  }
 }
 
 LinearSolver::Summary CgnrSolver::SolveImpl(
@@ -53,9 +58,9 @@
   EventLogger event_logger("CgnrSolver::Solve");
 
   // Form z = Atb.
-  scoped_array<double> z(new double[A->num_cols()]);
-  std::fill(z.get(), z.get() + A->num_cols(), 0.0);
-  A->LeftMultiply(b, z.get());
+  Vector z(A->num_cols());
+  z.setZero();
+  A->LeftMultiply(b, z.data());
 
   // Precondition if necessary.
   LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options;
@@ -65,20 +70,17 @@
     }
     preconditioner_->Update(*A, per_solve_options.D);
     cg_per_solve_options.preconditioner = preconditioner_.get();
-  } else if (options_.preconditioner_type != IDENTITY) {
-    LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners.";
   }
 
   // Solve (AtA + DtD)x = z (= Atb).
-  std::fill(x, x + A->num_cols(), 0.0);
+  VectorRef(x, A->num_cols()).setZero();
   CgnrLinearOperator lhs(*A, per_solve_options.D);
   event_logger.AddEvent("Setup");
 
   ConjugateGradientsSolver conjugate_gradient_solver(options_);
   LinearSolver::Summary summary =
-      conjugate_gradient_solver.Solve(&lhs, z.get(), cg_per_solve_options, x);
+      conjugate_gradient_solver.Solve(&lhs, z.data(), cg_per_solve_options, x);
   event_logger.AddEvent("Solve");
-
   return summary;
 }
 
diff --git a/internal/ceres/collections_port.h b/internal/ceres/collections_port.h
index 715c975..3f976b9 100644
--- a/internal/ceres/collections_port.h
+++ b/internal/ceres/collections_port.h
@@ -33,26 +33,48 @@
 #ifndef CERES_INTERNAL_COLLECTIONS_PORT_H_
 #define CERES_INTERNAL_COLLECTIONS_PORT_H_
 
-#if defined(CERES_NO_TR1)
+#include "ceres/internal/port.h"
+
+#if defined(CERES_NO_UNORDERED_MAP)
 #  include <map>
 #  include <set>
-#else
-#  if defined(_MSC_VER)
-#    include <unordered_map>
-#    include <unordered_set>
-#  else
-#    include <tr1/unordered_map>
-#    include <tr1/unordered_set>
-#  endif
 #endif
+
+#if defined(CERES_TR1_UNORDERED_MAP)
+#  include <tr1/unordered_map>
+#  include <tr1/unordered_set>
+#  define CERES_HASH_NAMESPACE_START namespace std { namespace tr1 {
+#  define CERES_HASH_NAMESPACE_END } }
+#endif
+
+#if defined(CERES_STD_UNORDERED_MAP)
+#  include <unordered_map>
+#  include <unordered_set>
+#  define CERES_HASH_NAMESPACE_START namespace std {
+#  define CERES_HASH_NAMESPACE_END }
+#endif
+
+#if defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
+#  include <unordered_map>
+#  include <unordered_set>
+#  define CERES_HASH_NAMESPACE_START namespace std { namespace tr1 {
+#  define CERES_HASH_NAMESPACE_END } }
+#endif
+
+#if !defined(CERES_NO_UNORDERED_MAP) && !defined(CERES_TR1_UNORDERED_MAP) && \
+    !defined(CERES_STD_UNORDERED_MAP) && !defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)  // NOLINT
+#  error One of: CERES_NO_UNORDERED_MAP, CERES_TR1_UNORDERED_MAP,\
+ CERES_STD_UNORDERED_MAP, CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE must be defined!  // NOLINT
+#endif
+
 #include <utility>
 #include "ceres/integral_types.h"
 #include "ceres/internal/port.h"
 
-// Some systems don't have access to TR1. In that case, substitute the hash
-// map/set with normal map/set. The price to pay is slightly slower speed for
-// some operations.
-#if defined(CERES_NO_TR1)
+// Some systems don't have access to unordered_map/unordered_set. In
+// that case, substitute the hash map/set with normal map/set. The
+// price to pay is slower speed for some operations.
+#if defined(CERES_NO_UNORDERED_MAP)
 
 namespace ceres {
 namespace internal {
@@ -71,11 +93,20 @@
 namespace ceres {
 namespace internal {
 
+#if defined(CERES_TR1_UNORDERED_MAP) || \
+    defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
 template<typename K, typename V>
 struct HashMap : std::tr1::unordered_map<K, V> {};
-
 template<typename K>
 struct HashSet : std::tr1::unordered_set<K> {};
+#endif
+
+#if defined(CERES_STD_UNORDERED_MAP)
+template<typename K, typename V>
+struct HashMap : std::unordered_map<K, V> {};
+template<typename K>
+struct HashSet : std::unordered_set<K> {};
+#endif
 
 #if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__)
 #define GG_LONGLONG(x) x##I64
@@ -162,6 +193,5 @@
 
 CERES_HASH_NAMESPACE_END
 
-#endif  // CERES_NO_TR1
-
+#endif  // CERES_NO_UNORDERED_MAP
 #endif  // CERES_INTERNAL_COLLECTIONS_PORT_H_
diff --git a/internal/ceres/compressed_row_jacobian_writer.cc b/internal/ceres/compressed_row_jacobian_writer.cc
index bbadb77..ed8db14 100644
--- a/internal/ceres/compressed_row_jacobian_writer.cc
+++ b/internal/ceres/compressed_row_jacobian_writer.cc
@@ -40,6 +40,44 @@
 namespace ceres {
 namespace internal {
 
+void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
+    const Program* program, CompressedRowSparseMatrix* jacobian) {
+  const vector<ParameterBlock*>& parameter_blocks =
+      program->parameter_blocks();
+  vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
+  col_blocks.resize(parameter_blocks.size());
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    col_blocks[i] = parameter_blocks[i]->LocalSize();
+  }
+
+  const vector<ResidualBlock*>& residual_blocks =
+      program->residual_blocks();
+  vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
+  row_blocks.resize(residual_blocks.size());
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    row_blocks[i] = residual_blocks[i]->NumResiduals();
+  }
+}
+
+void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
+      const Program* program,
+      int residual_id,
+      vector<pair<int, int> >* evaluated_jacobian_blocks) {
+  const ResidualBlock* residual_block =
+      program->residual_blocks()[residual_id];
+  const int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+  for (int j = 0; j < num_parameter_blocks; ++j) {
+    const ParameterBlock* parameter_block =
+        residual_block->parameter_blocks()[j];
+    if (!parameter_block->IsConstant()) {
+      evaluated_jacobian_blocks->push_back(
+          make_pair(parameter_block->index(), j));
+    }
+  }
+  sort(evaluated_jacobian_blocks->begin(), evaluated_jacobian_blocks->end());
+}
+
 SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
   const vector<ResidualBlock*>& residual_blocks =
       program_->residual_blocks();
@@ -71,7 +109,7 @@
           total_num_effective_parameters,
           num_jacobian_nonzeros + total_num_effective_parameters);
 
-  // At this stage, the CompressedSparseMatrix is an invalid state. But this
+  // At this stage, the CompressedRowSparseMatrix is an invalid state. But this
   // seems to be the only way to construct it without doing a memory copy.
   int* rows = jacobian->mutable_rows();
   int* cols = jacobian->mutable_cols();
@@ -132,22 +170,7 @@
   }
   CHECK_EQ(num_jacobian_nonzeros, rows[total_num_residuals]);
 
-  // Populate the row and column block vectors for use by block
-  // oriented ordering algorithms. This is useful when
-  // Solver::Options::use_block_amd = true.
-  const vector<ParameterBlock*>& parameter_blocks =
-      program_->parameter_blocks();
-  vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
-  col_blocks.resize(parameter_blocks.size());
-  for (int i = 0; i <  parameter_blocks.size(); ++i) {
-    col_blocks[i] = parameter_blocks[i]->LocalSize();
-  }
-
-  vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
-  row_blocks.resize(residual_blocks.size());
-  for (int i = 0; i <  residual_blocks.size(); ++i) {
-    row_blocks[i] = residual_blocks[i]->NumResiduals();
-  }
+  PopulateJacobianRowAndColumnBlockVectors(program_, jacobian);
 
   return jacobian;
 }
@@ -164,25 +187,10 @@
 
   const ResidualBlock* residual_block =
       program_->residual_blocks()[residual_id];
-  const int num_parameter_blocks = residual_block->NumParameterBlocks();
   const int num_residuals = residual_block->NumResiduals();
 
-  // It is necessary to determine the order of the jacobian blocks before
-  // copying them into the CompressedRowSparseMatrix. Just because a cost
-  // function uses parameter blocks 1 after 2 in its arguments does not mean
-  // that the block 1 occurs before block 2 in the column layout of the
-  // jacobian. Thus, determine the order by sorting the jacobian blocks by their
-  // position in the state vector.
   vector<pair<int, int> > evaluated_jacobian_blocks;
-  for (int j = 0; j < num_parameter_blocks; ++j) {
-    const ParameterBlock* parameter_block =
-        residual_block->parameter_blocks()[j];
-    if (!parameter_block->IsConstant()) {
-      evaluated_jacobian_blocks.push_back(
-          make_pair(parameter_block->index(), j));
-    }
-  }
-  sort(evaluated_jacobian_blocks.begin(), evaluated_jacobian_blocks.end());
+  GetOrderedParameterBlocks(program_, residual_id, &evaluated_jacobian_blocks);
 
   // Where in the current row does the jacobian for a parameter block begin.
   int col_pos = 0;
diff --git a/internal/ceres/compressed_row_jacobian_writer.h b/internal/ceres/compressed_row_jacobian_writer.h
index c103165..a722a7c 100644
--- a/internal/ceres/compressed_row_jacobian_writer.h
+++ b/internal/ceres/compressed_row_jacobian_writer.h
@@ -39,6 +39,7 @@
 namespace ceres {
 namespace internal {
 
+class CompressedRowSparseMatrix;
 class Program;
 class SparseMatrix;
 
@@ -49,11 +50,44 @@
     : program_(program) {
   }
 
+  // PopulateJacobianRowAndColumnBlockVectors sets col_blocks and
+  // row_blocks for a CompressedRowSparseMatrix, based on the
+  // parameter block sizes and residual sizes respectively from the
+  // program. This is useful when Solver::Options::use_block_amd =
+  // true;
+  //
+  // This function is static so that it is available to other jacobian
+  // writers which use CompressedRowSparseMatrix (or derived types).
+  // (Jacobian writers do not fall under any type hierarchy; they only
+  // have to provide an interface as specified in program_evaluator.h).
+  static void PopulateJacobianRowAndColumnBlockVectors(
+      const Program* program,
+      CompressedRowSparseMatrix* jacobian);
+
+  // It is necessary to determine the order of the jacobian blocks
+  // before copying them into a CompressedRowSparseMatrix (or derived
+  // type).  Just because a cost function uses parameter blocks 1
+  // after 2 in its arguments does not mean that the block 1 occurs
+  // before block 2 in the column layout of the jacobian. Thus,
+  // GetOrderedParameterBlocks determines the order by sorting the
+  // jacobian blocks by their position in the state vector.
+  //
+  // This function is static so that it is available to other jacobian
+  // writers which use CompressedRowSparseMatrix (or derived types).
+  // (Jacobian writers do not fall under any type hierarchy; they only
+  // have to provide an interface as specified in
+  // program_evaluator.h).
+  static void GetOrderedParameterBlocks(
+      const Program* program,
+      int residual_id,
+      vector<pair<int, int> >* evaluated_jacobian_blocks);
+
   // JacobianWriter interface.
 
-  // Since the compressed row matrix has different layout than that assumed by
-  // the cost functions, use scratch space to store the jacobians temporarily
-  // then copy them over to the larger jacobian in the Write() function.
+  // Since the compressed row matrix has different layout than that
+  // assumed by the cost functions, use scratch space to store the
+  // jacobians temporarily then copy them over to the larger jacobian
+  // in the Write() function.
   ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads) {
     return ScratchEvaluatePreparer::Create(*program_, num_threads);
   }
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index e200c92..7993ed6 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -31,6 +31,7 @@
 #include "ceres/compressed_row_sparse_matrix.h"
 
 #include <algorithm>
+#include <numeric>
 #include <vector>
 #include "ceres/crs_matrix.h"
 #include "ceres/internal/port.h"
@@ -124,7 +125,7 @@
 
   // Find the cumulative sum of the row counts.
   for (int i = 1; i < num_rows_ + 1; ++i) {
-    rows_[i] += rows_[i-1];
+    rows_[i] += rows_[i - 1];
   }
 
   CHECK_EQ(num_nonzeros(), m.num_nonzeros());
@@ -215,11 +216,28 @@
 
   num_rows_ -= delta_rows;
   rows_.resize(num_rows_ + 1);
+
+  // Walk the list of row blocks until we reach the new number of rows
+  // and then drop the rest of the row blocks.
+  int num_row_blocks = 0;
+  int num_rows = 0;
+  while (num_row_blocks < row_blocks_.size() && num_rows < num_rows_) {
+    num_rows += row_blocks_[num_row_blocks];
+    ++num_row_blocks;
+  }
+
+  row_blocks_.resize(num_row_blocks);
 }
 
 void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
   CHECK_EQ(m.num_cols(), num_cols_);
 
+  CHECK(row_blocks_.size() == 0 || m.row_blocks().size() !=0)
+      << "Cannot append a matrix with row blocks to one without and vice versa."
+      << "This matrix has : " << row_blocks_.size() << " row blocks."
+      << "The matrix being appended has: " << m.row_blocks().size()
+      << " row blocks.";
+
   if (cols_.size() < num_nonzeros() + m.num_nonzeros()) {
     cols_.resize(num_nonzeros() + m.num_nonzeros());
     values_.resize(num_nonzeros() + m.num_nonzeros());
@@ -239,6 +257,7 @@
   }
 
   num_rows_ += m.num_rows();
+  row_blocks_.insert(row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
 }
 
 void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
@@ -267,6 +286,13 @@
   matrix->values.resize(matrix->rows[matrix->num_rows]);
 }
 
+void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
+  CHECK_GE(num_nonzeros, 0);
+
+  cols_.resize(num_nonzeros);
+  values_.resize(num_nonzeros);
+}
+
 void CompressedRowSparseMatrix::SolveLowerTriangularInPlace(
     double* solution) const {
   for (int r = 0; r < num_rows_; ++r) {
@@ -358,9 +384,161 @@
   }
   transpose_rows[0] = 0;
 
+  *(transpose->mutable_row_blocks()) = col_blocks_;
+  *(transpose->mutable_col_blocks()) = row_blocks_;
+
   return transpose;
 }
 
+namespace {
+// A ProductTerm is a term in the outer product of a matrix with
+// itself.
+struct ProductTerm {
+  ProductTerm(const int row, const int col, const int index)
+      : row(row), col(col), index(index) {
+  }
+
+  bool operator<(const ProductTerm& right) const {
+    if (row == right.row) {
+      if (col == right.col) {
+        return index < right.index;
+      }
+      return col < right.col;
+    }
+    return row < right.row;
+  }
+
+  int row;
+  int col;
+  int index;
+};
+
+CompressedRowSparseMatrix*
+CompressAndFillProgram(const int num_rows,
+                       const int num_cols,
+                       const vector<ProductTerm>& product,
+                       vector<int>* program) {
+  CHECK_GT(product.size(), 0);
+
+  // Count the number of unique product terms, which in turn is the
+  // number of non-zeros in the outer product.
+  int num_nonzeros = 1;
+  for (int i = 1; i < product.size(); ++i) {
+    if (product[i].row != product[i - 1].row ||
+        product[i].col != product[i - 1].col) {
+      ++num_nonzeros;
+    }
+  }
+
+  CompressedRowSparseMatrix* matrix =
+      new CompressedRowSparseMatrix(num_rows, num_cols, num_nonzeros);
+
+  int* crsm_rows = matrix->mutable_rows();
+  std::fill(crsm_rows, crsm_rows + num_rows + 1, 0);
+  int* crsm_cols = matrix->mutable_cols();
+  std::fill(crsm_cols, crsm_cols + num_nonzeros, 0);
+
+  CHECK_NOTNULL(program)->clear();
+  program->resize(product.size());
+
+  // Iterate over the sorted product terms. This means each row is
+  // filled one at a time, and we are able to assign a position in the
+  // values array to each term.
+  //
+  // If terms repeat, i.e., they contribute to the same entry in the
+  // result matrix), then they do not affect the sparsity structure of
+  // the result matrix.
+  int nnz = 0;
+  crsm_cols[0] = product[0].col;
+  crsm_rows[product[0].row + 1]++;
+  (*program)[product[0].index] = nnz;
+  for (int i = 1; i < product.size(); ++i) {
+    const ProductTerm& previous = product[i - 1];
+    const ProductTerm& current = product[i];
+
+    // Sparsity structure is updated only if the term is not a repeat.
+    if (previous.row != current.row || previous.col != current.col) {
+      crsm_cols[++nnz] = current.col;
+      crsm_rows[current.row + 1]++;
+    }
+
+    // All terms get assigned the position in the values array where
+    // their value is accumulated.
+    (*program)[current.index] = nnz;
+  }
+
+  for (int i = 1; i < num_rows + 1; ++i) {
+    crsm_rows[i] += crsm_rows[i - 1];
+  }
+
+  return matrix;
+}
+
+}  // namespace
+
+CompressedRowSparseMatrix*
+CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+      const CompressedRowSparseMatrix& m,
+      vector<int>* program) {
+  CHECK_NOTNULL(program)->clear();
+  CHECK_GT(m.num_nonzeros(), 0) << "Congratulations, "
+                                << "you found a bug in Ceres. Please report it.";
+
+  vector<ProductTerm> product;
+  const vector<int>& row_blocks = m.row_blocks();
+  int row_block_begin = 0;
+  // Iterate over row blocks
+  for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
+    const int row_block_end = row_block_begin + row_blocks[row_block];
+    // Compute the outer product terms for just one row per row block.
+    const int r = row_block_begin;
+    // Compute the lower triangular part of the product.
+    for (int idx1 = m.rows()[r]; idx1 < m.rows()[r + 1]; ++idx1) {
+      for (int idx2 = m.rows()[r]; idx2 <= idx1; ++idx2) {
+        product.push_back(ProductTerm(m.cols()[idx1], m.cols()[idx2], product.size()));
+      }
+    }
+    row_block_begin = row_block_end;
+  }
+  CHECK_EQ(row_block_begin, m.num_rows());
+  sort(product.begin(), product.end());
+  return CompressAndFillProgram(m.num_cols(), m.num_cols(), product, program);
+}
+
+void CompressedRowSparseMatrix::ComputeOuterProduct(
+    const CompressedRowSparseMatrix& m,
+    const vector<int>& program,
+    CompressedRowSparseMatrix* result) {
+  result->SetZero();
+  double* values = result->mutable_values();
+  const vector<int>& row_blocks = m.row_blocks();
+
+  int cursor = 0;
+  int row_block_begin = 0;
+  const double* m_values = m.values();
+  const int* m_rows = m.rows();
+  // Iterate over row blocks.
+  for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
+    const int row_block_end = row_block_begin + row_blocks[row_block];
+    const int saved_cursor = cursor;
+    for (int r = row_block_begin; r < row_block_end; ++r) {
+      // Reuse the program segment for each row in this row block.
+      cursor = saved_cursor;
+      const int row_begin = m_rows[r];
+      const int row_end = m_rows[r + 1];
+      for (int idx1 = row_begin; idx1 < row_end; ++idx1) {
+        const double v1 =  m_values[idx1];
+        for (int idx2 = row_begin; idx2 <= idx1; ++idx2, ++cursor) {
+          values[program[cursor]] += v1 * m_values[idx2];
+        }
+      }
+    }
+    row_block_begin = row_block_end;
+  }
+
+  CHECK_EQ(row_block_begin, m.num_rows());
+  CHECK_EQ(cursor, program.size());
+}
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index c5721eb..a0ba7ee 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -115,6 +115,9 @@
   const vector<int>& col_blocks() const { return col_blocks_; }
   vector<int>* mutable_col_blocks() { return &col_blocks_; }
 
+  // Destructive array resizing method.
+  void SetMaxNumNonZeros(int num_nonzeros);
+
   // Non-destructive array resizing method.
   void set_num_rows(const int num_rows) { num_rows_ = num_rows; }
   void set_num_cols(const int num_cols) { num_cols_ = num_cols; }
@@ -128,6 +131,32 @@
       const double* diagonal,
       const vector<int>& blocks);
 
+  // Compute the sparsity structure of the product m.transpose() * m
+  // and create a CompressedRowSparseMatrix corresponding to it.
+  //
+  // Also compute a "program" vector, which for every term in the
+  // outer product points to the entry in the values array of the
+  // result matrix where it should be accumulated.
+  //
+  // This program is used by the ComputeOuterProduct function below to
+  // compute the outer product.
+  //
+  // Since the entries of the program are the same for rows with the
+  // same sparsity structure, the program only stores the result for
+  // one row per row block. The ComputeOuterProduct function reuses
+  // this information for each row in the row block.
+  static CompressedRowSparseMatrix* CreateOuterProductMatrixAndProgram(
+      const CompressedRowSparseMatrix& m,
+      vector<int>* program);
+
+  // Compute the values array for the expression m.transpose() * m,
+  // where the matrix used to store the result and a program have been
+  // created using the CreateOuterProductMatrixAndProgram function
+  // above.
+  static void ComputeOuterProduct(const CompressedRowSparseMatrix& m,
+                                  const vector<int>& program,
+                                  CompressedRowSparseMatrix* result);
+
  private:
   int num_rows_;
   int num_cols_;
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index 02109cc..999a661 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -30,11 +30,14 @@
 
 #include "ceres/compressed_row_sparse_matrix.h"
 
+#include <numeric>
 #include "ceres/casts.h"
 #include "ceres/crs_matrix.h"
+#include "ceres/cxsparse.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/linear_least_squares_problems.h"
+#include "ceres/random.h"
 #include "ceres/triplet_sparse_matrix.h"
 #include "glog/logging.h"
 #include "gtest/gtest.h"
@@ -76,6 +79,14 @@
 
     num_rows = tsm->num_rows();
     num_cols = tsm->num_cols();
+
+    vector<int>* row_blocks = crsm->mutable_row_blocks();
+    row_blocks->resize(num_rows);
+    std::fill(row_blocks->begin(), row_blocks->end(), 1);
+
+    vector<int>* col_blocks = crsm->mutable_col_blocks();
+    col_blocks->resize(num_cols);
+    std::fill(col_blocks->begin(), col_blocks->end(), 1);
   }
 
   int num_rows;
@@ -126,6 +137,9 @@
 }
 
 TEST_F(CompressedRowSparseMatrixTest, DeleteRows) {
+  // Clear the row and column blocks as these are purely scalar tests.
+  crsm->mutable_row_blocks()->clear();
+  crsm->mutable_col_blocks()->clear();
   for (int i = 0; i < num_rows; ++i) {
     tsm->Resize(num_rows - i, num_cols);
     crsm->DeleteRows(crsm->num_rows() - tsm->num_rows());
@@ -134,6 +148,10 @@
 }
 
 TEST_F(CompressedRowSparseMatrixTest, AppendRows) {
+  // Clear the row and column blocks as these are purely scalar tests.
+  crsm->mutable_row_blocks()->clear();
+  crsm->mutable_col_blocks()->clear();
+
   for (int i = 0; i < num_rows; ++i) {
     TripletSparseMatrix tsm_appendage(*tsm);
     tsm_appendage.Resize(i, num_cols);
@@ -146,6 +164,47 @@
   }
 }
 
+TEST_F(CompressedRowSparseMatrixTest, AppendAndDeleteBlockDiagonalMatrix) {
+  int num_diagonal_rows = crsm->num_cols();
+
+  scoped_array<double> diagonal(new double[num_diagonal_rows]);
+  for (int i = 0; i < num_diagonal_rows; ++i) {
+    diagonal[i] =i;
+  }
+
+  vector<int> row_and_column_blocks;
+  row_and_column_blocks.push_back(1);
+  row_and_column_blocks.push_back(2);
+  row_and_column_blocks.push_back(2);
+
+  const vector<int> pre_row_blocks = crsm->row_blocks();
+  const vector<int> pre_col_blocks = crsm->col_blocks();
+
+  scoped_ptr<CompressedRowSparseMatrix> appendage(
+      CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+          diagonal.get(), row_and_column_blocks));
+  LOG(INFO) << appendage->row_blocks().size();
+
+  crsm->AppendRows(*appendage);
+
+  const vector<int> post_row_blocks = crsm->row_blocks();
+  const vector<int> post_col_blocks = crsm->col_blocks();
+
+  vector<int> expected_row_blocks = pre_row_blocks;
+  expected_row_blocks.insert(expected_row_blocks.end(),
+                             row_and_column_blocks.begin(),
+                             row_and_column_blocks.end());
+
+  vector<int> expected_col_blocks = pre_col_blocks;
+
+  EXPECT_EQ(expected_row_blocks, crsm->row_blocks());
+  EXPECT_EQ(expected_col_blocks, crsm->col_blocks());
+
+  crsm->DeleteRows(num_diagonal_rows);
+  EXPECT_EQ(crsm->row_blocks(), pre_row_blocks);
+  EXPECT_EQ(crsm->col_blocks(), pre_col_blocks);
+}
+
 TEST_F(CompressedRowSparseMatrixTest, ToDenseMatrix) {
   Matrix tsm_dense;
   Matrix crsm_dense;
@@ -279,10 +338,22 @@
   // 13  0 14 15  9  0
   //  0 16 17  0  0  0
 
+  // Block structure:
+  //  A  A  A  A  B  B
+  //  A  A  A  A  B  B
+  //  A  A  A  A  B  B
+  //  C  C  C  C  D  D
+  //  C  C  C  C  D  D
+  //  C  C  C  C  D  D
+
   CompressedRowSparseMatrix matrix(5, 6, 30);
   int* rows = matrix.mutable_rows();
   int* cols = matrix.mutable_cols();
   double* values = matrix.mutable_values();
+  matrix.mutable_row_blocks()->push_back(3);
+  matrix.mutable_row_blocks()->push_back(3);
+  matrix.mutable_col_blocks()->push_back(4);
+  matrix.mutable_col_blocks()->push_back(2);
 
   rows[0] = 0;
   cols[0] = 1;
@@ -317,6 +388,16 @@
 
   scoped_ptr<CompressedRowSparseMatrix> transpose(matrix.Transpose());
 
+  ASSERT_EQ(transpose->row_blocks().size(), matrix.col_blocks().size());
+  for (int i = 0; i < transpose->row_blocks().size(); ++i) {
+    EXPECT_EQ(transpose->row_blocks()[i], matrix.col_blocks()[i]);
+  }
+
+  ASSERT_EQ(transpose->col_blocks().size(), matrix.row_blocks().size());
+  for (int i = 0; i < transpose->col_blocks().size(); ++i) {
+    EXPECT_EQ(transpose->col_blocks()[i], matrix.row_blocks()[i]);
+  }
+
   Matrix dense_matrix;
   matrix.ToDenseMatrix(&dense_matrix);
 
@@ -325,5 +406,170 @@
   EXPECT_NEAR((dense_matrix - dense_transpose.transpose()).norm(), 0.0, 1e-14);
 }
 
+#ifndef CERES_NO_CXSPARSE
+
+struct RandomMatrixOptions {
+  int num_row_blocks;
+  int min_row_block_size;
+  int max_row_block_size;
+  int num_col_blocks;
+  int min_col_block_size;
+  int max_col_block_size;
+  double block_density;
+};
+
+CompressedRowSparseMatrix* CreateRandomCompressedRowSparseMatrix(
+    const RandomMatrixOptions& options) {
+  vector<int> row_blocks;
+  for (int i = 0; i < options.num_row_blocks; ++i) {
+    const int delta_block_size =
+        Uniform(options.max_row_block_size - options.min_row_block_size);
+    row_blocks.push_back(options.min_row_block_size + delta_block_size);
+  }
+
+  vector<int> col_blocks;
+  for (int i = 0; i < options.num_col_blocks; ++i) {
+    const int delta_block_size =
+        Uniform(options.max_col_block_size - options.min_col_block_size);
+    col_blocks.push_back(options.min_col_block_size + delta_block_size);
+  }
+
+  vector<int> rows;
+  vector<int> cols;
+  vector<double> values;
+
+  while (values.size() == 0) {
+    int row_block_begin = 0;
+    for (int r = 0; r < options.num_row_blocks; ++r) {
+      int col_block_begin = 0;
+      for (int c = 0; c < options.num_col_blocks; ++c) {
+        if (RandDouble() <= options.block_density) {
+          for (int i = 0; i < row_blocks[r]; ++i) {
+            for (int j = 0; j < col_blocks[c]; ++j) {
+              rows.push_back(row_block_begin + i);
+              cols.push_back(col_block_begin + j);
+              values.push_back(RandNormal());
+            }
+          }
+        }
+        col_block_begin += col_blocks[c];
+      }
+      row_block_begin += row_blocks[r];
+    }
+  }
+
+  const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
+  const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
+  const int num_nonzeros = values.size();
+
+  TripletSparseMatrix tsm(num_rows, num_cols, num_nonzeros);
+  std::copy(rows.begin(), rows.end(), tsm.mutable_rows());
+  std::copy(cols.begin(), cols.end(), tsm.mutable_cols());
+  std::copy(values.begin(), values.end(), tsm.mutable_values());
+  tsm.set_num_nonzeros(num_nonzeros);
+  CompressedRowSparseMatrix* matrix = new CompressedRowSparseMatrix(tsm);
+  (*matrix->mutable_row_blocks())  = row_blocks;
+  (*matrix->mutable_col_blocks())  = col_blocks;
+  return matrix;
+}
+
+void ToDenseMatrix(const cs_di* matrix, Matrix* dense_matrix) {
+  dense_matrix->resize(matrix->m, matrix->n);
+  dense_matrix->setZero();
+
+  for (int c = 0; c < matrix->n; ++c) {
+   for (int idx = matrix->p[c]; idx < matrix->p[c + 1]; ++idx) {
+     const int r = matrix->i[idx];
+     (*dense_matrix)(r, c) = matrix->x[idx];
+   }
+ }
+}
+
+TEST(CompressedRowSparseMatrix, ComputeOuterProduct) {
+  // "Randomly generated seed."
+  SetRandomState(29823);
+  int kMaxNumRowBlocks = 10;
+  int kMaxNumColBlocks = 10;
+  int kNumTrials = 10;
+
+  CXSparse cxsparse;
+  const double kTolerance = 1e-18;
+
+  // Create a random matrix, compute its outer product using CXSParse
+  // and ComputeOuterProduct. Convert both matrices to dense matrices
+  // and compare their upper triangular parts. They should be within
+  // kTolerance of each other.
+  for (int num_row_blocks = 1;
+       num_row_blocks < kMaxNumRowBlocks;
+       ++num_row_blocks) {
+    for (int num_col_blocks = 1;
+         num_col_blocks < kMaxNumColBlocks;
+         ++num_col_blocks) {
+      for (int trial = 0; trial < kNumTrials; ++trial) {
+
+
+        RandomMatrixOptions options;
+        options.num_row_blocks = num_row_blocks;
+        options.num_col_blocks = num_col_blocks;
+        options.min_row_block_size = 1;
+        options.max_row_block_size = 5;
+        options.min_col_block_size = 1;
+        options.max_col_block_size = 10;
+        options.block_density = std::max(0.1, RandDouble());
+
+        VLOG(2) << "num row blocks: " << options.num_row_blocks;
+        VLOG(2) << "num col blocks: " << options.num_col_blocks;
+        VLOG(2) << "min row block size: " << options.min_row_block_size;
+        VLOG(2) << "max row block size: " << options.max_row_block_size;
+        VLOG(2) << "min col block size: " << options.min_col_block_size;
+        VLOG(2) << "max col block size: " << options.max_col_block_size;
+        VLOG(2) << "block density: " << options.block_density;
+
+        scoped_ptr<CompressedRowSparseMatrix> matrix(
+            CreateRandomCompressedRowSparseMatrix(options));
+
+        cs_di cs_matrix_transpose = cxsparse.CreateSparseMatrixTransposeView(matrix.get());
+        cs_di* cs_matrix = cxsparse.TransposeMatrix(&cs_matrix_transpose);
+        cs_di* expected_outer_product =
+            cxsparse.MatrixMatrixMultiply(&cs_matrix_transpose, cs_matrix);
+
+        vector<int> program;
+        scoped_ptr<CompressedRowSparseMatrix> outer_product(
+            CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+                *matrix, &program));
+        CompressedRowSparseMatrix::ComputeOuterProduct(*matrix,
+                                                       program,
+                                                       outer_product.get());
+
+        cs_di actual_outer_product =
+            cxsparse.CreateSparseMatrixTransposeView(outer_product.get());
+
+        ASSERT_EQ(actual_outer_product.m, actual_outer_product.n);
+        ASSERT_EQ(expected_outer_product->m, expected_outer_product->n);
+        ASSERT_EQ(actual_outer_product.m, expected_outer_product->m);
+
+        Matrix actual_matrix;
+        Matrix expected_matrix;
+
+        ToDenseMatrix(expected_outer_product, &expected_matrix);
+        expected_matrix.triangularView<Eigen::StrictlyLower>().setZero();
+
+        ToDenseMatrix(&actual_outer_product, &actual_matrix);
+        const double diff_norm = (actual_matrix - expected_matrix).norm() / expected_matrix.norm();
+        ASSERT_NEAR(diff_norm, 0.0, kTolerance)
+            << "expected: \n"
+            << expected_matrix
+            << "\nactual: \n"
+            << actual_matrix;
+
+        cxsparse.Free(cs_matrix);
+        cxsparse.Free(expected_outer_product);
+      }
+    }
+  }
+}
+
+#endif  // CERES_NO_CXSPARSE
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/conjugate_gradients_solver.cc b/internal/ceres/conjugate_gradients_solver.cc
index ae8e877..524cb8a 100644
--- a/internal/ceres/conjugate_gradients_solver.cc
+++ b/internal/ceres/conjugate_gradients_solver.cc
@@ -44,6 +44,7 @@
 #include "ceres/fpclassify.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/linear_operator.h"
+#include "ceres/stringprintf.h"
 #include "ceres/types.h"
 #include "glog/logging.h"
 
@@ -55,9 +56,6 @@
   return ((x == 0.0) || (IsInfinite(x)));
 }
 
-// Constant used in the MATLAB implementation ~ 2 * eps.
-const double kEpsilon = 2.2204e-16;
-
 }  // namespace
 
 ConjugateGradientsSolver::ConjugateGradientsSolver(
@@ -76,17 +74,19 @@
   CHECK_EQ(A->num_rows(), A->num_cols());
 
   LinearSolver::Summary summary;
-  summary.termination_type = MAX_ITERATIONS;
+  summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE;
+  summary.message = "Maximum number of iterations reached.";
   summary.num_iterations = 0;
 
-  int num_cols = A->num_cols();
+  const int num_cols = A->num_cols();
   VectorRef xref(x, num_cols);
   ConstVectorRef bref(b, num_cols);
 
-  double norm_b = bref.norm();
+  const double norm_b = bref.norm();
   if (norm_b == 0.0) {
     xref.setZero();
-    summary.termination_type = TOLERANCE;
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.message = "Convergence. |b| = 0.";
     return summary;
   }
 
@@ -95,15 +95,16 @@
   Vector z(num_cols);
   Vector tmp(num_cols);
 
-  double tol_r = per_solve_options.r_tolerance * norm_b;
+  const double tol_r = per_solve_options.r_tolerance * norm_b;
 
   tmp.setZero();
   A->RightMultiply(x, tmp.data());
   r = bref - tmp;
   double norm_r = r.norm();
-
   if (norm_r <= tol_r) {
-    summary.termination_type = TOLERANCE;
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.message =
+        StringPrintf("Convergence. |r| = %e <= %e.", norm_r, tol_r);
     return summary;
   }
 
@@ -115,8 +116,6 @@
   for (summary.num_iterations = 1;
        summary.num_iterations < options_.max_num_iterations;
        ++summary.num_iterations) {
-    VLOG(3) << "cg iteration " << summary.num_iterations;
-
     // Apply preconditioner
     if (per_solve_options.preconditioner != NULL) {
       z.setZero();
@@ -127,10 +126,9 @@
 
     double last_rho = rho;
     rho = r.dot(z);
-
     if (IsZeroOrInfinity(rho)) {
-      LOG(ERROR) << "Numerical failure. rho = " << rho;
-      summary.termination_type = FAILURE;
+      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.message = StringPrintf("Numerical failure. rho = r'z = %e.", rho);
       break;
     };
 
@@ -139,8 +137,9 @@
     } else {
       double beta = rho / last_rho;
       if (IsZeroOrInfinity(beta)) {
-        LOG(ERROR) << "Numerical failure. beta = " << beta;
-        summary.termination_type = FAILURE;
+        summary.termination_type = LINEAR_SOLVER_FAILURE;
+        summary.message = StringPrintf(
+            "Numerical failure. beta = rho_n / rho_{n-1} = %e.", beta);
         break;
       }
       p = z + beta * p;
@@ -149,18 +148,18 @@
     Vector& q = z;
     q.setZero();
     A->RightMultiply(p.data(), q.data());
-    double pq = p.dot(q);
-
+    const double pq = p.dot(q);
     if ((pq <= 0) || IsInfinite(pq))  {
-      LOG(ERROR) << "Numerical failure. pq = " << pq;
-      summary.termination_type = FAILURE;
+      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.message = StringPrintf("Numerical failure. p'q = %e.", pq);
       break;
     }
 
-    double alpha = rho / pq;
+    const double alpha = rho / pq;
     if (IsInfinite(alpha)) {
-      LOG(ERROR) << "Numerical failure. alpha " << alpha;
-      summary.termination_type = FAILURE;
+      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.message =
+          StringPrintf("Numerical failure. alpha = rho / pq = %e", alpha);
       break;
     }
 
@@ -183,7 +182,7 @@
 
     // Quadratic model based termination.
     //   Q1 = x'Ax - 2 * b' x.
-    double Q1 = -1.0 * xref.dot(bref + r);
+    const double Q1 = -1.0 * xref.dot(bref + r);
 
     // For PSD matrices A, let
     //
@@ -207,21 +206,23 @@
     //   Journal of Computational and Applied Mathematics,
     //   124(1-2), 45-59, 2000.
     //
-    double zeta = summary.num_iterations * (Q1 - Q0) / Q1;
-    VLOG(3) << "Q termination: zeta " << zeta
-            << " " << per_solve_options.q_tolerance;
+    const double zeta = summary.num_iterations * (Q1 - Q0) / Q1;
     if (zeta < per_solve_options.q_tolerance) {
-      summary.termination_type = TOLERANCE;
+      summary.termination_type = LINEAR_SOLVER_SUCCESS;
+      summary.message =
+          StringPrintf("Convergence: zeta = %e < %e",
+                       zeta,
+                       per_solve_options.q_tolerance);
       break;
     }
     Q0 = Q1;
 
     // Residual based termination.
     norm_r = r. norm();
-    VLOG(3) << "R termination: norm_r " << norm_r
-            << " " << tol_r;
     if (norm_r <= tol_r) {
-      summary.termination_type = TOLERANCE;
+      summary.termination_type = LINEAR_SOLVER_SUCCESS;
+      summary.message =
+          StringPrintf("Convergence. |r| = %e <= %e.", norm_r, tol_r);
       break;
     }
   }
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc
index c4da987..3b0553e 100644
--- a/internal/ceres/coordinate_descent_minimizer.cc
+++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -40,15 +40,15 @@
 #include "ceres/evaluator.h"
 #include "ceres/linear_solver.h"
 #include "ceres/minimizer.h"
-#include "ceres/ordered_groups.h"
 #include "ceres/parameter_block.h"
+#include "ceres/parameter_block_ordering.h"
 #include "ceres/problem_impl.h"
 #include "ceres/program.h"
 #include "ceres/residual_block.h"
 #include "ceres/solver.h"
-#include "ceres/solver_impl.h"
 #include "ceres/trust_region_minimizer.h"
 #include "ceres/trust_region_strategy.h"
+#include "ceres/parameter_block_ordering.h"
 
 namespace ceres {
 namespace internal {
@@ -227,10 +227,44 @@
   minimizer_options.evaluator = evaluator.get();
   minimizer_options.jacobian = jacobian.get();
   minimizer_options.trust_region_strategy = trust_region_strategy.get();
+  minimizer_options.is_silent = true;
 
   TrustRegionMinimizer minimizer;
   minimizer.Minimize(minimizer_options, parameter, summary);
 }
 
+bool CoordinateDescentMinimizer::IsOrderingValid(
+    const Program& program,
+    const ParameterBlockOrdering& ordering,
+    string* message) {
+  const map<int, set<double*> >& group_to_elements =
+      ordering.group_to_elements();
+
+  // Verify that each group is an independent set
+  map<int, set<double*> >::const_iterator it = group_to_elements.begin();
+  for ( ; it != group_to_elements.end(); ++it) {
+    if (!program.IsParameterBlockSetIndependent(it->second)) {
+      *message =
+          StringPrintf("The user-provided "
+                       "parameter_blocks_for_inner_iterations does not "
+                       "form an independent set. Group Id: %d", it->first);
+      return false;
+    }
+  }
+  return true;
+}
+
+// Find a recursive decomposition of the Hessian matrix as a set
+// of independent sets of decreasing size and invert it. This
+// seems to work better in practice, i.e., Cameras before
+// points.
+ParameterBlockOrdering* CoordinateDescentMinimizer::CreateOrdering(
+    const Program& program) {
+  scoped_ptr<ParameterBlockOrdering> ordering(new ParameterBlockOrdering);
+  ComputeRecursiveIndependentSetOrdering(program, ordering.get());
+  ordering->Reverse();
+  return ordering.release();
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/coordinate_descent_minimizer.h b/internal/ceres/coordinate_descent_minimizer.h
index 424acda..e324b38 100644
--- a/internal/ceres/coordinate_descent_minimizer.h
+++ b/internal/ceres/coordinate_descent_minimizer.h
@@ -37,12 +37,13 @@
 #include "ceres/evaluator.h"
 #include "ceres/minimizer.h"
 #include "ceres/problem_impl.h"
-#include "ceres/program.h"
 #include "ceres/solver.h"
 
 namespace ceres {
 namespace internal {
 
+class Program;
+
 // Given a Program, and a ParameterBlockOrdering which partitions
 // (non-exhaustively) the Hessian matrix into independent sets,
 // perform coordinate descent on the parameter blocks in the
@@ -66,6 +67,17 @@
                         double* parameters,
                         Solver::Summary* summary);
 
+  // Verify that each group in the ordering forms an independent set.
+  static bool IsOrderingValid(const Program& program,
+                              const ParameterBlockOrdering& ordering,
+                              string* message);
+
+  // Find a recursive decomposition of the Hessian matrix as a set
+  // of independent sets of decreasing size and invert it. This
+  // seems to work better in practice, i.e., Cameras before
+  // points.
+  static ParameterBlockOrdering* CreateOrdering(const Program& program);
+
  private:
   void Solve(Program* program,
              LinearSolver* linear_solver,
diff --git a/internal/ceres/corrector.cc b/internal/ceres/corrector.cc
index 60269a6..581fc6d 100644
--- a/internal/ceres/corrector.cc
+++ b/internal/ceres/corrector.cc
@@ -32,14 +32,14 @@
 
 #include <cstddef>
 #include <cmath>
+#include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
 
-Corrector::Corrector(double sq_norm, const double rho[3]) {
+Corrector::Corrector(const double sq_norm, const double rho[3]) {
   CHECK_GE(sq_norm, 0.0);
-  CHECK_GT(rho[1], 0.0);
   sqrt_rho1_ = sqrt(rho[1]);
 
   // If sq_norm = 0.0, the correction becomes trivial, the residual
@@ -84,6 +84,14 @@
     return;
   }
 
+  // We now require that the first derivative of the loss function be
+  // positive only if the second derivative is positive. This is
+  // because when the second derivative is non-positive, we do not use
+  // the second order correction suggested by BANS and instead use a
+  // simpler first order strategy which does not use a division by the
+  // gradient of the loss function.
+  CHECK_GT(rho[1], 0.0);
+
   // Calculate the smaller of the two solutions to the equation
   //
   // 0.5 *  alpha^2 - alpha - rho'' / rho' *  z'z = 0.
@@ -101,20 +109,25 @@
   alpha_sq_norm_ = alpha / sq_norm;
 }
 
-void Corrector::CorrectResiduals(int num_rows, double* residuals) {
+void Corrector::CorrectResiduals(const int num_rows, double* residuals) {
   DCHECK(residuals != NULL);
   // Equation 11 in BANS.
-  for (int r = 0; r < num_rows; ++r) {
-    residuals[r] *= residual_scaling_;
-  }
+  VectorRef(residuals, num_rows) *= residual_scaling_;
 }
 
-void Corrector::CorrectJacobian(int num_rows,
-                                int num_cols,
+void Corrector::CorrectJacobian(const int num_rows,
+                                const int num_cols,
                                 double* residuals,
                                 double* jacobian) {
   DCHECK(residuals != NULL);
   DCHECK(jacobian != NULL);
+
+  // The common case (rho[2] <= 0).
+  if (alpha_sq_norm_ == 0.0) {
+    VectorRef(jacobian, num_rows * num_cols) *= sqrt_rho1_;
+    return;
+  }
+
   // Equation 11 in BANS.
   //
   //  J = sqrt(rho) * (J - alpha^2 r * r' J)
diff --git a/internal/ceres/corrector_test.cc b/internal/ceres/corrector_test.cc
index 55e7d6b..9355616 100644
--- a/internal/ceres/corrector_test.cc
+++ b/internal/ceres/corrector_test.cc
@@ -43,14 +43,14 @@
 
 // If rho[1] is zero, the Corrector constructor should crash.
 TEST(Corrector, ZeroGradientDeathTest) {
-  const double kRho[] = {0.0, 0.0, 0.0};
+  const double kRho[] = {0.0, 0.0, 1.0};
   EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
                ".*");
 }
 
 // If rho[1] is negative, the Corrector constructor should crash.
 TEST(Corrector, NegativeGradientDeathTest) {
-  const double kRho[] = {0.0, -0.1, 0.0};
+  const double kRho[] = {0.0, -0.1, 1.0};
   EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
                ".*");
 }
diff --git a/internal/ceres/cost_function_to_functor_test.cc b/internal/ceres/cost_function_to_functor_test.cc
index 90ccc82..fd828ce 100644
--- a/internal/ceres/cost_function_to_functor_test.cc
+++ b/internal/ceres/cost_function_to_functor_test.cc
@@ -42,9 +42,9 @@
   EXPECT_EQ(cost_function.num_residuals(),
             actual_cost_function.num_residuals());
   const int num_residuals = cost_function.num_residuals();
-  const vector<int16>& parameter_block_sizes =
+  const vector<int32>& parameter_block_sizes =
       cost_function.parameter_block_sizes();
-  const vector<int16>& actual_parameter_block_sizes =
+  const vector<int32>& actual_parameter_block_sizes =
       actual_cost_function.parameter_block_sizes();
   EXPECT_EQ(parameter_block_sizes.size(),
             actual_parameter_block_sizes.size());
diff --git a/internal/ceres/covariance_impl.cc b/internal/ceres/covariance_impl.cc
index 19d545c..821be49 100644
--- a/internal/ceres/covariance_impl.cc
+++ b/internal/ceres/covariance_impl.cc
@@ -35,8 +35,28 @@
 #endif
 
 #include <algorithm>
+#include <cstdlib>
 #include <utility>
 #include <vector>
+#include "Eigen/SparseCore"
+
+// Suppress unused local variable warning from Eigen Ordering.h #included by
+// SparseQR in Eigen 3.2.0. This was fixed in Eigen 3.2.1, but 3.2.0 is still
+// widely used (Ubuntu 14.04), and Ceres won't compile otherwise due to -Werror.
+#if defined(_MSC_VER)
+#pragma warning( push )
+#pragma warning( disable : 4189 )
+#else
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#endif
+#include "Eigen/SparseQR"
+#if defined(_MSC_VER)
+#pragma warning( pop )
+#else
+#pragma GCC diagnostic pop
+#endif
+
 #include "Eigen/SVD"
 #include "ceres/compressed_col_sparse_matrix_utils.h"
 #include "ceres/compressed_row_sparse_matrix.h"
@@ -52,40 +72,6 @@
 
 namespace ceres {
 namespace internal {
-namespace {
-
-// Per thread storage for SuiteSparse.
-#ifndef CERES_NO_SUITESPARSE
-
-struct PerThreadContext {
-  explicit PerThreadContext(int num_rows)
-      : solution(NULL),
-        solution_set(NULL),
-        y_workspace(NULL),
-        e_workspace(NULL),
-        rhs(NULL) {
-    rhs = ss.CreateDenseVector(NULL, num_rows, num_rows);
-  }
-
-  ~PerThreadContext() {
-    ss.Free(solution);
-    ss.Free(solution_set);
-    ss.Free(y_workspace);
-    ss.Free(e_workspace);
-    ss.Free(rhs);
-  }
-
-  cholmod_dense* solution;
-  cholmod_sparse* solution_set;
-  cholmod_dense* y_workspace;
-  cholmod_dense* e_workspace;
-  cholmod_dense* rhs;
-  SuiteSparse ss;
-};
-
-#endif
-
-}  // namespace
 
 typedef vector<pair<const double*, const double*> > CovarianceBlocks;
 
@@ -164,9 +150,9 @@
   }
 
   if (offset == row_size) {
-    LOG(WARNING) << "Unable to find covariance block for "
-                 << original_parameter_block1 << " "
-                 << original_parameter_block2;
+    LOG(ERROR) << "Unable to find covariance block for "
+               << original_parameter_block1 << " "
+               << original_parameter_block2;
     return false;
   }
 
@@ -347,8 +333,8 @@
   // values of the parameter blocks. Thus iterating over the keys of
   // parameter_block_to_row_index_ corresponds to iterating over the
   // rows of the covariance matrix in order.
-  int i = 0; // index into covariance_blocks.
-  int cursor = 0; // index into the covariance matrix.
+  int i = 0;  // index into covariance_blocks.
+  int cursor = 0;  // index into the covariance matrix.
   for (map<const double*, int>::const_iterator it =
            parameter_block_to_row_index_.begin();
        it != parameter_block_to_row_index_.end();
@@ -392,14 +378,18 @@
 
 bool CovarianceImpl::ComputeCovarianceValues() {
   switch (options_.algorithm_type) {
-    case (DENSE_SVD):
+    case DENSE_SVD:
       return ComputeCovarianceValuesUsingDenseSVD();
 #ifndef CERES_NO_SUITESPARSE
-    case (SPARSE_CHOLESKY):
-      return ComputeCovarianceValuesUsingSparseCholesky();
-    case (SPARSE_QR):
-      return ComputeCovarianceValuesUsingSparseQR();
+    case SUITE_SPARSE_QR:
+      return ComputeCovarianceValuesUsingSuiteSparseQR();
+#else
+    case SUITE_SPARSE_QR:
+      LOG(ERROR) << "SuiteSparse is required to use SUITE_SPARSE_QR.";
+      return false;
 #endif
+    case EIGEN_SPARSE_QR:
+      return ComputeCovarianceValuesUsingEigenSparseQR();
     default:
       LOG(ERROR) << "Unsupported covariance estimation algorithm type: "
                  << CovarianceAlgorithmTypeToString(options_.algorithm_type);
@@ -408,186 +398,7 @@
   return false;
 }
 
-bool CovarianceImpl::ComputeCovarianceValuesUsingSparseCholesky() {
-  EventLogger event_logger(
-      "CovarianceImpl::ComputeCovarianceValuesUsingSparseCholesky");
-#ifndef CERES_NO_SUITESPARSE
-  if (covariance_matrix_.get() == NULL) {
-    // Nothing to do, all zeros covariance matrix.
-    return true;
-  }
-
-  SuiteSparse ss;
-
-  CRSMatrix jacobian;
-  problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
-
-  event_logger.AddEvent("Evaluate");
-  // m is a transposed view of the Jacobian.
-  cholmod_sparse cholmod_jacobian_view;
-  cholmod_jacobian_view.nrow = jacobian.num_cols;
-  cholmod_jacobian_view.ncol = jacobian.num_rows;
-  cholmod_jacobian_view.nzmax = jacobian.values.size();
-  cholmod_jacobian_view.nz = NULL;
-  cholmod_jacobian_view.p = reinterpret_cast<void*>(&jacobian.rows[0]);
-  cholmod_jacobian_view.i = reinterpret_cast<void*>(&jacobian.cols[0]);
-  cholmod_jacobian_view.x = reinterpret_cast<void*>(&jacobian.values[0]);
-  cholmod_jacobian_view.z = NULL;
-  cholmod_jacobian_view.stype = 0;  // Matrix is not symmetric.
-  cholmod_jacobian_view.itype = CHOLMOD_INT;
-  cholmod_jacobian_view.xtype = CHOLMOD_REAL;
-  cholmod_jacobian_view.dtype = CHOLMOD_DOUBLE;
-  cholmod_jacobian_view.sorted = 1;
-  cholmod_jacobian_view.packed = 1;
-
-  cholmod_factor* factor = ss.AnalyzeCholesky(&cholmod_jacobian_view);
-  event_logger.AddEvent("Symbolic Factorization");
-  bool factorization_succeeded = ss.Cholesky(&cholmod_jacobian_view, factor);
-  if (factorization_succeeded) {
-    const double reciprocal_condition_number =
-        cholmod_rcond(factor, ss.mutable_cc());
-    if (reciprocal_condition_number <
-        options_.min_reciprocal_condition_number) {
-      LOG(WARNING) << "Cholesky factorization of J'J is not reliable. "
-                   << "Reciprocal condition number: "
-                   << reciprocal_condition_number << " "
-                   << "min_reciprocal_condition_number : "
-                   << options_.min_reciprocal_condition_number;
-      factorization_succeeded = false;
-    }
-  }
-
-  event_logger.AddEvent("Numeric Factorization");
-  if (!factorization_succeeded) {
-    ss.Free(factor);
-    LOG(WARNING) << "Cholesky factorization failed.";
-    return false;
-  }
-
-  const int num_rows = covariance_matrix_->num_rows();
-  const int* rows = covariance_matrix_->rows();
-  const int* cols = covariance_matrix_->cols();
-  double* values = covariance_matrix_->mutable_values();
-
-  // The following loop exploits the fact that the i^th column of A^{-1}
-  // is given by the solution to the linear system
-  //
-  //  A x = e_i
-  //
-  // where e_i is a vector with e(i) = 1 and all other entries zero.
-  //
-  // Since the covariance matrix is symmetric, the i^th row and column
-  // are equal.
-  //
-  // The ifdef separates two different version of SuiteSparse. Newer
-  // versions of SuiteSparse have the cholmod_solve2 function which
-  // re-uses memory across calls.
-#if (SUITESPARSE_VERSION < 4002)
-  cholmod_dense* rhs = ss.CreateDenseVector(NULL, num_rows, num_rows);
-  double* rhs_x = reinterpret_cast<double*>(rhs->x);
-
-  for (int r = 0; r < num_rows; ++r) {
-    int row_begin = rows[r];
-    int row_end = rows[r + 1];
-    if (row_end == row_begin) {
-      continue;
-    }
-
-    rhs_x[r] = 1.0;
-    cholmod_dense* solution = ss.Solve(factor, rhs);
-    double* solution_x = reinterpret_cast<double*>(solution->x);
-    for (int idx = row_begin; idx < row_end; ++idx) {
-      const int c = cols[idx];
-      values[idx] = solution_x[c];
-    }
-    ss.Free(solution);
-    rhs_x[r] = 0.0;
-  }
-
-  ss.Free(rhs);
-#else  // SUITESPARSE_VERSION < 4002
-
-  const int num_threads = options_.num_threads;
-  vector<PerThreadContext*> contexts(num_threads);
-  for (int i = 0; i < num_threads; ++i) {
-    contexts[i] = new PerThreadContext(num_rows);
-  }
-
-  // The first call to cholmod_solve2 is not thread safe, since it
-  // changes the factorization from supernodal to simplicial etc.
-  {
-    PerThreadContext* context = contexts[0];
-    double* context_rhs_x =  reinterpret_cast<double*>(context->rhs->x);
-    context_rhs_x[0] = 1.0;
-    cholmod_solve2(CHOLMOD_A,
-                   factor,
-                   context->rhs,
-                   NULL,
-                   &context->solution,
-                   &context->solution_set,
-                   &context->y_workspace,
-                   &context->e_workspace,
-                   context->ss.mutable_cc());
-    context_rhs_x[0] = 0.0;
-  }
-
-#pragma omp parallel for num_threads(num_threads) schedule(dynamic)
-  for (int r = 0; r < num_rows; ++r) {
-    int row_begin = rows[r];
-    int row_end = rows[r + 1];
-    if (row_end == row_begin) {
-      continue;
-    }
-
-#  ifdef CERES_USE_OPENMP
-    int thread_id = omp_get_thread_num();
-#  else
-    int thread_id = 0;
-#  endif
-
-    PerThreadContext* context = contexts[thread_id];
-    double* context_rhs_x =  reinterpret_cast<double*>(context->rhs->x);
-    context_rhs_x[r] = 1.0;
-
-    // TODO(sameeragarwal) There should be a more efficient way
-    // involving the use of Bset but I am unable to make it work right
-    // now.
-    cholmod_solve2(CHOLMOD_A,
-                   factor,
-                   context->rhs,
-                   NULL,
-                   &context->solution,
-                   &context->solution_set,
-                   &context->y_workspace,
-                   &context->e_workspace,
-                   context->ss.mutable_cc());
-
-    double* solution_x = reinterpret_cast<double*>(context->solution->x);
-    for (int idx = row_begin; idx < row_end; ++idx) {
-      const int c = cols[idx];
-      values[idx] = solution_x[c];
-    }
-    context_rhs_x[r] = 0.0;
-  }
-
-  for (int i = 0; i < num_threads; ++i) {
-    delete contexts[i];
-  }
-
-#endif  // SUITESPARSE_VERSION < 4002
-
-  ss.Free(factor);
-  event_logger.AddEvent("Inversion");
-  return true;
-
-#else  // CERES_NO_SUITESPARSE
-
-  return false;
-
-#endif  // CERES_NO_SUITESPARSE
-};
-
-bool CovarianceImpl::ComputeCovarianceValuesUsingSparseQR() {
+bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
   EventLogger event_logger(
       "CovarianceImpl::ComputeCovarianceValuesUsingSparseQR");
 
@@ -681,10 +492,10 @@
   CHECK_NOTNULL(R);
 
   if (rank < cholmod_jacobian.ncol) {
-    LOG(WARNING) << "Jacobian matrix is rank deficient."
-                 << "Number of columns: " << cholmod_jacobian.ncol
-                 << " rank: " << rank;
-    delete []permutation;
+    LOG(ERROR) << "Jacobian matrix is rank deficient. "
+               << "Number of columns: " << cholmod_jacobian.ncol
+               << " rank: " << rank;
+    free(permutation);
     cholmod_l_free_sparse(&R, &cc);
     cholmod_l_finish(&cc);
     return false;
@@ -739,7 +550,7 @@
     }
   }
 
-  delete []permutation;
+  free(permutation);
   cholmod_l_free_sparse(&R, &cc);
   cholmod_l_finish(&cc);
   event_logger.AddEvent("Inversion");
@@ -807,11 +618,11 @@
       if (automatic_truncation) {
         break;
       } else {
-        LOG(WARNING) << "Cholesky factorization of J'J is not reliable. "
-                     << "Reciprocal condition number: "
-                     << singular_value_ratio * singular_value_ratio << " "
-                     << "min_reciprocal_condition_number : "
-                     << options_.min_reciprocal_condition_number;
+        LOG(ERROR) << "Cholesky factorization of J'J is not reliable. "
+                   << "Reciprocal condition number: "
+                   << singular_value_ratio * singular_value_ratio << " "
+                   << "min_reciprocal_condition_number: "
+                   << options_.min_reciprocal_condition_number;
         return false;
       }
     }
@@ -839,7 +650,102 @@
   }
   event_logger.AddEvent("CopyToCovarianceMatrix");
   return true;
-};
+}
+
+bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
+  EventLogger event_logger(
+      "CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR");
+  if (covariance_matrix_.get() == NULL) {
+    // Nothing to do, all zeros covariance matrix.
+    return true;
+  }
+
+  CRSMatrix jacobian;
+  problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
+  event_logger.AddEvent("Evaluate");
+
+  typedef Eigen::SparseMatrix<double, Eigen::ColMajor> EigenSparseMatrix;
+
+  // Convert the matrix to column major order as required by SparseQR.
+  EigenSparseMatrix sparse_jacobian =
+      Eigen::MappedSparseMatrix<double, Eigen::RowMajor>(
+          jacobian.num_rows, jacobian.num_cols,
+          static_cast<int>(jacobian.values.size()),
+          jacobian.rows.data(), jacobian.cols.data(), jacobian.values.data());
+  event_logger.AddEvent("ConvertToSparseMatrix");
+
+  Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int> >
+      qr_solver(sparse_jacobian);
+  event_logger.AddEvent("QRDecomposition");
+
+  if (qr_solver.info() != Eigen::Success) {
+    LOG(ERROR) << "Eigen::SparseQR decomposition failed.";
+    return false;
+  }
+
+  if (qr_solver.rank() < jacobian.num_cols) {
+    LOG(ERROR) << "Jacobian matrix is rank deficient. "
+               << "Number of columns: " << jacobian.num_cols
+               << " rank: " << qr_solver.rank();
+    return false;
+  }
+
+  const int* rows = covariance_matrix_->rows();
+  const int* cols = covariance_matrix_->cols();
+  double* values = covariance_matrix_->mutable_values();
+
+  // Compute the inverse column permutation used by QR factorization.
+  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> inverse_permutation =
+      qr_solver.colsPermutation().inverse();
+
+  // The following loop exploits the fact that the i^th column of A^{-1}
+  // is given by the solution to the linear system
+  //
+  //  A x = e_i
+  //
+  // where e_i is a vector with e(i) = 1 and all other entries zero.
+  //
+  // Since the covariance matrix is symmetric, the i^th row and column
+  // are equal.
+  const int num_cols = jacobian.num_cols;
+  const int num_threads = options_.num_threads;
+  scoped_array<double> workspace(new double[num_threads * num_cols]);
+
+#pragma omp parallel for num_threads(num_threads) schedule(dynamic)
+  for (int r = 0; r < num_cols; ++r) {
+    const int row_begin = rows[r];
+    const int row_end = rows[r + 1];
+    if (row_end == row_begin) {
+      continue;
+    }
+
+#  ifdef CERES_USE_OPENMP
+    int thread_id = omp_get_thread_num();
+#  else
+    int thread_id = 0;
+#  endif
+
+    double* solution = workspace.get() + thread_id * num_cols;
+    SolveRTRWithSparseRHS<int>(
+        num_cols,
+        qr_solver.matrixR().innerIndexPtr(),
+        qr_solver.matrixR().outerIndexPtr(),
+        &qr_solver.matrixR().data().value(0),
+        inverse_permutation.indices().coeff(r),
+        solution);
+
+    // Assign the values of the computed covariance using the
+    // inverse permutation used in the QR factorization.
+    for (int idx = row_begin; idx < row_end; ++idx) {
+      const int c = cols[idx];
+      values[idx] = solution[inverse_permutation.indices().coeff(c)];
+    }
+  }
+
+  event_logger.AddEvent("Inverse");
+
+  return true;
+}
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/covariance_impl.h b/internal/ceres/covariance_impl.h
index 0e7e217..135f4a1 100644
--- a/internal/ceres/covariance_impl.h
+++ b/internal/ceres/covariance_impl.h
@@ -64,9 +64,9 @@
       ProblemImpl* problem);
 
   bool ComputeCovarianceValues();
-  bool ComputeCovarianceValuesUsingSparseCholesky();
-  bool ComputeCovarianceValuesUsingSparseQR();
   bool ComputeCovarianceValuesUsingDenseSVD();
+  bool ComputeCovarianceValuesUsingSuiteSparseQR();
+  bool ComputeCovarianceValuesUsingEigenSparseQR();
 
   const CompressedRowSparseMatrix* covariance_matrix() const {
     return covariance_matrix_.get();
diff --git a/internal/ceres/covariance_test.cc b/internal/ceres/covariance_test.cc
index f3a5051..6c506b7 100644
--- a/internal/ceres/covariance_test.cc
+++ b/internal/ceres/covariance_test.cc
@@ -125,7 +125,7 @@
 class UnaryCostFunction: public CostFunction {
  public:
   UnaryCostFunction(const int num_residuals,
-                    const int16 parameter_block_size,
+                    const int32 parameter_block_size,
                     const double* jacobian)
       : jacobian_(jacobian, jacobian + num_residuals * parameter_block_size) {
     set_num_residuals(num_residuals);
@@ -158,8 +158,8 @@
 class BinaryCostFunction: public CostFunction {
  public:
   BinaryCostFunction(const int num_residuals,
-                     const int16 parameter_block1_size,
-                     const int16 parameter_block2_size,
+                     const int32 parameter_block1_size,
+                     const int32 parameter_block2_size,
                      const double* jacobian1,
                      const double* jacobian2)
       : jacobian1_(jacobian1,
@@ -400,15 +400,15 @@
   Covariance::Options options;
 
 #ifndef CERES_NO_SUITESPARSE
-  options.algorithm_type = SPARSE_CHOLESKY;
-  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
-
-  options.algorithm_type = SPARSE_QR;
+  options.algorithm_type = SUITE_SPARSE_QR;
   ComputeAndCompareCovarianceBlocks(options, expected_covariance);
 #endif
 
   options.algorithm_type = DENSE_SVD;
   ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = EIGEN_SPARSE_QR;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
 }
 
 #ifdef CERES_USE_OPENMP
@@ -448,15 +448,15 @@
   options.num_threads = 4;
 
 #ifndef CERES_NO_SUITESPARSE
-  options.algorithm_type = SPARSE_CHOLESKY;
-  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
-
-  options.algorithm_type = SPARSE_QR;
+  options.algorithm_type = SUITE_SPARSE_QR;
   ComputeAndCompareCovarianceBlocks(options, expected_covariance);
 #endif
 
   options.algorithm_type = DENSE_SVD;
   ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = EIGEN_SPARSE_QR;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
 }
 
 #endif  // CERES_USE_OPENMP
@@ -497,15 +497,15 @@
   Covariance::Options options;
 
 #ifndef CERES_NO_SUITESPARSE
-  options.algorithm_type = SPARSE_CHOLESKY;
-  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
-
-  options.algorithm_type = SPARSE_QR;
+  options.algorithm_type = SUITE_SPARSE_QR;
   ComputeAndCompareCovarianceBlocks(options, expected_covariance);
 #endif
 
   options.algorithm_type = DENSE_SVD;
   ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = EIGEN_SPARSE_QR;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
 }
 
 TEST_F(CovarianceTest, LocalParameterization) {
@@ -553,15 +553,15 @@
   Covariance::Options options;
 
 #ifndef CERES_NO_SUITESPARSE
-  options.algorithm_type = SPARSE_CHOLESKY;
-  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
-
-  options.algorithm_type = SPARSE_QR;
+  options.algorithm_type = SUITE_SPARSE_QR;
   ComputeAndCompareCovarianceBlocks(options, expected_covariance);
 #endif
 
   options.algorithm_type = DENSE_SVD;
   ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = EIGEN_SPARSE_QR;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
 }
 
 
@@ -727,7 +727,7 @@
                                                       parameter_block_size_,
                                                       jacobian.data()),
                                 NULL,
-                                block_i );
+                                block_i);
       for (int j = i; j < num_parameter_blocks_; ++j) {
         double* block_j = parameters_.get() + j * parameter_block_size_;
         all_covariance_blocks_.push_back(make_pair(block_i, block_j));
@@ -781,8 +781,7 @@
 #if !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)
 
 TEST_F(LargeScaleCovarianceTest, Parallel) {
-  ComputeAndCompare(SPARSE_CHOLESKY, 4);
-  ComputeAndCompare(SPARSE_QR, 4);
+  ComputeAndCompare(SUITE_SPARSE_QR, 4);
 }
 
 #endif  // !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
index c6d7743..87503d0 100644
--- a/internal/ceres/cxsparse.cc
+++ b/internal/ceres/cxsparse.cc
@@ -28,6 +28,9 @@
 //
 // Author: strandmark@google.com (Petter Strandmark)
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_CXSPARSE
 
 #include "ceres/cxsparse.h"
@@ -175,8 +178,8 @@
 
 cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) {
   cs_di_sparse tsm_wrapper;
-  tsm_wrapper.nzmax = tsm->num_nonzeros();;
-  tsm_wrapper.nz = tsm->num_nonzeros();;
+  tsm_wrapper.nzmax = tsm->num_nonzeros();
+  tsm_wrapper.nz = tsm->num_nonzeros();
   tsm_wrapper.m = tsm->num_rows();
   tsm_wrapper.n = tsm->num_cols();
   tsm_wrapper.p = tsm->mutable_cols();
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
index cd87908..5868401 100644
--- a/internal/ceres/cxsparse.h
+++ b/internal/ceres/cxsparse.h
@@ -31,11 +31,13 @@
 #ifndef CERES_INTERNAL_CXSPARSE_H_
 #define CERES_INTERNAL_CXSPARSE_H_
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_CXSPARSE
 
 #include <vector>
 #include "cs.h"
-#include "ceres/internal/port.h"
 
 namespace ceres {
 namespace internal {
@@ -127,9 +129,13 @@
 
 #else  // CERES_NO_CXSPARSE
 
-class CXSparse {};
 typedef void cs_dis;
 
+class CXSparse {
+ public:
+  void Free(void*) {}
+
+};
 #endif  // CERES_NO_CXSPARSE
 
 #endif  // CERES_INTERNAL_CXSPARSE_H_
diff --git a/internal/ceres/dense_normal_cholesky_solver.cc b/internal/ceres/dense_normal_cholesky_solver.cc
index fbf3cbe..f44d6da 100644
--- a/internal/ceres/dense_normal_cholesky_solver.cc
+++ b/internal/ceres/dense_normal_cholesky_solver.cc
@@ -95,9 +95,19 @@
 
   LinearSolver::Summary summary;
   summary.num_iterations = 1;
-  summary.termination_type = TOLERANCE;
-  VectorRef(x, num_cols) =
-      lhs.selfadjointView<Eigen::Upper>().llt().solve(rhs);
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  Eigen::LLT<Matrix, Eigen::Upper> llt =
+      lhs.selfadjointView<Eigen::Upper>().llt();
+
+  if (llt.info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "Eigen LLT decomposition failed.";
+  } else {
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.message = "Success.";
+  }
+
+  VectorRef(x, num_cols) = llt.solve(rhs);
   event_logger.AddEvent("Solve");
   return summary;
 }
@@ -142,14 +152,14 @@
       A->matrix().transpose() * ConstVectorRef(b, A->num_rows());
   event_logger.AddEvent("Product");
 
-  const int info = LAPACK::SolveInPlaceUsingCholesky(num_cols, lhs.data(), x);
-  event_logger.AddEvent("Solve");
-
   LinearSolver::Summary summary;
   summary.num_iterations = 1;
-  summary.termination_type = info == 0 ? TOLERANCE : FAILURE;
-
-  event_logger.AddEvent("TearDown");
+  summary.termination_type =
+      LAPACK::SolveInPlaceUsingCholesky(num_cols,
+                                        lhs.data(),
+                                        x,
+                                        &summary.message);
+  event_logger.AddEvent("Solve");
   return summary;
 }
 }   // namespace internal
diff --git a/internal/ceres/dense_qr_solver.cc b/internal/ceres/dense_qr_solver.cc
index d76d58b..4388357 100644
--- a/internal/ceres/dense_qr_solver.cc
+++ b/internal/ceres/dense_qr_solver.cc
@@ -60,6 +60,7 @@
     return SolveUsingLAPACK(A, b, per_solve_options, x);
   }
 }
+
 LinearSolver::Summary DenseQRSolver::SolveUsingLAPACK(
     DenseSparseMatrix* A,
     const double* b,
@@ -100,21 +101,18 @@
     work_.resize(work_size);
   }
 
-  const int info = LAPACK::SolveUsingQR(lhs_.rows(),
-                                        lhs_.cols(),
-                                        lhs_.data(),
-                                        work_.rows(),
-                                        work_.data(),
-                                        rhs_.data());
-  event_logger.AddEvent("Solve");
-
   LinearSolver::Summary summary;
   summary.num_iterations = 1;
-  if (info == 0) {
+  summary.termination_type = LAPACK::SolveInPlaceUsingQR(lhs_.rows(),
+                                                         lhs_.cols(),
+                                                         lhs_.data(),
+                                                         work_.rows(),
+                                                         work_.data(),
+                                                         rhs_.data(),
+                                                         &summary.message);
+  event_logger.AddEvent("Solve");
+  if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
     VectorRef(x, num_cols) = rhs_.head(num_cols);
-    summary.termination_type = TOLERANCE;
-  } else {
-    summary.termination_type = FAILURE;
   }
 
   event_logger.AddEvent("TearDown");
@@ -161,7 +159,8 @@
   // is good enough or not.
   LinearSolver::Summary summary;
   summary.num_iterations = 1;
-  summary.termination_type = TOLERANCE;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
 
   event_logger.AddEvent("TearDown");
   return summary;
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
index c85c8e5..f29376d 100644
--- a/internal/ceres/dogleg_strategy.cc
+++ b/internal/ceres/dogleg_strategy.cc
@@ -99,7 +99,7 @@
     }
     TrustRegionStrategy::Summary summary;
     summary.num_iterations = 0;
-    summary.termination_type = TOLERANCE;
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
     return summary;
   }
 
@@ -135,7 +135,11 @@
   summary.num_iterations = linear_solver_summary.num_iterations;
   summary.termination_type = linear_solver_summary.termination_type;
 
-  if (linear_solver_summary.termination_type != FAILURE) {
+  if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+    return summary;
+  }
+
+  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
     switch (dogleg_type_) {
       // Interpolate the Cauchy point and the Gauss-Newton step.
       case TRADITIONAL_DOGLEG:
@@ -146,7 +150,7 @@
       // Cauchy point and the (Gauss-)Newton step.
       case SUBSPACE_DOGLEG:
         if (!ComputeSubspaceModel(jacobian)) {
-          summary.termination_type = FAILURE;
+          summary.termination_type = LINEAR_SOLVER_FAILURE;
           break;
         }
         ComputeSubspaceDoglegStep(step);
@@ -513,7 +517,7 @@
     const double* residuals) {
   const int n = jacobian->num_cols();
   LinearSolver::Summary linear_solver_summary;
-  linear_solver_summary.termination_type = FAILURE;
+  linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
 
   // The Jacobian matrix is often quite poorly conditioned. Thus it is
   // necessary to add a diagonal matrix at the bottom to prevent the
@@ -526,7 +530,7 @@
   // If the solve fails, the multiplier to the diagonal is increased
   // up to max_mu_ by a factor of mu_increase_factor_ every time. If
   // the linear solver is still not successful, the strategy returns
-  // with FAILURE.
+  // with LINEAR_SOLVER_FAILURE.
   //
   // Next time when a new Gauss-Newton step is requested, the
   // multiplier starts out from the last successful solve.
@@ -579,17 +583,21 @@
       }
     }
 
-    if (linear_solver_summary.termination_type == FAILURE ||
+    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+      return linear_solver_summary;
+    }
+
+    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE ||
         !IsArrayValid(n, gauss_newton_step_.data())) {
       mu_ *= mu_increase_factor_;
       VLOG(2) << "Increasing mu " << mu_;
-      linear_solver_summary.termination_type = FAILURE;
+      linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
       continue;
     }
     break;
   }
 
-  if (linear_solver_summary.termination_type != FAILURE) {
+  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
     // The scaled Gauss-Newton step is D * GN:
     //
     //     - (D^-1 J^T J D^-1)^-1 (D^-1 g)
diff --git a/internal/ceres/dogleg_strategy_test.cc b/internal/ceres/dogleg_strategy_test.cc
index ace635f..795719d 100644
--- a/internal/ceres/dogleg_strategy_test.cc
+++ b/internal/ceres/dogleg_strategy_test.cc
@@ -144,7 +144,7 @@
                                                               residual_.data(),
                                                               x_.data());
 
-  EXPECT_NE(summary.termination_type, FAILURE);
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
 }
 
@@ -164,7 +164,7 @@
                                                               residual_.data(),
                                                               x_.data());
 
-  EXPECT_NE(summary.termination_type, FAILURE);
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
 }
 
@@ -184,7 +184,7 @@
                                                               residual_.data(),
                                                               x_.data());
 
-  EXPECT_NE(summary.termination_type, FAILURE);
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_NEAR(x_(0), 1.0, kToleranceLoose);
   EXPECT_NEAR(x_(1), 1.0, kToleranceLoose);
   EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
@@ -246,7 +246,7 @@
                                                               residual_.data(),
                                                               x_.data());
 
-  EXPECT_NE(summary.termination_type, FAILURE);
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
   EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
   EXPECT_NEAR(x_(2), options_.initial_radius, kToleranceLoose);
@@ -274,7 +274,7 @@
                                                               residual_.data(),
                                                               x_.data());
 
-  EXPECT_NE(summary.termination_type, FAILURE);
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
   EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
   EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
   EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
diff --git a/internal/ceres/dynamic_compressed_row_finalizer.h b/internal/ceres/dynamic_compressed_row_finalizer.h
new file mode 100644
index 0000000..5e6b0d8
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_finalizer.h
@@ -0,0 +1,51 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#ifndef CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
+#define CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
+
+#include "ceres/casts.h"
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+struct DynamicCompressedRowJacobianFinalizer {
+  void operator()(SparseMatrix* base_jacobian, int num_parameters) {
+    DynamicCompressedRowSparseMatrix* jacobian =
+      down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+    jacobian->Finalize(num_parameters);
+  }
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif // CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.cc b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
new file mode 100644
index 0000000..2f01617
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#include "ceres/compressed_row_jacobian_writer.h"
+#include "ceres/dynamic_compressed_row_jacobian_writer.h"
+#include "ceres/casts.h"
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+
+namespace ceres {
+namespace internal {
+
+ScratchEvaluatePreparer*
+DynamicCompressedRowJacobianWriter::CreateEvaluatePreparers(int num_threads) {
+  return ScratchEvaluatePreparer::Create(*program_, num_threads);
+}
+
+SparseMatrix* DynamicCompressedRowJacobianWriter::CreateJacobian() const {
+  // Initialize `jacobian` with zero number of `max_num_nonzeros`.
+  const int num_residuals = program_->NumResiduals();
+  const int num_effective_parameters = program_->NumEffectiveParameters();
+
+  DynamicCompressedRowSparseMatrix* jacobian =
+      new DynamicCompressedRowSparseMatrix(num_residuals,
+                                           num_effective_parameters,
+                                           0);
+
+  CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
+      program_, jacobian);
+
+  return jacobian;
+}
+
+void DynamicCompressedRowJacobianWriter::Write(int residual_id,
+                                               int residual_offset,
+                                               double **jacobians,
+                                               SparseMatrix* base_jacobian) {
+  DynamicCompressedRowSparseMatrix* jacobian =
+    down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+
+  // Get the `residual_block` of interest.
+  const ResidualBlock* residual_block =
+      program_->residual_blocks()[residual_id];
+  const int num_residuals = residual_block->NumResiduals();
+
+  vector<pair<int, int> > evaluated_jacobian_blocks;
+  CompressedRowJacobianWriter::GetOrderedParameterBlocks(
+    program_, residual_id, &evaluated_jacobian_blocks);
+
+  // `residual_offset` is the residual row in the global jacobian.
+  // Empty the jacobian rows.
+  jacobian->ClearRows(residual_offset, num_residuals);
+
+  // Iterate over each parameter block.
+  for (int i = 0; i < evaluated_jacobian_blocks.size(); ++i) {
+    const ParameterBlock* parameter_block =
+        program_->parameter_blocks()[evaluated_jacobian_blocks[i].first];
+    const int parameter_block_jacobian_index =
+        evaluated_jacobian_blocks[i].second;
+    const int parameter_block_size = parameter_block->LocalSize();
+
+    // For each parameter block only insert its non-zero entries.
+    for (int r = 0; r < num_residuals; ++r) {
+      for (int c = 0; c < parameter_block_size; ++c) {
+        const double& v = jacobians[parameter_block_jacobian_index][
+            r * parameter_block_size + c];
+        // Only insert non-zero entries.
+        if (v != 0.0) {
+          jacobian->InsertEntry(
+            residual_offset + r, parameter_block->delta_offset() + c, v);
+        }
+      }
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.h b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
new file mode 100644
index 0000000..df9581b
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
@@ -0,0 +1,83 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+//
+// A jacobian writer that directly writes to dynamic compressed row sparse
+// matrices.
+
+#ifndef CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+#define CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+
+#include "ceres/evaluator.h"
+#include "ceres/scratch_evaluate_preparer.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+class SparseMatrix;
+
+class DynamicCompressedRowJacobianWriter {
+ public:
+  DynamicCompressedRowJacobianWriter(Evaluator::Options /* ignored */,
+                                     Program* program)
+    : program_(program) {
+  }
+
+  // JacobianWriter interface.
+
+  // The compressed row matrix has different layout than that assumed by
+  // the cost functions. The scratch space is therefore used to store
+  // the jacobians (including zeros) temporarily before only the non-zero
+  // entries are copied over to the larger jacobian in `Write`.
+  ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads);
+
+  // Return a `DynamicCompressedRowSparseMatrix` which is filled by
+  // `Write`. Note that `Finalize` must be called to make the
+  // `CompressedRowSparseMatrix` interface valid.
+  SparseMatrix* CreateJacobian() const;
+
+  // Write only the non-zero jacobian entries for a residual block
+  // (specified by `residual_id`) into `base_jacobian`, starting at the row
+  // specified by `residual_offset`.
+  //
+  // This method is thread-safe over residual blocks (each `residual_id`).
+  void Write(int residual_id,
+             int residual_offset,
+             double **jacobians,
+             SparseMatrix* base_jacobian);
+
+ private:
+  Program* program_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif // CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
new file mode 100644
index 0000000..f285d52
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#include <cstring>
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+DynamicCompressedRowSparseMatrix::DynamicCompressedRowSparseMatrix(
+  int num_rows,
+  int num_cols,
+  int initial_max_num_nonzeros)
+    : CompressedRowSparseMatrix(num_rows,
+                                num_cols,
+                                initial_max_num_nonzeros) {
+    dynamic_cols_.resize(num_rows);
+    dynamic_values_.resize(num_rows);
+  }
+
+void DynamicCompressedRowSparseMatrix::InsertEntry(int row,
+                                                   int col,
+                                                   const double& value) {
+  CHECK_GE(row, 0);
+  CHECK_LT(row, num_rows());
+  CHECK_GE(col, 0);
+  CHECK_LT(col, num_cols());
+  dynamic_cols_[row].push_back(col);
+  dynamic_values_[row].push_back(value);
+}
+
+void DynamicCompressedRowSparseMatrix::ClearRows(int row_start,
+                                                 int num_rows) {
+  for (int r = 0; r < num_rows; ++r) {
+    const int i = row_start + r;
+    CHECK_GE(i, 0);
+    CHECK_LT(i, this->num_rows());
+    dynamic_cols_[i].resize(0);
+    dynamic_values_[i].resize(0);
+  }
+}
+
+void DynamicCompressedRowSparseMatrix::Finalize(int num_additional_elements) {
+  // `num_additional_elements` is provided as an argument so that additional
+  // storage can be reserved when it is known by the finalizer.
+  CHECK_GE(num_additional_elements, 0);
+
+  // Count the number of non-zeros and resize `cols_` and `values_`.
+  int num_jacobian_nonzeros = 0;
+  for (int i = 0; i < dynamic_cols_.size(); ++i) {
+    num_jacobian_nonzeros += dynamic_cols_[i].size();
+  }
+
+  SetMaxNumNonZeros(num_jacobian_nonzeros + num_additional_elements);
+
+  // Flatten `dynamic_cols_` into `cols_` and `dynamic_values_`
+  // into `values_`.
+  int index_into_values_and_cols = 0;
+  for (int i = 0; i < num_rows(); ++i) {
+    mutable_rows()[i] = index_into_values_and_cols;
+    const int num_nonzero_columns = dynamic_cols_[i].size();
+    if (num_nonzero_columns > 0) {
+      memcpy(mutable_cols() + index_into_values_and_cols,
+             &dynamic_cols_[i][0],
+             dynamic_cols_[i].size() * sizeof(dynamic_cols_[0][0]));
+      memcpy(mutable_values() + index_into_values_and_cols,
+             &dynamic_values_[i][0],
+             dynamic_values_[i].size() * sizeof(dynamic_values_[0][0]));
+      index_into_values_and_cols += dynamic_cols_[i].size();
+    }
+  }
+  mutable_rows()[num_rows()] = index_into_values_and_cols;
+
+  CHECK_EQ(index_into_values_and_cols, num_jacobian_nonzeros)
+    << "Ceres bug: final index into values_ and cols_ should be equal to "
+    << "the number of jacobian nonzeros. Please contact the developers!";
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix.h b/internal/ceres/dynamic_compressed_row_sparse_matrix.h
new file mode 100644
index 0000000..7a89a70
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix.h
@@ -0,0 +1,99 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+//
+// A compressed row sparse matrix that provides an extended interface to
+// allow dynamic insertion of entries. This is provided for the use case
+// where the sparsity structure and number of non-zero entries is dynamic.
+// This flexibility is achieved by using an (internal) scratch space that
+// allows independent insertion of entries into each row (thread-safe).
+// Once insertion is complete, the `Finalize` method must be called to ensure
+// that the underlying `CompressedRowSparseMatrix` is consistent.
+//
+// This should only be used if you really do need a dynamic sparsity pattern.
+
+#ifndef CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
+
+#include "ceres/compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+class DynamicCompressedRowSparseMatrix : public CompressedRowSparseMatrix {
+ public:
+  // Set the number of rows and columns for the underlying
+  // `CompressedRowSparseMatrix` and set the initial number of maximum non-zero
+  // entries. Note that following the insertion of entries, when `Finalize`
+  // is called the number of non-zeros is determined and all internal
+  // structures are adjusted as required. If you know the upper limit on the
+  // number of non-zeros, then passing this value here can prevent future
+  // memory reallocations which may improve performance. Otherwise, if no
+  // upper limit is available a value of 0 is sufficient.
+  //
+  // Typical usage of this class is to define a new instance with a given
+  // number of rows, columns and maximum number of non-zero elements
+  // (if available). Next, entries are inserted at row and column positions
+  // using `InsertEntry`. Finally, once all elements have been inserted,
+  // `Finalize` must be called to make the underlying
+  // `CompressedRowSparseMatrix` consistent.
+  DynamicCompressedRowSparseMatrix(int num_rows,
+                                   int num_cols,
+                                   int initial_max_num_nonzeros);
+
+  // Insert an entry at a given row and column position. This method is
+  // thread-safe across rows i.e. different threads can insert values
+  // simultaneously into different rows. It should be emphasised that this
+  // method always inserts a new entry and does not check for existing
+  // entries at the specified row and column position. Duplicate entries
+  // for a given row and column position will result in undefined
+  // behavior.
+  void InsertEntry(int row, int col, const double& value);
+
+  // Clear all entries for rows, starting from row index `row_start`
+  // and proceeding for `num_rows`.
+  void ClearRows(int row_start, int num_rows);
+
+  // Make the underlying internal `CompressedRowSparseMatrix` data structures
+  // consistent. Additional space for non-zero entries in the
+  // `CompressedRowSparseMatrix` can be reserved by specifying
+  // `num_additional_elements`. This is useful when it is known that rows will
+  // be appended to the `CompressedRowSparseMatrix` (e.g. appending a diagonal
+  // matrix to the jacobian) as it prevents need for future reallocation.
+  void Finalize(int num_additional_elements);
+
+ private:
+  vector<vector<int> > dynamic_cols_;
+  vector<vector<double> > dynamic_values_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif // CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
new file mode 100644
index 0000000..03bfcb6
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
@@ -0,0 +1,217 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+class DynamicCompressedRowSparseMatrixTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    num_rows = 7;
+    num_cols = 4;
+
+    // The number of additional elements reserved when `Finalize` is called
+    // should have no effect on the number of rows, columns or nonzeros.
+    // Set this to some nonzero value to be sure.
+    num_additional_elements = 13;
+
+    expected_num_nonzeros = num_rows * num_cols - min(num_rows, num_cols);
+
+    InitialiseDenseReference();
+    InitialiseSparseMatrixReferences();
+
+    dcrsm.reset(new DynamicCompressedRowSparseMatrix(num_rows,
+                                                     num_cols,
+                                                     0));
+  }
+
+  void Finalize() {
+    dcrsm->Finalize(num_additional_elements);
+  }
+
+  void InitialiseDenseReference() {
+    dense.resize(num_rows, num_cols);
+    dense.setZero();
+    int num_nonzeros = 0;
+    for (int i = 0; i < (num_rows * num_cols); ++i) {
+      const int r = i / num_cols, c = i % num_cols;
+      if (r != c) {
+        dense(r, c) = i + 1;
+        ++num_nonzeros;
+      }
+    }
+    ASSERT_EQ(num_nonzeros, expected_num_nonzeros);
+  }
+
+  void InitialiseSparseMatrixReferences() {
+    std::vector<int> rows, cols;
+    std::vector<double> values;
+    for (int i = 0; i < (num_rows * num_cols); ++i) {
+      const int r = i / num_cols, c = i % num_cols;
+      if (r != c) {
+        rows.push_back(r);
+        cols.push_back(c);
+        values.push_back(i + 1);
+      }
+    }
+    ASSERT_EQ(values.size(), expected_num_nonzeros);
+
+    tsm.reset(new TripletSparseMatrix(num_rows,
+                                      num_cols,
+                                      expected_num_nonzeros));
+    std::copy(rows.begin(), rows.end(), tsm->mutable_rows());
+    std::copy(cols.begin(), cols.end(), tsm->mutable_cols());
+    std::copy(values.begin(), values.end(), tsm->mutable_values());
+    tsm->set_num_nonzeros(values.size());
+
+    Matrix dense_from_tsm;
+    tsm->ToDenseMatrix(&dense_from_tsm);
+    ASSERT_TRUE((dense.array() == dense_from_tsm.array()).all());
+
+    crsm.reset(new CompressedRowSparseMatrix(*tsm));
+    Matrix dense_from_crsm;
+    crsm->ToDenseMatrix(&dense_from_crsm);
+    ASSERT_TRUE((dense.array() == dense_from_crsm.array()).all());
+  }
+
+  void InsertNonZeroEntriesFromDenseReference() {
+    for (int r = 0; r < num_rows; ++r) {
+      for (int c = 0; c < num_cols; ++c) {
+        const double& v = dense(r, c);
+        if (v != 0.0) {
+          dcrsm->InsertEntry(r, c, v);
+        }
+      }
+    }
+  }
+
+  void ExpectEmpty() {
+    EXPECT_EQ(dcrsm->num_rows(), num_rows);
+    EXPECT_EQ(dcrsm->num_cols(), num_cols);
+    EXPECT_EQ(dcrsm->num_nonzeros(), 0);
+
+    Matrix dense_from_dcrsm;
+    dcrsm->ToDenseMatrix(&dense_from_dcrsm);
+    EXPECT_EQ(dense_from_dcrsm.rows(), num_rows);
+    EXPECT_EQ(dense_from_dcrsm.cols(), num_cols);
+    EXPECT_TRUE((dense_from_dcrsm.array() == 0.0).all());
+  }
+
+  void ExpectEqualToDenseReference() {
+    Matrix dense_from_dcrsm;
+    dcrsm->ToDenseMatrix(&dense_from_dcrsm);
+    EXPECT_TRUE((dense.array() == dense_from_dcrsm.array()).all());
+  }
+
+  void ExpectEqualToCompressedRowSparseMatrixReference() {
+    typedef Eigen::Map<const Eigen::VectorXi> ConstIntVectorRef;
+
+    ConstIntVectorRef crsm_rows(crsm->rows(), crsm->num_rows() + 1);
+    ConstIntVectorRef dcrsm_rows(dcrsm->rows(), dcrsm->num_rows() + 1);
+    EXPECT_TRUE((crsm_rows.array() == dcrsm_rows.array()).all());
+
+    ConstIntVectorRef crsm_cols(crsm->cols(), crsm->num_nonzeros());
+    ConstIntVectorRef dcrsm_cols(dcrsm->cols(), dcrsm->num_nonzeros());
+    EXPECT_TRUE((crsm_cols.array() == dcrsm_cols.array()).all());
+
+    ConstVectorRef crsm_values(crsm->values(), crsm->num_nonzeros());
+    ConstVectorRef dcrsm_values(dcrsm->values(), dcrsm->num_nonzeros());
+    EXPECT_TRUE((crsm_values.array() == dcrsm_values.array()).all());
+  }
+
+  int num_rows;
+  int num_cols;
+
+  int num_additional_elements;
+
+  int expected_num_nonzeros;
+
+  Matrix dense;
+  scoped_ptr<TripletSparseMatrix> tsm;
+  scoped_ptr<CompressedRowSparseMatrix> crsm;
+
+  scoped_ptr<DynamicCompressedRowSparseMatrix> dcrsm;
+};
+
+TEST_F(DynamicCompressedRowSparseMatrixTest, Initialization) {
+  ExpectEmpty();
+
+  Finalize();
+  ExpectEmpty();
+}
+
+TEST_F(DynamicCompressedRowSparseMatrixTest, InsertEntryAndFinalize) {
+  InsertNonZeroEntriesFromDenseReference();
+  ExpectEmpty();
+
+  Finalize();
+  ExpectEqualToDenseReference();
+  ExpectEqualToCompressedRowSparseMatrixReference();
+}
+
+TEST_F(DynamicCompressedRowSparseMatrixTest, ClearRows) {
+  InsertNonZeroEntriesFromDenseReference();
+  Finalize();
+  ExpectEqualToDenseReference();
+  ExpectEqualToCompressedRowSparseMatrixReference();
+
+  dcrsm->ClearRows(0, 0);
+  Finalize();
+  ExpectEqualToDenseReference();
+  ExpectEqualToCompressedRowSparseMatrixReference();
+
+  dcrsm->ClearRows(0, num_rows);
+  ExpectEqualToCompressedRowSparseMatrixReference();
+
+  Finalize();
+  ExpectEmpty();
+
+  InsertNonZeroEntriesFromDenseReference();
+  dcrsm->ClearRows(1, 2);
+  Finalize();
+  dense.block(1, 0, 2, num_cols).setZero();
+  ExpectEqualToDenseReference();
+
+  InitialiseDenseReference();
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dynamic_numeric_diff_cost_function_test.cc b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
new file mode 100644
index 0000000..19f4d88
--- /dev/null
+++ b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
@@ -0,0 +1,519 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         mierle@gmail.com (Keir Mierle)
+
+#include <cstddef>
+
+#include "ceres/dynamic_numeric_diff_cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const double kTolerance = 1e-6;
+
+// Takes 2 parameter blocks:
+//     parameters[0] is size 10.
+//     parameters[1] is size 5.
+// Emits 21 residuals:
+//     A: i - parameters[0][i], for i in [0,10)  -- this is 10 residuals
+//     B: parameters[0][i] - i, for i in [0,10)  -- this is another 10.
+//     C: sum(parameters[0][i]^2 - 8*parameters[0][i]) + sum(parameters[1][i])
+class MyCostFunctor {
+ public:
+  bool operator()(double const* const* parameters, double* residuals) const {
+    const double* params0 = parameters[0];
+    int r = 0;
+    for (int i = 0; i < 10; ++i) {
+      residuals[r++] = i - params0[i];
+      residuals[r++] = params0[i] - i;
+    }
+
+    double c_residual = 0.0;
+    for (int i = 0; i < 10; ++i) {
+      c_residual += pow(params0[i], 2) - 8.0 * params0[i];
+    }
+
+    const double* params1 = parameters[1];
+    for (int i = 0; i < 5; ++i) {
+      c_residual += params1[i];
+    }
+    residuals[r++] = c_residual;
+    return true;
+  }
+};
+
+TEST(DynamicNumericdiffCostFunctionTest, TestResiduals) {
+  vector<double> param_block_0(10, 0.0);
+  vector<double> param_block_1(5, 0.0);
+  DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Test residual computation.
+  vector<double> residuals(21, -100000);
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+  EXPECT_TRUE(cost_function.Evaluate(&parameter_blocks[0],
+                                     residuals.data(),
+                                     NULL));
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(0, residuals.at(20));
+}
+
+
+TEST(DynamicNumericdiffCostFunctionTest, TestJacobian) {
+  // Test the residual counting.
+  vector<double> param_block_0(10, 0.0);
+  for (int i = 0; i < 10; ++i) {
+    param_block_0[i] = 2 * i;
+  }
+  vector<double> param_block_1(5, 0.0);
+  DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Prepare the residuals.
+  vector<double> residuals(21, -100000);
+
+  // Prepare the parameters.
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+
+  // Prepare the jacobian.
+  vector<vector<double> > jacobian_vect(2);
+  jacobian_vect[0].resize(21 * 10, -100000);
+  jacobian_vect[1].resize(21 * 5, -100000);
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect[0].data());
+  jacobian.push_back(jacobian_vect[1].data());
+
+  // Test jacobian computation.
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+                                     residuals.data(),
+                                     jacobian.data()));
+
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(420, residuals.at(20));
+  for (int p = 0; p < 10; ++p) {
+    // Check "A" Jacobian.
+    EXPECT_NEAR(-1.0, jacobian_vect[0][2*p * 10 + p], kTolerance);
+    // Check "B" Jacobian.
+    EXPECT_NEAR(+1.0, jacobian_vect[0][(2*p+1) * 10 + p], kTolerance);
+    jacobian_vect[0][2*p * 10 + p] = 0.0;
+    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+  }
+
+  // Check "C" Jacobian for first parameter block.
+  for (int p = 0; p < 10; ++p) {
+    EXPECT_NEAR(4 * p - 8, jacobian_vect[0][20 * 10 + p], kTolerance);
+    jacobian_vect[0][20 * 10 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[0].size(); ++i) {
+    EXPECT_NEAR(0.0, jacobian_vect[0][i], kTolerance);
+  }
+
+  // Check "C" Jacobian for second parameter block.
+  for (int p = 0; p < 5; ++p) {
+    EXPECT_NEAR(1.0, jacobian_vect[1][20 * 5 + p], kTolerance);
+    jacobian_vect[1][20 * 5 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[1].size(); ++i) {
+    EXPECT_NEAR(0.0, jacobian_vect[1][i], kTolerance);
+  }
+}
+
+TEST(DynamicNumericdiffCostFunctionTest, JacobianWithFirstParameterBlockConstant) {  // NOLINT
+  // Test the residual counting.
+  vector<double> param_block_0(10, 0.0);
+  for (int i = 0; i < 10; ++i) {
+    param_block_0[i] = 2 * i;
+  }
+  vector<double> param_block_1(5, 0.0);
+  DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Prepare the residuals.
+  vector<double> residuals(21, -100000);
+
+  // Prepare the parameters.
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+
+  // Prepare the jacobian.
+  vector<vector<double> > jacobian_vect(2);
+  jacobian_vect[0].resize(21 * 10, -100000);
+  jacobian_vect[1].resize(21 * 5, -100000);
+  vector<double*> jacobian;
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect[1].data());
+
+  // Test jacobian computation.
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+                                     residuals.data(),
+                                     jacobian.data()));
+
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(420, residuals.at(20));
+
+  // Check "C" Jacobian for second parameter block.
+  for (int p = 0; p < 5; ++p) {
+    EXPECT_NEAR(1.0, jacobian_vect[1][20 * 5 + p], kTolerance);
+    jacobian_vect[1][20 * 5 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[1].size(); ++i) {
+    EXPECT_EQ(0.0, jacobian_vect[1][i]);
+  }
+}
+
+TEST(DynamicNumericdiffCostFunctionTest, JacobianWithSecondParameterBlockConstant) {  // NOLINT
+  // Test the residual counting.
+  vector<double> param_block_0(10, 0.0);
+  for (int i = 0; i < 10; ++i) {
+    param_block_0[i] = 2 * i;
+  }
+  vector<double> param_block_1(5, 0.0);
+  DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Prepare the residuals.
+  vector<double> residuals(21, -100000);
+
+  // Prepare the parameters.
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+
+  // Prepare the jacobian.
+  vector<vector<double> > jacobian_vect(2);
+  jacobian_vect[0].resize(21 * 10, -100000);
+  jacobian_vect[1].resize(21 * 5, -100000);
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect[0].data());
+  jacobian.push_back(NULL);
+
+  // Test jacobian computation.
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+                                     residuals.data(),
+                                     jacobian.data()));
+
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(420, residuals.at(20));
+  for (int p = 0; p < 10; ++p) {
+    // Check "A" Jacobian.
+    EXPECT_NEAR(-1.0, jacobian_vect[0][2*p * 10 + p], kTolerance);
+    // Check "B" Jacobian.
+    EXPECT_NEAR(+1.0, jacobian_vect[0][(2*p+1) * 10 + p], kTolerance);
+    jacobian_vect[0][2*p * 10 + p] = 0.0;
+    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+  }
+
+  // Check "C" Jacobian for first parameter block.
+  for (int p = 0; p < 10; ++p) {
+    EXPECT_NEAR(4 * p - 8, jacobian_vect[0][20 * 10 + p], kTolerance);
+    jacobian_vect[0][20 * 10 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[0].size(); ++i) {
+    EXPECT_EQ(0.0, jacobian_vect[0][i]);
+  }
+}
+
+// Takes 3 parameter blocks:
+//     parameters[0] (x) is size 1.
+//     parameters[1] (y) is size 2.
+//     parameters[2] (z) is size 3.
+// Emits 7 residuals:
+//     A: x[0] (= sum_x)
+//     B: y[0] + 2.0 * y[1] (= sum_y)
+//     C: z[0] + 3.0 * z[1] + 6.0 * z[2] (= sum_z)
+//     D: sum_x * sum_y
+//     E: sum_y * sum_z
+//     F: sum_x * sum_z
+//     G: sum_x * sum_y * sum_z
+class MyThreeParameterCostFunctor {
+ public:
+  template <typename T>
+  bool operator()(T const* const* parameters, T* residuals) const {
+    const T* x = parameters[0];
+    const T* y = parameters[1];
+    const T* z = parameters[2];
+
+    T sum_x = x[0];
+    T sum_y = y[0] + 2.0 * y[1];
+    T sum_z = z[0] + 3.0 * z[1] + 6.0 * z[2];
+
+    residuals[0] = sum_x;
+    residuals[1] = sum_y;
+    residuals[2] = sum_z;
+    residuals[3] = sum_x * sum_y;
+    residuals[4] = sum_y * sum_z;
+    residuals[5] = sum_x * sum_z;
+    residuals[6] = sum_x * sum_y * sum_z;
+    return true;
+  }
+};
+
+class ThreeParameterCostFunctorTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    // Prepare the parameters.
+    x_.resize(1);
+    x_[0] = 0.0;
+
+    y_.resize(2);
+    y_[0] = 1.0;
+    y_[1] = 3.0;
+
+    z_.resize(3);
+    z_[0] = 2.0;
+    z_[1] = 4.0;
+    z_[2] = 6.0;
+
+    parameter_blocks_.resize(3);
+    parameter_blocks_[0] = &x_[0];
+    parameter_blocks_[1] = &y_[0];
+    parameter_blocks_[2] = &z_[0];
+
+    // Prepare the cost function.
+    typedef DynamicNumericDiffCostFunction<MyThreeParameterCostFunctor>
+      DynamicMyThreeParameterCostFunction;
+    DynamicMyThreeParameterCostFunction * cost_function =
+      new DynamicMyThreeParameterCostFunction(
+        new MyThreeParameterCostFunctor());
+    cost_function->AddParameterBlock(1);
+    cost_function->AddParameterBlock(2);
+    cost_function->AddParameterBlock(3);
+    cost_function->SetNumResiduals(7);
+
+    cost_function_.reset(cost_function);
+
+    // Setup jacobian data.
+    jacobian_vect_.resize(3);
+    jacobian_vect_[0].resize(7 * x_.size(), -100000);
+    jacobian_vect_[1].resize(7 * y_.size(), -100000);
+    jacobian_vect_[2].resize(7 * z_.size(), -100000);
+
+    // Prepare the expected residuals.
+    const double sum_x = x_[0];
+    const double sum_y = y_[0] + 2.0 * y_[1];
+    const double sum_z = z_[0] + 3.0 * z_[1] + 6.0 * z_[2];
+
+    expected_residuals_.resize(7);
+    expected_residuals_[0] = sum_x;
+    expected_residuals_[1] = sum_y;
+    expected_residuals_[2] = sum_z;
+    expected_residuals_[3] = sum_x * sum_y;
+    expected_residuals_[4] = sum_y * sum_z;
+    expected_residuals_[5] = sum_x * sum_z;
+    expected_residuals_[6] = sum_x * sum_y * sum_z;
+
+    // Prepare the expected jacobian entries.
+    expected_jacobian_x_.resize(7);
+    expected_jacobian_x_[0] = 1.0;
+    expected_jacobian_x_[1] = 0.0;
+    expected_jacobian_x_[2] = 0.0;
+    expected_jacobian_x_[3] = sum_y;
+    expected_jacobian_x_[4] = 0.0;
+    expected_jacobian_x_[5] = sum_z;
+    expected_jacobian_x_[6] = sum_y * sum_z;
+
+    expected_jacobian_y_.resize(14);
+    expected_jacobian_y_[0] = 0.0;
+    expected_jacobian_y_[1] = 0.0;
+    expected_jacobian_y_[2] = 1.0;
+    expected_jacobian_y_[3] = 2.0;
+    expected_jacobian_y_[4] = 0.0;
+    expected_jacobian_y_[5] = 0.0;
+    expected_jacobian_y_[6] = sum_x;
+    expected_jacobian_y_[7] = 2.0 * sum_x;
+    expected_jacobian_y_[8] = sum_z;
+    expected_jacobian_y_[9] = 2.0 * sum_z;
+    expected_jacobian_y_[10] = 0.0;
+    expected_jacobian_y_[11] = 0.0;
+    expected_jacobian_y_[12] = sum_x * sum_z;
+    expected_jacobian_y_[13] = 2.0 * sum_x * sum_z;
+
+    expected_jacobian_z_.resize(21);
+    expected_jacobian_z_[0] = 0.0;
+    expected_jacobian_z_[1] = 0.0;
+    expected_jacobian_z_[2] = 0.0;
+    expected_jacobian_z_[3] = 0.0;
+    expected_jacobian_z_[4] = 0.0;
+    expected_jacobian_z_[5] = 0.0;
+    expected_jacobian_z_[6] = 1.0;
+    expected_jacobian_z_[7] = 3.0;
+    expected_jacobian_z_[8] = 6.0;
+    expected_jacobian_z_[9] = 0.0;
+    expected_jacobian_z_[10] = 0.0;
+    expected_jacobian_z_[11] = 0.0;
+    expected_jacobian_z_[12] = sum_y;
+    expected_jacobian_z_[13] = 3.0 * sum_y;
+    expected_jacobian_z_[14] = 6.0 * sum_y;
+    expected_jacobian_z_[15] = sum_x;
+    expected_jacobian_z_[16] = 3.0 * sum_x;
+    expected_jacobian_z_[17] = 6.0 * sum_x;
+    expected_jacobian_z_[18] = sum_x * sum_y;
+    expected_jacobian_z_[19] = 3.0 * sum_x * sum_y;
+    expected_jacobian_z_[20] = 6.0 * sum_x * sum_y;
+  }
+
+ protected:
+  vector<double> x_;
+  vector<double> y_;
+  vector<double> z_;
+
+  vector<double*> parameter_blocks_;
+
+  scoped_ptr<CostFunction> cost_function_;
+
+  vector<vector<double> > jacobian_vect_;
+
+  vector<double> expected_residuals_;
+
+  vector<double> expected_jacobian_x_;
+  vector<double> expected_jacobian_y_;
+  vector<double> expected_jacobian_z_;
+};
+
+TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterResiduals) {
+  vector<double> residuals(7, -100000);
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       NULL));
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+}
+
+TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterJacobian) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect_[0].data());
+  jacobian.push_back(jacobian_vect_[1].data());
+  jacobian.push_back(jacobian_vect_[2].data());
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_NEAR(expected_jacobian_x_[i], jacobian[0][i], kTolerance);
+  }
+
+  for (int i = 0; i < 14; ++i) {
+    EXPECT_NEAR(expected_jacobian_y_[i], jacobian[1][i], kTolerance);
+  }
+
+  for (int i = 0; i < 21; ++i) {
+    EXPECT_NEAR(expected_jacobian_z_[i], jacobian[2][i], kTolerance);
+  }
+}
+
+TEST_F(ThreeParameterCostFunctorTest,
+       ThreeParameterJacobianWithFirstAndLastParameterBlockConstant) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect_[1].data());
+  jacobian.push_back(NULL);
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 14; ++i) {
+    EXPECT_NEAR(expected_jacobian_y_[i], jacobian[1][i], kTolerance);
+  }
+}
+
+TEST_F(ThreeParameterCostFunctorTest,
+       ThreeParameterJacobianWithSecondParameterBlockConstant) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect_[0].data());
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect_[2].data());
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_NEAR(expected_jacobian_x_[i], jacobian[0][i], kTolerance);
+  }
+
+  for (int i = 0; i < 21; ++i) {
+    EXPECT_NEAR(expected_jacobian_z_[i], jacobian[2][i], kTolerance);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/evaluator.cc b/internal/ceres/evaluator.cc
index 31a4176..c94c62c 100644
--- a/internal/ceres/evaluator.cc
+++ b/internal/ceres/evaluator.cc
@@ -35,6 +35,8 @@
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/crs_matrix.h"
 #include "ceres/dense_jacobian_writer.h"
+#include "ceres/dynamic_compressed_row_finalizer.h"
+#include "ceres/dynamic_compressed_row_jacobian_writer.h"
 #include "ceres/evaluator.h"
 #include "ceres/internal/port.h"
 #include "ceres/program_evaluator.h"
@@ -63,9 +65,17 @@
                                   BlockJacobianWriter>(options,
                                                        program);
     case SPARSE_NORMAL_CHOLESKY:
-      return new ProgramEvaluator<ScratchEvaluatePreparer,
-                                  CompressedRowJacobianWriter>(options,
-                                                               program);
+      if (options.dynamic_sparsity) {
+        return new ProgramEvaluator<ScratchEvaluatePreparer,
+                                    DynamicCompressedRowJacobianWriter,
+                                    DynamicCompressedRowJacobianFinalizer>(
+                                        options, program);
+      } else {
+        return new ProgramEvaluator<ScratchEvaluatePreparer,
+                                    CompressedRowJacobianWriter>(options,
+                                                                 program);
+      }
+
     default:
       *error = "Invalid Linear Solver Type. Unable to create evaluator.";
       return NULL;
diff --git a/internal/ceres/evaluator.h b/internal/ceres/evaluator.h
index 3d25462..8fc60b8 100644
--- a/internal/ceres/evaluator.h
+++ b/internal/ceres/evaluator.h
@@ -61,11 +61,13 @@
     Options()
         : num_threads(1),
           num_eliminate_blocks(-1),
-          linear_solver_type(DENSE_QR) {}
+          linear_solver_type(DENSE_QR),
+          dynamic_sparsity(false) {}
 
     int num_threads;
     int num_eliminate_blocks;
     LinearSolverType linear_solver_type;
+    bool dynamic_sparsity;
   };
 
   static Evaluator* Create(const Options& options,
diff --git a/internal/ceres/evaluator_test.cc b/internal/ceres/evaluator_test.cc
index ea24504..c0de3fc 100644
--- a/internal/ceres/evaluator_test.cc
+++ b/internal/ceres/evaluator_test.cc
@@ -44,6 +44,7 @@
 #include "ceres/program.h"
 #include "ceres/sized_cost_function.h"
 #include "ceres/sparse_matrix.h"
+#include "ceres/stringprintf.h"
 #include "ceres/types.h"
 #include "gtest/gtest.h"
 
@@ -91,18 +92,42 @@
   }
 };
 
+struct EvaluatorTestOptions {
+  EvaluatorTestOptions(LinearSolverType linear_solver_type,
+                       int num_eliminate_blocks,
+                       bool dynamic_sparsity = false)
+    : linear_solver_type(linear_solver_type),
+      num_eliminate_blocks(num_eliminate_blocks),
+      dynamic_sparsity(dynamic_sparsity) {}
+
+  LinearSolverType linear_solver_type;
+  int num_eliminate_blocks;
+  bool dynamic_sparsity;
+};
+
 struct EvaluatorTest
-    : public ::testing::TestWithParam<pair<LinearSolverType, int> > {
+    : public ::testing::TestWithParam<EvaluatorTestOptions> {
   Evaluator* CreateEvaluator(Program* program) {
     // This program is straight from the ProblemImpl, and so has no index/offset
     // yet; compute it here as required by the evalutor implementations.
     program->SetParameterOffsetsAndIndex();
 
-    VLOG(1) << "Creating evaluator with type: " << GetParam().first
-            << " and num_eliminate_blocks: " << GetParam().second;
+    if (VLOG_IS_ON(1)) {
+      string report;
+      StringAppendF(&report, "Creating evaluator with type: %d",
+                    GetParam().linear_solver_type);
+      if (GetParam().linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+        StringAppendF(&report, ", dynamic_sparsity: %d",
+                      GetParam().dynamic_sparsity);
+      }
+      StringAppendF(&report, " and num_eliminate_blocks: %d",
+                    GetParam().num_eliminate_blocks);
+      VLOG(1) << report;
+    }
     Evaluator::Options options;
-    options.linear_solver_type = GetParam().first;
-    options.num_eliminate_blocks = GetParam().second;
+    options.linear_solver_type = GetParam().linear_solver_type;
+    options.num_eliminate_blocks = GetParam().num_eliminate_blocks;
+    options.dynamic_sparsity = GetParam().dynamic_sparsity;
     string error;
     return Evaluator::Create(options, program, &error);
   }
@@ -517,23 +542,25 @@
 INSTANTIATE_TEST_CASE_P(
     LinearSolvers,
     EvaluatorTest,
-    ::testing::Values(make_pair(DENSE_QR, 0),
-                      make_pair(DENSE_SCHUR, 0),
-                      make_pair(DENSE_SCHUR, 1),
-                      make_pair(DENSE_SCHUR, 2),
-                      make_pair(DENSE_SCHUR, 3),
-                      make_pair(DENSE_SCHUR, 4),
-                      make_pair(SPARSE_SCHUR, 0),
-                      make_pair(SPARSE_SCHUR, 1),
-                      make_pair(SPARSE_SCHUR, 2),
-                      make_pair(SPARSE_SCHUR, 3),
-                      make_pair(SPARSE_SCHUR, 4),
-                      make_pair(ITERATIVE_SCHUR, 0),
-                      make_pair(ITERATIVE_SCHUR, 1),
-                      make_pair(ITERATIVE_SCHUR, 2),
-                      make_pair(ITERATIVE_SCHUR, 3),
-                      make_pair(ITERATIVE_SCHUR, 4),
-                      make_pair(SPARSE_NORMAL_CHOLESKY, 0)));
+    ::testing::Values(
+      EvaluatorTestOptions(DENSE_QR, 0),
+      EvaluatorTestOptions(DENSE_SCHUR, 0),
+      EvaluatorTestOptions(DENSE_SCHUR, 1),
+      EvaluatorTestOptions(DENSE_SCHUR, 2),
+      EvaluatorTestOptions(DENSE_SCHUR, 3),
+      EvaluatorTestOptions(DENSE_SCHUR, 4),
+      EvaluatorTestOptions(SPARSE_SCHUR, 0),
+      EvaluatorTestOptions(SPARSE_SCHUR, 1),
+      EvaluatorTestOptions(SPARSE_SCHUR, 2),
+      EvaluatorTestOptions(SPARSE_SCHUR, 3),
+      EvaluatorTestOptions(SPARSE_SCHUR, 4),
+      EvaluatorTestOptions(ITERATIVE_SCHUR, 0),
+      EvaluatorTestOptions(ITERATIVE_SCHUR, 1),
+      EvaluatorTestOptions(ITERATIVE_SCHUR, 2),
+      EvaluatorTestOptions(ITERATIVE_SCHUR, 3),
+      EvaluatorTestOptions(ITERATIVE_SCHUR, 4),
+      EvaluatorTestOptions(SPARSE_NORMAL_CHOLESKY, 0, false),
+      EvaluatorTestOptions(SPARSE_NORMAL_CHOLESKY, 0, true)));
 
 // Simple cost function used to check if the evaluator is sensitive to
 // state changes.
diff --git a/internal/ceres/generate_eliminator_specialization.py b/internal/ceres/generate_eliminator_specialization.py
index caeca69..2ec3c5b 100644
--- a/internal/ceres/generate_eliminator_specialization.py
+++ b/internal/ceres/generate_eliminator_specialization.py
@@ -59,7 +59,10 @@
                    (2, 3, "Eigen::Dynamic"),
                    (2, 4, 3),
                    (2, 4, 4),
+                   (2, 4, 8),
+                   (2, 4, 9),
                    (2, 4, "Eigen::Dynamic"),
+                   (2, "Eigen::Dynamic", "Eigen::Dynamic"),
                    (4, 4, 2),
                    (4, 4, 3),
                    (4, 4, 4),
@@ -123,6 +126,9 @@
 """
 
 SPECIALIZATION_FILE = """
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generate_partitioned_matrix_view_specializations.py b/internal/ceres/generate_partitioned_matrix_view_specializations.py
new file mode 100644
index 0000000..c9bdf23
--- /dev/null
+++ b/internal/ceres/generate_partitioned_matrix_view_specializations.py
@@ -0,0 +1,231 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2013 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: sameeragarwal@google.com (Sameer Agarwal)
+#
+# Script for explicitly generating template specialization of the
+# PartitionedMatrixView class. Explicitly generating these
+# instantiations in separate .cc files breaks the compilation into
+# separate compilation unit rather than one large cc file.
+#
+# This script creates two sets of files.
+#
+# 1. partitioned_matrix_view_x_x_x.cc
+# where the x indicates the template parameters and
+#
+# 2. partitioned_matrix_view.cc
+#
+# that contains a factory function for instantiating these classes
+# based on runtime parameters.
+#
+# The list of tuples, specializations indicates the set of
+# specializations that is generated.
+
+# Set of template specializations to generate
+SPECIALIZATIONS = [(2, 2, 2),
+                   (2, 2, 3),
+                   (2, 2, 4),
+                   (2, 2, "Eigen::Dynamic"),
+                   (2, 3, 3),
+                   (2, 3, 4),
+                   (2, 3, 9),
+                   (2, 3, "Eigen::Dynamic"),
+                   (2, 4, 3),
+                   (2, 4, 4),
+                   (2, 4, 8),
+                   (2, 4, 9),
+                   (2, 4, "Eigen::Dynamic"),
+                   (2, "Eigen::Dynamic", "Eigen::Dynamic"),
+                   (4, 4, 2),
+                   (4, 4, 3),
+                   (4, 4, 4),
+                   (4, 4, "Eigen::Dynamic"),
+                   ("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic")]
+HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+"""
+
+DYNAMIC_FILE = """
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<%s, %s, %s>;
+
+}  // namespace internal
+}  // namespace ceres
+"""
+
+SPECIALIZATION_FILE = """
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<%s, %s, %s>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+FACTORY_FILE_HEADER = """
+#include "ceres/linear_solver.h"
+#include "ceres/partitioned_matrix_view.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+PartitionedMatrixViewBase*
+PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
+                                  const BlockSparseMatrix& matrix) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+FACTORY_CONDITIONAL = """  if ((options.row_block_size == %s) &&
+      (options.e_block_size == %s) &&
+      (options.f_block_size == %s)) {
+    return new PartitionedMatrixView<%s, %s, %s>(
+                 matrix, options.elimination_groups[0]);
+  }
+"""
+
+FACTORY_FOOTER = """
+#endif
+  VLOG(1) << "Template specializations not found for <"
+          << options.row_block_size << ","
+          << options.e_block_size << ","
+          << options.f_block_size << ">";
+  return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+               matrix, options.elimination_groups[0]);
+};
+
+}  // namespace internal
+}  // namespace ceres
+"""
+
+
+def SuffixForSize(size):
+  if size == "Eigen::Dynamic":
+    return "d"
+  return str(size)
+
+
+def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
+  return "_".join([prefix] + map(SuffixForSize, (row_block_size,
+                                                 e_block_size,
+                                                 f_block_size)))
+
+
+def Specialize():
+  """
+  Generate specialization code and the conditionals to instantiate it.
+  """
+  f = open("partitioned_matrix_view.cc", "w")
+  f.write(HEADER)
+  f.write(FACTORY_FILE_HEADER)
+
+  for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+    output = SpecializationFilename("generated/partitioned_matrix_view",
+                                    row_block_size,
+                                    e_block_size,
+                                    f_block_size) + ".cc"
+    fptr = open(output, "w")
+    fptr.write(HEADER)
+
+    template = SPECIALIZATION_FILE
+    if (row_block_size == "Eigen::Dynamic" and
+        e_block_size == "Eigen::Dynamic" and
+        f_block_size == "Eigen::Dynamic"):
+      template = DYNAMIC_FILE
+
+    fptr.write(template % (row_block_size, e_block_size, f_block_size))
+    fptr.close()
+
+    f.write(FACTORY_CONDITIONAL % (row_block_size,
+                                   e_block_size,
+                                   f_block_size,
+                                   row_block_size,
+                                   e_block_size,
+                                   f_block_size))
+  f.write(FACTORY_FOOTER)
+  f.close()
+
+
+if __name__ == "__main__":
+  Specialize()
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
new file mode 100644
index 0000000..a7d802a
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, 2>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
new file mode 100644
index 0000000..89e6f77
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
new file mode 100644
index 0000000..3a3e8b6
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
new file mode 100644
index 0000000..661f135
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
new file mode 100644
index 0000000..e79e001
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
new file mode 100644
index 0000000..2f1ae68
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
new file mode 100644
index 0000000..ab40550
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 9>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
new file mode 100644
index 0000000..89ecff7
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
new file mode 100644
index 0000000..182707d
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
new file mode 100644
index 0000000..a2cf8f4
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
new file mode 100644
index 0000000..a263769
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 8>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
new file mode 100644
index 0000000..d853860
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 9>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
new file mode 100644
index 0000000..7d622fc
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
new file mode 100644
index 0000000..31981ca
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
new file mode 100644
index 0000000..d51ab5f
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, 2>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
new file mode 100644
index 0000000..4b17fbd
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
new file mode 100644
index 0000000..7b5fe0f
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
new file mode 100644
index 0000000..c31fed3
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc b/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
new file mode 100644
index 0000000..a3308ed
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/generated/schur_eliminator_2_2_2.cc b/internal/ceres/generated/schur_eliminator_2_2_2.cc
index 7f9ce14..db2a4dc 100644
--- a/internal/ceres/generated/schur_eliminator_2_2_2.cc
+++ b/internal/ceres/generated/schur_eliminator_2_2_2.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_2_3.cc b/internal/ceres/generated/schur_eliminator_2_2_3.cc
index d9ab1dd..f53c12a 100644
--- a/internal/ceres/generated/schur_eliminator_2_2_3.cc
+++ b/internal/ceres/generated/schur_eliminator_2_2_3.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_2_4.cc b/internal/ceres/generated/schur_eliminator_2_2_4.cc
index a268810..9e29383 100644
--- a/internal/ceres/generated/schur_eliminator_2_2_4.cc
+++ b/internal/ceres/generated/schur_eliminator_2_2_4.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_2_d.cc b/internal/ceres/generated/schur_eliminator_2_2_d.cc
index 46f9492..541def6 100644
--- a/internal/ceres/generated/schur_eliminator_2_2_d.cc
+++ b/internal/ceres/generated/schur_eliminator_2_2_d.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_3_3.cc b/internal/ceres/generated/schur_eliminator_2_3_3.cc
index ce53c6c..e450263 100644
--- a/internal/ceres/generated/schur_eliminator_2_3_3.cc
+++ b/internal/ceres/generated/schur_eliminator_2_3_3.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_3_4.cc b/internal/ceres/generated/schur_eliminator_2_3_4.cc
index 7f6d41d..0618c68 100644
--- a/internal/ceres/generated/schur_eliminator_2_3_4.cc
+++ b/internal/ceres/generated/schur_eliminator_2_3_4.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_3_9.cc b/internal/ceres/generated/schur_eliminator_2_3_9.cc
index 10f84af..c1ca665 100644
--- a/internal/ceres/generated/schur_eliminator_2_3_9.cc
+++ b/internal/ceres/generated/schur_eliminator_2_3_9.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_3_d.cc b/internal/ceres/generated/schur_eliminator_2_3_d.cc
index 047d473..1b6092c 100644
--- a/internal/ceres/generated/schur_eliminator_2_3_d.cc
+++ b/internal/ceres/generated/schur_eliminator_2_3_d.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_4_3.cc b/internal/ceres/generated/schur_eliminator_2_4_3.cc
index 12fdb86..edce8ef 100644
--- a/internal/ceres/generated/schur_eliminator_2_4_3.cc
+++ b/internal/ceres/generated/schur_eliminator_2_4_3.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_4_4.cc b/internal/ceres/generated/schur_eliminator_2_4_4.cc
index 0e29dc1..a6f3c52 100644
--- a/internal/ceres/generated/schur_eliminator_2_4_4.cc
+++ b/internal/ceres/generated/schur_eliminator_2_4_4.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_4_8.cc b/internal/ceres/generated/schur_eliminator_2_4_8.cc
new file mode 100644
index 0000000..bf2f0ab
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_8.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specialization.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 8>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_9.cc b/internal/ceres/generated/schur_eliminator_2_4_9.cc
new file mode 100644
index 0000000..a63d0bb
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_9.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specialization.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 9>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_d.cc b/internal/ceres/generated/schur_eliminator_2_4_d.cc
index 4d4ac56..b3a7fff 100644
--- a/internal/ceres/generated/schur_eliminator_2_4_d.cc
+++ b/internal/ceres/generated/schur_eliminator_2_4_d.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_d_d.cc b/internal/ceres/generated/schur_eliminator_2_d_d.cc
new file mode 100644
index 0000000..f4d28cd
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_d_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specialization.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_4_4_2.cc b/internal/ceres/generated/schur_eliminator_4_4_2.cc
index 4ad7d41..d1eadc1 100644
--- a/internal/ceres/generated/schur_eliminator_4_4_2.cc
+++ b/internal/ceres/generated/schur_eliminator_4_4_2.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_4_4_3.cc b/internal/ceres/generated/schur_eliminator_4_4_3.cc
index 87f2fc5..c340dbf 100644
--- a/internal/ceres/generated/schur_eliminator_4_4_3.cc
+++ b/internal/ceres/generated/schur_eliminator_4_4_3.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_4_4_4.cc b/internal/ceres/generated/schur_eliminator_4_4_4.cc
index 8b3f570..b7d58ad 100644
--- a/internal/ceres/generated/schur_eliminator_4_4_4.cc
+++ b/internal/ceres/generated/schur_eliminator_4_4_4.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_4_4_d.cc b/internal/ceres/generated/schur_eliminator_4_4_d.cc
index b21feb2..47e0059 100644
--- a/internal/ceres/generated/schur_eliminator_4_4_d.cc
+++ b/internal/ceres/generated/schur_eliminator_4_4_d.cc
@@ -37,9 +37,12 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
 
 #include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_d_d_d.cc b/internal/ceres/generated/schur_eliminator_d_d_d.cc
index d483db7..d54a03c 100644
--- a/internal/ceres/generated/schur_eliminator_d_d_d.cc
+++ b/internal/ceres/generated/schur_eliminator_d_d_d.cc
@@ -37,7 +37,7 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
 
diff --git a/internal/ceres/gradient_checking_cost_function.cc b/internal/ceres/gradient_checking_cost_function.cc
index 3edf95d..bca22e6 100644
--- a/internal/ceres/gradient_checking_cost_function.cc
+++ b/internal/ceres/gradient_checking_cost_function.cc
@@ -44,7 +44,7 @@
 #include "ceres/problem_impl.h"
 #include "ceres/program.h"
 #include "ceres/residual_block.h"
-#include "ceres/runtime_numeric_diff_cost_function.h"
+#include "ceres/dynamic_numeric_diff_cost_function.h"
 #include "ceres/stringprintf.h"
 #include "ceres/types.h"
 #include "glog/logging.h"
@@ -84,14 +84,24 @@
                                double relative_precision,
                                const string& extra_info)
       : function_(function),
-        finite_diff_cost_function_(
-            CreateRuntimeNumericDiffCostFunction(function,
-                                                 CENTRAL,
-                                                 relative_step_size)),
         relative_precision_(relative_precision),
         extra_info_(extra_info) {
-    *mutable_parameter_block_sizes() = function->parameter_block_sizes();
+    DynamicNumericDiffCostFunction<CostFunction, CENTRAL>*
+        finite_diff_cost_function =
+        new DynamicNumericDiffCostFunction<CostFunction, CENTRAL>(
+            function,
+            DO_NOT_TAKE_OWNERSHIP,
+            relative_step_size);
+
+    const vector<int32>& parameter_block_sizes =
+        function->parameter_block_sizes();
+    for (int i = 0; i < parameter_block_sizes.size(); ++i) {
+      finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]);
+    }
+    *mutable_parameter_block_sizes() = parameter_block_sizes;
     set_num_residuals(function->num_residuals());
+    finite_diff_cost_function->SetNumResiduals(num_residuals());
+    finite_diff_cost_function_.reset(finite_diff_cost_function);
   }
 
   virtual ~GradientCheckingCostFunction() { }
@@ -107,7 +117,7 @@
     int num_residuals = function_->num_residuals();
 
     // Make space for the jacobians of the two methods.
-    const vector<int16>& block_sizes = function_->parameter_block_sizes();
+    const vector<int32>& block_sizes = function_->parameter_block_sizes();
     vector<Matrix> term_jacobians(block_sizes.size());
     vector<Matrix> finite_difference_jacobians(block_sizes.size());
     vector<double*> term_jacobian_pointers(block_sizes.size());
diff --git a/internal/ceres/gradient_checking_cost_function_test.cc b/internal/ceres/gradient_checking_cost_function_test.cc
index ac06503..caba2f6 100644
--- a/internal/ceres/gradient_checking_cost_function_test.cc
+++ b/internal/ceres/gradient_checking_cost_function_test.cc
@@ -264,7 +264,7 @@
 // Trivial cost function that accepts a single argument.
 class UnaryCostFunction : public CostFunction {
  public:
-  UnaryCostFunction(int num_residuals, int16 parameter_block_size) {
+  UnaryCostFunction(int num_residuals, int32 parameter_block_size) {
     set_num_residuals(num_residuals);
     mutable_parameter_block_sizes()->push_back(parameter_block_size);
   }
@@ -284,8 +284,8 @@
 class BinaryCostFunction: public CostFunction {
  public:
   BinaryCostFunction(int num_residuals,
-                     int16 parameter_block1_size,
-                     int16 parameter_block2_size) {
+                     int32 parameter_block1_size,
+                     int32 parameter_block2_size) {
     set_num_residuals(num_residuals);
     mutable_parameter_block_sizes()->push_back(parameter_block1_size);
     mutable_parameter_block_sizes()->push_back(parameter_block2_size);
@@ -305,9 +305,9 @@
 class TernaryCostFunction: public CostFunction {
  public:
   TernaryCostFunction(int num_residuals,
-                      int16 parameter_block1_size,
-                      int16 parameter_block2_size,
-                      int16 parameter_block3_size) {
+                      int32 parameter_block1_size,
+                      int32 parameter_block2_size,
+                      int32 parameter_block3_size) {
     set_num_residuals(num_residuals);
     mutable_parameter_block_sizes()->push_back(parameter_block1_size);
     mutable_parameter_block_sizes()->push_back(parameter_block2_size);
diff --git a/internal/ceres/implicit_schur_complement.cc b/internal/ceres/implicit_schur_complement.cc
index 32722bb..2da6235 100644
--- a/internal/ceres/implicit_schur_complement.cc
+++ b/internal/ceres/implicit_schur_complement.cc
@@ -35,21 +35,18 @@
 #include "ceres/block_structure.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_solver.h"
 #include "ceres/types.h"
 #include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
 
-ImplicitSchurComplement::ImplicitSchurComplement(int num_eliminate_blocks,
-                                                 bool preconditioner)
-    : num_eliminate_blocks_(num_eliminate_blocks),
-      preconditioner_(preconditioner),
-      A_(NULL),
+ImplicitSchurComplement::ImplicitSchurComplement(
+    const LinearSolver::Options& options)
+    : options_(options),
       D_(NULL),
-      b_(NULL),
-      block_diagonal_EtE_inverse_(NULL),
-      block_diagonal_FtF_inverse_(NULL) {
+      b_(NULL) {
 }
 
 ImplicitSchurComplement::~ImplicitSchurComplement() {
@@ -61,7 +58,7 @@
   // Since initialization is reasonably heavy, perhaps we can save on
   // constructing a new object everytime.
   if (A_ == NULL) {
-    A_.reset(new PartitionedMatrixView(A, num_eliminate_blocks_));
+    A_.reset(PartitionedMatrixViewBase::Create(options_, A));
   }
 
   D_ = D;
@@ -71,7 +68,7 @@
   // E'E and F'E.
   if (block_diagonal_EtE_inverse_ == NULL) {
     block_diagonal_EtE_inverse_.reset(A_->CreateBlockDiagonalEtE());
-    if (preconditioner_) {
+    if (options_.preconditioner_type == JACOBI) {
       block_diagonal_FtF_inverse_.reset(A_->CreateBlockDiagonalFtF());
     }
     rhs_.resize(A_->num_cols_f());
@@ -82,7 +79,7 @@
     tmp_f_cols_.resize(A_->num_cols_f());
   } else {
     A_->UpdateBlockDiagonalEtE(block_diagonal_EtE_inverse_.get());
-    if (preconditioner_) {
+    if (options_.preconditioner_type == JACOBI) {
       A_->UpdateBlockDiagonalFtF(block_diagonal_FtF_inverse_.get());
     }
   }
@@ -91,7 +88,7 @@
   // contributions from the diagonal D if it is non-null. Add that to
   // the block diagonals and invert them.
   AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
-  if (preconditioner_)  {
+  if (options_.preconditioner_type == JACOBI) {
     AddDiagonalAndInvert((D_ ==  NULL) ? NULL : D_ + A_->num_cols_e(),
                          block_diagonal_FtF_inverse_.get());
   }
diff --git a/internal/ceres/implicit_schur_complement.h b/internal/ceres/implicit_schur_complement.h
index c1bb6e1..c992bdc 100644
--- a/internal/ceres/implicit_schur_complement.h
+++ b/internal/ceres/implicit_schur_complement.h
@@ -35,6 +35,7 @@
 #define CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
 
 #include "ceres/linear_operator.h"
+#include "ceres/linear_solver.h"
 #include "ceres/partitioned_matrix_view.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/scoped_ptr.h"
@@ -96,7 +97,7 @@
   //
   // TODO(sameeragarwal): Get rid of the two bools below and replace
   // them with enums.
-  ImplicitSchurComplement(int num_eliminate_blocks, bool preconditioner);
+  ImplicitSchurComplement(const LinearSolver::Options& options);
   virtual ~ImplicitSchurComplement();
 
   // Initialize the Schur complement for a linear least squares
@@ -142,10 +143,9 @@
   void AddDiagonalAndInvert(const double* D, BlockSparseMatrix* matrix);
   void UpdateRhs();
 
-  int num_eliminate_blocks_;
-  bool preconditioner_;
+  const LinearSolver::Options& options_;
 
-  scoped_ptr<PartitionedMatrixView> A_;
+  scoped_ptr<PartitionedMatrixViewBase> A_;
   const double* D_;
   const double* b_;
 
diff --git a/internal/ceres/implicit_schur_complement_test.cc b/internal/ceres/implicit_schur_complement_test.cc
index 1694273..3369ecb 100644
--- a/internal/ceres/implicit_schur_complement_test.cc
+++ b/internal/ceres/implicit_schur_complement_test.cc
@@ -120,7 +120,10 @@
     Vector reference_solution;
     ReducedLinearSystemAndSolution(D, &lhs, &rhs, &reference_solution);
 
-    ImplicitSchurComplement isc(num_eliminate_blocks_, true);
+    LinearSolver::Options options;
+    options.elimination_groups.push_back(num_eliminate_blocks_);
+    options.preconditioner_type = JACOBI;
+    ImplicitSchurComplement isc(options);
     isc.Init(*A_, D, b_.get());
 
     int num_sc_cols = lhs.cols();
diff --git a/internal/ceres/integral_types.h b/internal/ceres/integral_types.h
index 01e0493..d4913f5 100644
--- a/internal/ceres/integral_types.h
+++ b/internal/ceres/integral_types.h
@@ -77,7 +77,6 @@
 #undef CERES_INTSIZE
 
 typedef Integer< 8>::type int8;
-typedef Integer<16>::type int16;
 typedef Integer<32>::type int32;
 typedef Integer<64>::type int64;
 
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
index 1aac565..6de410b 100644
--- a/internal/ceres/iterative_schur_complement_solver.cc
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -38,6 +38,7 @@
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/block_structure.h"
 #include "ceres/conjugate_gradients_solver.h"
+#include "ceres/detect_structure.h"
 #include "ceres/implicit_schur_complement.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/scoped_ptr.h"
@@ -69,35 +70,36 @@
   EventLogger event_logger("IterativeSchurComplementSolver::Solve");
 
   CHECK_NOTNULL(A->block_structure());
-
+  const int num_eliminate_blocks = options_.elimination_groups[0];
   // Initialize a ImplicitSchurComplement object.
   if (schur_complement_ == NULL) {
-    schur_complement_.reset(
-        new ImplicitSchurComplement(options_.elimination_groups[0],
-                                    options_.preconditioner_type == JACOBI));
+    DetectStructure(*(A->block_structure()),
+                    num_eliminate_blocks,
+                    &options_.row_block_size,
+                    &options_.e_block_size,
+                    &options_.f_block_size);
+    schur_complement_.reset(new ImplicitSchurComplement(options_));
   }
   schur_complement_->Init(*A, per_solve_options.D, b);
 
   const int num_schur_complement_blocks =
-      A->block_structure()->cols.size() - options_.elimination_groups[0];
+      A->block_structure()->cols.size() - num_eliminate_blocks;
   if (num_schur_complement_blocks == 0) {
     VLOG(2) << "No parameter blocks left in the schur complement.";
     LinearSolver::Summary cg_summary;
     cg_summary.num_iterations = 0;
-    cg_summary.termination_type = TOLERANCE;
+    cg_summary.termination_type = LINEAR_SOLVER_SUCCESS;
     schur_complement_->BackSubstitute(NULL, x);
     return cg_summary;
   }
 
   // Initialize the solution to the Schur complement system to zero.
-  //
-  // TODO(sameeragarwal): There maybe a better initialization than an
-  // all zeros solution. Explore other cheap starting points.
   reduced_linear_system_solution_.resize(schur_complement_->num_rows());
   reduced_linear_system_solution_.setZero();
 
-  // Instantiate a conjugate gradient solver that runs on the Schur complement
-  // matrix with the block diagonal of the matrix F'F as the preconditioner.
+  // Instantiate a conjugate gradient solver that runs on the Schur
+  // complement matrix with the block diagonal of the matrix F'F as
+  // the preconditioner.
   LinearSolver::Options cg_options;
   cg_options.max_num_iterations = options_.max_num_iterations;
   ConjugateGradientsSolver cg_solver(cg_options);
@@ -108,6 +110,8 @@
 
   Preconditioner::Options preconditioner_options;
   preconditioner_options.type = options_.preconditioner_type;
+  preconditioner_options.visibility_clustering_type =
+      options_.visibility_clustering_type;
   preconditioner_options.sparse_linear_algebra_library_type =
       options_.sparse_linear_algebra_library_type;
   preconditioner_options.num_threads = options_.num_threads;
@@ -149,26 +153,26 @@
         preconditioner_->Update(*A, per_solve_options.D);
     cg_per_solve_options.preconditioner = preconditioner_.get();
   }
-
   event_logger.AddEvent("Setup");
 
   LinearSolver::Summary cg_summary;
   cg_summary.num_iterations = 0;
-  cg_summary.termination_type = FAILURE;
+  cg_summary.termination_type = LINEAR_SOLVER_FAILURE;
 
+  // TODO(sameeragarwal): Refactor preconditioners to return a more
+  // sane message.
+  cg_summary.message = "Preconditioner update failed.";
   if (preconditioner_update_was_successful) {
     cg_summary = cg_solver.Solve(schur_complement_.get(),
                                  schur_complement_->rhs().data(),
                                  cg_per_solve_options,
                                  reduced_linear_system_solution_.data());
-    if (cg_summary.termination_type != FAILURE) {
+    if (cg_summary.termination_type != LINEAR_SOLVER_FAILURE &&
+        cg_summary.termination_type != LINEAR_SOLVER_FATAL_ERROR) {
       schur_complement_->BackSubstitute(
           reduced_linear_system_solution_.data(), x);
     }
   }
-
-  VLOG(2) << "CG Iterations : " << cg_summary.num_iterations;
-
   event_logger.AddEvent("Solve");
   return cg_summary;
 }
diff --git a/internal/ceres/jet_quaternion_integration_test.cc b/internal/ceres/jet_quaternion_integration_test.cc
deleted file mode 100644
index 63101fb..0000000
--- a/internal/ceres/jet_quaternion_integration_test.cc
+++ /dev/null
@@ -1,201 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Tests the use of Cere's Jet type with the quaternions found in util/math/. In
-// theory, the unittests for the quaternion class should be type parameterized
-// to make for easier testing of instantiations of the quaternion class, but it
-// is not so, and not obviously worth the work to make the switch at this time.
-
-#include "base/stringprintf.h"
-#include "gtest/gtest.h"
-#include "util/math/mathlimits.h"
-#include "util/math/matrix3x3.h"
-#include "util/math/quaternion.h"
-#include "util/math/vector3.h"
-#include "ceres/test_util.h"
-#include "ceres/jet.h"
-#include "ceres/jet_traits.h"
-
-namespace ceres {
-namespace internal {
-
-// Use a 4-element derivative to simulate the case where each of the
-// quaternion elements are derivative parameters.
-typedef Jet<double, 4> J;
-
-struct JetTraitsTest : public ::testing::Test {
- protected:
-  JetTraitsTest()
-      : a(J(1.1, 0), J(2.1, 1), J(3.1, 2), J(4.1, 3)),
-        b(J(0.1, 0), J(1.1, 1), J(2.1, 2), J(5.0, 3)),
-        double_a(a[0].a, a[1].a, a[2].a, a[3].a),
-        double_b(b[0].a, b[1].a, b[2].a, b[3].a) {
-    // The quaternions should be valid rotations, so normalize them.
-    a.Normalize();
-    b.Normalize();
-    double_a.Normalize();
-    double_b.Normalize();
-  }
-
-  virtual ~JetTraitsTest() {}
-
-  // A couple of arbitrary normalized quaternions.
-  Quaternion<J> a, b;
-
-  // The equivalent of a, b but in scalar form.
-  Quaternion<double> double_a, double_b;
-};
-
-// Compare scalar multiplication to jet multiplication. Ignores derivatives.
-TEST_F(JetTraitsTest, QuaternionScalarMultiplicationWorks) {
-  Quaternion<J> c = a * b;
-  Quaternion<double> double_c = double_a * double_b;
-
-  for (int i = 0; i < 4; ++i) {
-    EXPECT_EQ(double_c[i], c[i].a);
-  }
-}
-
-// Compare scalar slerp to jet slerp. Ignores derivatives.
-TEST_F(JetTraitsTest, QuaternionScalarSlerpWorks) {
-  const J fraction(0.1);
-  Quaternion<J> c = Quaternion<J>::Slerp(a, b, fraction);
-  Quaternion<double> double_c =
-      Quaternion<double>::Slerp(double_a, double_b, fraction.a);
-
-  for (int i = 0; i < 4; ++i) {
-    EXPECT_EQ(double_c[i], c[i].a);
-  }
-}
-
-// On a 32-bit optimized build, the mismatch is about 1.4e-14.
-double const kTolerance = 1e-13;
-
-void ExpectJetsClose(const J &x, const J &y) {
-  ExpectClose(x.a, y.a, kTolerance);
-  ExpectClose(x.v[0], y.v[0], kTolerance);
-  ExpectClose(x.v[1], y.v[1], kTolerance);
-  ExpectClose(x.v[2], y.v[2], kTolerance);
-  ExpectClose(x.v[3], y.v[3], kTolerance);
-}
-
-void ExpectQuaternionsClose(const Quaternion<J>& x, const Quaternion<J>& y) {
-  for (int i = 0; i < 4; ++i) {
-    ExpectJetsClose(x[i], y[i]);
-  }
-}
-
-// Compare jet slurp to jet slerp using identies, checking derivatives.
-TEST_F(JetTraitsTest, CheckSlerpIdentitiesWithNontrivialDerivatives) {
-  // Do a slerp to 0.75 directly.
-  Quaternion<J> direct = Quaternion<J>::Slerp(a, b, J(0.75));
-
-  // Now go part way twice, in theory ending at the same place.
-  Quaternion<J> intermediate = Quaternion<J>::Slerp(a, b, J(0.5));
-  Quaternion<J> indirect = Quaternion<J>::Slerp(intermediate, b, J(0.5));
-
-  // Check that the destination is the same, including derivatives.
-  ExpectQuaternionsClose(direct, indirect);
-}
-
-TEST_F(JetTraitsTest, CheckAxisAngleIsInvertibleWithNontrivialDerivatives) {
-  Vector3<J> axis;
-  J angle;
-  a.GetAxisAngle(&axis, &angle);
-  b.SetFromAxisAngle(axis, angle);
-
-  ExpectQuaternionsClose(a, b);
-}
-
-TEST_F(JetTraitsTest,
-       CheckRotationMatrixIsInvertibleWithNontrivialDerivatives) {
-  Vector3<J> axis;
-  J angle;
-  Matrix3x3<J> R;
-  a.ToRotationMatrix(&R);
-  b.SetFromRotationMatrix(R);
-
-  ExpectQuaternionsClose(a, b);
-}
-
-// This doesn't check correctnenss, only that the instantiation compiles.
-TEST_F(JetTraitsTest, CheckRotationBetweenIsCompilable) {
-  // Get two arbitrary vectors x and y.
-  Vector3<J> x, y;
-  J ignored_angle;
-  a.GetAxisAngle(&x, &ignored_angle);
-  b.GetAxisAngle(&y, &ignored_angle);
-
-  Quaternion<J> between_x_and_y = Quaternion<J>::RotationBetween(x, y);
-
-  // Prevent optimizing this away.
-  EXPECT_NE(between_x_and_y[0].a, 0.0);
-}
-
-TEST_F(JetTraitsTest, CheckRotatedWorksAsExpected) {
-  // Get two arbitrary vectors x and y.
-  Vector3<J> x;
-  J ignored_angle;
-  a.GetAxisAngle(&x, &ignored_angle);
-
-  // Rotate via a quaternion.
-  Vector3<J> y = b.Rotated(x);
-
-  // Rotate via a rotation matrix.
-  Matrix3x3<J> R;
-  b.ToRotationMatrix(&R);
-  Vector3<J> yp = R * x;
-
-  ExpectJetsClose(yp[0], y[0]);
-  ExpectJetsClose(yp[1], y[1]);
-  ExpectJetsClose(yp[2], y[2]);
-}
-
-TEST_F(JetTraitsTest, CheckRotatedWorksAsExpectedWithDoubles) {
-  // Get two arbitrary vectors x and y.
-  Vector3<double> x;
-  double ignored_angle;
-  double_a.GetAxisAngle(&x, &ignored_angle);
-
-  // Rotate via a quaternion.
-  Vector3<double> y = double_b.Rotated(x);
-
-  // Rotate via a rotation matrix.
-  Matrix3x3<double> R;
-  double_b.ToRotationMatrix(&R);
-  Vector3<double> yp = R * x;
-
-  ExpectClose(yp[0], y[0], kTolerance);
-  ExpectClose(yp[1], y[1], kTolerance);
-  ExpectClose(yp[2], y[2], kTolerance);
-}
-
-}  // namespace internal
-}  // namespace ceres
diff --git a/internal/ceres/lapack.cc b/internal/ceres/lapack.cc
index 73bfa69..e124d75 100644
--- a/internal/ceres/lapack.cc
+++ b/internal/ceres/lapack.cc
@@ -29,6 +29,9 @@
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
 #include "ceres/lapack.h"
+
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
 #include "glog/logging.h"
 
 // C interface to the LAPACK Cholesky factorization and triangular solve.
@@ -63,12 +66,14 @@
 namespace ceres {
 namespace internal {
 
-int LAPACK::SolveInPlaceUsingCholesky(int num_rows,
-                                      const double* in_lhs,
-                                      double* rhs_and_solution) {
+LinearSolverTerminationType LAPACK::SolveInPlaceUsingCholesky(
+    int num_rows,
+    const double* in_lhs,
+    double* rhs_and_solution,
+    string* message) {
 #ifdef CERES_NO_LAPACK
   LOG(FATAL) << "Ceres was built without a BLAS library.";
-  return -1;
+  return LINEAR_SOLVER_FATAL_ERROR;
 #else
   char uplo = 'L';
   int n = num_rows;
@@ -77,17 +82,33 @@
   double* lhs = const_cast<double*>(in_lhs);
 
   dpotrf_(&uplo, &n, lhs, &n, &info);
-  if (info != 0) {
-    LOG(INFO) << "Cholesky factorization (dpotrf) failed: " << info;
-    return info;
+  if (info < 0) {
+    LOG(FATAL) << "Congratulations, you found a bug in Ceres. "
+               << "Please report it. "
+               << "LAPACK::dpotrf fatal error. "
+               << "Argument: " << -info << " is invalid.";
+    return LINEAR_SOLVER_FATAL_ERROR;
+  }
+
+  if (info > 0) {
+    *message =
+        StringPrintf(
+            "LAPACK::dpotrf numerical failure. "
+             "The leading minor of order %d is not positive definite.", info);
+    return LINEAR_SOLVER_FAILURE;
   }
 
   dpotrs_(&uplo, &n, &nrhs, lhs, &n, rhs_and_solution, &n, &info);
-  if (info != 0) {
-    LOG(INFO) << "Triangular solve (dpotrs) failed: " << info;
+  if (info < 0) {
+    LOG(FATAL) << "Congratulations, you found a bug in Ceres. "
+               << "Please report it. "
+               << "LAPACK::dpotrs fatal error. "
+               << "Argument: " << -info << " is invalid.";
+    return LINEAR_SOLVER_FATAL_ERROR;
   }
 
-  return info;
+  *message = "Success";
+  return LINEAR_SOLVER_SUCCESS;
 #endif
 };
 
@@ -113,20 +134,27 @@
          &lwork,
          &info);
 
-  CHECK_EQ(info, 0);
-  return work;
+  if (info < 0) {
+    LOG(FATAL) << "Congratulations, you found a bug in Ceres. "
+               << "Please report it. "
+               << "LAPACK::dgels fatal error. "
+               << "Argument: " << -info << " is invalid.";
+  }
+  return static_cast<int>(work);
 #endif
 }
 
-int LAPACK::SolveUsingQR(int num_rows,
-                         int num_cols,
-                         const double* in_lhs,
-                         int work_size,
-                         double* work,
-                         double* rhs_and_solution) {
+LinearSolverTerminationType LAPACK::SolveInPlaceUsingQR(
+    int num_rows,
+    int num_cols,
+    const double* in_lhs,
+    int work_size,
+    double* work,
+    double* rhs_and_solution,
+    string* message) {
 #ifdef CERES_NO_LAPACK
   LOG(FATAL) << "Ceres was built without a LAPACK library.";
-  return -1;
+  return LINEAR_SOLVER_FATAL_ERROR;
 #else
   char trans = 'N';
   int m = num_rows;
@@ -149,7 +177,15 @@
          &work_size,
          &info);
 
-  return info;
+  if (info < 0) {
+    LOG(FATAL) << "Congratulations, you found a bug in Ceres. "
+               << "Please report it. "
+               << "LAPACK::dgels fatal error. "
+               << "Argument: " << -info << " is invalid.";
+  }
+
+  *message = "Success.";
+  return LINEAR_SOLVER_SUCCESS;
 #endif
 }
 
diff --git a/internal/ceres/lapack.h b/internal/ceres/lapack.h
index 4f3a88c..8933c2c 100644
--- a/internal/ceres/lapack.h
+++ b/internal/ceres/lapack.h
@@ -31,6 +31,10 @@
 #ifndef CERES_INTERNAL_LAPACK_H_
 #define CERES_INTERNAL_LAPACK_H_
 
+#include <string>
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
+
 namespace ceres {
 namespace internal {
 
@@ -47,10 +51,14 @@
   //
   // This function uses the LAPACK dpotrf and dpotrs routines.
   //
-  // The return value is zero if the solve is successful.
-  static int SolveInPlaceUsingCholesky(int num_rows,
-                                       const double* lhs,
-                                       double* rhs_and_solution);
+  // The return value and the message string together describe whether
+  // the solver terminated successfully or not and if so, what was the
+  // reason for failure.
+  static LinearSolverTerminationType SolveInPlaceUsingCholesky(
+      int num_rows,
+      const double* lhs,
+      double* rhs_and_solution,
+      string* message);
 
   // The SolveUsingQR function requires a buffer for its temporary
   // computation. This function given the size of the lhs matrix will
@@ -73,13 +81,17 @@
   //
   // This function uses the LAPACK dgels routine.
   //
-  // The return value is zero if the solve is successful.
-  static int SolveUsingQR(int num_rows,
-                          int num_cols,
-                          const double* lhs,
-                          int work_size,
-                          double* work,
-                          double* rhs_and_solution);
+  // The return value and the message string together describe whether
+  // the solver terminated successfully or not and if so, what was the
+  // reason for failure.
+  static LinearSolverTerminationType SolveInPlaceUsingQR(
+      int num_rows,
+      int num_cols,
+      const double* lhs,
+      int work_size,
+      double* work,
+      double* rhs_and_solution,
+      string* message);
 };
 
 }  // namespace internal
diff --git a/internal/ceres/levenberg_marquardt_strategy.cc b/internal/ceres/levenberg_marquardt_strategy.cc
index fad7c1f..ce3b69a 100644
--- a/internal/ceres/levenberg_marquardt_strategy.cc
+++ b/internal/ceres/levenberg_marquardt_strategy.cc
@@ -105,10 +105,13 @@
   // do not need to be modified.
   LinearSolver::Summary linear_solver_summary =
       linear_solver_->Solve(jacobian, residuals, solve_options, step);
-  if (linear_solver_summary.termination_type == FAILURE ||
-      !IsArrayValid(num_parameters, step)) {
+
+  if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+    LOG(WARNING) << "Linear solver fatal error.";
+  } else if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE ||
+             !IsArrayValid(num_parameters, step)) {
     LOG(WARNING) << "Linear solver failure. Failed to compute a finite step.";
-    linear_solver_summary.termination_type = FAILURE;
+    linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
   } else {
     VectorRef(step, num_parameters) *= -1.0;
   }
diff --git a/internal/ceres/levenberg_marquardt_strategy_test.cc b/internal/ceres/levenberg_marquardt_strategy_test.cc
index 86302b7..ac7ddbc 100644
--- a/internal/ceres/levenberg_marquardt_strategy_test.cc
+++ b/internal/ceres/levenberg_marquardt_strategy_test.cc
@@ -150,7 +150,7 @@
 
     TrustRegionStrategy::Summary summary =
         lms.ComputeStep(pso, &dsm, &residual, x);
-    EXPECT_EQ(summary.termination_type, FAILURE);
+    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_FAILURE);
   }
 }
 
diff --git a/internal/ceres/line_search.cc b/internal/ceres/line_search.cc
index 8323896..7ff1164 100644
--- a/internal/ceres/line_search.cc
+++ b/internal/ceres/line_search.cc
@@ -28,7 +28,9 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
+#include <iomanip>
+#include <iostream>  // NOLINT
+
 #include "ceres/line_search.h"
 
 #include "ceres/fpclassify.h"
@@ -41,6 +43,8 @@
 namespace ceres {
 namespace internal {
 namespace {
+// Precision used for floating point values in error message output.
+const int kErrorMessageNumericPrecision = 8;
 
 FunctionSample ValueSample(const double x, const double value) {
   FunctionSample sample;
@@ -64,13 +68,12 @@
 
 }  // namespace
 
+
+std::ostream& operator<<(std::ostream &os, const FunctionSample& sample);
+
 // Convenience stream operator for pushing FunctionSamples into log messages.
-std::ostream& operator<<(std::ostream &os,
-                         const FunctionSample& sample) {
-  os << "[x: " << sample.x << ", value: " << sample.value
-     << ", gradient: " << sample.gradient << ", value_is_valid: "
-     << std::boolalpha << sample.value_is_valid << ", gradient_is_valid: "
-     << std::boolalpha << sample.gradient_is_valid << "]";
+std::ostream& operator<<(std::ostream &os, const FunctionSample& sample) {
+  os << sample.ToDebugString();
   return os;
 }
 
@@ -170,6 +173,7 @@
   // to avoid replicating current.value_is_valid == false
   // behaviour in WolfeLineSearch.
   CHECK(lowerbound.value_is_valid)
+      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
       << "Ceres bug: lower-bound sample for interpolation is invalid, "
       << "please contact the developers!, interpolation_type: "
       << LineSearchInterpolationTypeToString(interpolation_type)
@@ -237,20 +241,26 @@
   FunctionSample current = ValueAndGradientSample(step_size_estimate, 0.0, 0.0);
   current.value_is_valid = false;
 
-  const bool interpolation_uses_gradients =
+  // As the Armijo line search algorithm always uses the initial point, for
+  // which both the function value and derivative are known, when fitting a
+  // minimizing polynomial, we can fit up to a quadratic without requiring the
+  // gradient at the current query point.
+  const bool interpolation_uses_gradient_at_current_sample =
       options().interpolation_type == CUBIC;
   const double descent_direction_max_norm =
       static_cast<const LineSearchFunction*>(function)->DirectionInfinityNorm();
 
   ++summary->num_function_evaluations;
-  if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+  if (interpolation_uses_gradient_at_current_sample) {
+    ++summary->num_gradient_evaluations;
+  }
   current.value_is_valid =
       function->Evaluate(current.x,
                          &current.value,
-                         interpolation_uses_gradients
+                         interpolation_uses_gradient_at_current_sample
                          ? &current.gradient : NULL);
   current.gradient_is_valid =
-      interpolation_uses_gradients && current.value_is_valid;
+      interpolation_uses_gradient_at_current_sample && current.value_is_valid;
   while (!current.value_is_valid ||
          current.value > (initial_cost
                           + options().sufficient_decrease
@@ -265,7 +275,7 @@
                        "satisfying the sufficient decrease condition within "
                        "specified max_num_iterations: %d.",
                        options().max_num_iterations);
-      LOG(WARNING) << summary->error;
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
       return;
     }
 
@@ -283,7 +293,7 @@
           StringPrintf("Line search failed: step_size too small: %.5e "
                        "with descent_direction_max_norm: %.5e.", step_size,
                        descent_direction_max_norm);
-      LOG(WARNING) << summary->error;
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
       return;
     }
 
@@ -291,14 +301,16 @@
     current.x = step_size;
 
     ++summary->num_function_evaluations;
-    if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+    if (interpolation_uses_gradient_at_current_sample) {
+      ++summary->num_gradient_evaluations;
+    }
     current.value_is_valid =
       function->Evaluate(current.x,
                          &current.value,
-                         interpolation_uses_gradients
+                         interpolation_uses_gradient_at_current_sample
                          ? &current.gradient : NULL);
     current.gradient_is_valid =
-        interpolation_uses_gradients && current.value_is_valid;
+        interpolation_uses_gradient_at_current_sample && current.value_is_valid;
   }
 
   summary->optimal_step_size = current.x;
@@ -350,33 +362,36 @@
                              &bracket_low,
                              &bracket_high,
                              &do_zoom_search,
-                             summary) &&
-      summary->num_iterations < options().max_num_iterations) {
-    // Failed to find either a valid point or a valid bracket, but we did not
-    // run out of iterations.
+                             summary)) {
+    // Failed to find either a valid point, a valid bracket satisfying the Wolfe
+    // conditions, or even a step size > minimum tolerance satisfying the Armijo
+    // condition.
     return;
   }
+
   if (!do_zoom_search) {
     // Either: Bracketing phase already found a point satisfying the strong
     // Wolfe conditions, thus no Zoom required.
     //
     // Or: Bracketing failed to find a valid bracket or a point satisfying the
-    // strong Wolfe conditions within max_num_iterations.  As this is an
-    // 'artificial' constraint, and we would otherwise fail to produce a valid
-    // point when ArmijoLineSearch would succeed, we return the lowest point
-    // found thus far which satsifies the Armijo condition (but not the Wolfe
-    // conditions).
-    CHECK(bracket_low.value_is_valid)
-        << "Ceres bug: Bracketing produced an invalid bracket_low, please "
-        << "contact the developers!, bracket_low: " << bracket_low
-        << ", bracket_high: " << bracket_high << ", num_iterations: "
-        << summary->num_iterations << ", max_num_iterations: "
-        << options().max_num_iterations;
+    // strong Wolfe conditions within max_num_iterations, or whilst searching
+    // shrank the bracket width until it was below our minimum tolerance.
+    // As these are 'artificial' constraints, and we would otherwise fail to
+    // produce a valid point when ArmijoLineSearch would succeed, we return the
+    // point with the lowest cost found thus far which satisfies the Armijo
+    // condition (but not the Wolfe conditions).
     summary->optimal_step_size = bracket_low.x;
     summary->success = true;
     return;
   }
 
+  VLOG(3) << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
+          << "Starting line search zoom phase with bracket_low: "
+          << bracket_low << ", bracket_high: " << bracket_high
+          << ", bracket width: " << fabs(bracket_low.x - bracket_high.x)
+          << ", bracket abs delta cost: "
+          << fabs(bracket_low.value - bracket_high.value);
+
   // Wolfe Zoom phase: Called when the Bracketing phase finds an interval of
   // non-zero, finite width that should bracket step sizes which satisfy the
   // (strong) Wolfe conditions (before finding a step size that satisfies the
@@ -419,11 +434,22 @@
   summary->success = true;
 }
 
-// Returns true iff bracket_low & bracket_high bound a bracket that contains
-// points which satisfy the strong Wolfe conditions. Otherwise, on return false,
-// if we stopped searching due to the 'artificial' condition of reaching
-// max_num_iterations, bracket_low is the step size amongst all those
-// tested, which satisfied the Armijo decrease condition and minimized f().
+// Returns true if either:
+//
+// A termination condition satisfying the (strong) Wolfe bracketing conditions
+// is found:
+//
+// - A valid point, defined as a bracket of zero width [zoom not required].
+// - A valid bracket (of width > tolerance), [zoom required].
+//
+// Or, searching was stopped due to an 'artificial' constraint, i.e. not
+// a condition imposed / required by the underlying algorithm, but instead an
+// engineering / implementation consideration. But a step which exceeds the
+// minimum step size, and satisfies the Armijo condition was still found,
+// and should thus be used [zoom not required].
+//
+// Returns false if no step size > minimum step size was found which
+// satisfies at least the Armijo condition.
 bool WolfeLineSearch::BracketingPhase(
     const FunctionSample& initial_position,
     const double step_size_estimate,
@@ -437,23 +463,28 @@
   FunctionSample current = ValueAndGradientSample(step_size_estimate, 0.0, 0.0);
   current.value_is_valid = false;
 
-  const bool interpolation_uses_gradients =
-      options().interpolation_type == CUBIC;
   const double descent_direction_max_norm =
       static_cast<const LineSearchFunction*>(function)->DirectionInfinityNorm();
 
   *do_zoom_search = false;
   *bracket_low = initial_position;
 
+  // As we require the gradient to evaluate the Wolfe condition, we always
+  // calculate it together with the value, irrespective of the interpolation
+  // type.  As opposed to only calculating the gradient after the Armijo
+  // condition is satisifed, as the computational saving from this approach
+  // would be slight (perhaps even negative due to the extra call).  Also,
+  // always calculating the value & gradient together protects against us
+  // reporting invalid solutions if the cost function returns slightly different
+  // function values when evaluated with / without gradients (due to numerical
+  // issues).
   ++summary->num_function_evaluations;
-  if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+  ++summary->num_gradient_evaluations;
   current.value_is_valid =
       function->Evaluate(current.x,
                          &current.value,
-                         interpolation_uses_gradients
-                         ? &current.gradient : NULL);
-  current.gradient_is_valid =
-      interpolation_uses_gradients && current.value_is_valid;
+                         &current.gradient);
+  current.gradient_is_valid = current.value_is_valid;
 
   while (true) {
     ++summary->num_iterations;
@@ -470,22 +501,14 @@
       *do_zoom_search = true;
       *bracket_low = previous;
       *bracket_high = current;
+      VLOG(3) << std::scientific
+              << std::setprecision(kErrorMessageNumericPrecision)
+              << "Bracket found: current step (" << current.x
+              << ") violates Armijo sufficient condition, or has passed an "
+              << "inflection point of f() based on value.";
       break;
     }
 
-    // Irrespective of the interpolation type we are using, we now need the
-    // gradient at the current point (which satisfies the Armijo condition)
-    // in order to check the strong Wolfe conditions.
-    if (!interpolation_uses_gradients) {
-      ++summary->num_function_evaluations;
-      ++summary->num_gradient_evaluations;
-      current.value_is_valid =
-          function->Evaluate(current.x,
-                             &current.value,
-                             &current.gradient);
-      current.gradient_is_valid = current.value_is_valid;
-    }
-
     if (current.value_is_valid &&
         fabs(current.gradient) <=
         -options().sufficient_curvature_decrease * initial_position.gradient) {
@@ -493,6 +516,11 @@
       // valid termination point, therefore a Zoom not required.
       *bracket_low = current;
       *bracket_high = current;
+      VLOG(3) << std::scientific
+              << std::setprecision(kErrorMessageNumericPrecision)
+              << "Bracketing phase found step size: " << current.x
+              << ", satisfying strong Wolfe conditions, initial_position: "
+              << initial_position << ", current: " << current;
       break;
 
     } else if (current.value_is_valid && current.gradient >= 0) {
@@ -505,6 +533,30 @@
       // Note inverse ordering from first bracket case.
       *bracket_low = current;
       *bracket_high = previous;
+      VLOG(3) << "Bracket found: current step (" << current.x
+              << ") satisfies Armijo, but has gradient >= 0, thus have passed "
+              << "an inflection point of f().";
+      break;
+
+    } else if (current.value_is_valid &&
+               fabs(current.x - previous.x) * descent_direction_max_norm
+               < options().min_step_size) {
+      // We have shrunk the search bracket to a width less than our tolerance,
+      // and still not found either a point satisfying the strong Wolfe
+      // conditions, or a valid bracket containing such a point. Stop searching
+      // and set bracket_low to the step size amongst all those tested which
+      // minimizes f() and satisfies the Armijo condition.
+      LOG_IF(WARNING, !options().is_silent)
+          << "Line search failed: Wolfe bracketing phase shrank "
+          << "bracket width: " << fabs(current.x - previous.x)
+          <<  ", to < tolerance: " << options().min_step_size
+          << ", with descent_direction_max_norm: "
+          << descent_direction_max_norm << ", and failed to find "
+          << "a point satisfying the strong Wolfe conditions or a "
+          << "bracketing containing such a point. Accepting "
+          << "point found satisfying Armijo condition only, to "
+          << "allow continuation.";
+      *bracket_low = current;
       break;
 
     } else if (summary->num_iterations >= options().max_num_iterations) {
@@ -516,14 +568,14 @@
                        "find a point satisfying strong Wolfe conditions, or a "
                        "bracket containing such a point within specified "
                        "max_num_iterations: %d", options().max_num_iterations);
-      LOG(WARNING) << summary->error;
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
       // Ensure that bracket_low is always set to the step size amongst all
       // those tested which minimizes f() and satisfies the Armijo condition
       // when we terminate due to the 'artificial' max_num_iterations condition.
       *bracket_low =
           current.value_is_valid && current.value < bracket_low->value
           ? current : *bracket_low;
-      return false;
+      break;
     }
     // Either: f(current) is invalid; or, f(current) is valid, but does not
     // satisfy the strong Wolfe conditions itself, or the conditions for
@@ -555,7 +607,7 @@
           StringPrintf("Line search failed: step_size too small: %.5e "
                        "with descent_direction_max_norm: %.5e", step_size,
                        descent_direction_max_norm);
-      LOG(WARNING) << summary->error;
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
       return false;
     }
 
@@ -563,17 +615,22 @@
     current.x = step_size;
 
     ++summary->num_function_evaluations;
-    if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+    ++summary->num_gradient_evaluations;
     current.value_is_valid =
         function->Evaluate(current.x,
                            &current.value,
-                           interpolation_uses_gradients
-                           ? &current.gradient : NULL);
-    current.gradient_is_valid =
-        interpolation_uses_gradients && current.value_is_valid;
+                           &current.gradient);
+    current.gradient_is_valid = current.value_is_valid;
   }
-  // Either we have a valid point, defined as a bracket of zero width, in which
-  // case no zoom is required, or a valid bracket in which to zoom.
+
+  // Ensure that even if a valid bracket was found, we will only mark a zoom
+  // as required if the bracket's width is greater than our minimum tolerance.
+  if (*do_zoom_search &&
+      fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm
+      < options().min_step_size) {
+    *do_zoom_search = false;
+  }
+
   return true;
 }
 
@@ -589,6 +646,7 @@
   Function* function = options().function;
 
   CHECK(bracket_low.value_is_valid && bracket_low.gradient_is_valid)
+      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
       << "Ceres bug: f_low input to Wolfe Zoom invalid, please contact "
       << "the developers!, initial_position: " << initial_position
       << ", bracket_low: " << bracket_low
@@ -599,22 +657,46 @@
   // not have been calculated (if bracket_high.value does not satisfy the
   // Armijo sufficient decrease condition and interpolation method does not
   // require it).
+  //
+  // We also do not require that: bracket_low.value < bracket_high.value,
+  // although this is typical. This is to deal with the case when
+  // bracket_low = initial_position, bracket_high is the first sample,
+  // and bracket_high does not satisfy the Armijo condition, but still has
+  // bracket_high.value < initial_position.value.
   CHECK(bracket_high.value_is_valid)
+      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
       << "Ceres bug: f_high input to Wolfe Zoom invalid, please "
       << "contact the developers!, initial_position: " << initial_position
       << ", bracket_low: " << bracket_low
       << ", bracket_high: "<< bracket_high;
-  CHECK_LT(bracket_low.gradient *
-           (bracket_high.x - bracket_low.x), 0.0)
-      << "Ceres bug: f_high input to Wolfe Zoom does not satisfy gradient "
-      << "condition combined with f_low, please contact the developers!"
-      << ", initial_position: " << initial_position
-      << ", bracket_low: " << bracket_low
-      << ", bracket_high: "<< bracket_high;
+
+  if (bracket_low.gradient * (bracket_high.x - bracket_low.x) >= 0) {
+    // The third condition for a valid initial bracket:
+    //
+    //   3. bracket_high is chosen after bracket_low, s.t.
+    //      bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
+    //
+    // is not satisfied.  As this can happen when the users' cost function
+    // returns inconsistent gradient values relative to the function values,
+    // we do not CHECK_LT(), but we do stop processing and return an invalid
+    // value.
+    summary->error =
+        StringPrintf("Line search failed: Wolfe zoom phase passed a bracket "
+                     "which does not satisfy: bracket_low.gradient * "
+                     "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] "
+                     "with initial_position: %s, bracket_low: %s, bracket_high:"
+                     " %s, the most likely cause of which is the cost function "
+                     "returning inconsistent gradient & function values.",
+                     bracket_low.gradient * (bracket_high.x - bracket_low.x),
+                     initial_position.ToDebugString().c_str(),
+                     bracket_low.ToDebugString().c_str(),
+                     bracket_high.ToDebugString().c_str());
+    LOG_IF(WARNING, !options().is_silent) << summary->error;
+    solution->value_is_valid = false;
+    return false;
+  }
 
   const int num_bracketing_iterations = summary->num_iterations;
-  const bool interpolation_uses_gradients =
-      options().interpolation_type == CUBIC;
   const double descent_direction_max_norm =
       static_cast<const LineSearchFunction*>(function)->DirectionInfinityNorm();
 
@@ -630,7 +712,7 @@
                        "within specified max_num_iterations: %d, "
                        "(num iterations taken for bracketing: %d).",
                        options().max_num_iterations, num_bracketing_iterations);
-      LOG(WARNING) << summary->error;
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
       return false;
     }
     if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm
@@ -642,7 +724,7 @@
                        "too small with descent_direction_max_norm: %.5e.",
                        fabs(bracket_high.x - bracket_low.x),
                        descent_direction_max_norm);
-      LOG(WARNING) << summary->error;
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
       return false;
     }
 
@@ -669,15 +751,23 @@
             upper_bound_step.x);
     // No check on magnitude of step size being too small here as it is
     // lower-bounded by the initial bracket start point, which was valid.
+    //
+    // As we require the gradient to evaluate the Wolfe condition, we always
+    // calculate it together with the value, irrespective of the interpolation
+    // type.  As opposed to only calculating the gradient after the Armijo
+    // condition is satisfied, as the computational saving from this approach
+    // would be slight (perhaps even negative due to the extra call).  Also,
+    // always calculating the value & gradient together protects against us
+    // reporting invalid solutions if the cost function returns slightly
+    // different function values when evaluated with / without gradients (due
+    // to numerical issues).
     ++summary->num_function_evaluations;
-    if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+    ++summary->num_gradient_evaluations;
     solution->value_is_valid =
         function->Evaluate(solution->x,
                            &solution->value,
-                           interpolation_uses_gradients
-                           ? &solution->gradient : NULL);
-    solution->gradient_is_valid =
-        interpolation_uses_gradients && solution->value_is_valid;
+                           &solution->gradient);
+    solution->gradient_is_valid = solution->value_is_valid;
     if (!solution->value_is_valid) {
       summary->error =
           StringPrintf("Line search failed: Wolfe Zoom phase found "
@@ -685,10 +775,16 @@
                        "between low_step: %.5e and high_step: %.5e "
                        "at which function is valid.",
                        solution->x, bracket_low.x, bracket_high.x);
-      LOG(WARNING) << summary->error;
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
       return false;
     }
 
+    VLOG(3) << "Zoom iteration: "
+            << summary->num_iterations - num_bracketing_iterations
+            << ", bracket_low: " << bracket_low
+            << ", bracket_high: " << bracket_high
+            << ", minimizing solution: " << *solution;
+
     if ((solution->value > (initial_position.value
                             + options().sufficient_decrease
                             * initial_position.gradient
@@ -701,31 +797,13 @@
     }
 
     // Armijo sufficient decrease satisfied, check strong Wolfe condition.
-    if (!interpolation_uses_gradients) {
-      // Irrespective of the interpolation type we are using, we now need the
-      // gradient at the current point (which satisfies the Armijo condition)
-      // in order to check the strong Wolfe conditions.
-      ++summary->num_function_evaluations;
-      ++summary->num_gradient_evaluations;
-      solution->value_is_valid =
-          function->Evaluate(solution->x,
-                             &solution->value,
-                             &solution->gradient);
-      solution->gradient_is_valid = solution->value_is_valid;
-      if (!solution->value_is_valid) {
-        summary->error =
-            StringPrintf("Line search failed: Wolfe Zoom phase found "
-                         "step_size: %.5e, for which function is invalid, "
-                         "between low_step: %.5e and high_step: %.5e "
-                         "at which function is valid.",
-                         solution->x, bracket_low.x, bracket_high.x);
-        LOG(WARNING) << summary->error;
-        return false;
-      }
-    }
     if (fabs(solution->gradient) <=
         -options().sufficient_curvature_decrease * initial_position.gradient) {
       // Found a valid termination point satisfying strong Wolfe conditions.
+      VLOG(3) << std::scientific
+              << std::setprecision(kErrorMessageNumericPrecision)
+              << "Zoom phase found step size: " << solution->x
+              << ", satisfying strong Wolfe conditions.";
       break;
 
     } else if (solution->gradient * (bracket_high.x - bracket_low.x) >= 0) {
@@ -741,5 +819,3 @@
 
 }  // namespace internal
 }  // namespace ceres
-
-#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
diff --git a/internal/ceres/line_search.h b/internal/ceres/line_search.h
index 5f24e9f..97b9bc6 100644
--- a/internal/ceres/line_search.h
+++ b/internal/ceres/line_search.h
@@ -33,8 +33,6 @@
 #ifndef CERES_INTERNAL_LINE_SEARCH_H_
 #define CERES_INTERNAL_LINE_SEARCH_H_
 
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
 #include <string>
 #include <vector>
 #include "ceres/internal/eigen.h"
@@ -71,6 +69,7 @@
           max_num_iterations(20),
           sufficient_curvature_decrease(0.9),
           max_step_expansion(10.0),
+          is_silent(false),
           function(NULL) {}
 
     // Degree of the polynomial used to approximate the objective
@@ -144,6 +143,8 @@
     // By definition for expansion, max_step_expansion > 1.0.
     double max_step_expansion;
 
+    bool is_silent;
+
     // The one dimensional function that the line search algorithm
     // minimizes.
     Function* function;
@@ -295,5 +296,4 @@
 }  // namespace internal
 }  // namespace ceres
 
-#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
 #endif  // CERES_INTERNAL_LINE_SEARCH_H_
diff --git a/internal/ceres/line_search_direction.cc b/internal/ceres/line_search_direction.cc
index 8ded823..dddcecd 100644
--- a/internal/ceres/line_search_direction.cc
+++ b/internal/ceres/line_search_direction.cc
@@ -28,8 +28,6 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
 #include "ceres/line_search_direction.h"
 #include "ceres/line_search_minimizer.h"
 #include "ceres/low_rank_inverse_hessian.h"
@@ -67,7 +65,7 @@
       case FLETCHER_REEVES:
         beta = current.gradient_squared_norm / previous.gradient_squared_norm;
         break;
-      case POLAK_RIBIRERE:
+      case POLAK_RIBIERE:
         gradient_change = current.gradient - previous.gradient;
         beta = (current.gradient.dot(gradient_change) /
                 previous.gradient_squared_norm);
@@ -121,6 +119,7 @@
     low_rank_inverse_hessian_.Update(
         previous.search_direction * previous.step_size,
         current.gradient - previous.gradient);
+
     search_direction->setZero();
     low_rank_inverse_hessian_.RightMultiply(current.gradient.data(),
                                             search_direction->data());
@@ -176,9 +175,46 @@
     const Vector delta_gradient = current.gradient - previous.gradient;
     const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient);
 
-    if (delta_x_dot_delta_gradient <= 1e-10) {
+    // The (L)BFGS algorithm explicitly requires that the secant equation:
+    //
+    //   B_{k+1} * s_k = y_k
+    //
+    // Is satisfied at each iteration, where B_{k+1} is the approximated
+    // Hessian at the k+1-th iteration, s_k = (x_{k+1} - x_{k}) and
+    // y_k = (grad_{k+1} - grad_{k}). As the approximated Hessian must be
+    // positive definite, this is equivalent to the condition:
+    //
+    //   s_k^T * y_k > 0     [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0]
+    //
+    // This condition would always be satisfied if the function was strictly
+    // convex, alternatively, it is always satisfied provided that a Wolfe line
+    // search is used (even if the function is not strictly convex).  See [1]
+    // (p138) for a proof.
+    //
+    // Although Ceres will always use a Wolfe line search when using (L)BFGS,
+    // practical implementation considerations mean that the line search
+    // may return a point that satisfies only the Armijo condition, and thus
+    // could violate the Secant equation.  As such, we will only use a step
+    // to update the Hessian approximation if:
+    //
+    //   s_k^T * y_k > tolerance
+    //
+    // It is important that tolerance is very small (and >=0), as otherwise we
+    // might skip the update too often and fail to capture important curvature
+    // information in the Hessian.  For example going from 1e-10 -> 1e-14
+    // improves the NIST benchmark score from 43/54 to 53/54.
+    //
+    // [1] Nocedal J, Wright S, Numerical Optimization, 2nd Ed. Springer, 1999.
+    //
+    // TODO(alexs.mac): Consider using Damped BFGS update instead of
+    // skipping update.
+    const double kBFGSSecantConditionHessianUpdateTolerance = 1e-14;
+    if (delta_x_dot_delta_gradient <=
+        kBFGSSecantConditionHessianUpdateTolerance) {
       VLOG(2) << "Skipping BFGS Update, delta_x_dot_delta_gradient too "
-              << "small: " << delta_x_dot_delta_gradient;
+              << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
+              << kBFGSSecantConditionHessianUpdateTolerance
+              << " (Secant condition).";
     } else {
       // Update dense inverse Hessian approximation.
 
@@ -214,8 +250,13 @@
         //     Part II: Implementation and experiments, Management Science,
         //     20(5), 863-874, 1974.
         // [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999.
-        inverse_hessian_ *=
+        const double approximate_eigenvalue_scale =
             delta_x_dot_delta_gradient / delta_gradient.dot(delta_gradient);
+        inverse_hessian_ *= approximate_eigenvalue_scale;
+
+        VLOG(4) << "Applying approximate_eigenvalue_scale: "
+                << approximate_eigenvalue_scale << " to initial inverse "
+                << "Hessian approximation.";
       }
       initialized_ = true;
 
@@ -329,5 +370,3 @@
 
 }  // namespace internal
 }  // namespace ceres
-
-#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
diff --git a/internal/ceres/line_search_direction.h b/internal/ceres/line_search_direction.h
index 0857cb0..c77fdc8 100644
--- a/internal/ceres/line_search_direction.h
+++ b/internal/ceres/line_search_direction.h
@@ -31,8 +31,6 @@
 #ifndef CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
 #define CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
 
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
 #include "ceres/internal/eigen.h"
 #include "ceres/line_search_minimizer.h"
 #include "ceres/types.h"
@@ -71,5 +69,4 @@
 }  // namespace internal
 }  // namespace ceres
 
-#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
 #endif  // CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
diff --git a/internal/ceres/line_search_minimizer.cc b/internal/ceres/line_search_minimizer.cc
index 2cc89fa..ae77a73 100644
--- a/internal/ceres/line_search_minimizer.cc
+++ b/internal/ceres/line_search_minimizer.cc
@@ -38,8 +38,6 @@
 // For details on the theory and implementation see "Numerical
 // Optimization" by Nocedal & Wright.
 
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
 #include "ceres/line_search_minimizer.h"
 
 #include <algorithm>
@@ -64,25 +62,36 @@
 namespace ceres {
 namespace internal {
 namespace {
-// Small constant for various floating point issues.
-// TODO(sameeragarwal): Change to a better name if this has only one
-// use.
-const double kEpsilon = 1e-12;
 
+// TODO(sameeragarwal): I think there is a small bug here, in that if
+// the evaluation fails, then the state can contain garbage. Look at
+// this more carefully.
 bool Evaluate(Evaluator* evaluator,
               const Vector& x,
-              LineSearchMinimizer::State* state) {
-  const bool status = evaluator->Evaluate(x.data(),
-                                          &(state->cost),
-                                          NULL,
-                                          state->gradient.data(),
-                                          NULL);
-  if (status) {
-    state->gradient_squared_norm = state->gradient.squaredNorm();
-    state->gradient_max_norm = state->gradient.lpNorm<Eigen::Infinity>();
+              LineSearchMinimizer::State* state,
+              string* message) {
+  if (!evaluator->Evaluate(x.data(),
+                           &(state->cost),
+                           NULL,
+                           state->gradient.data(),
+                           NULL)) {
+    *message = "Gradient evaluation failed.";
+    return false;
   }
 
-  return status;
+  Vector negative_gradient = -state->gradient;
+  Vector projected_gradient_step(x.size());
+  if (!evaluator->Plus(x.data(),
+                       negative_gradient.data(),
+                       projected_gradient_step.data())) {
+    *message = "projected_gradient_step = Plus(x, -gradient) failed.";
+    return false;
+  }
+
+  state->gradient_squared_norm = (x - projected_gradient_step).squaredNorm();
+  state->gradient_max_norm =
+      (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
+  return true;
 }
 
 }  // namespace
@@ -90,6 +99,7 @@
 void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
                                    double* parameters,
                                    Solver::Summary* summary) {
+  const bool is_not_silent = !options.is_silent;
   double start_time = WallTimeInSeconds();
   double iteration_start_time =  start_time;
 
@@ -115,14 +125,17 @@
   iteration_summary.step_is_successful = false;
   iteration_summary.cost_change = 0.0;
   iteration_summary.gradient_max_norm = 0.0;
+  iteration_summary.gradient_norm = 0.0;
   iteration_summary.step_norm = 0.0;
   iteration_summary.linear_solver_iterations = 0;
   iteration_summary.step_solver_time_in_seconds = 0;
 
   // Do initial cost and Jacobian evaluation.
-  if (!Evaluate(evaluator, x, &current_state)) {
-    LOG(WARNING) << "Terminating: Cost and gradient evaluation failed.";
-    summary->termination_type = NUMERICAL_FAILURE;
+  if (!Evaluate(evaluator, x, &current_state, &summary->message)) {
+    summary->termination_type = FAILURE;
+    summary->message = "Initial cost and jacobian evaluation failed. "
+        "More details: " + summary->message;
+    LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
     return;
   }
 
@@ -130,20 +143,15 @@
   iteration_summary.cost = current_state.cost + summary->fixed_cost;
 
   iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
+  iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
 
-  // The initial gradient max_norm is bounded from below so that we do
-  // not divide by zero.
-  const double initial_gradient_max_norm =
-      max(iteration_summary.gradient_max_norm, kEpsilon);
-  const double absolute_gradient_tolerance =
-      options.gradient_tolerance * initial_gradient_max_norm;
-
-  if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
-    summary->termination_type = GRADIENT_TOLERANCE;
-    VLOG(1) << "Terminating: Gradient tolerance reached."
-            << "Relative gradient max norm: "
-            << iteration_summary.gradient_max_norm / initial_gradient_max_norm
-            << " <= " << options.gradient_tolerance;
+  if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+    summary->message = StringPrintf("Gradient tolerance reached. "
+                                    "Gradient max norm: %e <= %e",
+                                    iteration_summary.gradient_max_norm,
+                                    options.gradient_tolerance);
+    summary->termination_type = CONVERGENCE;
+    VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
     return;
   }
 
@@ -188,11 +196,10 @@
   scoped_ptr<LineSearch>
       line_search(LineSearch::Create(options.line_search_type,
                                      line_search_options,
-                                     &summary->error));
+                                     &summary->message));
   if (line_search.get() == NULL) {
-    LOG(ERROR) << "Ceres bug: Unable to create a LineSearch object, please "
-               << "contact the developers!, error: " << summary->error;
-    summary->termination_type = DID_NOT_RUN;
+    summary->termination_type = FAILURE;
+    LOG_IF(ERROR, is_not_silent) << "Terminating: " << summary->message;
     return;
   }
 
@@ -200,22 +207,24 @@
   int num_line_search_direction_restarts = 0;
 
   while (true) {
-    if (!RunCallbacks(options.callbacks, iteration_summary, summary)) {
-      return;
+    if (!RunCallbacks(options, iteration_summary, summary)) {
+      break;
     }
 
     iteration_start_time = WallTimeInSeconds();
     if (iteration_summary.iteration >= options.max_num_iterations) {
+      summary->message = "Maximum number of iterations reached.";
       summary->termination_type = NO_CONVERGENCE;
-      VLOG(1) << "Terminating: Maximum number of iterations reached.";
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
       break;
     }
 
     const double total_solver_time = iteration_start_time - start_time +
         summary->preprocessor_time_in_seconds;
     if (total_solver_time >= options.max_solver_time_in_seconds) {
+      summary->message = "Maximum solver time reached.";
       summary->termination_type = NO_CONVERGENCE;
-      VLOG(1) << "Terminating: Maximum solver time reached.";
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
       break;
     }
 
@@ -240,14 +249,13 @@
       // Line search direction failed to generate a new direction, and we
       // have already reached our specified maximum number of restarts,
       // terminate optimization.
-      summary->error =
+      summary->message =
           StringPrintf("Line search direction failure: specified "
                        "max_num_line_search_direction_restarts: %d reached.",
                        options.max_num_line_search_direction_restarts);
-      LOG(WARNING) << summary->error << " terminating optimization.";
-      summary->termination_type = NUMERICAL_FAILURE;
+      summary->termination_type = FAILURE;
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
       break;
-
     } else if (!line_search_status) {
       // Restart line search direction with gradient descent on first iteration
       // as we have not yet reached our maximum number of restarts.
@@ -255,13 +263,16 @@
                options.max_num_line_search_direction_restarts);
 
       ++num_line_search_direction_restarts;
-      LOG(WARNING)
+      LOG_IF(WARNING, is_not_silent)
           << "Line search direction algorithm: "
-          << LineSearchDirectionTypeToString(options.line_search_direction_type)
-          << ", failed to produce a valid new direction at iteration: "
-          << iteration_summary.iteration << ". Restarting, number of "
-          << "restarts: " << num_line_search_direction_restarts << " / "
-          << options.max_num_line_search_direction_restarts << " [max].";
+          << LineSearchDirectionTypeToString(
+              options.line_search_direction_type)
+          << ", failed to produce a valid new direction at "
+          << "iteration: " << iteration_summary.iteration
+          << ". Restarting, number of restarts: "
+          << num_line_search_direction_restarts << " / "
+          << options.max_num_line_search_direction_restarts
+          << " [max].";
       line_search_direction.reset(
           LineSearchDirection::Create(line_search_direction_options));
       current_state.search_direction = -current_state.gradient;
@@ -286,14 +297,14 @@
     // direction in a line search, most likely cause for this being violated
     // would be a numerical failure in the line search direction calculation.
     if (initial_step_size < 0.0) {
-      summary->error =
+      summary->message =
           StringPrintf("Numerical failure in line search, initial_step_size is "
                        "negative: %.5e, directional_derivative: %.5e, "
                        "(current_cost - previous_cost): %.5e",
                        initial_step_size, current_state.directional_derivative,
                        (current_state.cost - previous_state.cost));
-      LOG(WARNING) << summary->error;
-      summary->termination_type = NUMERICAL_FAILURE;
+      summary->termination_type = FAILURE;
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
       break;
     }
 
@@ -301,6 +312,18 @@
                         current_state.cost,
                         current_state.directional_derivative,
                         &line_search_summary);
+    if (!line_search_summary.success) {
+      summary->message =
+          StringPrintf("Numerical failure in line search, failed to find "
+                       "a valid step size, (did not run out of iterations) "
+                       "using initial_step_size: %.5e, initial_cost: %.5e, "
+                       "initial_gradient: %.5e.",
+                       initial_step_size, current_state.cost,
+                       current_state.directional_derivative);
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+      summary->termination_type = FAILURE;
+      break;
+    }
 
     current_state.step_size = line_search_summary.optimal_step_size;
     delta = current_state.step_size * current_state.search_direction;
@@ -309,36 +332,31 @@
     iteration_summary.step_solver_time_in_seconds =
         WallTimeInSeconds() - iteration_start_time;
 
-    // TODO(sameeragarwal): Collect stats.
-    if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data()) ||
-        !Evaluate(evaluator, x_plus_delta, &current_state)) {
-      LOG(WARNING) << "Evaluation failed.";
+    if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
+      summary->termination_type = FAILURE;
+      summary->message =
+          "x_plus_delta = Plus(x, delta) failed. This should not happen "
+          "as the step was valid when it was selected by the line search.";
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    } else if (!Evaluate(evaluator,
+                         x_plus_delta,
+                         &current_state,
+                         &summary->message)) {
+      summary->termination_type = FAILURE;
+      summary->message =
+          "Step failed to evaluate. This should not happen as the step was "
+          "valid when it was selected by the line search. More details: " +
+          summary->message;
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+      break;
     } else {
       x = x_plus_delta;
     }
 
     iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
-    if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
-      summary->termination_type = GRADIENT_TOLERANCE;
-      VLOG(1) << "Terminating: Gradient tolerance reached."
-              << "Relative gradient max norm: "
-              << iteration_summary.gradient_max_norm / initial_gradient_max_norm
-              << " <= " << options.gradient_tolerance;
-      break;
-    }
-
+    iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
     iteration_summary.cost_change = previous_state.cost - current_state.cost;
-    const double absolute_function_tolerance =
-        options.function_tolerance * previous_state.cost;
-    if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
-      VLOG(1) << "Terminating. Function tolerance reached. "
-              << "|cost_change|/cost: "
-              << fabs(iteration_summary.cost_change) / previous_state.cost
-              << " <= " << options.function_tolerance;
-      summary->termination_type = FUNCTION_TOLERANCE;
-      return;
-    }
-
     iteration_summary.cost = current_state.cost + summary->fixed_cost;
     iteration_summary.step_norm = delta.norm();
     iteration_summary.step_is_valid = true;
@@ -359,10 +377,32 @@
 
     summary->iterations.push_back(iteration_summary);
     ++summary->num_successful_steps;
+
+    if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+      summary->message = StringPrintf("Gradient tolerance reached. "
+                                      "Gradient max norm: %e <= %e",
+                                      iteration_summary.gradient_max_norm,
+                                      options.gradient_tolerance);
+      summary->termination_type = CONVERGENCE;
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    }
+
+    const double absolute_function_tolerance =
+        options.function_tolerance * previous_state.cost;
+    if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
+      summary->message =
+          StringPrintf("Function tolerance reached. "
+                       "|cost_change|/cost: %e <= %e",
+                       fabs(iteration_summary.cost_change) /
+                       previous_state.cost,
+                       options.function_tolerance);
+      summary->termination_type = CONVERGENCE;
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    }
   }
 }
 
 }  // namespace internal
 }  // namespace ceres
-
-#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
diff --git a/internal/ceres/line_search_minimizer.h b/internal/ceres/line_search_minimizer.h
index 59f5c3f..f82f139 100644
--- a/internal/ceres/line_search_minimizer.h
+++ b/internal/ceres/line_search_minimizer.h
@@ -31,8 +31,6 @@
 #ifndef CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
 #define CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
 
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
 #include "ceres/minimizer.h"
 #include "ceres/solver.h"
 #include "ceres/types.h"
@@ -76,5 +74,4 @@
 }  // namespace internal
 }  // namespace ceres
 
-#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
 #endif  // CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
diff --git a/internal/ceres/linear_solver.cc b/internal/ceres/linear_solver.cc
index 08c3ba1..e983e2c 100644
--- a/internal/ceres/linear_solver.cc
+++ b/internal/ceres/linear_solver.cc
@@ -45,6 +45,30 @@
 LinearSolver::~LinearSolver() {
 }
 
+LinearSolverType LinearSolver::LinearSolverForZeroEBlocks(
+    LinearSolverType linear_solver_type) {
+  if (!IsSchurType(linear_solver_type)) {
+    return linear_solver_type;
+  }
+
+  if (linear_solver_type == SPARSE_SCHUR) {
+    return SPARSE_NORMAL_CHOLESKY;
+  }
+
+  if (linear_solver_type == DENSE_SCHUR) {
+    // TODO(sameeragarwal): This is probably not a great choice.
+    // Ideally, we should have a DENSE_NORMAL_CHOLESKY, that can take
+    // a BlockSparseMatrix as input.
+    return DENSE_QR;
+  }
+
+  if (linear_solver_type == ITERATIVE_SCHUR) {
+    return CGNR;
+  }
+
+  return linear_solver_type;
+}
+
 LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
   switch (options.type) {
     case CGNR:
@@ -52,9 +76,6 @@
 
     case SPARSE_NORMAL_CHOLESKY:
 #if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
-      LOG(WARNING) << "SPARSE_NORMAL_CHOLESKY is not available. Please "
-                   << "build Ceres with SuiteSparse or CXSparse. "
-                   << "Returning NULL.";
       return NULL;
 #else
       return new SparseNormalCholeskySolver(options);
@@ -62,9 +83,6 @@
 
     case SPARSE_SCHUR:
 #if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
-      LOG(WARNING) << "SPARSE_SCHUR is not available. Please "
-                   << "build Ceres with SuiteSparse or CXSparse. "
-                   << "Returning NULL.";
       return NULL;
 #else
       return new SparseSchurComplementSolver(options);
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
index 22691b3..58b9044 100644
--- a/internal/ceres/linear_solver.h
+++ b/internal/ceres/linear_solver.h
@@ -50,6 +50,26 @@
 namespace ceres {
 namespace internal {
 
+enum LinearSolverTerminationType {
+  // Termination criterion was met.
+  LINEAR_SOLVER_SUCCESS,
+
+  // Solver ran for max_num_iterations and terminated before the
+  // termination tolerance could be satisfied.
+  LINEAR_SOLVER_NO_CONVERGENCE,
+
+  // Solver was terminated due to numerical problems, generally due to
+  // the linear system being poorly conditioned.
+  LINEAR_SOLVER_FAILURE,
+
+  // Solver failed with a fatal error that cannot be recovered from,
+  // e.g. CHOLMOD ran out of memory when computing the symbolic or
+  // numeric factorization or an underlying library was called with
+  // the wrong arguments.
+  LINEAR_SOLVER_FATAL_ERROR
+};
+
+
 class LinearOperator;
 
 // Abstract base class for objects that implement algorithms for
@@ -74,9 +94,11 @@
     Options()
         : type(SPARSE_NORMAL_CHOLESKY),
           preconditioner_type(JACOBI),
+          visibility_clustering_type(CANONICAL_VIEWS),
           dense_linear_algebra_library_type(EIGEN),
           sparse_linear_algebra_library_type(SUITE_SPARSE),
           use_postordering(false),
+          dynamic_sparsity(false),
           min_num_iterations(1),
           max_num_iterations(1),
           num_threads(1),
@@ -87,14 +109,14 @@
     }
 
     LinearSolverType type;
-
     PreconditionerType preconditioner_type;
-
+    VisibilityClusteringType visibility_clustering_type;
     DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
     SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
 
     // See solver.h for information about this flag.
     bool use_postordering;
+    bool dynamic_sparsity;
 
     // Number of internal iterations that the solver uses. This
     // parameter only makes sense for iterative solvers like CG.
@@ -243,14 +265,23 @@
     Summary()
         : residual_norm(0.0),
           num_iterations(-1),
-          termination_type(FAILURE) {
+          termination_type(LINEAR_SOLVER_FAILURE) {
     }
 
     double residual_norm;
     int num_iterations;
     LinearSolverTerminationType termination_type;
+    string message;
   };
 
+  // If the optimization problem is such that there are no remaining
+  // e-blocks, a Schur type linear solver cannot be used. If the
+  // linear solver is of Schur type, this function implements a policy
+  // to select an alternate nearest linear solver to the one selected
+  // by the user. The input linear_solver_type is returned otherwise.
+  static LinearSolverType LinearSolverForZeroEBlocks(
+      LinearSolverType linear_solver_type);
+
   virtual ~LinearSolver();
 
   // Solve Ax = b.
diff --git a/internal/ceres/loss_function.cc b/internal/ceres/loss_function.cc
index b948f28..4ad01e3 100644
--- a/internal/ceres/loss_function.cc
+++ b/internal/ceres/loss_function.cc
@@ -39,8 +39,8 @@
 
 void TrivialLoss::Evaluate(double s, double rho[3]) const {
   rho[0] = s;
-  rho[1] = 1;
-  rho[2] = 0;
+  rho[1] = 1.0;
+  rho[2] = 0.0;
 }
 
 void HuberLoss::Evaluate(double s, double rho[3]) const {
@@ -48,32 +48,32 @@
     // Outlier region.
     // 'r' is always positive.
     const double r = sqrt(s);
-    rho[0] = 2 * a_ * r - b_;
-    rho[1] = a_ / r;
-    rho[2] = - rho[1] / (2 * s);
+    rho[0] = 2.0 * a_ * r - b_;
+    rho[1] = std::max(std::numeric_limits<double>::min(), a_ / r);
+    rho[2] = - rho[1] / (2.0 * s);
   } else {
     // Inlier region.
     rho[0] = s;
-    rho[1] = 1;
-    rho[2] = 0;
+    rho[1] = 1.0;
+    rho[2] = 0.0;
   }
 }
 
 void SoftLOneLoss::Evaluate(double s, double rho[3]) const {
-  const double sum = 1 + s * c_;
+  const double sum = 1.0 + s * c_;
   const double tmp = sqrt(sum);
   // 'sum' and 'tmp' are always positive, assuming that 's' is.
-  rho[0] = 2 * b_ * (tmp - 1);
-  rho[1] = 1 / tmp;
-  rho[2] = - (c_ * rho[1]) / (2 * sum);
+  rho[0] = 2.0 * b_ * (tmp - 1.0);
+  rho[1] = std::max(std::numeric_limits<double>::min(), 1.0 / tmp);
+  rho[2] = - (c_ * rho[1]) / (2.0 * sum);
 }
 
 void CauchyLoss::Evaluate(double s, double rho[3]) const {
-  const double sum = 1 + s * c_;
-  const double inv = 1 / sum;
+  const double sum = 1.0 + s * c_;
+  const double inv = 1.0 / sum;
   // 'sum' and 'inv' are always positive, assuming that 's' is.
   rho[0] = b_ * log(sum);
-  rho[1] = inv;
+  rho[1] = std::max(std::numeric_limits<double>::min(), inv);
   rho[2] = - c_ * (inv * inv);
 }
 
@@ -82,8 +82,8 @@
   const double inv = 1 / sum;
   // 'sum' and 'inv' are always positive.
   rho[0] = a_ * atan2(s, a_);
-  rho[1] = inv;
-  rho[2] = -2 * s * b_ * (inv * inv);
+  rho[1] = std::max(std::numeric_limits<double>::min(), inv);
+  rho[2] = -2.0 * s * b_ * (inv * inv);
 }
 
 TolerantLoss::TolerantLoss(double a, double b)
@@ -108,7 +108,7 @@
   } else {
     const double e_x = exp(x);
     rho[0] = b_ * log(1.0 + e_x) - c_;
-    rho[1] = e_x / (1.0 + e_x);
+    rho[1] = std::max(std::numeric_limits<double>::min(), e_x / (1.0 + e_x));
     rho[2] = 0.5 / (b_ * (1.0 + cosh(x)));
   }
 }
diff --git a/internal/ceres/low_rank_inverse_hessian.cc b/internal/ceres/low_rank_inverse_hessian.cc
index 372165f..4816e3c 100644
--- a/internal/ceres/low_rank_inverse_hessian.cc
+++ b/internal/ceres/low_rank_inverse_hessian.cc
@@ -28,6 +28,8 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include <list>
+
 #include "ceres/internal/eigen.h"
 #include "ceres/low_rank_inverse_hessian.h"
 #include "glog/logging.h"
@@ -35,6 +37,41 @@
 namespace ceres {
 namespace internal {
 
+// The (L)BFGS algorithm explicitly requires that the secant equation:
+//
+//   B_{k+1} * s_k = y_k
+//
+// Is satisfied at each iteration, where B_{k+1} is the approximated
+// Hessian at the k+1-th iteration, s_k = (x_{k+1} - x_{k}) and
+// y_k = (grad_{k+1} - grad_{k}). As the approximated Hessian must be
+// positive definite, this is equivalent to the condition:
+//
+//   s_k^T * y_k > 0     [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0]
+//
+// This condition would always be satisfied if the function was strictly
+// convex, alternatively, it is always satisfied provided that a Wolfe line
+// search is used (even if the function is not strictly convex).  See [1]
+// (p138) for a proof.
+//
+// Although Ceres will always use a Wolfe line search when using (L)BFGS,
+// practical implementation considerations mean that the line search
+// may return a point that satisfies only the Armijo condition, and thus
+// could violate the Secant equation.  As such, we will only use a step
+// to update the Hessian approximation if:
+//
+//   s_k^T * y_k > tolerance
+//
+// It is important that tolerance is very small (and >=0), as otherwise we
+// might skip the update too often and fail to capture important curvature
+// information in the Hessian.  For example going from 1e-10 -> 1e-14 improves
+// the NIST benchmark score from 43/54 to 53/54.
+//
+// [1] Nocedal J., Wright S., Numerical Optimization, 2nd Ed. Springer, 1999.
+//
+// TODO(alexs.mac): Consider using Damped BFGS update instead of
+// skipping update.
+const double kLBFGSSecantConditionHessianUpdateTolerance = 1e-14;
+
 LowRankInverseHessian::LowRankInverseHessian(
     int num_parameters,
     int max_num_corrections,
@@ -42,7 +79,6 @@
     : num_parameters_(num_parameters),
       max_num_corrections_(max_num_corrections),
       use_approximate_eigenvalue_scaling_(use_approximate_eigenvalue_scaling),
-      num_corrections_(0),
       approximate_eigenvalue_scale_(1.0),
       delta_x_history_(num_parameters, max_num_corrections),
       delta_gradient_history_(num_parameters, max_num_corrections),
@@ -52,35 +88,29 @@
 bool LowRankInverseHessian::Update(const Vector& delta_x,
                                    const Vector& delta_gradient) {
   const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient);
-  if (delta_x_dot_delta_gradient <= 1e-10) {
-    VLOG(2) << "Skipping LBFGS Update, delta_x_dot_delta_gradient too small: "
-            << delta_x_dot_delta_gradient;
+  if (delta_x_dot_delta_gradient <=
+      kLBFGSSecantConditionHessianUpdateTolerance) {
+    VLOG(2) << "Skipping L-BFGS Update, delta_x_dot_delta_gradient too "
+            << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
+            << kLBFGSSecantConditionHessianUpdateTolerance
+            << " (Secant condition).";
     return false;
   }
 
-  if (num_corrections_ == max_num_corrections_) {
-    // TODO(sameeragarwal): This can be done more efficiently using
-    // a circular buffer/indexing scheme, but for simplicity we will
-    // do the expensive copy for now.
-    delta_x_history_.block(0, 0, num_parameters_, max_num_corrections_ - 1) =
-        delta_x_history_
-        .block(0, 1, num_parameters_, max_num_corrections_ - 1);
 
-    delta_gradient_history_
-        .block(0, 0, num_parameters_, max_num_corrections_ - 1) =
-        delta_gradient_history_
-        .block(0, 1, num_parameters_, max_num_corrections_ - 1);
-
-    delta_x_dot_delta_gradient_.head(num_corrections_ - 1) =
-        delta_x_dot_delta_gradient_.tail(num_corrections_ - 1);
-  } else {
-    ++num_corrections_;
+  int next = indices_.size();
+  // Once the size of the list reaches max_num_corrections_, simulate
+  // a circular buffer by removing the first element of the list and
+  // making it the next position where the LBFGS history is stored.
+  if (next == max_num_corrections_) {
+    next = indices_.front();
+    indices_.pop_front();
   }
 
-  delta_x_history_.col(num_corrections_ - 1) = delta_x;
-  delta_gradient_history_.col(num_corrections_ - 1) = delta_gradient;
-  delta_x_dot_delta_gradient_(num_corrections_ - 1) =
-      delta_x_dot_delta_gradient;
+  indices_.push_back(next);
+  delta_x_history_.col(next) = delta_x;
+  delta_gradient_history_.col(next) = delta_gradient;
+  delta_x_dot_delta_gradient_(next) = delta_x_dot_delta_gradient;
   approximate_eigenvalue_scale_ =
       delta_x_dot_delta_gradient / delta_gradient.squaredNorm();
   return true;
@@ -93,12 +123,16 @@
 
   search_direction = gradient;
 
-  Vector alpha(num_corrections_);
+  const int num_corrections = indices_.size();
+  Vector alpha(num_corrections);
 
-  for (int i = num_corrections_ - 1; i >= 0; --i) {
-    alpha(i) = delta_x_history_.col(i).dot(search_direction) /
-        delta_x_dot_delta_gradient_(i);
-    search_direction -= alpha(i) * delta_gradient_history_.col(i);
+  for (std::list<int>::const_reverse_iterator it = indices_.rbegin();
+       it != indices_.rend();
+       ++it) {
+    const double alpha_i = delta_x_history_.col(*it).dot(search_direction) /
+        delta_x_dot_delta_gradient_(*it);
+    search_direction -= alpha_i * delta_gradient_history_.col(*it);
+    alpha(*it) = alpha_i;
   }
 
   if (use_approximate_eigenvalue_scaling_) {
@@ -133,12 +167,18 @@
     //     20(5), 863-874, 1974.
     // [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999.
     search_direction *= approximate_eigenvalue_scale_;
+
+    VLOG(4) << "Applying approximate_eigenvalue_scale: "
+            << approximate_eigenvalue_scale_ << " to initial inverse Hessian "
+            << "approximation.";
   }
 
-  for (int i = 0; i < num_corrections_; ++i) {
-    const double beta = delta_gradient_history_.col(i).dot(search_direction) /
-        delta_x_dot_delta_gradient_(i);
-    search_direction += delta_x_history_.col(i) * (alpha(i) - beta);
+  for (std::list<int>::const_iterator it = indices_.begin();
+       it != indices_.end();
+       ++it) {
+    const double beta = delta_gradient_history_.col(*it).dot(search_direction) /
+        delta_x_dot_delta_gradient_(*it);
+    search_direction += delta_x_history_.col(*it) * (alpha(*it) - beta);
   }
 }
 
diff --git a/internal/ceres/low_rank_inverse_hessian.h b/internal/ceres/low_rank_inverse_hessian.h
index 7d293d0..19ab760 100644
--- a/internal/ceres/low_rank_inverse_hessian.h
+++ b/internal/ceres/low_rank_inverse_hessian.h
@@ -34,6 +34,8 @@
 #ifndef CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_
 #define CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_
 
+#include <list>
+
 #include "ceres/internal/eigen.h"
 #include "ceres/linear_operator.h"
 
@@ -93,11 +95,11 @@
   const int num_parameters_;
   const int max_num_corrections_;
   const bool use_approximate_eigenvalue_scaling_;
-  int num_corrections_;
   double approximate_eigenvalue_scale_;
-  Matrix delta_x_history_;
-  Matrix delta_gradient_history_;
+  ColMajorMatrix delta_x_history_;
+  ColMajorMatrix delta_gradient_history_;
   Vector delta_x_dot_delta_gradient_;
+  std::list<int> indices_;
 };
 
 }  // namespace internal
diff --git a/internal/ceres/miniglog/glog/logging.cc b/internal/ceres/miniglog/glog/logging.cc
new file mode 100644
index 0000000..32a78ce
--- /dev/null
+++ b/internal/ceres/miniglog/glog/logging.cc
@@ -0,0 +1,39 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "glog/logging.h"
+
+namespace google {
+
+// This is the set of log sinks. This must be in a separate library to ensure
+// that there is only one instance of this across the entire program.
+std::set<google::LogSink *> log_sinks_global;
+
+}  // namespace google
diff --git a/internal/ceres/miniglog/glog/logging.h b/internal/ceres/miniglog/glog/logging.h
index bab3191..e9c0dff 100644
--- a/internal/ceres/miniglog/glog/logging.h
+++ b/internal/ceres/miniglog/glog/logging.h
@@ -1,83 +1,114 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
 // Author: settinger@google.com (Scott Ettinger)
-
-// Simplified Google3 style logging with Android support.
-// Supported macros are : LOG(INFO), LOG(WARNING), LOG(ERROR), LOG(FATAL),
-//                        and VLOG(n).
+//         mierle@gmail.com (Keir Mierle)
 //
-// Portions of this code are taken from the GLOG package.  This code
-// is only a small subset of the GLOG functionality. And like GLOG,
-// higher levels are more verbose.
+// Simplified Glog style logging with Android support. Supported macros in
+// decreasing severity level per line:
 //
-// Notable differences from GLOG :
+//   VLOG(2), VLOG(N)
+//   VLOG(1),
+//   LOG(INFO), VLOG(0), LG
+//   LOG(WARNING),
+//   LOG(ERROR),
+//   LOG(FATAL),
 //
-// 1. lack of support for displaying unprintable characters and lack
-// of stack trace information upon failure of the CHECK macros.
-// 2. All output is tagged with the string "native".
-// 3. While there is no runtime flag filtering logs (-v, -vmodule), the
-//    compile time define MAX_LOG_LEVEL can be used to silence any
-//    logging above the given level.
+// With VLOG(n), the output is directed to one of the 6 Android log levels:
 //
-// -------------------------------- Usage ------------------------------------
-// Basic usage :
-// LOG(<severity level>) acts as a c++ stream to the Android logcat output.
-// e.g. LOG(INFO) << "Value of counter = " << counter;
+//   2 - Verbose
+//   1 - Debug
+//   0 - Info
+//  -1 - Warning
+//  -2 - Error
+//  -3 - Fatal
 //
-// Valid severity levels include INFO, WARNING, ERROR, FATAL.
-// The various severity levels are routed to the corresponding Android logcat
-// output.
-// LOG(FATAL) outputs to the log and then terminates.
+// Any logging of level 2 and above is directed to the Verbose level. All
+// Android log output is tagged with the string "native".
 //
-// VLOG(<severity level>) can also be used.
-// VLOG(n) output is directed to the Android logcat levels as follows :
-//  >=2 - Verbose
-//    1 - Debug
-//    0 - Info
-//   -1 - Warning
-//   -2 - Error
-// <=-3 - Fatal
-// Note that VLOG(FATAL) will terminate the program.
+// If the symbol ANDROID is not defined, all output goes to std::cerr.
+// This allows code to be built on a different system for debug.
 //
-// CHECK macros are defined to test for conditions within code.  Any CHECK
-// that fails will log the failure and terminate the application.
+// Portions of this code are taken from the GLOG package.  This code is only a
+// small subset of the GLOG functionality. Notable differences from GLOG
+// behavior include lack of support for displaying unprintable characters and
+// lack of stack trace information upon failure of the CHECK macros.  On
+// non-Android systems, log output goes to std::cerr and is not written to a
+// file.
+//
+// CHECK macros are defined to test for conditions within code.  Any CHECK that
+// fails will log the failure and terminate the application.
 // e.g. CHECK_GE(3, 2) will pass while CHECK_GE(3, 4) will fail after logging
 //      "Check failed 3 >= 4".
-// The following CHECK macros are defined :
 //
-// CHECK(condition) - fails if condition is false and logs condition.
-// CHECK_NOTNULL(variable) - fails if the variable is NULL.
+// The following CHECK macros are defined:
+//
+//   CHECK(condition)        - fails if condition is false and logs condition.
+//   CHECK_NOTNULL(variable) - fails if the variable is NULL.
 //
 // The following binary check macros are also defined :
-//    Macro                 operator applied
-// ------------------------------------------
-// CHECK_EQ(val1, val2)      val1 == val2
-// CHECK_NE(val1, val2)      val1 != val2
-// CHECK_GT(val1, val2)      val1 > val2
-// CHECK_GE(val1, val2)      val1 >= val2
-// CHECK_LT(val1, val2)      val1 < val2
-// CHECK_LE(val1, val2)      val1 <= val2
+//
+//   Macro                     Operator equivalent
+//   --------------------      -------------------
+//   CHECK_EQ(val1, val2)      val1 == val2
+//   CHECK_NE(val1, val2)      val1 != val2
+//   CHECK_GT(val1, val2)      val1 > val2
+//   CHECK_GE(val1, val2)      val1 >= val2
+//   CHECK_LT(val1, val2)      val1 < val2
+//   CHECK_LE(val1, val2)      val1 <= val2
 //
 // Debug only versions of all of the check macros are also defined.  These
 // macros generate no code in a release build, but avoid unused variable
 // warnings / errors.
-// To use the debug only versions, Prepend a D to the normal check macros.
-// e.g. DCHECK_EQ(a, b);
+//
+// To use the debug only versions, prepend a D to the normal check macros, e.g.
+// DCHECK_EQ(a, b).
 
-#ifndef MOBILE_BASE_LOGGING_H_
-#define MOBILE_BASE_LOGGING_H_
+#ifndef CERES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
+#define CERES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
 
-// Definitions for building on an Android system.
-#include <android/log.h>
-#include <time.h>
+#ifdef ANDROID
+#  include <android/log.h>
+#endif  // ANDROID
 
 #include <algorithm>
-#include <iostream>
-#include <string>
+#include <ctime>
 #include <fstream>
+#include <iostream>
 #include <set>
 #include <sstream>
+#include <string>
 #include <vector>
 
+// For appropriate definition of CERES_EXPORT macro.
+#include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
+
 // Log severity level constants.
 const int FATAL   = -3;
 const int ERROR   = -2;
@@ -94,26 +125,29 @@
 const int ERROR   = ::ERROR;
 const int FATAL   = ::FATAL;
 
-#ifdef ENABLE_LOG_SINKS
-
-// Sink class used for integration with mock and test functions.
-// If sinks are added, all log output is also sent to each sink through
-// the send function.  In this implementation, WaitTillSent() is called
-// immediately after the send.
+// Sink class used for integration with mock and test functions. If sinks are
+// added, all log output is also sent to each sink through the send function.
+// In this implementation, WaitTillSent() is called immediately after the send.
 // This implementation is not thread safe.
-class LogSink {
+class CERES_EXPORT LogSink {
  public:
   virtual ~LogSink() {}
-  virtual void send(LogSeverity severity, const char* full_filename,
-                    const char* base_filename, int line,
+  virtual void send(LogSeverity severity,
+                    const char* full_filename,
+                    const char* base_filename,
+                    int line,
                     const struct tm* tm_time,
-                    const char* message, size_t message_len) = 0;
+                    const char* message,
+                    size_t message_len) = 0;
   virtual void WaitTillSent() = 0;
 };
 
-// Global set of log sinks.
-// TODO(settinger): Move this into a .cc file.
-static std::set<LogSink *> log_sinks_global;
+// Global set of log sinks. The actual object is defined in logging.cc.
+extern CERES_EXPORT std::set<LogSink *> log_sinks_global;
+
+inline void InitGoogleLogging(char *argv) {
+  // Do nothing; this is ignored.
+}
 
 // Note: the Log sink functions are not thread safe.
 inline void AddLogSink(LogSink *sink) {
@@ -124,20 +158,17 @@
   log_sinks_global.erase(sink);
 }
 
-#endif  // #ifdef ENABLE_LOG_SINKS
-
-inline void InitGoogleLogging(char *argv) {}
-
 }  // namespace google
 
 // ---------------------------- Logger Class --------------------------------
 
 // Class created for each use of the logging macros.
 // The logger acts as a stream and routes the final stream contents to the
-// Android logcat output at the proper filter level.  This class should not
+// Android logcat output at the proper filter level.  If ANDROID is not
+// defined, output is directed to std::cerr.  This class should not
 // be directly instantiated in code, rather it should be invoked through the
-// use of the log macros LOG, or VLOG.
-class MessageLogger {
+// use of the log macros LG, LOG, or VLOG.
+class CERES_EXPORT MessageLogger {
  public:
   MessageLogger(const char *file, int line, const char *tag, int severity)
     : file_(file), line_(line), tag_(tag), severity_(severity) {
@@ -148,17 +179,14 @@
 
   // Output the contents of the stream to the proper channel on destruction.
   ~MessageLogger() {
-#ifdef MAX_LOG_LEVEL
-    if (severity_ > MAX_LOG_LEVEL && severity_ > FATAL) {
-      return;
-    }
-#endif
     stream_ << "\n";
+
+#ifdef ANDROID
     static const int android_log_levels[] = {
         ANDROID_LOG_FATAL,    // LOG(FATAL)
         ANDROID_LOG_ERROR,    // LOG(ERROR)
         ANDROID_LOG_WARN,     // LOG(WARNING)
-        ANDROID_LOG_INFO,     // LOG(INFO), VLOG(0)
+        ANDROID_LOG_INFO,     // LOG(INFO), LG, VLOG(0)
         ANDROID_LOG_DEBUG,    // VLOG(1)
         ANDROID_LOG_VERBOSE,  // VLOG(2) .. VLOG(N)
     };
@@ -178,14 +206,14 @@
                           tag_.c_str(),
                           "terminating.\n");
     }
-
-#ifdef ENABLE_LOG_SINKS
+#else
+    // If not building on Android, log all output to std::cerr.
+    std::cerr << stream_.str();
+#endif  // ANDROID
 
     LogToSinks(severity_);
     WaitForSinks();
 
-#endif  // #ifdef ENABLE_LOG_SINKS
-
     // Android logging at level FATAL does not terminate execution, so abort()
     // is still required to stop the program.
     if (severity_ == FATAL) {
@@ -197,41 +225,49 @@
   std::stringstream &stream() { return stream_; }
 
  private:
-#ifdef ENABLE_LOG_SINKS
-
   void LogToSinks(int severity) {
     time_t rawtime;
-    struct tm * timeinfo;
+    time (&rawtime);
 
-    time ( &rawtime );
-    timeinfo = localtime ( &rawtime );
-    std::set<google::LogSink *>::iterator iter;
+    struct tm* timeinfo;
+#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__)
+    // On Windows, use secure localtime_s not localtime.
+    struct tm windows_timeinfo;
+    timeinfo = &windows_timeinfo;
+    localtime_s(timeinfo, &rawtime);
+#else
+    timeinfo = localtime(&rawtime);
+#endif
+
+    std::set<google::LogSink*>::iterator iter;
     // Send the log message to all sinks.
     for (iter = google::log_sinks_global.begin();
-         iter != google::log_sinks_global.end(); ++iter)
+         iter != google::log_sinks_global.end(); ++iter) {
       (*iter)->send(severity, file_.c_str(), filename_only_.c_str(), line_,
                     timeinfo, stream_.str().c_str(), stream_.str().size());
+    }
   }
 
   void WaitForSinks() {
-    // TODO(settinger): add locks for thread safety.
+    // TODO(settinger): Add locks for thread safety.
     std::set<google::LogSink *>::iterator iter;
+
     // Call WaitTillSent() for all sinks.
     for (iter = google::log_sinks_global.begin();
-         iter != google::log_sinks_global.end(); ++iter)
+         iter != google::log_sinks_global.end(); ++iter) {
       (*iter)->WaitTillSent();
+    }
   }
 
-#endif // #ifdef ENABLE_LOG_SINKS
-
   void StripBasename(const std::string &full_path, std::string *filename) {
-    // TODO(settinger): add support for OS with different path separators.
+    // TODO(settinger): Add support for OSs with different path separators.
     const char kSeparator = '/';
     size_t pos = full_path.rfind(kSeparator);
-    if (pos != std::string::npos)
+    if (pos != std::string::npos) {
       *filename = full_path.substr(pos + 1, std::string::npos);
-    else
+    } else {
       *filename = full_path;
+    }
   }
 
   std::string file_;
@@ -247,7 +283,7 @@
 // This class is used to explicitly ignore values in the conditional
 // logging macros.  This avoids compiler warnings like "value computed
 // is not used" and "statement has no effect".
-class LoggerVoidify {
+class CERES_EXPORT LoggerVoidify {
  public:
   LoggerVoidify() { }
   // This has to be an operator with a precedence lower than << but
@@ -257,8 +293,8 @@
 
 // Log only if condition is met.  Otherwise evaluates to void.
 #define LOG_IF(severity, condition) \
-  !(condition) ? (void) 0 : LoggerVoidify() & \
-    MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
+    !(condition) ? (void) 0 : LoggerVoidify() & \
+      MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
 
 // Log only if condition is NOT met.  Otherwise evaluates to void.
 #define LOG_IF_FALSE(severity, condition) LOG_IF(severity, !(condition))
@@ -267,30 +303,31 @@
 // google3 code is discouraged and the following shortcut exists for
 // backward compatibility with existing code.
 #ifdef MAX_LOG_LEVEL
-#define LOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
-#define VLOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
-#define LG LOG_IF(INFO, INFO <= MAX_LOG_LEVEL)
+#  define LOG(n)  LOG_IF(n, n <= MAX_LOG_LEVEL)
+#  define VLOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
+#  define LG      LOG_IF(INFO, INFO <= MAX_LOG_LEVEL)
+#  define VLOG_IF(n, condition) LOG_IF(n, (n <= MAX_LOG_LEVEL) && condition)
 #else
-#define LOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream()
-#define VLOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream()
-#define LG MessageLogger((char *)__FILE__, __LINE__, "native", INFO).stream()
+#  define LOG(n)  MessageLogger((char *)__FILE__, __LINE__, "native", n).stream()    // NOLINT
+#  define VLOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream()    // NOLINT
+#  define LG      MessageLogger((char *)__FILE__, __LINE__, "native", INFO).stream() // NOLINT
+#  define VLOG_IF(n, condition) LOG_IF(n, condition)
 #endif
 
 // Currently, VLOG is always on for levels below MAX_LOG_LEVEL.
 #ifndef MAX_LOG_LEVEL
-#define VLOG_IS_ON(x) (1)
+#  define VLOG_IS_ON(x) (1)
 #else
-#define VLOG_IS_ON(x) (x <= MAX_LOG_LEVEL)
+#  define VLOG_IS_ON(x) (x <= MAX_LOG_LEVEL)
 #endif
 
 #ifndef NDEBUG
-#define DLOG LOG
+#  define DLOG LOG
 #else
-#define DLOG(severity) true ? (void) 0 : LoggerVoidify() & \
-    MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
+#  define DLOG(severity) true ? (void) 0 : LoggerVoidify() & \
+      MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
 #endif
 
-// ---------------------------- CHECK helpers --------------------------------
 
 // Log a message and terminate.
 template<class T>
@@ -307,19 +344,19 @@
 
 #ifndef NDEBUG
 // Debug only version of CHECK
-#define DCHECK(condition) LOG_IF_FALSE(FATAL, condition) \
-        << "Check failed: " #condition " "
+#  define DCHECK(condition) LOG_IF_FALSE(FATAL, condition) \
+          << "Check failed: " #condition " "
 #else
 // Optimized version - generates no code.
-#define DCHECK(condition) if (false) LOG_IF_FALSE(FATAL, condition) \
-        << "Check failed: " #condition " "
+#  define DCHECK(condition) if (false) LOG_IF_FALSE(FATAL, condition) \
+          << "Check failed: " #condition " "
 #endif  // NDEBUG
 
 // ------------------------- CHECK_OP macros ---------------------------------
 
 // Generic binary operator check macro. This should not be directly invoked,
 // instead use the binary comparison macros defined below.
-#define CHECK_OP(val1, val2, op) LOG_IF_FALSE(FATAL, (val1 op val2)) \
+#define CHECK_OP(val1, val2, op) LOG_IF_FALSE(FATAL, ((val1) op (val2))) \
   << "Check failed: " #val1 " " #op " " #val2 " "
 
 // Check_op macro definitions
@@ -332,20 +369,20 @@
 
 #ifndef NDEBUG
 // Debug only versions of CHECK_OP macros.
-#define DCHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
-#define DCHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
-#define DCHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
-#define DCHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
-#define DCHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
-#define DCHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
+#  define DCHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
+#  define DCHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
+#  define DCHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
+#  define DCHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
+#  define DCHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
+#  define DCHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
 #else
 // These versions generate no code in optimized mode.
-#define DCHECK_EQ(val1, val2) if (false) CHECK_OP(val1, val2, ==)
-#define DCHECK_NE(val1, val2) if (false) CHECK_OP(val1, val2, !=)
-#define DCHECK_LE(val1, val2) if (false) CHECK_OP(val1, val2, <=)
-#define DCHECK_LT(val1, val2) if (false) CHECK_OP(val1, val2, <)
-#define DCHECK_GE(val1, val2) if (false) CHECK_OP(val1, val2, >=)
-#define DCHECK_GT(val1, val2) if (false) CHECK_OP(val1, val2, >)
+#  define DCHECK_EQ(val1, val2) if (false) CHECK_OP(val1, val2, ==)
+#  define DCHECK_NE(val1, val2) if (false) CHECK_OP(val1, val2, !=)
+#  define DCHECK_LE(val1, val2) if (false) CHECK_OP(val1, val2, <=)
+#  define DCHECK_LT(val1, val2) if (false) CHECK_OP(val1, val2, <)
+#  define DCHECK_GE(val1, val2) if (false) CHECK_OP(val1, val2, >=)
+#  define DCHECK_GT(val1, val2) if (false) CHECK_OP(val1, val2, >)
 #endif  // NDEBUG
 
 // ---------------------------CHECK_NOTNULL macros ---------------------------
@@ -384,8 +421,6 @@
   CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
 #endif  // NDEBUG
 
-inline void PrintAndroid(const char *msg) {
-  __android_log_write(ANDROID_LOG_VERBOSE, "native", msg);
-}
+#include "ceres/internal/reenable_warnings.h"
 
-#endif  // MOBILE_BASE_LOGGING_H_
+#endif  // CERES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
diff --git a/internal/ceres/minimizer.cc b/internal/ceres/minimizer.cc
index 2e2c15a..6c3b68d 100644
--- a/internal/ceres/minimizer.cc
+++ b/internal/ceres/minimizer.cc
@@ -37,13 +37,14 @@
 
 Minimizer::~Minimizer() {}
 
-bool Minimizer::RunCallbacks(const vector<IterationCallback*> callbacks,
+bool Minimizer::RunCallbacks(const Minimizer::Options& options,
                              const IterationSummary& iteration_summary,
                              Solver::Summary* summary) {
+  const bool is_not_silent = !options.is_silent;
   CallbackReturnType status = SOLVER_CONTINUE;
   int i = 0;
-  while (status == SOLVER_CONTINUE && i < callbacks.size()) {
-    status = (*callbacks[i])(iteration_summary);
+  while (status == SOLVER_CONTINUE && i < options.callbacks.size()) {
+    status = (*options.callbacks[i])(iteration_summary);
     ++i;
   }
   switch (status) {
@@ -51,11 +52,13 @@
       return true;
     case SOLVER_TERMINATE_SUCCESSFULLY:
       summary->termination_type = USER_SUCCESS;
-      VLOG(1) << "Terminating: User callback returned USER_SUCCESS.";
+      summary->message = "User callback returned SOLVER_TERMINATE_SUCCESSFULLY.";
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
       return false;
     case SOLVER_ABORT:
-      summary->termination_type = USER_ABORT;
-      VLOG(1) << "Terminating: User callback returned  USER_ABORT.";
+      summary->termination_type = USER_FAILURE;
+      summary->message = "User callback returned SOLVER_ABORT.";
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
       return false;
     default:
       LOG(FATAL) << "Unknown type of user callback status";
diff --git a/internal/ceres/minimizer.h b/internal/ceres/minimizer.h
index 622e9ce..f1da3f7 100644
--- a/internal/ceres/minimizer.h
+++ b/internal/ceres/minimizer.h
@@ -107,12 +107,14 @@
           options.line_search_sufficient_curvature_decrease;
       max_line_search_step_expansion =
           options.max_line_search_step_expansion;
+      is_silent = (options.logging_type == SILENT);
       evaluator = NULL;
       trust_region_strategy = NULL;
       jacobian = NULL;
       callbacks = options.callbacks;
       inner_iteration_minimizer = NULL;
       inner_iteration_tolerance = options.inner_iteration_tolerance;
+      is_constrained = false;
     }
 
     int max_num_iterations;
@@ -153,6 +155,8 @@
     double line_search_sufficient_curvature_decrease;
     double max_line_search_step_expansion;
 
+    // If true, then all logging is disabled.
+    bool is_silent;
 
     // List of callbacks that are executed by the Minimizer at the end
     // of each iteration.
@@ -177,9 +181,12 @@
 
     Minimizer* inner_iteration_minimizer;
     double inner_iteration_tolerance;
+
+    // Use a bounds constrained optimization algorithm.
+    bool is_constrained;
   };
 
-  static bool RunCallbacks(const vector<IterationCallback*> callbacks,
+  static bool RunCallbacks(const Options& options,
                            const IterationSummary& iteration_summary,
                            Solver::Summary* summary);
 
diff --git a/internal/ceres/minimizer_test.cc b/internal/ceres/minimizer_test.cc
index 1058036..0d8b617 100644
--- a/internal/ceres/minimizer_test.cc
+++ b/internal/ceres/minimizer_test.cc
@@ -44,7 +44,7 @@
   }
 };
 
-TEST(MinimizerTest, InitializationCopiesCallbacks) {
+TEST(Minimizer, InitializationCopiesCallbacks) {
   FakeIterationCallback callback0;
   FakeIterationCallback callback1;
 
@@ -59,5 +59,42 @@
   EXPECT_EQ(minimizer_options.callbacks[1], &callback1);
 }
 
+class AbortingIterationCallback : public IterationCallback {
+ public:
+  virtual ~AbortingIterationCallback() {}
+  virtual CallbackReturnType operator()(const IterationSummary& summary) {
+    return SOLVER_ABORT;
+  }
+};
+
+TEST(Minimizer, UserAbortUpdatesSummaryMessage) {
+  AbortingIterationCallback callback;
+  Solver::Options solver_options;
+  solver_options.callbacks.push_back(&callback);
+  Minimizer::Options minimizer_options(solver_options);
+  Solver::Summary summary;
+  Minimizer::RunCallbacks(minimizer_options, IterationSummary(), &summary);
+  EXPECT_EQ(summary.message, "User callback returned SOLVER_ABORT.");
+}
+
+class SucceedingIterationCallback : public IterationCallback {
+ public:
+  virtual ~SucceedingIterationCallback() {}
+  virtual CallbackReturnType operator()(const IterationSummary& summary) {
+    return SOLVER_TERMINATE_SUCCESSFULLY;
+  }
+};
+
+TEST(Minimizer, UserSuccessUpdatesSummaryMessage) {
+  SucceedingIterationCallback callback;
+  Solver::Options solver_options;
+  solver_options.callbacks.push_back(&callback);
+  Minimizer::Options minimizer_options(solver_options);
+  Solver::Summary summary;
+  Minimizer::RunCallbacks(minimizer_options, IterationSummary(), &summary);
+  EXPECT_EQ(summary.message,
+            "User callback returned SOLVER_TERMINATE_SUCCESSFULLY.");
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/mutex.h b/internal/ceres/mutex.h
index 0c48ed3..97e2cd3 100644
--- a/internal/ceres/mutex.h
+++ b/internal/ceres/mutex.h
@@ -95,6 +95,8 @@
 #ifndef CERES_INTERNAL_MUTEX_H_
 #define CERES_INTERNAL_MUTEX_H_
 
+#include "ceres/internal/port.h"
+
 #if defined(CERES_NO_THREADS)
   typedef int MutexType;      // to keep a lock-count
 #elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
@@ -112,7 +114,9 @@
 // To avoid macro definition of ERROR.
 # define NOGDI
 // To avoid macro definition of min/max.
-# define NOMINMAX
+# ifndef NOMINMAX
+#   define NOMINMAX
+# endif
 # include <windows.h>
   typedef CRITICAL_SECTION MutexType;
 #elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK)
diff --git a/internal/ceres/numeric_diff_cost_function_test.cc b/internal/ceres/numeric_diff_cost_function_test.cc
index 3953ded..422c712 100644
--- a/internal/ceres/numeric_diff_cost_function_test.cc
+++ b/internal/ceres/numeric_diff_cost_function_test.cc
@@ -184,5 +184,18 @@
           new SizeTestingCostFunction<2,2>, ceres::TAKE_OWNERSHIP));
 }
 
+TEST(NumericDiffCostFunction, EasyCaseFunctorCentralDifferencesAndDynamicNumResiduals) {
+  internal::scoped_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor,
+                                  CENTRAL,
+                                  ceres::DYNAMIC,
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+                                      new EasyFunctor, TAKE_OWNERSHIP, 3));
+  EasyFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/ordered_groups_test.cc b/internal/ceres/ordered_groups_test.cc
index 700e788..7719d35 100644
--- a/internal/ceres/ordered_groups_test.cc
+++ b/internal/ceres/ordered_groups_test.cc
@@ -38,7 +38,7 @@
 namespace ceres {
 namespace internal {
 
-TEST(OrderedGroup, EmptyOrderedGroupBehavesCorrectly) {
+TEST(OrderedGroups, EmptyOrderedGroupBehavesCorrectly) {
   ParameterBlockOrdering ordering;
   EXPECT_EQ(ordering.NumGroups(), 0);
   EXPECT_EQ(ordering.NumElements(), 0);
@@ -48,7 +48,7 @@
   EXPECT_FALSE(ordering.Remove(&x));
 }
 
-TEST(OrderedGroup, EverythingInOneGroup) {
+TEST(OrderedGroups, EverythingInOneGroup) {
   ParameterBlockOrdering ordering;
   double x[3];
   ordering.AddElementToGroup(x, 1);
@@ -75,7 +75,7 @@
   EXPECT_EQ(ordering.GroupId(x + 2), 1);
 }
 
-TEST(OrderedGroup, StartInOneGroupAndThenSplit) {
+TEST(OrderedGroups, StartInOneGroupAndThenSplit) {
   ParameterBlockOrdering ordering;
   double x[3];
   ordering.AddElementToGroup(x, 1);
@@ -103,7 +103,7 @@
   EXPECT_EQ(ordering.GroupId(x + 2), 1);
 }
 
-TEST(OrderedGroup, AddAndRemoveEveryThingFromOneGroup) {
+TEST(OrderedGroups, AddAndRemoveEveryThingFromOneGroup) {
   ParameterBlockOrdering ordering;
   double x[3];
   ordering.AddElementToGroup(x, 1);
@@ -133,7 +133,7 @@
   EXPECT_EQ(ordering.GroupId(x + 2), 5);
 }
 
-TEST(OrderedGroup, ReverseOrdering) {
+TEST(OrderedGroups, ReverseOrdering) {
   ParameterBlockOrdering ordering;
   double x[3];
   ordering.AddElementToGroup(x, 1);
@@ -159,5 +159,61 @@
   EXPECT_EQ(ordering.GroupId(x + 2), 2);
 }
 
+TEST(OrderedGroups, BulkRemove) {
+  ParameterBlockOrdering ordering;
+  double x[3];
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 2);
+  ordering.AddElementToGroup(x + 2, 2);
+
+  vector<double*> elements_to_remove;
+  elements_to_remove.push_back(x);
+  elements_to_remove.push_back(x + 2);
+
+  EXPECT_EQ(ordering.Remove(elements_to_remove), 2);
+  EXPECT_EQ(ordering.NumElements(), 1);
+  EXPECT_EQ(ordering.GroupId(x), -1);
+  EXPECT_EQ(ordering.GroupId(x + 1), 2);
+  EXPECT_EQ(ordering.GroupId(x + 2), -1);
+}
+
+TEST(OrderedGroups, BulkRemoveWithNoElements) {
+  ParameterBlockOrdering ordering;
+
+  double x[3];
+  vector<double*> elements_to_remove;
+  elements_to_remove.push_back(x);
+  elements_to_remove.push_back(x + 2);
+
+  EXPECT_EQ(ordering.Remove(elements_to_remove), 0);
+
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 2);
+  ordering.AddElementToGroup(x + 2, 2);
+
+  elements_to_remove.clear();
+  EXPECT_EQ(ordering.Remove(elements_to_remove), 0);
+}
+
+TEST(OrderedGroups, MinNonZeroGroup) {
+  ParameterBlockOrdering ordering;
+  double x[3];
+
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 1);
+  ordering.AddElementToGroup(x + 2, 2);
+
+  EXPECT_EQ(ordering.MinNonZeroGroup(), 1);
+  ordering.Remove(x);
+
+  EXPECT_EQ(ordering.MinNonZeroGroup(), 1);
+  ordering.Remove(x + 1);
+
+  EXPECT_EQ(ordering.MinNonZeroGroup(), 2);
+  ordering.Remove(x + 2);
+
+  // No non-zero groups left.
+  EXPECT_DEATH_IF_SUPPORTED(ordering.MinNonZeroGroup(), "NumGroups()");
+}
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/parameter_block.h b/internal/ceres/parameter_block.h
index 695fa6f..7bc823d 100644
--- a/internal/ceres/parameter_block.h
+++ b/internal/ceres/parameter_block.h
@@ -31,7 +31,9 @@
 #ifndef CERES_INTERNAL_PARAMETER_BLOCK_H_
 #define CERES_INTERNAL_PARAMETER_BLOCK_H_
 
+#include <algorithm>
 #include <cstdlib>
+#include <limits>
 #include <string>
 #include "ceres/array_utils.h"
 #include "ceres/collections_port.h"
@@ -180,16 +182,59 @@
     }
   }
 
+  void SetUpperBound(int index, double upper_bound) {
+    CHECK_LT(index, size_);
+
+    if (upper_bounds_.get() == NULL) {
+      upper_bounds_.reset(new double[size_]);
+      std::fill(upper_bounds_.get(),
+                upper_bounds_.get() + size_,
+                std::numeric_limits<double>::max());
+    }
+
+    upper_bounds_[index] = upper_bound;
+  }
+
+  void SetLowerBound(int index, double lower_bound) {
+    CHECK_LT(index, size_);
+
+    if (lower_bounds_.get() == NULL) {
+      lower_bounds_.reset(new double[size_]);
+      std::fill(lower_bounds_.get(),
+                lower_bounds_.get() + size_,
+                -std::numeric_limits<double>::max());
+    }
+
+    lower_bounds_[index] = lower_bound;
+  }
+
   // Generalization of the addition operation. This is the same as
-  // LocalParameterization::Plus() but uses the parameter's current state
-  // instead of operating on a passed in pointer.
+  // LocalParameterization::Plus() followed by projection onto the
+  // hyper cube implied by the bounds constraints.
   bool Plus(const double *x, const double* delta, double* x_plus_delta) {
-    if (local_parameterization_ == NULL) {
+    if (local_parameterization_ != NULL) {
+      if (!local_parameterization_->Plus(x, delta, x_plus_delta)) {
+        return false;
+      }
+    } else {
       VectorRef(x_plus_delta, size_) = ConstVectorRef(x, size_) +
                                        ConstVectorRef(delta,  size_);
-      return true;
     }
-    return local_parameterization_->Plus(x, delta, x_plus_delta);
+
+    // Project onto the box constraints.
+    if (lower_bounds_.get() != NULL) {
+      for (int i = 0; i < size_; ++i) {
+        x_plus_delta[i] = std::max(x_plus_delta[i], lower_bounds_[i]);
+      }
+    }
+
+    if (upper_bounds_.get() != NULL) {
+      for (int i = 0; i < size_; ++i) {
+        x_plus_delta[i] = std::min(x_plus_delta[i], upper_bounds_[i]);
+      }
+    }
+
+    return true;
   }
 
   string ToString() const {
@@ -234,6 +279,22 @@
     return residual_blocks_.get();
   }
 
+  double LowerBoundForParameter(int index) const {
+    if (lower_bounds_.get() == NULL) {
+      return -std::numeric_limits<double>::max();
+    } else {
+      return lower_bounds_[index];
+    }
+  }
+
+  double UpperBoundForParameter(int index) const {
+    if (upper_bounds_.get() == NULL) {
+      return std::numeric_limits<double>::max();
+    } else {
+      return upper_bounds_[index];
+    }
+  }
+
  private:
   void Init(double* user_state,
             int size,
@@ -312,6 +373,20 @@
   // If non-null, contains the residual blocks this parameter block is in.
   scoped_ptr<ResidualBlockSet> residual_blocks_;
 
+  // Upper and lower bounds for the parameter block.  SetUpperBound
+  // and SetLowerBound lazily initialize the upper_bounds_ and
+  // lower_bounds_ arrays. If they are never called, then memory for
+  // these arrays is never allocated. Thus for problems where there
+// are no bounds, or only one-sided bounds, we do not pay the cost of
+  // allocating memory for the inactive bounds constraints.
+  //
+  // Upon initialization these arrays are initialized to
+  // std::numeric_limits<double>::max() and
+  // -std::numeric_limits<double>::max() respectively which correspond
+  // to the parameter block being unconstrained.
+  scoped_array<double> upper_bounds_;
+  scoped_array<double> lower_bounds_;
+
   // Necessary so ProblemImpl can clean up the parameterizations.
   friend class ProblemImpl;
 };
diff --git a/internal/ceres/parameter_block_ordering.cc b/internal/ceres/parameter_block_ordering.cc
index 190715b..3032329 100644
--- a/internal/ceres/parameter_block_ordering.cc
+++ b/internal/ceres/parameter_block_ordering.cc
@@ -144,5 +144,21 @@
   return graph;
 }
 
+void OrderingToGroupSizes(const ParameterBlockOrdering* ordering,
+                          vector<int>* group_sizes) {
+  CHECK_NOTNULL(group_sizes)->clear();
+  if (ordering == NULL) {
+    return;
+  }
+
+  const map<int, set<double*> >& group_to_elements =
+      ordering->group_to_elements();
+  for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
+       it != group_to_elements.end();
+       ++it) {
+    group_sizes->push_back(it->second.size());
+  }
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/parameter_block_ordering.h b/internal/ceres/parameter_block_ordering.h
index 4675cb8..5de9951 100644
--- a/internal/ceres/parameter_block_ordering.h
+++ b/internal/ceres/parameter_block_ordering.h
@@ -78,6 +78,11 @@
 // parameter blocks, if they co-occur in a residual block.
 Graph<ParameterBlock*>* CreateHessianGraph(const Program& program);
 
+// Iterate over each of the groups in order of their priority and fill
+// summary with their sizes.
+void OrderingToGroupSizes(const ParameterBlockOrdering* ordering,
+                          vector<int>* group_sizes);
+
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/parameter_block_test.cc b/internal/ceres/parameter_block_test.cc
index 09156f8..5a2db3c 100644
--- a/internal/ceres/parameter_block_test.cc
+++ b/internal/ceres/parameter_block_test.cc
@@ -169,5 +169,45 @@
   EXPECT_FALSE(parameter_block.SetState(&y));
 }
 
+TEST(ParameterBlock, DefaultBounds) {
+  double x[2];
+  ParameterBlock parameter_block(x, 2, -1, NULL);
+  EXPECT_EQ(parameter_block.UpperBoundForParameter(0),
+            std::numeric_limits<double>::max());
+  EXPECT_EQ(parameter_block.UpperBoundForParameter(1),
+            std::numeric_limits<double>::max());
+  EXPECT_EQ(parameter_block.LowerBoundForParameter(0),
+            -std::numeric_limits<double>::max());
+  EXPECT_EQ(parameter_block.LowerBoundForParameter(1),
+            -std::numeric_limits<double>::max());
+}
+
+TEST(ParameterBlock, SetBounds) {
+  double x[2];
+  ParameterBlock parameter_block(x, 2, -1, NULL);
+  parameter_block.SetLowerBound(0, 1);
+  parameter_block.SetUpperBound(1, 1);
+
+  EXPECT_EQ(parameter_block.LowerBoundForParameter(0), 1.0);
+  EXPECT_EQ(parameter_block.LowerBoundForParameter(1),
+            -std::numeric_limits<double>::max());
+
+  EXPECT_EQ(parameter_block.UpperBoundForParameter(0),
+            std::numeric_limits<double>::max());
+  EXPECT_EQ(parameter_block.UpperBoundForParameter(1), 1.0);
+}
+
+TEST(ParameterBlock, PlusWithBoundsConstraints) {
+  double x[] = {1.0, 0.0};
+  double delta[] = {2.0, -10.0};
+  ParameterBlock parameter_block(x, 2, -1, NULL);
+  parameter_block.SetUpperBound(0, 2.0);
+  parameter_block.SetLowerBound(1, -1.0);
+  double x_plus_delta[2];
+  parameter_block.Plus(x, delta, x_plus_delta);
+  EXPECT_EQ(x_plus_delta[0], 2.0);
+  EXPECT_EQ(x_plus_delta[1], -1.0);
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view.cc b/internal/ceres/partitioned_matrix_view.cc
index 59eaff8..d745a9b 100644
--- a/internal/ceres/partitioned_matrix_view.cc
+++ b/internal/ceres/partitioned_matrix_view.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -27,277 +27,153 @@
 // POSSIBILITY OF SUCH DAMAGE.
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
 
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
-
+#include "ceres/linear_solver.h"
 #include "ceres/partitioned_matrix_view.h"
-
-#include <algorithm>
-#include <cstring>
-#include <vector>
-#include "ceres/block_sparse_matrix.h"
-#include "ceres/block_structure.h"
 #include "ceres/internal/eigen.h"
-#include "ceres/small_blas.h"
-#include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
 
-PartitionedMatrixView::PartitionedMatrixView(
-    const BlockSparseMatrix& matrix,
-    int num_col_blocks_a)
-    : matrix_(matrix),
-      num_col_blocks_e_(num_col_blocks_a) {
-  const CompressedRowBlockStructure* bs = matrix_.block_structure();
-  CHECK_NOTNULL(bs);
-
-  num_col_blocks_f_ = bs->cols.size() - num_col_blocks_a;
-
-  // Compute the number of row blocks in E. The number of row blocks
-  // in E maybe less than the number of row blocks in the input matrix
-  // as some of the row blocks at the bottom may not have any
-  // e_blocks. For a definition of what an e_block is, please see
-  // explicit_schur_complement_solver.h
-  num_row_blocks_e_ = 0;
-  for (int r = 0; r < bs->rows.size(); ++r) {
-    const vector<Cell>& cells = bs->rows[r].cells;
-    if (cells[0].block_id < num_col_blocks_a) {
-      ++num_row_blocks_e_;
-    }
+PartitionedMatrixViewBase*
+PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
+                                  const BlockSparseMatrix& matrix) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 2) &&
+      (options.f_block_size == 2)) {
+    return new PartitionedMatrixView<2, 2, 2>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 2) &&
+      (options.f_block_size == 3)) {
+    return new PartitionedMatrixView<2, 2, 3>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 2) &&
+      (options.f_block_size == 4)) {
+    return new PartitionedMatrixView<2, 2, 4>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 2) &&
+      (options.f_block_size == Eigen::Dynamic)) {
+    return new PartitionedMatrixView<2, 2, Eigen::Dynamic>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 3) &&
+      (options.f_block_size == 3)) {
+    return new PartitionedMatrixView<2, 3, 3>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 3) &&
+      (options.f_block_size == 4)) {
+    return new PartitionedMatrixView<2, 3, 4>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 3) &&
+      (options.f_block_size == 9)) {
+    return new PartitionedMatrixView<2, 3, 9>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 3) &&
+      (options.f_block_size == Eigen::Dynamic)) {
+    return new PartitionedMatrixView<2, 3, Eigen::Dynamic>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == 3)) {
+    return new PartitionedMatrixView<2, 4, 3>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == 4)) {
+    return new PartitionedMatrixView<2, 4, 4>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == 8)) {
+    return new PartitionedMatrixView<2, 4, 8>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == 9)) {
+    return new PartitionedMatrixView<2, 4, 9>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == Eigen::Dynamic)) {
+    return new PartitionedMatrixView<2, 4, Eigen::Dynamic>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == Eigen::Dynamic) &&
+      (options.f_block_size == Eigen::Dynamic)) {
+    return new PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 4) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == 2)) {
+    return new PartitionedMatrixView<4, 4, 2>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 4) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == 3)) {
+    return new PartitionedMatrixView<4, 4, 3>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 4) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == 4)) {
+    return new PartitionedMatrixView<4, 4, 4>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == 4) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == Eigen::Dynamic)) {
+    return new PartitionedMatrixView<4, 4, Eigen::Dynamic>(
+                 matrix, options.elimination_groups[0]);
+  }
+  if ((options.row_block_size == Eigen::Dynamic) &&
+      (options.e_block_size == Eigen::Dynamic) &&
+      (options.f_block_size == Eigen::Dynamic)) {
+    return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+                 matrix, options.elimination_groups[0]);
   }
 
-  // Compute the number of columns in E and F.
-  num_cols_e_ = 0;
-  num_cols_f_ = 0;
-
-  for (int c = 0; c < bs->cols.size(); ++c) {
-    const Block& block = bs->cols[c];
-    if (c < num_col_blocks_a) {
-      num_cols_e_ += block.size;
-    } else {
-      num_cols_f_ += block.size;
-    }
-  }
-
-  CHECK_EQ(num_cols_e_ + num_cols_f_, matrix_.num_cols());
-}
-
-PartitionedMatrixView::~PartitionedMatrixView() {
-}
-
-// The next four methods don't seem to be particularly cache
-// friendly. This is an artifact of how the BlockStructure of the
-// input matrix is constructed. These methods will benefit from
-// multithreading as well as improved data layout.
-
-void PartitionedMatrixView::RightMultiplyE(const double* x, double* y) const {
-  const CompressedRowBlockStructure* bs = matrix_.block_structure();
-
-  // Iterate over the first num_row_blocks_e_ row blocks, and multiply
-  // by the first cell in each row block.
-  const double* values = matrix_.values();
-  for (int r = 0; r < num_row_blocks_e_; ++r) {
-    const Cell& cell = bs->rows[r].cells[0];
-    const int row_block_pos = bs->rows[r].block.position;
-    const int row_block_size = bs->rows[r].block.size;
-    const int col_block_id = cell.block_id;
-    const int col_block_pos = bs->cols[col_block_id].position;
-    const int col_block_size = bs->cols[col_block_id].size;
-    MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
-        values + cell.position, row_block_size, col_block_size,
-        x + col_block_pos,
-        y + row_block_pos);
-  }
-}
-
-void PartitionedMatrixView::RightMultiplyF(const double* x, double* y) const {
-  const CompressedRowBlockStructure* bs = matrix_.block_structure();
-
-  // Iterate over row blocks, and if the row block is in E, then
-  // multiply by all the cells except the first one which is of type
-  // E. If the row block is not in E (i.e its in the bottom
-  // num_row_blocks - num_row_blocks_e row blocks), then all the cells
-  // are of type F and multiply by them all.
-  const double* values = matrix_.values();
-  for (int r = 0; r < bs->rows.size(); ++r) {
-    const int row_block_pos = bs->rows[r].block.position;
-    const int row_block_size = bs->rows[r].block.size;
-    const vector<Cell>& cells = bs->rows[r].cells;
-    for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) {
-      const int col_block_id = cells[c].block_id;
-      const int col_block_pos = bs->cols[col_block_id].position;
-      const int col_block_size = bs->cols[col_block_id].size;
-      MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
-          values + cells[c].position, row_block_size, col_block_size,
-          x + col_block_pos - num_cols_e(),
-          y + row_block_pos);
-    }
-  }
-}
-
-void PartitionedMatrixView::LeftMultiplyE(const double* x, double* y) const {
-  const CompressedRowBlockStructure* bs = matrix_.block_structure();
-
-  // Iterate over the first num_row_blocks_e_ row blocks, and multiply
-  // by the first cell in each row block.
-  const double* values = matrix_.values();
-  for (int r = 0; r < num_row_blocks_e_; ++r) {
-    const Cell& cell = bs->rows[r].cells[0];
-    const int row_block_pos = bs->rows[r].block.position;
-    const int row_block_size = bs->rows[r].block.size;
-    const int col_block_id = cell.block_id;
-    const int col_block_pos = bs->cols[col_block_id].position;
-    const int col_block_size = bs->cols[col_block_id].size;
-    MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
-        values + cell.position, row_block_size, col_block_size,
-        x + row_block_pos,
-        y + col_block_pos);
-  }
-}
-
-void PartitionedMatrixView::LeftMultiplyF(const double* x, double* y) const {
-  const CompressedRowBlockStructure* bs = matrix_.block_structure();
-
-  // Iterate over row blocks, and if the row block is in E, then
-  // multiply by all the cells except the first one which is of type
-  // E. If the row block is not in E (i.e its in the bottom
-  // num_row_blocks - num_row_blocks_e row blocks), then all the cells
-  // are of type F and multiply by them all.
-  const double* values = matrix_.values();
-  for (int r = 0; r < bs->rows.size(); ++r) {
-    const int row_block_pos = bs->rows[r].block.position;
-    const int row_block_size = bs->rows[r].block.size;
-    const vector<Cell>& cells = bs->rows[r].cells;
-    for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) {
-      const int col_block_id = cells[c].block_id;
-      const int col_block_pos = bs->cols[col_block_id].position;
-      const int col_block_size = bs->cols[col_block_id].size;
-      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
-        values + cells[c].position, row_block_size, col_block_size,
-        x + row_block_pos,
-        y + col_block_pos - num_cols_e());
-    }
-  }
-}
-
-// Given a range of columns blocks of a matrix m, compute the block
-// structure of the block diagonal of the matrix m(:,
-// start_col_block:end_col_block)'m(:, start_col_block:end_col_block)
-// and return a BlockSparseMatrix with the this block structure. The
-// caller owns the result.
-BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalMatrixLayout(
-    int start_col_block, int end_col_block) const {
-  const CompressedRowBlockStructure* bs = matrix_.block_structure();
-  CompressedRowBlockStructure* block_diagonal_structure =
-      new CompressedRowBlockStructure;
-
-  int block_position = 0;
-  int diagonal_cell_position = 0;
-
-  // Iterate over the column blocks, creating a new diagonal block for
-  // each column block.
-  for (int c = start_col_block; c < end_col_block; ++c) {
-    const Block& block = bs->cols[c];
-    block_diagonal_structure->cols.push_back(Block());
-    Block& diagonal_block = block_diagonal_structure->cols.back();
-    diagonal_block.size = block.size;
-    diagonal_block.position = block_position;
-
-    block_diagonal_structure->rows.push_back(CompressedRow());
-    CompressedRow& row = block_diagonal_structure->rows.back();
-    row.block = diagonal_block;
-
-    row.cells.push_back(Cell());
-    Cell& cell = row.cells.back();
-    cell.block_id = c - start_col_block;
-    cell.position = diagonal_cell_position;
-
-    block_position += block.size;
-    diagonal_cell_position += block.size * block.size;
-  }
-
-  // Build a BlockSparseMatrix with the just computed block
-  // structure.
-  return new BlockSparseMatrix(block_diagonal_structure);
-}
-
-BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalEtE() const {
-  BlockSparseMatrix* block_diagonal =
-      CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
-  UpdateBlockDiagonalEtE(block_diagonal);
-  return block_diagonal;
-}
-
-BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalFtF() const {
-  BlockSparseMatrix* block_diagonal =
-      CreateBlockDiagonalMatrixLayout(
-          num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
-  UpdateBlockDiagonalFtF(block_diagonal);
-  return block_diagonal;
-}
-
-// Similar to the code in RightMultiplyE, except instead of the matrix
-// vector multiply its an outer product.
-//
-//    block_diagonal = block_diagonal(E'E)
-void PartitionedMatrixView::UpdateBlockDiagonalEtE(
-    BlockSparseMatrix* block_diagonal) const {
-  const CompressedRowBlockStructure* bs = matrix_.block_structure();
-  const CompressedRowBlockStructure* block_diagonal_structure =
-      block_diagonal->block_structure();
-
-  block_diagonal->SetZero();
-  const double* values = matrix_.values();
-  for (int r = 0; r < num_row_blocks_e_ ; ++r) {
-    const Cell& cell = bs->rows[r].cells[0];
-    const int row_block_size = bs->rows[r].block.size;
-    const int block_id = cell.block_id;
-    const int col_block_size = bs->cols[block_id].size;
-    const int cell_position =
-        block_diagonal_structure->rows[block_id].cells[0].position;
-
-    MatrixTransposeMatrixMultiply
-        <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
-            values + cell.position, row_block_size, col_block_size,
-            values + cell.position, row_block_size, col_block_size,
-            block_diagonal->mutable_values() + cell_position,
-            0, 0, col_block_size, col_block_size);
-  }
-}
-
-// Similar to the code in RightMultiplyF, except instead of the matrix
-// vector multiply its an outer product.
-//
-//   block_diagonal = block_diagonal(F'F)
-//
-void PartitionedMatrixView::UpdateBlockDiagonalFtF(
-    BlockSparseMatrix* block_diagonal) const {
-  const CompressedRowBlockStructure* bs = matrix_.block_structure();
-  const CompressedRowBlockStructure* block_diagonal_structure =
-      block_diagonal->block_structure();
-
-  block_diagonal->SetZero();
-  const double* values = matrix_.values();
-  for (int r = 0; r < bs->rows.size(); ++r) {
-    const int row_block_size = bs->rows[r].block.size;
-    const vector<Cell>& cells = bs->rows[r].cells;
-    for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) {
-      const int col_block_id = cells[c].block_id;
-      const int col_block_size = bs->cols[col_block_id].size;
-      const int diagonal_block_id = col_block_id - num_col_blocks_e_;
-      const int cell_position =
-          block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
-
-      MatrixTransposeMatrixMultiply
-          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
-              values + cells[c].position, row_block_size, col_block_size,
-              values + cells[c].position, row_block_size, col_block_size,
-              block_diagonal->mutable_values() + cell_position,
-              0, 0, col_block_size, col_block_size);
-    }
-  }
-}
+#endif
+  VLOG(1) << "Template specializations not found for <"
+          << options.row_block_size << ","
+          << options.e_block_size << ","
+          << options.f_block_size << ">";
+  return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+               matrix, options.elimination_groups[0]);
+}
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view.h b/internal/ceres/partitioned_matrix_view.h
index ebfbe40..661252d 100644
--- a/internal/ceres/partitioned_matrix_view.h
+++ b/internal/ceres/partitioned_matrix_view.h
@@ -36,7 +36,15 @@
 #ifndef CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
 #define CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
 
-#include "ceres/block_sparse_matrix.h"
+#include <algorithm>
+#include <cstring>
+#include <vector>
+
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/small_blas.h"
+#include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
@@ -51,57 +59,80 @@
 // block structure of the matrix does not satisfy the requirements of
 // the Schur complement solver it will result in unpredictable and
 // wrong output.
-//
-// This class lives in the internal name space as its a utility class
-// to be used by the IterativeSchurComplementSolver class, found in
-// iterative_schur_complement_solver.h, and is not meant for general
-// consumption.
-class PartitionedMatrixView {
+class PartitionedMatrixViewBase {
  public:
-  // matrix = [E F], where the matrix E contains the first
-  // num_col_blocks_a column blocks.
-  PartitionedMatrixView(const BlockSparseMatrix& matrix,
-                        int num_col_blocks_a);
-  ~PartitionedMatrixView();
+  virtual ~PartitionedMatrixViewBase() {}
 
   // y += E'x
-  void LeftMultiplyE(const double* x, double* y) const;
+  virtual void LeftMultiplyE(const double* x, double* y) const = 0;
 
   // y += F'x
-  void LeftMultiplyF(const double* x, double* y) const;
+  virtual void LeftMultiplyF(const double* x, double* y) const = 0;
 
   // y += Ex
-  void RightMultiplyE(const double* x, double* y) const;
+  virtual void RightMultiplyE(const double* x, double* y) const = 0;
 
   // y += Fx
-  void RightMultiplyF(const double* x, double* y) const;
+  virtual void RightMultiplyF(const double* x, double* y) const = 0;
 
   // Create and return the block diagonal of the matrix E'E.
-  BlockSparseMatrix* CreateBlockDiagonalEtE() const;
+  virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const = 0;
 
-  // Create and return the block diagonal of the matrix F'F.
-  BlockSparseMatrix* CreateBlockDiagonalFtF() const;
+  // Create and return the block diagonal of the matrix F'F. Caller
+  // owns the result.
+  virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const = 0;
 
   // Compute the block diagonal of the matrix E'E and store it in
   // block_diagonal. The matrix block_diagonal is expected to have a
   // BlockStructure (preferably created using
   // CreateBlockDiagonalMatrixEtE) which is has the same structure as
   // the block diagonal of E'E.
-  void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const;
+  virtual void UpdateBlockDiagonalEtE(
+      BlockSparseMatrix* block_diagonal) const = 0;
 
   // Compute the block diagonal of the matrix F'F and store it in
   // block_diagonal. The matrix block_diagonal is expected to have a
   // BlockStructure (preferably created using
   // CreateBlockDiagonalMatrixFtF) which is has the same structure as
   // the block diagonal of F'F.
-  void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const;
+  virtual void UpdateBlockDiagonalFtF(
+      BlockSparseMatrix* block_diagonal) const = 0;
 
-  int num_col_blocks_e() const { return num_col_blocks_e_;  }
-  int num_col_blocks_f() const { return num_col_blocks_f_;  }
-  int num_cols_e()       const { return num_cols_e_;        }
-  int num_cols_f()       const { return num_cols_f_;        }
-  int num_rows()         const { return matrix_.num_rows(); }
-  int num_cols()         const { return matrix_.num_cols(); }
+  virtual int num_col_blocks_e() const = 0;
+  virtual int num_col_blocks_f() const = 0;
+  virtual int num_cols_e()       const = 0;
+  virtual int num_cols_f()       const = 0;
+  virtual int num_rows()         const = 0;
+  virtual int num_cols()         const = 0;
+
+  static PartitionedMatrixViewBase* Create(const LinearSolver::Options& options,
+                                           const BlockSparseMatrix& matrix);
+};
+
+template <int kRowBlockSize = Eigen::Dynamic,
+          int kEBlockSize = Eigen::Dynamic,
+          int kFBlockSize = Eigen::Dynamic >
+class PartitionedMatrixView : public PartitionedMatrixViewBase {
+ public:
+  // matrix = [E F], where the matrix E contains the first
+  // num_col_blocks_a column blocks.
+  PartitionedMatrixView(const BlockSparseMatrix& matrix, int num_col_blocks_e);
+
+  virtual ~PartitionedMatrixView();
+  virtual void LeftMultiplyE(const double* x, double* y) const;
+  virtual void LeftMultiplyF(const double* x, double* y) const;
+  virtual void RightMultiplyE(const double* x, double* y) const;
+  virtual void RightMultiplyF(const double* x, double* y) const;
+  virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const;
+  virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const;
+  virtual void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const;
+  virtual void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const;
+  virtual int num_col_blocks_e() const { return num_col_blocks_e_;  }
+  virtual int num_col_blocks_f() const { return num_col_blocks_f_;  }
+  virtual int num_cols_e()       const { return num_cols_e_;        }
+  virtual int num_cols_f()       const { return num_cols_f_;        }
+  virtual int num_rows()         const { return matrix_.num_rows(); }
+  virtual int num_cols()         const { return matrix_.num_cols(); }
 
  private:
   BlockSparseMatrix* CreateBlockDiagonalMatrixLayout(int start_col_block,
diff --git a/internal/ceres/partitioned_matrix_view_impl.h b/internal/ceres/partitioned_matrix_view_impl.h
new file mode 100644
index 0000000..ae7f776
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view_impl.h
@@ -0,0 +1,380 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/partitioned_matrix_view.h"
+
+#include <algorithm>
+#include <cstring>
+#include <vector>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/small_blas.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+PartitionedMatrixView(
+    const BlockSparseMatrix& matrix,
+    int num_col_blocks_e)
+    : matrix_(matrix),
+      num_col_blocks_e_(num_col_blocks_e) {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+  CHECK_NOTNULL(bs);
+
+  num_col_blocks_f_ = bs->cols.size() - num_col_blocks_e_;
+
+  // Compute the number of row blocks in E. The number of row blocks
+  // in E maybe less than the number of row blocks in the input matrix
+  // as some of the row blocks at the bottom may not have any
+  // e_blocks. For a definition of what an e_block is, please see
+  // explicit_schur_complement_solver.h
+  num_row_blocks_e_ = 0;
+  for (int r = 0; r < bs->rows.size(); ++r) {
+    const vector<Cell>& cells = bs->rows[r].cells;
+    if (cells[0].block_id < num_col_blocks_e_) {
+      ++num_row_blocks_e_;
+    }
+  }
+
+  // Compute the number of columns in E and F.
+  num_cols_e_ = 0;
+  num_cols_f_ = 0;
+
+  for (int c = 0; c < bs->cols.size(); ++c) {
+    const Block& block = bs->cols[c];
+    if (c < num_col_blocks_e_) {
+      num_cols_e_ += block.size;
+    } else {
+      num_cols_f_ += block.size;
+    }
+  }
+
+  CHECK_EQ(num_cols_e_ + num_cols_f_, matrix_.num_cols());
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+~PartitionedMatrixView() {
+}
+
+// The next four methods don't seem to be particularly cache
+// friendly. This is an artifact of how the BlockStructure of the
+// input matrix is constructed. These methods will benefit from
+// multithreading as well as improved data layout.
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+RightMultiplyE(const double* x, double* y) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+  // Iterate over the first num_row_blocks_e_ row blocks, and multiply
+  // by the first cell in each row block.
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const Cell& cell = bs->rows[r].cells[0];
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const int col_block_id = cell.block_id;
+    const int col_block_pos = bs->cols[col_block_id].position;
+    const int col_block_size = bs->cols[col_block_id].size;
+    MatrixVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
+        values + cell.position, row_block_size, col_block_size,
+        x + col_block_pos,
+        y + row_block_pos);
+  }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+RightMultiplyF(const double* x, double* y) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+  // Iterate over row blocks, and if the row block is in E, then
+  // multiply by all the cells except the first one which is of type
+  // E. If the row block is not in E (i.e its in the bottom
+  // num_row_blocks - num_row_blocks_e row blocks), then all the cells
+  // are of type F and multiply by them all.
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 1; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_pos = bs->cols[col_block_id].position;
+      const int col_block_size = bs->cols[col_block_id].size;
+      MatrixVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
+          values + cells[c].position, row_block_size, col_block_size,
+          x + col_block_pos - num_cols_e_,
+          y + row_block_pos);
+    }
+  }
+
+  for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 0; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_pos = bs->cols[col_block_id].position;
+      const int col_block_size = bs->cols[col_block_id].size;
+      MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+          values + cells[c].position, row_block_size, col_block_size,
+          x + col_block_pos - num_cols_e_,
+          y + row_block_pos);
+    }
+  }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+LeftMultiplyE(const double* x, double* y) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+  // Iterate over the first num_row_blocks_e_ row blocks, and multiply
+  // by the first cell in each row block.
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const Cell& cell = bs->rows[r].cells[0];
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const int col_block_id = cell.block_id;
+    const int col_block_pos = bs->cols[col_block_id].position;
+    const int col_block_size = bs->cols[col_block_id].size;
+    MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
+        values + cell.position, row_block_size, col_block_size,
+        x + row_block_pos,
+        y + col_block_pos);
+  }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+LeftMultiplyF(const double* x, double* y) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+  // Iterate over row blocks, and if the row block is in E, then
+  // multiply by all the cells except the first one which is of type
+  // E. If the row block is not in E (i.e its in the bottom
+  // num_row_blocks - num_row_blocks_e row blocks), then all the cells
+  // are of type F and multiply by them all.
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 1; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_pos = bs->cols[col_block_id].position;
+      const int col_block_size = bs->cols[col_block_id].size;
+      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
+        values + cells[c].position, row_block_size, col_block_size,
+        x + row_block_pos,
+        y + col_block_pos - num_cols_e_);
+    }
+  }
+
+  for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 0; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_pos = bs->cols[col_block_id].position;
+      const int col_block_size = bs->cols[col_block_id].size;
+      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+        values + cells[c].position, row_block_size, col_block_size,
+        x + row_block_pos,
+        y + col_block_pos - num_cols_e_);
+    }
+  }
+}
+
+// Given a range of columns blocks of a matrix m, compute the block
+// structure of the block diagonal of the matrix m(:,
+// start_col_block:end_col_block)'m(:, start_col_block:end_col_block)
+// and return a BlockSparseMatrix with the this block structure. The
+// caller owns the result.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+BlockSparseMatrix*
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+CreateBlockDiagonalMatrixLayout(int start_col_block, int end_col_block) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+  CompressedRowBlockStructure* block_diagonal_structure =
+      new CompressedRowBlockStructure;
+
+  int block_position = 0;
+  int diagonal_cell_position = 0;
+
+  // Iterate over the column blocks, creating a new diagonal block for
+  // each column block.
+  for (int c = start_col_block; c < end_col_block; ++c) {
+    const Block& block = bs->cols[c];
+    block_diagonal_structure->cols.push_back(Block());
+    Block& diagonal_block = block_diagonal_structure->cols.back();
+    diagonal_block.size = block.size;
+    diagonal_block.position = block_position;
+
+    block_diagonal_structure->rows.push_back(CompressedRow());
+    CompressedRow& row = block_diagonal_structure->rows.back();
+    row.block = diagonal_block;
+
+    row.cells.push_back(Cell());
+    Cell& cell = row.cells.back();
+    cell.block_id = c - start_col_block;
+    cell.position = diagonal_cell_position;
+
+    block_position += block.size;
+    diagonal_cell_position += block.size * block.size;
+  }
+
+  // Build a BlockSparseMatrix with the just computed block
+  // structure.
+  return new BlockSparseMatrix(block_diagonal_structure);
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+BlockSparseMatrix*
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+CreateBlockDiagonalEtE() const {
+  BlockSparseMatrix* block_diagonal =
+      CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
+  UpdateBlockDiagonalEtE(block_diagonal);
+  return block_diagonal;
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+BlockSparseMatrix*
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+CreateBlockDiagonalFtF() const {
+  BlockSparseMatrix* block_diagonal =
+      CreateBlockDiagonalMatrixLayout(
+          num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
+  UpdateBlockDiagonalFtF(block_diagonal);
+  return block_diagonal;
+}
+
+// Similar to the code in RightMultiplyE, except instead of the matrix
+// vector multiply its an outer product.
+//
+//    block_diagonal = block_diagonal(E'E)
+//
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+UpdateBlockDiagonalEtE(
+    BlockSparseMatrix* block_diagonal) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+  const CompressedRowBlockStructure* block_diagonal_structure =
+      block_diagonal->block_structure();
+
+  block_diagonal->SetZero();
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_ ; ++r) {
+    const Cell& cell = bs->rows[r].cells[0];
+    const int row_block_size = bs->rows[r].block.size;
+    const int block_id = cell.block_id;
+    const int col_block_size = bs->cols[block_id].size;
+    const int cell_position =
+        block_diagonal_structure->rows[block_id].cells[0].position;
+
+    MatrixTransposeMatrixMultiply
+        <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
+            values + cell.position, row_block_size, col_block_size,
+            values + cell.position, row_block_size, col_block_size,
+            block_diagonal->mutable_values() + cell_position,
+            0, 0, col_block_size, col_block_size);
+  }
+}
+
+// Similar to the code in RightMultiplyF, except instead of the matrix
+// vector multiply its an outer product.
+//
+//   block_diagonal = block_diagonal(F'F)
+//
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+  const CompressedRowBlockStructure* block_diagonal_structure =
+      block_diagonal->block_structure();
+
+  block_diagonal->SetZero();
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const int row_block_size = bs->rows[r].block.size;
+    const vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 1; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_size = bs->cols[col_block_id].size;
+      const int diagonal_block_id = col_block_id - num_col_blocks_e_;
+      const int cell_position =
+          block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
+
+      MatrixTransposeMatrixMultiply
+          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
+              values + cells[c].position, row_block_size, col_block_size,
+              values + cells[c].position, row_block_size, col_block_size,
+              block_diagonal->mutable_values() + cell_position,
+              0, 0, col_block_size, col_block_size);
+    }
+  }
+
+  for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
+    const int row_block_size = bs->rows[r].block.size;
+    const vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 0; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_size = bs->cols[col_block_id].size;
+      const int diagonal_block_id = col_block_id - num_col_blocks_e_;
+      const int cell_position =
+          block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
+
+      MatrixTransposeMatrixMultiply
+          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
+              values + cells[c].position, row_block_size, col_block_size,
+              values + cells[c].position, row_block_size, col_block_size,
+              block_diagonal->mutable_values() + cell_position,
+              0, 0, col_block_size, col_block_size);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view_test.cc b/internal/ceres/partitioned_matrix_view_test.cc
index 48f7d24..ef5dae9 100644
--- a/internal/ceres/partitioned_matrix_view_test.cc
+++ b/internal/ceres/partitioned_matrix_view_test.cc
@@ -49,6 +49,7 @@
 class PartitionedMatrixViewTest : public ::testing::Test {
  protected :
   virtual void SetUp() {
+    srand(5);
     scoped_ptr<LinearLeastSquaresProblem> problem(
         CreateLinearLeastSquaresProblemFromId(2));
     CHECK_NOTNULL(problem.get());
@@ -57,108 +58,93 @@
     num_cols_ = A_->num_cols();
     num_rows_ = A_->num_rows();
     num_eliminate_blocks_ = problem->num_eliminate_blocks;
+    LinearSolver::Options options;
+    options.elimination_groups.push_back(num_eliminate_blocks_);
+    pmv_.reset(PartitionedMatrixViewBase::Create(
+                   options,
+                   *down_cast<BlockSparseMatrix*>(A_.get())));
   }
 
   int num_rows_;
   int num_cols_;
   int num_eliminate_blocks_;
-
   scoped_ptr<SparseMatrix> A_;
+  scoped_ptr<PartitionedMatrixViewBase> pmv_;
 };
 
 TEST_F(PartitionedMatrixViewTest, DimensionsTest) {
-  PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
-                          num_eliminate_blocks_);
-  EXPECT_EQ(m.num_col_blocks_e(), num_eliminate_blocks_);
-  EXPECT_EQ(m.num_col_blocks_f(), num_cols_ - num_eliminate_blocks_);
-  EXPECT_EQ(m.num_cols_e(), num_eliminate_blocks_);
-  EXPECT_EQ(m.num_cols_f(), num_cols_ - num_eliminate_blocks_);
-  EXPECT_EQ(m.num_cols(), A_->num_cols());
-  EXPECT_EQ(m.num_rows(), A_->num_rows());
+  EXPECT_EQ(pmv_->num_col_blocks_e(), num_eliminate_blocks_);
+  EXPECT_EQ(pmv_->num_col_blocks_f(), num_cols_ - num_eliminate_blocks_);
+  EXPECT_EQ(pmv_->num_cols_e(), num_eliminate_blocks_);
+  EXPECT_EQ(pmv_->num_cols_f(), num_cols_ - num_eliminate_blocks_);
+  EXPECT_EQ(pmv_->num_cols(), A_->num_cols());
+  EXPECT_EQ(pmv_->num_rows(), A_->num_rows());
 }
 
 TEST_F(PartitionedMatrixViewTest, RightMultiplyE) {
-  PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
-                          num_eliminate_blocks_);
-
-  srand(5);
-
-  Vector x1(m.num_cols_e());
-  Vector x2(m.num_cols());
+  Vector x1(pmv_->num_cols_e());
+  Vector x2(pmv_->num_cols());
   x2.setZero();
 
-  for (int i = 0; i < m.num_cols_e(); ++i) {
+  for (int i = 0; i < pmv_->num_cols_e(); ++i) {
     x1(i) = x2(i) = RandDouble();
   }
 
-  Vector y1 = Vector::Zero(m.num_rows());
-  m.RightMultiplyE(x1.data(), y1.data());
+  Vector y1 = Vector::Zero(pmv_->num_rows());
+  pmv_->RightMultiplyE(x1.data(), y1.data());
 
-  Vector y2 = Vector::Zero(m.num_rows());
+  Vector y2 = Vector::Zero(pmv_->num_rows());
   A_->RightMultiply(x2.data(), y2.data());
 
-  for (int i = 0; i < m.num_rows(); ++i) {
+  for (int i = 0; i < pmv_->num_rows(); ++i) {
     EXPECT_NEAR(y1(i), y2(i), kEpsilon);
   }
 }
 
 TEST_F(PartitionedMatrixViewTest, RightMultiplyF) {
-  PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
-                          num_eliminate_blocks_);
+  Vector x1(pmv_->num_cols_f());
+  Vector x2 = Vector::Zero(pmv_->num_cols());
 
-  srand(5);
-
-  Vector x1(m.num_cols_f());
-  Vector x2 = Vector::Zero(m.num_cols());
-
-  for (int i = 0; i < m.num_cols_f(); ++i) {
+  for (int i = 0; i < pmv_->num_cols_f(); ++i) {
     x1(i) = RandDouble();
-    x2(i + m.num_cols_e()) = x1(i);
+    x2(i + pmv_->num_cols_e()) = x1(i);
   }
 
-  Vector y1 = Vector::Zero(m.num_rows());
-  m.RightMultiplyF(x1.data(), y1.data());
+  Vector y1 = Vector::Zero(pmv_->num_rows());
+  pmv_->RightMultiplyF(x1.data(), y1.data());
 
-  Vector y2 = Vector::Zero(m.num_rows());
+  Vector y2 = Vector::Zero(pmv_->num_rows());
   A_->RightMultiply(x2.data(), y2.data());
 
-  for (int i = 0; i < m.num_rows(); ++i) {
+  for (int i = 0; i < pmv_->num_rows(); ++i) {
     EXPECT_NEAR(y1(i), y2(i), kEpsilon);
   }
 }
 
 TEST_F(PartitionedMatrixViewTest, LeftMultiply) {
-  PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
-                          num_eliminate_blocks_);
-
-  srand(5);
-
-  Vector x = Vector::Zero(m.num_rows());
-  for (int i = 0; i < m.num_rows(); ++i) {
+  Vector x = Vector::Zero(pmv_->num_rows());
+  for (int i = 0; i < pmv_->num_rows(); ++i) {
     x(i) = RandDouble();
   }
 
-  Vector y = Vector::Zero(m.num_cols());
-  Vector y1 = Vector::Zero(m.num_cols_e());
-  Vector y2 = Vector::Zero(m.num_cols_f());
+  Vector y = Vector::Zero(pmv_->num_cols());
+  Vector y1 = Vector::Zero(pmv_->num_cols_e());
+  Vector y2 = Vector::Zero(pmv_->num_cols_f());
 
   A_->LeftMultiply(x.data(), y.data());
-  m.LeftMultiplyE(x.data(), y1.data());
-  m.LeftMultiplyF(x.data(), y2.data());
+  pmv_->LeftMultiplyE(x.data(), y1.data());
+  pmv_->LeftMultiplyF(x.data(), y2.data());
 
-  for (int i = 0; i < m.num_cols(); ++i) {
+  for (int i = 0; i < pmv_->num_cols(); ++i) {
     EXPECT_NEAR(y(i),
-                (i < m.num_cols_e()) ? y1(i) : y2(i - m.num_cols_e()),
+                (i < pmv_->num_cols_e()) ? y1(i) : y2(i - pmv_->num_cols_e()),
                 kEpsilon);
   }
 }
 
 TEST_F(PartitionedMatrixViewTest, BlockDiagonalEtE) {
-  PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
-                          num_eliminate_blocks_);
-
   scoped_ptr<BlockSparseMatrix>
-      block_diagonal_ee(m.CreateBlockDiagonalEtE());
+      block_diagonal_ee(pmv_->CreateBlockDiagonalEtE());
   const CompressedRowBlockStructure* bs  = block_diagonal_ee->block_structure();
 
   EXPECT_EQ(block_diagonal_ee->num_rows(), 2);
@@ -171,11 +157,8 @@
 }
 
 TEST_F(PartitionedMatrixViewTest, BlockDiagonalFtF) {
-  PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
-                          num_eliminate_blocks_);
-
   scoped_ptr<BlockSparseMatrix>
-      block_diagonal_ff(m.CreateBlockDiagonalFtF());
+      block_diagonal_ff(pmv_->CreateBlockDiagonalFtF());
   const CompressedRowBlockStructure* bs  = block_diagonal_ff->block_structure();
 
   EXPECT_EQ(block_diagonal_ff->num_rows(), 3);
diff --git a/internal/ceres/polynomial.cc b/internal/ceres/polynomial.cc
index 3238b89..75f43de 100644
--- a/internal/ceres/polynomial.cc
+++ b/internal/ceres/polynomial.cc
@@ -37,6 +37,7 @@
 
 #include "Eigen/Dense"
 #include "ceres/internal/port.h"
+#include "ceres/stringprintf.h"
 #include "glog/logging.h"
 
 namespace ceres {
@@ -119,6 +120,63 @@
   }
   return polynomial_in.tail(polynomial_in.size() - i);
 }
+
+void FindLinearPolynomialRoots(const Vector& polynomial,
+                               Vector* real,
+                               Vector* imaginary) {
+  CHECK_EQ(polynomial.size(), 2);
+  if (real != NULL) {
+    real->resize(1);
+    (*real)(0) = -polynomial(1) / polynomial(0);
+  }
+
+  if (imaginary != NULL) {
+    imaginary->setZero(1);
+  }
+}
+
+void FindQuadraticPolynomialRoots(const Vector& polynomial,
+                                  Vector* real,
+                                  Vector* imaginary) {
+  CHECK_EQ(polynomial.size(), 3);
+  const double a = polynomial(0);
+  const double b = polynomial(1);
+  const double c = polynomial(2);
+  const double D = b * b - 4 * a * c;
+  const double sqrt_D = sqrt(fabs(D));
+  if (real != NULL) {
+    real->setZero(2);
+  }
+  if (imaginary != NULL) {
+    imaginary->setZero(2);
+  }
+
+  // Real roots.
+  if (D >= 0) {
+    if (real != NULL) {
+      // Stable quadratic roots according to BKP Horn.
+      // http://people.csail.mit.edu/bkph/articles/Quadratics.pdf
+      if (b >= 0) {
+        (*real)(0) = (-b - sqrt_D) / (2.0 * a);
+        (*real)(1) = (2.0 * c) / (-b - sqrt_D);
+      } else {
+        (*real)(0) = (2.0 * c) / (-b + sqrt_D);
+        (*real)(1) = (-b + sqrt_D) / (2.0 * a);
+      }
+    }
+    return;
+  }
+
+  // Use the normal quadratic formula for the complex case.
+  if (real != NULL) {
+    (*real)(0) = -b / (2.0 * a);
+    (*real)(1) = -b / (2.0 * a);
+  }
+  if (imaginary != NULL) {
+    (*imaginary)(0) = sqrt_D / (2.0 * a);
+    (*imaginary)(1) = -sqrt_D / (2.0 * a);
+  }
+}
 }  // namespace
 
 bool FindPolynomialRoots(const Vector& polynomial_in,
@@ -132,30 +190,40 @@
   Vector polynomial = RemoveLeadingZeros(polynomial_in);
   const int degree = polynomial.size() - 1;
 
+  VLOG(3) << "Input polynomial: " << polynomial_in.transpose();
+  if (polynomial.size() != polynomial_in.size()) {
+    VLOG(3) << "Trimmed polynomial: " << polynomial.transpose();
+  }
+
   // Is the polynomial constant?
   if (degree == 0) {
     LOG(WARNING) << "Trying to extract roots from a constant "
                  << "polynomial in FindPolynomialRoots";
+    // We return true with no roots, not false, as if the polynomial is constant
+    // it is correct that there are no roots. It is not the case that they were
+    // there, but that we have failed to extract them.
     return true;
   }
 
+  // Linear
+  if (degree == 1) {
+    FindLinearPolynomialRoots(polynomial, real, imaginary);
+    return true;
+  }
+
+  // Quadratic
+  if (degree == 2) {
+    FindQuadraticPolynomialRoots(polynomial, real, imaginary);
+    return true;
+  }
+
+  // The degree is now known to be at least 3. For cubic or higher
+  // roots we use the method of companion matrices.
+
   // Divide by leading term
   const double leading_term = polynomial(0);
   polynomial /= leading_term;
 
-  // Separately handle linear polynomials.
-  if (degree == 1) {
-    if (real != NULL) {
-      real->resize(1);
-      (*real)(0) = -polynomial(1);
-    }
-    if (imaginary != NULL) {
-      imaginary->resize(1);
-      imaginary->setZero();
-    }
-  }
-
-  // The degree is now known to be at least 2.
   // Build and balance the companion matrix to the polynomial.
   Matrix companion_matrix(degree, degree);
   BuildCompanionMatrix(polynomial, &companion_matrix);
@@ -255,6 +323,12 @@
   }
 }
 
+string FunctionSample::ToDebugString() const {
+  return StringPrintf("[x: %.8e, value: %.8e, gradient: %.8e, "
+                      "value_is_valid: %d, gradient_is_valid: %d]",
+                      x, value, gradient, value_is_valid, gradient_is_valid);
+}
+
 Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples) {
   const int num_samples = samples.size();
   int num_constraints = 0;
@@ -268,6 +342,7 @@
   }
 
   const int degree = num_constraints - 1;
+
   Matrix lhs = Matrix::Zero(num_constraints, num_constraints);
   Vector rhs = Vector::Zero(num_constraints);
 
diff --git a/internal/ceres/polynomial.h b/internal/ceres/polynomial.h
index 42ffdcb..80ce77e 100644
--- a/internal/ceres/polynomial.h
+++ b/internal/ceres/polynomial.h
@@ -95,6 +95,7 @@
         gradient(0.0),
         gradient_is_valid(false) {
   }
+  string ToDebugString() const;
 
   double x;
   double value;      // value = f(x)
diff --git a/internal/ceres/preconditioner.cc b/internal/ceres/preconditioner.cc
index 505a47d..062347f 100644
--- a/internal/ceres/preconditioner.cc
+++ b/internal/ceres/preconditioner.cc
@@ -37,6 +37,16 @@
 Preconditioner::~Preconditioner() {
 }
 
+PreconditionerType Preconditioner::PreconditionerForZeroEBlocks(
+    PreconditionerType preconditioner_type) {
+  if (preconditioner_type == SCHUR_JACOBI ||
+      preconditioner_type == CLUSTER_JACOBI ||
+      preconditioner_type == CLUSTER_TRIDIAGONAL) {
+    return JACOBI;
+  }
+  return preconditioner_type;
+}
+
 SparseMatrixPreconditionerWrapper::SparseMatrixPreconditionerWrapper(
     const SparseMatrix* matrix)
     : matrix_(CHECK_NOTNULL(matrix)) {
diff --git a/internal/ceres/preconditioner.h b/internal/ceres/preconditioner.h
index af64e3c..e8d5994 100644
--- a/internal/ceres/preconditioner.h
+++ b/internal/ceres/preconditioner.h
@@ -36,6 +36,7 @@
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/linear_operator.h"
 #include "ceres/sparse_matrix.h"
+#include "ceres/types.h"
 
 namespace ceres {
 namespace internal {
@@ -48,6 +49,7 @@
   struct Options {
     Options()
         : type(JACOBI),
+          visibility_clustering_type(CANONICAL_VIEWS),
           sparse_linear_algebra_library_type(SUITE_SPARSE),
           num_threads(1),
           row_block_size(Eigen::Dynamic),
@@ -56,7 +58,7 @@
     }
 
     PreconditionerType type;
-
+    VisibilityClusteringType visibility_clustering_type;
     SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
 
     // If possible, how many threads the preconditioner can use.
@@ -94,6 +96,14 @@
     int f_block_size;
   };
 
+  // If the optimization problem is such that there are no remaining
+  // e-blocks, ITERATIVE_SCHUR with a Schur type preconditioner cannot
+  // be used. This function returns JACOBI if a preconditioner for
+  // ITERATIVE_SCHUR is used. The input preconditioner_type is
+  // returned otherwise.
+  static PreconditionerType PreconditionerForZeroEBlocks(
+      PreconditionerType preconditioner_type);
+
   virtual ~Preconditioner();
 
   // Update the numerical value of the preconditioner for the linear
diff --git a/internal/ceres/problem.cc b/internal/ceres/problem.cc
index 403e96a..674694d 100644
--- a/internal/ceres/problem.cc
+++ b/internal/ceres/problem.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -178,6 +178,23 @@
   problem_impl_->SetParameterization(values, local_parameterization);
 }
 
+const LocalParameterization* Problem::GetParameterization(
+    double* values) const {
+  return problem_impl_->GetParameterization(values);
+}
+
+void Problem::SetParameterLowerBound(double* values,
+                                     int index,
+                                     double lower_bound) {
+  problem_impl_->SetParameterLowerBound(values, index, lower_bound);
+}
+
+void Problem::SetParameterUpperBound(double* values,
+                                     int index,
+                                     double upper_bound) {
+  problem_impl_->SetParameterUpperBound(values, index, upper_bound);
+}
+
 bool Problem::Evaluate(const EvaluateOptions& evaluate_options,
                        double* cost,
                        vector<double>* residuals,
@@ -214,8 +231,31 @@
   return problem_impl_->ParameterBlockLocalSize(parameter_block);
 };
 
+bool Problem::HasParameterBlock(const double* values) const {
+  return problem_impl_->HasParameterBlock(values);
+}
+
 void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const {
   problem_impl_->GetParameterBlocks(parameter_blocks);
 }
 
+void Problem::GetResidualBlocks(
+    vector<ResidualBlockId>* residual_blocks) const {
+  problem_impl_->GetResidualBlocks(residual_blocks);
+}
+
+void Problem::GetParameterBlocksForResidualBlock(
+    const ResidualBlockId residual_block,
+    vector<double*>* parameter_blocks) const {
+  problem_impl_->GetParameterBlocksForResidualBlock(residual_block,
+                                                    parameter_blocks);
+}
+
+void Problem::GetResidualBlocksForParameterBlock(
+    const double* values,
+    vector<ResidualBlockId>* residual_blocks) const {
+  problem_impl_->GetResidualBlocksForParameterBlock(values,
+                                                    residual_blocks);
+}
+
 }  // namespace ceres
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc
index 8302702..7c86efb 100644
--- a/internal/ceres/problem_impl.cc
+++ b/internal/ceres/problem_impl.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -27,7 +27,7 @@
 // POSSIBILITY OF SUCH DAMAGE.
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
-//         keir@google.com (Keir Mierle)
+//         mierle@gmail.com (Keir Mierle)
 
 #include "ceres/problem_impl.h"
 
@@ -142,7 +142,7 @@
 
   // For dynamic problems, add the list of dependent residual blocks, which is
   // empty to start.
-  if (options_.enable_fast_parameter_block_removal) {
+  if (options_.enable_fast_removal) {
     new_parameter_block->EnableResidualBlockDependencies();
   }
   parameter_block_map_[values] = new_parameter_block;
@@ -150,6 +150,26 @@
   return new_parameter_block;
 }
 
+void ProblemImpl::InternalRemoveResidualBlock(ResidualBlock* residual_block) {
+  CHECK_NOTNULL(residual_block);
+  // Perform no check on the validity of residual_block, that is handled in
+  // the public method: RemoveResidualBlock().
+
+  // If needed, remove the parameter dependencies on this residual block.
+  if (options_.enable_fast_removal) {
+    const int num_parameter_blocks_for_residual =
+        residual_block->NumParameterBlocks();
+    for (int i = 0; i < num_parameter_blocks_for_residual; ++i) {
+      residual_block->parameter_blocks()[i]
+          ->RemoveResidualBlock(residual_block);
+    }
+
+    ResidualBlockSet::iterator it = residual_block_set_.find(residual_block);
+    residual_block_set_.erase(it);
+  }
+  DeleteBlockInVector(program_->mutable_residual_blocks(), residual_block);
+}
+
 // Deletes the residual block in question, assuming there are no other
 // references to it inside the problem (e.g. by another parameter). Referenced
 // cost and loss functions are tucked away for future deletion, since it is not
@@ -224,7 +244,7 @@
            cost_function->parameter_block_sizes().size());
 
   // Check the sizes match.
-  const vector<int16>& parameter_block_sizes =
+  const vector<int32>& parameter_block_sizes =
       cost_function->parameter_block_sizes();
 
   if (!options_.disable_all_safety_checks) {
@@ -278,13 +298,18 @@
                         program_->residual_blocks_.size());
 
   // Add dependencies on the residual to the parameter blocks.
-  if (options_.enable_fast_parameter_block_removal) {
+  if (options_.enable_fast_removal) {
     for (int i = 0; i < parameter_blocks.size(); ++i) {
       parameter_block_ptrs[i]->AddResidualBlock(new_residual_block);
     }
   }
 
   program_->residual_blocks_.push_back(new_residual_block);
+
+  if (options_.enable_fast_removal) {
+    residual_block_set_.insert(new_residual_block);
+  }
+
   return new_residual_block;
 }
 
@@ -452,7 +477,11 @@
 void ProblemImpl::DeleteBlockInVector(vector<Block*>* mutable_blocks,
                                       Block* block_to_remove) {
   CHECK_EQ((*mutable_blocks)[block_to_remove->index()], block_to_remove)
-      << "You found a Ceres bug! Block: " << block_to_remove->ToString();
+      << "You found a Ceres bug! \n"
+      << "Block requested: "
+      << block_to_remove->ToString() << "\n"
+      << "Block present: "
+      << (*mutable_blocks)[block_to_remove->index()]->ToString();
 
   // Prepare the to-be-moved block for the new, lower-in-index position by
   // setting the index to the blocks final location.
@@ -471,30 +500,46 @@
 void ProblemImpl::RemoveResidualBlock(ResidualBlock* residual_block) {
   CHECK_NOTNULL(residual_block);
 
-  // If needed, remove the parameter dependencies on this residual block.
-  if (options_.enable_fast_parameter_block_removal) {
-    const int num_parameter_blocks_for_residual =
-        residual_block->NumParameterBlocks();
-    for (int i = 0; i < num_parameter_blocks_for_residual; ++i) {
-      residual_block->parameter_blocks()[i]
-          ->RemoveResidualBlock(residual_block);
-    }
+  // Verify that residual_block identifies a residual in the current problem.
+  const string residual_not_found_message =
+      StringPrintf("Residual block to remove: %p not found. This usually means "
+                   "one of three things have happened:\n"
+                   " 1) residual_block is uninitialised and points to a random "
+                   "area in memory.\n"
+                   " 2) residual_block represented a residual that was added to"
+                   " the problem, but referred to a parameter block which has "
+                   "since been removed, which removes all residuals which "
+                   "depend on that parameter block, and was thus removed.\n"
+                   " 3) residual_block referred to a residual that has already "
+                   "been removed from the problem (by the user).",
+                   residual_block);
+  if (options_.enable_fast_removal) {
+    CHECK(residual_block_set_.find(residual_block) !=
+          residual_block_set_.end())
+        << residual_not_found_message;
+  } else {
+    // Perform a full search over all current residuals.
+    CHECK(std::find(program_->residual_blocks().begin(),
+                    program_->residual_blocks().end(),
+                    residual_block) != program_->residual_blocks().end())
+        << residual_not_found_message;
   }
-  DeleteBlockInVector(program_->mutable_residual_blocks(), residual_block);
+
+  InternalRemoveResidualBlock(residual_block);
 }
 
 void ProblemImpl::RemoveParameterBlock(double* values) {
   ParameterBlock* parameter_block =
       FindParameterBlockOrDie(parameter_block_map_, values);
 
-  if (options_.enable_fast_parameter_block_removal) {
+  if (options_.enable_fast_removal) {
     // Copy the dependent residuals from the parameter block because the set of
     // dependents will change after each call to RemoveResidualBlock().
     vector<ResidualBlock*> residual_blocks_to_remove(
         parameter_block->mutable_residual_blocks()->begin(),
         parameter_block->mutable_residual_blocks()->end());
     for (int i = 0; i < residual_blocks_to_remove.size(); ++i) {
-      RemoveResidualBlock(residual_blocks_to_remove[i]);
+      InternalRemoveResidualBlock(residual_blocks_to_remove[i]);
     }
   } else {
     // Scan all the residual blocks to remove ones that depend on the parameter
@@ -506,7 +551,7 @@
       const int num_parameter_blocks = residual_block->NumParameterBlocks();
       for (int j = 0; j < num_parameter_blocks; ++j) {
         if (residual_block->parameter_blocks()[j] == parameter_block) {
-          RemoveResidualBlock(residual_block);
+          InternalRemoveResidualBlock(residual_block);
           // The parameter blocks are guaranteed unique.
           break;
         }
@@ -531,6 +576,26 @@
       ->SetParameterization(local_parameterization);
 }
 
+const LocalParameterization* ProblemImpl::GetParameterization(
+    double* values) const {
+  return FindParameterBlockOrDie(parameter_block_map_, values)
+      ->local_parameterization();
+}
+
+void ProblemImpl::SetParameterLowerBound(double* values,
+                                         int index,
+                                         double lower_bound) {
+  FindParameterBlockOrDie(parameter_block_map_, values)
+      ->SetLowerBound(index, lower_bound);
+}
+
+void ProblemImpl::SetParameterUpperBound(double* values,
+                                         int index,
+                                         double upper_bound) {
+  FindParameterBlockOrDie(parameter_block_map_, values)
+      ->SetUpperBound(index, upper_bound);
+}
+
 bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
                            double* cost,
                            vector<double>* residuals,
@@ -634,6 +699,9 @@
     for (int i = 0; i < variable_parameter_blocks.size(); ++i) {
       variable_parameter_blocks[i]->SetVarying();
     }
+
+    program_->SetParameterBlockStatePtrsToUserStatePtrs();
+    program_->SetParameterOffsetsAndIndex();
     return false;
   }
 
@@ -692,6 +760,8 @@
     }
   }
 
+  program_->SetParameterBlockStatePtrsToUserStatePtrs();
+  program_->SetParameterOffsetsAndIndex();
   return status;
 }
 
@@ -721,6 +791,11 @@
       parameter_block_map_, const_cast<double*>(parameter_block))->LocalSize();
 };
 
+bool ProblemImpl::HasParameterBlock(const double* parameter_block) const {
+  return (parameter_block_map_.find(const_cast<double*>(parameter_block)) !=
+          parameter_block_map_.end());
+}
+
 void ProblemImpl::GetParameterBlocks(vector<double*>* parameter_blocks) const {
   CHECK_NOTNULL(parameter_blocks);
   parameter_blocks->resize(0);
@@ -731,6 +806,57 @@
   }
 }
 
+void ProblemImpl::GetResidualBlocks(
+    vector<ResidualBlockId>* residual_blocks) const {
+  CHECK_NOTNULL(residual_blocks);
+  *residual_blocks = program().residual_blocks();
+}
+
+void ProblemImpl::GetParameterBlocksForResidualBlock(
+    const ResidualBlockId residual_block,
+    vector<double*>* parameter_blocks) const {
+  int num_parameter_blocks = residual_block->NumParameterBlocks();
+  CHECK_NOTNULL(parameter_blocks)->resize(num_parameter_blocks);
+  for (int i = 0; i < num_parameter_blocks; ++i) {
+    (*parameter_blocks)[i] =
+        residual_block->parameter_blocks()[i]->mutable_user_state();
+  }
+}
+
+void ProblemImpl::GetResidualBlocksForParameterBlock(
+    const double* values,
+    vector<ResidualBlockId>* residual_blocks) const {
+  ParameterBlock* parameter_block =
+      FindParameterBlockOrDie(parameter_block_map_,
+                              const_cast<double*>(values));
+
+  if (options_.enable_fast_removal) {
+    // In this case the residual blocks that depend on the parameter block are
+    // stored in the parameter block already, so just copy them out.
+    CHECK_NOTNULL(residual_blocks)->resize(
+        parameter_block->mutable_residual_blocks()->size());
+    std::copy(parameter_block->mutable_residual_blocks()->begin(),
+              parameter_block->mutable_residual_blocks()->end(),
+              residual_blocks->begin());
+    return;
+  }
+
+  // Find residual blocks that depend on the parameter block.
+  CHECK_NOTNULL(residual_blocks)->clear();
+  const int num_residual_blocks = NumResidualBlocks();
+  for (int i = 0; i < num_residual_blocks; ++i) {
+    ResidualBlock* residual_block =
+        (*(program_->mutable_residual_blocks()))[i];
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      if (residual_block->parameter_blocks()[j] == parameter_block) {
+        residual_blocks->push_back(residual_block);
+        // The parameter blocks are guaranteed unique.
+        break;
+      }
+    }
+  }
+}
 
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/problem_impl.h b/internal/ceres/problem_impl.h
index ace27f5..7b5547b 100644
--- a/internal/ceres/problem_impl.h
+++ b/internal/ceres/problem_impl.h
@@ -45,6 +45,7 @@
 #include "ceres/internal/macros.h"
 #include "ceres/internal/port.h"
 #include "ceres/internal/scoped_ptr.h"
+#include "ceres/collections_port.h"
 #include "ceres/problem.h"
 #include "ceres/types.h"
 
@@ -63,6 +64,7 @@
 class ProblemImpl {
  public:
   typedef map<double*, ParameterBlock*> ParameterMap;
+  typedef HashSet<ResidualBlock*> ResidualBlockSet;
 
   ProblemImpl();
   explicit ProblemImpl(const Problem::Options& options);
@@ -127,6 +129,10 @@
   void SetParameterBlockVariable(double* values);
   void SetParameterization(double* values,
                            LocalParameterization* local_parameterization);
+  const LocalParameterization* GetParameterization(double* values) const;
+
+  void SetParameterLowerBound(double* values, int index, double lower_bound);
+  void SetParameterUpperBound(double* values, int index, double upper_bound);
 
   bool Evaluate(const Problem::EvaluateOptions& options,
                 double* cost,
@@ -141,15 +147,33 @@
 
   int ParameterBlockSize(const double* parameter_block) const;
   int ParameterBlockLocalSize(const double* parameter_block) const;
+
+  bool HasParameterBlock(const double* parameter_block) const;
+
   void GetParameterBlocks(vector<double*>* parameter_blocks) const;
+  void GetResidualBlocks(vector<ResidualBlockId>* residual_blocks) const;
+
+  void GetParameterBlocksForResidualBlock(
+      const ResidualBlockId residual_block,
+      vector<double*>* parameter_blocks) const;
+
+  void GetResidualBlocksForParameterBlock(
+      const double* values,
+      vector<ResidualBlockId>* residual_blocks) const;
 
   const Program& program() const { return *program_; }
   Program* mutable_program() { return program_.get(); }
 
   const ParameterMap& parameter_map() const { return parameter_block_map_; }
+  const ResidualBlockSet& residual_block_set() const {
+    CHECK(options_.enable_fast_removal)
+        << "Fast removal not enabled, residual_block_set is not maintained.";
+    return residual_block_set_;
+  }
 
  private:
   ParameterBlock* InternalAddParameterBlock(double* values, int size);
+  void InternalRemoveResidualBlock(ResidualBlock* residual_block);
 
   bool InternalEvaluate(Program* program,
                         double* cost,
@@ -171,6 +195,9 @@
   // The mapping from user pointers to parameter blocks.
   map<double*, ParameterBlock*> parameter_block_map_;
 
+  // Iff enable_fast_removal is enabled, contains the current residual blocks.
+  ResidualBlockSet residual_block_set_;
+
   // The actual parameter and residual blocks.
   internal::scoped_ptr<internal::Program> program_;
 
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc
index 0944d3f..db082ec 100644
--- a/internal/ceres/problem_test.cc
+++ b/internal/ceres/problem_test.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -56,7 +56,7 @@
 // Trivial cost function that accepts a single argument.
 class UnaryCostFunction : public CostFunction {
  public:
-  UnaryCostFunction(int num_residuals, int16 parameter_block_size) {
+  UnaryCostFunction(int num_residuals, int32 parameter_block_size) {
     set_num_residuals(num_residuals);
     mutable_parameter_block_sizes()->push_back(parameter_block_size);
   }
@@ -76,8 +76,8 @@
 class BinaryCostFunction: public CostFunction {
  public:
   BinaryCostFunction(int num_residuals,
-                     int16 parameter_block1_size,
-                     int16 parameter_block2_size) {
+                     int32 parameter_block1_size,
+                     int32 parameter_block2_size) {
     set_num_residuals(num_residuals);
     mutable_parameter_block_sizes()->push_back(parameter_block1_size);
     mutable_parameter_block_sizes()->push_back(parameter_block2_size);
@@ -97,9 +97,9 @@
 class TernaryCostFunction: public CostFunction {
  public:
   TernaryCostFunction(int num_residuals,
-                      int16 parameter_block1_size,
-                      int16 parameter_block2_size,
-                      int16 parameter_block3_size) {
+                      int32 parameter_block1_size,
+                      int32 parameter_block2_size,
+                      int32 parameter_block3_size) {
     set_num_residuals(num_residuals);
     mutable_parameter_block_sizes()->push_back(parameter_block1_size);
     mutable_parameter_block_sizes()->push_back(parameter_block2_size);
@@ -139,7 +139,7 @@
   // UnaryCostFunction takes only one parameter, but two are passed.
   EXPECT_DEATH_IF_SUPPORTED(
       problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x, y),
-      "parameter_blocks.size()");
+      "parameter_blocks.size");
 }
 
 TEST(Problem, AddResidualWithDifferentSizesOnTheSameVariableDies) {
@@ -378,7 +378,7 @@
 struct DynamicProblem : public ::testing::TestWithParam<bool> {
   DynamicProblem() {
     Problem::Options options;
-    options.enable_fast_parameter_block_removal = GetParam();
+    options.enable_fast_removal = GetParam();
     problem.reset(new ProblemImpl(options));
   }
 
@@ -390,9 +390,26 @@
   }
 
   bool HasResidualBlock(ResidualBlock* residual_block) {
-    return find(problem->program().residual_blocks().begin(),
-                problem->program().residual_blocks().end(),
-                residual_block) != problem->program().residual_blocks().end();
+    bool have_residual_block = true;
+    if (GetParam()) {
+      have_residual_block &=
+          (problem->residual_block_set().find(residual_block) !=
+           problem->residual_block_set().end());
+    }
+    have_residual_block &=
+        find(problem->program().residual_blocks().begin(),
+             problem->program().residual_blocks().end(),
+             residual_block) != problem->program().residual_blocks().end();
+    return have_residual_block;
+  }
+
+  int NumResidualBlocks() {
+    // Verify that the hash set of residuals is maintained consistently.
+    if (GetParam()) {
+      EXPECT_EQ(problem->residual_block_set().size(),
+                problem->NumResidualBlocks());
+    }
+    return problem->NumResidualBlocks();
   }
 
   // The next block of functions until the end are only for testing the
@@ -502,6 +519,20 @@
       problem.RemoveParameterBlock(y), "Parameter block not found:");
 }
 
+TEST(Problem, GetParameterization) {
+  double x[3];
+  double y[2];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+  problem.AddParameterBlock(y, 2);
+
+  LocalParameterization* parameterization =  new IdentityParameterization(3);
+  problem.SetParameterization(x, parameterization);
+  EXPECT_EQ(problem.GetParameterization(x), parameterization);
+  EXPECT_TRUE(problem.GetParameterization(y) == NULL);
+}
+
 TEST(Problem, ParameterBlockQueryTest) {
   double x[3];
   double y[4];
@@ -525,7 +556,9 @@
   EXPECT_TRUE(parameter_blocks[0] == x || parameter_blocks[0] == y);
   EXPECT_TRUE(parameter_blocks[1] == x || parameter_blocks[1] == y);
 
+  EXPECT_TRUE(problem.HasParameterBlock(x));
   problem.RemoveParameterBlock(x);
+  EXPECT_FALSE(problem.HasParameterBlock(x));
   problem.GetParameterBlocks(&parameter_blocks);
   EXPECT_EQ(parameter_blocks.size(), 1);
   EXPECT_TRUE(parameter_blocks[0] == y);
@@ -536,7 +569,7 @@
   problem->AddParameterBlock(z, 5);
   problem->AddParameterBlock(w, 3);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(0, problem->NumResidualBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
   EXPECT_EQ(y, GetParameterBlock(0)->user_state());
   EXPECT_EQ(z, GetParameterBlock(1)->user_state());
   EXPECT_EQ(w, GetParameterBlock(2)->user_state());
@@ -545,12 +578,12 @@
   // removing it.
   problem->RemoveParameterBlock(w);
   ASSERT_EQ(2, problem->NumParameterBlocks());
-  ASSERT_EQ(0, problem->NumResidualBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
   EXPECT_EQ(y, GetParameterBlock(0)->user_state());
   EXPECT_EQ(z, GetParameterBlock(1)->user_state());
   problem->AddParameterBlock(w, 3);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(0, problem->NumResidualBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
   EXPECT_EQ(y, GetParameterBlock(0)->user_state());
   EXPECT_EQ(z, GetParameterBlock(1)->user_state());
   EXPECT_EQ(w, GetParameterBlock(2)->user_state());
@@ -558,12 +591,12 @@
   // Now remove z, which is in the middle, and add it back.
   problem->RemoveParameterBlock(z);
   ASSERT_EQ(2, problem->NumParameterBlocks());
-  ASSERT_EQ(0, problem->NumResidualBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
   EXPECT_EQ(y, GetParameterBlock(0)->user_state());
   EXPECT_EQ(w, GetParameterBlock(1)->user_state());
   problem->AddParameterBlock(z, 5);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(0, problem->NumResidualBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
   EXPECT_EQ(y, GetParameterBlock(0)->user_state());
   EXPECT_EQ(w, GetParameterBlock(1)->user_state());
   EXPECT_EQ(z, GetParameterBlock(2)->user_state());
@@ -572,20 +605,20 @@
   // y
   problem->RemoveParameterBlock(y);
   ASSERT_EQ(2, problem->NumParameterBlocks());
-  ASSERT_EQ(0, problem->NumResidualBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
   EXPECT_EQ(z, GetParameterBlock(0)->user_state());
   EXPECT_EQ(w, GetParameterBlock(1)->user_state());
 
   // z
   problem->RemoveParameterBlock(z);
   ASSERT_EQ(1, problem->NumParameterBlocks());
-  ASSERT_EQ(0, problem->NumResidualBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
   EXPECT_EQ(w, GetParameterBlock(0)->user_state());
 
   // w
   problem->RemoveParameterBlock(w);
   EXPECT_EQ(0, problem->NumParameterBlocks());
-  EXPECT_EQ(0, problem->NumResidualBlocks());
+  EXPECT_EQ(0, NumResidualBlocks());
 }
 
 TEST_P(DynamicProblem, RemoveParameterBlockWithResiduals) {
@@ -593,7 +626,7 @@
   problem->AddParameterBlock(z, 5);
   problem->AddParameterBlock(w, 3);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(0, problem->NumResidualBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
   EXPECT_EQ(y, GetParameterBlock(0)->user_state());
   EXPECT_EQ(z, GetParameterBlock(1)->user_state());
   EXPECT_EQ(w, GetParameterBlock(2)->user_state());
@@ -616,12 +649,12 @@
   ResidualBlock* r_w   = problem->AddResidualBlock(cost_w,   NULL, w);
 
   EXPECT_EQ(3, problem->NumParameterBlocks());
-  EXPECT_EQ(7, problem->NumResidualBlocks());
+  EXPECT_EQ(7, NumResidualBlocks());
 
   // Remove w, which should remove r_yzw, r_yw, r_zw, r_w.
   problem->RemoveParameterBlock(w);
   ASSERT_EQ(2, problem->NumParameterBlocks());
-  ASSERT_EQ(3, problem->NumResidualBlocks());
+  ASSERT_EQ(3, NumResidualBlocks());
 
   ASSERT_FALSE(HasResidualBlock(r_yzw));
   ASSERT_TRUE (HasResidualBlock(r_yz ));
@@ -634,7 +667,7 @@
   // Remove z, which will remove almost everything else.
   problem->RemoveParameterBlock(z);
   ASSERT_EQ(1, problem->NumParameterBlocks());
-  ASSERT_EQ(1, problem->NumResidualBlocks());
+  ASSERT_EQ(1, NumResidualBlocks());
 
   ASSERT_FALSE(HasResidualBlock(r_yzw));
   ASSERT_FALSE(HasResidualBlock(r_yz ));
@@ -647,7 +680,7 @@
   // Remove y; all gone.
   problem->RemoveParameterBlock(y);
   EXPECT_EQ(0, problem->NumParameterBlocks());
-  EXPECT_EQ(0, problem->NumResidualBlocks());
+  EXPECT_EQ(0, NumResidualBlocks());
 }
 
 TEST_P(DynamicProblem, RemoveResidualBlock) {
@@ -685,14 +718,14 @@
     EXPECT_TRUE(GetParameterBlock(2)->mutable_residual_blocks() == NULL);
   }
   EXPECT_EQ(3, problem->NumParameterBlocks());
-  EXPECT_EQ(7, problem->NumResidualBlocks());
+  EXPECT_EQ(7, NumResidualBlocks());
 
   // Remove each residual and check the state after each removal.
 
   // Remove r_yzw.
   problem->RemoveResidualBlock(r_yzw);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(6, problem->NumResidualBlocks());
+  ASSERT_EQ(6, NumResidualBlocks());
   if (GetParam()) {
     ExpectParameterBlockContains(y, r_yz, r_yw, r_y);
     ExpectParameterBlockContains(z, r_yz, r_zw, r_z);
@@ -708,7 +741,7 @@
   // Remove r_yw.
   problem->RemoveResidualBlock(r_yw);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(5, problem->NumResidualBlocks());
+  ASSERT_EQ(5, NumResidualBlocks());
   if (GetParam()) {
     ExpectParameterBlockContains(y, r_yz, r_y);
     ExpectParameterBlockContains(z, r_yz, r_zw, r_z);
@@ -723,7 +756,7 @@
   // Remove r_zw.
   problem->RemoveResidualBlock(r_zw);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(4, problem->NumResidualBlocks());
+  ASSERT_EQ(4, NumResidualBlocks());
   if (GetParam()) {
     ExpectParameterBlockContains(y, r_yz, r_y);
     ExpectParameterBlockContains(z, r_yz, r_z);
@@ -737,7 +770,7 @@
   // Remove r_w.
   problem->RemoveResidualBlock(r_w);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(3, problem->NumResidualBlocks());
+  ASSERT_EQ(3, NumResidualBlocks());
   if (GetParam()) {
     ExpectParameterBlockContains(y, r_yz, r_y);
     ExpectParameterBlockContains(z, r_yz, r_z);
@@ -750,7 +783,7 @@
   // Remove r_yz.
   problem->RemoveResidualBlock(r_yz);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(2, problem->NumResidualBlocks());
+  ASSERT_EQ(2, NumResidualBlocks());
   if (GetParam()) {
     ExpectParameterBlockContains(y, r_y);
     ExpectParameterBlockContains(z, r_z);
@@ -763,7 +796,7 @@
   problem->RemoveResidualBlock(r_z);
   problem->RemoveResidualBlock(r_y);
   ASSERT_EQ(3, problem->NumParameterBlocks());
-  ASSERT_EQ(0, problem->NumResidualBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
   if (GetParam()) {
     ExpectParameterBlockContains(y);
     ExpectParameterBlockContains(z);
@@ -771,6 +804,191 @@
   }
 }
 
+TEST_P(DynamicProblem, RemoveInvalidResidualBlockDies) {
+  problem->AddParameterBlock(y, 4);
+  problem->AddParameterBlock(z, 5);
+  problem->AddParameterBlock(w, 3);
+
+  // Add all combinations of cost functions.
+  CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
+  CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
+  CostFunction* cost_yw  = new BinaryCostFunction (1, 4, 3);
+  CostFunction* cost_zw  = new BinaryCostFunction (1, 5, 3);
+  CostFunction* cost_y   = new UnaryCostFunction  (1, 4);
+  CostFunction* cost_z   = new UnaryCostFunction  (1, 5);
+  CostFunction* cost_w   = new UnaryCostFunction  (1, 3);
+
+  ResidualBlock* r_yzw = problem->AddResidualBlock(cost_yzw, NULL, y, z, w);
+  ResidualBlock* r_yz  = problem->AddResidualBlock(cost_yz,  NULL, y, z);
+  ResidualBlock* r_yw  = problem->AddResidualBlock(cost_yw,  NULL, y, w);
+  ResidualBlock* r_zw  = problem->AddResidualBlock(cost_zw,  NULL, z, w);
+  ResidualBlock* r_y   = problem->AddResidualBlock(cost_y,   NULL, y);
+  ResidualBlock* r_z   = problem->AddResidualBlock(cost_z,   NULL, z);
+  ResidualBlock* r_w   = problem->AddResidualBlock(cost_w,   NULL, w);
+
+  // Remove r_yzw.
+  problem->RemoveResidualBlock(r_yzw);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(6, NumResidualBlocks());
+  // Attempt to remove r_yzw again.
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_yzw), "not found");
+
+  // Attempt to remove a cast pointer never added as a residual.
+  int trash_memory = 1234;
+  ResidualBlock* invalid_residual =
+      reinterpret_cast<ResidualBlock*>(&trash_memory);
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(invalid_residual),
+                            "not found");
+
+  // Remove a parameter block, which in turn removes the dependent residuals
+  // then attempt to remove them directly.
+  problem->RemoveParameterBlock(z);
+  ASSERT_EQ(2, problem->NumParameterBlocks());
+  ASSERT_EQ(3, NumResidualBlocks());
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_yz), "not found");
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_zw), "not found");
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_z), "not found");
+
+  problem->RemoveResidualBlock(r_yw);
+  problem->RemoveResidualBlock(r_w);
+  problem->RemoveResidualBlock(r_y);
+}
+
+// Check that a null-terminated array, a, has the same elements as b.
+template<typename T>
+void ExpectVectorContainsUnordered(const T* a, const vector<T>& b) {
+  // Compute the size of a.
+  int size = 0;
+  while (a[size]) {
+    ++size;
+  }
+  ASSERT_EQ(size, b.size());
+
+  // Sort a.
+  vector<T> a_sorted(size);
+  copy(a, a + size, a_sorted.begin());
+  sort(a_sorted.begin(), a_sorted.end());
+
+  // Sort b.
+  vector<T> b_sorted(b);
+  sort(b_sorted.begin(), b_sorted.end());
+
+  // Compare.
+  for (int i = 0; i < size; ++i) {
+    EXPECT_EQ(a_sorted[i], b_sorted[i]);
+  }
+}
+
+void ExpectProblemHasResidualBlocks(
+    const ProblemImpl &problem,
+    const ResidualBlockId *expected_residual_blocks) {
+  vector<ResidualBlockId> residual_blocks;
+  problem.GetResidualBlocks(&residual_blocks);
+  ExpectVectorContainsUnordered(expected_residual_blocks, residual_blocks);
+}
+
+TEST_P(DynamicProblem, GetXXXBlocksForYYYBlock) {
+  problem->AddParameterBlock(y, 4);
+  problem->AddParameterBlock(z, 5);
+  problem->AddParameterBlock(w, 3);
+
+  // Add all combinations of cost functions.
+  CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
+  CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
+  CostFunction* cost_yw  = new BinaryCostFunction (1, 4, 3);
+  CostFunction* cost_zw  = new BinaryCostFunction (1, 5, 3);
+  CostFunction* cost_y   = new UnaryCostFunction  (1, 4);
+  CostFunction* cost_z   = new UnaryCostFunction  (1, 5);
+  CostFunction* cost_w   = new UnaryCostFunction  (1, 3);
+
+  ResidualBlock* r_yzw = problem->AddResidualBlock(cost_yzw, NULL, y, z, w);
+  {
+    ResidualBlockId expected_residuals[] = {r_yzw, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_yz  = problem->AddResidualBlock(cost_yz,  NULL, y, z);
+  {
+    ResidualBlockId expected_residuals[] = {r_yzw, r_yz, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_yw  = problem->AddResidualBlock(cost_yw,  NULL, y, w);
+  {
+    ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_zw  = problem->AddResidualBlock(cost_zw,  NULL, z, w);
+  {
+    ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_y   = problem->AddResidualBlock(cost_y,   NULL, y);
+  {
+    ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, r_y, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_z   = problem->AddResidualBlock(cost_z,   NULL, z);
+  {
+    ResidualBlock *expected_residuals[] = {
+      r_yzw, r_yz, r_yw, r_zw, r_y, r_z, 0
+    };
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_w   = problem->AddResidualBlock(cost_w,   NULL, w);
+  {
+    ResidualBlock *expected_residuals[] = {
+      r_yzw, r_yz, r_yw, r_zw, r_y, r_z, r_w, 0
+    };
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+
+  vector<double*> parameter_blocks;
+  vector<ResidualBlockId> residual_blocks;
+
+  // Check GetResidualBlocksForParameterBlock() for all parameter blocks.
+  struct GetResidualBlocksForParameterBlockTestCase {
+    double* parameter_block;
+    ResidualBlockId expected_residual_blocks[10];
+  };
+  GetResidualBlocksForParameterBlockTestCase get_residual_blocks_cases[] = {
+    { y, { r_yzw, r_yz, r_yw, r_y, NULL} },
+    { z, { r_yzw, r_yz, r_zw, r_z, NULL} },
+    { w, { r_yzw, r_yw, r_zw, r_w, NULL} },
+    { NULL }
+  };
+  for (int i = 0; get_residual_blocks_cases[i].parameter_block; ++i) {
+    problem->GetResidualBlocksForParameterBlock(
+        get_residual_blocks_cases[i].parameter_block,
+        &residual_blocks);
+    ExpectVectorContainsUnordered(
+        get_residual_blocks_cases[i].expected_residual_blocks,
+        residual_blocks);
+  }
+
+  // Check GetParameterBlocksForResidualBlock() for all residual blocks.
+  struct GetParameterBlocksForResidualBlockTestCase {
+    ResidualBlockId residual_block;
+    double* expected_parameter_blocks[10];
+  };
+  GetParameterBlocksForResidualBlockTestCase get_parameter_blocks_cases[] = {
+    { r_yzw, { y, z, w, NULL } },
+    { r_yz , { y, z, NULL } },
+    { r_yw , { y, w, NULL } },
+    { r_zw , { z, w, NULL } },
+    { r_y  , { y, NULL } },
+    { r_z  , { z, NULL } },
+    { r_w  , { w, NULL } },
+    { NULL }
+  };
+  for (int i = 0; get_parameter_blocks_cases[i].residual_block; ++i) {
+    problem->GetParameterBlocksForResidualBlock(
+        get_parameter_blocks_cases[i].residual_block,
+        &parameter_blocks);
+    ExpectVectorContainsUnordered(
+        get_parameter_blocks_cases[i].expected_parameter_blocks,
+        parameter_blocks);
+  }
+}
+
 INSTANTIATE_TEST_CASE_P(OptionsInstantiation,
                         DynamicProblem,
                         ::testing::Values(true, false));
@@ -862,7 +1080,9 @@
                                   parameters_));
   }
 
-
+  void TearDown() {
+    EXPECT_TRUE(problem_.program().IsValid());
+  }
 
   void EvaluateAndCompare(const Problem::EvaluateOptions& options,
                           const int expected_num_rows,
diff --git a/internal/ceres/program.cc b/internal/ceres/program.cc
index 82d76d3..1d0a157 100644
--- a/internal/ceres/program.cc
+++ b/internal/ceres/program.cc
@@ -32,6 +32,7 @@
 
 #include <map>
 #include <vector>
+#include "ceres/array_utils.h"
 #include "ceres/casts.h"
 #include "ceres/compressed_row_sparse_matrix.h"
 #include "ceres/cost_function.h"
@@ -44,6 +45,7 @@
 #include "ceres/problem.h"
 #include "ceres/residual_block.h"
 #include "ceres/stl_util.h"
+#include "ceres/triplet_sparse_matrix.h"
 
 namespace ceres {
 namespace internal {
@@ -140,6 +142,289 @@
   }
 }
 
+bool Program::IsValid() const {
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    const ResidualBlock* residual_block = residual_blocks_[i];
+    if (residual_block->index() != i) {
+      LOG(WARNING) << "Residual block: " << i
+                   << " has incorrect index: " << residual_block->index();
+      return false;
+    }
+  }
+
+  int state_offset = 0;
+  int delta_offset = 0;
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    const ParameterBlock* parameter_block = parameter_blocks_[i];
+    if (parameter_block->index() != i ||
+        parameter_block->state_offset() != state_offset ||
+        parameter_block->delta_offset() != delta_offset) {
+      LOG(WARNING) << "Parameter block: " << i
+                   << "has incorrect indexing information: "
+                   << parameter_block->ToString();
+      return false;
+    }
+
+    state_offset += parameter_blocks_[i]->Size();
+    delta_offset += parameter_blocks_[i]->LocalSize();
+  }
+
+  return true;
+}
+
+bool Program::ParameterBlocksAreFinite(string* message) const {
+  CHECK_NOTNULL(message);
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    const ParameterBlock* parameter_block = parameter_blocks_[i];
+    const double* array = parameter_block->user_state();
+    const int size = parameter_block->Size();
+    const int invalid_index = FindInvalidValue(size, array);
+    if (invalid_index != size) {
+      *message = StringPrintf(
+          "ParameterBlock: %p with size %d has at least one invalid value.\n"
+          "First invalid value is at index: %d.\n"
+          "Parameter block values: ",
+          array, size, invalid_index);
+      AppendArrayToString(size, array, message);
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Program::IsBoundsConstrained() const {
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    const ParameterBlock* parameter_block = parameter_blocks_[i];
+    if (parameter_block->IsConstant()) {
+      continue;
+    }
+    const int size = parameter_block->Size();
+    for (int j = 0; j < size; ++j) {
+      const double lower_bound = parameter_block->LowerBoundForParameter(j);
+      const double upper_bound = parameter_block->UpperBoundForParameter(j);
+      if (lower_bound > -std::numeric_limits<double>::max() ||
+          upper_bound < std::numeric_limits<double>::max()) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+bool Program::IsFeasible(string* message) const {
+  CHECK_NOTNULL(message);
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    const ParameterBlock* parameter_block = parameter_blocks_[i];
+    const double* parameters = parameter_block->user_state();
+    const int size = parameter_block->Size();
+    if (parameter_block->IsConstant()) {
+      // Constant parameter blocks must start in the feasible region
+      // to ultimately produce a feasible solution, since Ceres cannot
+      // change them.
+      for (int j = 0; j < size; ++j) {
+        const double lower_bound = parameter_block->LowerBoundForParameter(j);
+        const double upper_bound = parameter_block->UpperBoundForParameter(j);
+        if (parameters[j] < lower_bound || parameters[j] > upper_bound) {
+          *message = StringPrintf(
+              "ParameterBlock: %p with size %d has at least one infeasible "
+              "value."
+              "\nFirst infeasible value is at index: %d."
+              "\nLower bound: %e, value: %e, upper bound: %e"
+              "\nParameter block values: ",
+              parameters, size, j, lower_bound, parameters[j], upper_bound);
+          AppendArrayToString(size, parameters, message);
+          return false;
+        }
+      }
+    } else {
+      // Variable parameter blocks must have non-empty feasible
+      // regions, otherwise there is no way to produce a feasible
+      // solution.
+      for (int j = 0; j < size; ++j) {
+        const double lower_bound = parameter_block->LowerBoundForParameter(j);
+        const double upper_bound = parameter_block->UpperBoundForParameter(j);
+        if (lower_bound >= upper_bound) {
+          *message = StringPrintf(
+              "ParameterBlock: %p with size %d has at least one infeasible "
+              "bound."
+              "\nFirst infeasible bound is at index: %d."
+              "\nLower bound: %e, upper bound: %e"
+              "\nParameter block values: ",
+              parameters, size, j, lower_bound, upper_bound);
+          AppendArrayToString(size, parameters, message);
+          return false;
+        }
+      }
+    }
+  }
+
+  return true;
+}
+
+Program* Program::CreateReducedProgram(vector<double*>* removed_parameter_blocks,
+                                       double* fixed_cost,
+                                       string* error) const {
+  CHECK_NOTNULL(removed_parameter_blocks);
+  CHECK_NOTNULL(fixed_cost);
+  CHECK_NOTNULL(error);
+
+  scoped_ptr<Program> reduced_program(new Program(*this));
+  if (!reduced_program->RemoveFixedBlocks(removed_parameter_blocks,
+                                          fixed_cost,
+                                          error)) {
+    return NULL;
+  }
+
+  reduced_program->SetParameterOffsetsAndIndex();
+  return reduced_program.release();
+}
+
+bool Program::RemoveFixedBlocks(vector<double*>* removed_parameter_blocks,
+                                double* fixed_cost,
+                                string* error) {
+  CHECK_NOTNULL(removed_parameter_blocks);
+  CHECK_NOTNULL(fixed_cost);
+  CHECK_NOTNULL(error);
+
+  scoped_array<double> residual_block_evaluate_scratch;
+  residual_block_evaluate_scratch.reset(
+      new double[MaxScratchDoublesNeededForEvaluate()]);
+  *fixed_cost = 0.0;
+
+  // Mark all the parameters as unused. Abuse the index member of the
+  // parameter blocks for the marking.
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    parameter_blocks_[i]->set_index(-1);
+  }
+
+  // Filter out residual blocks that have all-constant parameters, and
+  // mark all the parameter blocks that appear in residuals.
+  int num_active_residual_blocks = 0;
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    ResidualBlock* residual_block = residual_blocks_[i];
+    int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+    // Determine if the residual block is fixed, and also mark varying
+    // parameters that appear in the residual block.
+    bool all_constant = true;
+    for (int k = 0; k < num_parameter_blocks; k++) {
+      ParameterBlock* parameter_block = residual_block->parameter_blocks()[k];
+      if (!parameter_block->IsConstant()) {
+        all_constant = false;
+        parameter_block->set_index(1);
+      }
+    }
+
+    if (!all_constant) {
+      residual_blocks_[num_active_residual_blocks++] = residual_block;
+      continue;
+    }
+
+    // The residual is constant and will be removed, so its cost is
+    // added to the variable fixed_cost.
+    double cost = 0.0;
+    if (!residual_block->Evaluate(true,
+                                  &cost,
+                                  NULL,
+                                  NULL,
+                                  residual_block_evaluate_scratch.get())) {
+      *error = StringPrintf("Evaluation of the residual %d failed during "
+                            "removal of fixed residual blocks.", i);
+      return false;
+    }
+    *fixed_cost += cost;
+  }
+  residual_blocks_.resize(num_active_residual_blocks);
+
+  // Filter out unused or fixed parameter blocks.
+  int num_active_parameter_blocks = 0;
+  removed_parameter_blocks->clear();
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    ParameterBlock* parameter_block = parameter_blocks_[i];
+    if (parameter_block->index() == -1) {
+      removed_parameter_blocks->push_back(parameter_block->mutable_user_state());
+    } else {
+      parameter_blocks_[num_active_parameter_blocks++] = parameter_block;
+    }
+  }
+  parameter_blocks_.resize(num_active_parameter_blocks);
+
+  if (!(((NumResidualBlocks() == 0) &&
+         (NumParameterBlocks() == 0)) ||
+        ((NumResidualBlocks() != 0) &&
+         (NumParameterBlocks() != 0)))) {
+    *error =  "Congratulations, you found a bug in Ceres. Please report it.";
+    return false;
+  }
+
+  return true;
+}
+
+bool Program::IsParameterBlockSetIndependent(const set<double*>& independent_set) const {
+  // Loop over each residual block and ensure that no two parameter
+  // blocks in the same residual block are part of
+  // independent_set, as that would violate the assumption that it
+  // is an independent set in the Hessian matrix.
+  for (vector<ResidualBlock*>::const_iterator it = residual_blocks_.begin();
+       it != residual_blocks_.end();
+       ++it) {
+    ParameterBlock* const* parameter_blocks = (*it)->parameter_blocks();
+    const int num_parameter_blocks = (*it)->NumParameterBlocks();
+    int count = 0;
+    for (int i = 0; i < num_parameter_blocks; ++i) {
+      count += independent_set.count(
+          parameter_blocks[i]->mutable_user_state());
+    }
+    if (count > 1) {
+      return false;
+    }
+  }
+  return true;
+}
+
+TripletSparseMatrix* Program::CreateJacobianBlockSparsityTranspose() const {
+  // Matrix to store the block sparsity structure of the Jacobian.
+  TripletSparseMatrix* tsm =
+      new TripletSparseMatrix(NumParameterBlocks(),
+                              NumResidualBlocks(),
+                              10 * NumResidualBlocks());
+  int num_nonzeros = 0;
+  int* rows = tsm->mutable_rows();
+  int* cols = tsm->mutable_cols();
+  double* values = tsm->mutable_values();
+
+  for (int c = 0; c < residual_blocks_.size(); ++c) {
+    const ResidualBlock* residual_block = residual_blocks_[c];
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+    ParameterBlock* const* parameter_blocks =
+        residual_block->parameter_blocks();
+
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      if (parameter_blocks[j]->IsConstant()) {
+        continue;
+      }
+
+      // Re-size the matrix if needed.
+      if (num_nonzeros >= tsm->max_num_nonzeros()) {
+        tsm->set_num_nonzeros(num_nonzeros);
+        tsm->Reserve(2 * num_nonzeros);
+        rows = tsm->mutable_rows();
+        cols = tsm->mutable_cols();
+        values = tsm->mutable_values();
+      }
+
+      const int r = parameter_blocks[j]->index();
+      rows[num_nonzeros] = r;
+      cols[num_nonzeros] = c;
+      values[num_nonzeros] = 1.0;
+      ++num_nonzeros;
+    }
+  }
+
+  tsm->set_num_nonzeros(num_nonzeros);
+  return tsm;
+}
+
 int Program::NumResidualBlocks() const {
   return residual_blocks_.size();
 }
diff --git a/internal/ceres/program.h b/internal/ceres/program.h
index 5002b7e..c7b22c4 100644
--- a/internal/ceres/program.h
+++ b/internal/ceres/program.h
@@ -31,6 +31,7 @@
 #ifndef CERES_INTERNAL_PROGRAM_H_
 #define CERES_INTERNAL_PROGRAM_H_
 
+#include <set>
 #include <string>
 #include <vector>
 #include "ceres/internal/port.h"
@@ -41,6 +42,7 @@
 class ParameterBlock;
 class ProblemImpl;
 class ResidualBlock;
+class TripletSparseMatrix;
 
 // A nonlinear least squares optimization problem. This is different from the
 // similarly-named "Problem" object, which offers a mutation interface for
@@ -99,6 +101,51 @@
   // position of the parameter in the state and delta vector respectively.
   void SetParameterOffsetsAndIndex();
 
+  // Check if the internal state of the program (the indexing and the
+  // offsets) are correct.
+  bool IsValid() const;
+
+  bool ParameterBlocksAreFinite(string* message) const;
+
+  // Returns true if the program has any non-constant parameter blocks
+  // which have non-trivial bounds constraints.
+  bool IsBoundsConstrained() const;
+
+  // Returns false, if the program has any constant parameter blocks
+  // which are not feasible, or any variable parameter blocks which
+  // have a lower bound greater than or equal to the upper bound.
+  bool IsFeasible(string* message) const;
+
+  // Loop over each residual block and ensure that no two parameter
+  // blocks in the same residual block are part of
+  // independent_set, as that would violate the assumption that it
+  // is an independent set in the Hessian matrix.
+  bool IsParameterBlockSetIndependent(const set<double*>& independent_set) const;
+
+  // Create a TripletSparseMatrix which contains the zero-one
+  // structure corresponding to the block sparsity of the transpose of
+  // the Jacobian matrix.
+  //
+  // Caller owns the result.
+  TripletSparseMatrix* CreateJacobianBlockSparsityTranspose() const;
+
+  // Creates a copy of this program, removing constant parameter
+  // blocks and residual blocks with no varying parameter blocks,
+  // while preserving their relative order.
+  //
+  // removed_parameter_blocks on exit will contain the list of
+  // parameter blocks that were removed.
+  //
+  // fixed_cost will be equal to the sum of the costs of the residual
+  // blocks that were removed.
+  //
+  // If there was a problem, then the function will return a NULL
+  // pointer and error will contain a human readable description of
+  // the problem.
+  Program* CreateReducedProgram(vector<double*>* removed_parameter_blocks,
+                                double* fixed_cost,
+                                string* error) const;
+
   // See problem.h for what these do.
   int NumParameterBlocks() const;
   int NumParameters() const;
@@ -116,6 +163,21 @@
   string ToString() const;
 
  private:
+  // Remove constant parameter blocks and residual blocks with no
+  // varying parameter blocks while preserving their relative order.
+  //
+  // removed_parameter_blocks on exit will contain the list of
+  // parameter blocks that were removed.
+  //
+  // fixed_cost will be equal to the sum of the costs of the residual
+  // blocks that were removed.
+  //
+  // If there was a problem, then the function will return false and
+  // error will contain a human readable description of the problem.
+  bool RemoveFixedBlocks(vector<double*>* removed_parameter_blocks,
+                         double* fixed_cost,
+                         string* message);
+
   // The Program does not own the ParameterBlock or ResidualBlock objects.
   vector<ParameterBlock*> parameter_blocks_;
   vector<ResidualBlock*> residual_blocks_;
diff --git a/internal/ceres/program_evaluator.h b/internal/ceres/program_evaluator.h
index 8aa2a39..672c233 100644
--- a/internal/ceres/program_evaluator.h
+++ b/internal/ceres/program_evaluator.h
@@ -79,6 +79,9 @@
 #ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
 #define CERES_INTERNAL_PROGRAM_EVALUATOR_H_
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifdef CERES_USE_OPENMP
 #include <omp.h>
 #endif
@@ -97,7 +100,13 @@
 namespace ceres {
 namespace internal {
 
-template<typename EvaluatePreparer, typename JacobianWriter>
+struct NullJacobianFinalizer {
+  void operator()(SparseMatrix* jacobian, int num_parameters) {}
+};
+
+template<typename EvaluatePreparer,
+         typename JacobianWriter,
+         typename JacobianFinalizer = NullJacobianFinalizer>
 class ProgramEvaluator : public Evaluator {
  public:
   ProgramEvaluator(const Evaluator::Options &options, Program* program)
@@ -244,9 +253,10 @@
     }
 
     if (!abort) {
+      const int num_parameters = program_->NumEffectiveParameters();
+
       // Sum the cost and gradient (if requested) from each thread.
       (*cost) = 0.0;
-      int num_parameters = program_->NumEffectiveParameters();
       if (gradient != NULL) {
         VectorRef(gradient, num_parameters).setZero();
       }
@@ -257,6 +267,15 @@
               VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
         }
       }
+
+      // Finalize the Jacobian if it is available.
+      // `num_parameters` is passed to the finalizer so that extra
+      // storage can be reserved for additional diagonal elements if
+      // necessary.
+      if (jacobian != NULL) {
+        JacobianFinalizer f;
+        f(jacobian, num_parameters);
+      }
     }
     return !abort;
   }
diff --git a/internal/ceres/program_test.cc b/internal/ceres/program_test.cc
new file mode 100644
index 0000000..10bfa12
--- /dev/null
+++ b/internal/ceres/program_test.cc
@@ -0,0 +1,431 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/program.h"
+
+#include <limits>
+#include <cmath>
+#include <vector>
+#include "ceres/sized_cost_function.h"
+#include "ceres/problem_impl.h"
+#include "ceres/residual_block.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// A cost function that simply returns its argument.
+class UnaryIdentityCostFunction : public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    residuals[0] = parameters[0][0];
+    if (jacobians != NULL && jacobians[0] != NULL) {
+      jacobians[0][0] = 1.0;
+    }
+    return true;
+  }
+};
+
+// Templated base class for the CostFunction signatures.
+template <int kNumResiduals, int N0, int N1, int N2>
+class MockCostFunctionBase : public
+SizedCostFunction<kNumResiduals, N0, N1, N2> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < kNumResiduals; ++i) {
+      residuals[i] = kNumResiduals +  N0 + N1 + N2;
+    }
+    return true;
+  }
+};
+
+class UnaryCostFunction : public MockCostFunctionBase<2, 1, 0, 0> {};
+class BinaryCostFunction : public MockCostFunctionBase<2, 1, 1, 0> {};
+class TernaryCostFunction : public MockCostFunctionBase<2, 1, 1, 1> {};
+
+TEST(Program, RemoveFixedBlocksNothingConstant) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+  problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  scoped_ptr<Program> reduced_program(
+      CHECK_NOTNULL(problem
+                    .program()
+                    .CreateReducedProgram(&removed_parameter_blocks,
+                                          &fixed_cost,
+                                          &message)));
+
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 3);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 3);
+  EXPECT_EQ(removed_parameter_blocks.size(), 0);
+  EXPECT_EQ(fixed_cost, 0.0);
+}
+
+TEST(Program, RemoveFixedBlocksAllParameterBlocksConstant) {
+  ProblemImpl problem;
+  double x = 1.0;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+  problem.SetParameterBlockConstant(&x);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  scoped_ptr<Program> reduced_program(
+      CHECK_NOTNULL(problem
+                    .program()
+                    .CreateReducedProgram(&removed_parameter_blocks,
+                                          &fixed_cost,
+                                          &message)));
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 0);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 0);
+  EXPECT_EQ(removed_parameter_blocks.size(), 1);
+  EXPECT_EQ(removed_parameter_blocks[0], &x);
+  EXPECT_EQ(fixed_cost, 9.0);
+}
+
+
+TEST(Program, RemoveFixedBlocksNoResidualBlocks) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  scoped_ptr<Program> reduced_program(
+      CHECK_NOTNULL(problem
+                    .program()
+                    .CreateReducedProgram(&removed_parameter_blocks,
+                                          &fixed_cost,
+                                          &message)));
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 0);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 0);
+  EXPECT_EQ(removed_parameter_blocks.size(), 3);
+  EXPECT_EQ(fixed_cost, 0.0);
+}
+
+TEST(Program, RemoveFixedBlocksOneParameterBlockConstant) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+  problem.SetParameterBlockConstant(&x);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  scoped_ptr<Program> reduced_program(
+      CHECK_NOTNULL(problem
+                    .program()
+                    .CreateReducedProgram(&removed_parameter_blocks,
+                                          &fixed_cost,
+                                          &message)));
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 1);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 1);
+}
+
+TEST(Program, RemoveFixedBlocksNumEliminateBlocks) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+  problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+  problem.SetParameterBlockConstant(&x);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  scoped_ptr<Program> reduced_program(
+      CHECK_NOTNULL(problem
+                    .program()
+                    .CreateReducedProgram(&removed_parameter_blocks,
+                                          &fixed_cost,
+                                          &message)));
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 2);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 2);
+}
+
+TEST(Program, RemoveFixedBlocksFixedCost) {
+  ProblemImpl problem;
+  double x = 1.23;
+  double y = 4.56;
+  double z = 7.89;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+  problem.AddResidualBlock(new UnaryIdentityCostFunction(), NULL, &x);
+  problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+  problem.SetParameterBlockConstant(&x);
+
+  ResidualBlock *expected_removed_block = problem.program().residual_blocks()[0];
+  scoped_array<double> scratch(
+      new double[expected_removed_block->NumScratchDoublesForEvaluate()]);
+  double expected_fixed_cost;
+  expected_removed_block->Evaluate(true,
+                                   &expected_fixed_cost,
+                                   NULL,
+                                   NULL,
+                                   scratch.get());
+
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  scoped_ptr<Program> reduced_program(
+      CHECK_NOTNULL(problem
+                    .program()
+                    .CreateReducedProgram(&removed_parameter_blocks,
+                                          &fixed_cost,
+                                          &message)));
+
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 2);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 2);
+  EXPECT_DOUBLE_EQ(fixed_cost, expected_fixed_cost);
+}
+
+TEST(Program, CreateJacobianBlockSparsityTranspose) {
+  ProblemImpl problem;
+  double x[2];
+  double y[3];
+  double z;
+
+  problem.AddParameterBlock(x, 2);
+  problem.AddParameterBlock(y, 3);
+  problem.AddParameterBlock(&z, 1);
+
+  problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 0, 0>(), NULL, x);
+  problem.AddResidualBlock(new MockCostFunctionBase<3, 1, 2, 0>(), NULL, &z, x);
+  problem.AddResidualBlock(new MockCostFunctionBase<4, 1, 3, 0>(), NULL, &z, y);
+  problem.AddResidualBlock(new MockCostFunctionBase<5, 1, 3, 0>(), NULL, &z, y);
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 1, 0>(), NULL, x, &z);
+  problem.AddResidualBlock(new MockCostFunctionBase<2, 1, 3, 0>(), NULL, &z, y);
+  problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 1, 0>(), NULL, x, &z);
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 3, 0, 0>(), NULL, y);
+
+  TripletSparseMatrix expected_block_sparse_jacobian(3, 8, 14);
+  {
+    int* rows = expected_block_sparse_jacobian.mutable_rows();
+    int* cols = expected_block_sparse_jacobian.mutable_cols();
+    double* values = expected_block_sparse_jacobian.mutable_values();
+    rows[0] = 0;
+    cols[0] = 0;
+
+    rows[1] = 2;
+    cols[1] = 1;
+    rows[2] = 0;
+    cols[2] = 1;
+
+    rows[3] = 2;
+    cols[3] = 2;
+    rows[4] = 1;
+    cols[4] = 2;
+
+    rows[5] = 2;
+    cols[5] = 3;
+    rows[6] = 1;
+    cols[6] = 3;
+
+    rows[7] = 0;
+    cols[7] = 4;
+    rows[8] = 2;
+    cols[8] = 4;
+
+    rows[9] = 2;
+    cols[9] = 5;
+    rows[10] = 1;
+    cols[10] = 5;
+
+    rows[11] = 0;
+    cols[11] = 6;
+    rows[12] = 2;
+    cols[12] = 6;
+
+    rows[13] = 1;
+    cols[13] = 7;
+    fill(values, values + 14, 1.0);
+    expected_block_sparse_jacobian.set_num_nonzeros(14);
+  }
+
+  Program* program = problem.mutable_program();
+  program->SetParameterOffsetsAndIndex();
+
+  scoped_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
+      program->CreateJacobianBlockSparsityTranspose());
+
+  Matrix expected_dense_jacobian;
+  expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
+
+  Matrix actual_dense_jacobian;
+  actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
+  EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
+}
+
+template <int kNumResiduals, int kNumParameterBlocks>
+class NumParameterBlocksCostFunction : public CostFunction {
+ public:
+  NumParameterBlocksCostFunction() {
+    set_num_residuals(kNumResiduals);
+    for (int i = 0; i < kNumParameterBlocks; ++i) {
+      mutable_parameter_block_sizes()->push_back(1);
+    }
+  }
+
+  virtual ~NumParameterBlocksCostFunction() {
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    return true;
+  }
+};
+
+TEST(Program, ReallocationInCreateJacobianBlockSparsityTranspose) {
+  // CreateJacobianBlockSparsityTranspose starts with a conservative
+  // estimate of the size of the sparsity pattern. This test ensures
+  // that when those estimates are violated, the reallocation/resizing
+  // logic works correctly.
+
+  ProblemImpl problem;
+  double x[20];
+
+  vector<double*> parameter_blocks;
+  for (int i = 0; i < 20; ++i) {
+    problem.AddParameterBlock(x + i, 1);
+    parameter_blocks.push_back(x + i);
+  }
+
+  problem.AddResidualBlock(new NumParameterBlocksCostFunction<1, 20>(),
+                           NULL,
+                           parameter_blocks);
+
+  TripletSparseMatrix expected_block_sparse_jacobian(20, 1, 20);
+  {
+    int* rows = expected_block_sparse_jacobian.mutable_rows();
+    int* cols = expected_block_sparse_jacobian.mutable_cols();
+    for (int i = 0; i < 20; ++i) {
+      rows[i] = i;
+      cols[i] = 0;
+    }
+
+    double* values = expected_block_sparse_jacobian.mutable_values();
+    fill(values, values + 20, 1.0);
+    expected_block_sparse_jacobian.set_num_nonzeros(20);
+  }
+
+  Program* program = problem.mutable_program();
+  program->SetParameterOffsetsAndIndex();
+
+  scoped_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
+      program->CreateJacobianBlockSparsityTranspose());
+
+  Matrix expected_dense_jacobian;
+  expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
+
+  Matrix actual_dense_jacobian;
+  actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
+  EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
+}
+
+TEST(Program, ProblemHasNanParameterBlocks) {
+  ProblemImpl problem;
+  double x[2];
+  x[0] = 1.0;
+  x[1] = std::numeric_limits<double>::quiet_NaN();
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 0, 0>(), NULL, x);
+  string error;
+  EXPECT_FALSE(problem.program().ParameterBlocksAreFinite(&error));
+  EXPECT_NE(error.find("has at least one invalid value"),
+            string::npos) << error;
+}
+
+TEST(Program, InfeasibleParameterBlock) {
+  ProblemImpl problem;
+  double x[] = {0.0, 0.0};
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 0, 0>(), NULL, x);
+  problem.SetParameterLowerBound(x, 0, 2.0);
+  problem.SetParameterUpperBound(x, 0, 1.0);
+  string error;
+  EXPECT_FALSE(problem.program().IsFeasible(&error));
+  EXPECT_NE(error.find("infeasible bound"), string::npos) << error;
+}
+
+TEST(Program, InfeasibleConstantParameterBlock) {
+  ProblemImpl problem;
+  double x[] = {0.0, 0.0};
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 0, 0>(), NULL, x);
+  problem.SetParameterLowerBound(x, 0, 1.0);
+  problem.SetParameterUpperBound(x, 0, 2.0);
+  problem.SetParameterBlockConstant(x);
+  string error;
+  EXPECT_FALSE(problem.program().IsFeasible(&error));
+  EXPECT_NE(error.find("infeasible value"), string::npos) << error;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/reorder_program.cc b/internal/ceres/reorder_program.cc
new file mode 100644
index 0000000..162bfb8
--- /dev/null
+++ b/internal/ceres/reorder_program.cc
@@ -0,0 +1,434 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/reorder_program.h"
+
+#include <algorithm>
+#include <numeric>
+#include <vector>
+
+#include "ceres/cxsparse.h"
+#include "ceres/internal/port.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/parameter_block.h"
+#include "ceres/parameter_block_ordering.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/solver.h"
+#include "ceres/suitesparse.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Find the minimum index of any parameter block to the given residual.
+// Parameter blocks that have indices greater than num_eliminate_blocks are
+// considered to have an index equal to num_eliminate_blocks.
+static int MinParameterBlock(const ResidualBlock* residual_block,
+                             int num_eliminate_blocks) {
+  int min_parameter_block_position = num_eliminate_blocks;
+  for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
+    ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
+    if (!parameter_block->IsConstant()) {
+      CHECK_NE(parameter_block->index(), -1)
+          << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
+          << "This is a Ceres bug; please contact the developers!";
+      min_parameter_block_position = std::min(parameter_block->index(),
+                                              min_parameter_block_position);
+    }
+  }
+  return min_parameter_block_position;
+}
+
+void OrderingForSparseNormalCholeskyUsingSuiteSparse(
+    const TripletSparseMatrix& tsm_block_jacobian_transpose,
+    const vector<ParameterBlock*>& parameter_blocks,
+    const ParameterBlockOrdering& parameter_block_ordering,
+    int* ordering) {
+#ifdef CERES_NO_SUITESPARSE
+  LOG(FATAL) << "Congratulations, you found a Ceres bug! "
+             << "Please report this error to the developers.";
+#else
+  SuiteSparse ss;
+  cholmod_sparse* block_jacobian_transpose =
+      ss.CreateSparseMatrix(
+          const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+
+  // If CAMD is not available, or the user did not supply a useful
+  // ordering, then just use regular AMD.
+  if (parameter_block_ordering.NumGroups() <= 1 ||
+      !SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
+    ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
+  } else {
+    vector<int> constraints;
+    for (int i = 0; i < parameter_blocks.size(); ++i) {
+      constraints.push_back(
+          parameter_block_ordering.GroupId(
+              parameter_blocks[i]->mutable_user_state()));
+    }
+    ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
+                                                   &constraints[0],
+                                                   ordering);
+  }
+
+  ss.Free(block_jacobian_transpose);
+#endif  // CERES_NO_SUITESPARSE
+}
+
+void OrderingForSparseNormalCholeskyUsingCXSparse(
+    const TripletSparseMatrix& tsm_block_jacobian_transpose,
+    int* ordering) {
+#ifdef CERES_NO_CXSPARSE
+  LOG(FATAL) << "Congratulations, you found a Ceres bug! "
+             << "Please report this error to the developers.";
+#else  // CERES_NO_CXSPARSE
+  // CXSparse works with J'J instead of J'. So compute the block
+  // sparsity for J'J and compute an approximate minimum degree
+  // ordering.
+  CXSparse cxsparse;
+  cs_di* block_jacobian_transpose;
+  block_jacobian_transpose =
+      cxsparse.CreateSparseMatrix(
+            const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+  cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
+  cs_di* block_hessian =
+      cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
+  cxsparse.Free(block_jacobian);
+  cxsparse.Free(block_jacobian_transpose);
+
+  cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, ordering);
+  cxsparse.Free(block_hessian);
+#endif  // CERES_NO_CXSPARSE
+}
+
+}  // namespace
+
+bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
+                   const ParameterBlockOrdering& ordering,
+                   Program* program,
+                   string* error) {
+  const int num_parameter_blocks =  program->NumParameterBlocks();
+  if (ordering.NumElements() != num_parameter_blocks) {
+    *error = StringPrintf("User specified ordering does not have the same "
+                          "number of parameters as the problem. The problem"
+                          "has %d blocks while the ordering has %d blocks.",
+                          num_parameter_blocks,
+                          ordering.NumElements());
+    return false;
+  }
+
+  vector<ParameterBlock*>* parameter_blocks =
+      program->mutable_parameter_blocks();
+  parameter_blocks->clear();
+
+  const map<int, set<double*> >& groups =
+      ordering.group_to_elements();
+
+  for (map<int, set<double*> >::const_iterator group_it = groups.begin();
+       group_it != groups.end();
+       ++group_it) {
+    const set<double*>& group = group_it->second;
+    for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
+         parameter_block_ptr_it != group.end();
+         ++parameter_block_ptr_it) {
+      ProblemImpl::ParameterMap::const_iterator parameter_block_it =
+          parameter_map.find(*parameter_block_ptr_it);
+      if (parameter_block_it == parameter_map.end()) {
+        *error = StringPrintf("User specified ordering contains a pointer "
+                              "to a double that is not a parameter block in "
+                              "the problem. The invalid double is in group: %d",
+                              group_it->first);
+        return false;
+      }
+      parameter_blocks->push_back(parameter_block_it->second);
+    }
+  }
+  return true;
+}
+
+bool LexicographicallyOrderResidualBlocks(const int num_eliminate_blocks,
+                                          Program* program,
+                                          string* error) {
+  CHECK_GE(num_eliminate_blocks, 1)
+      << "Congratulations, you found a Ceres bug! Please report this error "
+      << "to the developers.";
+
+  // Create a histogram of the number of residuals for each E block. There is an
+  // extra bucket at the end to catch all non-eliminated F blocks.
+  vector<int> residual_blocks_per_e_block(num_eliminate_blocks + 1);
+  vector<ResidualBlock*>* residual_blocks = program->mutable_residual_blocks();
+  vector<int> min_position_per_residual(residual_blocks->size());
+  for (int i = 0; i < residual_blocks->size(); ++i) {
+    ResidualBlock* residual_block = (*residual_blocks)[i];
+    int position = MinParameterBlock(residual_block, num_eliminate_blocks);
+    min_position_per_residual[i] = position;
+    DCHECK_LE(position, num_eliminate_blocks);
+    residual_blocks_per_e_block[position]++;
+  }
+
+  // Run a cumulative sum on the histogram, to obtain offsets to the start of
+  // each histogram bucket (where each bucket is for the residuals for that
+  // E-block).
+  vector<int> offsets(num_eliminate_blocks + 1);
+  std::partial_sum(residual_blocks_per_e_block.begin(),
+                   residual_blocks_per_e_block.end(),
+                   offsets.begin());
+  CHECK_EQ(offsets.back(), residual_blocks->size())
+      << "Congratulations, you found a Ceres bug! Please report this error "
+      << "to the developers.";
+
+  CHECK(find(residual_blocks_per_e_block.begin(),
+             residual_blocks_per_e_block.end() - 1, 0) !=
+        residual_blocks_per_e_block.end())
+      << "Congratulations, you found a Ceres bug! Please report this error "
+      << "to the developers.";
+
+  // Fill in each bucket with the residual blocks for its corresponding E block.
+  // Each bucket is individually filled from the back of the bucket to the front
+  // of the bucket. The filling order among the buckets is dictated by the
+  // residual blocks. This loop uses the offsets as counters; subtracting one
+  // from each offset as a residual block is placed in the bucket. When the
+  // filling is finished, the offset pointers should have shifted down one
+  // entry (this is verified below).
+  vector<ResidualBlock*> reordered_residual_blocks(
+      (*residual_blocks).size(), static_cast<ResidualBlock*>(NULL));
+  for (int i = 0; i < residual_blocks->size(); ++i) {
+    int bucket = min_position_per_residual[i];
+
+    // Decrement the cursor, which should now point at the next empty position.
+    offsets[bucket]--;
+
+    // Sanity.
+    CHECK(reordered_residual_blocks[offsets[bucket]] == NULL)
+        << "Congratulations, you found a Ceres bug! Please report this error "
+        << "to the developers.";
+
+    reordered_residual_blocks[offsets[bucket]] = (*residual_blocks)[i];
+  }
+
+  // Sanity check #1: The difference in bucket offsets should match the
+  // histogram sizes.
+  for (int i = 0; i < num_eliminate_blocks; ++i) {
+    CHECK_EQ(residual_blocks_per_e_block[i], offsets[i + 1] - offsets[i])
+        << "Congratulations, you found a Ceres bug! Please report this error "
+        << "to the developers.";
+  }
+  // Sanity check #2: No NULL's left behind.
+  for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
+    CHECK(reordered_residual_blocks[i] != NULL)
+        << "Congratulations, you found a Ceres bug! Please report this error "
+        << "to the developers.";
+  }
+
+  // Now that the residuals are collected by E block, swap them in place.
+  swap(*program->mutable_residual_blocks(), reordered_residual_blocks);
+  return true;
+}
+
+void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
+    const ParameterBlockOrdering& parameter_block_ordering,
+    Program* program) {
+  // Pre-order the columns corresponding to the schur complement if
+  // possible.
+#ifndef CERES_NO_SUITESPARSE
+  SuiteSparse ss;
+  if (!SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
+    return;
+  }
+
+  vector<int> constraints;
+  vector<ParameterBlock*>& parameter_blocks =
+      *(program->mutable_parameter_blocks());
+
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    constraints.push_back(
+        parameter_block_ordering.GroupId(
+            parameter_blocks[i]->mutable_user_state()));
+  }
+
+  // Renumber the entries of constraints to be contiguous integers
+  // as camd requires that the group ids be in the range [0,
+  // parameter_blocks.size() - 1].
+  MapValuesToContiguousRange(constraints.size(), &constraints[0]);
+
+  // Set the offsets and index for CreateJacobianSparsityTranspose.
+  program->SetParameterOffsetsAndIndex();
+  // Compute a block sparse representation of J'.
+  scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+      program->CreateJacobianBlockSparsityTranspose());
+
+
+  cholmod_sparse* block_jacobian_transpose =
+      ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
+
+  vector<int> ordering(parameter_blocks.size(), 0);
+  ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
+                                                 &constraints[0],
+                                                 &ordering[0]);
+  ss.Free(block_jacobian_transpose);
+
+  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
+  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
+    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
+  }
+#endif
+}
+
+bool ReorderProgramForSchurTypeLinearSolver(
+    const LinearSolverType linear_solver_type,
+    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ProblemImpl::ParameterMap& parameter_map,
+    ParameterBlockOrdering* parameter_block_ordering,
+    Program* program,
+    string* error) {
+  if (parameter_block_ordering->NumGroups() == 1) {
+    // If the user supplied a parameter_block_ordering with just one
+    // group, it is equivalent to the user supplying NULL as a
+    // parameter_block_ordering. Ceres is completely free to choose the
+    // parameter block ordering as it sees fit. For Schur type solvers,
+    // this means that the user wishes for Ceres to identify the
+    // e_blocks, which we do by computing a maximal independent set.
+    vector<ParameterBlock*> schur_ordering;
+    const int num_eliminate_blocks =
+        ComputeStableSchurOrdering(*program, &schur_ordering);
+
+    CHECK_EQ(schur_ordering.size(), program->NumParameterBlocks())
+        << "Congratulations, you found a Ceres bug! Please report this error "
+        << "to the developers.";
+
+    // Update the parameter_block_ordering object.
+    for (int i = 0; i < schur_ordering.size(); ++i) {
+      double* parameter_block = schur_ordering[i]->mutable_user_state();
+      const int group_id = (i < num_eliminate_blocks) ? 0 : 1;
+      parameter_block_ordering->AddElementToGroup(parameter_block, group_id);
+    }
+
+    // We could call ApplyOrdering but this is cheaper and
+    // simpler.
+    swap(*program->mutable_parameter_blocks(), schur_ordering);
+  } else {
+    // The user provided an ordering with more than one elimination
+    // group. Trust the user and apply the ordering.
+    if (!ApplyOrdering(parameter_map,
+                       *parameter_block_ordering,
+                       program,
+                       error)) {
+      return false;
+    }
+  }
+
+  if (linear_solver_type == SPARSE_SCHUR &&
+      sparse_linear_algebra_library_type == SUITE_SPARSE) {
+    MaybeReorderSchurComplementColumnsUsingSuiteSparse(
+        *parameter_block_ordering,
+        program);
+  }
+
+  program->SetParameterOffsetsAndIndex();
+  // Schur type solvers also require that their residual blocks be
+  // lexicographically ordered.
+  const int num_eliminate_blocks =
+      parameter_block_ordering->group_to_elements().begin()->second.size();
+  if (!LexicographicallyOrderResidualBlocks(num_eliminate_blocks,
+                                            program,
+                                            error)) {
+    return false;
+  }
+
+  program->SetParameterOffsetsAndIndex();
+  return true;
+}
+
+bool ReorderProgramForSparseNormalCholesky(
+    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ParameterBlockOrdering& parameter_block_ordering,
+    Program* program,
+    string* error) {
+
+  if (sparse_linear_algebra_library_type != SUITE_SPARSE &&
+      sparse_linear_algebra_library_type != CX_SPARSE &&
+      sparse_linear_algebra_library_type != EIGEN_SPARSE) {
+    *error = "Unknown sparse linear algebra library.";
+    return false;
+  }
+
+  // For Eigen, there is nothing to do. This is because Eigen in its
+  // current stable version does not expose a method for doing
+  // symbolic analysis on pre-ordered matrices, so a block
+  // pre-ordering is a bit pointless.
+  //
+  // The dev version as recently as July 20, 2014 has support for
+  // pre-ordering. Once this becomes more widespread, or we add
+  // support for detecting Eigen versions, we can add support for this
+  // along the lines of CXSparse.
+  if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
+    program->SetParameterOffsetsAndIndex();
+    return true;
+  }
+
+  // Set the offsets and index for CreateJacobianSparsityTranspose.
+  program->SetParameterOffsetsAndIndex();
+  // Compute a block sparse representation of J'.
+  scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+      program->CreateJacobianBlockSparsityTranspose());
+
+  vector<int> ordering(program->NumParameterBlocks(), 0);
+  vector<ParameterBlock*>& parameter_blocks =
+      *(program->mutable_parameter_blocks());
+
+  if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
+    OrderingForSparseNormalCholeskyUsingSuiteSparse(
+        *tsm_block_jacobian_transpose,
+        parameter_blocks,
+        parameter_block_ordering,
+        &ordering[0]);
+  } else if (sparse_linear_algebra_library_type == CX_SPARSE){
+    OrderingForSparseNormalCholeskyUsingCXSparse(
+        *tsm_block_jacobian_transpose,
+        &ordering[0]);
+  }
+
+  // Apply ordering.
+  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
+  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
+    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
+  }
+
+  program->SetParameterOffsetsAndIndex();
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/reorder_program.h b/internal/ceres/reorder_program.h
new file mode 100644
index 0000000..d3962f9
--- /dev/null
+++ b/internal/ceres/reorder_program.h
@@ -0,0 +1,101 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_REORDER_PROGRAM_H_
+#define CERES_INTERNAL_REORDER_PROGRAM_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+#include "ceres/parameter_block_ordering.h"
+#include "ceres/problem_impl.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+
+// Reorder the parameter blocks in program using the ordering
+bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
+                   const ParameterBlockOrdering& ordering,
+                   Program* program,
+                   string* error);
+
+// Reorder the residuals for program, if necessary, so that the residuals
+// involving each E block occur together. This is a necessary condition for the
+// Schur eliminator, which works on these "row blocks" in the jacobian.
+bool LexicographicallyOrderResidualBlocks(int num_eliminate_blocks,
+                                          Program* program,
+                                          string* error);
+
+// Schur type solvers require that all parameter blocks eliminated
+// by the Schur eliminator occur before others and the residuals be
+// sorted in lexicographic order of their parameter blocks.
+//
+// If the parameter_block_ordering only contains one elimination
+// group then a maximal independent set is computed and used as the
+// first elimination group, otherwise the user's ordering is used.
+//
+// If the linear solver type is SPARSE_SCHUR and support for
+// constrained fill-reducing ordering is available in the sparse
+// linear algebra library (SuiteSparse version >= 4.2.0) then
+// columns of the schur complement matrix are ordered to reduce the
+// fill-in of the Cholesky factorization.
+//
+// Upon return, ordering contains the parameter block ordering that
+// was used to order the program.
+bool ReorderProgramForSchurTypeLinearSolver(
+    LinearSolverType linear_solver_type,
+    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ProblemImpl::ParameterMap& parameter_map,
+    ParameterBlockOrdering* parameter_block_ordering,
+    Program* program,
+    string* error);
+
+// Sparse Cholesky factorization routines, when factorizing the
+// Jacobian matrix, reorder its columns to reduce the fill-in.
+// Compute this permutation and re-order the parameter blocks
+// accordingly.
+//
+// When using SuiteSparse, if the parameter_block_ordering contains
+// more than one elimination group and support for constrained
+// fill-reducing ordering is available in the sparse linear algebra
+// library (SuiteSparse version >= 4.2.0) then the fill reducing
+// ordering will take it into account, otherwise it will be ignored.
+bool ReorderProgramForSparseNormalCholesky(
+    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ParameterBlockOrdering& parameter_block_ordering,
+    Program* program,
+    string* error);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_REORDER_PROGRAM_H_
diff --git a/internal/ceres/reorder_program_test.cc b/internal/ceres/reorder_program_test.cc
new file mode 100644
index 0000000..2a0c4eb
--- /dev/null
+++ b/internal/ceres/reorder_program_test.cc
@@ -0,0 +1,170 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/reorder_program.h"
+
+#include "ceres/parameter_block.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/solver.h"
+
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// Templated base class for the CostFunction signatures.
+template <int kNumResiduals, int N0, int N1, int N2>
+class MockCostFunctionBase : public
+SizedCostFunction<kNumResiduals, N0, N1, N2> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    // Do nothing. This is never called.
+    return true;
+  }
+};
+
+class UnaryCostFunction : public MockCostFunctionBase<2, 1, 0, 0> {};
+class BinaryCostFunction : public MockCostFunctionBase<2, 1, 1, 0> {};
+class TernaryCostFunction : public MockCostFunctionBase<2, 1, 1, 1> {};
+
+TEST(_, ReorderResidualBlockNormalFunction) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &z);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);
+
+  ParameterBlockOrdering* linear_solver_ordering = new ParameterBlockOrdering;
+  linear_solver_ordering->AddElementToGroup(&x, 0);
+  linear_solver_ordering->AddElementToGroup(&y, 0);
+  linear_solver_ordering->AddElementToGroup(&z, 1);
+
+  Solver::Options options;
+  options.linear_solver_type = DENSE_SCHUR;
+  options.linear_solver_ordering.reset(linear_solver_ordering);
+
+  const vector<ResidualBlock*>& residual_blocks =
+      problem.program().residual_blocks();
+
+  vector<ResidualBlock*> expected_residual_blocks;
+
+  // This is a bit fragile, but it serves the purpose. We know the
+  // bucketing algorithm that the reordering function uses, so we
+  // expect the order for residual blocks for each e_block to be
+  // filled in reverse.
+  expected_residual_blocks.push_back(residual_blocks[4]);
+  expected_residual_blocks.push_back(residual_blocks[1]);
+  expected_residual_blocks.push_back(residual_blocks[0]);
+  expected_residual_blocks.push_back(residual_blocks[5]);
+  expected_residual_blocks.push_back(residual_blocks[2]);
+  expected_residual_blocks.push_back(residual_blocks[3]);
+
+  Program* program = problem.mutable_program();
+  program->SetParameterOffsetsAndIndex();
+
+  string message;
+  EXPECT_TRUE(LexicographicallyOrderResidualBlocks(
+                  2,
+                  problem.mutable_program(),
+                  &message));
+  EXPECT_EQ(residual_blocks.size(), expected_residual_blocks.size());
+  for (int i = 0; i < expected_residual_blocks.size(); ++i) {
+    EXPECT_EQ(residual_blocks[i], expected_residual_blocks[i]);
+  }
+}
+
+TEST(_, ApplyOrderingOrderingTooSmall) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  ParameterBlockOrdering linear_solver_ordering;
+  linear_solver_ordering.AddElementToGroup(&x, 0);
+  linear_solver_ordering.AddElementToGroup(&y, 1);
+
+  Program program(problem.program());
+  string message;
+  EXPECT_FALSE(ApplyOrdering(problem.parameter_map(),
+                             linear_solver_ordering,
+                             &program,
+                             &message));
+}
+
+TEST(_, ApplyOrderingNormal) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  ParameterBlockOrdering linear_solver_ordering;
+  linear_solver_ordering.AddElementToGroup(&x, 0);
+  linear_solver_ordering.AddElementToGroup(&y, 2);
+  linear_solver_ordering.AddElementToGroup(&z, 1);
+
+  Program* program = problem.mutable_program();
+  string message;
+
+  EXPECT_TRUE(ApplyOrdering(problem.parameter_map(),
+                            linear_solver_ordering,
+                            program,
+                            &message));
+  const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
+
+  EXPECT_EQ(parameter_blocks.size(), 3);
+  EXPECT_EQ(parameter_blocks[0]->user_state(), &x);
+  EXPECT_EQ(parameter_blocks[1]->user_state(), &z);
+  EXPECT_EQ(parameter_blocks[2]->user_state(), &y);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/residual_block_test.cc b/internal/ceres/residual_block_test.cc
index 1e03e7d..b37f50f 100644
--- a/internal/ceres/residual_block_test.cc
+++ b/internal/ceres/residual_block_test.cc
@@ -43,9 +43,9 @@
 class TernaryCostFunction: public CostFunction {
  public:
   TernaryCostFunction(int num_residuals,
-                      int16 parameter_block1_size,
-                      int16 parameter_block2_size,
-                      int16 parameter_block3_size) {
+                      int32 parameter_block1_size,
+                      int32 parameter_block2_size,
+                      int32 parameter_block3_size) {
     set_num_residuals(num_residuals);
     mutable_parameter_block_sizes()->push_back(parameter_block1_size);
     mutable_parameter_block_sizes()->push_back(parameter_block2_size);
diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc
index 4d88a9f..d2564a7 100644
--- a/internal/ceres/residual_block_utils.cc
+++ b/internal/ceres/residual_block_utils.cc
@@ -61,24 +61,6 @@
   }
 }
 
-// Utility routine to print an array of doubles to a string. If the
-// array pointer is NULL, it is treated as an array of zeros.
-namespace {
-void AppendArrayToString(const int size, const double* x, string* result) {
-  for (int i = 0; i < size; ++i) {
-    if (x == NULL) {
-      StringAppendF(result, "Not Computed  ");
-    } else {
-      if (x[i] == kImpossibleValue) {
-        StringAppendF(result, "Uninitialized ");
-      } else {
-        StringAppendF(result, "%12g ", x[i]);
-      }
-    }
-  }
-}
-}  // namespace
-
 string EvaluationToString(const ResidualBlock& block,
                           double const* const* parameters,
                           double* cost,
diff --git a/internal/ceres/rotation_test.cc b/internal/ceres/rotation_test.cc
index 8de1bbd..fab0a7a 100644
--- a/internal/ceres/rotation_test.cc
+++ b/internal/ceres/rotation_test.cc
@@ -548,6 +548,41 @@
   }
 }
 
+// Takes a bunch of random axis/angle values near zero, converts them
+// to rotation matrices, and back again.
+TEST(Rotation, AngleAxisToRotationMatrixAndBackNearZero) {
+  srand(5);
+  for (int i = 0; i < kNumTrials; i++) {
+    double axis_angle[3];
+    // Make an axis by choosing three random numbers in [-1, 1) and
+    // normalizing.
+    double norm = 0;
+    for (int i = 0; i < 3; i++) {
+      axis_angle[i] = RandDouble() * 2 - 1;
+      norm += axis_angle[i] * axis_angle[i];
+    }
+    norm = sqrt(norm);
+
+    // Tiny theta.
+    double theta = 1e-16 * (kPi * 2 * RandDouble() - kPi);
+    for (int i = 0; i < 3; i++) {
+      axis_angle[i] = axis_angle[i] * theta / norm;
+    }
+
+    double matrix[9];
+    double round_trip[3];
+    AngleAxisToRotationMatrix(axis_angle, matrix);
+    ASSERT_THAT(matrix, IsOrthonormal());
+    RotationMatrixToAngleAxis(matrix, round_trip);
+
+    for (int i = 0; i < 3; ++i) {
+      EXPECT_NEAR(round_trip[i], axis_angle[i],
+                  std::numeric_limits<double>::epsilon());
+    }
+  }
+}
+
+
 // Transposes a 3x3 matrix.
 static void Transpose3x3(double m[9]) {
   std::swap(m[1], m[3]);
diff --git a/internal/ceres/runtime_numeric_diff_cost_function.cc b/internal/ceres/runtime_numeric_diff_cost_function.cc
deleted file mode 100644
index 7af275c..0000000
--- a/internal/ceres/runtime_numeric_diff_cost_function.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Based on the templated version in public/numeric_diff_cost_function.h.
-
-#include "ceres/runtime_numeric_diff_cost_function.h"
-
-#include <algorithm>
-#include <numeric>
-#include <vector>
-#include "Eigen/Dense"
-#include "ceres/cost_function.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "glog/logging.h"
-
-namespace ceres {
-namespace internal {
-namespace {
-
-bool EvaluateJacobianForParameterBlock(const CostFunction* function,
-                                       int parameter_block_size,
-                                       int parameter_block,
-                                       RuntimeNumericDiffMethod method,
-                                       double relative_step_size,
-                                       double const* residuals_at_eval_point,
-                                       double** parameters,
-                                       double** jacobians) {
-  using Eigen::Map;
-  using Eigen::Matrix;
-  using Eigen::Dynamic;
-  using Eigen::RowMajor;
-
-  typedef Matrix<double, Dynamic, 1> ResidualVector;
-  typedef Matrix<double, Dynamic, 1> ParameterVector;
-  typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix;
-
-  int num_residuals = function->num_residuals();
-
-  Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
-                                         num_residuals,
-                                         parameter_block_size);
-
-  // Mutate one element at a time and then restore.
-  Map<ParameterVector> x_plus_delta(parameters[parameter_block],
-                                    parameter_block_size);
-  ParameterVector x(x_plus_delta);
-  ParameterVector step_size = x.array().abs() * relative_step_size;
-
-  // To handle cases where a paremeter is exactly zero, instead use the mean
-  // step_size for the other dimensions.
-  double fallback_step_size = step_size.sum() / step_size.rows();
-  if (fallback_step_size == 0.0) {
-    // If all the parameters are zero, there's no good answer. Use the given
-    // relative step_size as absolute step_size and hope for the best.
-    fallback_step_size = relative_step_size;
-  }
-
-  // For each parameter in the parameter block, use finite differences to
-  // compute the derivative for that parameter.
-  for (int j = 0; j < parameter_block_size; ++j) {
-    if (step_size(j) == 0.0) {
-      // The parameter is exactly zero, so compromise and use the mean step_size
-      // from the other parameters. This can break in many cases, but it's hard
-      // to pick a good number without problem specific knowledge.
-      step_size(j) = fallback_step_size;
-    }
-    x_plus_delta(j) = x(j) + step_size(j);
-
-    ResidualVector residuals(num_residuals);
-    if (!function->Evaluate(parameters, &residuals[0], NULL)) {
-      // Something went wrong; bail.
-      return false;
-    }
-
-    // Compute this column of the jacobian in 3 steps:
-    // 1. Store residuals for the forward part.
-    // 2. Subtract residuals for the backward (or 0) part.
-    // 3. Divide out the run.
-    parameter_jacobian.col(j) = residuals;
-
-    double one_over_h = 1 / step_size(j);
-    if (method == CENTRAL) {
-      // Compute the function on the other side of x(j).
-      x_plus_delta(j) = x(j) - step_size(j);
-
-      if (!function->Evaluate(parameters, &residuals[0], NULL)) {
-        // Something went wrong; bail.
-        return false;
-      }
-      parameter_jacobian.col(j) -= residuals;
-      one_over_h /= 2;
-    } else {
-      // Forward difference only; reuse existing residuals evaluation.
-      parameter_jacobian.col(j) -=
-          Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
-    }
-    x_plus_delta(j) = x(j);  // Restore x_plus_delta.
-
-    // Divide out the run to get slope.
-    parameter_jacobian.col(j) *= one_over_h;
-  }
-  return true;
-}
-
-class RuntimeNumericDiffCostFunction : public CostFunction {
- public:
-  RuntimeNumericDiffCostFunction(const CostFunction* function,
-                                 RuntimeNumericDiffMethod method,
-                                 double relative_step_size)
-      : function_(function),
-        method_(method),
-        relative_step_size_(relative_step_size) {
-    *mutable_parameter_block_sizes() = function->parameter_block_sizes();
-    set_num_residuals(function->num_residuals());
-  }
-
-  virtual ~RuntimeNumericDiffCostFunction() { }
-
-  virtual bool Evaluate(double const* const* parameters,
-                        double* residuals,
-                        double** jacobians) const {
-    // Get the function value (residuals) at the the point to evaluate.
-    bool success = function_->Evaluate(parameters, residuals, NULL);
-    if (!success) {
-      // Something went wrong; ignore the jacobian.
-      return false;
-    }
-    if (!jacobians) {
-      // Nothing to do; just forward.
-      return true;
-    }
-
-    const vector<int16>& block_sizes = function_->parameter_block_sizes();
-    CHECK(!block_sizes.empty());
-
-    // Create local space for a copy of the parameters which will get mutated.
-    int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0);
-    vector<double> parameters_copy(parameters_size);
-    vector<double*> parameters_references_copy(block_sizes.size());
-    parameters_references_copy[0] = &parameters_copy[0];
-    for (int block = 1; block < block_sizes.size(); ++block) {
-      parameters_references_copy[block] = parameters_references_copy[block - 1]
-          + block_sizes[block - 1];
-    }
-
-    // Copy the parameters into the local temp space.
-    for (int block = 0; block < block_sizes.size(); ++block) {
-      memcpy(parameters_references_copy[block],
-             parameters[block],
-             block_sizes[block] * sizeof(*parameters[block]));
-    }
-
-    for (int block = 0; block < block_sizes.size(); ++block) {
-      if (!jacobians[block]) {
-        // No jacobian requested for this parameter / residual pair.
-        continue;
-      }
-      if (!EvaluateJacobianForParameterBlock(function_,
-                                             block_sizes[block],
-                                             block,
-                                             method_,
-                                             relative_step_size_,
-                                             residuals,
-                                             &parameters_references_copy[0],
-                                             jacobians)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
- private:
-  const CostFunction* function_;
-  RuntimeNumericDiffMethod method_;
-  double relative_step_size_;
-};
-
-}  // namespace
-
-CostFunction* CreateRuntimeNumericDiffCostFunction(
-    const CostFunction* cost_function,
-    RuntimeNumericDiffMethod method,
-    double relative_step_size) {
-  return new RuntimeNumericDiffCostFunction(cost_function,
-                                            method,
-                                            relative_step_size);
-}
-
-}  // namespace internal
-}  // namespace ceres
diff --git a/internal/ceres/runtime_numeric_diff_cost_function.h b/internal/ceres/runtime_numeric_diff_cost_function.h
deleted file mode 100644
index 01b57f9..0000000
--- a/internal/ceres/runtime_numeric_diff_cost_function.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Create CostFunctions as needed by the least squares framework with jacobians
-// computed via numeric differentiation.
-//
-// To get a numerically differentiated cost function, define a subclass of
-// CostFunction such that the Evaluate() function ignores the jacobian
-// parameter. The numeric differentiation wrapper will fill in the jacobian
-// parameter if nececssary by repeatedly calling the Evaluate() function with
-// small changes to the appropriate parameters, and computing the slope. This
-// implementation is not templated (hence the "Runtime" prefix), which is a bit
-// slower than but is more convenient than the templated version in
-// numeric_diff_cost_function.h
-//
-// The numerically differentiated version of a cost function for a cost function
-// can be constructed as follows:
-//
-//   CostFunction* cost_function =
-//     CreateRuntimeNumericDiffCostFunction(new MyCostFunction(...),
-//                                          CENTRAL,
-//                                          TAKE_OWNERSHIP);
-//
-// The central difference method is considerably more accurate; consider using
-// to start and only after that works, trying forward difference.
-//
-// TODO(keir): Characterize accuracy; mention pitfalls; provide alternatives.
-
-#ifndef CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_
-#define CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_
-
-#include "ceres/cost_function.h"
-
-namespace ceres {
-namespace internal {
-
-enum RuntimeNumericDiffMethod {
-  CENTRAL,
-  FORWARD,
-};
-
-// Create a cost function that evaluates the derivative with finite differences.
-// The base cost_function's implementation of Evaluate() only needs to fill in
-// the "residuals" argument and not the "jacobians". Any data written to the
-// jacobians by the base cost_function is overwritten.
-//
-// Forward difference or central difference is selected with CENTRAL or FORWARD.
-// The relative eps, which determines the step size for forward and central
-// differencing, is set with relative eps. Caller owns the resulting cost
-// function, and the resulting cost function does not own the base cost
-// function.
-CostFunction *CreateRuntimeNumericDiffCostFunction(
-    const CostFunction *cost_function,
-    RuntimeNumericDiffMethod method,
-    double relative_eps);
-
-}  // namespace internal
-}  // namespace ceres
-
-#endif  // CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_
diff --git a/internal/ceres/runtime_numeric_diff_cost_function_test.cc b/internal/ceres/runtime_numeric_diff_cost_function_test.cc
deleted file mode 100644
index 71469ea..0000000
--- a/internal/ceres/runtime_numeric_diff_cost_function_test.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Based on the tests in numeric_diff_cost_function.cc.
-//
-// TODO(keir): See about code duplication.
-
-#include "ceres/runtime_numeric_diff_cost_function.h"
-
-#include <algorithm>
-#include <cmath>
-#include <string>
-#include <vector>
-#include "ceres/cost_function.h"
-#include "ceres/internal/macros.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/stringprintf.h"
-#include "ceres/test_util.h"
-#include "glog/logging.h"
-#include "gtest/gtest.h"
-
-namespace ceres {
-namespace internal {
-
-const double kRelativeEps = 1e-6;
-
-// y1 = x1'x2      -> dy1/dx1 = x2,               dy1/dx2 = x1
-// y2 = (x1'x2)^2  -> dy2/dx1 = 2 * x2 * (x1'x2), dy2/dx2 = 2 * x1 * (x1'x2)
-// y3 = x2'x2      -> dy3/dx1 = 0,                dy3/dx2 = 2 * x2
-class TestCostFunction : public CostFunction {
- public:
-  TestCostFunction() {
-    set_num_residuals(3);
-    mutable_parameter_block_sizes()->push_back(5);  // x1.
-    mutable_parameter_block_sizes()->push_back(5);  // x2.
-  }
-  virtual bool Evaluate(double const* const* parameters,
-                        double* residuals,
-                        double** jacobians) const {
-    (void) jacobians;  // Ignored.
-
-    residuals[0] = residuals[1] = residuals[2] = 0;
-    for (int i = 0; i < 5; ++i) {
-      residuals[0] += parameters[0][i] * parameters[1][i];
-      residuals[2] += parameters[1][i] * parameters[1][i];
-    }
-    residuals[1] = residuals[0] * residuals[0];
-    return true;
-  }
-};
-
-TEST(NumericDiffCostFunction, EasyCase) {
-  // Try both central and forward difference.
-  TestCostFunction term;
-  scoped_ptr<CostFunction> cfs[2];
-  cfs[0].reset(
-      CreateRuntimeNumericDiffCostFunction(&term, CENTRAL, kRelativeEps));
-
-  cfs[1].reset(
-      CreateRuntimeNumericDiffCostFunction(&term, FORWARD, kRelativeEps));
-
-
-  for (int c = 0; c < 2; ++c) {
-    CostFunction *cost_function = cfs[c].get();
-
-    double x1[] = { 1.0, 2.0, 3.0, 4.0, 5.0 };
-    double x2[] = { 9.0, 9.0, 5.0, 5.0, 1.0 };
-    double *parameters[] = { &x1[0], &x2[0] };
-
-    double dydx1[15];  // 3 x 5, row major.
-    double dydx2[15];  // 3 x 5, row major.
-    double *jacobians[2] = { &dydx1[0], &dydx2[0] };
-
-    double residuals[3] = {-1e-100, -2e-100, -3e-100 };
-
-    ASSERT_TRUE(cost_function->Evaluate(&parameters[0],
-                                        &residuals[0],
-                                        &jacobians[0]));
-
-    EXPECT_EQ(residuals[0], 67);
-    EXPECT_EQ(residuals[1], 4489);
-    EXPECT_EQ(residuals[2], 213);
-
-    for (int i = 0; i < 5; ++i) {
-      LOG(INFO) << "c = " << c << " i = " << i;
-      const double kEps = c == 0 ? /* central */ 3e-9 : /* forward */ 2e-5;
-
-      ExpectClose(x2[i],                    dydx1[5 * 0 + i], kEps);  // y1
-      ExpectClose(x1[i],                    dydx2[5 * 0 + i], kEps);
-      ExpectClose(2 * x2[i] * residuals[0], dydx1[5 * 1 + i], kEps);  // y2
-      ExpectClose(2 * x1[i] * residuals[0], dydx2[5 * 1 + i], kEps);
-      ExpectClose(0.0,                      dydx1[5 * 2 + i], kEps);  // y3
-      ExpectClose(2 * x2[i],                dydx2[5 * 2 + i], kEps);
-    }
-  }
-}
-
-// y1 = sin(x1'x2)
-// y2 = exp(-x1'x2 / 10)
-//
-// dy1/dx1 =  x2 * cos(x1'x2),            dy1/dx2 =  x1 * cos(x1'x2)
-// dy2/dx1 = -x2 * exp(-x1'x2 / 10) / 10, dy2/dx2 = -x2 * exp(-x1'x2 / 10) / 10
-class TranscendentalTestCostFunction : public CostFunction {
- public:
-  TranscendentalTestCostFunction() {
-    set_num_residuals(2);
-    mutable_parameter_block_sizes()->push_back(5);  // x1.
-    mutable_parameter_block_sizes()->push_back(5);  // x2.
-  }
-  virtual bool Evaluate(double const* const* parameters,
-                        double* residuals,
-                        double** jacobians) const {
-    (void) jacobians;  // Ignored.
-
-    double x1x2 = 0;
-    for (int i = 0; i < 5; ++i) {
-      x1x2 += parameters[0][i] * parameters[1][i];
-    }
-    residuals[0] = sin(x1x2);
-    residuals[1] = exp(-x1x2 / 10);
-    return true;
-  }
-};
-
-TEST(NumericDiffCostFunction, TransendentalOperationsInCostFunction) {
-  // Try both central and forward difference.
-  TranscendentalTestCostFunction term;
-  scoped_ptr<CostFunction> cfs[2];
-  cfs[0].reset(
-      CreateRuntimeNumericDiffCostFunction(&term, CENTRAL, kRelativeEps));
-
-  cfs[1].reset(
-      CreateRuntimeNumericDiffCostFunction(&term, FORWARD, kRelativeEps));
-
-  for (int c = 0; c < 2; ++c) {
-    CostFunction *cost_function = cfs[c].get();
-
-    struct {
-      double x1[5];
-      double x2[5];
-    } kTests[] = {
-      { { 1.0, 2.0, 3.0, 4.0, 5.0 },  // No zeros.
-        { 9.0, 9.0, 5.0, 5.0, 1.0 },
-      },
-      { { 0.0, 2.0, 3.0, 0.0, 5.0 },  // Some zeros x1.
-        { 9.0, 9.0, 5.0, 5.0, 1.0 },
-      },
-      { { 1.0, 2.0, 3.0, 1.0, 5.0 },  // Some zeros x2.
-        { 0.0, 9.0, 0.0, 5.0, 0.0 },
-      },
-      { { 0.0, 0.0, 0.0, 0.0, 0.0 },  // All zeros x1.
-        { 9.0, 9.0, 5.0, 5.0, 1.0 },
-      },
-      { { 1.0, 2.0, 3.0, 4.0, 5.0 },  // All zeros x2.
-        { 0.0, 0.0, 0.0, 0.0, 0.0 },
-      },
-      { { 0.0, 0.0, 0.0, 0.0, 0.0 },  // All zeros.
-        { 0.0, 0.0, 0.0, 0.0, 0.0 },
-      },
-    };
-    for (int k = 0; k < CERES_ARRAYSIZE(kTests); ++k) {
-      double *x1 = &(kTests[k].x1[0]);
-      double *x2 = &(kTests[k].x2[0]);
-      double *parameters[] = { x1, x2 };
-
-      double dydx1[10];
-      double dydx2[10];
-      double *jacobians[2] = { &dydx1[0], &dydx2[0] };
-
-      double residuals[2];
-
-      ASSERT_TRUE(cost_function->Evaluate(&parameters[0],
-                                          &residuals[0],
-                                          &jacobians[0]));
-      LOG(INFO) << "Ran evaluate for test k=" << k << " c=" << c;
-
-      double x1x2 = 0;
-      for (int i = 0; i < 5; ++i) {
-        x1x2 += x1[i] * x2[i];
-      }
-
-      for (int i = 0; i < 5; ++i) {
-        const double kEps = (c == 0 ? /* central */ 3e-9 : /* forward */ 2e-5);
-
-        ExpectClose( x2[i] * cos(x1x2),              dydx1[5 * 0 + i], kEps);  // NOLINT
-        ExpectClose( x1[i] * cos(x1x2),              dydx2[5 * 0 + i], kEps);  // NOLINT
-        ExpectClose(-x2[i] * exp(-x1x2 / 10.) / 10., dydx1[5 * 1 + i], kEps);
-        ExpectClose(-x1[i] * exp(-x1x2 / 10.) / 10., dydx2[5 * 1 + i], kEps);
-      }
-    }
-  }
-}
-
-}  // namespace internal
-}  // namespace ceres
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index b192aa1..d2aa168 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -28,12 +28,13 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/internal/port.h"
+
 #include <algorithm>
 #include <ctime>
 #include <set>
 #include <vector>
 
-#include "Eigen/Dense"
 #include "ceres/block_random_access_dense_matrix.h"
 #include "ceres/block_random_access_matrix.h"
 #include "ceres/block_random_access_sparse_matrix.h"
@@ -42,7 +43,6 @@
 #include "ceres/cxsparse.h"
 #include "ceres/detect_structure.h"
 #include "ceres/internal/eigen.h"
-#include "ceres/internal/port.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/lapack.h"
 #include "ceres/linear_solver.h"
@@ -51,6 +51,8 @@
 #include "ceres/triplet_sparse_matrix.h"
 #include "ceres/types.h"
 #include "ceres/wall_time.h"
+#include "Eigen/Dense"
+#include "Eigen/SparseCore"
 
 namespace ceres {
 namespace internal {
@@ -75,24 +77,19 @@
   fill(x, x + A->num_cols(), 0.0);
   event_logger.AddEvent("Setup");
 
-  LinearSolver::Summary summary;
-  summary.num_iterations = 1;
-  summary.termination_type = FAILURE;
   eliminator_->Eliminate(A, b, per_solve_options.D, lhs_.get(), rhs_.get());
   event_logger.AddEvent("Eliminate");
 
   double* reduced_solution = x + A->num_cols() - lhs_->num_cols();
-  const bool status = SolveReducedLinearSystem(reduced_solution);
+  const LinearSolver::Summary summary =
+      SolveReducedLinearSystem(reduced_solution);
   event_logger.AddEvent("ReducedSolve");
 
-  if (!status) {
-    return summary;
+  if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
+    eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x);
+    event_logger.AddEvent("BackSubstitute");
   }
 
-  eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x);
-  summary.termination_type = TOLERANCE;
-
-  event_logger.AddEvent("BackSubstitute");
   return summary;
 }
 
@@ -117,7 +114,13 @@
 // Solve the system Sx = r, assuming that the matrix S is stored in a
 // BlockRandomAccessDenseMatrix. The linear system is solved using
 // Eigen's Cholesky factorization.
-bool DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+LinearSolver::Summary
+DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
   const BlockRandomAccessDenseMatrix* m =
       down_cast<const BlockRandomAccessDenseMatrix*>(lhs());
   const int num_rows = m->num_rows();
@@ -125,29 +128,36 @@
   // The case where there are no f blocks, and the system is block
   // diagonal.
   if (num_rows == 0) {
-    return true;
+    return summary;
   }
 
+  summary.num_iterations = 1;
+
   if (options().dense_linear_algebra_library_type == EIGEN) {
-    // TODO(sameeragarwal): Add proper error handling; this completely ignores
-    // the quality of the solution to the solve.
-    VectorRef(solution, num_rows) =
+    Eigen::LLT<Matrix, Eigen::Upper> llt =
         ConstMatrixRef(m->values(), num_rows, num_rows)
         .selfadjointView<Eigen::Upper>()
-        .llt()
-        .solve(ConstVectorRef(rhs(), num_rows));
-    return true;
+        .llt();
+    if (llt.info() != Eigen::Success) {
+      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.message =
+          "Eigen failure. Unable to perform dense Cholesky factorization.";
+      return summary;
+    }
+
+    VectorRef(solution, num_rows) = llt.solve(ConstVectorRef(rhs(), num_rows));
+  } else {
+    VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
+    summary.termination_type =
+        LAPACK::SolveInPlaceUsingCholesky(num_rows,
+                                          m->values(),
+                                          solution,
+                                          &summary.message);
   }
 
-  VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
-  const int info = LAPACK::SolveInPlaceUsingCholesky(num_rows,
-                                                     m->values(),
-                                                     solution);
-  return (info == 0);
+  return summary;
 }
 
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
-
 SparseSchurComplementSolver::SparseSchurComplementSolver(
     const LinearSolver::Options& options)
     : SchurComplementSolver(options),
@@ -156,19 +166,15 @@
 }
 
 SparseSchurComplementSolver::~SparseSchurComplementSolver() {
-#ifndef CERES_NO_SUITESPARSE
   if (factor_ != NULL) {
     ss_.Free(factor_);
     factor_ = NULL;
   }
-#endif  // CERES_NO_SUITESPARSE
 
-#ifndef CERES_NO_CXSPARSE
   if (cxsparse_factor_ != NULL) {
     cxsparse_.Free(cxsparse_factor_);
     cxsparse_factor_ = NULL;
   }
-#endif  // CERES_NO_CXSPARSE
 }
 
 // Determine the non-zero blocks in the Schur Complement matrix, and
@@ -242,40 +248,57 @@
   set_rhs(new double[lhs()->num_rows()]);
 }
 
-bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
   switch (options().sparse_linear_algebra_library_type) {
     case SUITE_SPARSE:
       return SolveReducedLinearSystemUsingSuiteSparse(solution);
     case CX_SPARSE:
       return SolveReducedLinearSystemUsingCXSparse(solution);
+    case EIGEN_SPARSE:
+      return SolveReducedLinearSystemUsingEigen(solution);
     default:
       LOG(FATAL) << "Unknown sparse linear algebra library : "
                  << options().sparse_linear_algebra_library_type;
   }
 
-  LOG(FATAL) << "Unknown sparse linear algebra library : "
-             << options().sparse_linear_algebra_library_type;
-  return false;
+  return LinearSolver::Summary();
 }
 
-#ifndef CERES_NO_SUITESPARSE
 // Solve the system Sx = r, assuming that the matrix S is stored in a
 // BlockRandomAccessSparseMatrix.  The linear system is solved using
 // CHOLMOD's sparse cholesky factorization routines.
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
     double* solution) {
+#ifdef CERES_NO_SUITESPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message = "Ceres was not built with SuiteSparse support. "
+      "Therefore, SPARSE_SCHUR cannot be used with SUITE_SPARSE";
+  return summary;
+
+#else
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
   TripletSparseMatrix* tsm =
       const_cast<TripletSparseMatrix*>(
           down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
-
   const int num_rows = tsm->num_rows();
 
   // The case where there are no f blocks, and the system is block
   // diagonal.
   if (num_rows == 0) {
-    return true;
+    return summary;
   }
 
+  summary.num_iterations = 1;
   cholmod_sparse* cholmod_lhs = NULL;
   if (options().use_postordering) {
     // If we are going to do a full symbolic analysis of the schur
@@ -288,7 +311,10 @@
     cholmod_lhs->stype = 1;
 
     if (factor_ == NULL) {
-      factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs, blocks_, blocks_);
+      factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs,
+                                         blocks_,
+                                         blocks_,
+                                         &summary.message);
     }
   } else {
     // If we are going to use the natural ordering (i.e. rely on the
@@ -301,53 +327,83 @@
     cholmod_lhs->stype = -1;
 
     if (factor_ == NULL) {
-      factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(cholmod_lhs);
+      factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(cholmod_lhs,
+                                                       &summary.message);
     }
   }
 
+  if (factor_ == NULL) {
+    ss_.Free(cholmod_lhs);
+    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    // No need to set message as it has already been set by the
+    // symbolic analysis routines above.
+    return summary;
+  }
+
+  summary.termination_type =
+    ss_.Cholesky(cholmod_lhs, factor_, &summary.message);
+
+  ss_.Free(cholmod_lhs);
+
+  if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
+    // No need to set message as it has already been set by the
+    // numeric factorization routine above.
+    return summary;
+  }
+
   cholmod_dense*  cholmod_rhs =
       ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows);
-  cholmod_dense* cholmod_solution =
-      ss_.SolveCholesky(cholmod_lhs, factor_, cholmod_rhs);
-
-  ss_.Free(cholmod_lhs);
+  cholmod_dense* cholmod_solution = ss_.Solve(factor_,
+                                              cholmod_rhs,
+                                              &summary.message);
   ss_.Free(cholmod_rhs);
 
   if (cholmod_solution == NULL) {
-    LOG(WARNING) << "CHOLMOD solve failed.";
-    return false;
+    summary.message =
+        "SuiteSparse failure. Unable to perform triangular solve.";
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    return summary;
   }
 
   VectorRef(solution, num_rows)
       = VectorRef(static_cast<double*>(cholmod_solution->x), num_rows);
   ss_.Free(cholmod_solution);
-  return true;
-}
-#else
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
-    double* solution) {
-  LOG(FATAL) << "No SuiteSparse support in Ceres.";
-  return false;
-}
+  return summary;
 #endif  // CERES_NO_SUITESPARSE
+}
 
-#ifndef CERES_NO_CXSPARSE
 // Solve the system Sx = r, assuming that the matrix S is stored in a
 // BlockRandomAccessSparseMatrix.  The linear system is solved using
 // CXSparse's sparse cholesky factorization routines.
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
     double* solution) {
+#ifdef CERES_NO_CXSPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message = "Ceres was not built with CXSparse support. "
+      "Therefore, SPARSE_SCHUR cannot be used with CX_SPARSE";
+  return summary;
+
+#else
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
   // Extract the TripletSparseMatrix that is used for actually storing S.
   TripletSparseMatrix* tsm =
       const_cast<TripletSparseMatrix*>(
           down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
-
   const int num_rows = tsm->num_rows();
 
   // The case where there are no f blocks, and the system is block
   // diagonal.
   if (num_rows == 0) {
-    return true;
+    return summary;
   }
 
   cs_di* lhs = CHECK_NOTNULL(cxsparse_.CreateSparseMatrix(tsm));
@@ -355,24 +411,108 @@
 
   // Compute symbolic factorization if not available.
   if (cxsparse_factor_ == NULL) {
-    cxsparse_factor_ =
-        CHECK_NOTNULL(cxsparse_.BlockAnalyzeCholesky(lhs, blocks_, blocks_));
+    cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(lhs, blocks_, blocks_);
   }
 
-  // Solve the linear system.
-  bool ok = cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution);
+  if (cxsparse_factor_ == NULL) {
+    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    summary.message =
+        "CXSparse failure. Unable to find symbolic factorization.";
+  } else if (!cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution)) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "CXSparse::SolveCholesky failed.";
+  }
 
   cxsparse_.Free(lhs);
-  return ok;
-}
-#else
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
-    double* solution) {
-  LOG(FATAL) << "No CXSparse support in Ceres.";
-  return false;
-}
+  return summary;
 #endif  // CERES_NO_CXPARSE
+}
 
-#endif  // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
+// Solve the system Sx = r, assuming that the matrix S is stored in a
+// BlockRandomAccessSparseMatrix.  The linear system is solved using
+// Eigen's sparse cholesky factorization routines.
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingEigen(
+    double* solution) {
+#ifndef CERES_USE_EIGEN_SPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_SCHUR cannot be used with EIGEN_SPARSE. "
+      "Ceres was not built with support for "
+      "Eigen's SimplicialLDLT decomposition. "
+      "This requires enabling building with -DEIGENSPARSE=ON.";
+  return summary;
+
+#else
+  EventLogger event_logger("SchurComplementSolver::EigenSolve");
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  // Extract the TripletSparseMatrix that is used for actually storing S.
+  TripletSparseMatrix* tsm =
+      const_cast<TripletSparseMatrix*>(
+          down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
+  const int num_rows = tsm->num_rows();
+
+  // The case where there are no f blocks, and the system is block
+  // diagonal.
+  if (num_rows == 0) {
+    return summary;
+  }
+
+  // This is an upper triangular matrix.
+  CompressedRowSparseMatrix crsm(*tsm);
+  // Map this to a column major, lower triangular matrix.
+  Eigen::MappedSparseMatrix<double, Eigen::ColMajor> eigen_lhs(
+      crsm.num_rows(),
+      crsm.num_rows(),
+      crsm.num_nonzeros(),
+      crsm.mutable_rows(),
+      crsm.mutable_cols(),
+      crsm.mutable_values());
+  event_logger.AddEvent("ToCompressedRowSparseMatrix");
+
+  // Compute symbolic factorization if one does not exist.
+  if (simplicial_ldlt_.get() == NULL) {
+    simplicial_ldlt_.reset(new SimplicialLDLT);
+    // This ordering is quite bad. The scalar ordering produced by the
+    // AMD algorithm is quite bad and can be an order of magnitude
+    // worse than the one computed using the block version of the
+    // algorithm.
+    simplicial_ldlt_->analyzePattern(eigen_lhs);
+    event_logger.AddEvent("Analysis");
+    if (simplicial_ldlt_->info() != Eigen::Success) {
+      summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+      summary.message =
+          "Eigen failure. Unable to find symbolic factorization.";
+      return summary;
+    }
+  }
+
+  simplicial_ldlt_->factorize(eigen_lhs);
+  event_logger.AddEvent("Factorize");
+  if (simplicial_ldlt_->info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "Eigen failure. Unable to find numeric factoriztion.";
+    return summary;
+  }
+
+  VectorRef(solution, num_rows) =
+      simplicial_ldlt_->solve(ConstVectorRef(rhs(), num_rows));
+  event_logger.AddEvent("Solve");
+  if (simplicial_ldlt_->info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "Eigen failure. Unable to do triangular solve.";
+  }
+
+  return summary;
+#endif  // CERES_USE_EIGEN_SPARSE
+}
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/schur_complement_solver.h b/internal/ceres/schur_complement_solver.h
index b5a1c74..723b149 100644
--- a/internal/ceres/schur_complement_solver.h
+++ b/internal/ceres/schur_complement_solver.h
@@ -35,6 +35,8 @@
 #include <utility>
 #include <vector>
 
+#include "ceres/internal/port.h"
+
 #include "ceres/block_random_access_matrix.h"
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/block_structure.h"
@@ -45,6 +47,10 @@
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/types.h"
 
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/SparseCholesky"
+#endif
+
 namespace ceres {
 namespace internal {
 
@@ -126,7 +132,8 @@
 
  private:
   virtual void InitStorage(const CompressedRowBlockStructure* bs) = 0;
-  virtual bool SolveReducedLinearSystem(double* solution) = 0;
+  virtual LinearSolver::Summary SolveReducedLinearSystem(
+      double* solution) = 0;
 
   LinearSolver::Options options_;
 
@@ -146,12 +153,12 @@
 
  private:
   virtual void InitStorage(const CompressedRowBlockStructure* bs);
-  virtual bool SolveReducedLinearSystem(double* solution);
+  virtual LinearSolver::Summary SolveReducedLinearSystem(
+      double* solution);
 
   CERES_DISALLOW_COPY_AND_ASSIGN(DenseSchurComplementSolver);
 };
 
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
 // Sparse Cholesky factorization based solver.
 class SparseSchurComplementSolver : public SchurComplementSolver {
  public:
@@ -160,9 +167,14 @@
 
  private:
   virtual void InitStorage(const CompressedRowBlockStructure* bs);
-  virtual bool SolveReducedLinearSystem(double* solution);
-  bool SolveReducedLinearSystemUsingSuiteSparse(double* solution);
-  bool SolveReducedLinearSystemUsingCXSparse(double* solution);
+  virtual LinearSolver::Summary SolveReducedLinearSystem(
+      double* solution);
+  LinearSolver::Summary SolveReducedLinearSystemUsingSuiteSparse(
+      double* solution);
+  LinearSolver::Summary SolveReducedLinearSystemUsingCXSparse(
+      double* solution);
+  LinearSolver::Summary SolveReducedLinearSystemUsingEigen(
+      double* solution);
 
   // Size of the blocks in the Schur complement.
   vector<int> blocks_;
@@ -175,10 +187,15 @@
   CXSparse cxsparse_;
   // Cached factorization
   cs_dis* cxsparse_factor_;
+
+#ifdef CERES_USE_EIGEN_SPARSE
+  typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > SimplicialLDLT;
+  scoped_ptr<SimplicialLDLT> simplicial_ldlt_;
+#endif
+
   CERES_DISALLOW_COPY_AND_ASSIGN(SparseSchurComplementSolver);
 };
 
-#endif  // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
 }  // namespace internal
 }  // namespace ceres
 
diff --git a/internal/ceres/schur_complement_solver_test.cc b/internal/ceres/schur_complement_solver_test.cc
index d91c162..8e71b2e 100644
--- a/internal/ceres/schur_complement_solver_test.cc
+++ b/internal/ceres/schur_complement_solver_test.cc
@@ -187,17 +187,31 @@
 
 #ifndef CERES_NO_CXSPARSE
 TEST_F(SchurComplementSolverTest,
-       SparseSchurWithSuiteSparseSmallProblem) {
+       SparseSchurWithCXSparseSmallProblem) {
   ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
   ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
 }
 
 TEST_F(SchurComplementSolverTest,
-       SparseSchurWithSuiteSparseLargeProblem) {
+       SparseSchurWithCXSparseLargeProblem) {
   ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
   ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
 }
 #endif  // CERES_NO_CXSPARSE
 
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithEigenSparseSmallProblem) {
+  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithEigenSparseLargeProblem) {
+  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+}
+#endif  // CERES_USE_EIGEN_SPARSE
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/schur_eliminator.cc b/internal/ceres/schur_eliminator.cc
index 31f8354..4d9b175 100644
--- a/internal/ceres/schur_eliminator.cc
+++ b/internal/ceres/schur_eliminator.cc
@@ -37,7 +37,7 @@
 // THIS FILE IS AUTOGENERATED. DO NOT EDIT.
 //=========================================
 //
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
 // Editing it manually is not recommended.
 
 #include "ceres/linear_solver.h"
@@ -102,9 +102,24 @@
   }
   if ((options.row_block_size == 2) &&
       (options.e_block_size == 4) &&
+      (options.f_block_size == 8)) {
+    return new SchurEliminator<2, 4, 8>(options);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 4) &&
+      (options.f_block_size == 9)) {
+    return new SchurEliminator<2, 4, 9>(options);
+  }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == 4) &&
       (options.f_block_size == Eigen::Dynamic)) {
     return new SchurEliminator<2, 4, Eigen::Dynamic>(options);
   }
+  if ((options.row_block_size == 2) &&
+      (options.e_block_size == Eigen::Dynamic) &&
+      (options.f_block_size == Eigen::Dynamic)) {
+    return new SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>(options);
+  }
   if ((options.row_block_size == 4) &&
       (options.e_block_size == 4) &&
       (options.f_block_size == 2)) {
diff --git a/internal/ceres/schur_eliminator_impl.h b/internal/ceres/schur_eliminator_impl.h
index c09b7fb..305d94e 100644
--- a/internal/ceres/schur_eliminator_impl.h
+++ b/internal/ceres/schur_eliminator_impl.h
@@ -45,6 +45,9 @@
 
 #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifdef CERES_USE_OPENMP
 #include <omp.h>
 #endif
diff --git a/internal/ceres/schur_jacobi_preconditioner.cc b/internal/ceres/schur_jacobi_preconditioner.cc
index 338df71..6dc9e89 100644
--- a/internal/ceres/schur_jacobi_preconditioner.cc
+++ b/internal/ceres/schur_jacobi_preconditioner.cc
@@ -33,10 +33,9 @@
 #include <utility>
 #include <vector>
 #include "Eigen/Dense"
-#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/block_random_access_diagonal_matrix.h"
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/collections_port.h"
-#include "ceres/detect_structure.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/linear_solver.h"
 #include "ceres/schur_eliminator.h"
@@ -57,16 +56,11 @@
       << "SCHUR_JACOBI preconditioner.";
 
   block_size_.resize(num_blocks);
-  set<pair<int, int> > block_pairs;
-
-  int num_block_diagonal_entries = 0;
   for (int i = 0; i < num_blocks; ++i) {
     block_size_[i] = bs.cols[i + options_.elimination_groups[0]].size;
-    block_pairs.insert(make_pair(i, i));
-    num_block_diagonal_entries += block_size_[i] * block_size_[i];
   }
 
-  m_.reset(new BlockRandomAccessSparseMatrix(block_size_, block_pairs));
+  m_.reset(new BlockRandomAccessDiagonalMatrix(block_size_));
   InitEliminator(bs);
 }
 
@@ -77,17 +71,13 @@
 void SchurJacobiPreconditioner::InitEliminator(
     const CompressedRowBlockStructure& bs) {
   LinearSolver::Options eliminator_options;
-
   eliminator_options.elimination_groups = options_.elimination_groups;
   eliminator_options.num_threads = options_.num_threads;
-
-  DetectStructure(bs, options_.elimination_groups[0],
-                  &eliminator_options.row_block_size,
-                  &eliminator_options.e_block_size,
-                  &eliminator_options.f_block_size);
-
+  eliminator_options.e_block_size = options_.e_block_size;
+  eliminator_options.f_block_size = options_.f_block_size;
+  eliminator_options.row_block_size = options_.row_block_size;
   eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
-  eliminator_->Init(options_.elimination_groups[0], &bs);
+  eliminator_->Init(eliminator_options.elimination_groups[0], &bs);
 }
 
 // Update the values of the preconditioner matrix and factorize it.
@@ -118,7 +108,7 @@
   CHECK_NOTNULL(y);
 
   const double* lhs_values =
-      down_cast<BlockRandomAccessSparseMatrix*>(m_.get())->matrix()->values();
+      down_cast<BlockRandomAccessDiagonalMatrix*>(m_.get())->matrix()->values();
 
   // This loop can be easily multi-threaded with OpenMP if need be.
   for (int i = 0; i < block_size_.size(); ++i) {
diff --git a/internal/ceres/schur_jacobi_preconditioner.h b/internal/ceres/schur_jacobi_preconditioner.h
index f6e7b0d..aecb015 100644
--- a/internal/ceres/schur_jacobi_preconditioner.h
+++ b/internal/ceres/schur_jacobi_preconditioner.h
@@ -49,7 +49,7 @@
 namespace ceres {
 namespace internal {
 
-class BlockRandomAccessSparseMatrix;
+class BlockRandomAccessDiagonalMatrix;
 class BlockSparseMatrix;
 struct CompressedRowBlockStructure;
 class SchurEliminatorBase;
@@ -100,7 +100,7 @@
   scoped_ptr<SchurEliminatorBase> eliminator_;
 
   // Preconditioner matrix.
-  scoped_ptr<BlockRandomAccessSparseMatrix> m_;
+  scoped_ptr<BlockRandomAccessDiagonalMatrix> m_;
   CERES_DISALLOW_COPY_AND_ASSIGN(SchurJacobiPreconditioner);
 };
 
diff --git a/internal/ceres/schur_ordering.cc b/internal/ceres/schur_ordering.cc
deleted file mode 100644
index 1cdff4e..0000000
--- a/internal/ceres/schur_ordering.cc
+++ /dev/null
@@ -1,101 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-
-#include "ceres/schur_ordering.h"
-
-#include "ceres/graph.h"
-#include "ceres/graph_algorithms.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/map_util.h"
-#include "ceres/parameter_block.h"
-#include "ceres/program.h"
-#include "ceres/residual_block.h"
-#include "glog/logging.h"
-
-namespace ceres {
-namespace internal {
-
-int ComputeSchurOrdering(const Program& program,
-                         vector<ParameterBlock*>* ordering) {
-  CHECK_NOTNULL(ordering)->clear();
-
-  scoped_ptr<Graph< ParameterBlock*> > graph(
-      CHECK_NOTNULL(CreateHessianGraph(program)));
-  int independent_set_size = IndependentSetOrdering(*graph, ordering);
-  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
-
-  // Add the excluded blocks to back of the ordering vector.
-  for (int i = 0; i < parameter_blocks.size(); ++i) {
-    ParameterBlock* parameter_block = parameter_blocks[i];
-    if (parameter_block->IsConstant()) {
-      ordering->push_back(parameter_block);
-    }
-  }
-
-  return independent_set_size;
-}
-
-Graph<ParameterBlock*>*
-CreateHessianGraph(const Program& program) {
-  Graph<ParameterBlock*>* graph = new Graph<ParameterBlock*>;
-  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
-  for (int i = 0; i < parameter_blocks.size(); ++i) {
-    ParameterBlock* parameter_block = parameter_blocks[i];
-    if (!parameter_block->IsConstant()) {
-      graph->AddVertex(parameter_block);
-    }
-  }
-
-  const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
-  for (int i = 0; i < residual_blocks.size(); ++i) {
-    const ResidualBlock* residual_block = residual_blocks[i];
-    const int num_parameter_blocks = residual_block->NumParameterBlocks();
-    ParameterBlock* const* parameter_blocks =
-        residual_block->parameter_blocks();
-    for (int j = 0; j < num_parameter_blocks; ++j) {
-      if (parameter_blocks[j]->IsConstant()) {
-        continue;
-      }
-
-      for (int k = j + 1; k < num_parameter_blocks; ++k) {
-        if (parameter_blocks[k]->IsConstant()) {
-          continue;
-        }
-
-        graph->AddEdge(parameter_blocks[j], parameter_blocks[k]);
-      }
-    }
-  }
-
-  return graph;
-}
-
-}  // namespace internal
-}  // namespace ceres
diff --git a/internal/ceres/schur_ordering.h b/internal/ceres/schur_ordering.h
deleted file mode 100644
index 1f9a4ff..0000000
--- a/internal/ceres/schur_ordering.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-//
-// Compute a parameter block ordering for use with the Schur
-// complement based algorithms.
-
-#ifndef CERES_INTERNAL_SCHUR_ORDERING_H_
-#define CERES_INTERNAL_SCHUR_ORDERING_H_
-
-#include <vector>
-#include "ceres/graph.h"
-#include "ceres/types.h"
-
-namespace ceres {
-namespace internal {
-
-class Program;
-class ParameterBlock;
-
-// Uses an approximate independent set ordering to order the parameter
-// blocks of a problem so that it is suitable for use with Schur
-// complement based solvers. The output variable ordering contains an
-// ordering of the parameter blocks and the return value is size of
-// the independent set or the number of e_blocks (see
-// schur_complement_solver.h for an explanation). Constant parameters
-// are added to the end.
-//
-// The ordering vector has the structure
-//
-// ordering = [independent set,
-//             complement of the independent set,
-//             fixed blocks]
-int ComputeSchurOrdering(const Program& program,
-                         vector<ParameterBlock* >* ordering);
-
-
-// Builds a graph on the parameter blocks of a Problem, whose
-// structure reflects the sparsity structure of the Hessian. Each
-// vertex corresponds to a parameter block in the Problem except for
-// parameter blocks that are marked constant. An edge connects two
-// parameter blocks, if they co-occur in a residual block.
-Graph<ParameterBlock*>* CreateHessianGraph(const Program& program);
-
-}  // namespace internal
-}  // namespace ceres
-
-#endif  // CERES_INTERNAL_SCHUR_ORDERING_H_
diff --git a/internal/ceres/schur_ordering_test.cc b/internal/ceres/schur_ordering_test.cc
deleted file mode 100644
index bd74ebb..0000000
--- a/internal/ceres/schur_ordering_test.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-//   this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-//   this list of conditions and the following disclaimer in the documentation
-//   and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-//   used to endorse or promote products derived from this software without
-//   specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-
-#include "ceres/schur_ordering.h"
-
-#include <cstddef>
-#include <vector>
-#include "gtest/gtest.h"
-#include "ceres/collections_port.h"
-#include "ceres/graph.h"
-#include "ceres/problem_impl.h"
-#include "ceres/program.h"
-#include "ceres/stl_util.h"
-#include "ceres/cost_function.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/sized_cost_function.h"
-
-namespace ceres {
-namespace internal {
-
-typedef Graph<ParameterBlock*> HessianGraph;
-typedef HashSet<ParameterBlock*> VertexSet;
-
-template <int M, int N1 = 0, int N2 = 0, int N3 = 0>
-class DummyCostFunction: public SizedCostFunction<M, N1, N2, N3> {
-  virtual bool Evaluate(double const* const* parameters,
-                        double* residuals,
-                        double** jacobians) const {
-    return true;
-  }
-};
-
-class SchurOrderingTest : public ::testing::Test {
- protected :
-  virtual void SetUp() {
-    // The explicit calls to AddParameterBlock are necessary because
-    // the below tests depend on the specific numbering of the
-    // parameter blocks.
-    problem_.AddParameterBlock(x_, 3);
-    problem_.AddParameterBlock(y_, 4);
-    problem_.AddParameterBlock(z_, 5);
-    problem_.AddParameterBlock(w_, 6);
-
-    problem_.AddResidualBlock(new DummyCostFunction<2, 3>, NULL, x_);
-    problem_.AddResidualBlock(new DummyCostFunction<6, 5, 4>, NULL, z_, y_);
-    problem_.AddResidualBlock(new DummyCostFunction<3, 3, 5>, NULL, x_, z_);
-    problem_.AddResidualBlock(new DummyCostFunction<7, 5, 3>, NULL, z_, x_);
-    problem_.AddResidualBlock(new DummyCostFunction<1, 5, 3, 6>, NULL,
-                              z_, x_, w_);
-  }
-
-  ProblemImpl problem_;
-  double x_[3], y_[4], z_[5], w_[6];
-};
-
-TEST_F(SchurOrderingTest, NoFixed) {
-  const Program& program = problem_.program();
-  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
-  scoped_ptr<HessianGraph> graph(CreateHessianGraph(program));
-
-  const VertexSet& vertices = graph->vertices();
-  EXPECT_EQ(vertices.size(), 4);
-
-  for (int i = 0; i < 4; ++i) {
-    EXPECT_TRUE(vertices.find(parameter_blocks[i]) != vertices.end());
-  }
-
-  {
-    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[0]);
-    EXPECT_EQ(neighbors.size(), 2);
-    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
-    EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
-  }
-
-  {
-    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[1]);
-    EXPECT_EQ(neighbors.size(), 1);
-    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
-  }
-
-  {
-    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[2]);
-    EXPECT_EQ(neighbors.size(), 3);
-    EXPECT_TRUE(neighbors.find(parameter_blocks[0]) != neighbors.end());
-    EXPECT_TRUE(neighbors.find(parameter_blocks[1]) != neighbors.end());
-    EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
-  }
-
-  {
-    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[3]);
-    EXPECT_EQ(neighbors.size(), 2);
-    EXPECT_TRUE(neighbors.find(parameter_blocks[0]) != neighbors.end());
-    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
-  }
-}
-
-TEST_F(SchurOrderingTest, AllFixed) {
-  problem_.SetParameterBlockConstant(x_);
-  problem_.SetParameterBlockConstant(y_);
-  problem_.SetParameterBlockConstant(z_);
-  problem_.SetParameterBlockConstant(w_);
-
-  const Program& program = problem_.program();
-  scoped_ptr<HessianGraph> graph(CreateHessianGraph(program));
-  EXPECT_EQ(graph->vertices().size(), 0);
-}
-
-TEST_F(SchurOrderingTest, OneFixed) {
-  problem_.SetParameterBlockConstant(x_);
-
-  const Program& program = problem_.program();
-  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
-  scoped_ptr<HessianGraph> graph(CreateHessianGraph(program));
-
-  const VertexSet& vertices = graph->vertices();
-
-  EXPECT_EQ(vertices.size(), 3);
-  EXPECT_TRUE(vertices.find(parameter_blocks[0]) == vertices.end());
-
-  for (int i = 1; i < 3; ++i) {
-    EXPECT_TRUE(vertices.find(parameter_blocks[i]) != vertices.end());
-  }
-
-  {
-    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[1]);
-    EXPECT_EQ(neighbors.size(), 1);
-    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
-  }
-
-  {
-    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[2]);
-    EXPECT_EQ(neighbors.size(), 2);
-    EXPECT_TRUE(neighbors.find(parameter_blocks[1]) != neighbors.end());
-    EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
-  }
-
-  {
-    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[3]);
-    EXPECT_EQ(neighbors.size(), 1);
-    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
-  }
-
-  // The constant parameter block is at the end.
-  vector<ParameterBlock*> ordering;
-  ComputeSchurOrdering(program, &ordering);
-  EXPECT_EQ(ordering.back(), parameter_blocks[0]);
-}
-
-}  // namespace internal
-}  // namespace ceres
diff --git a/internal/ceres/single_linkage_clustering.cc b/internal/ceres/single_linkage_clustering.cc
new file mode 100644
index 0000000..0a8b20c
--- /dev/null
+++ b/internal/ceres/single_linkage_clustering.cc
@@ -0,0 +1,110 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+#include "ceres/single_linkage_clustering.h"
+
+#include "ceres/graph.h"
+#include "ceres/collections_port.h"
+#include "ceres/graph_algorithms.h"
+
+namespace ceres {
+namespace internal {
+
+int ComputeSingleLinkageClustering(
+    const SingleLinkageClusteringOptions& options,
+    const Graph<int>& graph,
+    HashMap<int, int>* membership) {
+  CHECK_NOTNULL(membership)->clear();
+
+  // Initially each vertex is in its own cluster.
+  const HashSet<int>& vertices = graph.vertices();
+  for (HashSet<int>::const_iterator it = vertices.begin();
+       it != vertices.end();
+       ++it) {
+    (*membership)[*it] = *it;
+  }
+
+  for (HashSet<int>::const_iterator it1 = vertices.begin();
+       it1 != vertices.end();
+       ++it1) {
+    const int vertex1 = *it1;
+    const HashSet<int>& neighbors = graph.Neighbors(vertex1);
+    for (HashSet<int>::const_iterator it2 = neighbors.begin();
+         it2 != neighbors.end();
+         ++it2) {
+      const int vertex2 = *it2;
+
+      // Since the graph is undirected, only pay attention to one side
+      // of the edge and ignore weak edges.
+      if ((vertex1 > vertex2) ||
+          (graph.EdgeWeight(vertex1, vertex2) < options.min_similarity)) {
+        continue;
+      }
+
+      // Use a union-find algorithm to keep track of the clusters.
+      const int c1 = FindConnectedComponent(vertex1, membership);
+      const int c2 = FindConnectedComponent(vertex2, membership);
+
+      if (c1 == c2) {
+        continue;
+      }
+
+      if (c1 < c2) {
+        (*membership)[c2] = c1;
+      } else {
+        (*membership)[c1] = c2;
+      }
+    }
+  }
+
+  // Make sure that every vertex is connected directly to the vertex
+  // identifying the cluster.
+  int num_clusters = 0;
+  for (HashMap<int, int>::iterator it = membership->begin();
+       it != membership->end();
+       ++it) {
+    it->second = FindConnectedComponent(it->first, membership);
+    if (it->first == it->second) {
+      ++num_clusters;
+    }
+  }
+
+  return num_clusters;
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
diff --git a/internal/ceres/single_linkage_clustering.h b/internal/ceres/single_linkage_clustering.h
new file mode 100644
index 0000000..e6fdeab
--- /dev/null
+++ b/internal/ceres/single_linkage_clustering.h
@@ -0,0 +1,74 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
+#define CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+
+namespace ceres {
+namespace internal {
+
+struct SingleLinkageClusteringOptions {
+  SingleLinkageClusteringOptions()
+      : min_similarity(0.99) {
+  }
+
+  // Graph edges with edge weight less than min_similarity are ignored
+  // during the clustering process.
+  double min_similarity;
+};
+
+// Compute a partitioning of the vertices of the graph using the
+// single linkage clustering algorithm. Edges with weight less than
+// SingleLinkageClusteringOptions::min_similarity will be ignored.
+//
+// membership upon return will contain a mapping from the vertices of
+// the graph to an integer indicating the identity of the cluster that
+// it belongs to.
+//
+// The return value of this function is the number of clusters
+// identified by the algorithm.
+int ComputeSingleLinkageClustering(
+    const SingleLinkageClusteringOptions& options,
+    const Graph<int>& graph,
+    HashMap<int, int>* membership);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+#endif  // CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
diff --git a/internal/ceres/single_linkage_clustering_test.cc b/internal/ceres/single_linkage_clustering_test.cc
new file mode 100644
index 0000000..1cbc5be
--- /dev/null
+++ b/internal/ceres/single_linkage_clustering_test.cc
@@ -0,0 +1,132 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sameer Agarwal (sameeragarwal@google.com)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+#include "ceres/single_linkage_clustering.h"
+
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(SingleLinkageClustering, GraphHasTwoComponents) {
+  Graph<int> graph;
+  const int kNumVertices = 6;
+  for (int i = 0; i < kNumVertices; ++i) {
+    graph.AddVertex(i);
+  }
+  // Graph structure:
+  //
+  //  0-1-2-3 4-5
+  graph.AddEdge(0, 1, 1.0);
+  graph.AddEdge(1, 2, 1.0);
+  graph.AddEdge(2, 3, 1.0);
+  graph.AddEdge(4, 5, 1.0);
+
+  SingleLinkageClusteringOptions options;
+  HashMap<int, int> membership;
+  ComputeSingleLinkageClustering(options, graph, &membership);
+  EXPECT_EQ(membership.size(), kNumVertices);
+
+  EXPECT_EQ(membership[1], membership[0]);
+  EXPECT_EQ(membership[2], membership[0]);
+  EXPECT_EQ(membership[3], membership[0]);
+  EXPECT_NE(membership[4], membership[0]);
+  EXPECT_NE(membership[5], membership[0]);
+  EXPECT_EQ(membership[4], membership[5]);
+}
+
+TEST(SingleLinkageClustering, ComponentWithWeakLink) {
+  Graph<int> graph;
+  const int kNumVertices = 6;
+  for (int i = 0; i < kNumVertices; ++i) {
+    graph.AddVertex(i);
+  }
+  // Graph structure:
+  //
+  //  0-1-2-3 4-5
+  graph.AddEdge(0, 1, 1.0);
+  graph.AddEdge(1, 2, 1.0);
+  graph.AddEdge(2, 3, 1.0);
+
+  // This component should break up into two.
+  graph.AddEdge(4, 5, 0.5);
+
+  SingleLinkageClusteringOptions options;
+  HashMap<int, int> membership;
+  ComputeSingleLinkageClustering(options, graph, &membership);
+  EXPECT_EQ(membership.size(), kNumVertices);
+
+  EXPECT_EQ(membership[1], membership[0]);
+  EXPECT_EQ(membership[2], membership[0]);
+  EXPECT_EQ(membership[3], membership[0]);
+  EXPECT_NE(membership[4], membership[0]);
+  EXPECT_NE(membership[5], membership[0]);
+  EXPECT_NE(membership[4], membership[5]);
+}
+
+TEST(SingleLinkageClustering, ComponentWithWeakLinkAndStrongLink) {
+  Graph<int> graph;
+  const int kNumVertices = 6;
+  for (int i = 0; i < kNumVertices; ++i) {
+    graph.AddVertex(i);
+  }
+  // Graph structure:
+  //
+  //  0-1-2-3 4-5
+  graph.AddEdge(0, 1, 1.0);
+  graph.AddEdge(1, 2, 1.0);
+  graph.AddEdge(2, 3, 0.5); // Weak link
+  graph.AddEdge(0, 3, 1.0);
+
+  // This component should break up into two.
+  graph.AddEdge(4, 5, 1.0);
+
+  SingleLinkageClusteringOptions options;
+  HashMap<int, int> membership;
+  ComputeSingleLinkageClustering(options, graph, &membership);
+  EXPECT_EQ(membership.size(), kNumVertices);
+
+  EXPECT_EQ(membership[1], membership[0]);
+  EXPECT_EQ(membership[2], membership[0]);
+  EXPECT_EQ(membership[3], membership[0]);
+  EXPECT_EQ(membership[4], membership[5]);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
diff --git a/internal/ceres/small_blas.h b/internal/ceres/small_blas.h
index e14e664..5639664 100644
--- a/internal/ceres/small_blas.h
+++ b/internal/ceres/small_blas.h
@@ -35,36 +35,13 @@
 #ifndef CERES_INTERNAL_SMALL_BLAS_H_
 #define CERES_INTERNAL_SMALL_BLAS_H_
 
+#include "ceres/internal/port.h"
 #include "ceres/internal/eigen.h"
 #include "glog/logging.h"
 
 namespace ceres {
 namespace internal {
 
-// Remove the ".noalias()" annotation from the matrix matrix
-// mutliplies to produce a correct build with the Android NDK,
-// including versions 6, 7, 8, and 8b, when built with STLPort and the
-// non-standalone toolchain (i.e. ndk-build). This appears to be a
-// compiler bug; if the workaround is not in place, the line
-//
-//   block.noalias() -= A * B;
-//
-// gets compiled to
-//
-//   block.noalias() += A * B;
-//
-// which breaks schur elimination. Introducing a temporary by removing the
-// .noalias() annotation causes the issue to disappear. Tracking this
-// issue down was tricky, since the test suite doesn't run when built with
-// the non-standalone toolchain.
-//
-// TODO(keir): Make a reproduction case for this and send it upstream.
-#ifdef CERES_WORK_AROUND_ANDROID_NDK_COMPILER_BUG
-#define CERES_MAYBE_NOALIAS
-#else
-#define CERES_MAYBE_NOALIAS .noalias()
-#endif
-
 // The following three macros are used to share code and reduce
 // template junk across the various GEMM variants.
 #define CERES_GEMM_BEGIN(name)                                          \
@@ -167,11 +144,11 @@
     block(Cref, start_row_c, start_col_c, num_row_a, num_col_b);
 
   if (kOperation > 0) {
-    block CERES_MAYBE_NOALIAS += Aref * Bref;
+    block.noalias() += Aref * Bref;
   } else if (kOperation < 0) {
-    block CERES_MAYBE_NOALIAS -= Aref * Bref;
+    block.noalias() -= Aref * Bref;
   } else {
-    block CERES_MAYBE_NOALIAS = Aref * Bref;
+    block.noalias() = Aref * Bref;
   }
 }
 
@@ -227,11 +204,11 @@
                                               start_row_c, start_col_c,
                                               num_col_a, num_col_b);
   if (kOperation > 0) {
-    block CERES_MAYBE_NOALIAS += Aref.transpose() * Bref;
+    block.noalias() += Aref.transpose() * Bref;
   } else if (kOperation < 0) {
-    block CERES_MAYBE_NOALIAS -= Aref.transpose() * Bref;
+    block.noalias() -= Aref.transpose() * Bref;
   } else {
-    block CERES_MAYBE_NOALIAS = Aref.transpose() * Bref;
+    block.noalias() = Aref.transpose() * Bref;
   }
 }
 
@@ -393,8 +370,6 @@
 #endif  // CERES_NO_CUSTOM_BLAS
 }
 
-
-#undef CERES_MAYBE_NOALIAS
 #undef CERES_GEMM_BEGIN
 #undef CERES_GEMM_EIGEN_HEADER
 #undef CERES_GEMM_NAIVE_HEADER
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc
index 3b67746..3a57084 100644
--- a/internal/ceres/solver.cc
+++ b/internal/ceres/solver.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -29,19 +29,257 @@
 // Author: keir@google.com (Keir Mierle)
 //         sameeragarwal@google.com (Sameer Agarwal)
 
+#include "ceres/internal/port.h"
 #include "ceres/solver.h"
 
+#include <sstream>   // NOLINT
 #include <vector>
+
 #include "ceres/problem.h"
 #include "ceres/problem_impl.h"
 #include "ceres/program.h"
 #include "ceres/solver_impl.h"
 #include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "ceres/version.h"
 #include "ceres/wall_time.h"
 
 namespace ceres {
 namespace {
 
+#define OPTION_OP(x, y, OP)                                             \
+  if (!(options.x OP y)) {                                              \
+    std::stringstream ss;                                               \
+    ss << "Invalid configuration. ";                                    \
+    ss << string("Solver::Options::" #x " = ") << options.x << ". ";    \
+    ss << "Violated constraint: ";                                      \
+    ss << string("Solver::Options::" #x " " #OP " "#y);                 \
+    *error = ss.str();                                                  \
+    return false;                                                       \
+  }
+
+#define OPTION_OP_OPTION(x, y, OP)                                      \
+  if (!(options.x OP options.y)) {                                      \
+    std::stringstream ss;                                               \
+    ss << "Invalid configuration. ";                                    \
+    ss << string("Solver::Options::" #x " = ") << options.x << ". ";    \
+    ss << string("Solver::Options::" #y " = ") << options.y << ". ";    \
+    ss << "Violated constraint: ";                                      \
+    ss << string("Solver::Options::" #x );                              \
+    ss << string(#OP " Solver::Options::" #y ".");                      \
+    *error = ss.str();                                                  \
+    return false;                                                       \
+  }
+
+#define OPTION_GE(x, y) OPTION_OP(x, y, >=);
+#define OPTION_GT(x, y) OPTION_OP(x, y, >);
+#define OPTION_LE(x, y) OPTION_OP(x, y, <=);
+#define OPTION_LT(x, y) OPTION_OP(x, y, <);
+#define OPTION_LE_OPTION(x, y) OPTION_OP_OPTION(x ,y, <=)
+#define OPTION_LT_OPTION(x, y) OPTION_OP_OPTION(x ,y, <)
+
+bool CommonOptionsAreValid(const Solver::Options& options, string* error) {
+  OPTION_GE(max_num_iterations, 0);
+  OPTION_GE(max_solver_time_in_seconds, 0.0);
+  OPTION_GE(function_tolerance, 0.0);
+  OPTION_GE(gradient_tolerance, 0.0);
+  OPTION_GE(parameter_tolerance, 0.0);
+  OPTION_GT(num_threads, 0);
+  OPTION_GT(num_linear_solver_threads, 0);
+  if (options.check_gradients) {
+    OPTION_GT(gradient_check_relative_precision, 0.0);
+    OPTION_GT(numeric_derivative_relative_step_size, 0.0);
+  }
+  return true;
+}
+
+bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
+  OPTION_GT(initial_trust_region_radius, 0.0);
+  OPTION_GT(min_trust_region_radius, 0.0);
+  OPTION_GT(max_trust_region_radius, 0.0);
+  OPTION_LE_OPTION(min_trust_region_radius, max_trust_region_radius);
+  OPTION_LE_OPTION(min_trust_region_radius, initial_trust_region_radius);
+  OPTION_LE_OPTION(initial_trust_region_radius, max_trust_region_radius);
+  OPTION_GE(min_relative_decrease, 0.0);
+  OPTION_GE(min_lm_diagonal, 0.0);
+  OPTION_GE(max_lm_diagonal, 0.0);
+  OPTION_LE_OPTION(min_lm_diagonal, max_lm_diagonal);
+  OPTION_GE(max_num_consecutive_invalid_steps, 0);
+  OPTION_GT(eta, 0.0);
+  OPTION_GE(min_linear_solver_iterations, 1);
+  OPTION_GE(max_linear_solver_iterations, 1);
+  OPTION_LE_OPTION(min_linear_solver_iterations, max_linear_solver_iterations);
+
+  if (options.use_inner_iterations) {
+    OPTION_GE(inner_iteration_tolerance, 0.0);
+  }
+
+  if (options.use_nonmonotonic_steps) {
+    OPTION_GT(max_consecutive_nonmonotonic_steps, 0);
+  }
+
+  if (options.preconditioner_type == CLUSTER_JACOBI &&
+      options.sparse_linear_algebra_library_type != SUITE_SPARSE) {
+    *error =  "CLUSTER_JACOBI requires "
+        "Solver::Options::sparse_linear_algebra_library_type to be "
+        "SUITE_SPARSE";
+    return false;
+  }
+
+  if (options.preconditioner_type == CLUSTER_TRIDIAGONAL &&
+      options.sparse_linear_algebra_library_type != SUITE_SPARSE) {
+    *error =  "CLUSTER_TRIDIAGONAL requires "
+        "Solver::Options::sparse_linear_algebra_library_type to be "
+        "SUITE_SPARSE";
+    return false;
+  }
+
+#ifdef CERES_NO_LAPACK
+  if (options.dense_linear_algebra_library_type == LAPACK) {
+    if (options.linear_solver_type == DENSE_NORMAL_CHOLESKY) {
+      *error = "Can't use DENSE_NORMAL_CHOLESKY with LAPACK because "
+          "LAPACK was not enabled when Ceres was built.";
+      return false;
+    }
+
+    if (options.linear_solver_type == DENSE_QR) {
+      *error = "Can't use DENSE_QR with LAPACK because "
+          "LAPACK was not enabled when Ceres was built.";
+      return false;
+    }
+
+    if (options.linear_solver_type == DENSE_SCHUR) {
+      *error = "Can't use DENSE_SCHUR with LAPACK because "
+          "LAPACK was not enabled when Ceres was built.";
+      return false;
+    }
+  }
+#endif
+
+#ifdef CERES_NO_SUITESPARSE
+  if (options.sparse_linear_algebra_library_type == SUITE_SPARSE) {
+    if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+      *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITESPARSE because "
+             "SuiteSparse was not enabled when Ceres was built.";
+      return false;
+    }
+
+    if (options.linear_solver_type == SPARSE_SCHUR) {
+      *error = "Can't use SPARSE_SCHUR with SUITESPARSE because "
+          "SuiteSparse was not enabled when Ceres was built.";
+      return false;
+    }
+
+    if (options.preconditioner_type == CLUSTER_JACOBI) {
+      *error =  "CLUSTER_JACOBI preconditioner not supported. "
+          "SuiteSparse was not enabled when Ceres was built.";
+      return false;
+    }
+
+    if (options.preconditioner_type == CLUSTER_TRIDIAGONAL) {
+      *error =  "CLUSTER_TRIDIAGONAL preconditioner not supported. "
+          "SuiteSparse was not enabled when Ceres was built.";
+    return false;
+    }
+  }
+#endif
+
+#ifdef CERES_NO_CXSPARSE
+  if (options.sparse_linear_algebra_library_type == CX_SPARSE) {
+    if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+      *error = "Can't use SPARSE_NORMAL_CHOLESKY with CX_SPARSE because "
+             "CXSparse was not enabled when Ceres was built.";
+      return false;
+    }
+
+    if (options.linear_solver_type == SPARSE_SCHUR) {
+      *error = "Can't use SPARSE_SCHUR with CX_SPARSE because "
+          "CXSparse was not enabled when Ceres was built.";
+      return false;
+    }
+  }
+#endif
+
+  if (options.trust_region_strategy_type == DOGLEG) {
+    if (options.linear_solver_type == ITERATIVE_SCHUR ||
+        options.linear_solver_type == CGNR) {
+      *error = "DOGLEG only supports exact factorization based linear "
+          "solvers. If you want to use an iterative solver please "
+          "use LEVENBERG_MARQUARDT as the trust_region_strategy_type";
+      return false;
+    }
+  }
+
+  if (options.trust_region_minimizer_iterations_to_dump.size() > 0 &&
+      options.trust_region_problem_dump_format_type != CONSOLE &&
+      options.trust_region_problem_dump_directory.empty()) {
+    *error = "Solver::Options::trust_region_problem_dump_directory is empty.";
+    return false;
+  }
+
+  if (options.dynamic_sparsity &&
+      options.linear_solver_type != SPARSE_NORMAL_CHOLESKY) {
+    *error = "Dynamic sparsity is only supported with SPARSE_NORMAL_CHOLESKY.";
+    return false;
+  }
+
+  return true;
+}
+
+bool LineSearchOptionsAreValid(const Solver::Options& options, string* error) {
+  OPTION_GT(max_lbfgs_rank, 0);
+  OPTION_GT(min_line_search_step_size, 0.0);
+  OPTION_GT(max_line_search_step_contraction, 0.0);
+  OPTION_LT(max_line_search_step_contraction, 1.0);
+  OPTION_LT_OPTION(max_line_search_step_contraction,
+                   min_line_search_step_contraction);
+  OPTION_LE(min_line_search_step_contraction, 1.0);
+  OPTION_GT(max_num_line_search_step_size_iterations, 0);
+  OPTION_GT(line_search_sufficient_function_decrease, 0.0);
+  OPTION_LT_OPTION(line_search_sufficient_function_decrease,
+                   line_search_sufficient_curvature_decrease);
+  OPTION_LT(line_search_sufficient_curvature_decrease, 1.0);
+  OPTION_GT(max_line_search_step_expansion, 1.0);
+
+  if ((options.line_search_direction_type == ceres::BFGS ||
+       options.line_search_direction_type == ceres::LBFGS) &&
+      options.line_search_type != ceres::WOLFE) {
+
+    *error =
+        string("Invalid configuration: Solver::Options::line_search_type = ")
+        + string(LineSearchTypeToString(options.line_search_type))
+        + string(". When using (L)BFGS, "
+                 "Solver::Options::line_search_type must be set to WOLFE.");
+    return false;
+  }
+
+  // Warn user if they have requested BISECTION interpolation, but constraints
+  // on max/min step size change during line search prevent bisection scaling
+  // from occurring. Warn only, as this is likely a user mistake, but one which
+  // does not prevent us from continuing.
+  LOG_IF(WARNING,
+         (options.line_search_interpolation_type == ceres::BISECTION &&
+          (options.max_line_search_step_contraction > 0.5 ||
+           options.min_line_search_step_contraction < 0.5)))
+      << "Line search interpolation type is BISECTION, but specified "
+      << "max_line_search_step_contraction: "
+      << options.max_line_search_step_contraction << ", and "
+      << "min_line_search_step_contraction: "
+      << options.min_line_search_step_contraction
+      << ", prevent bisection (0.5) scaling, continuing with solve regardless.";
+
+  return true;
+}
+
+#undef OPTION_OP
+#undef OPTION_OP_OPTION
+#undef OPTION_GT
+#undef OPTION_GE
+#undef OPTION_LE
+#undef OPTION_LT
+#undef OPTION_LE_OPTION
+#undef OPTION_LT_OPTION
+
 void StringifyOrdering(const vector<int>& ordering, string* report) {
   if (ordering.size() == 0) {
     internal::StringAppendF(report, "AUTOMATIC");
@@ -54,11 +292,19 @@
   internal::StringAppendF(report, "%d", ordering.back());
 }
 
-}  // namespace
+} // namespace
 
-Solver::Options::~Options() {
-  delete linear_solver_ordering;
-  delete inner_iteration_ordering;
+bool Solver::Options::IsValid(string* error) const {
+  if (!CommonOptionsAreValid(*this, error)) {
+    return false;
+  }
+
+  if (minimizer_type == TRUST_REGION) {
+    return TrustRegionOptionsAreValid(*this, error);
+  }
+
+  CHECK_EQ(minimizer_type, LINE_SEARCH);
+  return LineSearchOptionsAreValid(*this, error);
 }
 
 Solver::~Solver() {}
@@ -67,8 +313,16 @@
                    Problem* problem,
                    Solver::Summary* summary) {
   double start_time_seconds = internal::WallTimeInSeconds();
-  internal::ProblemImpl* problem_impl =
-      CHECK_NOTNULL(problem)->problem_impl_.get();
+  CHECK_NOTNULL(problem);
+  CHECK_NOTNULL(summary);
+
+  *summary = Summary();
+  if (!options.IsValid(&summary->message)) {
+    LOG(ERROR) << "Terminating: " << summary->message;
+    return;
+  }
+
+  internal::ProblemImpl* problem_impl = problem->problem_impl_.get();
   internal::SolverImpl::Solve(options, problem_impl, summary);
   summary->total_time_in_seconds =
       internal::WallTimeInSeconds() - start_time_seconds;
@@ -85,7 +339,8 @@
     // Invalid values for most fields, to ensure that we are not
     // accidentally reporting default values.
     : minimizer_type(TRUST_REGION),
-      termination_type(DID_NOT_RUN),
+      termination_type(FAILURE),
+      message("ceres::Solve was not called."),
       initial_cost(-1.0),
       final_cost(-1.0),
       fixed_cost(-1.0),
@@ -119,75 +374,51 @@
       inner_iterations_given(false),
       inner_iterations_used(false),
       preconditioner_type(IDENTITY),
+      visibility_clustering_type(CANONICAL_VIEWS),
       trust_region_strategy_type(LEVENBERG_MARQUARDT),
       dense_linear_algebra_library_type(EIGEN),
       sparse_linear_algebra_library_type(SUITE_SPARSE),
       line_search_direction_type(LBFGS),
-      line_search_type(ARMIJO) {
+      line_search_type(ARMIJO),
+      line_search_interpolation_type(BISECTION),
+      nonlinear_conjugate_gradient_type(FLETCHER_REEVES),
+      max_lbfgs_rank(-1) {
 }
 
-string Solver::Summary::BriefReport() const {
-  string report = "Ceres Solver Report: ";
-  if (termination_type == DID_NOT_RUN) {
-    CHECK(!error.empty())
-          << "Solver terminated with DID_NOT_RUN but the solver did not "
-          << "return a reason. This is a Ceres error. Please report this "
-          << "to the Ceres team";
-    return report + "Termination: DID_NOT_RUN, because " + error;
-  }
-
-  internal::StringAppendF(&report, "Iterations: %d",
-                          num_successful_steps + num_unsuccessful_steps);
-  internal::StringAppendF(&report, ", Initial cost: %e", initial_cost);
-
-  // If the solver failed or was aborted, then the final_cost has no
-  // meaning.
-  if (termination_type != NUMERICAL_FAILURE &&
-      termination_type != USER_ABORT) {
-    internal::StringAppendF(&report, ", Final cost: %e", final_cost);
-  }
-
-  internal::StringAppendF(&report, ", Termination: %s.",
-                          SolverTerminationTypeToString(termination_type));
-  return report;
-};
-
 using internal::StringAppendF;
 using internal::StringPrintf;
 
+string Solver::Summary::BriefReport() const {
+  return StringPrintf("Ceres Solver Report: "
+                      "Iterations: %d, "
+                      "Initial cost: %e, "
+                      "Final cost: %e, "
+                      "Termination: %s",
+                      num_successful_steps + num_unsuccessful_steps,
+                      initial_cost,
+                      final_cost,
+                      TerminationTypeToString(termination_type));
+};
+
 string Solver::Summary::FullReport() const {
   string report =
       "\n"
-      "Ceres Solver Report\n"
-      "-------------------\n";
+      "Ceres Solver v" CERES_VERSION_STRING " Solve Report\n"
+      "----------------------------------\n";
 
-  if (termination_type == DID_NOT_RUN) {
-    StringAppendF(&report, "                      Original\n");
-    StringAppendF(&report, "Parameter blocks    % 10d\n", num_parameter_blocks);
-    StringAppendF(&report, "Parameters          % 10d\n", num_parameters);
-    if (num_effective_parameters != num_parameters) {
-      StringAppendF(&report, "Effective parameters% 10d\n", num_parameters);
-    }
-
-    StringAppendF(&report, "Residual blocks     % 10d\n",
-                  num_residual_blocks);
-    StringAppendF(&report, "Residuals           % 10d\n\n",
-                  num_residuals);
-  } else {
-    StringAppendF(&report, "%45s    %21s\n", "Original", "Reduced");
-    StringAppendF(&report, "Parameter blocks    % 25d% 25d\n",
-                  num_parameter_blocks, num_parameter_blocks_reduced);
-    StringAppendF(&report, "Parameters          % 25d% 25d\n",
-                  num_parameters, num_parameters_reduced);
-    if (num_effective_parameters_reduced != num_parameters_reduced) {
-      StringAppendF(&report, "Effective parameters% 25d% 25d\n",
-                    num_effective_parameters, num_effective_parameters_reduced);
-    }
-    StringAppendF(&report, "Residual blocks     % 25d% 25d\n",
-                  num_residual_blocks, num_residual_blocks_reduced);
-    StringAppendF(&report, "Residual            % 25d% 25d\n",
-                  num_residuals, num_residuals_reduced);
+  StringAppendF(&report, "%45s    %21s\n", "Original", "Reduced");
+  StringAppendF(&report, "Parameter blocks    % 25d% 25d\n",
+                num_parameter_blocks, num_parameter_blocks_reduced);
+  StringAppendF(&report, "Parameters          % 25d% 25d\n",
+                num_parameters, num_parameters_reduced);
+  if (num_effective_parameters_reduced != num_parameters_reduced) {
+    StringAppendF(&report, "Effective parameters% 25d% 25d\n",
+                  num_effective_parameters, num_effective_parameters_reduced);
   }
+  StringAppendF(&report, "Residual blocks     % 25d% 25d\n",
+                num_residual_blocks, num_residual_blocks_reduced);
+  StringAppendF(&report, "Residual            % 25d% 25d\n",
+                num_residuals, num_residuals_reduced);
 
   if (minimizer_type == TRUST_REGION) {
     // TRUST_SEARCH HEADER
@@ -237,6 +468,14 @@
                     PreconditionerTypeToString(preconditioner_type));
     }
 
+    if (preconditioner_type == CLUSTER_JACOBI ||
+        preconditioner_type == CLUSTER_TRIDIAGONAL) {
+      StringAppendF(&report, "Visibility clustering%24s%25s\n",
+                    VisibilityClusteringTypeToString(
+                        visibility_clustering_type),
+                    VisibilityClusteringTypeToString(
+                        visibility_clustering_type));
+    }
     StringAppendF(&report, "Threads             % 25d% 25d\n",
                   num_threads_given, num_threads_used);
     StringAppendF(&report, "Linear solver threads % 23d% 25d\n",
@@ -305,21 +544,10 @@
                   num_threads_given, num_threads_used);
   }
 
-  if (termination_type == DID_NOT_RUN) {
-    CHECK(!error.empty())
-        << "Solver terminated with DID_NOT_RUN but the solver did not "
-        << "return a reason. This is a Ceres error. Please report this "
-        << "to the Ceres team";
-    StringAppendF(&report, "Termination:           %20s\n",
-                  "DID_NOT_RUN");
-    StringAppendF(&report, "Reason: %s\n", error.c_str());
-    return report;
-  }
-
   StringAppendF(&report, "\nCost:\n");
   StringAppendF(&report, "Initial        % 30e\n", initial_cost);
-  if (termination_type != NUMERICAL_FAILURE &&
-      termination_type != USER_ABORT) {
+  if (termination_type != FAILURE &&
+      termination_type != USER_FAILURE) {
     StringAppendF(&report, "Final          % 30e\n", final_cost);
     StringAppendF(&report, "Change         % 30e\n",
                   initial_cost - final_cost);
@@ -370,9 +598,15 @@
   StringAppendF(&report, "Total               %25.3f\n\n",
                 total_time_in_seconds);
 
-  StringAppendF(&report, "Termination:        %25s\n",
-                SolverTerminationTypeToString(termination_type));
+  StringAppendF(&report, "Termination:        %25s (%s)\n",
+                TerminationTypeToString(termination_type), message.c_str());
   return report;
 };
 
+bool Solver::Summary::IsSolutionUsable() const {
+  return (termination_type == CONVERGENCE ||
+          termination_type == NO_CONVERGENCE ||
+          termination_type == USER_SUCCESS);
+}
+
 }  // namespace ceres
diff --git a/internal/ceres/solver_impl.cc b/internal/ceres/solver_impl.cc
index 83faa05..a1cf4ca 100644
--- a/internal/ceres/solver_impl.cc
+++ b/internal/ceres/solver_impl.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -34,6 +34,8 @@
 #include <iostream>  // NOLINT
 #include <numeric>
 #include <string>
+#include "ceres/array_utils.h"
+#include "ceres/callbacks.h"
 #include "ceres/coordinate_descent_minimizer.h"
 #include "ceres/cxsparse.h"
 #include "ceres/evaluator.h"
@@ -47,168 +49,20 @@
 #include "ceres/ordered_groups.h"
 #include "ceres/parameter_block.h"
 #include "ceres/parameter_block_ordering.h"
+#include "ceres/preconditioner.h"
 #include "ceres/problem.h"
 #include "ceres/problem_impl.h"
 #include "ceres/program.h"
+#include "ceres/reorder_program.h"
 #include "ceres/residual_block.h"
 #include "ceres/stringprintf.h"
 #include "ceres/suitesparse.h"
+#include "ceres/summary_utils.h"
 #include "ceres/trust_region_minimizer.h"
 #include "ceres/wall_time.h"
 
 namespace ceres {
 namespace internal {
-namespace {
-
-// Callback for updating the user's parameter blocks. Updates are only
-// done if the step is successful.
-class StateUpdatingCallback : public IterationCallback {
- public:
-  StateUpdatingCallback(Program* program, double* parameters)
-      : program_(program), parameters_(parameters) {}
-
-  CallbackReturnType operator()(const IterationSummary& summary) {
-    if (summary.step_is_successful) {
-      program_->StateVectorToParameterBlocks(parameters_);
-      program_->CopyParameterBlockStateToUserState();
-    }
-    return SOLVER_CONTINUE;
-  }
-
- private:
-  Program* program_;
-  double* parameters_;
-};
-
-void SetSummaryFinalCost(Solver::Summary* summary) {
-  summary->final_cost = summary->initial_cost;
-  // We need the loop here, instead of just looking at the last
-  // iteration because the minimizer maybe making non-monotonic steps.
-  for (int i = 0; i < summary->iterations.size(); ++i) {
-    const IterationSummary& iteration_summary = summary->iterations[i];
-    summary->final_cost = min(iteration_summary.cost, summary->final_cost);
-  }
-}
-
-// Callback for logging the state of the minimizer to STDERR or STDOUT
-// depending on the user's preferences and logging level.
-class TrustRegionLoggingCallback : public IterationCallback {
- public:
-  explicit TrustRegionLoggingCallback(bool log_to_stdout)
-      : log_to_stdout_(log_to_stdout) {}
-
-  ~TrustRegionLoggingCallback() {}
-
-  CallbackReturnType operator()(const IterationSummary& summary) {
-    const char* kReportRowFormat =
-        "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
-        "rho:% 3.2e mu:% 3.2e li:% 3d it:% 3.2e tt:% 3.2e";
-    string output = StringPrintf(kReportRowFormat,
-                                 summary.iteration,
-                                 summary.cost,
-                                 summary.cost_change,
-                                 summary.gradient_max_norm,
-                                 summary.step_norm,
-                                 summary.relative_decrease,
-                                 summary.trust_region_radius,
-                                 summary.linear_solver_iterations,
-                                 summary.iteration_time_in_seconds,
-                                 summary.cumulative_time_in_seconds);
-    if (log_to_stdout_) {
-      cout << output << endl;
-    } else {
-      VLOG(1) << output;
-    }
-    return SOLVER_CONTINUE;
-  }
-
- private:
-  const bool log_to_stdout_;
-};
-
-// Callback for logging the state of the minimizer to STDERR or STDOUT
-// depending on the user's preferences and logging level.
-class LineSearchLoggingCallback : public IterationCallback {
- public:
-  explicit LineSearchLoggingCallback(bool log_to_stdout)
-      : log_to_stdout_(log_to_stdout) {}
-
-  ~LineSearchLoggingCallback() {}
-
-  CallbackReturnType operator()(const IterationSummary& summary) {
-    const char* kReportRowFormat =
-        "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
-        "s:% 3.2e e:% 3d it:% 3.2e tt:% 3.2e";
-    string output = StringPrintf(kReportRowFormat,
-                                 summary.iteration,
-                                 summary.cost,
-                                 summary.cost_change,
-                                 summary.gradient_max_norm,
-                                 summary.step_norm,
-                                 summary.step_size,
-                                 summary.line_search_function_evaluations,
-                                 summary.iteration_time_in_seconds,
-                                 summary.cumulative_time_in_seconds);
-    if (log_to_stdout_) {
-      cout << output << endl;
-    } else {
-      VLOG(1) << output;
-    }
-    return SOLVER_CONTINUE;
-  }
-
- private:
-  const bool log_to_stdout_;
-};
-
-
-// Basic callback to record the execution of the solver to a file for
-// offline analysis.
-class FileLoggingCallback : public IterationCallback {
- public:
-  explicit FileLoggingCallback(const string& filename)
-      : fptr_(NULL) {
-    fptr_ = fopen(filename.c_str(), "w");
-    CHECK_NOTNULL(fptr_);
-  }
-
-  virtual ~FileLoggingCallback() {
-    if (fptr_ != NULL) {
-      fclose(fptr_);
-    }
-  }
-
-  virtual CallbackReturnType operator()(const IterationSummary& summary) {
-    fprintf(fptr_,
-            "%4d %e %e\n",
-            summary.iteration,
-            summary.cost,
-            summary.cumulative_time_in_seconds);
-    return SOLVER_CONTINUE;
-  }
- private:
-    FILE* fptr_;
-};
-
-// Iterate over each of the groups in order of their priority and fill
-// summary with their sizes.
-void SummarizeOrdering(ParameterBlockOrdering* ordering,
-                       vector<int>* summary) {
-  CHECK_NOTNULL(summary)->clear();
-  if (ordering == NULL) {
-    return;
-  }
-
-  const map<int, set<double*> >& group_to_elements =
-      ordering->group_to_elements();
-  for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
-       it != group_to_elements.end();
-       ++it) {
-    summary->push_back(it->second.size());
-  }
-}
-
-}  // namespace
 
 void SolverImpl::TrustRegionMinimize(
     const Solver::Options& options,
@@ -216,27 +70,26 @@
     CoordinateDescentMinimizer* inner_iteration_minimizer,
     Evaluator* evaluator,
     LinearSolver* linear_solver,
-    double* parameters,
     Solver::Summary* summary) {
   Minimizer::Options minimizer_options(options);
+  minimizer_options.is_constrained = program->IsBoundsConstrained();
 
-  // TODO(sameeragarwal): Add support for logging the configuration
-  // and more detailed stats.
-  scoped_ptr<IterationCallback> file_logging_callback;
-  if (!options.solver_log.empty()) {
-    file_logging_callback.reset(new FileLoggingCallback(options.solver_log));
-    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
-                                       file_logging_callback.get());
-  }
+  // The optimizer works on contiguous parameter vectors; allocate
+  // some.
+  Vector parameters(program->NumParameters());
 
-  TrustRegionLoggingCallback logging_callback(
-      options.minimizer_progress_to_stdout);
+  // Collect the discontiguous parameters into a contiguous state
+  // vector.
+  program->ParameterBlocksToStateVector(parameters.data());
+
+  LoggingCallback logging_callback(TRUST_REGION,
+                                   options.minimizer_progress_to_stdout);
   if (options.logging_type != SILENT) {
     minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
                                        &logging_callback);
   }
 
-  StateUpdatingCallback updating_callback(program, parameters);
+  StateUpdatingCallback updating_callback(program, parameters.data());
   if (options.update_state_every_iteration) {
     // This must get pushed to the front of the callbacks so that it is run
     // before any of the user callbacks.
@@ -266,37 +119,42 @@
 
   TrustRegionMinimizer minimizer;
   double minimizer_start_time = WallTimeInSeconds();
-  minimizer.Minimize(minimizer_options, parameters, summary);
+  minimizer.Minimize(minimizer_options, parameters.data(), summary);
+
+  // If the user aborted mid-optimization or the optimization
+  // terminated because of a numerical failure, then do not update
+  // user state.
+  if (summary->termination_type != USER_FAILURE &&
+      summary->termination_type != FAILURE) {
+    program->StateVectorToParameterBlocks(parameters.data());
+    program->CopyParameterBlockStateToUserState();
+  }
+
   summary->minimizer_time_in_seconds =
       WallTimeInSeconds() - minimizer_start_time;
 }
 
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
 void SolverImpl::LineSearchMinimize(
     const Solver::Options& options,
     Program* program,
     Evaluator* evaluator,
-    double* parameters,
     Solver::Summary* summary) {
   Minimizer::Options minimizer_options(options);
 
-  // TODO(sameeragarwal): Add support for logging the configuration
-  // and more detailed stats.
-  scoped_ptr<IterationCallback> file_logging_callback;
-  if (!options.solver_log.empty()) {
-    file_logging_callback.reset(new FileLoggingCallback(options.solver_log));
-    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
-                                       file_logging_callback.get());
-  }
+  // The optimizer works on contiguous parameter vectors; allocate some.
+  Vector parameters(program->NumParameters());
 
-  LineSearchLoggingCallback logging_callback(
-      options.minimizer_progress_to_stdout);
+  // Collect the discontiguous parameters into a contiguous state vector.
+  program->ParameterBlocksToStateVector(parameters.data());
+
+  LoggingCallback logging_callback(LINE_SEARCH,
+                                   options.minimizer_progress_to_stdout);
   if (options.logging_type != SILENT) {
     minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
                                        &logging_callback);
   }
 
-  StateUpdatingCallback updating_callback(program, parameters);
+  StateUpdatingCallback updating_callback(program, parameters.data());
   if (options.update_state_every_iteration) {
     // This must get pushed to the front of the callbacks so that it is run
     // before any of the user callbacks.
@@ -308,11 +166,20 @@
 
   LineSearchMinimizer minimizer;
   double minimizer_start_time = WallTimeInSeconds();
-  minimizer.Minimize(minimizer_options, parameters, summary);
+  minimizer.Minimize(minimizer_options, parameters.data(), summary);
+
+  // If the user aborted mid-optimization or the optimization
+  // terminated because of a numerical failure, then do not update
+  // user state.
+  if (summary->termination_type != USER_FAILURE &&
+      summary->termination_type != FAILURE) {
+    program->StateVectorToParameterBlocks(parameters.data());
+    program->CopyParameterBlockStateToUserState();
+  }
+
   summary->minimizer_time_in_seconds =
       WallTimeInSeconds() - minimizer_start_time;
 }
-#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
 
 void SolverImpl::Solve(const Solver::Options& options,
                        ProblemImpl* problem_impl,
@@ -326,15 +193,10 @@
           << " residual blocks, "
           << problem_impl->NumResiduals()
           << " residuals.";
-
   if (options.minimizer_type == TRUST_REGION) {
     TrustRegionSolve(options, problem_impl, summary);
   } else {
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
     LineSearchSolve(options, problem_impl, summary);
-#else
-    LOG(FATAL) << "Ceres Solver was compiled with -DLINE_SEARCH_MINIMIZER=OFF";
-#endif
   }
 }
 
@@ -347,39 +209,15 @@
   Program* original_program = original_problem_impl->mutable_program();
   ProblemImpl* problem_impl = original_problem_impl;
 
-  // Reset the summary object to its default values.
-  *CHECK_NOTNULL(summary) = Solver::Summary();
-
   summary->minimizer_type = TRUST_REGION;
-  summary->num_parameter_blocks = problem_impl->NumParameterBlocks();
-  summary->num_parameters = problem_impl->NumParameters();
-  summary->num_effective_parameters =
-      original_program->NumEffectiveParameters();
-  summary->num_residual_blocks = problem_impl->NumResidualBlocks();
-  summary->num_residuals = problem_impl->NumResiduals();
 
-  // Empty programs are usually a user error.
-  if (summary->num_parameter_blocks == 0) {
-    summary->error = "Problem contains no parameter blocks.";
-    LOG(ERROR) << summary->error;
-    return;
-  }
-
-  if (summary->num_residual_blocks == 0) {
-    summary->error = "Problem contains no residual blocks.";
-    LOG(ERROR) << summary->error;
-    return;
-  }
-
-  SummarizeOrdering(original_options.linear_solver_ordering,
-                    &(summary->linear_solver_ordering_given));
-
-  SummarizeOrdering(original_options.inner_iteration_ordering,
-                    &(summary->inner_iteration_ordering_given));
+  SummarizeGivenProgram(*original_program, summary);
+  OrderingToGroupSizes(original_options.linear_solver_ordering.get(),
+                       &(summary->linear_solver_ordering_given));
+  OrderingToGroupSizes(original_options.inner_iteration_ordering.get(),
+                       &(summary->inner_iteration_ordering_given));
 
   Solver::Options options(original_options);
-  options.linear_solver_ordering = NULL;
-  options.inner_iteration_ordering = NULL;
 
 #ifndef CERES_USE_OPENMP
   if (options.num_threads > 1) {
@@ -404,9 +242,19 @@
   if (options.trust_region_minimizer_iterations_to_dump.size() > 0 &&
       options.trust_region_problem_dump_format_type != CONSOLE &&
       options.trust_region_problem_dump_directory.empty()) {
-    summary->error =
+    summary->message =
         "Solver::Options::trust_region_problem_dump_directory is empty.";
-    LOG(ERROR) << summary->error;
+    LOG(ERROR) << summary->message;
+    return;
+  }
+
+  if (!original_program->ParameterBlocksAreFinite(&summary->message)) {
+    LOG(ERROR) << "Terminating: " << summary->message;
+    return;
+  }
+
+  if (!original_program->IsFeasible(&summary->message)) {
+    LOG(ERROR) << "Terminating: " << summary->message;
     return;
   }
 
@@ -433,17 +281,14 @@
     problem_impl = gradient_checking_problem_impl.get();
   }
 
-  if (original_options.linear_solver_ordering != NULL) {
-    if (!IsOrderingValid(original_options, problem_impl, &summary->error)) {
-      LOG(ERROR) << summary->error;
+  if (options.linear_solver_ordering.get() != NULL) {
+    if (!IsOrderingValid(options, problem_impl, &summary->message)) {
+      LOG(ERROR) << summary->message;
       return;
     }
     event_logger.AddEvent("CheckOrdering");
-    options.linear_solver_ordering =
-        new ParameterBlockOrdering(*original_options.linear_solver_ordering);
-    event_logger.AddEvent("CopyOrdering");
   } else {
-    options.linear_solver_ordering = new ParameterBlockOrdering;
+    options.linear_solver_ordering.reset(new ParameterBlockOrdering);
     const ProblemImpl::ParameterMap& parameter_map =
         problem_impl->parameter_map();
     for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin();
@@ -459,41 +304,35 @@
   scoped_ptr<Program> reduced_program(CreateReducedProgram(&options,
                                                            problem_impl,
                                                            &summary->fixed_cost,
-                                                           &summary->error));
+                                                           &summary->message));
 
   event_logger.AddEvent("CreateReducedProgram");
   if (reduced_program == NULL) {
     return;
   }
 
-  SummarizeOrdering(options.linear_solver_ordering,
-                    &(summary->linear_solver_ordering_used));
-
-  summary->num_parameter_blocks_reduced = reduced_program->NumParameterBlocks();
-  summary->num_parameters_reduced = reduced_program->NumParameters();
-  summary->num_effective_parameters_reduced =
-      reduced_program->NumEffectiveParameters();
-  summary->num_residual_blocks_reduced = reduced_program->NumResidualBlocks();
-  summary->num_residuals_reduced = reduced_program->NumResiduals();
+  OrderingToGroupSizes(options.linear_solver_ordering.get(),
+                       &(summary->linear_solver_ordering_used));
+  SummarizeReducedProgram(*reduced_program, summary);
 
   if (summary->num_parameter_blocks_reduced == 0) {
     summary->preprocessor_time_in_seconds =
         WallTimeInSeconds() - solver_start_time;
 
     double post_process_start_time = WallTimeInSeconds();
-    LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. "
-              << "No non-constant parameter blocks found.";
+
+     summary->message =
+        "Function tolerance reached. "
+        "No non-constant parameter blocks found.";
+    summary->termination_type = CONVERGENCE;
+    VLOG_IF(1, options.logging_type != SILENT) << summary->message;
 
     summary->initial_cost = summary->fixed_cost;
     summary->final_cost = summary->fixed_cost;
 
-    // FUNCTION_TOLERANCE is the right convergence here, as we know
-    // that the objective function is constant and cannot be changed
-    // any further.
-    summary->termination_type = FUNCTION_TOLERANCE;
-
     // Ensure the program state is set to the user parameters on the way out.
     original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+    original_program->SetParameterOffsetsAndIndex();
 
     summary->postprocessor_time_in_seconds =
         WallTimeInSeconds() - post_process_start_time;
@@ -501,7 +340,7 @@
   }
 
   scoped_ptr<LinearSolver>
-      linear_solver(CreateLinearSolver(&options, &summary->error));
+      linear_solver(CreateLinearSolver(&options, &summary->message));
   event_logger.AddEvent("CreateLinearSolver");
   if (linear_solver == NULL) {
     return;
@@ -511,6 +350,7 @@
   summary->linear_solver_type_used = options.linear_solver_type;
 
   summary->preconditioner_type = options.preconditioner_type;
+  summary->visibility_clustering_type = options.visibility_clustering_type;
 
   summary->num_linear_solver_threads_given =
       original_options.num_linear_solver_threads;
@@ -527,7 +367,7 @@
   scoped_ptr<Evaluator> evaluator(CreateEvaluator(options,
                                                   problem_impl->parameter_map(),
                                                   reduced_program.get(),
-                                                  &summary->error));
+                                                  &summary->message));
 
   event_logger.AddEvent("CreateEvaluator");
 
@@ -542,26 +382,18 @@
                    << "Disabling inner iterations.";
     } else {
       inner_iteration_minimizer.reset(
-          CreateInnerIterationMinimizer(original_options,
+          CreateInnerIterationMinimizer(options,
                                         *reduced_program,
                                         problem_impl->parameter_map(),
                                         summary));
       if (inner_iteration_minimizer == NULL) {
-        LOG(ERROR) << summary->error;
+        LOG(ERROR) << summary->message;
         return;
       }
     }
   }
   event_logger.AddEvent("CreateInnerIterationMinimizer");
 
-  // The optimizer works on contiguous parameter vectors; allocate some.
-  Vector parameters(reduced_program->NumParameters());
-
-  // Collect the discontiguous parameters into a contiguous state vector.
-  reduced_program->ParameterBlocksToStateVector(parameters.data());
-
-  Vector original_parameters = parameters;
-
   double minimizer_start_time = WallTimeInSeconds();
   summary->preprocessor_time_in_seconds =
       minimizer_start_time - solver_start_time;
@@ -572,30 +404,17 @@
                       inner_iteration_minimizer.get(),
                       evaluator.get(),
                       linear_solver.get(),
-                      parameters.data(),
                       summary);
   event_logger.AddEvent("Minimize");
 
-  SetSummaryFinalCost(summary);
-
-  // If the user aborted mid-optimization or the optimization
-  // terminated because of a numerical failure, then return without
-  // updating user state.
-  if (summary->termination_type == USER_ABORT ||
-      summary->termination_type == NUMERICAL_FAILURE) {
-    return;
-  }
-
   double post_process_start_time = WallTimeInSeconds();
 
-  // Push the contiguous optimized parameters back to the user's
-  // parameters.
-  reduced_program->StateVectorToParameterBlocks(parameters.data());
-  reduced_program->CopyParameterBlockStateToUserState();
+  SetSummaryFinalCost(summary);
 
   // Ensure the program state is set to the user parameters on the way
   // out.
   original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+  original_program->SetParameterOffsetsAndIndex();
 
   const map<string, double>& linear_solver_time_statistics =
       linear_solver->TimeStatistics();
@@ -618,8 +437,6 @@
   event_logger.AddEvent("PostProcess");
 }
 
-
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
 void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
                                  ProblemImpl* original_problem_impl,
                                  Solver::Summary* summary) {
@@ -628,9 +445,7 @@
   Program* original_program = original_problem_impl->mutable_program();
   ProblemImpl* problem_impl = original_problem_impl;
 
-  // Reset the summary object to its default values.
-  *CHECK_NOTNULL(summary) = Solver::Summary();
-
+  SummarizeGivenProgram(*original_program, summary);
   summary->minimizer_type = LINE_SEARCH;
   summary->line_search_direction_type =
       original_options.line_search_direction_type;
@@ -641,104 +456,9 @@
   summary->nonlinear_conjugate_gradient_type =
       original_options.nonlinear_conjugate_gradient_type;
 
-  summary->num_parameter_blocks = original_program->NumParameterBlocks();
-  summary->num_parameters = original_program->NumParameters();
-  summary->num_residual_blocks = original_program->NumResidualBlocks();
-  summary->num_residuals = original_program->NumResiduals();
-  summary->num_effective_parameters =
-      original_program->NumEffectiveParameters();
-
-  // Validate values for configuration parameters supplied by user.
-  if ((original_options.line_search_direction_type == ceres::BFGS ||
-       original_options.line_search_direction_type == ceres::LBFGS) &&
-      original_options.line_search_type != ceres::WOLFE) {
-    summary->error =
-        string("Invalid configuration: require line_search_type == "
-               "ceres::WOLFE when using (L)BFGS to ensure that underlying "
-               "assumptions are guaranteed to be satisfied.");
-    LOG(ERROR) << summary->error;
-    return;
-  }
-  if (original_options.max_lbfgs_rank <= 0) {
-    summary->error =
-        string("Invalid configuration: require max_lbfgs_rank > 0");
-    LOG(ERROR) << summary->error;
-    return;
-  }
-  if (original_options.min_line_search_step_size <= 0.0) {
-    summary->error = "Invalid configuration: min_line_search_step_size <= 0.0.";
-    LOG(ERROR) << summary->error;
-    return;
-  }
-  if (original_options.line_search_sufficient_function_decrease <= 0.0) {
-    summary->error =
-        string("Invalid configuration: require ") +
-        string("line_search_sufficient_function_decrease <= 0.0.");
-    LOG(ERROR) << summary->error;
-    return;
-  }
-  if (original_options.max_line_search_step_contraction <= 0.0 ||
-      original_options.max_line_search_step_contraction >= 1.0) {
-    summary->error = string("Invalid configuration: require ") +
-        string("0.0 < max_line_search_step_contraction < 1.0.");
-    LOG(ERROR) << summary->error;
-    return;
-  }
-  if (original_options.min_line_search_step_contraction <=
-      original_options.max_line_search_step_contraction ||
-      original_options.min_line_search_step_contraction > 1.0) {
-    summary->error = string("Invalid configuration: require ") +
-        string("max_line_search_step_contraction < ") +
-        string("min_line_search_step_contraction <= 1.0.");
-    LOG(ERROR) << summary->error;
-    return;
-  }
-  // Warn user if they have requested BISECTION interpolation, but constraints
-  // on max/min step size change during line search prevent bisection scaling
-  // from occurring. Warn only, as this is likely a user mistake, but one which
-  // does not prevent us from continuing.
-  LOG_IF(WARNING,
-         (original_options.line_search_interpolation_type == ceres::BISECTION &&
-          (original_options.max_line_search_step_contraction > 0.5 ||
-           original_options.min_line_search_step_contraction < 0.5)))
-      << "Line search interpolation type is BISECTION, but specified "
-      << "max_line_search_step_contraction: "
-      << original_options.max_line_search_step_contraction << ", and "
-      << "min_line_search_step_contraction: "
-      << original_options.min_line_search_step_contraction
-      << ", prevent bisection (0.5) scaling, continuing with solve regardless.";
-  if (original_options.max_num_line_search_step_size_iterations <= 0) {
-    summary->error = string("Invalid configuration: require ") +
-        string("max_num_line_search_step_size_iterations > 0.");
-    LOG(ERROR) << summary->error;
-    return;
-  }
-  if (original_options.line_search_sufficient_curvature_decrease <=
-      original_options.line_search_sufficient_function_decrease ||
-      original_options.line_search_sufficient_curvature_decrease > 1.0) {
-    summary->error = string("Invalid configuration: require ") +
-        string("line_search_sufficient_function_decrease < ") +
-        string("line_search_sufficient_curvature_decrease < 1.0.");
-    LOG(ERROR) << summary->error;
-    return;
-  }
-  if (original_options.max_line_search_step_expansion <= 1.0) {
-    summary->error = string("Invalid configuration: require ") +
-        string("max_line_search_step_expansion > 1.0.");
-    LOG(ERROR) << summary->error;
-    return;
-  }
-
-  // Empty programs are usually a user error.
-  if (summary->num_parameter_blocks == 0) {
-    summary->error = "Problem contains no parameter blocks.";
-    LOG(ERROR) << summary->error;
-    return;
-  }
-
-  if (summary->num_residual_blocks == 0) {
-    summary->error = "Problem contains no residual blocks.";
-    LOG(ERROR) << summary->error;
+  if (original_program->IsBoundsConstrained()) {
+    summary->message =  "LINE_SEARCH Minimizer does not support bounds.";
+    LOG(ERROR) << "Terminating: " << summary->message;
     return;
   }
 
@@ -750,8 +470,6 @@
   // line search.
   options.linear_solver_type = CGNR;
 
-  options.linear_solver_ordering = NULL;
-  options.inner_iteration_ordering = NULL;
 
 #ifndef CERES_USE_OPENMP
   if (options.num_threads > 1) {
@@ -766,15 +484,18 @@
   summary->num_threads_given = original_options.num_threads;
   summary->num_threads_used = options.num_threads;
 
-  if (original_options.linear_solver_ordering != NULL) {
-    if (!IsOrderingValid(original_options, problem_impl, &summary->error)) {
-      LOG(ERROR) << summary->error;
+  if (!original_program->ParameterBlocksAreFinite(&summary->message)) {
+    LOG(ERROR) << "Terminating: " << summary->message;
+    return;
+  }
+
+  if (options.linear_solver_ordering.get() != NULL) {
+    if (!IsOrderingValid(options, problem_impl, &summary->message)) {
+      LOG(ERROR) << summary->message;
       return;
     }
-    options.linear_solver_ordering =
-        new ParameterBlockOrdering(*original_options.linear_solver_ordering);
   } else {
-    options.linear_solver_ordering = new ParameterBlockOrdering;
+    options.linear_solver_ordering.reset(new ParameterBlockOrdering);
     const ProblemImpl::ParameterMap& parameter_map =
         problem_impl->parameter_map();
     for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin();
@@ -784,6 +505,7 @@
     }
   }
 
+
   original_program->SetParameterBlockStatePtrsToUserStatePtrs();
 
   // If the user requests gradient checking, construct a new
@@ -809,36 +531,31 @@
   scoped_ptr<Program> reduced_program(CreateReducedProgram(&options,
                                                            problem_impl,
                                                            &summary->fixed_cost,
-                                                           &summary->error));
+                                                           &summary->message));
   if (reduced_program == NULL) {
     return;
   }
 
-  summary->num_parameter_blocks_reduced = reduced_program->NumParameterBlocks();
-  summary->num_parameters_reduced = reduced_program->NumParameters();
-  summary->num_residual_blocks_reduced = reduced_program->NumResidualBlocks();
-  summary->num_effective_parameters_reduced =
-      reduced_program->NumEffectiveParameters();
-  summary->num_residuals_reduced = reduced_program->NumResiduals();
-
+  SummarizeReducedProgram(*reduced_program, summary);
   if (summary->num_parameter_blocks_reduced == 0) {
     summary->preprocessor_time_in_seconds =
         WallTimeInSeconds() - solver_start_time;
 
-    LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. "
-              << "No non-constant parameter blocks found.";
-
-    // FUNCTION_TOLERANCE is the right convergence here, as we know
-    // that the objective function is constant and cannot be changed
-    // any further.
-    summary->termination_type = FUNCTION_TOLERANCE;
+    summary->message =
+        "Function tolerance reached. "
+        "No non-constant parameter blocks found.";
+    summary->termination_type = CONVERGENCE;
+    VLOG_IF(1, options.logging_type != SILENT) << summary->message;
+    summary->initial_cost = summary->fixed_cost;
+    summary->final_cost = summary->fixed_cost;
 
     const double post_process_start_time = WallTimeInSeconds();
-
     SetSummaryFinalCost(summary);
 
     // Ensure the program state is set to the user parameters on the way out.
     original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+    original_program->SetParameterOffsetsAndIndex();
+
     summary->postprocessor_time_in_seconds =
         WallTimeInSeconds() - post_process_start_time;
     return;
@@ -847,48 +564,25 @@
   scoped_ptr<Evaluator> evaluator(CreateEvaluator(options,
                                                   problem_impl->parameter_map(),
                                                   reduced_program.get(),
-                                                  &summary->error));
+                                                  &summary->message));
   if (evaluator == NULL) {
     return;
   }
 
-  // The optimizer works on contiguous parameter vectors; allocate some.
-  Vector parameters(reduced_program->NumParameters());
-
-  // Collect the discontiguous parameters into a contiguous state vector.
-  reduced_program->ParameterBlocksToStateVector(parameters.data());
-
-  Vector original_parameters = parameters;
-
   const double minimizer_start_time = WallTimeInSeconds();
   summary->preprocessor_time_in_seconds =
       minimizer_start_time - solver_start_time;
 
   // Run the optimization.
-  LineSearchMinimize(options,
-                     reduced_program.get(),
-                     evaluator.get(),
-                     parameters.data(),
-                     summary);
-
-  // If the user aborted mid-optimization or the optimization
-  // terminated because of a numerical failure, then return without
-  // updating user state.
-  if (summary->termination_type == USER_ABORT ||
-      summary->termination_type == NUMERICAL_FAILURE) {
-    return;
-  }
+  LineSearchMinimize(options, reduced_program.get(), evaluator.get(), summary);
 
   const double post_process_start_time = WallTimeInSeconds();
 
-  // Push the contiguous optimized parameters back to the user's parameters.
-  reduced_program->StateVectorToParameterBlocks(parameters.data());
-  reduced_program->CopyParameterBlockStateToUserState();
-
   SetSummaryFinalCost(summary);
 
   // Ensure the program state is set to the user parameters on the way out.
   original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+  original_program->SetParameterOffsetsAndIndex();
 
   const map<string, double>& evaluator_time_statistics =
       evaluator->TimeStatistics();
@@ -902,7 +596,6 @@
   summary->postprocessor_time_in_seconds =
       WallTimeInSeconds() - post_process_start_time;
 }
-#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
 
 bool SolverImpl::IsOrderingValid(const Solver::Options& options,
                                  const ProblemImpl* problem_impl,
@@ -966,133 +659,48 @@
   return true;
 }
 
-
-// Strips varying parameters and residuals, maintaining order, and updating
-// num_eliminate_blocks.
-bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program,
-                                              ParameterBlockOrdering* ordering,
-                                              double* fixed_cost,
-                                              string* error) {
-  vector<ParameterBlock*>* parameter_blocks =
-      program->mutable_parameter_blocks();
-
-  scoped_array<double> residual_block_evaluate_scratch;
-  if (fixed_cost != NULL) {
-    residual_block_evaluate_scratch.reset(
-        new double[program->MaxScratchDoublesNeededForEvaluate()]);
-    *fixed_cost = 0.0;
-  }
-
-  // Mark all the parameters as unused. Abuse the index member of the parameter
-  // blocks for the marking.
-  for (int i = 0; i < parameter_blocks->size(); ++i) {
-    (*parameter_blocks)[i]->set_index(-1);
-  }
-
-  // Filter out residual that have all-constant parameters, and mark all the
-  // parameter blocks that appear in residuals.
-  {
-    vector<ResidualBlock*>* residual_blocks =
-        program->mutable_residual_blocks();
-    int j = 0;
-    for (int i = 0; i < residual_blocks->size(); ++i) {
-      ResidualBlock* residual_block = (*residual_blocks)[i];
-      int num_parameter_blocks = residual_block->NumParameterBlocks();
-
-      // Determine if the residual block is fixed, and also mark varying
-      // parameters that appear in the residual block.
-      bool all_constant = true;
-      for (int k = 0; k < num_parameter_blocks; k++) {
-        ParameterBlock* parameter_block = residual_block->parameter_blocks()[k];
-        if (!parameter_block->IsConstant()) {
-          all_constant = false;
-          parameter_block->set_index(1);
-        }
-      }
-
-      if (!all_constant) {
-        (*residual_blocks)[j++] = (*residual_blocks)[i];
-      } else if (fixed_cost != NULL) {
-        // The residual is constant and will be removed, so its cost is
-        // added to the variable fixed_cost.
-        double cost = 0.0;
-        if (!residual_block->Evaluate(true,
-                                      &cost,
-                                      NULL,
-                                      NULL,
-                                      residual_block_evaluate_scratch.get())) {
-          *error = StringPrintf("Evaluation of the residual %d failed during "
-                                "removal of fixed residual blocks.", i);
-          return false;
-        }
-        *fixed_cost += cost;
-      }
-    }
-    residual_blocks->resize(j);
-  }
-
-  // Filter out unused or fixed parameter blocks, and update
-  // the ordering.
-  {
-    vector<ParameterBlock*>* parameter_blocks =
-        program->mutable_parameter_blocks();
-    int j = 0;
-    for (int i = 0; i < parameter_blocks->size(); ++i) {
-      ParameterBlock* parameter_block = (*parameter_blocks)[i];
-      if (parameter_block->index() == 1) {
-        (*parameter_blocks)[j++] = parameter_block;
-      } else {
-        ordering->Remove(parameter_block->mutable_user_state());
-      }
-    }
-    parameter_blocks->resize(j);
-  }
-
-  if (!(((program->NumResidualBlocks() == 0) &&
-         (program->NumParameterBlocks() == 0)) ||
-        ((program->NumResidualBlocks() != 0) &&
-         (program->NumParameterBlocks() != 0)))) {
-    *error =  "Congratulations, you found a bug in Ceres. Please report it.";
-    return false;
-  }
-
-  return true;
-}
-
 Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
                                           ProblemImpl* problem_impl,
                                           double* fixed_cost,
                                           string* error) {
-  CHECK_NOTNULL(options->linear_solver_ordering);
+  CHECK_NOTNULL(options->linear_solver_ordering.get());
   Program* original_program = problem_impl->mutable_program();
-  scoped_ptr<Program> transformed_program(new Program(*original_program));
 
-  ParameterBlockOrdering* linear_solver_ordering =
-      options->linear_solver_ordering;
-  const int min_group_id =
-      linear_solver_ordering->group_to_elements().begin()->first;
-
-  if (!RemoveFixedBlocksFromProgram(transformed_program.get(),
-                                    linear_solver_ordering,
-                                    fixed_cost,
-                                    error)) {
+  vector<double*> removed_parameter_blocks;
+  scoped_ptr<Program> reduced_program(
+      original_program->CreateReducedProgram(&removed_parameter_blocks,
+                                             fixed_cost,
+                                             error));
+  if (reduced_program.get() == NULL) {
     return NULL;
   }
 
   VLOG(2) << "Reduced problem: "
-          << transformed_program->NumParameterBlocks()
+          << reduced_program->NumParameterBlocks()
           << " parameter blocks, "
-          << transformed_program->NumParameters()
+          << reduced_program->NumParameters()
           << " parameters,  "
-          << transformed_program->NumResidualBlocks()
+          << reduced_program->NumResidualBlocks()
           << " residual blocks, "
-          << transformed_program->NumResiduals()
+          << reduced_program->NumResiduals()
           << " residuals.";
 
-  if (transformed_program->NumParameterBlocks() == 0) {
+  if (reduced_program->NumParameterBlocks() == 0) {
     LOG(WARNING) << "No varying parameter blocks to optimize; "
                  << "bailing early.";
-    return transformed_program.release();
+    return reduced_program.release();
+  }
+
+  ParameterBlockOrdering* linear_solver_ordering =
+      options->linear_solver_ordering.get();
+  const int min_group_id =
+      linear_solver_ordering->MinNonZeroGroup();
+  linear_solver_ordering->Remove(removed_parameter_blocks);
+
+  ParameterBlockOrdering* inner_iteration_ordering =
+      options->inner_iteration_ordering.get();
+  if (inner_iteration_ordering != NULL) {
+    inner_iteration_ordering->Remove(removed_parameter_blocks);
   }
 
   if (IsSchurType(options->linear_solver_type) &&
@@ -1108,7 +716,15 @@
     // as they assume there is at least one e_block. Thus, we
     // automatically switch to the closest solver to the one indicated
     // by the user.
-    AlternateLinearSolverForSchurTypeLinearSolver(options);
+    if (options->linear_solver_type == ITERATIVE_SCHUR) {
+      options->preconditioner_type =
+        Preconditioner::PreconditionerForZeroEBlocks(
+            options->preconditioner_type);
+    }
+
+    options->linear_solver_type =
+        LinearSolver::LinearSolverForZeroEBlocks(
+            options->linear_solver_type);
   }
 
   if (IsSchurType(options->linear_solver_type)) {
@@ -1117,33 +733,34 @@
             options->sparse_linear_algebra_library_type,
             problem_impl->parameter_map(),
             linear_solver_ordering,
-            transformed_program.get(),
+            reduced_program.get(),
             error)) {
       return NULL;
     }
-    return transformed_program.release();
+    return reduced_program.release();
   }
 
-  if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+  if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
+      !options->dynamic_sparsity) {
     if (!ReorderProgramForSparseNormalCholesky(
             options->sparse_linear_algebra_library_type,
-            linear_solver_ordering,
-            transformed_program.get(),
+            *linear_solver_ordering,
+            reduced_program.get(),
             error)) {
       return NULL;
     }
 
-    return transformed_program.release();
+    return reduced_program.release();
   }
 
-  transformed_program->SetParameterOffsetsAndIndex();
-  return transformed_program.release();
+  reduced_program->SetParameterOffsetsAndIndex();
+  return reduced_program.release();
 }
 
 LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
                                              string* error) {
   CHECK_NOTNULL(options);
-  CHECK_NOTNULL(options->linear_solver_ordering);
+  CHECK_NOTNULL(options->linear_solver_ordering.get());
   CHECK_NOTNULL(error);
 
   if (options->trust_region_strategy_type == DOGLEG) {
@@ -1209,14 +826,6 @@
   }
 #endif
 
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
-  if (options->linear_solver_type == SPARSE_SCHUR) {
-    *error = "Can't use SPARSE_SCHUR because neither SuiteSparse nor"
-        "CXSparse was enabled when Ceres was compiled.";
-    return NULL;
-  }
-#endif
-
   if (options->max_linear_solver_iterations <= 0) {
     *error = "Solver::Options::max_linear_solver_iterations is not positive.";
     return NULL;
@@ -1239,11 +848,14 @@
       options->max_linear_solver_iterations;
   linear_solver_options.type = options->linear_solver_type;
   linear_solver_options.preconditioner_type = options->preconditioner_type;
+  linear_solver_options.visibility_clustering_type =
+      options->visibility_clustering_type;
   linear_solver_options.sparse_linear_algebra_library_type =
       options->sparse_linear_algebra_library_type;
   linear_solver_options.dense_linear_algebra_library_type =
       options->dense_linear_algebra_library_type;
   linear_solver_options.use_postordering = options->use_postordering;
+  linear_solver_options.dynamic_sparsity = options->dynamic_sparsity;
 
   // Ignore user's postordering preferences and force it to be true if
   // cholmod_camd is not available. This ensures that the linear
@@ -1259,13 +871,8 @@
   linear_solver_options.num_threads = options->num_linear_solver_threads;
   options->num_linear_solver_threads = linear_solver_options.num_threads;
 
-  const map<int, set<double*> >& groups =
-      options->linear_solver_ordering->group_to_elements();
-  for (map<int, set<double*> >::const_iterator it = groups.begin();
-       it != groups.end();
-       ++it) {
-    linear_solver_options.elimination_groups.push_back(it->second.size());
-  }
+  OrderingToGroupSizes(options->linear_solver_ordering.get(),
+                       &linear_solver_options.elimination_groups);
   // Schur type solvers, expect at least two elimination groups. If
   // there is only one elimination group, then CreateReducedProgram
   // guarantees that this group only contains e_blocks. Thus we add a
@@ -1278,109 +885,6 @@
   return LinearSolver::Create(linear_solver_options);
 }
 
-
-// Find the minimum index of any parameter block to the given residual.
-// Parameter blocks that have indices greater than num_eliminate_blocks are
-// considered to have an index equal to num_eliminate_blocks.
-static int MinParameterBlock(const ResidualBlock* residual_block,
-                             int num_eliminate_blocks) {
-  int min_parameter_block_position = num_eliminate_blocks;
-  for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
-    ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
-    if (!parameter_block->IsConstant()) {
-      CHECK_NE(parameter_block->index(), -1)
-          << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
-          << "This is a Ceres bug; please contact the developers!";
-      min_parameter_block_position = std::min(parameter_block->index(),
-                                              min_parameter_block_position);
-    }
-  }
-  return min_parameter_block_position;
-}
-
-// Reorder the residuals for program, if necessary, so that the residuals
-// involving each E block occur together. This is a necessary condition for the
-// Schur eliminator, which works on these "row blocks" in the jacobian.
-bool SolverImpl::LexicographicallyOrderResidualBlocks(
-    const int num_eliminate_blocks,
-    Program* program,
-    string* error) {
-  CHECK_GE(num_eliminate_blocks, 1)
-      << "Congratulations, you found a Ceres bug! Please report this error "
-      << "to the developers.";
-
-  // Create a histogram of the number of residuals for each E block. There is an
-  // extra bucket at the end to catch all non-eliminated F blocks.
-  vector<int> residual_blocks_per_e_block(num_eliminate_blocks + 1);
-  vector<ResidualBlock*>* residual_blocks = program->mutable_residual_blocks();
-  vector<int> min_position_per_residual(residual_blocks->size());
-  for (int i = 0; i < residual_blocks->size(); ++i) {
-    ResidualBlock* residual_block = (*residual_blocks)[i];
-    int position = MinParameterBlock(residual_block, num_eliminate_blocks);
-    min_position_per_residual[i] = position;
-    DCHECK_LE(position, num_eliminate_blocks);
-    residual_blocks_per_e_block[position]++;
-  }
-
-  // Run a cumulative sum on the histogram, to obtain offsets to the start of
-  // each histogram bucket (where each bucket is for the residuals for that
-  // E-block).
-  vector<int> offsets(num_eliminate_blocks + 1);
-  std::partial_sum(residual_blocks_per_e_block.begin(),
-                   residual_blocks_per_e_block.end(),
-                   offsets.begin());
-  CHECK_EQ(offsets.back(), residual_blocks->size())
-      << "Congratulations, you found a Ceres bug! Please report this error "
-      << "to the developers.";
-
-  CHECK(find(residual_blocks_per_e_block.begin(),
-             residual_blocks_per_e_block.end() - 1, 0) !=
-        residual_blocks_per_e_block.end())
-      << "Congratulations, you found a Ceres bug! Please report this error "
-      << "to the developers.";
-
-  // Fill in each bucket with the residual blocks for its corresponding E block.
-  // Each bucket is individually filled from the back of the bucket to the front
-  // of the bucket. The filling order among the buckets is dictated by the
-  // residual blocks. This loop uses the offsets as counters; subtracting one
-  // from each offset as a residual block is placed in the bucket. When the
-  // filling is finished, the offset pointerts should have shifted down one
-  // entry (this is verified below).
-  vector<ResidualBlock*> reordered_residual_blocks(
-      (*residual_blocks).size(), static_cast<ResidualBlock*>(NULL));
-  for (int i = 0; i < residual_blocks->size(); ++i) {
-    int bucket = min_position_per_residual[i];
-
-    // Decrement the cursor, which should now point at the next empty position.
-    offsets[bucket]--;
-
-    // Sanity.
-    CHECK(reordered_residual_blocks[offsets[bucket]] == NULL)
-        << "Congratulations, you found a Ceres bug! Please report this error "
-        << "to the developers.";
-
-    reordered_residual_blocks[offsets[bucket]] = (*residual_blocks)[i];
-  }
-
-  // Sanity check #1: The difference in bucket offsets should match the
-  // histogram sizes.
-  for (int i = 0; i < num_eliminate_blocks; ++i) {
-    CHECK_EQ(residual_blocks_per_e_block[i], offsets[i + 1] - offsets[i])
-        << "Congratulations, you found a Ceres bug! Please report this error "
-        << "to the developers.";
-  }
-  // Sanity check #2: No NULL's left behind.
-  for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
-    CHECK(reordered_residual_blocks[i] != NULL)
-        << "Congratulations, you found a Ceres bug! Please report this error "
-        << "to the developers.";
-  }
-
-  // Now that the residuals are collected by E block, swap them in place.
-  swap(*program->mutable_residual_blocks(), reordered_residual_blocks);
-  return true;
-}
-
 Evaluator* SolverImpl::CreateEvaluator(
     const Solver::Options& options,
     const ProblemImpl::ParameterMap& parameter_map,
@@ -1396,6 +900,7 @@
          ->second.size())
       : 0;
   evaluator_options.num_threads = options.num_threads;
+  evaluator_options.dynamic_sparsity = options.dynamic_sparsity;
   return Evaluator::Create(evaluator_options, program, error);
 }
 
@@ -1411,374 +916,32 @@
   scoped_ptr<ParameterBlockOrdering> inner_iteration_ordering;
   ParameterBlockOrdering* ordering_ptr  = NULL;
 
-  if (options.inner_iteration_ordering == NULL) {
-    // Find a recursive decomposition of the Hessian matrix as a set
-    // of independent sets of decreasing size and invert it. This
-    // seems to work better in practice, i.e., Cameras before
-    // points.
-    inner_iteration_ordering.reset(new ParameterBlockOrdering);
-    ComputeRecursiveIndependentSetOrdering(program,
-                                           inner_iteration_ordering.get());
-    inner_iteration_ordering->Reverse();
+  if (options.inner_iteration_ordering.get() == NULL) {
+    inner_iteration_ordering.reset(
+        CoordinateDescentMinimizer::CreateOrdering(program));
     ordering_ptr = inner_iteration_ordering.get();
   } else {
-    const map<int, set<double*> >& group_to_elements =
-        options.inner_iteration_ordering->group_to_elements();
-
-    // Iterate over each group and verify that it is an independent
-    // set.
-    map<int, set<double*> >::const_iterator it = group_to_elements.begin();
-    for ( ; it != group_to_elements.end(); ++it) {
-      if (!IsParameterBlockSetIndependent(it->second,
-                                          program.residual_blocks())) {
-        summary->error =
-            StringPrintf("The user-provided "
-                         "parameter_blocks_for_inner_iterations does not "
-                         "form an independent set. Group Id: %d", it->first);
-        return NULL;
-      }
+    ordering_ptr = options.inner_iteration_ordering.get();
+    if (!CoordinateDescentMinimizer::IsOrderingValid(program,
+                                                     *ordering_ptr,
+                                                     &summary->message)) {
+      return NULL;
     }
-    ordering_ptr = options.inner_iteration_ordering;
   }
 
   if (!inner_iteration_minimizer->Init(program,
                                        parameter_map,
                                        *ordering_ptr,
-                                       &summary->error)) {
+                                       &summary->message)) {
     return NULL;
   }
 
   summary->inner_iterations_used = true;
   summary->inner_iteration_time_in_seconds = 0.0;
-  SummarizeOrdering(ordering_ptr, &(summary->inner_iteration_ordering_used));
+  OrderingToGroupSizes(ordering_ptr,
+                       &(summary->inner_iteration_ordering_used));
   return inner_iteration_minimizer.release();
 }
 
-void SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(
-    Solver::Options* options) {
-  if (!IsSchurType(options->linear_solver_type)) {
-    return;
-  }
-
-  string msg = "No e_blocks remaining. Switching from ";
-  if (options->linear_solver_type == SPARSE_SCHUR) {
-    options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
-    msg += "SPARSE_SCHUR to SPARSE_NORMAL_CHOLESKY.";
-  } else if (options->linear_solver_type == DENSE_SCHUR) {
-    // TODO(sameeragarwal): This is probably not a great choice.
-    // Ideally, we should have a DENSE_NORMAL_CHOLESKY, that can
-    // take a BlockSparseMatrix as input.
-    options->linear_solver_type = DENSE_QR;
-    msg += "DENSE_SCHUR to DENSE_QR.";
-  } else if (options->linear_solver_type == ITERATIVE_SCHUR) {
-    options->linear_solver_type = CGNR;
-    if (options->preconditioner_type != IDENTITY) {
-      msg += StringPrintf("ITERATIVE_SCHUR with %s preconditioner "
-                          "to CGNR with JACOBI preconditioner.",
-                          PreconditionerTypeToString(
-                            options->preconditioner_type));
-      // CGNR currently only supports the JACOBI preconditioner.
-      options->preconditioner_type = JACOBI;
-    } else {
-      msg += "ITERATIVE_SCHUR with IDENTITY preconditioner"
-          "to CGNR with IDENTITY preconditioner.";
-    }
-  }
-  LOG(WARNING) << msg;
-}
-
-bool SolverImpl::ApplyUserOrdering(
-    const ProblemImpl::ParameterMap& parameter_map,
-    const ParameterBlockOrdering* parameter_block_ordering,
-    Program* program,
-    string* error) {
-  const int num_parameter_blocks =  program->NumParameterBlocks();
-  if (parameter_block_ordering->NumElements() != num_parameter_blocks) {
-    *error = StringPrintf("User specified ordering does not have the same "
-                          "number of parameters as the problem. The problem"
-                          "has %d blocks while the ordering has %d blocks.",
-                          num_parameter_blocks,
-                          parameter_block_ordering->NumElements());
-    return false;
-  }
-
-  vector<ParameterBlock*>* parameter_blocks =
-      program->mutable_parameter_blocks();
-  parameter_blocks->clear();
-
-  const map<int, set<double*> >& groups =
-      parameter_block_ordering->group_to_elements();
-
-  for (map<int, set<double*> >::const_iterator group_it = groups.begin();
-       group_it != groups.end();
-       ++group_it) {
-    const set<double*>& group = group_it->second;
-    for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
-         parameter_block_ptr_it != group.end();
-         ++parameter_block_ptr_it) {
-      ProblemImpl::ParameterMap::const_iterator parameter_block_it =
-          parameter_map.find(*parameter_block_ptr_it);
-      if (parameter_block_it == parameter_map.end()) {
-        *error = StringPrintf("User specified ordering contains a pointer "
-                              "to a double that is not a parameter block in "
-                              "the problem. The invalid double is in group: %d",
-                              group_it->first);
-        return false;
-      }
-      parameter_blocks->push_back(parameter_block_it->second);
-    }
-  }
-  return true;
-}
-
-
-TripletSparseMatrix* SolverImpl::CreateJacobianBlockSparsityTranspose(
-    const Program* program) {
-
-  // Matrix to store the block sparsity structure of the Jacobian.
-  TripletSparseMatrix* tsm =
-      new TripletSparseMatrix(program->NumParameterBlocks(),
-                              program->NumResidualBlocks(),
-                              10 * program->NumResidualBlocks());
-  int num_nonzeros = 0;
-  int* rows = tsm->mutable_rows();
-  int* cols = tsm->mutable_cols();
-  double* values = tsm->mutable_values();
-
-  const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
-  for (int c = 0; c < residual_blocks.size(); ++c) {
-    const ResidualBlock* residual_block = residual_blocks[c];
-    const int num_parameter_blocks = residual_block->NumParameterBlocks();
-    ParameterBlock* const* parameter_blocks =
-        residual_block->parameter_blocks();
-
-    for (int j = 0; j < num_parameter_blocks; ++j) {
-      if (parameter_blocks[j]->IsConstant()) {
-        continue;
-      }
-
-      // Re-size the matrix if needed.
-      if (num_nonzeros >= tsm->max_num_nonzeros()) {
-        tsm->set_num_nonzeros(num_nonzeros);
-        tsm->Reserve(2 * num_nonzeros);
-        rows = tsm->mutable_rows();
-        cols = tsm->mutable_cols();
-        values = tsm->mutable_values();
-      }
-      CHECK_LT(num_nonzeros,  tsm->max_num_nonzeros());
-
-      const int r = parameter_blocks[j]->index();
-      rows[num_nonzeros] = r;
-      cols[num_nonzeros] = c;
-      values[num_nonzeros] = 1.0;
-      ++num_nonzeros;
-    }
-  }
-
-  tsm->set_num_nonzeros(num_nonzeros);
-  return tsm;
-}
-
-bool SolverImpl::ReorderProgramForSchurTypeLinearSolver(
-    const LinearSolverType linear_solver_type,
-    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
-    const ProblemImpl::ParameterMap& parameter_map,
-    ParameterBlockOrdering* parameter_block_ordering,
-    Program* program,
-    string* error) {
-  if (parameter_block_ordering->NumGroups() == 1) {
-    // If the user supplied an parameter_block_ordering with just one
-    // group, it is equivalent to the user supplying NULL as an
-    // parameter_block_ordering. Ceres is completely free to choose the
-    // parameter block ordering as it sees fit. For Schur type solvers,
-    // this means that the user wishes for Ceres to identify the
-    // e_blocks, which we do by computing a maximal independent set.
-    vector<ParameterBlock*> schur_ordering;
-    const int num_eliminate_blocks =
-        ComputeStableSchurOrdering(*program, &schur_ordering);
-
-    CHECK_EQ(schur_ordering.size(), program->NumParameterBlocks())
-        << "Congratulations, you found a Ceres bug! Please report this error "
-        << "to the developers.";
-
-    // Update the parameter_block_ordering object.
-    for (int i = 0; i < schur_ordering.size(); ++i) {
-      double* parameter_block = schur_ordering[i]->mutable_user_state();
-      const int group_id = (i < num_eliminate_blocks) ? 0 : 1;
-      parameter_block_ordering->AddElementToGroup(parameter_block, group_id);
-    }
-
-    // We could call ApplyUserOrdering but this is cheaper and
-    // simpler.
-    swap(*program->mutable_parameter_blocks(), schur_ordering);
-  } else {
-    // The user provided an ordering with more than one elimination
-    // group. Trust the user and apply the ordering.
-    if (!ApplyUserOrdering(parameter_map,
-                           parameter_block_ordering,
-                           program,
-                           error)) {
-      return false;
-    }
-  }
-
-  // Pre-order the columns corresponding to the schur complement if
-  // possible.
-#if !defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CAMD)
-  if (linear_solver_type == SPARSE_SCHUR &&
-      sparse_linear_algebra_library_type == SUITE_SPARSE) {
-    vector<int> constraints;
-    vector<ParameterBlock*>& parameter_blocks =
-        *(program->mutable_parameter_blocks());
-
-    for (int i = 0; i < parameter_blocks.size(); ++i) {
-      constraints.push_back(
-          parameter_block_ordering->GroupId(
-              parameter_blocks[i]->mutable_user_state()));
-    }
-
-    // Renumber the entries of constraints to be contiguous integers
-    // as camd requires that the group ids be in the range [0,
-    // parameter_blocks.size() - 1].
-    SolverImpl::CompactifyArray(&constraints);
-
-    // Set the offsets and index for CreateJacobianSparsityTranspose.
-    program->SetParameterOffsetsAndIndex();
-    // Compute a block sparse presentation of J'.
-    scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
-        SolverImpl::CreateJacobianBlockSparsityTranspose(program));
-
-    SuiteSparse ss;
-    cholmod_sparse* block_jacobian_transpose =
-        ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
-
-    vector<int> ordering(parameter_blocks.size(), 0);
-    ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
-                                                   &constraints[0],
-                                                   &ordering[0]);
-    ss.Free(block_jacobian_transpose);
-
-    const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
-    for (int i = 0; i < program->NumParameterBlocks(); ++i) {
-      parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
-    }
-  }
-#endif
-
-  program->SetParameterOffsetsAndIndex();
-  // Schur type solvers also require that their residual blocks be
-  // lexicographically ordered.
-  const int num_eliminate_blocks =
-      parameter_block_ordering->group_to_elements().begin()->second.size();
-  return LexicographicallyOrderResidualBlocks(num_eliminate_blocks,
-                                              program,
-                                              error);
-}
-
-bool SolverImpl::ReorderProgramForSparseNormalCholesky(
-    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
-    const ParameterBlockOrdering* parameter_block_ordering,
-    Program* program,
-    string* error) {
-  // Set the offsets and index for CreateJacobianSparsityTranspose.
-  program->SetParameterOffsetsAndIndex();
-  // Compute a block sparse presentation of J'.
-  scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
-      SolverImpl::CreateJacobianBlockSparsityTranspose(program));
-
-  vector<int> ordering(program->NumParameterBlocks(), 0);
-  vector<ParameterBlock*>& parameter_blocks =
-      *(program->mutable_parameter_blocks());
-
-  if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
-#ifdef CERES_NO_SUITESPARSE
-    *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITE_SPARSE because "
-        "SuiteSparse was not enabled when Ceres was built.";
-    return false;
-#else
-    SuiteSparse ss;
-    cholmod_sparse* block_jacobian_transpose =
-        ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
-
-#  ifdef CERES_NO_CAMD
-    // No cholmod_camd, so ignore user's parameter_block_ordering and
-    // use plain old AMD.
-    ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
-#  else
-    if (parameter_block_ordering->NumGroups() > 1) {
-      // If the user specified more than one elimination groups use them
-      // to constrain the ordering.
-      vector<int> constraints;
-      for (int i = 0; i < parameter_blocks.size(); ++i) {
-        constraints.push_back(
-            parameter_block_ordering->GroupId(
-                parameter_blocks[i]->mutable_user_state()));
-      }
-      ss.ConstrainedApproximateMinimumDegreeOrdering(
-          block_jacobian_transpose,
-          &constraints[0],
-          &ordering[0]);
-    } else {
-      ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose,
-                                          &ordering[0]);
-    }
-#  endif  // CERES_NO_CAMD
-
-    ss.Free(block_jacobian_transpose);
-#endif  // CERES_NO_SUITESPARSE
-
-  } else if (sparse_linear_algebra_library_type == CX_SPARSE) {
-#ifndef CERES_NO_CXSPARSE
-
-    // CXSparse works with J'J instead of J'. So compute the block
-    // sparsity for J'J and compute an approximate minimum degree
-    // ordering.
-    CXSparse cxsparse;
-    cs_di* block_jacobian_transpose;
-    block_jacobian_transpose =
-        cxsparse.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
-    cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
-    cs_di* block_hessian =
-        cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
-    cxsparse.Free(block_jacobian);
-    cxsparse.Free(block_jacobian_transpose);
-
-    cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, &ordering[0]);
-    cxsparse.Free(block_hessian);
-#else  // CERES_NO_CXSPARSE
-    *error = "Can't use SPARSE_NORMAL_CHOLESKY with CX_SPARSE because "
-        "CXSparse was not enabled when Ceres was built.";
-    return false;
-#endif  // CERES_NO_CXSPARSE
-  } else {
-    *error = "Unknown sparse linear algebra library.";
-    return false;
-  }
-
-  // Apply ordering.
-  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
-  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
-    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
-  }
-
-  program->SetParameterOffsetsAndIndex();
-  return true;
-}
-
-void SolverImpl::CompactifyArray(vector<int>* array_ptr) {
-  vector<int>& array = *array_ptr;
-  const set<int> unique_group_ids(array.begin(), array.end());
-  map<int, int> group_id_map;
-  for (set<int>::const_iterator it = unique_group_ids.begin();
-       it != unique_group_ids.end();
-       ++it) {
-    InsertOrDie(&group_id_map, *it, group_id_map.size());
-  }
-
-  for (int i = 0; i < array.size(); ++i) {
-    array[i] = group_id_map[array[i]];
-  }
-}
-
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/solver_impl.h b/internal/ceres/solver_impl.h
index 2b7ca3e..c42c32a 100644
--- a/internal/ceres/solver_impl.h
+++ b/internal/ceres/solver_impl.h
@@ -67,10 +67,8 @@
       CoordinateDescentMinimizer* inner_iteration_minimizer,
       Evaluator* evaluator,
       LinearSolver* linear_solver,
-      double* parameters,
       Solver::Summary* summary);
 
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
   static void LineSearchSolve(const Solver::Options& options,
                               ProblemImpl* problem_impl,
                               Solver::Summary* summary);
@@ -79,9 +77,7 @@
   static void LineSearchMinimize(const Solver::Options &options,
                                  Program* program,
                                  Evaluator* evaluator,
-                                 double* parameters,
                                  Solver::Summary* summary);
-#endif  // CERES_NO_LINE_SEARCH_MINIMIZER
 
   // Create the transformed Program, which has all the fixed blocks
   // and residuals eliminated, and in the case of automatic schur
@@ -93,7 +89,7 @@
   static Program* CreateReducedProgram(Solver::Options* options,
                                        ProblemImpl* problem_impl,
                                        double* fixed_cost,
-                                       string* error);
+                                       string* message);
 
   // Create the appropriate linear solver, taking into account any
   // config changes decided by CreateTransformedProgram(). The
@@ -101,38 +97,18 @@
   // selected; consider the case that the remaining elimininated
   // blocks is zero after removing fixed blocks.
   static LinearSolver* CreateLinearSolver(Solver::Options* options,
-                                          string* error);
-
-  // Reorder the residuals for program, if necessary, so that the
-  // residuals involving e block (i.e., the first num_eliminate_block
-  // parameter blocks) occur together. This is a necessary condition
-  // for the Schur eliminator.
-  static bool LexicographicallyOrderResidualBlocks(
-      const int num_eliminate_blocks,
-      Program* program,
-      string* error);
+                                          string* message);
 
   // Create the appropriate evaluator for the transformed program.
   static Evaluator* CreateEvaluator(
       const Solver::Options& options,
       const ProblemImpl::ParameterMap& parameter_map,
       Program* program,
-      string* error);
-
-  // Remove the fixed or unused parameter blocks and residuals
-  // depending only on fixed parameters from the problem. Also updates
-  // num_eliminate_blocks, since removed parameters changes the point
-  // at which the eliminated blocks is valid.  If fixed_cost is not
-  // NULL, the residual blocks that are removed are evaluated and the
-  // sum of their cost is returned in fixed_cost.
-  static bool RemoveFixedBlocksFromProgram(Program* program,
-                                           ParameterBlockOrdering* ordering,
-                                           double* fixed_cost,
-                                           string* error);
+      string* message);
 
   static bool IsOrderingValid(const Solver::Options& options,
                               const ProblemImpl* problem_impl,
-                              string* error);
+                              string* message);
 
   static bool IsParameterBlockSetIndependent(
       const set<double*>& parameter_block_ptrs,
@@ -143,78 +119,6 @@
       const Program& program,
       const ProblemImpl::ParameterMap& parameter_map,
       Solver::Summary* summary);
-
-  // If the linear solver is of Schur type, then replace it with the
-  // closest equivalent linear solver. This is done when the user
-  // requested a Schur type solver but the problem structure makes it
-  // impossible to use one.
-  //
-  // If the linear solver is not of Schur type, the function is a
-  // no-op.
-  static void AlternateLinearSolverForSchurTypeLinearSolver(
-      Solver::Options* options);
-
-  // Create a TripletSparseMatrix which contains the zero-one
-  // structure corresponding to the block sparsity of the transpose of
-  // the Jacobian matrix.
-  //
-  // Caller owns the result.
-  static TripletSparseMatrix* CreateJacobianBlockSparsityTranspose(
-      const Program* program);
-
-  // Reorder the parameter blocks in program using the ordering
-  static bool ApplyUserOrdering(
-      const ProblemImpl::ParameterMap& parameter_map,
-      const ParameterBlockOrdering* parameter_block_ordering,
-      Program* program,
-      string* error);
-
-  // Sparse cholesky factorization routines when doing the sparse
-  // cholesky factorization of the Jacobian matrix, reorders its
-  // columns to reduce the fill-in. Compute this permutation and
-  // re-order the parameter blocks.
-  //
-  // If the parameter_block_ordering contains more than one
-  // elimination group and support for constrained fill-reducing
-  // ordering is available in the sparse linear algebra library
-  // (SuiteSparse version >= 4.2.0) then the fill reducing
-  // ordering will take it into account, otherwise it will be ignored.
-  static bool ReorderProgramForSparseNormalCholesky(
-      const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
-      const ParameterBlockOrdering* parameter_block_ordering,
-      Program* program,
-      string* error);
-
-  // Schur type solvers require that all parameter blocks eliminated
-  // by the Schur eliminator occur before others and the residuals be
-  // sorted in lexicographic order of their parameter blocks.
-  //
-  // If the parameter_block_ordering only contains one elimination
-  // group then a maximal independent set is computed and used as the
-  // first elimination group, otherwise the user's ordering is used.
-  //
-  // If the linear solver type is SPARSE_SCHUR and support for
-  // constrained fill-reducing ordering is available in the sparse
-  // linear algebra library (SuiteSparse version >= 4.2.0) then
-  // columns of the schur complement matrix are ordered to reduce the
-  // fill-in the Cholesky factorization.
-  //
-  // Upon return, ordering contains the parameter block ordering that
-  // was used to order the program.
-  static bool ReorderProgramForSchurTypeLinearSolver(
-      const LinearSolverType linear_solver_type,
-      const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
-      const ProblemImpl::ParameterMap& parameter_map,
-      ParameterBlockOrdering* parameter_block_ordering,
-      Program* program,
-      string* error);
-
-  // array contains a list of (possibly repeating) non-negative
-  // integers. Let us assume that we have constructed another array
-  // `p` by sorting and uniqueing the entries of array.
-  // CompactifyArray replaces each entry in "array" with its position
-  // in `p`.
-  static void CompactifyArray(vector<int>* array);
 };
 
 }  // namespace internal
diff --git a/internal/ceres/solver_impl_test.cc b/internal/ceres/solver_impl_test.cc
index 583ef4e..2d517c6 100644
--- a/internal/ceres/solver_impl_test.cc
+++ b/internal/ceres/solver_impl_test.cc
@@ -1,5 +1,5 @@
 // Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
 // http://code.google.com/p/ceres-solver/
 //
 // Redistribution and use in source and binary forms, with or without
@@ -42,660 +42,6 @@
 namespace ceres {
 namespace internal {
 
-// A cost function that sipmply returns its argument.
-class UnaryIdentityCostFunction : public SizedCostFunction<1, 1> {
- public:
-  virtual bool Evaluate(double const* const* parameters,
-                        double* residuals,
-                        double** jacobians) const {
-    residuals[0] = parameters[0][0];
-    if (jacobians != NULL && jacobians[0] != NULL) {
-      jacobians[0][0] = 1.0;
-    }
-    return true;
-  }
-};
-
-// Templated base class for the CostFunction signatures.
-template <int kNumResiduals, int N0, int N1, int N2>
-class MockCostFunctionBase : public
-SizedCostFunction<kNumResiduals, N0, N1, N2> {
- public:
-  virtual bool Evaluate(double const* const* parameters,
-                        double* residuals,
-                        double** jacobians) const {
-    // Do nothing. This is never called.
-    return true;
-  }
-};
-
-class UnaryCostFunction : public MockCostFunctionBase<2, 1, 0, 0> {};
-class BinaryCostFunction : public MockCostFunctionBase<2, 1, 1, 0> {};
-class TernaryCostFunction : public MockCostFunctionBase<2, 1, 1, 1> {};
-
-TEST(SolverImpl, RemoveFixedBlocksNothingConstant) {
-  ProblemImpl problem;
-  double x;
-  double y;
-  double z;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
-  problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
-
-  string error;
-  {
-    ParameterBlockOrdering ordering;
-    ordering.AddElementToGroup(&x, 0);
-    ordering.AddElementToGroup(&y, 0);
-    ordering.AddElementToGroup(&z, 0);
-
-    Program program(*problem.mutable_program());
-    EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
-                                                         &ordering,
-                                                         NULL,
-                                                         &error));
-    EXPECT_EQ(program.NumParameterBlocks(), 3);
-    EXPECT_EQ(program.NumResidualBlocks(), 3);
-    EXPECT_EQ(ordering.NumElements(), 3);
-  }
-}
-
-TEST(SolverImpl, RemoveFixedBlocksAllParameterBlocksConstant) {
-  ProblemImpl problem;
-  double x;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
-  problem.SetParameterBlockConstant(&x);
-
-  ParameterBlockOrdering ordering;
-  ordering.AddElementToGroup(&x, 0);
-
-  Program program(problem.program());
-  string error;
-  EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
-                                                       &ordering,
-                                                       NULL,
-                                                       &error));
-  EXPECT_EQ(program.NumParameterBlocks(), 0);
-  EXPECT_EQ(program.NumResidualBlocks(), 0);
-  EXPECT_EQ(ordering.NumElements(), 0);
-}
-
-TEST(SolverImpl, RemoveFixedBlocksNoResidualBlocks) {
-  ProblemImpl problem;
-  double x;
-  double y;
-  double z;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-
-  ParameterBlockOrdering ordering;
-  ordering.AddElementToGroup(&x, 0);
-  ordering.AddElementToGroup(&y, 0);
-  ordering.AddElementToGroup(&z, 0);
-
-
-  Program program(problem.program());
-  string error;
-  EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
-                                                       &ordering,
-                                                       NULL,
-                                                       &error));
-  EXPECT_EQ(program.NumParameterBlocks(), 0);
-  EXPECT_EQ(program.NumResidualBlocks(), 0);
-  EXPECT_EQ(ordering.NumElements(), 0);
-}
-
-TEST(SolverImpl, RemoveFixedBlocksOneParameterBlockConstant) {
-  ProblemImpl problem;
-  double x;
-  double y;
-  double z;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-
-  ParameterBlockOrdering ordering;
-  ordering.AddElementToGroup(&x, 0);
-  ordering.AddElementToGroup(&y, 0);
-  ordering.AddElementToGroup(&z, 0);
-
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
-  problem.SetParameterBlockConstant(&x);
-
-
-  Program program(problem.program());
-  string error;
-  EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
-                                                       &ordering,
-                                                       NULL,
-                                                       &error));
-  EXPECT_EQ(program.NumParameterBlocks(), 1);
-  EXPECT_EQ(program.NumResidualBlocks(), 1);
-  EXPECT_EQ(ordering.NumElements(), 1);
-}
-
-TEST(SolverImpl, RemoveFixedBlocksNumEliminateBlocks) {
-  ProblemImpl problem;
-  double x;
-  double y;
-  double z;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
-  problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
-  problem.SetParameterBlockConstant(&x);
-
-  ParameterBlockOrdering ordering;
-  ordering.AddElementToGroup(&x, 0);
-  ordering.AddElementToGroup(&y, 0);
-  ordering.AddElementToGroup(&z, 1);
-
-  Program program(problem.program());
-  string error;
-  EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
-                                                       &ordering,
-                                                       NULL,
-                                                       &error));
-  EXPECT_EQ(program.NumParameterBlocks(), 2);
-  EXPECT_EQ(program.NumResidualBlocks(), 2);
-  EXPECT_EQ(ordering.NumElements(), 2);
-  EXPECT_EQ(ordering.GroupId(&y), 0);
-  EXPECT_EQ(ordering.GroupId(&z), 1);
-}
-
-TEST(SolverImpl, RemoveFixedBlocksFixedCost) {
-  ProblemImpl problem;
-  double x = 1.23;
-  double y = 4.56;
-  double z = 7.89;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-  problem.AddResidualBlock(new UnaryIdentityCostFunction(), NULL, &x);
-  problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
-  problem.SetParameterBlockConstant(&x);
-
-  ParameterBlockOrdering ordering;
-  ordering.AddElementToGroup(&x, 0);
-  ordering.AddElementToGroup(&y, 0);
-  ordering.AddElementToGroup(&z, 1);
-
-  double fixed_cost = 0.0;
-  Program program(problem.program());
-
-  double expected_fixed_cost;
-  ResidualBlock *expected_removed_block = program.residual_blocks()[0];
-  scoped_array<double> scratch(
-      new double[expected_removed_block->NumScratchDoublesForEvaluate()]);
-  expected_removed_block->Evaluate(true,
-                                   &expected_fixed_cost,
-                                   NULL,
-                                   NULL,
-                                   scratch.get());
-
-  string error;
-  EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
-                                                       &ordering,
-                                                       &fixed_cost,
-                                                       &error));
-  EXPECT_EQ(program.NumParameterBlocks(), 2);
-  EXPECT_EQ(program.NumResidualBlocks(), 2);
-  EXPECT_EQ(ordering.NumElements(), 2);
-  EXPECT_EQ(ordering.GroupId(&y), 0);
-  EXPECT_EQ(ordering.GroupId(&z), 1);
-  EXPECT_DOUBLE_EQ(fixed_cost, expected_fixed_cost);
-}
-
-TEST(SolverImpl, ReorderResidualBlockNormalFunction) {
-  ProblemImpl problem;
-  double x;
-  double y;
-  double z;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &z);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);
-
-  ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
-  ordering->AddElementToGroup(&x, 0);
-  ordering->AddElementToGroup(&y, 0);
-  ordering->AddElementToGroup(&z, 1);
-
-  Solver::Options options;
-  options.linear_solver_type = DENSE_SCHUR;
-  options.linear_solver_ordering = ordering;
-
-  const vector<ResidualBlock*>& residual_blocks =
-      problem.program().residual_blocks();
-
-  vector<ResidualBlock*> expected_residual_blocks;
-
-  // This is a bit fragile, but it serves the purpose. We know the
-  // bucketing algorithm that the reordering function uses, so we
-  // expect the order for residual blocks for each e_block to be
-  // filled in reverse.
-  expected_residual_blocks.push_back(residual_blocks[4]);
-  expected_residual_blocks.push_back(residual_blocks[1]);
-  expected_residual_blocks.push_back(residual_blocks[0]);
-  expected_residual_blocks.push_back(residual_blocks[5]);
-  expected_residual_blocks.push_back(residual_blocks[2]);
-  expected_residual_blocks.push_back(residual_blocks[3]);
-
-  Program* program = problem.mutable_program();
-  program->SetParameterOffsetsAndIndex();
-
-  string error;
-  EXPECT_TRUE(SolverImpl::LexicographicallyOrderResidualBlocks(
-                  2,
-                  problem.mutable_program(),
-                  &error));
-  EXPECT_EQ(residual_blocks.size(), expected_residual_blocks.size());
-  for (int i = 0; i < expected_residual_blocks.size(); ++i) {
-    EXPECT_EQ(residual_blocks[i], expected_residual_blocks[i]);
-  }
-}
-
-TEST(SolverImpl, ReorderResidualBlockNormalFunctionWithFixedBlocks) {
-  ProblemImpl problem;
-  double x;
-  double y;
-  double z;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-
-  // Set one parameter block constant.
-  problem.SetParameterBlockConstant(&z);
-
-  // Mark residuals for x's row block with "x" for readability.
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);       // 0 x
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);  // 1 x
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);  // 2
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);  // 3
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z);  // 4 x
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);  // 5
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z);  // 6 x
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);       // 7
-
-  ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
-  ordering->AddElementToGroup(&x, 0);
-  ordering->AddElementToGroup(&z, 0);
-  ordering->AddElementToGroup(&y, 1);
-
-  Solver::Options options;
-  options.linear_solver_type = DENSE_SCHUR;
-  options.linear_solver_ordering = ordering;
-
-  // Create the reduced program. This should remove the fixed block "z",
-  // marking the index to -1 at the same time. x and y also get indices.
-  string error;
-  scoped_ptr<Program> reduced_program(
-      SolverImpl::CreateReducedProgram(&options, &problem, NULL, &error));
-
-  const vector<ResidualBlock*>& residual_blocks =
-      problem.program().residual_blocks();
-
-  // This is a bit fragile, but it serves the purpose. We know the
-  // bucketing algorithm that the reordering function uses, so we
-  // expect the order for residual blocks for each e_block to be
-  // filled in reverse.
-
-  vector<ResidualBlock*> expected_residual_blocks;
-
-  // Row block for residuals involving "x". These are marked "x" in the block
-  // of code calling AddResidual() above.
-  expected_residual_blocks.push_back(residual_blocks[6]);
-  expected_residual_blocks.push_back(residual_blocks[4]);
-  expected_residual_blocks.push_back(residual_blocks[1]);
-  expected_residual_blocks.push_back(residual_blocks[0]);
-
-  // Row block for residuals involving "y".
-  expected_residual_blocks.push_back(residual_blocks[7]);
-  expected_residual_blocks.push_back(residual_blocks[5]);
-  expected_residual_blocks.push_back(residual_blocks[3]);
-  expected_residual_blocks.push_back(residual_blocks[2]);
-
-  EXPECT_EQ(reduced_program->residual_blocks().size(),
-            expected_residual_blocks.size());
-  for (int i = 0; i < expected_residual_blocks.size(); ++i) {
-    EXPECT_EQ(reduced_program->residual_blocks()[i],
-              expected_residual_blocks[i]);
-  }
-}
-
-TEST(SolverImpl, AutomaticSchurReorderingRespectsConstantBlocks) {
-  ProblemImpl problem;
-  double x;
-  double y;
-  double z;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-
-  // Set one parameter block constant.
-  problem.SetParameterBlockConstant(&z);
-
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
-  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z);
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);
-  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &z);
-
-  ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
-  ordering->AddElementToGroup(&x, 0);
-  ordering->AddElementToGroup(&z, 0);
-  ordering->AddElementToGroup(&y, 0);
-
-  Solver::Options options;
-  options.linear_solver_type = DENSE_SCHUR;
-  options.linear_solver_ordering = ordering;
-
-  string error;
-  scoped_ptr<Program> reduced_program(
-      SolverImpl::CreateReducedProgram(&options, &problem, NULL, &error));
-
-  const vector<ResidualBlock*>& residual_blocks =
-      reduced_program->residual_blocks();
-  const vector<ParameterBlock*>& parameter_blocks =
-      reduced_program->parameter_blocks();
-
-  const vector<ResidualBlock*>& original_residual_blocks =
-      problem.program().residual_blocks();
-
-  EXPECT_EQ(residual_blocks.size(), 8);
-  EXPECT_EQ(reduced_program->parameter_blocks().size(), 2);
-
-  // Verify that right parmeter block and the residual blocks have
-  // been removed.
-  for (int i = 0; i < 8; ++i) {
-    EXPECT_NE(residual_blocks[i], original_residual_blocks.back());
-  }
-  for (int i = 0; i < 2; ++i) {
-    EXPECT_NE(parameter_blocks[i]->mutable_user_state(), &z);
-  }
-}
-
-TEST(SolverImpl, ApplyUserOrderingOrderingTooSmall) {
-  ProblemImpl problem;
-  double x;
-  double y;
-  double z;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-
-  ParameterBlockOrdering ordering;
-  ordering.AddElementToGroup(&x, 0);
-  ordering.AddElementToGroup(&y, 1);
-
-  Program program(problem.program());
-  string error;
-  EXPECT_FALSE(SolverImpl::ApplyUserOrdering(problem.parameter_map(),
-                                             &ordering,
-                                             &program,
-                                             &error));
-}
-
-TEST(SolverImpl, ApplyUserOrderingNormal) {
-  ProblemImpl problem;
-  double x;
-  double y;
-  double z;
-
-  problem.AddParameterBlock(&x, 1);
-  problem.AddParameterBlock(&y, 1);
-  problem.AddParameterBlock(&z, 1);
-
-  ParameterBlockOrdering ordering;
-  ordering.AddElementToGroup(&x, 0);
-  ordering.AddElementToGroup(&y, 2);
-  ordering.AddElementToGroup(&z, 1);
-
-  Program* program = problem.mutable_program();
-  string error;
-
-  EXPECT_TRUE(SolverImpl::ApplyUserOrdering(problem.parameter_map(),
-                                            &ordering,
-                                            program,
-                                            &error));
-  const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
-
-  EXPECT_EQ(parameter_blocks.size(), 3);
-  EXPECT_EQ(parameter_blocks[0]->user_state(), &x);
-  EXPECT_EQ(parameter_blocks[1]->user_state(), &z);
-  EXPECT_EQ(parameter_blocks[2]->user_state(), &y);
-}
-
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
-TEST(SolverImpl, CreateLinearSolverNoSuiteSparse) {
-  Solver::Options options;
-  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
-  // CreateLinearSolver assumes a non-empty ordering.
-  options.linear_solver_ordering = new ParameterBlockOrdering;
-  string error;
-  EXPECT_FALSE(SolverImpl::CreateLinearSolver(&options, &error));
-}
-#endif
-
-TEST(SolverImpl, CreateLinearSolverNegativeMaxNumIterations) {
-  Solver::Options options;
-  options.linear_solver_type = DENSE_QR;
-  options.max_linear_solver_iterations = -1;
-  // CreateLinearSolver assumes a non-empty ordering.
-  options.linear_solver_ordering = new ParameterBlockOrdering;
-  string error;
-  EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
-            static_cast<LinearSolver*>(NULL));
-}
-
-TEST(SolverImpl, CreateLinearSolverNegativeMinNumIterations) {
-  Solver::Options options;
-  options.linear_solver_type = DENSE_QR;
-  options.min_linear_solver_iterations = -1;
-  // CreateLinearSolver assumes a non-empty ordering.
-  options.linear_solver_ordering = new ParameterBlockOrdering;
-  string error;
-  EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
-            static_cast<LinearSolver*>(NULL));
-}
-
-TEST(SolverImpl, CreateLinearSolverMaxLessThanMinIterations) {
-  Solver::Options options;
-  options.linear_solver_type = DENSE_QR;
-  options.min_linear_solver_iterations = 10;
-  options.max_linear_solver_iterations = 5;
-  options.linear_solver_ordering = new ParameterBlockOrdering;
-  string error;
-  EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
-            static_cast<LinearSolver*>(NULL));
-}
-
-TEST(SolverImpl, CreateLinearSolverDenseSchurMultipleThreads) {
-  Solver::Options options;
-  options.linear_solver_type = DENSE_SCHUR;
-  options.num_linear_solver_threads = 2;
-  // The Schur type solvers can only be created with the Ordering
-  // contains at least one elimination group.
-  options.linear_solver_ordering = new ParameterBlockOrdering;
-  double x;
-  double y;
-  options.linear_solver_ordering->AddElementToGroup(&x, 0);
-  options.linear_solver_ordering->AddElementToGroup(&y, 0);
-
-  string error;
-  scoped_ptr<LinearSolver> solver(
-      SolverImpl::CreateLinearSolver(&options, &error));
-  EXPECT_TRUE(solver != NULL);
-  EXPECT_EQ(options.linear_solver_type, DENSE_SCHUR);
-  EXPECT_EQ(options.num_linear_solver_threads, 2);
-}
-
-TEST(SolverImpl, CreateIterativeLinearSolverForDogleg) {
-  Solver::Options options;
-  options.trust_region_strategy_type = DOGLEG;
-  // CreateLinearSolver assumes a non-empty ordering.
-  options.linear_solver_ordering = new ParameterBlockOrdering;
-  string error;
-  options.linear_solver_type = ITERATIVE_SCHUR;
-  EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
-            static_cast<LinearSolver*>(NULL));
-
-  options.linear_solver_type = CGNR;
-  EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
-            static_cast<LinearSolver*>(NULL));
-}
-
-TEST(SolverImpl, CreateLinearSolverNormalOperation) {
-  Solver::Options options;
-  scoped_ptr<LinearSolver> solver;
-  options.linear_solver_type = DENSE_QR;
-  // CreateLinearSolver assumes a non-empty ordering.
-  options.linear_solver_ordering = new ParameterBlockOrdering;
-  string error;
-  solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
-  EXPECT_EQ(options.linear_solver_type, DENSE_QR);
-  EXPECT_TRUE(solver.get() != NULL);
-
-  options.linear_solver_type = DENSE_NORMAL_CHOLESKY;
-  solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
-  EXPECT_EQ(options.linear_solver_type, DENSE_NORMAL_CHOLESKY);
-  EXPECT_TRUE(solver.get() != NULL);
-
-#ifndef CERES_NO_SUITESPARSE
-  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
-  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
-  solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
-  EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
-  EXPECT_TRUE(solver.get() != NULL);
-#endif
-
-#ifndef CERES_NO_CXSPARSE
-  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
-  options.sparse_linear_algebra_library_type = CX_SPARSE;
-  solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
-  EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
-  EXPECT_TRUE(solver.get() != NULL);
-#endif
-
-  double x;
-  double y;
-  options.linear_solver_ordering->AddElementToGroup(&x, 0);
-  options.linear_solver_ordering->AddElementToGroup(&y, 0);
-
-  options.linear_solver_type = DENSE_SCHUR;
-  solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
-  EXPECT_EQ(options.linear_solver_type, DENSE_SCHUR);
-  EXPECT_TRUE(solver.get() != NULL);
-
-  options.linear_solver_type = SPARSE_SCHUR;
-  solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
-
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
-  EXPECT_TRUE(SolverImpl::CreateLinearSolver(&options, &error) == NULL);
-#else
-  EXPECT_TRUE(solver.get() != NULL);
-  EXPECT_EQ(options.linear_solver_type, SPARSE_SCHUR);
-#endif
-
-  options.linear_solver_type = ITERATIVE_SCHUR;
-  solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
-  EXPECT_EQ(options.linear_solver_type, ITERATIVE_SCHUR);
-  EXPECT_TRUE(solver.get() != NULL);
-}
-
-struct QuadraticCostFunction {
-  template <typename T> bool operator()(const T* const x,
-                                        T* residual) const {
-    residual[0] = T(5.0) - *x;
-    return true;
-  }
-};
-
-struct RememberingCallback : public IterationCallback {
-  explicit RememberingCallback(double *x) : calls(0), x(x) {}
-  virtual ~RememberingCallback() {}
-  virtual CallbackReturnType operator()(const IterationSummary& summary) {
-    x_values.push_back(*x);
-    return SOLVER_CONTINUE;
-  }
-  int calls;
-  double *x;
-  vector<double> x_values;
-};
-
-TEST(SolverImpl, UpdateStateEveryIterationOption) {
-  double x = 50.0;
-  const double original_x = x;
-
-  scoped_ptr<CostFunction> cost_function(
-      new AutoDiffCostFunction<QuadraticCostFunction, 1, 1>(
-          new QuadraticCostFunction));
-
-  Problem::Options problem_options;
-  problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
-  ProblemImpl problem(problem_options);
-  problem.AddResidualBlock(cost_function.get(), NULL, &x);
-
-  Solver::Options options;
-  options.linear_solver_type = DENSE_QR;
-
-  RememberingCallback callback(&x);
-  options.callbacks.push_back(&callback);
-
-  Solver::Summary summary;
-
-  int num_iterations;
-
-  // First try: no updating.
-  SolverImpl::Solve(options, &problem, &summary);
-  num_iterations = summary.num_successful_steps +
-                   summary.num_unsuccessful_steps;
-  EXPECT_GT(num_iterations, 1);
-  for (int i = 0; i < callback.x_values.size(); ++i) {
-    EXPECT_EQ(50.0, callback.x_values[i]);
-  }
-
-  // Second try: with updating
-  x = 50.0;
-  options.update_state_every_iteration = true;
-  callback.x_values.clear();
-  SolverImpl::Solve(options, &problem, &summary);
-  num_iterations = summary.num_successful_steps +
-                   summary.num_unsuccessful_steps;
-  EXPECT_GT(num_iterations, 1);
-  EXPECT_EQ(original_x, callback.x_values[0]);
-  EXPECT_NE(original_x, callback.x_values[1]);
-}
-
 // The parameters must be in separate blocks so that they can be individually
 // set constant or not.
 struct Quadratic4DCostFunction {
@@ -753,289 +99,8 @@
   EXPECT_EQ(&y, problem.program().parameter_blocks()[1]->state());
   EXPECT_EQ(&z, problem.program().parameter_blocks()[2]->state());
   EXPECT_EQ(&w, problem.program().parameter_blocks()[3]->state());
-}
 
-TEST(SolverImpl, NoParameterBlocks) {
-  ProblemImpl problem_impl;
-  Solver::Options options;
-  Solver::Summary summary;
-  SolverImpl::Solve(options, &problem_impl, &summary);
-  EXPECT_EQ(summary.termination_type, DID_NOT_RUN);
-  EXPECT_EQ(summary.error, "Problem contains no parameter blocks.");
-}
-
-TEST(SolverImpl, NoResiduals) {
-  ProblemImpl problem_impl;
-  Solver::Options options;
-  Solver::Summary summary;
-  double x = 1;
-  problem_impl.AddParameterBlock(&x, 1);
-  SolverImpl::Solve(options, &problem_impl, &summary);
-  EXPECT_EQ(summary.termination_type, DID_NOT_RUN);
-  EXPECT_EQ(summary.error, "Problem contains no residual blocks.");
-}
-
-
-TEST(SolverImpl, ProblemIsConstant) {
-  ProblemImpl problem_impl;
-  Solver::Options options;
-  Solver::Summary summary;
-  double x = 1;
-  problem_impl.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x);
-  problem_impl.SetParameterBlockConstant(&x);
-  SolverImpl::Solve(options, &problem_impl, &summary);
-  EXPECT_EQ(summary.termination_type, FUNCTION_TOLERANCE);
-  EXPECT_EQ(summary.initial_cost, 1.0 / 2.0);
-  EXPECT_EQ(summary.final_cost, 1.0 / 2.0);
-}
-
-TEST(SolverImpl, AlternateLinearSolverForSchurTypeLinearSolver) {
-  Solver::Options options;
-
-  options.linear_solver_type = DENSE_QR;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, DENSE_QR);
-
-  options.linear_solver_type = DENSE_NORMAL_CHOLESKY;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, DENSE_NORMAL_CHOLESKY);
-
-  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
-
-  options.linear_solver_type = CGNR;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, CGNR);
-
-  options.linear_solver_type = DENSE_SCHUR;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, DENSE_QR);
-
-  options.linear_solver_type = SPARSE_SCHUR;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
-
-  options.linear_solver_type = ITERATIVE_SCHUR;
-  options.preconditioner_type = IDENTITY;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, CGNR);
-  EXPECT_EQ(options.preconditioner_type, IDENTITY);
-
-  options.linear_solver_type = ITERATIVE_SCHUR;
-  options.preconditioner_type = JACOBI;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, CGNR);
-  EXPECT_EQ(options.preconditioner_type, JACOBI);
-
-  options.linear_solver_type = ITERATIVE_SCHUR;
-  options.preconditioner_type = SCHUR_JACOBI;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, CGNR);
-  EXPECT_EQ(options.preconditioner_type, JACOBI);
-
-  options.linear_solver_type = ITERATIVE_SCHUR;
-  options.preconditioner_type = CLUSTER_JACOBI;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, CGNR);
-  EXPECT_EQ(options.preconditioner_type, JACOBI);
-
-  options.linear_solver_type = ITERATIVE_SCHUR;
-  options.preconditioner_type = CLUSTER_TRIDIAGONAL;
-  SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
-  EXPECT_EQ(options.linear_solver_type, CGNR);
-  EXPECT_EQ(options.preconditioner_type, JACOBI);
-}
-
-TEST(SolverImpl, CreateJacobianBlockSparsityTranspose) {
-  ProblemImpl problem;
-  double x[2];
-  double y[3];
-  double z;
-
-  problem.AddParameterBlock(x, 2);
-  problem.AddParameterBlock(y, 3);
-  problem.AddParameterBlock(&z, 1);
-
-  problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 0, 0>(), NULL, x);
-  problem.AddResidualBlock(new MockCostFunctionBase<3, 1, 2, 0>(), NULL, &z, x);
-  problem.AddResidualBlock(new MockCostFunctionBase<4, 1, 3, 0>(), NULL, &z, y);
-  problem.AddResidualBlock(new MockCostFunctionBase<5, 1, 3, 0>(), NULL, &z, y);
-  problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 1, 0>(), NULL, x, &z);
-  problem.AddResidualBlock(new MockCostFunctionBase<2, 1, 3, 0>(), NULL, &z, y);
-  problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 1, 0>(), NULL, x, &z);
-  problem.AddResidualBlock(new MockCostFunctionBase<1, 3, 0, 0>(), NULL, y);
-
-  TripletSparseMatrix expected_block_sparse_jacobian(3, 8, 14);
-  {
-    int* rows = expected_block_sparse_jacobian.mutable_rows();
-    int* cols = expected_block_sparse_jacobian.mutable_cols();
-    double* values = expected_block_sparse_jacobian.mutable_values();
-    rows[0] = 0;
-    cols[0] = 0;
-
-    rows[1] = 2;
-    cols[1] = 1;
-    rows[2] = 0;
-    cols[2] = 1;
-
-    rows[3] = 2;
-    cols[3] = 2;
-    rows[4] = 1;
-    cols[4] = 2;
-
-    rows[5] = 2;
-    cols[5] = 3;
-    rows[6] = 1;
-    cols[6] = 3;
-
-    rows[7] = 0;
-    cols[7] = 4;
-    rows[8] = 2;
-    cols[8] = 4;
-
-    rows[9] = 2;
-    cols[9] = 5;
-    rows[10] = 1;
-    cols[10] = 5;
-
-    rows[11] = 0;
-    cols[11] = 6;
-    rows[12] = 2;
-    cols[12] = 6;
-
-    rows[13] = 1;
-    cols[13] = 7;
-    fill(values, values + 14, 1.0);
-    expected_block_sparse_jacobian.set_num_nonzeros(14);
-  }
-
-  Program* program = problem.mutable_program();
-  program->SetParameterOffsetsAndIndex();
-
-  scoped_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
-      SolverImpl::CreateJacobianBlockSparsityTranspose(program));
-
-  Matrix expected_dense_jacobian;
-  expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
-
-  Matrix actual_dense_jacobian;
-  actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
-  EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
-}
-
-template <int kNumResiduals, int kNumParameterBlocks>
-class NumParameterBlocksCostFunction : public CostFunction {
- public:
-  NumParameterBlocksCostFunction() {
-    set_num_residuals(kNumResiduals);
-    for (int i = 0; i < kNumParameterBlocks; ++i) {
-      mutable_parameter_block_sizes()->push_back(1);
-    }
-  }
-
-  virtual ~NumParameterBlocksCostFunction() {
-  }
-
-  virtual bool Evaluate(double const* const* parameters,
-                        double* residuals,
-                        double** jacobians) const {
-    return true;
-  }
-};
-
-TEST(SolverImpl, ReallocationInCreateJacobianBlockSparsityTranspose) {
-  // CreateJacobianBlockSparsityTranspose starts with a conservative
-  // estimate of the size of the sparsity pattern. This test ensures
-  // that when those estimates are violated, the reallocation/resizing
-  // logic works correctly.
-
-  ProblemImpl problem;
-  double x[20];
-
-  vector<double*> parameter_blocks;
-  for (int i = 0; i < 20; ++i) {
-    problem.AddParameterBlock(x + i, 1);
-    parameter_blocks.push_back(x + i);
-  }
-
-  problem.AddResidualBlock(new NumParameterBlocksCostFunction<1, 20>(),
-                           NULL,
-                           parameter_blocks);
-
-  TripletSparseMatrix expected_block_sparse_jacobian(20, 1, 20);
-  {
-    int* rows = expected_block_sparse_jacobian.mutable_rows();
-    int* cols = expected_block_sparse_jacobian.mutable_cols();
-    for (int i = 0; i < 20; ++i) {
-      rows[i] = i;
-      cols[i] = 0;
-    }
-
-    double* values = expected_block_sparse_jacobian.mutable_values();
-    fill(values, values + 20, 1.0);
-    expected_block_sparse_jacobian.set_num_nonzeros(20);
-  }
-
-  Program* program = problem.mutable_program();
-  program->SetParameterOffsetsAndIndex();
-
-  scoped_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
-      SolverImpl::CreateJacobianBlockSparsityTranspose(program));
-
-  Matrix expected_dense_jacobian;
-  expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
-
-  Matrix actual_dense_jacobian;
-  actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
-  EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
-}
-
-TEST(CompactifyArray, ContiguousEntries) {
-  vector<int> array;
-  array.push_back(0);
-  array.push_back(1);
-  vector<int> expected = array;
-  SolverImpl::CompactifyArray(&array);
-  EXPECT_EQ(array, expected);
-  array.clear();
-
-  array.push_back(1);
-  array.push_back(0);
-  expected = array;
-  SolverImpl::CompactifyArray(&array);
-  EXPECT_EQ(array, expected);
-}
-
-TEST(CompactifyArray, NonContiguousEntries) {
-  vector<int> array;
-  array.push_back(0);
-  array.push_back(2);
-  vector<int> expected;
-  expected.push_back(0);
-  expected.push_back(1);
-  SolverImpl::CompactifyArray(&array);
-  EXPECT_EQ(array, expected);
-}
-
-TEST(CompactifyArray, NonContiguousRepeatingEntries) {
-  vector<int> array;
-  array.push_back(3);
-  array.push_back(1);
-  array.push_back(0);
-  array.push_back(0);
-  array.push_back(0);
-  array.push_back(5);
-  vector<int> expected;
-  expected.push_back(2);
-  expected.push_back(1);
-  expected.push_back(0);
-  expected.push_back(0);
-  expected.push_back(0);
-  expected.push_back(3);
-
-  SolverImpl::CompactifyArray(&array);
-  EXPECT_EQ(array, expected);
+  EXPECT_TRUE(problem.program().IsValid());
 }
 
 }  // namespace internal
diff --git a/internal/ceres/solver_test.cc b/internal/ceres/solver_test.cc
new file mode 100644
index 0000000..2a136f7
--- /dev/null
+++ b/internal/ceres/solver_test.cc
@@ -0,0 +1,298 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/solver.h"
+
+#include <limits>
+#include <cmath>
+#include <vector>
+#include "gtest/gtest.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/autodiff_cost_function.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(SolverOptions, DefaultTrustRegionOptionsAreValid) {
+  Solver::Options options;
+  options.minimizer_type = TRUST_REGION;
+  string error;
+  EXPECT_TRUE(options.IsValid(&error)) << error;
+}
+
+TEST(SolverOptions, DefaultLineSearchOptionsAreValid) {
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  string error;
+  EXPECT_TRUE(options.IsValid(&error)) << error;
+}
+
+struct QuadraticCostFunctor {
+  template <typename T> bool operator()(const T* const x,
+                                        T* residual) const {
+    residual[0] = T(5.0) - *x;
+    return true;
+  }
+
+  static CostFunction* Create() {
+    return new AutoDiffCostFunction<QuadraticCostFunctor, 1, 1>(
+        new QuadraticCostFunctor);
+  }
+};
+
+struct RememberingCallback : public IterationCallback {
+  explicit RememberingCallback(double *x) : calls(0), x(x) {}
+  virtual ~RememberingCallback() {}
+  virtual CallbackReturnType operator()(const IterationSummary& summary) {
+    x_values.push_back(*x);
+    return SOLVER_CONTINUE;
+  }
+  int calls;
+  double *x;
+  vector<double> x_values;
+};
+
+TEST(Solver, UpdateStateEveryIterationOption) {
+  double x = 50.0;
+  const double original_x = x;
+
+  scoped_ptr<CostFunction> cost_function(QuadraticCostFunctor::Create());
+  Problem::Options problem_options;
+  problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+  Problem problem(problem_options);
+  problem.AddResidualBlock(cost_function.get(), NULL, &x);
+
+  Solver::Options options;
+  options.linear_solver_type = DENSE_QR;
+
+  RememberingCallback callback(&x);
+  options.callbacks.push_back(&callback);
+
+  Solver::Summary summary;
+
+  int num_iterations;
+
+  // First try: no updating.
+  Solve(options, &problem, &summary);
+  num_iterations = summary.num_successful_steps +
+                   summary.num_unsuccessful_steps;
+  EXPECT_GT(num_iterations, 1);
+  for (int i = 0; i < callback.x_values.size(); ++i) {
+    EXPECT_EQ(50.0, callback.x_values[i]);
+  }
+
+  // Second try: with updating
+  x = 50.0;
+  options.update_state_every_iteration = true;
+  callback.x_values.clear();
+  Solve(options, &problem, &summary);
+  num_iterations = summary.num_successful_steps +
+                   summary.num_unsuccessful_steps;
+  EXPECT_GT(num_iterations, 1);
+  EXPECT_EQ(original_x, callback.x_values[0]);
+  EXPECT_NE(original_x, callback.x_values[1]);
+}
+
+// The parameters must be in separate blocks so that they can be individually
+// set constant or not.
+struct Quadratic4DCostFunction {
+  template <typename T> bool operator()(const T* const x,
+                                        const T* const y,
+                                        const T* const z,
+                                        const T* const w,
+                                        T* residual) const {
+    // A 4-dimension axis-aligned quadratic.
+    residual[0] = T(10.0) - *x +
+                  T(20.0) - *y +
+                  T(30.0) - *z +
+                  T(40.0) - *w;
+    return true;
+  }
+
+  static CostFunction* Create() {
+    return new AutoDiffCostFunction<Quadratic4DCostFunction, 1, 1, 1, 1, 1>(
+        new Quadratic4DCostFunction);
+  }
+};
+
+// A cost function that simply returns its argument.
+class UnaryIdentityCostFunction : public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    residuals[0] = parameters[0][0];
+    if (jacobians != NULL && jacobians[0] != NULL) {
+      jacobians[0][0] = 1.0;
+    }
+    return true;
+  }
+};
+
+TEST(Solver, TrustRegionProblemHasNoParameterBlocks) {
+  Problem problem;
+  Solver::Options options;
+  options.minimizer_type = TRUST_REGION;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.message,
+            "Function tolerance reached. "
+            "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, LineSearchProblemHasNoParameterBlocks) {
+  Problem problem;
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.message,
+            "Function tolerance reached. "
+            "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, TrustRegionProblemHasZeroResiduals) {
+  Problem problem;
+  double x = 1;
+  problem.AddParameterBlock(&x, 1);
+  Solver::Options options;
+  options.minimizer_type = TRUST_REGION;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.message,
+            "Function tolerance reached. "
+            "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, LineSearchProblemHasZeroResiduals) {
+  Problem problem;
+  double x = 1;
+  problem.AddParameterBlock(&x, 1);
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.message,
+            "Function tolerance reached. "
+            "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, TrustRegionProblemIsConstant) {
+  Problem problem;
+  double x = 1;
+  problem.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x);
+  problem.SetParameterBlockConstant(&x);
+  Solver::Options options;
+  options.minimizer_type = TRUST_REGION;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.initial_cost, 1.0 / 2.0);
+  EXPECT_EQ(summary.final_cost, 1.0 / 2.0);
+}
+
+TEST(Solver, LineSearchProblemIsConstant) {
+  Problem problem;
+  double x = 1;
+  problem.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x);
+  problem.SetParameterBlockConstant(&x);
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.initial_cost, 1.0 / 2.0);
+  EXPECT_EQ(summary.final_cost, 1.0 / 2.0);
+}
+
+#if defined(CERES_NO_SUITESPARSE)
+TEST(Solver, SparseNormalCholeskyNoSuiteSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+#endif
+
+#if defined(CERES_NO_CXSPARSE)
+TEST(Solver, SparseNormalCholeskyNoCXSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = CX_SPARSE;
+  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+#endif
+
+TEST(Solver, IterativeLinearSolverForDogleg) {
+  Solver::Options options;
+  options.trust_region_strategy_type = DOGLEG;
+  string message;
+  options.linear_solver_type = ITERATIVE_SCHUR;
+  EXPECT_FALSE(options.IsValid(&message));
+
+  options.linear_solver_type = CGNR;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, LinearSolverTypeNormalOperation) {
+  Solver::Options options;
+  options.linear_solver_type = DENSE_QR;
+
+  string message;
+  EXPECT_TRUE(options.IsValid(&message));
+
+  options.linear_solver_type = DENSE_NORMAL_CHOLESKY;
+  EXPECT_TRUE(options.IsValid(&message));
+
+  options.linear_solver_type = DENSE_SCHUR;
+  EXPECT_TRUE(options.IsValid(&message));
+
+  options.linear_solver_type = SPARSE_SCHUR;
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+  EXPECT_FALSE(options.IsValid(&message));
+#else
+  EXPECT_TRUE(options.IsValid(&message));
+#endif
+
+  options.linear_solver_type = ITERATIVE_SCHUR;
+  EXPECT_TRUE(options.IsValid(&message));
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index f1a5237..0940815 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -28,7 +28,8 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
 
 #include "ceres/sparse_normal_cholesky_solver.h"
 
@@ -45,6 +46,8 @@
 #include "ceres/triplet_sparse_matrix.h"
 #include "ceres/types.h"
 #include "ceres/wall_time.h"
+#include "Eigen/SparseCore"
+
 
 namespace ceres {
 namespace internal {
@@ -53,23 +56,23 @@
     const LinearSolver::Options& options)
     : factor_(NULL),
       cxsparse_factor_(NULL),
-      options_(options) {
+      options_(options) {
 }
 
-SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
-#ifndef CERES_NO_SUITESPARSE
+void SparseNormalCholeskySolver::FreeFactorization() {
   if (factor_ != NULL) {
     ss_.Free(factor_);
     factor_ = NULL;
   }
-#endif
 
-#ifndef CERES_NO_CXSPARSE
   if (cxsparse_factor_ != NULL) {
     cxsparse_.Free(cxsparse_factor_);
     cxsparse_factor_ = NULL;
   }
-#endif  // CERES_NO_CXSPARSE
+}
+
+SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
+  FreeFactorization();
 }
 
 LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
@@ -77,177 +80,303 @@
     const double* b,
     const LinearSolver::PerSolveOptions& per_solve_options,
     double * x) {
+
+  const int num_cols = A->num_cols();
+  VectorRef(x, num_cols).setZero();
+  A->LeftMultiply(b, x);
+
+  if (per_solve_options.D != NULL) {
+    // Temporarily append a diagonal block to the A matrix, but undo
+    // it before returning the matrix to the user.
+    scoped_ptr<CompressedRowSparseMatrix> regularizer;
+    if (A->col_blocks().size() > 0) {
+      regularizer.reset(CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+                            per_solve_options.D, A->col_blocks()));
+    } else {
+      regularizer.reset(new CompressedRowSparseMatrix(
+                            per_solve_options.D, num_cols));
+    }
+    A->AppendRows(*regularizer);
+  }
+
+  LinearSolver::Summary summary;
   switch (options_.sparse_linear_algebra_library_type) {
     case SUITE_SPARSE:
-      return SolveImplUsingSuiteSparse(A, b, per_solve_options, x);
+      summary = SolveImplUsingSuiteSparse(A, per_solve_options, x);
+      break;
     case CX_SPARSE:
-      return SolveImplUsingCXSparse(A, b, per_solve_options, x);
+      summary = SolveImplUsingCXSparse(A, per_solve_options, x);
+      break;
+    case EIGEN_SPARSE:
+      summary = SolveImplUsingEigen(A, per_solve_options, x);
+      break;
     default:
       LOG(FATAL) << "Unknown sparse linear algebra library : "
                  << options_.sparse_linear_algebra_library_type;
   }
 
-  LOG(FATAL) << "Unknown sparse linear algebra library : "
-             << options_.sparse_linear_algebra_library_type;
-  return LinearSolver::Summary();
+  if (per_solve_options.D != NULL) {
+    A->DeleteRows(num_cols);
+  }
+
+  return summary;
 }
 
-#ifndef CERES_NO_CXSPARSE
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingEigen(
+    CompressedRowSparseMatrix* A,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double * rhs_and_solution) {
+#ifndef CERES_USE_EIGEN_SPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
+      "because Ceres was not built with support for "
+      "Eigen's SimplicialLDLT decomposition. "
+      "This requires enabling building with -DEIGENSPARSE=ON.";
+  return summary;
+
+#else
+
+  EventLogger event_logger("SparseNormalCholeskySolver::Eigen::Solve");
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  // Compute the normal equations. J'J delta = J'f and solve them
+  // using a sparse Cholesky factorization. Notice that when compared
+  // to SuiteSparse we have to explicitly compute the normal equations
+  // before they can be factorized. CHOLMOD/SuiteSparse on the other
+  // hand can just work off of Jt to compute the Cholesky
+  // factorization of the normal equations.
+  //
+  // TODO(sameeragarwal): See note about how this maybe a bad idea for
+  // dynamic sparsity.
+  if (outer_product_.get() == NULL) {
+    outer_product_.reset(
+        CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+            *A, &pattern_));
+  }
+
+  CompressedRowSparseMatrix::ComputeOuterProduct(
+      *A, pattern_, outer_product_.get());
+
+  // Map to an upper triangular column major matrix.
+  //
+  // outer_product_ is a compressed row sparse matrix and in lower
+  // triangular form, when mapped to a compressed column sparse
+  // matrix, it becomes an upper triangular matrix.
+  Eigen::MappedSparseMatrix<double, Eigen::ColMajor> AtA(
+      outer_product_->num_rows(),
+      outer_product_->num_rows(),
+      outer_product_->num_nonzeros(),
+      outer_product_->mutable_rows(),
+      outer_product_->mutable_cols(),
+      outer_product_->mutable_values());
+
+  const Vector b = VectorRef(rhs_and_solution, outer_product_->num_rows());
+  if (simplicial_ldlt_.get() == NULL || options_.dynamic_sparsity) {
+    simplicial_ldlt_.reset(new SimplicialLDLT);
+    // This is a crappy way to be doing this. But right now Eigen does
+    // not expose a way to do symbolic analysis with a given
+    // permutation pattern, so we cannot use a block analysis of the
+    // Jacobian.
+    simplicial_ldlt_->analyzePattern(AtA);
+    if (simplicial_ldlt_->info() != Eigen::Success) {
+      summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+      summary.message =
+          "Eigen failure. Unable to find symbolic factorization.";
+      return summary;
+    }
+  }
+  event_logger.AddEvent("Analysis");
+
+  simplicial_ldlt_->factorize(AtA);
+  if (simplicial_ldlt_->info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message =
+        "Eigen failure. Unable to find numeric factorization.";
+    return summary;
+  }
+
+  VectorRef(rhs_and_solution, outer_product_->num_rows()) =
+      simplicial_ldlt_->solve(b);
+  if (simplicial_ldlt_->info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message =
+        "Eigen failure. Unable to do triangular solve.";
+    return summary;
+  }
+
+  event_logger.AddEvent("Solve");
+  return summary;
+#endif  // CERES_USE_EIGEN_SPARSE
+}
+
+
+
 LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
     CompressedRowSparseMatrix* A,
-    const double* b,
     const LinearSolver::PerSolveOptions& per_solve_options,
-    double * x) {
+    double * rhs_and_solution) {
+#ifdef CERES_NO_CXSPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_NORMAL_CHOLESKY cannot be used with CX_SPARSE "
+      "because Ceres was not built with support for CXSparse. "
+      "This requires enabling building with -DCXSPARSE=ON.";
+
+  return summary;
+
+#else
+
   EventLogger event_logger("SparseNormalCholeskySolver::CXSparse::Solve");
 
   LinearSolver::Summary summary;
   summary.num_iterations = 1;
-  const int num_cols = A->num_cols();
-  Vector Atb = Vector::Zero(num_cols);
-  A->LeftMultiply(b, Atb.data());
-
-  if (per_solve_options.D != NULL) {
-    // Temporarily append a diagonal block to the A matrix, but undo
-    // it before returning the matrix to the user.
-    CompressedRowSparseMatrix D(per_solve_options.D, num_cols);
-    A->AppendRows(D);
-  }
-
-  VectorRef(x, num_cols).setZero();
-
-  // Wrap the augmented Jacobian in a compressed sparse column matrix.
-  cs_di At = cxsparse_.CreateSparseMatrixTransposeView(A);
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
 
   // Compute the normal equations. J'J delta = J'f and solve them
   // using a sparse Cholesky factorization. Notice that when compared
-  // to SuiteSparse we have to explicitly compute the transpose of Jt,
-  // and then the normal equations before they can be
-  // factorized. CHOLMOD/SuiteSparse on the other hand can just work
-  // off of Jt to compute the Cholesky factorization of the normal
-  // equations.
-  cs_di* A2 = cxsparse_.TransposeMatrix(&At);
-  cs_di* AtA = cxsparse_.MatrixMatrixMultiply(&At, A2);
-
-  cxsparse_.Free(A2);
-  if (per_solve_options.D != NULL) {
-    A->DeleteRows(num_cols);
+  // to SuiteSparse we have to explicitly compute the normal equations
+  // before they can be factorized. CHOLMOD/SuiteSparse on the other
+  // hand can just work off of Jt to compute the Cholesky
+  // factorization of the normal equations.
+  //
+  // TODO(sameeragarwal): If dynamic sparsity is enabled, then this is
+  // not a good idea performance wise, since the jacobian has far too
+  // many entries and the program will go crazy with memory.
+  if (outer_product_.get() == NULL) {
+    outer_product_.reset(
+        CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+            *A, &pattern_));
   }
+
+  CompressedRowSparseMatrix::ComputeOuterProduct(
+      *A, pattern_, outer_product_.get());
+  cs_di AtA_view =
+      cxsparse_.CreateSparseMatrixTransposeView(outer_product_.get());
+  cs_di* AtA = &AtA_view;
+
   event_logger.AddEvent("Setup");
 
   // Compute symbolic factorization if not available.
+  if (options_.dynamic_sparsity) {
+    FreeFactorization();
+  }
   if (cxsparse_factor_ == NULL) {
     if (options_.use_postordering) {
-      cxsparse_factor_ =
-          CHECK_NOTNULL(cxsparse_.BlockAnalyzeCholesky(AtA,
-                                                       A->col_blocks(),
-                                                       A->col_blocks()));
+      cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(AtA,
+                                                        A->col_blocks(),
+                                                        A->col_blocks());
     } else {
-      cxsparse_factor_ =
-          CHECK_NOTNULL(cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA));
+      if (options_.dynamic_sparsity) {
+        cxsparse_factor_ = cxsparse_.AnalyzeCholesky(AtA);
+      } else {
+        cxsparse_factor_ = cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA);
+      }
     }
   }
   event_logger.AddEvent("Analysis");
 
-  // Solve the linear system.
-  if (cxsparse_.SolveCholesky(AtA, cxsparse_factor_, Atb.data())) {
-    VectorRef(x, Atb.rows()) = Atb;
-    summary.termination_type = TOLERANCE;
+  if (cxsparse_factor_ == NULL) {
+    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    summary.message =
+        "CXSparse failure. Unable to find symbolic factorization.";
+  } else if (!cxsparse_.SolveCholesky(AtA, cxsparse_factor_, rhs_and_solution)) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "CXSparse::SolveCholesky failed.";
   }
   event_logger.AddEvent("Solve");
 
-  cxsparse_.Free(AtA);
-  event_logger.AddEvent("Teardown");
   return summary;
-}
-#else
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
-    CompressedRowSparseMatrix* A,
-    const double* b,
-    const LinearSolver::PerSolveOptions& per_solve_options,
-    double * x) {
-  LOG(FATAL) << "No CXSparse support in Ceres.";
-
-  // Unreachable but MSVC does not know this.
-  return LinearSolver::Summary();
-}
 #endif
+}
 
-#ifndef CERES_NO_SUITESPARSE
 LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
     CompressedRowSparseMatrix* A,
-    const double* b,
     const LinearSolver::PerSolveOptions& per_solve_options,
-    double * x) {
+    double * rhs_and_solution) {
+#ifdef CERES_NO_SUITESPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_NORMAL_CHOLESKY cannot be used with SUITE_SPARSE "
+      "because Ceres was not built with support for SuiteSparse. "
+      "This requires enabling building with -DSUITESPARSE=ON.";
+  return summary;
+
+#else
+
   EventLogger event_logger("SparseNormalCholeskySolver::SuiteSparse::Solve");
+  LinearSolver::Summary summary;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.num_iterations = 1;
+  summary.message = "Success.";
 
   const int num_cols = A->num_cols();
-  LinearSolver::Summary summary;
-  Vector Atb = Vector::Zero(num_cols);
-  A->LeftMultiply(b, Atb.data());
-
-  if (per_solve_options.D != NULL) {
-    // Temporarily append a diagonal block to the A matrix, but undo it before
-    // returning the matrix to the user.
-    CompressedRowSparseMatrix D(per_solve_options.D, num_cols);
-    A->AppendRows(D);
-  }
-
-  VectorRef(x, num_cols).setZero();
-
   cholmod_sparse lhs = ss_.CreateSparseMatrixTransposeView(A);
-  cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols);
   event_logger.AddEvent("Setup");
 
+  if (options_.dynamic_sparsity) {
+    FreeFactorization();
+  }
   if (factor_ == NULL) {
     if (options_.use_postordering) {
-      factor_ =
-          CHECK_NOTNULL(ss_.BlockAnalyzeCholesky(&lhs,
-                                                 A->col_blocks(),
-                                                 A->row_blocks()));
+      factor_ = ss_.BlockAnalyzeCholesky(&lhs,
+                                         A->col_blocks(),
+                                         A->row_blocks(),
+                                         &summary.message);
     } else {
-      factor_ =
-      CHECK_NOTNULL(ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs));
+      if (options_.dynamic_sparsity) {
+        factor_ = ss_.AnalyzeCholesky(&lhs, &summary.message);
+      } else {
+        factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs, &summary.message);
+      }
     }
   }
-
   event_logger.AddEvent("Analysis");
 
-  cholmod_dense* sol = ss_.SolveCholesky(&lhs, factor_, rhs);
+  if (factor_ == NULL) {
+    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    // No need to set message as it has already been set by the
+    // symbolic analysis routines above.
+    return summary;
+  }
+
+  summary.termination_type = ss_.Cholesky(&lhs, factor_, &summary.message);
+  if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
+    return summary;
+  }
+
+  cholmod_dense* rhs = ss_.CreateDenseVector(rhs_and_solution, num_cols, num_cols);
+  cholmod_dense* solution = ss_.Solve(factor_, rhs, &summary.message);
   event_logger.AddEvent("Solve");
 
   ss_.Free(rhs);
-  rhs = NULL;
-
-  if (per_solve_options.D != NULL) {
-    A->DeleteRows(num_cols);
-  }
-
-  summary.num_iterations = 1;
-  if (sol != NULL) {
-    memcpy(x, sol->x, num_cols * sizeof(*x));
-
-    ss_.Free(sol);
-    sol = NULL;
-    summary.termination_type = TOLERANCE;
+  if (solution != NULL) {
+    memcpy(rhs_and_solution, solution->x, num_cols * sizeof(*rhs_and_solution));
+    ss_.Free(solution);
+  } else {
+    // No need to set message as it has already been set by the
+    // numeric factorization routine above.
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
   }
 
   event_logger.AddEvent("Teardown");
   return summary;
-}
-#else
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
-    CompressedRowSparseMatrix* A,
-    const double* b,
-    const LinearSolver::PerSolveOptions& per_solve_options,
-    double * x) {
-  LOG(FATAL) << "No SuiteSparse support in Ceres.";
-
-  // Unreachable but MSVC does not know this.
-  return LinearSolver::Summary();
-}
 #endif
+}
 
 }   // namespace internal
 }   // namespace ceres
-
-#endif  // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
diff --git a/internal/ceres/sparse_normal_cholesky_solver.h b/internal/ceres/sparse_normal_cholesky_solver.h
index 61111b4..6572835 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.h
+++ b/internal/ceres/sparse_normal_cholesky_solver.h
@@ -34,12 +34,17 @@
 #ifndef CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
 #define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
 
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
 
-#include "ceres/cxsparse.h"
 #include "ceres/internal/macros.h"
 #include "ceres/linear_solver.h"
 #include "ceres/suitesparse.h"
+#include "ceres/cxsparse.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/SparseCholesky"
+#endif
 
 namespace ceres {
 namespace internal {
@@ -62,16 +67,22 @@
 
   LinearSolver::Summary SolveImplUsingSuiteSparse(
       CompressedRowSparseMatrix* A,
-      const double* b,
       const LinearSolver::PerSolveOptions& options,
-      double* x);
+      double* rhs_and_solution);
 
   // Crashes if CSparse is not installed.
   LinearSolver::Summary SolveImplUsingCXSparse(
       CompressedRowSparseMatrix* A,
-      const double* b,
       const LinearSolver::PerSolveOptions& options,
-      double* x);
+      double* rhs_and_solution);
+
+  // Crashes if CERES_USE_EIGEN_SPARSE is not defined.
+  LinearSolver::Summary SolveImplUsingEigen(
+      CompressedRowSparseMatrix* A,
+      const LinearSolver::PerSolveOptions& options,
+      double* rhs_and_solution);
+
+  void FreeFactorization();
 
   SuiteSparse ss_;
   // Cached factorization
@@ -81,6 +92,14 @@
   // Cached factorization
   cs_dis* cxsparse_factor_;
 
+#ifdef CERES_USE_EIGEN_SPARSE
+  typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
+                                Eigen::Upper> SimplicialLDLT;
+  scoped_ptr<SimplicialLDLT> simplicial_ldlt_;
+#endif
+
+  scoped_ptr<CompressedRowSparseMatrix> outer_product_;
+  vector<int> pattern_;
   const LinearSolver::Options options_;
   CERES_DISALLOW_COPY_AND_ASSIGN(SparseNormalCholeskySolver);
 };
@@ -88,5 +107,4 @@
 }  // namespace internal
 }  // namespace ceres
 
-#endif  // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
 #endif  // CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
diff --git a/internal/ceres/stringprintf.cc b/internal/ceres/stringprintf.cc
index ce20467..0f85f05 100644
--- a/internal/ceres/stringprintf.cc
+++ b/internal/ceres/stringprintf.cc
@@ -43,7 +43,9 @@
 
 #ifdef _MSC_VER
 enum { IS_COMPILER_MSVC = 1 };
-#define va_copy(d,s) ((d) = (s))
+#if _MSC_VER < 1800
+#define va_copy(d, s) ((d) = (s))
+#endif
 #else
 enum { IS_COMPILER_MSVC = 0 };
 #endif
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
index 9de32fd..1df7566 100644
--- a/internal/ceres/suitesparse.cc
+++ b/internal/ceres/suitesparse.cc
@@ -28,6 +28,9 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_SUITESPARSE
 #include "ceres/suitesparse.h"
 
@@ -35,6 +38,7 @@
 #include "cholmod.h"
 #include "ceres/compressed_col_sparse_matrix_utils.h"
 #include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/linear_solver.h"
 #include "ceres/triplet_sparse_matrix.h"
 
 namespace ceres {
@@ -120,7 +124,8 @@
     return v;
 }
 
-cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A) {
+cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A,
+                                             string* message) {
   // Cholmod can try multiple re-ordering strategies to find a fill
   // reducing ordering. Here we just tell it use AMD with automatic
   // matrix dependence choice of supernodal versus simplicial
@@ -130,31 +135,35 @@
   cc_.supernodal = CHOLMOD_AUTO;
 
   cholmod_factor* factor = cholmod_analyze(A, &cc_);
-  CHECK_EQ(cc_.status, CHOLMOD_OK)
-      << "Cholmod symbolic analysis failed " << cc_.status;
-  CHECK_NOTNULL(factor);
-
   if (VLOG_IS_ON(2)) {
     cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
   }
 
-  return factor;
+  if (cc_.status != CHOLMOD_OK) {
+    *message = StringPrintf("cholmod_analyze failed. error code: %d",
+                            cc_.status);
+    return NULL;
+  }
+
+  return CHECK_NOTNULL(factor);
 }
 
 cholmod_factor* SuiteSparse::BlockAnalyzeCholesky(
     cholmod_sparse* A,
     const vector<int>& row_blocks,
-    const vector<int>& col_blocks) {
+    const vector<int>& col_blocks,
+    string* message) {
   vector<int> ordering;
   if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) {
     return NULL;
   }
-  return AnalyzeCholeskyWithUserOrdering(A, ordering);
+  return AnalyzeCholeskyWithUserOrdering(A, ordering, message);
 }
 
 cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(
     cholmod_sparse* A,
-    const vector<int>& ordering) {
+    const vector<int>& ordering,
+    string* message) {
   CHECK_EQ(ordering.size(), A->nrow);
 
   cc_.nmethods = 1;
@@ -162,33 +171,36 @@
 
   cholmod_factor* factor  =
       cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), NULL, 0, &cc_);
-  CHECK_EQ(cc_.status, CHOLMOD_OK)
-      << "Cholmod symbolic analysis failed " << cc_.status;
-  CHECK_NOTNULL(factor);
-
   if (VLOG_IS_ON(2)) {
     cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
   }
+  if (cc_.status != CHOLMOD_OK) {
+    *message = StringPrintf("cholmod_analyze failed. error code: %d",
+                            cc_.status);
+    return NULL;
+  }
 
-  return factor;
+  return CHECK_NOTNULL(factor);
 }
 
 cholmod_factor* SuiteSparse::AnalyzeCholeskyWithNaturalOrdering(
-    cholmod_sparse* A) {
+    cholmod_sparse* A,
+    string* message) {
   cc_.nmethods = 1;
   cc_.method[0].ordering = CHOLMOD_NATURAL;
   cc_.postorder = 0;
 
   cholmod_factor* factor  = cholmod_analyze(A, &cc_);
-  CHECK_EQ(cc_.status, CHOLMOD_OK)
-      << "Cholmod symbolic analysis failed " << cc_.status;
-  CHECK_NOTNULL(factor);
-
   if (VLOG_IS_ON(2)) {
     cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
   }
+  if (cc_.status != CHOLMOD_OK) {
+    *message = StringPrintf("cholmod_analyze failed. error code: %d",
+                            cc_.status);
+    return NULL;
+  }
 
-  return factor;
+  return CHECK_NOTNULL(factor);
 }
 
 bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A,
@@ -233,7 +245,9 @@
   return true;
 }
 
-bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
+LinearSolverTerminationType SuiteSparse::Cholesky(cholmod_sparse* A,
+                                                  cholmod_factor* L,
+                                                  string* message) {
   CHECK_NOTNULL(A);
   CHECK_NOTNULL(L);
 
@@ -245,7 +259,7 @@
   cc_.print = 0;
 
   cc_.quick_return_if_not_posdef = 1;
-  int status = cholmod_factorize(A, L, &cc_);
+  int cholmod_status = cholmod_factorize(A, L, &cc_);
   cc_.print = old_print_level;
 
   // TODO(sameeragarwal): This switch statement is not consistent. It
@@ -257,84 +271,73 @@
   // (e.g. out of memory).
   switch (cc_.status) {
     case CHOLMOD_NOT_INSTALLED:
-      LOG(WARNING) << "CHOLMOD failure: Method not installed.";
-      return false;
+      *message = "CHOLMOD failure: Method not installed.";
+      return LINEAR_SOLVER_FATAL_ERROR;
     case CHOLMOD_OUT_OF_MEMORY:
-      LOG(WARNING) << "CHOLMOD failure: Out of memory.";
-      return false;
+      *message = "CHOLMOD failure: Out of memory.";
+      return LINEAR_SOLVER_FATAL_ERROR;
     case CHOLMOD_TOO_LARGE:
-      LOG(WARNING) << "CHOLMOD failure: Integer overflow occured.";
-      return false;
+      *message = "CHOLMOD failure: Integer overflow occurred.";
+      return LINEAR_SOLVER_FATAL_ERROR;
     case CHOLMOD_INVALID:
-      LOG(WARNING) << "CHOLMOD failure: Invalid input.";
-      return false;
+      *message = "CHOLMOD failure: Invalid input.";
+      return LINEAR_SOLVER_FATAL_ERROR;
     case CHOLMOD_NOT_POSDEF:
-      // TODO(sameeragarwal): These two warnings require more
-      // sophisticated handling going forward. For now we will be
-      // strict and treat them as failures.
-      LOG(WARNING) << "CHOLMOD warning: Matrix not positive definite.";
-      return false;
+      *message = "CHOLMOD warning: Matrix not positive definite.";
+      return LINEAR_SOLVER_FAILURE;
     case CHOLMOD_DSMALL:
-      LOG(WARNING) << "CHOLMOD warning: D for LDL' or diag(L) or "
-                   << "LL' has tiny absolute value.";
-      return false;
+      *message = "CHOLMOD warning: D for LDL' or diag(L) or "
+                "LL' has tiny absolute value.";
+      return LINEAR_SOLVER_FAILURE;
     case CHOLMOD_OK:
-      if (status != 0) {
-        return true;
+      if (cholmod_status != 0) {
+        return LINEAR_SOLVER_SUCCESS;
       }
-      LOG(WARNING) << "CHOLMOD failure: cholmod_factorize returned zero "
-                   << "but cholmod_common::status is CHOLMOD_OK."
-                   << "Please report this to ceres-solver@googlegroups.com.";
-      return false;
+
+      *message = "CHOLMOD failure: cholmod_factorize returned false "
+          "but cholmod_common::status is CHOLMOD_OK. "
+          "Please report this to ceres-solver@googlegroups.com.";
+      return LINEAR_SOLVER_FATAL_ERROR;
     default:
-      LOG(WARNING) << "Unknown cholmod return code. "
-                   << "Please report this to ceres-solver@googlegroups.com.";
-      return false;
+      *message =
+          StringPrintf("Unknown cholmod return code: %d. "
+                       "Please report this to ceres-solver@googlegroups.com.",
+                       cc_.status);
+      return LINEAR_SOLVER_FATAL_ERROR;
   }
-  return false;
+
+  return LINEAR_SOLVER_FATAL_ERROR;
 }
 
 cholmod_dense* SuiteSparse::Solve(cholmod_factor* L,
-                                  cholmod_dense* b) {
+                                  cholmod_dense* b,
+                                  string* message) {
   if (cc_.status != CHOLMOD_OK) {
-    LOG(WARNING) << "CHOLMOD status NOT OK";
+    *message = "cholmod_solve failed. CHOLMOD status is not CHOLMOD_OK";
     return NULL;
   }
 
   return cholmod_solve(CHOLMOD_A, L, b, &cc_);
 }
 
-cholmod_dense* SuiteSparse::SolveCholesky(cholmod_sparse* A,
-                                          cholmod_factor* L,
-                                          cholmod_dense* b) {
-  CHECK_NOTNULL(A);
-  CHECK_NOTNULL(L);
-  CHECK_NOTNULL(b);
-
-  if (Cholesky(A, L)) {
-    return Solve(L, b);
-  }
-
-  return NULL;
-}
-
-void SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
+bool SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
                                                    int* ordering) {
-  cholmod_amd(matrix, NULL, 0, ordering, &cc_);
+  return cholmod_amd(matrix, NULL, 0, ordering, &cc_);
 }
 
-void SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering(
+bool SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering(
     cholmod_sparse* matrix,
     int* constraints,
     int* ordering) {
 #ifndef CERES_NO_CAMD
-  cholmod_camd(matrix, NULL, 0, constraints, ordering, &cc_);
+  return cholmod_camd(matrix, NULL, 0, constraints, ordering, &cc_);
 #else
   LOG(FATAL) << "Congratulations you have found a bug in Ceres."
              << "Ceres Solver was compiled with SuiteSparse "
              << "version 4.1.0 or less. Calling this function "
              << "in that case is a bug. Please contact the"
              << "the Ceres Solver developers.";
+  return false;
 #endif
 }
 
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
index 16f298e..baab899 100644
--- a/internal/ceres/suitesparse.h
+++ b/internal/ceres/suitesparse.h
@@ -33,6 +33,8 @@
 #ifndef CERES_INTERNAL_SUITESPARSE_H_
 #define CERES_INTERNAL_SUITESPARSE_H_
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
 
 #ifndef CERES_NO_SUITESPARSE
 
@@ -41,6 +43,7 @@
 #include <vector>
 
 #include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
 #include "cholmod.h"
 #include "glog/logging.h"
 #include "SuiteSparseQR.hpp"
@@ -138,12 +141,15 @@
   // A is not modified, only the pattern of non-zeros of A is used,
   // the actual numerical values in A are of no consequence.
   //
+  // message contains an explanation of the failures if any.
+  //
   // Caller owns the result.
-  cholmod_factor* AnalyzeCholesky(cholmod_sparse* A);
+  cholmod_factor* AnalyzeCholesky(cholmod_sparse* A, string* message);
 
   cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A,
                                        const vector<int>& row_blocks,
-                                       const vector<int>& col_blocks);
+                                       const vector<int>& col_blocks,
+                                       string* message);
 
   // If A is symmetric, then compute the symbolic Cholesky
   // factorization of A(ordering, ordering). If A is unsymmetric, then
@@ -153,33 +159,38 @@
   // A is not modified, only the pattern of non-zeros of A is used,
   // the actual numerical values in A are of no consequence.
   //
+  // message contains an explanation of the failures if any.
+  //
   // Caller owns the result.
   cholmod_factor* AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A,
-                                                  const vector<int>& ordering);
+                                                  const vector<int>& ordering,
+                                                  string* message);
 
   // Perform a symbolic factorization of A without re-ordering A. No
   // postordering of the elimination tree is performed. This ensures
   // that the symbolic factor does not introduce an extra permutation
   // on the matrix. See the documentation for CHOLMOD for more details.
-  cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A);
+  //
+  // message contains an explanation of the failures if any.
+  cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A,
+                                                     string* message);
 
   // Use the symbolic factorization in L, to find the numerical
   // factorization for the matrix A or AA^T. Return true if
   // successful, false otherwise. L contains the numeric factorization
   // on return.
-  bool Cholesky(cholmod_sparse* A, cholmod_factor* L);
+  //
+  // message contains an explanation of the failures if any.
+  LinearSolverTerminationType Cholesky(cholmod_sparse* A,
+                                       cholmod_factor* L,
+                                       string* message);
 
   // Given a Cholesky factorization of a matrix A = LL^T, solve the
   // linear system Ax = b, and return the result. If the Solve fails
   // NULL is returned. Caller owns the result.
-  cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b);
-
-  // Combine the calls to Cholesky and Solve into a single call. If
-  // the cholesky factorization or the solve fails, return
-  // NULL. Caller owns the result.
-  cholmod_dense* SolveCholesky(cholmod_sparse* A,
-                               cholmod_factor* L,
-                               cholmod_dense* b);
+  //
+  // message contains an explanation of the failures if any.
+  cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b, string* message);
 
   // By virtue of the modeling layer in Ceres being block oriented,
   // all the matrices used by Ceres are also block oriented. When
@@ -211,7 +222,7 @@
   // Find a fill reducing approximate minimum degree
   // ordering. ordering is expected to be large enough to hold the
   // ordering.
-  void ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);
+  bool ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);
 
 
   // Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
@@ -241,7 +252,7 @@
   //
   // If CERES_NO_CAMD is defined then calling this function will
   // result in a crash.
-  void ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
+  bool ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
                                                    int* constraints,
                                                    int* ordering);
 
@@ -272,9 +283,24 @@
 
 #else  // CERES_NO_SUITESPARSE
 
-class SuiteSparse {};
 typedef void cholmod_factor;
 
+class SuiteSparse {
+ public:
+  // Defining this static function even when SuiteSparse is not
+  // available, allows client code to check for the presence of CAMD
+  // without checking for the absence of the CERES_NO_CAMD symbol.
+  //
+  // This is safer because the symbol may be missing due to a user
+  // accidentally not including suitesparse.h in their code when
+  // checking for the symbol.
+  static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
+    return false;
+  }
+
+  void Free(void*) {};
+};
+
 #endif  // CERES_NO_SUITESPARSE
 
 #endif  // CERES_INTERNAL_SUITESPARSE_H_
diff --git a/internal/ceres/summary_utils.cc b/internal/ceres/summary_utils.cc
new file mode 100644
index 0000000..243030c
--- /dev/null
+++ b/internal/ceres/summary_utils.cc
@@ -0,0 +1,66 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <algorithm>
+#include "ceres/summary_utils.h"
+#include "ceres/program.h"
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+void SetSummaryFinalCost(Solver::Summary* summary) {
+  summary->final_cost = summary->initial_cost;
+  // We need the loop here, instead of just looking at the last
+  // iteration because the minimizer may be making non-monotonic steps.
+  for (int i = 0; i < summary->iterations.size(); ++i) {
+    const IterationSummary& iteration_summary = summary->iterations[i];
+    summary->final_cost = min(iteration_summary.cost, summary->final_cost);
+  }
+}
+
+void SummarizeGivenProgram(const Program& program, Solver::Summary* summary) {
+  summary->num_parameter_blocks     = program.NumParameterBlocks();
+  summary->num_parameters           = program.NumParameters();
+  summary->num_effective_parameters = program.NumEffectiveParameters();
+  summary->num_residual_blocks      = program.NumResidualBlocks();
+  summary->num_residuals            = program.NumResiduals();
+}
+
+void SummarizeReducedProgram(const Program& program, Solver::Summary* summary) {
+  summary->num_parameter_blocks_reduced     = program.NumParameterBlocks();
+  summary->num_parameters_reduced           = program.NumParameters();
+  summary->num_effective_parameters_reduced = program.NumEffectiveParameters();
+  summary->num_residual_blocks_reduced      = program.NumResidualBlocks();
+  summary->num_residuals_reduced            = program.NumResiduals();
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/summary_utils.h b/internal/ceres/summary_utils.h
new file mode 100644
index 0000000..9b07987
--- /dev/null
+++ b/internal/ceres/summary_utils.h
@@ -0,0 +1,49 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SUMMARY_UTILS_H_
+#define CERES_INTERNAL_SUMMARY_UTILS_H_
+
+#include <vector>
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+
+void SummarizeGivenProgram(const Program& program, Solver::Summary* summary);
+void SummarizeReducedProgram(const Program& program, Solver::Summary* summary);
+void SetSummaryFinalCost(Solver::Summary* summary);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SUMMARY_UTILS_H_
diff --git a/internal/ceres/symmetric_linear_solver_test.cc b/internal/ceres/symmetric_linear_solver_test.cc
index f33adb4..ac5a774 100644
--- a/internal/ceres/symmetric_linear_solver_test.cc
+++ b/internal/ceres/symmetric_linear_solver_test.cc
@@ -71,7 +71,7 @@
   LinearSolver::Summary summary =
       solver.Solve(A.get(), b.data(), per_solve_options, x.data());
 
-  EXPECT_EQ(summary.termination_type, TOLERANCE);
+  EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
   ASSERT_EQ(summary.num_iterations, 1);
 
   ASSERT_DOUBLE_EQ(1, x(0));
@@ -128,7 +128,7 @@
   LinearSolver::Summary summary =
       solver.Solve(A.get(), b.data(), per_solve_options, x.data());
 
-  EXPECT_EQ(summary.termination_type, TOLERANCE);
+  EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
 
   ASSERT_DOUBLE_EQ(0, x(0));
   ASSERT_DOUBLE_EQ(1, x(1));
diff --git a/internal/ceres/system_test.cc b/internal/ceres/system_test.cc
index 7b0e02d..be56f20 100644
--- a/internal/ceres/system_test.cc
+++ b/internal/ceres/system_test.cc
@@ -43,6 +43,8 @@
 #include <cstdlib>
 #include <string>
 
+#include "ceres/internal/port.h"
+
 #include "ceres/autodiff_cost_function.h"
 #include "ceres/ordered_groups.h"
 #include "ceres/problem.h"
@@ -63,9 +65,10 @@
 
 // Struct used for configuring the solver.
 struct SolverConfig {
-  SolverConfig(LinearSolverType linear_solver_type,
-               SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
-               bool use_automatic_ordering)
+  SolverConfig(
+      LinearSolverType linear_solver_type,
+      SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+      bool use_automatic_ordering)
       : linear_solver_type(linear_solver_type),
         sparse_linear_algebra_library_type(sparse_linear_algebra_library_type),
         use_automatic_ordering(use_automatic_ordering),
@@ -73,10 +76,11 @@
         num_threads(1) {
   }
 
-  SolverConfig(LinearSolverType linear_solver_type,
-               SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
-               bool use_automatic_ordering,
-               PreconditionerType preconditioner_type)
+  SolverConfig(
+      LinearSolverType linear_solver_type,
+      SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+      bool use_automatic_ordering,
+      PreconditionerType preconditioner_type)
       : linear_solver_type(linear_solver_type),
         sparse_linear_algebra_library_type(sparse_linear_algebra_library_type),
         use_automatic_ordering(use_automatic_ordering),
@@ -88,7 +92,8 @@
     return StringPrintf(
         "(%s, %s, %s, %s, %d)",
         LinearSolverTypeToString(linear_solver_type),
-        SparseLinearAlgebraLibraryTypeToString(sparse_linear_algebra_library_type),
+        SparseLinearAlgebraLibraryTypeToString(
+            sparse_linear_algebra_library_type),
         use_automatic_ordering ? "AUTOMATIC" : "USER",
         PreconditionerTypeToString(preconditioner_type),
         num_threads);
@@ -137,8 +142,7 @@
     options.num_linear_solver_threads = config.num_threads;
 
     if (config.use_automatic_ordering) {
-      delete options.linear_solver_ordering;
-      options.linear_solver_ordering = NULL;
+      options.linear_solver_ordering.reset();
     }
 
     LOG(INFO) << "Running solver configuration: "
@@ -157,7 +161,7 @@
                    NULL,
                    NULL);
 
-    CHECK_NE(summary.termination_type, ceres::NUMERICAL_FAILURE)
+    CHECK_NE(summary.termination_type, ceres::FAILURE)
         << "Solver configuration " << i << " failed.";
     problems.push_back(system_test_problem);
 
@@ -395,7 +399,7 @@
       problem_.AddResidualBlock(cost_function, NULL, camera, point);
     }
 
-    options_.linear_solver_ordering = new ParameterBlockOrdering;
+    options_.linear_solver_ordering.reset(new ParameterBlockOrdering);
 
     // The points come before the cameras.
     for (int i = 0; i < num_points_; ++i) {
@@ -491,40 +495,45 @@
                                  ordering,                              \
                                  preconditioner))
 
+  CONFIGURE(DENSE_SCHUR,            SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
+  CONFIGURE(DENSE_SCHUR,            SUITE_SPARSE, kUserOrdering,      IDENTITY);
+
+  CONFIGURE(CGNR,                   SUITE_SPARSE, kAutomaticOrdering, JACOBI);
+
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      JACOBI);
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, JACOBI);
+
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      SCHUR_JACOBI);
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, SCHUR_JACOBI);
+
 #ifndef CERES_NO_SUITESPARSE
   CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
   CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kUserOrdering,      IDENTITY);
 
   CONFIGURE(SPARSE_SCHUR,           SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
   CONFIGURE(SPARSE_SCHUR,           SUITE_SPARSE, kUserOrdering,      IDENTITY);
+
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, CLUSTER_JACOBI);
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      CLUSTER_JACOBI);
+
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, CLUSTER_TRIDIAGONAL);
+  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      CLUSTER_TRIDIAGONAL);
 #endif  // CERES_NO_SUITESPARSE
 
 #ifndef CERES_NO_CXSPARSE
+  CONFIGURE(SPARSE_NORMAL_CHOLESKY, CX_SPARSE,    kAutomaticOrdering, IDENTITY);
+  CONFIGURE(SPARSE_NORMAL_CHOLESKY, CX_SPARSE,    kUserOrdering,      IDENTITY);
+
   CONFIGURE(SPARSE_SCHUR,           CX_SPARSE,    kAutomaticOrdering, IDENTITY);
   CONFIGURE(SPARSE_SCHUR,           CX_SPARSE,    kUserOrdering,      IDENTITY);
 #endif  // CERES_NO_CXSPARSE
 
-  CONFIGURE(DENSE_SCHUR,            SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
-  CONFIGURE(DENSE_SCHUR,            SUITE_SPARSE, kUserOrdering,      IDENTITY);
-
-  CONFIGURE(CGNR,                   SUITE_SPARSE, kAutomaticOrdering, JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      SCHUR_JACOBI);
-
-#ifndef CERES_NO_SUITESPARSE
-
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      CLUSTER_JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kUserOrdering,      CLUSTER_TRIDIAGONAL);
-#endif  // CERES_NO_SUITESPARSE
-
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, SCHUR_JACOBI);
-
-#ifndef CERES_NO_SUITESPARSE
-
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, CLUSTER_JACOBI);
-  CONFIGURE(ITERATIVE_SCHUR,        SUITE_SPARSE, kAutomaticOrdering, CLUSTER_TRIDIAGONAL);
-#endif  // CERES_NO_SUITESPARSE
+#ifdef CERES_USE_EIGEN_SPARSE
+  CONFIGURE(SPARSE_SCHUR,           EIGEN_SPARSE, kAutomaticOrdering, IDENTITY);
+  CONFIGURE(SPARSE_SCHUR,           EIGEN_SPARSE, kUserOrdering,      IDENTITY);
+  CONFIGURE(SPARSE_NORMAL_CHOLESKY, EIGEN_SPARSE, kAutomaticOrdering, IDENTITY);
+  CONFIGURE(SPARSE_NORMAL_CHOLESKY, EIGEN_SPARSE, kUserOrdering,      IDENTITY);
+#endif  // CERES_USE_EIGEN_SPARSE
 
 #undef CONFIGURE
 
diff --git a/internal/ceres/test_util.cc b/internal/ceres/test_util.cc
index a3f67bd..8af48ab 100644
--- a/internal/ceres/test_util.cc
+++ b/internal/ceres/test_util.cc
@@ -30,6 +30,7 @@
 //
 // Utility functions useful for testing.
 
+#include <algorithm>
 #include <cmath>
 #include "ceres/file.h"
 #include "ceres/stringprintf.h"
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index 03d6c8e..4be5619 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -44,6 +44,7 @@
 #include "ceres/file.h"
 #include "ceres/internal/eigen.h"
 #include "ceres/internal/scoped_ptr.h"
+#include "ceres/line_search.h"
 #include "ceres/linear_least_squares_problems.h"
 #include "ceres/sparse_matrix.h"
 #include "ceres/stringprintf.h"
@@ -55,8 +56,53 @@
 namespace ceres {
 namespace internal {
 namespace {
-// Small constant for various floating point issues.
-const double kEpsilon = 1e-12;
+
+LineSearch::Summary DoLineSearch(const Minimizer::Options& options,
+                                 const Vector& x,
+                                 const Vector& gradient,
+                                 const double cost,
+                                 const Vector& delta,
+                                 Evaluator* evaluator) {
+  LineSearchFunction line_search_function(evaluator);
+
+  LineSearch::Options line_search_options;
+  line_search_options.is_silent = true;
+  line_search_options.interpolation_type =
+      options.line_search_interpolation_type;
+  line_search_options.min_step_size = options.min_line_search_step_size;
+  line_search_options.sufficient_decrease =
+      options.line_search_sufficient_function_decrease;
+  line_search_options.max_step_contraction =
+      options.max_line_search_step_contraction;
+  line_search_options.min_step_contraction =
+      options.min_line_search_step_contraction;
+  line_search_options.max_num_iterations =
+      options.max_num_line_search_step_size_iterations;
+  line_search_options.sufficient_curvature_decrease =
+      options.line_search_sufficient_curvature_decrease;
+  line_search_options.max_step_expansion =
+      options.max_line_search_step_expansion;
+  line_search_options.function = &line_search_function;
+
+  string message;
+  scoped_ptr<LineSearch>
+      line_search(CHECK_NOTNULL(
+                      LineSearch::Create(ceres::ARMIJO,
+                                         line_search_options,
+                                         &message)));
+  LineSearch::Summary summary;
+  line_search_function.Init(x, delta);
+  // Try the trust region step.
+  line_search->Search(1.0, cost, gradient.dot(delta), &summary);
+  if (!summary.success) {
+    // If that was not successful, try the negative gradient as a
+    // search direction.
+    line_search_function.Init(x, -gradient);
+    line_search->Search(1.0, cost, -gradient.squaredNorm(), &summary);
+  }
+  return summary;
+}
+
 }  // namespace
 
 // Compute a scaling vector that is used to improve the conditioning
@@ -82,22 +128,29 @@
   double iteration_start_time =  start_time;
   Init(options);
 
-  summary->termination_type = NO_CONVERGENCE;
-  summary->num_successful_steps = 0;
-  summary->num_unsuccessful_steps = 0;
-
   Evaluator* evaluator = CHECK_NOTNULL(options_.evaluator);
   SparseMatrix* jacobian = CHECK_NOTNULL(options_.jacobian);
   TrustRegionStrategy* strategy = CHECK_NOTNULL(options_.trust_region_strategy);
 
+  const bool is_not_silent = !options.is_silent;
+
+  // If the problem is bounds constrained, then enable the use of a
+  // line search after the trust region step has been computed. This
+  // line search will automatically use a projected test point onto
+  // the feasible set, thereby guaranteeing the feasibility of the
+  // final output.
+  //
+  // TODO(sameeragarwal): Make line search available more generally.
+  const bool use_line_search = options.is_constrained;
+
+  summary->termination_type = NO_CONVERGENCE;
+  summary->num_successful_steps = 0;
+  summary->num_unsuccessful_steps = 0;
+
   const int num_parameters = evaluator->NumParameters();
   const int num_effective_parameters = evaluator->NumEffectiveParameters();
   const int num_residuals = evaluator->NumResiduals();
 
-  VectorRef x_min(parameters, num_parameters);
-  Vector x = x_min;
-  double x_norm = x.norm();
-
   Vector residuals(num_residuals);
   Vector trust_region_step(num_effective_parameters);
   Vector delta(num_effective_parameters);
@@ -105,6 +158,8 @@
   Vector gradient(num_effective_parameters);
   Vector model_residuals(num_residuals);
   Vector scale(num_effective_parameters);
+  Vector negative_gradient(num_effective_parameters);
+  Vector projected_gradient_step(num_parameters);
 
   IterationSummary iteration_summary;
   iteration_summary.iteration = 0;
@@ -112,15 +167,32 @@
   iteration_summary.step_is_successful = false;
   iteration_summary.cost_change = 0.0;
   iteration_summary.gradient_max_norm = 0.0;
+  iteration_summary.gradient_norm = 0.0;
   iteration_summary.step_norm = 0.0;
   iteration_summary.relative_decrease = 0.0;
   iteration_summary.trust_region_radius = strategy->Radius();
-  // TODO(sameeragarwal): Rename eta to linear_solver_accuracy or
-  // something similar across the board.
   iteration_summary.eta = options_.eta;
   iteration_summary.linear_solver_iterations = 0;
   iteration_summary.step_solver_time_in_seconds = 0;
 
+  VectorRef x_min(parameters, num_parameters);
+  Vector x = x_min;
+  // Project onto the feasible set.
+  if (options.is_constrained) {
+    delta.setZero();
+    if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
+      summary->message =
+          "Unable to project initial point onto the feasible set.";
+      summary->termination_type = FAILURE;
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+      return;
+    }
+    x_min = x_plus_delta;
+    x = x_plus_delta;
+  }
+
+  double x_norm = x.norm();
+
   // Do initial cost and Jacobian evaluation.
   double cost = 0.0;
   if (!evaluator->Evaluate(x.data(),
@@ -128,38 +200,45 @@
                            residuals.data(),
                            gradient.data(),
                            jacobian)) {
-    LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed.";
-    summary->termination_type = NUMERICAL_FAILURE;
+    summary->message = "Residual and Jacobian evaluation failed.";
+    summary->termination_type = FAILURE;
+    LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
     return;
   }
 
-  int num_consecutive_nonmonotonic_steps = 0;
-  double minimum_cost = cost;
-  double reference_cost = cost;
-  double accumulated_reference_model_cost_change = 0.0;
-  double candidate_cost = cost;
-  double accumulated_candidate_model_cost_change = 0.0;
+  negative_gradient = -gradient;
+  if (!evaluator->Plus(x.data(),
+                       negative_gradient.data(),
+                       projected_gradient_step.data())) {
+    summary->message = "Unable to compute gradient step.";
+    summary->termination_type = FAILURE;
+    LOG(ERROR) << "Terminating: " << summary->message;
+    return;
+  }
 
   summary->initial_cost = cost + summary->fixed_cost;
   iteration_summary.cost = cost + summary->fixed_cost;
-  iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
+  iteration_summary.gradient_max_norm =
+    (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
+  iteration_summary.gradient_norm = (x - projected_gradient_step).norm();
 
-  // The initial gradient max_norm is bounded from below so that we do
-  // not divide by zero.
-  const double initial_gradient_max_norm =
-      max(iteration_summary.gradient_max_norm, kEpsilon);
-  const double absolute_gradient_tolerance =
-      options_.gradient_tolerance * initial_gradient_max_norm;
-
-  if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
-    summary->termination_type = GRADIENT_TOLERANCE;
-    VLOG(1) << "Terminating: Gradient tolerance reached."
-            << "Relative gradient max norm: "
-            << iteration_summary.gradient_max_norm / initial_gradient_max_norm
-            << " <= " << options_.gradient_tolerance;
+  if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+    summary->message = StringPrintf("Gradient tolerance reached. "
+                                    "Gradient max norm: %e <= %e",
+                                    iteration_summary.gradient_max_norm,
+                                    options_.gradient_tolerance);
+    summary->termination_type = CONVERGENCE;
+    VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
     return;
   }
 
+  if (options_.jacobi_scaling) {
+    EstimateScale(*jacobian, scale.data());
+    jacobian->ScaleColumns(scale.data());
+  } else {
+    scale.setOnes();
+  }
+
   iteration_summary.iteration_time_in_seconds =
       WallTimeInSeconds() - iteration_start_time;
   iteration_summary.cumulative_time_in_seconds =
@@ -167,34 +246,35 @@
       + summary->preprocessor_time_in_seconds;
   summary->iterations.push_back(iteration_summary);
 
-  if (options_.jacobi_scaling) {
-    EstimateScale(*jacobian, scale.data());
-    jacobian->ScaleColumns(scale.data());
-  } else {
-    scale.setOnes();
-  }
-
+  int num_consecutive_nonmonotonic_steps = 0;
+  double minimum_cost = cost;
+  double reference_cost = cost;
+  double accumulated_reference_model_cost_change = 0.0;
+  double candidate_cost = cost;
+  double accumulated_candidate_model_cost_change = 0.0;
   int num_consecutive_invalid_steps = 0;
   bool inner_iterations_are_enabled = options.inner_iteration_minimizer != NULL;
   while (true) {
     bool inner_iterations_were_useful = false;
-    if (!RunCallbacks(options.callbacks, iteration_summary, summary)) {
+    if (!RunCallbacks(options, iteration_summary, summary)) {
       return;
     }
 
     iteration_start_time = WallTimeInSeconds();
     if (iteration_summary.iteration >= options_.max_num_iterations) {
+      summary->message = "Maximum number of iterations reached.";
       summary->termination_type = NO_CONVERGENCE;
-      VLOG(1) << "Terminating: Maximum number of iterations reached.";
-      break;
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      return;
     }
 
     const double total_solver_time = iteration_start_time - start_time +
         summary->preprocessor_time_in_seconds;
     if (total_solver_time >= options_.max_solver_time_in_seconds) {
+      summary->message = "Maximum solver time reached.";
       summary->termination_type = NO_CONVERGENCE;
-      VLOG(1) << "Terminating: Maximum solver time reached.";
-      break;
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      return;
     }
 
     const double strategy_start_time = WallTimeInSeconds();
@@ -221,6 +301,15 @@
                               residuals.data(),
                               trust_region_step.data());
 
+    if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+      summary->message =
+          "Linear solver failed due to unrecoverable "
+          "non-numeric causes. Please see the error log for clues. ";
+      summary->termination_type = FAILURE;
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+      return;
+    }
+
     iteration_summary = IterationSummary();
     iteration_summary.iteration = summary->iterations.back().iteration + 1;
     iteration_summary.step_solver_time_in_seconds =
@@ -231,7 +320,7 @@
     iteration_summary.step_is_successful = false;
 
     double model_cost_change = 0.0;
-    if (strategy_summary.termination_type != FAILURE) {
+    if (strategy_summary.termination_type != LINEAR_SOLVER_FAILURE) {
       // new_model_cost
       //  = 1/2 [f + J * step]^2
       //  = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ]
@@ -245,9 +334,10 @@
           - model_residuals.dot(residuals + model_residuals / 2.0);
 
       if (model_cost_change < 0.0) {
-        VLOG(1) << "Invalid step: current_cost: " << cost
-                << " absolute difference " << model_cost_change
-                << " relative difference " << (model_cost_change / cost);
+        VLOG_IF(1, is_not_silent)
+            << "Invalid step: current_cost: " << cost
+            << " absolute difference " << model_cost_change
+            << " relative difference " << (model_cost_change / cost);
       } else {
         iteration_summary.step_is_valid = true;
       }
@@ -256,16 +346,15 @@
     if (!iteration_summary.step_is_valid) {
       // Invalid steps can happen due to a number of reasons, and we
       // allow a limited number of successive failures, and return with
-      // NUMERICAL_FAILURE if this limit is exceeded.
+      // FAILURE if this limit is exceeded.
       if (++num_consecutive_invalid_steps >=
           options_.max_num_consecutive_invalid_steps) {
-        summary->termination_type = NUMERICAL_FAILURE;
-        summary->error = StringPrintf(
-            "Terminating. Number of successive invalid steps more "
+        summary->message = StringPrintf(
+            "Number of successive invalid steps more "
             "than Solver::Options::max_num_consecutive_invalid_steps: %d",
             options_.max_num_consecutive_invalid_steps);
-
-        LOG(WARNING) << summary->error;
+        summary->termination_type = FAILURE;
+        LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
         return;
       }
 
@@ -278,6 +367,8 @@
       iteration_summary.cost_change = 0.0;
       iteration_summary.gradient_max_norm =
           summary->iterations.back().gradient_max_norm;
+      iteration_summary.gradient_norm =
+          summary->iterations.back().gradient_norm;
       iteration_summary.step_norm = 0.0;
       iteration_summary.relative_decrease = 0.0;
       iteration_summary.eta = options_.eta;
@@ -287,26 +378,37 @@
 
       // Undo the Jacobian column scaling.
       delta = (trust_region_step.array() * scale.array()).matrix();
-      if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
-        summary->termination_type = NUMERICAL_FAILURE;
-        summary->error =
-            "Terminating. Failed to compute Plus(x, delta, x_plus_delta).";
 
-        LOG(WARNING) << summary->error;
-        return;
+      // Try improving the step further by using an ARMIJO line
+      // search.
+      //
+      // TODO(sameeragarwal): What happens to trust region sizing as
+      // it interacts with the line search ?
+      if (use_line_search) {
+        const LineSearch::Summary line_search_summary =
+            DoLineSearch(options, x, gradient, cost, delta, evaluator);
+        if (line_search_summary.success) {
+          delta *= line_search_summary.optimal_step_size;
+        }
       }
 
-      // Try this step.
-      double new_cost = numeric_limits<double>::max();
-      if (!evaluator->Evaluate(x_plus_delta.data(),
-                               &new_cost,
-                               NULL, NULL, NULL)) {
-        // If the evaluation of the new cost fails, treat it as a step
-        // with high cost.
-        LOG(WARNING) << "Step failed to evaluate. "
-                     << "Treating it as step with infinite cost";
-        new_cost = numeric_limits<double>::max();
+      double new_cost = std::numeric_limits<double>::max();
+      if (evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
+        if (!evaluator->Evaluate(x_plus_delta.data(),
+                                &new_cost,
+                                NULL,
+                                NULL,
+                                NULL)) {
+          LOG(WARNING) << "Step failed to evaluate. "
+                       << "Treating it as a step with infinite cost";
+          new_cost = numeric_limits<double>::max();
+        }
       } else {
+        LOG(WARNING) << "x_plus_delta = Plus(x, delta) failed. "
+                     << "Treating it as a step with infinite cost";
+      }
+
+      if (new_cost < std::numeric_limits<double>::max()) {
         // Check if performing an inner iteration will make it better.
         if (inner_iterations_are_enabled) {
           ++summary->num_inner_iteration_steps;
@@ -320,30 +422,30 @@
           if (!evaluator->Evaluate(inner_iteration_x.data(),
                                    &new_cost,
                                    NULL, NULL, NULL)) {
-            VLOG(2) << "Inner iteration failed.";
+            VLOG_IF(2, is_not_silent) << "Inner iteration failed.";
             new_cost = x_plus_delta_cost;
           } else {
             x_plus_delta = inner_iteration_x;
             // Boost the model_cost_change, since the inner iteration
             // improvements are not accounted for by the trust region.
             model_cost_change +=  x_plus_delta_cost - new_cost;
-            VLOG(2) << "Inner iteration succeeded; current cost: " << cost
-                    << " x_plus_delta_cost: " << x_plus_delta_cost
-                    << " new_cost: " << new_cost;
-            const double inner_iteration_relative_progress =
-                1.0 - new_cost / x_plus_delta_cost;
-            inner_iterations_are_enabled =
-                (inner_iteration_relative_progress >
-                 options.inner_iteration_tolerance);
+            VLOG_IF(2, is_not_silent)
+                << "Inner iteration succeeded; Current cost: " << cost
+                << " Trust region step cost: " << x_plus_delta_cost
+                << " Inner iteration cost: " << new_cost;
 
             inner_iterations_were_useful = new_cost < cost;
 
+            const double inner_iteration_relative_progress =
+                1.0 - new_cost / x_plus_delta_cost;
             // Disable inner iterations once the relative improvement
             // drops below tolerance.
-            if (!inner_iterations_are_enabled) {
-              VLOG(2) << "Disabling inner iterations. Progress : "
-                      << inner_iteration_relative_progress;
-            }
+            inner_iterations_are_enabled =
+                (inner_iteration_relative_progress >
+                 options.inner_iteration_tolerance);
+            VLOG_IF(2, is_not_silent && !inner_iterations_are_enabled)
+                << "Disabling inner iterations. Progress : "
+                << inner_iteration_relative_progress;
           }
           summary->inner_iteration_time_in_seconds +=
               WallTimeInSeconds() - inner_iteration_start_time;
@@ -356,12 +458,14 @@
       const double step_size_tolerance =  options_.parameter_tolerance *
           (x_norm + options_.parameter_tolerance);
       if (iteration_summary.step_norm <= step_size_tolerance) {
-        VLOG(1) << "Terminating. Parameter tolerance reached. "
-                << "relative step_norm: "
-                << iteration_summary.step_norm /
-            (x_norm + options_.parameter_tolerance)
-                << " <= " << options_.parameter_tolerance;
-        summary->termination_type = PARAMETER_TOLERANCE;
+        summary->message =
+            StringPrintf("Parameter tolerance reached. "
+                         "Relative step_norm: %e <= %e.",
+                         (iteration_summary.step_norm /
+                          (x_norm + options_.parameter_tolerance)),
+                         options_.parameter_tolerance);
+        summary->termination_type = CONVERGENCE;
+        VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
         return;
       }
 
@@ -369,11 +473,13 @@
       const double absolute_function_tolerance =
           options_.function_tolerance * cost;
       if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
-        VLOG(1) << "Terminating. Function tolerance reached. "
-                << "|cost_change|/cost: "
-                << fabs(iteration_summary.cost_change) / cost
-                << " <= " << options_.function_tolerance;
-        summary->termination_type = FUNCTION_TOLERANCE;
+        summary->message =
+            StringPrintf("Function tolerance reached. "
+                         "|cost_change|/cost: %e <= %e",
+                         fabs(iteration_summary.cost_change) / cost,
+                         options_.function_tolerance);
+        summary->termination_type = CONVERGENCE;
+        VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
         return;
       }
 
@@ -447,10 +553,12 @@
         if (!inner_iterations_were_useful &&
             relative_decrease <= options_.min_relative_decrease) {
           iteration_summary.step_is_nonmonotonic = true;
-          VLOG(2) << "Non-monotonic step! "
-                  << " relative_decrease: " << relative_decrease
-                  << " historical_relative_decrease: "
-                  << historical_relative_decrease;
+          VLOG_IF(2, is_not_silent)
+              << "Non-monotonic step! "
+              << " relative_decrease: "
+              << relative_decrease
+              << " historical_relative_decrease: "
+              << historical_relative_decrease;
         }
       }
     }
@@ -458,6 +566,7 @@
     if (iteration_summary.step_is_successful) {
       ++summary->num_successful_steps;
       strategy->StepAccepted(iteration_summary.relative_decrease);
+
       x = x_plus_delta;
       x_norm = x.norm();
 
@@ -468,22 +577,34 @@
                                residuals.data(),
                                gradient.data(),
                                jacobian)) {
-        summary->termination_type = NUMERICAL_FAILURE;
-        summary->error =
-            "Terminating: Residual and Jacobian evaluation failed.";
-        LOG(WARNING) << summary->error;
+        summary->message = "Residual and Jacobian evaluation failed.";
+        summary->termination_type = FAILURE;
+        LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
         return;
       }
 
-      iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
+      negative_gradient = -gradient;
+      if (!evaluator->Plus(x.data(),
+                           negative_gradient.data(),
+                           projected_gradient_step.data())) {
+        summary->message =
+            "projected_gradient_step = Plus(x, -gradient) failed.";
+        summary->termination_type = FAILURE;
+        LOG(ERROR) << "Terminating: " << summary->message;
+        return;
+      }
 
-      if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
-        summary->termination_type = GRADIENT_TOLERANCE;
-        VLOG(1) << "Terminating: Gradient tolerance reached."
-                << "Relative gradient max norm: "
-                << (iteration_summary.gradient_max_norm /
-                    initial_gradient_max_norm)
-                << " <= " << options_.gradient_tolerance;
+      iteration_summary.gradient_max_norm =
+        (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
+      iteration_summary.gradient_norm = (x - projected_gradient_step).norm();
+
+      if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+        summary->message = StringPrintf("Gradient tolerance reached. "
+                                        "Gradient max norm: %e <= %e",
+                                        iteration_summary.gradient_max_norm,
+                                        options_.gradient_tolerance);
+        summary->termination_type = CONVERGENCE;
+        VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
         return;
       }
 
@@ -511,7 +632,8 @@
         if (cost > candidate_cost) {
           // The current iterate is has a higher cost than the
           // candidate iterate. Set the candidate to this point.
-          VLOG(2) << "Updating the candidate iterate to the current point.";
+          VLOG_IF(2, is_not_silent)
+              << "Updating the candidate iterate to the current point.";
           candidate_cost = cost;
           accumulated_candidate_model_cost_change = 0.0;
         }
@@ -525,7 +647,8 @@
         // iterate.
         if (num_consecutive_nonmonotonic_steps ==
             options.max_consecutive_nonmonotonic_steps) {
-          VLOG(2) << "Resetting the reference point to the candidate point";
+          VLOG_IF(2, is_not_silent)
+              << "Resetting the reference point to the candidate point";
           reference_cost = candidate_cost;
           accumulated_reference_model_cost_change =
               accumulated_candidate_model_cost_change;
@@ -544,8 +667,9 @@
     iteration_summary.trust_region_radius = strategy->Radius();
     if (iteration_summary.trust_region_radius <
         options_.min_trust_region_radius) {
-      summary->termination_type = PARAMETER_TOLERANCE;
-      VLOG(1) << "Termination. Minimum trust region radius reached.";
+      summary->message = "Termination. Minimum trust region radius reached.";
+      summary->termination_type = CONVERGENCE;
+      VLOG_IF(1, is_not_silent) << summary->message;
       return;
     }
 
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
index 0dcdbfe..998514f 100644
--- a/internal/ceres/trust_region_strategy.h
+++ b/internal/ceres/trust_region_strategy.h
@@ -33,7 +33,7 @@
 
 #include <string>
 #include "ceres/internal/port.h"
-#include "ceres/types.h"
+#include "ceres/linear_solver.h"
 
 namespace ceres {
 namespace internal {
@@ -106,7 +106,7 @@
     Summary()
         : residual_norm(0.0),
           num_iterations(-1),
-          termination_type(FAILURE) {
+          termination_type(LINEAR_SOLVER_FAILURE) {
     }
 
     // If the trust region problem is,
diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc
index a97f1a5..4710261 100644
--- a/internal/ceres/types.cc
+++ b/internal/ceres/types.cc
@@ -96,6 +96,7 @@
   switch (type) {
     CASESTR(SUITE_SPARSE);
     CASESTR(CX_SPARSE);
+    CASESTR(EIGEN_SPARSE);
     default:
       return "UNKNOWN";
   }
@@ -107,6 +108,7 @@
   UpperCase(&value);
   STRENUM(SUITE_SPARSE);
   STRENUM(CX_SPARSE);
+  STRENUM(EIGEN_SPARSE);
   return false;
 }
 
@@ -240,7 +242,7 @@
     NonlinearConjugateGradientType type) {
   switch (type) {
     CASESTR(FLETCHER_REEVES);
-    CASESTR(POLAK_RIBIRERE);
+    CASESTR(POLAK_RIBIERE);
     CASESTR(HESTENES_STIEFEL);
     default:
       return "UNKNOWN";
@@ -252,7 +254,7 @@
     NonlinearConjugateGradientType* type) {
   UpperCase(&value);
   STRENUM(FLETCHER_REEVES);
-  STRENUM(POLAK_RIBIRERE);
+  STRENUM(POLAK_RIBIERE);
   STRENUM(HESTENES_STIEFEL);
   return false;
 }
@@ -261,8 +263,8 @@
     CovarianceAlgorithmType type) {
   switch (type) {
     CASESTR(DENSE_SVD);
-    CASESTR(SPARSE_CHOLESKY);
-    CASESTR(SPARSE_QR);
+    CASESTR(EIGEN_SPARSE_QR);
+    CASESTR(SUITE_SPARSE_QR);
     default:
       return "UNKNOWN";
   }
@@ -273,33 +275,37 @@
     CovarianceAlgorithmType* type) {
   UpperCase(&value);
   STRENUM(DENSE_SVD);
-  STRENUM(SPARSE_CHOLESKY);
-  STRENUM(SPARSE_QR);
+  STRENUM(EIGEN_SPARSE_QR);
+  STRENUM(SUITE_SPARSE_QR);
   return false;
 }
 
-const char* SolverTerminationTypeToString(SolverTerminationType type) {
+const char* VisibilityClusteringTypeToString(
+    VisibilityClusteringType type) {
   switch (type) {
-    CASESTR(NO_CONVERGENCE);
-    CASESTR(FUNCTION_TOLERANCE);
-    CASESTR(GRADIENT_TOLERANCE);
-    CASESTR(PARAMETER_TOLERANCE);
-    CASESTR(NUMERICAL_FAILURE);
-    CASESTR(USER_ABORT);
-    CASESTR(USER_SUCCESS);
-    CASESTR(DID_NOT_RUN);
+    CASESTR(CANONICAL_VIEWS);
+    CASESTR(SINGLE_LINKAGE);
     default:
       return "UNKNOWN";
   }
 }
 
-const char* LinearSolverTerminationTypeToString(
-    LinearSolverTerminationType type) {
+bool StringToVisibilityClusteringType(
+    string value,
+    VisibilityClusteringType* type) {
+  UpperCase(&value);
+  STRENUM(CANONICAL_VIEWS);
+  STRENUM(SINGLE_LINKAGE);
+  return false;
+}
+
+const char* TerminationTypeToString(TerminationType type) {
   switch (type) {
-    CASESTR(TOLERANCE);
-    CASESTR(MAX_ITERATIONS);
-    CASESTR(STAGNATION);
+    CASESTR(CONVERGENCE);
+    CASESTR(NO_CONVERGENCE);
     CASESTR(FAILURE);
+    CASESTR(USER_SUCCESS);
+    CASESTR(USER_FAILURE);
     default:
       return "UNKNOWN";
   }
diff --git a/internal/ceres/unsymmetric_linear_solver_test.cc b/internal/ceres/unsymmetric_linear_solver_test.cc
index af9dffe..0b82e6a 100644
--- a/internal/ceres/unsymmetric_linear_solver_test.cc
+++ b/internal/ceres/unsymmetric_linear_solver_test.cc
@@ -57,7 +57,7 @@
   }
 
   void TestSolver(const LinearSolver::Options& options) {
-    scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
+
 
     LinearSolver::PerSolveOptions per_solve_options;
     LinearSolver::Summary unregularized_solve_summary;
@@ -84,13 +84,17 @@
     } else {
       LOG(FATAL) << "Unknown linear solver : " << options.type;
     }
+
     // Unregularized
+    scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
     unregularized_solve_summary =
         solver->Solve(transformed_A.get(),
                       b_.get(),
                       per_solve_options,
                       x_unregularized.data());
 
+    // Sparsity structure is changing, reset the solver.
+    solver.reset(LinearSolver::Create(options));
     // Regularized solution
     per_solve_options.D = D_.get();
     regularized_solve_summary =
@@ -99,15 +103,23 @@
                       per_solve_options,
                       x_regularized.data());
 
-    EXPECT_EQ(unregularized_solve_summary.termination_type, TOLERANCE);
+    EXPECT_EQ(unregularized_solve_summary.termination_type,
+              LINEAR_SOLVER_SUCCESS);
 
     for (int i = 0; i < A_->num_cols(); ++i) {
-      EXPECT_NEAR(sol_unregularized_[i], x_unregularized[i], 1e-8);
+      EXPECT_NEAR(sol_unregularized_[i], x_unregularized[i], 1e-8)
+          << "\nExpected: "
+          << ConstVectorRef(sol_unregularized_.get(), A_->num_cols()).transpose()
+          << "\nActual: " << x_unregularized.transpose();
     }
 
-    EXPECT_EQ(regularized_solve_summary.termination_type, TOLERANCE);
+    EXPECT_EQ(regularized_solve_summary.termination_type,
+              LINEAR_SOLVER_SUCCESS);
     for (int i = 0; i < A_->num_cols(); ++i) {
-      EXPECT_NEAR(sol_regularized_[i], x_regularized[i], 1e-8);
+      EXPECT_NEAR(sol_regularized_[i], x_regularized[i], 1e-8)
+          << "\nExpected: "
+          << ConstVectorRef(sol_regularized_.get(), A_->num_cols()).transpose()
+          << "\nActual: " << x_regularized.transpose();
     }
   }
 
@@ -166,6 +178,15 @@
   options.use_postordering = true;
   TestSolver(options);
 }
+
+TEST_F(UnsymmetricLinearSolverTest,
+       SparseNormalCholeskyUsingSuiteSparseDynamicSparsity) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.dynamic_sparsity = true;
+  TestSolver(options);
+}
 #endif
 
 #ifndef CERES_NO_CXSPARSE
@@ -186,7 +207,46 @@
   options.use_postordering = true;
   TestSolver(options);
 }
+
+TEST_F(UnsymmetricLinearSolverTest,
+       SparseNormalCholeskyUsingCXSparseDynamicSparsity) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = CX_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.dynamic_sparsity = true;
+  TestSolver(options);
+}
 #endif
 
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(UnsymmetricLinearSolverTest,
+       SparseNormalCholeskyUsingEigenPreOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = false;
+  TestSolver(options);
+}
+
+TEST_F(UnsymmetricLinearSolverTest,
+       SparseNormalCholeskyUsingEigenPostOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = true;
+  TestSolver(options);
+}
+
+TEST_F(UnsymmetricLinearSolverTest,
+       SparseNormalCholeskyUsingEigenDynamicSparsity) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.dynamic_sparsity = true;
+  TestSolver(options);
+}
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
 }  // namespace internal
 }  // namespace ceres
diff --git a/internal/ceres/visibility.cc b/internal/ceres/visibility.cc
index acfa45b..b3ee185 100644
--- a/internal/ceres/visibility.cc
+++ b/internal/ceres/visibility.cc
@@ -28,6 +28,9 @@
 //
 // Author: kushalav@google.com (Avanish Kushal)
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_SUITESPARSE
 
 #include "ceres/visibility.h"
diff --git a/internal/ceres/visibility.h b/internal/ceres/visibility.h
index 2d1e6f8..5ddd3a5 100644
--- a/internal/ceres/visibility.h
+++ b/internal/ceres/visibility.h
@@ -35,6 +35,9 @@
 #ifndef CERES_INTERNAL_VISIBILITY_H_
 #define CERES_INTERNAL_VISIBILITY_H_
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_SUITESPARSE
 
 #include <set>
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
index 7af1339..695eedc 100644
--- a/internal/ceres/visibility_based_preconditioner.cc
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -28,6 +28,9 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_SUITESPARSE
 
 #include "ceres/visibility_based_preconditioner.h"
@@ -43,12 +46,12 @@
 #include "ceres/block_sparse_matrix.h"
 #include "ceres/canonical_views_clustering.h"
 #include "ceres/collections_port.h"
-#include "ceres/detect_structure.h"
 #include "ceres/graph.h"
 #include "ceres/graph_algorithms.h"
 #include "ceres/internal/scoped_ptr.h"
 #include "ceres/linear_solver.h"
 #include "ceres/schur_eliminator.h"
+#include "ceres/single_linkage_clustering.h"
 #include "ceres/visibility.h"
 #include "glog/logging.h"
 
@@ -61,8 +64,9 @@
 //
 // This will require some more work on the clustering algorithm and
 // possibly some more refactoring of the code.
-static const double kSizePenaltyWeight = 3.0;
-static const double kSimilarityPenaltyWeight = 0.0;
+static const double kCanonicalViewsSizePenaltyWeight = 3.0;
+static const double kCanonicalViewsSimilarityPenaltyWeight = 0.0;
+static const double kSingleLinkageMinSimilarity = 0.9;
 
 VisibilityBasedPreconditioner::VisibilityBasedPreconditioner(
     const CompressedRowBlockStructure& bs,
@@ -188,17 +192,31 @@
   scoped_ptr<Graph<int> > schur_complement_graph(
       CHECK_NOTNULL(CreateSchurComplementGraph(visibility)));
 
-  CanonicalViewsClusteringOptions options;
-  options.size_penalty_weight = kSizePenaltyWeight;
-  options.similarity_penalty_weight = kSimilarityPenaltyWeight;
-
-  vector<int> centers;
   HashMap<int, int> membership;
-  ComputeCanonicalViewsClustering(*schur_complement_graph,
-                                  options,
-                                  &centers,
-                                  &membership);
-  num_clusters_ = centers.size();
+
+  if (options_.visibility_clustering_type == CANONICAL_VIEWS) {
+    vector<int> centers;
+    CanonicalViewsClusteringOptions clustering_options;
+    clustering_options.size_penalty_weight =
+        kCanonicalViewsSizePenaltyWeight;
+    clustering_options.similarity_penalty_weight =
+        kCanonicalViewsSimilarityPenaltyWeight;
+    ComputeCanonicalViewsClustering(clustering_options,
+                                    *schur_complement_graph,
+                                    &centers,
+                                    &membership);
+    num_clusters_ = centers.size();
+  } else if (options_.visibility_clustering_type == SINGLE_LINKAGE) {
+    SingleLinkageClusteringOptions clustering_options;
+    clustering_options.min_similarity =
+        kSingleLinkageMinSimilarity;
+    num_clusters_ = ComputeSingleLinkageClustering(clustering_options,
+                                                   *schur_complement_graph,
+                                                   &membership);
+  } else {
+    LOG(FATAL) << "Unknown visibility clustering algorithm.";
+  }
+
   CHECK_GT(num_clusters_, 0);
   VLOG(2) << "num_clusters: " << num_clusters_;
   FlattenMembershipMap(membership, &cluster_membership_);
@@ -313,14 +331,11 @@
   LinearSolver::Options eliminator_options;
   eliminator_options.elimination_groups = options_.elimination_groups;
   eliminator_options.num_threads = options_.num_threads;
-
-  DetectStructure(bs, options_.elimination_groups[0],
-                  &eliminator_options.row_block_size,
-                  &eliminator_options.e_block_size,
-                  &eliminator_options.f_block_size);
-
+  eliminator_options.e_block_size = options_.e_block_size;
+  eliminator_options.f_block_size = options_.f_block_size;
+  eliminator_options.row_block_size = options_.row_block_size;
   eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
-  eliminator_->Init(options_.elimination_groups[0], &bs);
+  eliminator_->Init(eliminator_options.elimination_groups[0], &bs);
 }
 
 // Update the values of the preconditioner matrix and factorize it.
@@ -356,14 +371,18 @@
   //
   // Doing the factorization like this saves us matrix mass when
   // scaling is not needed, which is quite often in our experience.
-  bool status = Factorize();
+  LinearSolverTerminationType status = Factorize();
+
+  if (status == LINEAR_SOLVER_FATAL_ERROR) {
+    return false;
+  }
 
   // The scaling only affects the tri-diagonal case, since
   // ScaleOffDiagonalBlocks only pays attenion to the cells that
   // belong to the edges of the degree-2 forest. In the CLUSTER_JACOBI
   // case, the preconditioner is guaranteed to be positive
   // semidefinite.
-  if (!status && options_.type == CLUSTER_TRIDIAGONAL) {
+  if (status == LINEAR_SOLVER_FAILURE && options_.type == CLUSTER_TRIDIAGONAL) {
     VLOG(1) << "Unscaled factorization failed. Retrying with off-diagonal "
             << "scaling";
     ScaleOffDiagonalCells();
@@ -371,7 +390,7 @@
   }
 
   VLOG(2) << "Compute time: " << time(NULL) - start_time;
-  return status;
+  return (status == LINEAR_SOLVER_SUCCESS);
 }
 
 // Consider the preconditioner matrix as meta-block matrix, whose
@@ -408,7 +427,7 @@
 
 // Compute the sparse Cholesky factorization of the preconditioner
 // matrix.
-bool VisibilityBasedPreconditioner::Factorize() {
+LinearSolverTerminationType VisibilityBasedPreconditioner::Factorize() {
   // Extract the TripletSparseMatrix that is used for actually storing
   // S and convert it into a cholmod_sparse object.
   cholmod_sparse* lhs = ss_.CreateSparseMatrix(
@@ -419,14 +438,21 @@
   // matrix contains the values.
   lhs->stype = 1;
 
+  // TODO(sameeragarwal): Refactor to pipe this up and out.
+  string status;
+
   // Symbolic factorization is computed if we don't already have one handy.
   if (factor_ == NULL) {
-    factor_ = ss_.BlockAnalyzeCholesky(lhs, block_size_, block_size_);
+    factor_ = ss_.BlockAnalyzeCholesky(lhs, block_size_, block_size_, &status);
   }
 
-  bool status = ss_.Cholesky(lhs, factor_);
+  const LinearSolverTerminationType termination_type =
+      (factor_ != NULL)
+      ? ss_.Cholesky(lhs, factor_, &status)
+      : LINEAR_SOLVER_FATAL_ERROR;
+
   ss_.Free(lhs);
-  return status;
+  return termination_type;
 }
 
 void VisibilityBasedPreconditioner::RightMultiply(const double* x,
@@ -437,7 +463,10 @@
 
   const int num_rows = m_->num_rows();
   memcpy(CHECK_NOTNULL(tmp_rhs_)->x, x, m_->num_rows() * sizeof(*x));
-  cholmod_dense* solution = CHECK_NOTNULL(ss->Solve(factor_, tmp_rhs_));
+  // TODO(sameeragarwal): Better error handling.
+  string status;
+  cholmod_dense* solution =
+      CHECK_NOTNULL(ss->Solve(factor_, tmp_rhs_, &status));
   memcpy(y, solution->x, sizeof(*y) * num_rows);
   ss->Free(solution);
 }
@@ -546,11 +575,17 @@
 // cluster ids. Convert this into a flat array for quick lookup. It is
 // possible that some of the vertices may not be associated with any
 // cluster. In that case, randomly assign them to one of the clusters.
+//
+// The cluster ids can be non-contiguous integers. So as we flatten
+// the membership_map, we also map the cluster ids to a contiguous set
+// of integers so that the cluster ids are in [0, num_clusters_).
 void VisibilityBasedPreconditioner::FlattenMembershipMap(
     const HashMap<int, int>& membership_map,
     vector<int>* membership_vector) const {
   CHECK_NOTNULL(membership_vector)->resize(0);
   membership_vector->resize(num_blocks_, -1);
+
+  HashMap<int, int> cluster_id_to_index;
   // Iterate over the cluster membership map and update the
   // cluster_membership_ vector assigning arbitrary cluster ids to
   // the few cameras that have not been clustered.
@@ -571,7 +606,16 @@
       cluster_id = camera_id % num_clusters_;
     }
 
-    membership_vector->at(camera_id) = cluster_id;
+    const int index = FindWithDefault(cluster_id_to_index,
+                                      cluster_id,
+                                      cluster_id_to_index.size());
+
+    if (index == cluster_id_to_index.size()) {
+      cluster_id_to_index[cluster_id] = index;
+    }
+
+    CHECK_LT(index, num_clusters_);
+    membership_vector->at(camera_id) = index;
   }
 }
 
diff --git a/internal/ceres/visibility_based_preconditioner.h b/internal/ceres/visibility_based_preconditioner.h
index c58b1a7..70cea83 100644
--- a/internal/ceres/visibility_based_preconditioner.h
+++ b/internal/ceres/visibility_based_preconditioner.h
@@ -55,6 +55,7 @@
 #include "ceres/graph.h"
 #include "ceres/internal/macros.h"
 #include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_solver.h"
 #include "ceres/preconditioner.h"
 #include "ceres/suitesparse.h"
 
@@ -147,7 +148,7 @@
   void ComputeClusterTridiagonalSparsity(const CompressedRowBlockStructure& bs);
   void InitStorage(const CompressedRowBlockStructure& bs);
   void InitEliminator(const CompressedRowBlockStructure& bs);
-  bool Factorize();
+  LinearSolverTerminationType Factorize();
   void ScaleOffDiagonalCells();
 
   void ClusterCameras(const vector< set<int> >& visibility);
diff --git a/internal/ceres/visibility_based_preconditioner_test.cc b/internal/ceres/visibility_based_preconditioner_test.cc
index 2edbb18..c718b5e 100644
--- a/internal/ceres/visibility_based_preconditioner_test.cc
+++ b/internal/ceres/visibility_based_preconditioner_test.cc
@@ -28,6 +28,9 @@
 //
 // Author: sameeragarwal@google.com (Sameer Agarwal)
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_SUITESPARSE
 
 #include "ceres/visibility_based_preconditioner.h"
diff --git a/internal/ceres/visibility_test.cc b/internal/ceres/visibility_test.cc
index 3cfb232..0e22f88 100644
--- a/internal/ceres/visibility_test.cc
+++ b/internal/ceres/visibility_test.cc
@@ -29,6 +29,9 @@
 // Author: kushalav@google.com (Avanish Kushal)
 //         sameeragarwal@google.com (Sameer Agarwal)
 
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
 #ifndef CERES_NO_SUITESPARSE
 
 #include "ceres/visibility.h"
diff --git a/jni/Android.mk b/jni/Android.mk
index 7d5afd8..70680f5 100644
--- a/jni/Android.mk
+++ b/jni/Android.mk
@@ -29,49 +29,44 @@
 # Author: settinger@google.com (Scott Ettinger)
 #         keir@google.com (Keir Mierle)
 #
-# Builds Ceres for Android, using the standard toolchain (not standalone). It
-# uses STLPort instead of GNU C++. This is useful for anyone wishing to ship
-# GPL-free code. This cannot build the tests or other parts of Ceres; only the
-# core libraries. If you need a more complete Ceres build, consider using the
-# CMake toolchain (noting that the standalone toolchain doesn't work with
-# STLPort).
+# Builds Ceres for Android, using the standard toolchain (not
+# standalone). It uses LLVM's libc++ as the standard library. It is a
+# modern BSD licensed implementation of the standard c++ library. We
+# do this to avoid any licensing issues that may arise from using
+# GCC's libstdc++ which is licensed under GPL3.
 #
-# You will have to specify the environment EIGEN_PATH to point to the Eigen
-# sources when building. For example:
+# Building
+# --------
+#
+# You will have to specify the environment EIGEN_PATH to point to the
+# Eigen sources when building. For example:
 #
 #   EIGEN_PATH=/home/keir/src/eigen-3.0.5 ndk-build -j
 #
-# It is also possible to specify CERES_EXTRA_DEFINES, in case you need to pass
-# more definitions to the C compiler.
+# It is also possible to specify CERES_EXTRA_DEFINES, in case you need
+# to pass more definitions to the C compiler.
 #
-# IMPORTANT:
-#
-# The shared library built at the bottom is fake, broken, and empty. It exists
-# only to force ndk-build to build the shared library. This shouldn't be
-# necessary, but if it is missing, then ndk-build will do nothing when asked to
-# build. The produced .so library is NON-FUNCTIONAL since it has no Ceres
-# function-level dependencies. Instead, copy the static library:
+# Using the library
+# -----------------
+# Copy the static library:
 #
 #   ../obj/local/armeabi-v7a/libceres.a
 #
-# into your own project, then link it into your binary in your Android.mk file.
+# into your own project, then link it into your binary in your
+# Android.mk file.
 #
-# Reducing binary size:
-#
-# This build includes the Schur specializations, which cause binary bloat. If
-# you don't need them for your application, consider adding:
+# Reducing binary size
+# --------------------
+# This build includes the Schur specializations, which increase the
+# size of the binary. If you don't need them for your application,
+# consider adding:
 #
 #   -DCERES_RESTRICT_SCHUR_SPECIALIZATION
 #
-# to the LOCAL_CFLAGS variable below, and commenting out all the
-# generated/schur_eliminator_2_2_2.cc-alike files, leaving only the _d_d_d one.
+# to the LOCAL_CFLAGS variable below.
 #
-# Similarly if you do not need the line search minimizer, consider adding
-#
-#   -DCERES_NO_LINE_SEARCH_MINIMIZER
-#
-# Changing the logging library:
-#
+# Changing the logging library
+# ----------------------------
 # Ceres Solver ships with a replacement for glog that provides a
 # simple and small implementation that builds on Android. However, if
 # you wish to supply a header only version yourself, then you may
@@ -79,11 +74,17 @@
 
 LOCAL_PATH := $(call my-dir)
 
+# Ceres requires at least NDK version r9d to compile.
+ifneq ($(shell $(LOCAL_PATH)/assert_ndk_version.sh "r9d" $(NDK_ROOT)), true)
+  $(error Ceres requires NDK version r9d or greater)
+endif
+
 EIGEN_PATH := $(EIGEN_PATH)
 CERES_INCLUDE_PATHS := $(CERES_EXTRA_INCLUDES)
 CERES_INCLUDE_PATHS += $(LOCAL_PATH)/../internal
 CERES_INCLUDE_PATHS += $(LOCAL_PATH)/../internal/ceres
 CERES_INCLUDE_PATHS += $(LOCAL_PATH)/../include
+CERES_INCLUDE_PATHS += $(LOCAL_PATH)/../config
 
 # Use the alternate glog implementation if provided by the user.
 ifdef CERES_GLOG_DIR
@@ -101,15 +102,9 @@
 LOCAL_CFLAGS := $(CERES_EXTRA_DEFINES) \
                 -DCERES_NO_LAPACK \
                 -DCERES_NO_SUITESPARSE \
-                -DCERES_NO_GFLAGS \
                 -DCERES_NO_THREADS \
                 -DCERES_NO_CXSPARSE \
-                -DCERES_NO_TR1 \
-                -DCERES_WORK_AROUND_ANDROID_NDK_COMPILER_BUG
-
-# On Android NDK 8b, GCC gives spurrious warnings about ABI incompatibility for
-# which there is no solution. Hide the warning instead.
-LOCAL_CFLAGS += -Wno-psabi
+                -DCERES_STD_UNORDERED_MAP
 
 LOCAL_SRC_FILES := $(CERES_SRC_PATH)/array_utils.cc \
                    $(CERES_SRC_PATH)/blas.cc \
@@ -117,10 +112,12 @@
                    $(CERES_SRC_PATH)/block_jacobian_writer.cc \
                    $(CERES_SRC_PATH)/block_jacobi_preconditioner.cc \
                    $(CERES_SRC_PATH)/block_random_access_dense_matrix.cc \
+                   $(CERES_SRC_PATH)/block_random_access_diagonal_matrix.cc \
                    $(CERES_SRC_PATH)/block_random_access_matrix.cc \
                    $(CERES_SRC_PATH)/block_random_access_sparse_matrix.cc \
                    $(CERES_SRC_PATH)/block_sparse_matrix.cc \
                    $(CERES_SRC_PATH)/block_structure.cc \
+                   $(CERES_SRC_PATH)/callbacks.cc \
                    $(CERES_SRC_PATH)/canonical_views_clustering.cc \
                    $(CERES_SRC_PATH)/cgnr_solver.cc \
                    $(CERES_SRC_PATH)/compressed_row_jacobian_writer.cc \
@@ -134,6 +131,8 @@
                    $(CERES_SRC_PATH)/dense_sparse_matrix.cc \
                    $(CERES_SRC_PATH)/detect_structure.cc \
                    $(CERES_SRC_PATH)/dogleg_strategy.cc \
+                   $(CERES_SRC_PATH)/dynamic_compressed_row_jacobian_writer.cc \
+                   $(CERES_SRC_PATH)/dynamic_compressed_row_sparse_matrix.cc \
                    $(CERES_SRC_PATH)/evaluator.cc \
                    $(CERES_SRC_PATH)/file.cc \
                    $(CERES_SRC_PATH)/gradient_checking_cost_function.cc \
@@ -159,9 +158,9 @@
                    $(CERES_SRC_PATH)/problem.cc \
                    $(CERES_SRC_PATH)/problem_impl.cc \
                    $(CERES_SRC_PATH)/program.cc \
+                   $(CERES_SRC_PATH)/reorder_program.cc \
                    $(CERES_SRC_PATH)/residual_block.cc \
                    $(CERES_SRC_PATH)/residual_block_utils.cc \
-                   $(CERES_SRC_PATH)/runtime_numeric_diff_cost_function.cc \
                    $(CERES_SRC_PATH)/schur_complement_solver.cc \
                    $(CERES_SRC_PATH)/schur_eliminator.cc \
                    $(CERES_SRC_PATH)/schur_jacobi_preconditioner.cc \
@@ -172,6 +171,7 @@
                    $(CERES_SRC_PATH)/sparse_normal_cholesky_solver.cc \
                    $(CERES_SRC_PATH)/split.cc \
                    $(CERES_SRC_PATH)/stringprintf.cc \
+                   $(CERES_SRC_PATH)/summary_utils.cc \
                    $(CERES_SRC_PATH)/suitesparse.cc \
                    $(CERES_SRC_PATH)/triplet_sparse_matrix.cc \
                    $(CERES_SRC_PATH)/trust_region_minimizer.cc \
@@ -191,11 +191,33 @@
                    $(CERES_SRC_PATH)/generated/schur_eliminator_2_3_d.cc \
                    $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_3.cc \
                    $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_4.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_8.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_9.cc \
                    $(CERES_SRC_PATH)/generated/schur_eliminator_2_4_d.cc \
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_2_d_d.cc \
                    $(CERES_SRC_PATH)/generated/schur_eliminator_4_4_2.cc \
                    $(CERES_SRC_PATH)/generated/schur_eliminator_4_4_3.cc \
                    $(CERES_SRC_PATH)/generated/schur_eliminator_4_4_4.cc \
-                   $(CERES_SRC_PATH)/generated/schur_eliminator_4_4_d.cc
+                   $(CERES_SRC_PATH)/generated/schur_eliminator_4_4_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_d_d_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_2_2.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_2_3.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_2_4.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_2_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_3_3.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_3_4.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_3_9.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_3_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_3.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_4.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_8.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_9.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_4_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_2_d_d.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_4_4_2.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_4_4_3.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_4_4_4.cc \
+                   $(CERES_SRC_PATH)/generated/partitioned_matrix_view_4_4_d.cc
 
 ifndef CERES_GLOG_DIR
 LOCAL_SRC_FILES += $(CERES_SRC_PATH)/miniglog/glog/logging.cc
@@ -203,11 +225,3 @@
 
 LOCAL_MODULE := ceres
 include $(BUILD_STATIC_LIBRARY)
-
-# This is a fake library; see the file header comments.
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := $(CERES_INCLUDE_PATHS)
-LOCAL_C_INCLUDES += $(EIGEN_PATH)
-LOCAL_MODULE := forces_static_ceres_build_do_not_use
-LOCAL_STATIC_LIBRARIES := ceres
-include $(BUILD_SHARED_LIBRARY)
diff --git a/jni/Application.mk b/jni/Application.mk
index 462823d..ec40293 100644
--- a/jni/Application.mk
+++ b/jni/Application.mk
@@ -33,6 +33,7 @@
 APP_CPPFLAGS += -fno-rtti
 APP_OPTIM := release
 
-# Don't use GNU libstdc++; instead use STLPort, which is free of GPL3 issues.
-APP_STL := stlport_static
+# Use libc++ from LLVM. It is a modern BSD licensed implementation of
+# the standard C++ library.
+APP_STL := c++_static
 APP_ABI := armeabi-v7a
diff --git a/jni/assert_ndk_version.sh b/jni/assert_ndk_version.sh
new file mode 100755
index 0000000..0704492
--- /dev/null
+++ b/jni/assert_ndk_version.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+# Bash script to assert that the current version of the NDK is at least the
+# specified version. Prints 'true' to standard out if it's the right version,
+# 'false' if it's not.
+#
+# Typically used like this, in your jni/Android.mk:
+#
+#   ifneq ($(shell $(LOCAL_PATH)/assert_ndk_version.sh "r5c" "ndk-dir"), true)
+#     $(error NDK version r5c or greater required)
+#   endif
+#
+# See https://gist.github.com/2878774 for asserting SDK version.
+#
+# Retrieved from: https://gist.github.com/jorgenpt/1961404 on 2014-06-03.
+#
+# Copyright (c) 2012, Lookout, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+#    this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: jorgenpt@gmail.com (Jorgen Tjerno)
+#         alexs.mac@gmail.com (Alex Stewart)
+
+# Extracts 'r5c' into '5 c', also handles newer versions of the form
+# 'r9d (64-bit)' and versions >= 10.
+function get_major_minor() {
+  # r9d (64-bit) -> '9d', also handle versions >= 10.
+  local version=$(echo "$1" | sed 's/r\([0-9]\{1,2\}[a-z]\{0,1\}\).*/\1/')
+  local major=$(echo "$version" | sed 's/\([0-9]\{1,2\}\).*/\1/')
+  local minor=$(echo "$version" | sed 's/^[0-9]*//')
+  echo "$major $minor"
+}
+
+if [[ -z "$2" ]]; then
+  echo "Usage: $0 <required version> <NDK_ROOT>" >&2
+  echo " For example: $0 r5c android-ndk-r9d" >&2
+  exit 1
+fi
+
+# Assert that the expected version is at least 4.
+declare -a expected_version
+expected_version=( $(get_major_minor "$1") )
+if [[ ${expected_version[0]} -le 4 ]]; then
+  echo "Cannot test for versions less than r5: r4 doesn't have a version file." >&2
+  echo false
+  exit 1
+fi
+
+release_file="$2/RELEASE.TXT"
+
+# NDK version r4 or earlier doesn't have a RELEASE.txt, and we just asserted
+# that the person was looking for r5 or above, so that implies that this is an
+# invalid version.
+if [ ! -s "$release_file" ]; then
+  echo false
+  exit 0
+fi
+
+# Make sure the data is at least kinda sane.
+version=$(grep '^r' $release_file)
+declare -a actual_version
+actual_version=( $(get_major_minor "$version") )
+if [ -z "$version" ] || [ -z "${actual_version[0]}" ]; then
+  echo "Invalid RELEASE.txt: $(cat $release_file)" >&2
+  echo false
+  exit 1
+fi
+
+if [[ ${actual_version[0]} -lt ${expected_version[0]} ]]; then
+  echo "false"
+elif [[ ${actual_version[0]} -eq ${expected_version[0]} ]]; then
+  # This uses < and not -lt because they're string identifiers (a, b, c, etc)
+  if [[ "${actual_version[1]}" < "${expected_version[1]}" ]]; then
+    echo "false"
+  else
+    echo "true"
+  fi
+else
+  echo "true"
+fi
diff --git a/scripts/ceres-solver.spec b/scripts/ceres-solver.spec
index b3b6f0f..1e5c7c0 100644
--- a/scripts/ceres-solver.spec
+++ b/scripts/ceres-solver.spec
@@ -1,15 +1,15 @@
 Name:           ceres-solver
-Version:        1.7.0
+Version:        1.9.0
 # Release candidate versions are messy. Give them a release of
 # e.g. "0.1.0%{?dist}" for RC1 (and remember to adjust the Source0
 # URL). Non-RC releases go back to incrementing integers starting at 1.
-Release:        0.3.0%{?dist}
+Release:        0.2.0%{?dist}
 Summary:        A non-linear least squares minimizer
 
 Group:          Development/Libraries
 License:        BSD
-URL:            http://code.google.com/p/ceres-solver/
-Source0:        http://%{name}.googlecode.com/files/%{name}-%{version}rc3.tar.gz
+URL:            http://ceres-solver.org/
+Source0:        http://%{name}.org/%{name}-%{version}.tar.gz
 BuildRoot:      %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
 
 %if (0%{?rhel} == 06)
@@ -35,16 +35,17 @@
 large complicated nonlinear least squares problems. Features include:
 
   - A friendly API: build your objective function one term at a time
-  - Automatic differentiation
+  - Automatic and numeric differentiation
   - Robust loss functions
   - Local parameterizations
   - Threaded Jacobian evaluators and linear solvers
-  - Levenberg-Marquardt and Dogleg (Powell & Subspace) solvers
+  - Trust region solvers with non-monotonic steps (Levenberg-Marquardt and Dogleg (Powell & Subspace))
+  - Line search solvers (L-BFGS and Nonlinear CG)
   - Dense QR and Cholesky factorization (using Eigen) for small problems
   - Sparse Cholesky factorization (using SuiteSparse) for large sparse problems
   - Specialized solvers for bundle adjustment problems in computer vision
   - Iterative linear solvers for general sparse and bundle adjustment problems
-  - Runs on Linux, Windows, Mac OS X and Android. An iOS port is underway
+  - Runs on Linux, Windows, Mac OS X, Android, and iOS
 
 Notable use of Ceres Solver is for the image alignment in Google Maps and for
 vehicle pose in Google Street View.
@@ -110,6 +111,21 @@
 
 
 %changelog
+* Mon May 27 2014 Sameer Agarwal <sameeragarwal@google.com> - 1.9.0-0.2.0
+- Bump version
+
+* Fri May 16 2014 Sameer Agarwal <sameeragarwal@google.com> - 1.9.0-0.1.0
+- Bump version
+
+* Tue Nov 12 2013 Sameer Agarwal <sameeragarwal@google.com> - 1.8.0-0.3.0
+- Bump version
+
+* Wed Nov 6 2013 Sameer Agarwal <sameeragarwal@google.com> - 1.8.0-0.2.0
+- Bump version
+
+* Thu Oct 31 2013 Sameer Agarwal <sameeragarwal@google.com> - 1.8.0-0.1.0
+- Bump version
+
 * Thu Aug 29 2013 Taylor Braun-Jones <taylor@braun-jones.org> - 1.7.0-0.3.0
 - Bump version
 
diff --git a/scripts/make_docs.py b/scripts/make_docs.py
index efbbf88..f992d13 100644
--- a/scripts/make_docs.py
+++ b/scripts/make_docs.py
@@ -1,7 +1,8 @@
 #!/usr/bin/python
+# encoding: utf-8
 #
 # Ceres Solver - A fast non-linear least squares minimizer
-# Copyright 2013 Google Inc. All rights reserved.
+# Copyright 2014 Google Inc. All rights reserved.
 # http://code.google.com/p/ceres-solver/
 #
 # Redistribution and use in source and binary forms, with or without
@@ -40,42 +41,82 @@
 N = len(sys.argv)
 
 if N < 3:
-  print "make_docs.py src_root destination_root"
+  print 'make_docs.py src_root destination_root'
   sys.exit(1)
 
-src_dir    = sys.argv[1] + "/docs/source"
+src_dir    = sys.argv[1] + '/docs/source'
 build_root = sys.argv[2]
-cache_dir  = build_root + "/doctrees"
-html_dir   = build_root + "/html"
+cache_dir  = build_root + '/doctrees'
+html_dir   = build_root + '/html'
 
 # Called from Command Line
 if N == 3:
-  sphinx_exe = "sphinx-build"
+  sphinx_exe = 'sphinx-build'
 
 # Called from CMake (using the SPHINX_EXECUTABLE found)
 elif N == 4:
   sphinx_exe = sys.argv[3]
 
 # Run Sphinx to build the documentation.
-os.system("%s -b html -d %s %s %s" %(sphinx_exe, cache_dir, src_dir, html_dir))
+os.system('%s -b html -d %s %s %s' %(sphinx_exe, cache_dir, src_dir, html_dir))
 
-input_pattern = """config=TeX-AMS-MML_HTMLorMML"></script>"""
-output_pattern = """config=TeX-AMS_HTML">
-  MathJax.Hub.Config({
-    "HTML-CSS": {
-      availableFonts: ["TeX"]
-    }
-  });
-</script>"""
+replacements = [
+  # By default MathJax does not use TeX fonts. This simple search
+  # and replace fixes that.
+  ('''config=TeX-AMS-MML_HTMLorMML"></script>''',
+   '''config=TeX-AMS_HTML">
+      MathJax.Hub.Config({
+          "HTML-CSS": {
+            availableFonts: ["TeX"]
+          }
+        });
+      </script>'''),
 
-# By default MathJax uses does not use TeX fonts. This simple search
-# and replace fixes that.
-for name in glob.glob("%s/*.html" % html_dir):
-  print "Postprocessing: ", name
-  fptr = open(name)
-  out = fptr.read().replace(input_pattern, output_pattern)
-  fptr.close()
+  # The title for the homepage is not ideal, so change it.
+  ('<title>Ceres Solver &mdash; Ceres Solver</title>',
+   '<title>Ceres Solver &mdash; A Nonlinear Least Squares Minimizer</title>')
+]
 
-  fptr = open(name, "w")
-  fptr.write(out)
-  fptr.close()
+# This is a nasty hack to strip the breadcrumb navigation. A better strategy is
+# to fork the upstream template, but that is no fun either. Whitespace matters!
+# This doesn't use regular expressions since the escaping makes it untenable.
+breadcrumb_start_other = \
+'''<div role="navigation" aria-label="breadcrumbs navigation">
+  <ul class="wy-breadcrumbs">
+    <li><a href="index.html">Docs</a> &raquo;</li>
+      
+    <li>'''
+
+# The index page has a slightly different breadcrumb.
+breadcrumb_start_index = breadcrumb_start_other.replace('index.html', '#')
+
+breadcrumb_end = \
+'''</li>
+      <li class="wy-breadcrumbs-aside">
+        
+      </li>
+  </ul>
+  <hr/>
+</div>'''
+
+for name in glob.glob('%s/*.html' % html_dir):
+  print 'Postprocessing: ', name
+  with open(name) as fptr:
+    out = fptr.read()
+
+  for input_pattern, output_pattern in replacements:
+    out = out.replace(input_pattern, output_pattern)
+
+  try:
+    breadcrumb_start = breadcrumb_start_index \
+                       if name.endswith('index.html') \
+                       else breadcrumb_start_other
+    pre_breadcrumb_start, post_breadcrumb_start = out.split(breadcrumb_start)
+    title, post_breadcrumb_end = post_breadcrumb_start.split(breadcrumb_end)
+    print 'Stripping breadcrumb for -', title
+    out = pre_breadcrumb_start + post_breadcrumb_end
+  except ValueError:
+    print 'Skipping breadcrumb strip for', name
+
+  with open(name, 'w') as fptr:
+    fptr.write(out)