Merged changes in the trunk up to revision 50829.

Conflicts resolved:
source/blender/blenloader/intern/readfile.c
source/blender/render/intern/source/convertblender.c
source/blender/render/intern/source/pipeline.c

Also addressed code inconsistency due to changes in the trunk revision 50628 (color
management with OCIO) and 50806 (UV project material).  OCIO-related changes are marked
OCIO_TODO as in some other files modified in revision 50628.
This commit is contained in:
Tamito Kajiyama 2012-09-23 18:50:56 +00:00
commit a42ba82f63
1024 changed files with 399537 additions and 14461 deletions

@ -131,6 +131,7 @@ option(WITH_FFTW3 "Enable FFTW3 support (Used for smoke and audio effect
option(WITH_BULLET "Enable Bullet (Physics Engine)" ON)
option(WITH_GAMEENGINE "Enable Game Engine" ON)
option(WITH_PLAYER "Build Player" OFF)
option(WITH_OPENCOLORIO "Enable OpenColorIO color management" ON)
option(WITH_COMPOSITOR "Enable the tile based nodal compositor" ON)
option(WITH_COMPOSITOR_LEGACY "Enable legacy compositor" OFF)
@ -377,6 +378,8 @@ endif()
if(WITH_GHOST_SDL OR WITH_HEADLESS)
set(WITH_GHOST_XDND OFF)
set(WITH_X11_XF86VMODE OFF)
set(WITH_X11_XINPUT OFF)
endif()
if(MINGW)
@ -562,7 +565,14 @@ if(UNIX AND NOT APPLE)
endif()
mark_as_advanced(FFMPEG)
set(FFMPEG_INCLUDE_DIRS ${FFMPEG}/include ${FFMPEG}/include/ffmpeg)
# lame, but until we have propper find module for ffmpeg
set(FFMPEG_INCLUDE_DIRS ${FFMPEG}/include)
if(EXISTS "${FFMPEG}/include/ffmpeg/")
set(FFMPEG_INCLUDE_DIRS "${FFMPEG_INCLUDE_DIRS} ${FFMPEG}/include/ffmpeg")
endif()
# end lameness
mark_as_advanced(FFMPEG_LIBRARIES)
set(FFMPEG_LIBPATH ${FFMPEG}/lib)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__STDC_CONSTANT_MACROS")
@ -672,6 +682,24 @@ if(UNIX AND NOT APPLE)
endif()
endif()
if(WITH_OPENCOLORIO)
# use lib dir if available and nothing else specified
if(LIBDIR AND NOT OPENCOLORIO_ROOT_DIR)
set(OPENCOLORIO_ROOT_DIR ${LIBDIR}/ocio)
endif()
find_package(OpenColorIO)
set(OPENCOLORIO_LIBRARIES ${OPENCOLORIO_LIBRARIES})
set(OPENCOLORIO_LIBPATH) # TODO, remove and reference the absolute path everywhere
set(OPENCOLORIO_DEFINITIONS)
if(NOT OPENCOLORIO_FOUND)
set(WITH_OPENCOLORIO OFF)
message(STATUS "OpenColorIO not found")
endif()
endif()
if(WITH_CYCLES_OSL)
set(CYCLES_OSL ${LIBDIR}/osl CACHE PATH "Path to OpenShadingLanguage installation")
@ -1065,6 +1093,14 @@ elseif(WIN32)
set(OPENIMAGEIO_DEFINITIONS)
endif()
if(WITH_OPENCOLORIO)
set(OPENCOLORIO ${LIBDIR}/opencolorio)
set(OPENCOLORIO_INCLUDE_DIRS ${OPENCOLORIO}/include)
set(OPENCOLORIO_LIBRARIES OpenColorIO)
set_lib_path(OPENCOLORIO_LIBPATH "opencolorio/lib")
set(OPENCOLORIO_DEFINITIONS)
endif()
set(PLATFORM_LINKFLAGS "/SUBSYSTEM:CONSOLE /STACK:2097152 /INCREMENTAL:NO /NODEFAULTLIB:msvcrt.lib /NODEFAULTLIB:msvcmrt.lib /NODEFAULTLIB:msvcurt.lib /NODEFAULTLIB:msvcrtd.lib")
# MSVC only, Mingw doesnt need
@ -1077,7 +1113,7 @@ elseif(WIN32)
set(PLATFORM_LINKFLAGS_DEBUG "/NODEFAULTLIB:libcmt.lib /NODEFAULTLIB:libc.lib")
# used in many places so include globally, like OpenGL
blender_include_dirs("${PTHREADS_INCLUDE_DIRS}")
blender_include_dirs_sys("${PTHREADS_INCLUDE_DIRS}")
elseif(CMAKE_COMPILER_IS_GNUCC)
# keep GCC specific stuff here
@ -1216,8 +1252,8 @@ elseif(WIN32)
set(BOOST_POSTFIX "mgw47-mt-s-1_49")
set(BOOST_DEBUG_POSTFIX "mgw47-mt-sd-1_49")
else()
set(BOOST_POSTFIX "mgw46-mt-s-1_47")
set(BOOST_DEBUG_POSTFIX "mgw46-mt-sd-1_47")
set(BOOST_POSTFIX "mgw46-mt-s-1_49")
set(BOOST_DEBUG_POSTFIX "mgw46-mt-sd-1_49")
endif()
set(BOOST_LIBRARIES
optimized boost_date_time-${BOOST_POSTFIX} boost_filesystem-${BOOST_POSTFIX}
@ -1236,6 +1272,14 @@ elseif(WIN32)
set(OPENIMAGEIO_DEFINITIONS)
endif()
if(WITH_OPENCOLORIO)
set(OPENCOLORIO ${LIBDIR}/opencolorio)
set(OPENCOLORIO_INCLUDE_DIRS ${OPENCOLORIO}/include)
set(OPENCOLORIO_LIBRARIES OpenColorIO)
set(OPENCOLORIO_LIBPATH ${OPENCOLORIO}/lib)
set(OPENCOLORIO_DEFINITIONS)
endif()
set(PLATFORM_LINKFLAGS "-Xlinker --stack=2097152")
## DISABLE - causes linking errors
@ -1482,6 +1526,14 @@ elseif(APPLE)
set(OPENIMAGEIO_DEFINITIONS "-DOIIO_STATIC_BUILD")
endif()
if(WITH_OPENCOLORIO)
set(OPENCOLORIO ${LIBDIR}/opencolorio)
set(OPENCOLORIO_INCLUDE_DIRS ${OPENCOLORIO}/include)
set(OPENCOLORIO_LIBRARIES OpenColorIO tinyxml yaml-cpp)
set(OPENCOLORIO_LIBPATH ${OPENCOLORIO}/lib)
set(OPENCOLORIO_DEFINITIONS "-DOCIO_STATIC_BUILD")
endif()
if(WITH_CYCLES_OSL)
set(CYCLES_OSL ${LIBDIR}/osl CACHE PATH "Path to OpenShadingLanguage installation")
@ -1673,14 +1725,22 @@ if(CMAKE_COMPILER_IS_GNUCC)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_CAST_ALIGN -Wcast-align)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_DECLARATION_AFTER_STATEMENT -Werror=declaration-after-statement)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_IMPLICIT_FUNCTION_DECLARATION -Werror=implicit-function-declaration)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_RETURN_TYPE -Werror=return-type)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_RETURN_TYPE -Werror=return-type)
# system headers sometimes do this, disable for now, was: -Werror=strict-prototypes
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_STRICT_PROTOTYPES -Wstrict-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_STRICT_PROTOTYPES -Wstrict-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_MISSING_PROTOTYPES -Wmissing-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_POINTER_ARITH -Wpointer-arith)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNUSED_PARAMETER -Wunused-parameter)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_WRITE_STRINGS -Wwrite-strings)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_LOGICAL_OP -Wlogical-op)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNDEF -Wundef)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNINITIALIZED -Wuninitialized)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_INIT_SELF -Winit-self) # needs -Wuninitialized
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_NULL -Wnonnull) # C only
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_MISSING_INCLUDE_DIRS -Wmissing-include-dirs)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_DIV_BY_ZERO -Wno-div-by-zero)
# # this causes too many warnings, disable
# ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNDEFINED -Wundef)
@ -1695,9 +1755,14 @@ if(CMAKE_COMPILER_IS_GNUCC)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_ALL -Wall)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_SIGN_COMPARE -Wno-sign-compare)
# # this causes too many warnings, disable
# ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNDEFINED -Wundef)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_MISSING_DECLARATIONS -Wmissing-declarations)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_LOGICAL_OP -Wlogical-op)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNDEF -Wundef)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNINITIALIZED -Wuninitialized)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_INIT_SELF -Winit-self) # needs -Wuninitialized
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_MISSING_INCLUDE_DIRS -Wmissing-include-dirs)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_DIV_BY_ZERO -Wno-div-by-zero)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNDEFINED -Wundef)
# flags to undo strict flags
ADD_CHECK_C_COMPILER_FLAG(CC_REMOVE_STRICT_FLAGS C_WARN_NO_DEPRECATED_DECLARATIONS -Wno-deprecated-declarations)
@ -1709,17 +1774,22 @@ if(CMAKE_COMPILER_IS_GNUCC)
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
# strange, clang complains these are not supported, but then yses them.
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ALL -Wall)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_AUTOLOGICAL_COMPARE -Wno-tautological-compare)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_STRICT_PROTOTYPES -Wstrict-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_MISSING_PROTOTYPES -Wmissing-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNUSED_MACROS -Wunused-macros)
ADD_CHECK_C_COMPILER_FLAG(CXX_WARNINGS C_WARN_ALL -Wall)
ADD_CHECK_C_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_AUTOLOGICAL_COMPARE -Wno-tautological-compare)
ADD_CHECK_C_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_C_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_C_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_OVERLOADED_VIRTUAL -Wno-overloaded-virtual) # we get a lot of these, if its a problem a dev needs to look into it.
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_ALL -Wall)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_AUTOLOGICAL_COMPARE -Wno-tautological-compare)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_OVERLOADED_VIRTUAL -Wno-overloaded-virtual) # we get a lot of these, if its a problem a dev needs to look into it.
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNUSED_MACROS -Wunused-macros)
elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")
@ -1881,6 +1951,7 @@ if(FIRST_RUN)
info_cfg_option(WITH_INTERNATIONAL)
info_cfg_option(WITH_INPUT_NDOF)
info_cfg_option(WITH_CYCLES)
info_cfg_option(WITH_OPENCOLORIO)
info_cfg_text("Compiler Options:")
info_cfg_option(WITH_BUILDINFO)

@ -173,6 +173,7 @@ help:
@echo " * check_cppcheck - run blender source through cppcheck (C & C++)"
@echo " * check_splint - run blenders source through splint (C only)"
@echo " * check_sparse - run blenders source through sparse (C only)"
@echo " * check_smatch - run blenders source through smatch (C only)"
@echo " * check_spelling_c - check for spelling errors (C/C++ only)"
@echo " * check_spelling_py - check for spelling errors (Python only)"
@echo ""

@ -719,6 +719,26 @@ if env['OURPLATFORM']!='darwin':
cubin_file = os.path.join(kernel_build_dir, "kernel_%s.cubin" % arch)
scriptinstall.append(env.Install(dir=dir,source=cubin_file))
if env['WITH_BF_OCIO']:
colormanagement = os.path.join('release', 'datafiles', 'colormanagement')
for dp, dn, df in os.walk(colormanagement):
if '.svn' in dn:
dn.remove('.svn')
if '_svn' in dn:
dn.remove('_svn')
dir = os.path.join(env['BF_INSTALLDIR'], VERSION, 'datafiles')
dir += os.sep + os.path.basename(colormanagement) + dp[len(colormanagement):]
source = [os.path.join(dp, f) for f in df if not f.endswith(".pyc")]
# To ensure empty dirs are created too
if len(source) == 0:
env.Execute(Mkdir(dir))
scriptinstall.append(env.Install(dir=dir,source=source))
if env['WITH_BF_INTERNATIONAL']:
internationalpaths=['release' + os.sep + 'datafiles']
@ -851,6 +871,13 @@ if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'win64-vc', 'linuxcross'):
if env['WITH_BF_OIIO'] and env['OURPLATFORM'] != 'win32-mingw':
dllsources.append('${LCGDIR}/openimageio/bin/OpenImageIO.dll')
if env['WITH_BF_OCIO']:
if not env['OURPLATFORM'] in ('win32-mingw', 'linuxcross'):
dllsources.append('${LCGDIR}/opencolorio/bin/OpenColorIO.dll')
else:
dllsources.append('${LCGDIR}/opencolorio/bin/libOpenColorIO.dll')
dllsources.append('#source/icons/blender.exe.manifest')
windlls = env.Install(dir=env['BF_INSTALLDIR'], source = dllsources)
@ -880,6 +907,9 @@ if env['OURPLATFORM'] == 'win64-mingw':
if(env['WITH_BF_OPENMP']):
dllsources.append('${LCGDIR}/binaries/libgomp-1.dll')
if env['WITH_BF_OCIO']:
dllsources.append('${LCGDIR}/opencolorio/bin/libOpenColorIO.dll')
dllsources.append('${LCGDIR}/thumbhandler/lib/BlendThumb64.dll')
dllsources.append('${LCGDIR}/binaries/libgcc_s_sjlj-1.dll')

@ -106,6 +106,14 @@ BF_OIIO_INC = '${BF_OIIO}/include'
BF_OIIO_LIB_STATIC = '${BF_OIIO_LIBPATH}/libOpenImageIO.a ${BF_OPENEXR}/lib/libIlmImf.a'
BF_OIIO_LIBPATH = '${BF_OIIO}/lib'
# Color management
WITH_BF_OCIO = True
WITH_BF_STATICOCIO = True
BF_OCIO = '/opt/ocio'
BF_OCIO_INC = '${BF_OCIO}/include'
BF_OCIO_LIB_STATIC = '${BF_OCIO_LIBPATH}/libOpenColorIO.a ${BF_OCIO_LIBPATH}/libtinyxml.a ${BF_OCIO_LIBPATH}/libyaml-cpp.a'
BF_OCIO_LIBPATH = '${BF_OCIO}/lib'
WITH_BF_BOOST = True
WITH_BF_STATICBOOST = True
BF_BOOST = '/opt/boost'

@ -82,6 +82,14 @@ WITH_BF_STATIC3DMOUSE = True
BF_3DMOUSE = '/home/sources/staticlibs/spnav'
BF_3DMOUSE_LIBPATH = '${BF_3DMOUSE}/lib32'
# Color management
WITH_BF_OCIO = True
WITH_BF_STATICOCIO = True
BF_OCIO = '/opt/ocio'
BF_OCIO_INC = '${BF_OCIO}/include'
BF_OCIO_LIB_STATIC = '${BF_OCIO_LIBPATH}/libOpenColorIO.a ${BF_OCIO_LIBPATH}/libtinyxml.a ${BF_OCIO_LIBPATH}/libyaml-cpp.a'
BF_OCIO_LIBPATH = '${BF_OCIO}/lib'
# JACK
WITH_BF_JACK = True

@ -82,6 +82,14 @@ WITH_BF_STATIC3DMOUSE = True
BF_3DMOUSE = '/home/sources/staticlibs/spnav'
BF_3DMOUSE_LIBPATH = '${BF_3DMOUSE}/lib64'
# Color management
WITH_BF_OCIO = True
WITH_BF_STATICOCIO = True
BF_OCIO = '/opt/ocio'
BF_OCIO_INC = '${BF_OCIO}/include'
BF_OCIO_LIB_STATIC = '${BF_OCIO_LIBPATH}/libOpenColorIO.a ${BF_OCIO_LIBPATH}/libtinyxml.a ${BF_OCIO_LIBPATH}/libyaml-cpp.a'
BF_OCIO_LIBPATH = '${BF_OCIO}/lib'
# JACK
WITH_BF_JACK = True

@ -106,6 +106,14 @@ BF_OIIO_INC = '${BF_OIIO}/include'
BF_OIIO_LIB_STATIC = '${BF_OIIO_LIBPATH}/libOpenImageIO.a ${BF_OPENEXR}/lib/libIlmImf.a'
BF_OIIO_LIBPATH = '${BF_OIIO}/lib'
# Color management
WITH_BF_OCIO = True
WITH_BF_STATICOCIO = True
BF_OCIO = '/opt/ocio'
BF_OCIO_INC = '${BF_OCIO}/include'
BF_OCIO_LIB_STATIC = '${BF_OCIO_LIBPATH}/libOpenColorIO.a ${BF_OCIO_LIBPATH}/libtinyxml.a ${BF_OCIO_LIBPATH}/libyaml-cpp.a'
BF_OCIO_LIBPATH = '${BF_OCIO}/lib'
WITH_BF_BOOST = True
WITH_BF_STATICBOOST = True
BF_BOOST = '/opt/boost'

@ -0,0 +1,85 @@
# - Find OpenColorIO library
# Find the native OpenColorIO includes and library
# This module defines
# OPENCOLORIO_INCLUDE_DIRS, where to find OpenColorIO.h, Set when
# OPENCOLORIO_INCLUDE_DIR is found.
# OPENCOLORIO_LIBRARIES, libraries to link against to use OpenColorIO.
# OPENCOLORIO_ROOT_DIR, The base directory to search for OpenColorIO.
# This can also be an environment variable.
# OPENCOLORIO_FOUND, If false, do not try to use OpenColorIO.
#
# also defined, but not for general use are
# OPENCOLORIO_LIBRARY, where to find the OpenColorIO library.
#=============================================================================
# Copyright 2012 Blender Foundation.
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
# If OPENCOLORIO_ROOT_DIR was defined in the environment, use it.
IF(NOT OPENCOLORIO_ROOT_DIR AND NOT $ENV{OPENCOLORIO_ROOT_DIR} STREQUAL "")
SET(OPENCOLORIO_ROOT_DIR $ENV{OPENCOLORIO_ROOT_DIR})
ENDIF()
SET(_opencolorio_FIND_COMPONENTS
OpenColorIO
yaml-cpp
tinyxml
)
SET(_opencolorio_SEARCH_DIRS
${OPENCOLORIO_ROOT_DIR}
/usr/local
/sw # Fink
/opt/local # DarwinPorts
/opt/csw # Blastwave
)
FIND_PATH(OPENCOLORIO_INCLUDE_DIR
NAMES
OpenColorIO/OpenColorIO.h
HINTS
${_opencolorio_SEARCH_DIRS}
PATH_SUFFIXES
include
)
SET(_opencolorio_LIBRARIES)
FOREACH(COMPONENT ${_opencolorio_FIND_COMPONENTS})
STRING(TOUPPER ${COMPONENT} UPPERCOMPONENT)
FIND_LIBRARY(OPENCOLORIO_${UPPERCOMPONENT}_LIBRARY
NAMES
${COMPONENT}
HINTS
${_opencolorio_SEARCH_DIRS}
PATH_SUFFIXES
lib64 lib
)
if(OPENCOLORIO_${UPPERCOMPONENT}_LIBRARY)
LIST(APPEND _opencolorio_LIBRARIES "${OPENCOLORIO_${UPPERCOMPONENT}_LIBRARY}")
endif()
ENDFOREACH()
# handle the QUIETLY and REQUIRED arguments and set OPENCOLORIO_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(OpenColorIO DEFAULT_MSG
_opencolorio_LIBRARIES OPENCOLORIO_INCLUDE_DIR)
IF(OPENCOLORIO_FOUND)
SET(OPENCOLORIO_LIBRARIES ${_opencolorio_LIBRARIES})
SET(OPENCOLORIO_INCLUDE_DIRS ${OPENCOLORIO_INCLUDE_DIR})
ENDIF(OPENCOLORIO_FOUND)
MARK_AS_ADVANCED(
OPENCOLORIO_INCLUDE_DIR
OPENCOLORIO_LIBRARY
)

@ -42,6 +42,7 @@ set(WITH_MOD_OCEANSIM OFF CACHE FORCE BOOL)
set(WITH_AUDASPACE OFF CACHE FORCE BOOL)
set(WITH_OPENAL OFF CACHE FORCE BOOL)
set(WITH_OPENCOLLADA OFF CACHE FORCE BOOL)
set(WITH_OPENCOLORIO OFF CACHE FORCE BOOL)
set(WITH_OPENMP OFF CACHE FORCE BOOL)
set(WITH_PYTHON_INSTALL OFF CACHE FORCE BOOL)
set(WITH_RAYOPTIMIZATION OFF CACHE FORCE BOOL)

@ -218,6 +218,9 @@ macro(SETUP_LIBDIRS)
if(WITH_OPENIMAGEIO)
link_directories(${OPENIMAGEIO_LIBPATH})
endif()
if(WITH_OPENCOLORIO)
link_directories(${OPENCOLORIO_LIBPATH})
endif()
if(WITH_IMAGE_OPENJPEG AND WITH_SYSTEM_OPENJPEG)
link_directories(${OPENJPEG_LIBPATH})
endif()
@ -313,6 +316,9 @@ macro(setup_liblinks
if(WITH_OPENIMAGEIO)
target_link_libraries(${target} ${OPENIMAGEIO_LIBRARIES})
endif()
if(WITH_OPENCOLORIO)
target_link_libraries(${target} ${OPENCOLORIO_LIBRARIES})
endif()
if(WITH_BOOST)
target_link_libraries(${target} ${BOOST_LIBRARIES})
endif()
@ -470,6 +476,7 @@ macro(remove_strict_flags)
if(CMAKE_COMPILER_IS_GNUCC)
remove_cc_flag("-Wstrict-prototypes")
remove_cc_flag("-Wmissing-prototypes")
remove_cc_flag("-Wunused-parameter")
remove_cc_flag("-Wwrite-strings")
remove_cc_flag("-Wundef")

@ -287,6 +287,12 @@ BF_OIIO_INC = BF_OIIO + '/include'
BF_OIIO_LIB = 'OpenImageIO'
BF_OIIO_LIBPATH = BF_OIIO + '/lib'
WITH_BF_OCIO = True
BF_OCIO = LIBDIR + '/opencolorio'
BF_OCIO_INC = BF_OCIO + '/include'
BF_OCIO_LIB = 'OpenColorIO tinyxml yaml-cpp'
BF_OCIO_LIBPATH = BF_OCIO + '/lib'
WITH_BF_BOOST = True
BF_BOOST = LIBDIR + '/boost'
BF_BOOST_INC = BF_BOOST + '/include'

@ -215,6 +215,16 @@ BF_OIIO_INC = BF_OIIO + '/include'
BF_OIIO_LIB = 'OpenImageIO'
BF_OIIO_LIBPATH = BF_OIIO + '/lib'
WITH_BF_OCIO = True
WITH_BF_STATICOCIO = False
BF_OCIO = LIBDIR + '/ocio'
if not os.path.exists(LCGDIR + '/ocio'):
WITH_BF_OCIO = False
BF_OCIO = '/usr'
BF_OCIO_INC = BF_OCIO + '/include'
BF_OCIO_LIB = 'OpenColorIO yaml-cpp tinyxml'
BF_OCIO_LIBPATH = BF_OCIO + '/lib'
WITH_BF_BOOST = True
WITH_BF_STATICBOOST = False
BF_BOOST = LIBDIR + '/boost'
@ -229,7 +239,7 @@ WITH_BF_CYCLES = WITH_BF_OIIO and WITH_BF_BOOST
WITH_BF_CYCLES_CUDA_BINARIES = False
BF_CYCLES_CUDA_NVCC = '/usr/local/cuda/bin/nvcc'
BF_CYCLES_CUDA_BINARIES_ARCH = ['sm_13', 'sm_20', 'sm_21']
BF_CYCLES_CUDA_BINARIES_ARCH = ['sm_13', 'sm_20', 'sm_21', 'sm_30']
WITH_BF_OPENMP = True

@ -159,10 +159,16 @@ BF_OIIO_INC = BF_OIIO + '/include'
BF_OIIO_LIB = 'OpenImageIO'
BF_OIIO_LIBPATH = BF_OIIO + '/lib'
WITH_BF_OCIO = True
BF_OCIO = LIBDIR + '/opencolorio'
BF_OCIO_INC = BF_OCIO + '/include'
BF_OCIO_LIB = 'OpenColorIO'
BF_OCIO_LIBPATH = BF_OCIO + '/lib'
WITH_BF_BOOST = True
BF_BOOST = LIBDIR + '/boost'
BF_BOOST_INC = BF_BOOST + '/include'
BF_BOOST_LIB = 'boost_date_time-mgw46-mt-s-1_47 boost_filesystem-mgw46-mt-s-1_47 boost_regex-mgw46-mt-s-1_47 boost_system-mgw46-mt-s-1_47 boost_thread-mgw46-mt-s-1_47'
BF_BOOST_LIB = 'boost_date_time-mgw46-mt-s-1_49 boost_filesystem-mgw46-mt-s-1_49 boost_regex-mgw46-mt-s-1_49 boost_system-mgw46-mt-s-1_49 boost_thread-mgw46-mt-s-1_49'
BF_BOOST_LIBPATH = BF_BOOST + '/lib'
#Ray trace optimization

@ -161,6 +161,12 @@ BF_OIIO_INC = '${BF_OIIO}/include'
BF_OIIO_LIB = 'OpenImageIO'
BF_OIIO_LIBPATH = '${BF_OIIO}/lib'
WITH_BF_OCIO = True
BF_OCIO = '${LIBDIR}/opencolorio'
BF_OCIO_INC = '${BF_OCIO}/include'
BF_OCIO_LIB = 'OpenColorIO'
BF_OCIO_LIBPATH = '${BF_OCIO}/lib'
WITH_BF_BOOST = True
BF_BOOST = '${LIBDIR}/boost'
BF_BOOST_INC = '${BF_BOOST}/include'

@ -159,6 +159,12 @@ BF_OIIO_INC = '${BF_OIIO}/include'
BF_OIIO_LIB = 'OpenImageIO'
BF_OIIO_LIBPATH = '${BF_OIIO}/lib'
WITH_BF_OCIO = True
BF_OCIO = LIBDIR + '/opencolorio'
BF_OCIO_INC = '${BF_OCIO}/include'
BF_OCIO_LIB = 'OpenColorIO'
BF_OCIO_LIBPATH = '${BF_OCIO}/lib'
WITH_BF_BOOST = True
BF_BOOST = LIBDIR + '/boost'
BF_BOOST_INC = BF_BOOST + '/include'

@ -158,6 +158,13 @@ BF_OIIO_LIB = 'OpenImageIO'
BF_OIIO_LIBPATH = '${BF_OIIO}/lib'
BF_OIIO_LIBPATH = '${BF_OIIO}/lib'
WITH_BF_OCIO = True
BF_OCIO = '${LIBDIR}/opencolorio'
BF_OCIO_INC = '${BF_OCIO}/include'
BF_OCIO_LIB = 'OpenColorIO'
BF_OCIO_LIBPATH = '${BF_OCIO}/lib'
BF_OCIO_LIBPATH = '${BF_OCIO}/lib'
WITH_BF_BOOST = True
BF_BOOST = '${LIBDIR}/boost'
BF_BOOST_INC = '${BF_BOOST}/include'

@ -204,6 +204,11 @@ def setup_staticlibs(lenv):
if lenv['WITH_BF_STATICOIIO']:
statlibs += Split(lenv['BF_OIIO_LIB_STATIC'])
if lenv['WITH_BF_OCIO']:
libincs += Split(lenv['BF_OCIO_LIBPATH'])
if lenv['WITH_BF_STATICOCIO']:
statlibs += Split(lenv['BF_OCIO_LIB_STATIC'])
if lenv['WITH_BF_BOOST']:
libincs += Split(lenv['BF_BOOST_LIBPATH'])
if lenv['WITH_BF_STATICBOOST']:
@ -252,6 +257,10 @@ def setup_syslibs(lenv):
if not lenv['WITH_BF_STATICOIIO']:
syslibs += Split(lenv['BF_OIIO_LIB'])
if lenv['WITH_BF_OCIO']:
if not lenv['WITH_BF_STATICOCIO']:
syslibs += Split(lenv['BF_OCIO_LIB'])
if lenv['WITH_BF_OPENEXR'] and not lenv['WITH_BF_STATICOPENEXR']:
syslibs += Split(lenv['BF_OPENEXR_LIB'])
if lenv['WITH_BF_TIFF'] and not lenv['WITH_BF_STATICTIFF']:
@ -584,6 +593,9 @@ def AppIt(target=None, source=None, env=None):
commands.getoutput(cmd)
cmd = 'cp -R %s/release/datafiles/fonts %s/%s.app/Contents/MacOS/%s/datafiles/'%(bldroot,installdir,binary,VERSION)
commands.getoutput(cmd)
if env['WITH_BF_OCIO']:
cmd = 'cp -R %s/release/datafiles/colormanagement %s/%s.app/Contents/MacOS/%s/datafiles/'%(bldroot,installdir,binary,VERSION)
commands.getoutput(cmd)
cmd = 'cp -R %s/release/scripts %s/%s.app/Contents/MacOS/%s/'%(bldroot,installdir,binary,VERSION)
commands.getoutput(cmd)

@ -163,6 +163,7 @@ def validate_arguments(args, bc):
'WITH_BF_3DMOUSE', 'WITH_BF_STATIC3DMOUSE', 'BF_3DMOUSE', 'BF_3DMOUSE_INC', 'BF_3DMOUSE_LIB', 'BF_3DMOUSE_LIBPATH', 'BF_3DMOUSE_LIB_STATIC',
'WITH_BF_CYCLES', 'WITH_BF_CYCLES_CUDA_BINARIES', 'BF_CYCLES_CUDA_NVCC', 'BF_CYCLES_CUDA_NVCC', 'WITH_BF_CYCLES_CUDA_THREADED_COMPILE',
'WITH_BF_OIIO', 'WITH_BF_STATICOIIO', 'BF_OIIO', 'BF_OIIO_INC', 'BF_OIIO_LIB', 'BF_OIIO_LIB_STATIC', 'BF_OIIO_LIBPATH',
'WITH_BF_OCIO', 'WITH_BF_STATICOCIO', 'BF_OCIO', 'BF_OCIO_INC', 'BF_OCIO_LIB', 'BF_OCIO_LIB_STATIC', 'BF_OCIO_LIBPATH',
'WITH_BF_BOOST', 'WITH_BF_STATICBOOST', 'BF_BOOST', 'BF_BOOST_INC', 'BF_BOOST_LIB', 'BF_BOOST_LIB_STATIC', 'BF_BOOST_LIBPATH',
'WITH_BF_LIBMV'
]
@ -575,6 +576,14 @@ def read_opts(env, cfg, args):
('BF_OIIO_LIBPATH', 'OIIO library path', ''),
('BF_OIIO_LIB_STATIC', 'OIIO static library', ''),
(BoolVariable('WITH_BF_OCIO', 'Build with OpenColorIO', False)),
(BoolVariable('WITH_BF_STATICOCIO', 'Staticly link to OpenColorIO', False)),
('BF_OCIO', 'OCIO root path', ''),
('BF_OCIO_INC', 'OCIO include path', ''),
('BF_OCIO_LIB', 'OCIO library', ''),
('BF_OCIO_LIBPATH', 'OCIO library path', ''),
('BF_OCIO_LIB_STATIC', 'OCIO static library', ''),
(BoolVariable('WITH_BF_BOOST', 'Build with Boost', False)),
(BoolVariable('WITH_BF_STATICBOOST', 'Staticly link to boost', False)),
('BF_BOOST', 'Boost root path', ''),
@ -584,7 +593,7 @@ def read_opts(env, cfg, args):
('BF_BOOST_LIB_STATIC', 'Boost static library', ''),
(BoolVariable('WITH_GHOST_XDND', 'Build with drag-n-drop support on Linux platforms using XDND protocol', True)),
(BoolVariable('WITH_BF_COMPOSITOR_LEGACY', 'Enable the legacy compositor', True))
(BoolVariable('WITH_BF_COMPOSITOR_LEGACY', 'Enable the legacy compositor', False))
) # end of opts.AddOptions()
return localopts

@ -40,19 +40,6 @@ set(INC_SYS
${ZLIB_INCLUDE_DIRS}
)
# XXX - FIXME
# this is a momentary hack to find unwind.h in 10.6.sdk
if(APPLE)
if(${CMAKE_OSX_DEPLOYMENT_TARGET} STREQUAL "10.6")
list(APPEND INC_SYS
${CMAKE_OSX_SYSROOT}/Developer/usr/llvm-gcc-4.2/lib/gcc/i686-apple-darwin10/4.2.1/include
)
endif()
endif()
# XXX - END
set(SRC
libmv-capi.cpp
libmv/image/array_nd.cc

@ -523,7 +523,7 @@ int libmv_refineParametersAreValid(int parameters) {
LIBMV_REFINE_RADIAL_DISTORTION_K1));
}
void libmv_solveRefineIntrinsics(libmv::Tracks *tracks, libmv::CameraIntrinsics *intrinsics,
static void libmv_solveRefineIntrinsics(libmv::Tracks *tracks, libmv::CameraIntrinsics *intrinsics,
libmv::EuclideanReconstruction *reconstruction, int refine_intrinsics,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata)
{
@ -1027,7 +1027,7 @@ void libmv_InvertIntrinsics(double focal_length, double principal_x, double prin
/* ************ point clouds ************ */
void libmvTransformToMat4(libmv::Mat3 &R, libmv::Vec3 &S, libmv::Vec3 &t, double M[4][4])
static void libmvTransformToMat4(libmv::Mat3 &R, libmv::Vec3 &S, libmv::Vec3 &t, double M[4][4])
{
for (int j = 0; j < 3; ++j)
for (int k = 0; k < 3; ++k)

@ -147,14 +147,14 @@ void libmv_CameraIntrinsicsDistortFloat(struct libmv_CameraIntrinsics *libmvIntr
/* dsitortion */
void libmv_undistortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
unsigned char *src, unsigned char *dst, int width, int height, int channels);
unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels);
void libmv_undistortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
float *src, float *dst, int width, int height, int channels);
float *src, float *dst, int width, int height, float overscan, int channels);
void libmv_distortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
unsigned char *src, unsigned char *dst, int width, int height, int channels);
unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels);
void libmv_distortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
float *src, float *dst, int width, int height, int channels);
float *src, float *dst, int width, int height, float overscan, int channels);
/* utils */
void libmv_applyCameraIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,

@ -351,9 +351,9 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
// Selects 4 virtual control points using mean and PCA.
void SelectControlPoints(const Mat3X &X_world,
Mat *X_centered,
Mat34 *X_control_points) {
static void SelectControlPoints(const Mat3X &X_world,
Mat *X_centered,
Mat34 *X_control_points) {
size_t num_points = X_world.cols();
// The first virtual control point, C0, is the centroid.
@ -377,9 +377,9 @@ void SelectControlPoints(const Mat3X &X_world,
}
// Computes the barycentric coordinates for all real points
void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
const Mat34 &X_control_points,
Mat4X *alphas) {
static void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
const Mat34 &X_control_points,
Mat4X *alphas) {
size_t num_points = X_world_centered.cols();
Mat3 C2 ;
for (size_t c = 1; c < 4; c++) {
@ -398,7 +398,7 @@ void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
}
// Estimates the coordinates of all real points in the camera coordinate frame
void ComputePointsCoordinatesInCameraFrame(
static void ComputePointsCoordinatesInCameraFrame(
const Mat4X &alphas,
const Vec4 &betas,
const Eigen::Matrix<double, 12, 12> &U,
@ -535,7 +535,16 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
vector<Vec3> ts(3);
Vec rmse(3);
// TODO(julien): Document where the "1e-3" magical constant comes from below.
// At one point this threshold was 1e-3, and caused no end of problems in
// Blender by causing frames to not resect when they would have worked fine.
// When the resect failed, the projective followup is run leading to worse
// results, and often the dreaded "flipping" where objects get flipped
// between frames. Instead, disable the check for now, always succeeding. The
// ultimate check is always reprojection error anyway.
//
// TODO(keir): Decide if setting this to infinity, effectively disabling the
// check, is the right approach. So far this seems the case.
double kSuccessThreshold = std::numeric_limits<double>::max();
// Find the first possible solution for R, t corresponding to:
// Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
@ -548,7 +557,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
Eigen::JacobiSVD<Mat> svd_of_l4(l_6x4,
Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec4 b4 = svd_of_l4.solve(rho);
if ((l_6x4 * b4).isApprox(rho, 1e-3)) {
if ((l_6x4 * b4).isApprox(rho, kSuccessThreshold)) {
if (b4(0) < 0) {
b4 = -b4;
}
@ -574,7 +583,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
Vec3 b3 = svdOfL3.solve(rho);
VLOG(2) << " rho = " << rho;
VLOG(2) << " l_6x3 * b3 = " << l_6x3 * b3;
if ((l_6x3 * b3).isApprox(rho, 1e-3)) {
if ((l_6x3 * b3).isApprox(rho, kSuccessThreshold)) {
if (b3(0) < 0) {
betas(0) = std::sqrt(-b3(0));
betas(1) = (b3(2) < 0) ? std::sqrt(-b3(2)) : 0;
@ -605,7 +614,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
Eigen::JacobiSVD<Mat> svdOfL5(l_6x5,
Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec5 b5 = svdOfL5.solve(rho);
if ((l_6x5 * b5).isApprox(rho, 1e-3)) {
if ((l_6x5 * b5).isApprox(rho, kSuccessThreshold)) {
if (b5(0) < 0) {
betas(0) = std::sqrt(-b5(0));
if (b5(2) < 0) {

@ -29,6 +29,9 @@ namespace euclidean_resection {
enum ResectionMethod {
RESECTION_ANSAR_DANIILIDIS,
// The "EPnP" algorithm by Lepetit et al.
// http://cvlab.epfl.ch/~lepetit/papers/lepetit_ijcv08.pdf
RESECTION_EPNP,
};

@ -28,7 +28,7 @@
namespace libmv {
void EliminateRow(const Mat34 &P, int row, Mat *X) {
static void EliminateRow(const Mat34 &P, int row, Mat *X) {
X->resize(2, 4);
int first_row = (row + 1) % 3;
@ -69,7 +69,7 @@ void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F) {
// HZ 11.1 pag.279 (x1 = x, x2 = x')
// http://www.cs.unc.edu/~marc/tutorial/node54.html
double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
static double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 8);
DCHECK_EQ(x1.rows(), x2.rows());

@ -40,7 +40,7 @@ namespace libmv {
* (a-x1*g)*y1 + (b-x1*h)*y2 + c-x1 = |0|
* (-x2*a+x1*d)*y1 + (-x2*b+x1*e)*y2 + -x2*c+x1*f |0|
*/
bool Homography2DFromCorrespondencesLinearEuc(
static bool Homography2DFromCorrespondencesLinearEuc(
const Mat &x1,
const Mat &x2,
Mat3 *H,

@ -124,11 +124,11 @@ class LevenbergMarquardt {
Parameters dx, x_new;
int i;
for (i = 0; results.status == RUNNING && i < params.max_iterations; ++i) {
VLOG(1) << "iteration: " << i;
VLOG(1) << "||f(x)||: " << f_(x).norm();
VLOG(1) << "max(g): " << g.array().abs().maxCoeff();
VLOG(1) << "u: " << u;
VLOG(1) << "v: " << v;
VLOG(3) << "iteration: " << i;
VLOG(3) << "||f(x)||: " << f_(x).norm();
VLOG(3) << "max(g): " << g.array().abs().maxCoeff();
VLOG(3) << "u: " << u;
VLOG(3) << "v: " << v;
AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
Solver solver(A_augmented);

@ -35,7 +35,7 @@ namespace libmv {
typedef unsigned int uint;
int featurecmp(const void *a_v, const void *b_v)
static int featurecmp(const void *a_v, const void *b_v)
{
Feature *a = (Feature*)a_v;
Feature *b = (Feature*)b_v;

@ -24,6 +24,7 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/levenberg_marquardt.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/initialize_reconstruction.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"

@ -26,6 +26,7 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/levenberg_marquardt.h"
#include "libmv/simple_pipeline/intersect.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"

@ -242,9 +242,9 @@ void InternalCompleteReconstruction(
(double)tot_resects/(max_image));
if (PipelineRoutines::Resect(reconstructed_markers, reconstruction, true)) {
num_resects++;
LG << "Ran Resect() for image " << image;
LG << "Ran final Resect() for image " << image;
} else {
LG << "Failed Resect() for image " << image;
LG << "Failed final Resect() for image " << image;
}
}
}

@ -27,6 +27,7 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/levenberg_marquardt.h"
#include "libmv/simple_pipeline/resect.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
@ -107,6 +108,10 @@ bool EuclideanResect(const vector<Marker> &markers,
// printf("Resection for image %d failed\n", markers[0].image);
LG << "Resection for image " << markers[0].image << " failed;"
<< " trying fallback projective resection.";
LG << "No fallback; failing resection for " << markers[0].image;
return false;
if (!final_pass) return false;
// Euclidean resection failed. Fall back to projective resection, which is
// less reliable but better conditioned when there are many points.

@ -18,7 +18,7 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
// Author: mierle@google.com (Keir Mierle)
// Author: mierle@gmail.com (Keir Mierle)
//
// TODO(keir): While this tracking code works rather well, it has some
// outragous inefficiencies. There is probably a 5-10x speedup to be had if a
@ -41,6 +41,85 @@
#include "libmv/multiview/homography.h"
#include "libmv/numeric/numeric.h"
// Expand the Jet functionality of Ceres to allow mixed numeric/autodiff.
//
// TODO(keir): Push this (or something similar) into upstream Ceres.
namespace ceres {
// A jet traits class to make it easier to work with mixed auto / numeric diff.
template<typename T>
struct JetOps {
static bool IsScalar() {
return true;
}
static T GetScalar(const T& t) {
return t;
}
static void SetScalar(const T& scalar, T* t) {
*t = scalar;
}
static void ScaleDerivative(double scale_by, T *value) {
// For double, there is no derivative to scale.
}
};
template<typename T, int N>
struct JetOps<Jet<T, N> > {
static bool IsScalar() {
return false;
}
static T GetScalar(const Jet<T, N>& t) {
return t.a;
}
static void SetScalar(const T& scalar, Jet<T, N>* t) {
t->a = scalar;
}
static void ScaleDerivative(double scale_by, Jet<T, N> *value) {
value->v *= scale_by;
}
};
template<typename FunctionType, int kNumArgs, typename ArgumentType>
struct Chain {
static ArgumentType Rule(const FunctionType &f,
const FunctionType dfdx[kNumArgs],
const ArgumentType x[kNumArgs]) {
// In the default case of scalars, there's nothing to do since there are no
// derivatives to propagate.
return f;
}
};
// XXX Add documentation here!
template<typename FunctionType, int kNumArgs, typename T, int N>
struct Chain<FunctionType, kNumArgs, Jet<T, N> > {
static Jet<T, N> Rule(const FunctionType &f,
const FunctionType dfdx[kNumArgs],
const Jet<T, N> x[kNumArgs]) {
// x is itself a function of another variable ("z"); what this function
// needs to return is "f", but with the derivative with respect to z
// attached to the jet. So combine the derivative part of x's jets to form
// a Jacobian matrix between x and z (i.e. dx/dz).
Eigen::Matrix<T, kNumArgs, N> dxdz;
for (int i = 0; i < kNumArgs; ++i) {
dxdz.row(i) = x[i].v.transpose();
}
// Map the input gradient dfdx into an Eigen row vector.
Eigen::Map<const Eigen::Matrix<FunctionType, 1, kNumArgs> >
vector_dfdx(dfdx, 1, kNumArgs);
// Now apply the chain rule to obtain df/dz. Combine the derivative with
// the scalar part to obtain f with full derivative information.
Jet<T, N> jet_f;
jet_f.a = f;
jet_f.v = vector_dfdx.template cast<T>() * dxdz; // Also known as dfdz.
return jet_f;
}
};
} // namespace ceres
namespace libmv {
using ceres::Jet;
@ -57,6 +136,7 @@ TrackRegionOptions::TrackRegionOptions()
sigma(0.9),
num_extra_points(0),
regularization_coefficient(0.0),
minimum_corner_shift_tolerance_pixels(0.005),
image1_mask(NULL) {
}
@ -108,45 +188,93 @@ static T SampleWithDerivative(const FloatImage &image_and_gradient,
}
template<typename Warp>
class BoundaryCheckingCallback : public ceres::IterationCallback {
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
BoundaryCheckingCallback(const FloatImage& image2,
const Warp &warp,
const double *x1, const double *y1)
: image2_(image2), warp_(warp), x1_(x1), y1_(y1) {}
TerminationCheckingCallback(const TrackRegionOptions &options,
const FloatImage& image2,
const Warp &warp,
const double *x1, const double *y1)
: options_(options), image2_(image2), warp_(warp), x1_(x1), y1_(y1),
have_last_successful_step_(false) {}
virtual ceres::CallbackReturnType operator()(
const ceres::IterationSummary& summary) {
// If the step wasn't successful, there's nothing to do.
if (!summary.step_is_successful) {
return ceres::SOLVER_CONTINUE;
}
// Warp the original 4 points with the current warp into image2.
double x2[4];
double y2[4];
for (int i = 0; i < 4; ++i) {
warp_.Forward(warp_.parameters, x1_[i], y1_[i], x2 + i, y2 + i);
}
// Enusre they are all in bounds.
// Ensure the corners are all in bounds.
if (!AllInBounds(image2_, x2, y2)) {
LG << "Successful step fell outside of the pattern bounds; aborting.";
return ceres::SOLVER_ABORT;
}
// Ensure the minimizer is making large enough shifts to bother continuing.
// Ideally, this check would happen on the parameters themselves which
// Ceres supports directly; however, the mapping from parameter change
// magnitude to corner movement in pixels is not a simple norm. Hence, the
// need for a stateful callback which tracks the last successful set of
// parameters (and the position of the projected patch corners).
if (have_last_successful_step_) {
// Compute the maximum shift of any corner in pixels since the last
// successful iteration.
double max_change_pixels = 0;
for (int i = 0; i < 4; ++i) {
double dx = x2[i] - x2_last_successful_[i];
double dy = y2[i] - y2_last_successful_[i];
double change_pixels = dx*dx + dy*dy;
if (change_pixels > max_change_pixels) {
max_change_pixels = change_pixels;
}
}
max_change_pixels = sqrt(max_change_pixels);
LG << "Max patch corner shift is " << max_change_pixels;
// Bail if the shift is too small.
if (max_change_pixels < options_.minimum_corner_shift_tolerance_pixels) {
LG << "Max patch corner shift is " << max_change_pixels
<< " from the last iteration; returning success.";
return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
}
}
// Save the projected corners for checking on the next successful iteration.
for (int i = 0; i < 4; ++i) {
x2_last_successful_[i] = x2[i];
y2_last_successful_[i] = y2[i];
}
have_last_successful_step_ = true;
return ceres::SOLVER_CONTINUE;
}
private:
const TrackRegionOptions &options_;
const FloatImage &image2_;
const Warp &warp_;
const double *x1_;
const double *y1_;
bool have_last_successful_step_;
double x2_last_successful_[4];
double y2_last_successful_[4];
};
template<typename Warp>
class PixelDifferenceCostFunctor {
public:
PixelDifferenceCostFunctor(const TrackRegionOptions &options,
const FloatImage &image_and_gradient1,
const FloatImage &image_and_gradient2,
const Mat3 &canonical_to_image1,
int num_samples_x,
int num_samples_y,
const Warp &warp)
const FloatImage &image_and_gradient1,
const FloatImage &image_and_gradient2,
const Mat3 &canonical_to_image1,
int num_samples_x,
int num_samples_y,
const Warp &warp)
: options_(options),
image_and_gradient1_(image_and_gradient1),
image_and_gradient2_(image_and_gradient2),
@ -1044,6 +1172,9 @@ void CreateBrutePattern(const double *x1, const double *y1,
// correlation. Instead, this is a dumb implementation. Surprisingly, it is
// fast enough in practice.
//
// Returns true if any alignment was found, and false if the projected pattern
// is zero sized.
//
// TODO(keir): The normalization is less effective for the brute force search
// than it is with the Ceres solver. It's unclear if this is a bug or due to
// the original frame being too different from the reprojected reference in the
@ -1054,7 +1185,7 @@ void CreateBrutePattern(const double *x1, const double *y1,
// totally different warping interface, since access to more than a the source
// and current destination frame is necessary.
template<typename Warp>
void BruteTranslationOnlyInitialize(const FloatImage &image1,
bool BruteTranslationOnlyInitialize(const FloatImage &image1,
const FloatImage *image1_mask,
const FloatImage &image2,
const int num_extra_points,
@ -1100,6 +1231,7 @@ void BruteTranslationOnlyInitialize(const FloatImage &image1,
int best_c = -1;
int w = pattern.cols();
int h = pattern.rows();
for (int r = 0; r < (image2.Height() - h); ++r) {
for (int c = 0; c < (image2.Width() - w); ++c) {
// Compute the weighted sum of absolute differences, Eigen style. Note
@ -1124,8 +1256,12 @@ void BruteTranslationOnlyInitialize(const FloatImage &image1,
}
}
}
CHECK_NE(best_r, -1);
CHECK_NE(best_c, -1);
// This mean the effective pattern area is zero. This check could go earlier,
// but this is less code.
if (best_r == -1 || best_c == -1) {
return false;
}
LG << "Brute force translation found a shift. "
<< "best_c: " << best_c << ", best_r: " << best_r << ", "
@ -1140,6 +1276,7 @@ void BruteTranslationOnlyInitialize(const FloatImage &image1,
x2[i] += best_c - origin_x;
y2[i] += best_r - origin_y;
}
return true;
}
} // namespace
@ -1191,12 +1328,19 @@ void TemplatedTrackRegion(const FloatImage &image1,
if (SearchAreaTooBigForDescent(image2, x2, y2) &&
options.use_brute_initialization) {
LG << "Running brute initialization...";
BruteTranslationOnlyInitialize<Warp>(image_and_gradient1,
options.image1_mask,
image2,
options.num_extra_points,
options.use_normalized_intensities,
x1, y1, x2, y2);
bool found_any_alignment = BruteTranslationOnlyInitialize<Warp>(
image_and_gradient1,
options.image1_mask,
image2,
options.num_extra_points,
options.use_normalized_intensities,
x1, y1, x2, y2);
if (!found_any_alignment) {
LG << "Brute failed to find an alignment; pattern too small. "
<< "Failing entire track operation.";
result->termination = TrackRegionResult::INSUFFICIENT_PATTERN_AREA;
return;
}
for (int i = 0; i < 4; ++i) {
LG << "P" << i << ": (" << x1[i] << ", " << y1[i] << "); brute ("
<< x2[i] << ", " << y2[i] << "); (dx, dy): (" << (x2[i] - x1[i])
@ -1260,14 +1404,15 @@ void TemplatedTrackRegion(const FloatImage &image1,
// Configure the solve.
ceres::Solver::Options solver_options;
solver_options.linear_solver_type = ceres::DENSE_QR;
solver_options.linear_solver_type = ceres::DENSE_NORMAL_CHOLESKY;
solver_options.max_num_iterations = options.max_iterations;
solver_options.update_state_every_iteration = true;
solver_options.parameter_tolerance = 1e-16;
solver_options.function_tolerance = 1e-16;
// Prevent the corners from going outside the destination image.
BoundaryCheckingCallback<Warp> callback(image2, warp, x1, y1);
// Prevent the corners from going outside the destination image and
// terminate if the optimizer is making tiny moves (converged).
TerminationCheckingCallback<Warp> callback(options, image2, warp, x1, y1);
solver_options.callbacks.push_back(&callback);
// Run the solve.
@ -1290,11 +1435,21 @@ void TemplatedTrackRegion(const FloatImage &image1,
// TODO(keir): Update the result statistics.
// TODO(keir): Add a normalize-cross-correlation variant.
CHECK_NE(summary.termination_type, ceres::USER_ABORT) << "Libmv bug.";
if (summary.termination_type == ceres::USER_ABORT) {
result->termination = TrackRegionResult::FELL_OUT_OF_BOUNDS;
return;
}
// This happens when the minimum corner shift tolerance is reached. Due to
// how the tolerance is computed this can't be done by Ceres. So return the
// same termination enum as Ceres, even though this is slightly different
// than Ceres's parameter tolerance, which operates on the raw parameter
// values rather than the pixel shifts of the patch corners.
if (summary.termination_type == ceres::USER_SUCCESS) {
result->termination = TrackRegionResult::PARAMETER_TOLERANCE;
return;
}
#define HANDLE_TERMINATION(termination_enum) \
if (summary.termination_type == ceres::termination_enum) { \
result->termination = TrackRegionResult::termination_enum; \
@ -1377,11 +1532,11 @@ bool SamplePlanarPatch(const FloatImage &image,
image_position(0),
&(*patch)(r, c, 0));
if (mask) {
float maskValue = SampleLinear(*mask, image_position(1),
image_position(0), 0);
float mask_value = SampleLinear(*mask, image_position(1),
image_position(0), 0);
for (int d = 0; d < image.Depth(); d++)
(*patch)(r, c, d) *= maskValue;
(*patch)(r, c, d) *= mask_value;
}
}
}

@ -90,6 +90,11 @@ struct TrackRegionOptions {
// If zero, no regularization is used.
double regularization_coefficient;
// If the maximum shift of any patch corner between successful iterations of
// the solver is less than this amount, then the tracking is declared
// successful. The solver termination becomes PARAMETER_TOLERANCE.
double minimum_corner_shift_tolerance_pixels;
// If non-null, this is used as the pattern mask. It should match the size of
// image1, even though only values inside the image1 quad are examined. The
// values must be in the range 0.0 to 0.1.
@ -111,6 +116,7 @@ struct TrackRegionResult {
DESTINATION_OUT_OF_BOUNDS,
FELL_OUT_OF_BOUNDS,
INSUFFICIENT_CORRELATION,
INSUFFICIENT_PATTERN_AREA,
CONFIGURATION_ERROR,
};
Termination termination;

@ -38,6 +38,7 @@ set(INC_SYS
)
set(SRC
internal/ceres/array_utils.cc
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_jacobian_writer.cc
internal/ceres/block_jacobi_preconditioner.cc
@ -53,16 +54,19 @@ set(SRC
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/corrector.cc
internal/ceres/cxsparse.cc
internal/ceres/dense_normal_cholesky_solver.cc
internal/ceres/dense_qr_solver.cc
internal/ceres/dense_sparse_matrix.cc
internal/ceres/detect_structure.cc
internal/ceres/dogleg_strategy.cc
internal/ceres/evaluator.cc
internal/ceres/file.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/implicit_schur_complement.cc
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/levenberg_marquardt.cc
internal/ceres/levenberg_marquardt_strategy.cc
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_operator.cc
internal/ceres/linear_solver.cc
@ -70,6 +74,7 @@ set(SRC
internal/ceres/loss_function.cc
internal/ceres/normal_prior.cc
internal/ceres/partitioned_matrix_view.cc
internal/ceres/polynomial_solver.cc
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/program.cc
@ -88,6 +93,8 @@ set(SRC
internal/ceres/stringprintf.cc
internal/ceres/suitesparse.cc
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/trust_region_minimizer.cc
internal/ceres/trust_region_strategy.cc
internal/ceres/types.cc
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility.cc
@ -96,6 +103,8 @@ set(SRC
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/cost_function.h
include/ceres/crs_matrix.h
include/ceres/fpclassify.h
include/ceres/internal/autodiff.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
@ -114,6 +123,7 @@ set(SRC
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/types.h
internal/ceres/array_utils.h
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobian_writer.h
internal/ceres/block_jacobi_preconditioner.h
@ -131,10 +141,13 @@ set(SRC
internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/conjugate_gradients_solver.h
internal/ceres/corrector.h
internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
internal/ceres/dense_normal_cholesky_solver.h
internal/ceres/dense_qr_solver.h
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.h
internal/ceres/evaluator.h
internal/ceres/file.h
internal/ceres/gradient_checking_cost_function.h
@ -143,7 +156,7 @@ set(SRC
internal/ceres/implicit_schur_complement.h
internal/ceres/integral_types.h
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/levenberg_marquardt.h
internal/ceres/levenberg_marquardt_strategy.h
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.h
internal/ceres/linear_solver.h
@ -153,6 +166,7 @@ set(SRC
internal/ceres/mutex.h
internal/ceres/parameter_block.h
internal/ceres/partitioned_matrix_view.h
internal/ceres/polynomial_solver.h
internal/ceres/problem_impl.h
internal/ceres/program_evaluator.h
internal/ceres/program.h
@ -168,10 +182,13 @@ set(SRC
internal/ceres/solver_impl.h
internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.h
internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.h
internal/ceres/suitesparse.h
internal/ceres/triplet_sparse_matrix.h
internal/ceres/trust_region_minimizer.h
internal/ceres/trust_region_strategy.h
internal/ceres/visibility_based_preconditioner.h
internal/ceres/visibility.h
)
@ -203,7 +220,7 @@ if(WIN32)
if(NOT MINGW)
list(APPEND INC
third_party/msinttypes
../msinttypes
)
endif()
else()
@ -214,11 +231,30 @@ endif()
add_definitions(
-DCERES_HAVE_PTHREAD
-D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {"
-D"CERES_HASH_NAMESPACE_END=}}"
-DCERES_NO_SUITESPARSE
-DCERES_DONT_HAVE_PROTOCOL_BUFFERS
-DCERES_NO_CXSPARSE
-DCERES_NO_PROTOCOL_BUFFERS
-DCERES_RESTRICT_SCHUR_SPECIALIZATION
)
if(MSVC10)
add_definitions(
-D"CERES_HASH_NAMESPACE_START=namespace std {"
-D"CERES_HASH_NAMESPACE_END=}"
)
else()
add_definitions(
-D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {"
-D"CERES_HASH_NAMESPACE_END=}}"
)
endif()
if(APPLE)
if(CMAKE_OSX_DEPLOYMENT_TARGET STREQUAL "10.5")
add_definitions(
-DCERES_NO_TR1
)
endif()
endif()
blender_add_lib(extern_ceres "${SRC}" "${INC}" "${INC_SYS}")

@ -1,324 +1,524 @@
commit ca72152362ae1f4b9928c012e74b4d49d094a4ca
Merge: d297f8d 0a04199
Author: Keir Mierle <mierle@gmail.com>
Date: Wed May 9 13:10:59 2012 -0700
commit 552f9f85bba89f00ca307bc18fbda1dff23bd0e4
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Aug 31 07:27:22 2012 -0700
Merge branch 'master' into windows
commit 0a04199ef279cc9ea97f665fed8e7fae717813c3
Merge: fdeb577 f2571f1
Author: Keir Mierle <mierle@gmail.com>
Date: Wed May 9 12:54:56 2012 -0700
Merge branch 'master' of https://code.google.com/p/ceres-solver
commit fdeb5772cc5eeebca4d776d220d80cc91b6d0f74
Author: Keir Mierle <mierle@gmail.com>
Date: Wed May 9 07:38:07 2012 -0700
Support varying numbers of residuals in autodiff.
Various minor bug fixes to the solver logic.
This commit modifies the only function in autodiff that takes a
templated number of outputs (i.e. residuals) and makes that
template parameter a normal parameter. With that change, it
is a trivial matter to support a dynamic number of residuals.
1. CostFunction returning false is handled better.
If only the cost is being evaluated, it is possible to
use the false value as an infinite value signal/outside
a region of validity. This allows a weak form of constraint
handling. Useful for example in handling infinities.
The API for dynamic residuals is to pass a fake number of
residuals as the second template argument to
AutoDiffCostFunction, and to pass the real number of
parameters as a second constructor argument.
commit da3e0563cc12e08e7b3e0fbf11d9cc8cfe9658aa
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed May 9 11:57:47 2012 -0700
Typo corrections in the documentation from Bing
commit aa9526d8e8fb34c23d63e3af5bf9239b0c4ea603
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue May 8 21:22:09 2012 -0700
Share search paths across various library searches.
Fix typos in glog search.
Split the error messages for include and lib.
Enable building of tests by default.
Made building on homebrew installations a bit better.
Remove temporary variables for glog and gflags.
commit f2571f186850ed3dd316236ac4be488979df7d30
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed May 9 11:57:47 2012 -0700
Typo corrections in the documentation from Bing
commit 8f7f11ff7d07737435428a2620c52419cf99f98e
Merge: e6c17c4 eaccbb3
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed May 9 11:34:15 2012 -0700
Merge branch 'master' of https://code.google.com/p/ceres-solver
commit e6c17c4c9d9307218f6f739cea39bc2d87733d4d
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue May 8 21:22:09 2012 -0700
Share search paths across various library searches.
Fix typos in glog search.
Split the error messages for include and lib.
Enable building of tests by default.
Made building on homebrew installations a bit better.
Remove temporary variables for glog and gflags.
commit eaccbb345614c0d24c5e21fa931f470cfda874df
Author: Keir Mierle <mierle@gmail.com>
Date: Wed May 9 05:31:29 2012 -0700
Remove unused template parameter from VariadicEvaluate.
commit 82f4b88c34b0b2cf85064e5fc20e374e978b2e3b
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sun May 6 21:05:28 2012 -0700
Extend support writing linear least squares problems to disk.
2. Changed the way how the slop around zero when model_cost
is larger than the current cost. Relative instead of absolute
tolerances are used. The same logic is propagated how the
corresponding clamping of the model_cost is done.
1. Make the mechanism for writing problems to disk, generic and
controllable using an enum DumpType visible in the API.
3. Fixed a minor indexing bug in nist.cc.
2. Instead of single file containing protocol buffers, now matrices can
be written in a matlab/octave friendly format. This is now the default.
4. Some minor logging fixes to nist.cc to make it more
compatible with the rest of ceres.
3. The support for writing problems to disk is moved into
linear_least_squares_problem.cc/h
Together these changes, take the successful solve count from
41/54 to 46/54 and eliminate all NUMERICAL_FAILURE problems.
4. SparseMatrix now has a ToTextFile virtual method which is
implemented by each of its subclasses to write a (i,j,s) triplets.
5. Minor changes to simple_bundle_adjuster to enable logging at startup.
Change-Id: If94170ea4731af5b243805c0200963dd31aa94a7
commit d297f8d3d3f5025c24752f0f4c1ec2469a769f99
Merge: 7e74d81 f8bd7fa
Author: Keir Mierle <mierle@gmail.com>
Date: Tue May 8 05:39:56 2012 -0700
Merge branch 'master' into windows
commit f8bd7fa9aa9dbf64b6165606630287cf8cf21194
Author: Keir Mierle <mierle@gmail.com>
Date: Tue May 8 05:39:32 2012 -0700
Small tweaks to the block jacobi preconditioner.
commit 7e74d81ad57a159f14110eb5348b3bc7990b8bd4
Merge: ecd7c8d e2a6cdc
Author: Keir Mierle <mierle@gmail.com>
Date: Mon May 7 07:02:49 2012 -0700
Merge branch 'master' into windows
commit e2a6cdc0816af9d0c77933f5017f137da3d52a35
Author: Keir Mierle <mierle@gmail.com>
Date: Mon May 7 06:39:56 2012 -0700
Address some of the comments on CGNR patch
- Rename BlockDiagonalPreconditioner to BlockJacobiPreconditioner
- Include the diagonal in the block jacobi preconditioner.
- Better flag help for eta.
- Enable test for CGNR
- Rename CONJUGATE_GRADIENTS to CGNR.
- etc.
commit 1b95dc580aa5d89be021c0915e26df83f18013bb
Merge: 211812a 7646039
Author: Keir Mierle <mierle@gmail.com>
Date: Mon May 7 04:34:10 2012 -0700
Merge branch 'master' of https://code.google.com/p/ceres-solver
commit 211812a57360d2011cbcfd115cd55e0eb73600db
Author: Keir Mierle <mierle@gmail.com>
Date: Mon May 7 04:33:50 2012 -0700
Better error handling in bundle_adjuster.cc
commit 7646039ad9672b267495f5b31925473ad3022ac8
commit 0b776b5cc9634d3b88d623905b96006f7647ce3e
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sun May 6 22:02:19 2012 -0700
Date: Thu Aug 30 15:26:17 2012 -0700
Kashif's corrections to the docs
commit 0d2d34148d10c5c7e924b3ca82ad2b237573ef64
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sun May 6 21:16:03 2012 -0700
glog minimum version requirements
Update docs.
Building Ceres requires version 0.3.1 or better of glog.
Fedora 16 ships with a busted version 0.3.
Change-Id: I69d50bcd37aed3bea2190ca614f023e83172901b
commit 2d7176ad7c8fb7238ca8abd6de73415d95877494
Author: Petter Strandmark <petter.strandmark@gmail.com>
Date: Thu Aug 30 19:51:24 2012 -0700
max_consecutive_nonmonotonic_steps should be int
issue 15 contains the gory details.
Found via Visual Studio warning.
Added a note to the build documentation to this effect.
Change-Id: Id2cd7de562dfc8cd35df5d5f5220dd2d7350eb2c
commit 39efc5ec4b64b8f5a2c5a3dbacdbc45421221547
Author: Keir Mierle <mierle@gmail.com>
Date: Sun May 6 16:09:52 2012 -0700
Fix tests broken by the CGNR change.
commit 3faa08b7f7c4ac73661c6a15a6824c12080dfcb1
commit 1a89bcc94e88933f89b20427a45bc40cdd23c056
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sun May 6 16:08:22 2012 -0700
Date: Thu Aug 30 15:26:17 2012 -0700
Formatting fixed based on Keir's comments and extended the tests
commit 4f21c68409bc478c431a9b6aedf9e5cfdf11d2f3
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sun May 6 15:33:47 2012 -0700
Fix the struct weak ordering used by independent set ordering, tests for it
commit 887b156b917ccd4c172484452b059d33ea45f4f0
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sun May 6 15:14:47 2012 -0700
fix he degree ordering routine
commit ecd7c8df2af19404dc394b36bbe96e9db3bce840
Author: Keir Mierle <mierle@gmail.com>
Date: Sun May 6 00:09:41 2012 -0700
First step towards windows compatibilty
Better reporting on the NIST problems.
This adds some small changes to Ceres to make it mostly
compile on Windows. There are still issues with the
hash map use in schur_ordering.cc but I will fix those
shortly.
Change-Id: I7cf774ec3242c0612dbe52fc233c3fc6cff3f031
commit f7898fba1b92f0e996571b5bfa22a37f5e3644de
Author: Keir Mierle <mierle@gmail.com>
Date: Sat May 5 20:55:08 2012 -0700
commit ea11704857a1e4a735e096896e4d775d83981499
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Aug 29 18:18:48 2012 -0700
Add a general sparse iterative solver: CGNR
Basic harness for testing NIST problems.
This adds a new LinearOperator which implements symmetric
products of a matrix, and a new CGNR solver to leverage
CG to directly solve the normal equations. This also
includes a block diagonal preconditioner. In experiments
on problem-16, the non-preconditioned version is about
1/5 the speed of SPARSE_SCHUR, and the preconditioned
version using block cholesky is about 20% slower than
SPARSE_SCHUR.
Change-Id: I5baaa24dbf0506ceedf4a9be4ed17c84974d71a1
commit 0a359d6198d257776a8831c3eb98f64ee91cf836
Author: Keir Mierle <mierle@gmail.com>
Date: Sat May 5 20:33:46 2012 -0700
commit 98bf14d2b95386c2c4a6c29154637943dae4c36c
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Aug 30 10:26:44 2012 -0700
Comment formatting.
commit db4ec9312bb2f1ca7b2337812f6bad6cdd75b227
Author: Keir Mierle <mierle@gmail.com>
Date: Sat May 5 20:33:16 2012 -0700
Comment formatting
commit f10163aaf3e57f52551bcd60bbdae873890a49dd
Author: Keir Mierle <mierle@gmail.com>
Date: Fri May 4 21:33:53 2012 -0700
Warn about disabled schur specializations.
Miscellaneous fixes.
This commit brought to you from 30,000ft.
Change-Id: I521e11f2d20bf24960bbc6b5dab4ec8bb1503d23
commit ad7b2b4aaf3ccc51f2b854febd53a9df54686cfe
Author: Keir Mierle <mierle@gmail.com>
Date: Fri May 4 20:15:28 2012 -0700
commit 1e3cbd9a4442cdd8fda43a7fb452f19dac8c74af
Author: Petter Strandmark <strandmark@google.com>
Date: Wed Aug 29 09:39:56 2012 -0700
Add vim swapfiles to .gitignore
commit 6447219826bf6e47b0c99d9ff0eaf5e2ba573d79
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu May 3 21:53:07 2012 -0700
1. Changes the tutorial to refer to BriefReport.
2. Some of the enums have commas at the end.
3. Fix a bug in the default value of circle_fit.cc in the examples.
commit 30c5f93c7f88dec49f76168663372772e06f17f5
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu May 3 10:44:43 2012 -0700
Rework the glog and gtest path checking to be consistent with the rest of the file and disable the dashboard support enabled by the earlier ctesting related patch.
commit f10b033eb4aca77919987bc551d16d8a88b10110
Merge: cc38774 e0a52a9
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu May 3 08:45:20 2012 -0700
Merge branch 'ctest'
commit e0a52a993394e73bc7f7db8d520728926feab83e
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu May 3 08:43:34 2012 -0700
Arnaus Gelas' patch to add better path searching for gflags and glog
commit a9b8e815e1c026599734510399b10f4cf014c9cd
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu May 3 08:41:52 2012 -0700
Arnaus Gelas' patch to add .gitignore
commit a0cefc3347c32b2065053bbaff4f34d11529d931
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu May 3 08:38:33 2012 -0700
Arnaus Gelas' patch to move to Ctest
commit cc38774d74e287704915282425fbd16818a72ec3
Author: Keir Mierle <mierle@gmail.com>
Date: Thu May 3 01:27:50 2012 -0700
Clarify ProgramEvaluator comments.
commit 017c9530df557863f78212fb5ccd02814baa9fa8
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed May 2 08:21:59 2012 -0700
Mac OS X build instructions are much simpler, as homebrew takes care of gflags when glog is brought in. Also CMAKE does not need any flags to do the default thing
commit 92d5ab5f8ae6fe355c30b606a5f230415ee0494b
Author: Keir Mierle <mierle@gmail.com>
Date: Tue May 1 18:33:08 2012 -0700
Link BLAS explicitly on non-Mac platforms
Caching the symbolic Cholesky factorization when using CXSparse
Fixes issue #3.
commit df3e54eb4a6b001b7f0560a2da73a5bd7f18615e
Author: Keir Mierle <mierle@gmail.com>
Date: Tue May 1 18:22:51 2012 -0700
Fix link order of CHOLMOD
Average factorization times for bundle adjustment test problem:
SuiteSparse: 0.2794 s.
CXSparse: 0.4039 s.
CXSparse cached: 0.2399 s.
This was working by accident due to dynamic linking. Fixes issue #2.
commit f477a3835329e2b48eb20c34c631a480b0f0d5bf
Author: Keir Mierle <mierle@gmail.com>
Date: Tue May 1 18:10:48 2012 -0700
Fix Eigen search paths
CXSparse will still be slower, though, because it has to compute
the transpose and J^T * J.
Fixes issue #1 on http://code.google.com/p/ceres-solver.
Change-Id: If9cdaa3dd520bee84b56e5fd4953b56a93db6bde
commit 17fbc8ebb894c1d22bb3b0b02ea1394b580120f8
commit 8b64140878ccd1e183d3715c38942a81fdecefde
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue May 1 00:21:19 2012 -0700
Date: Wed Aug 29 05:41:22 2012 -0700
Minor changes to the documentation. Formatting, and typos.
Documentation update
Change-Id: I271a0422e7f6f42bcfd1dc6b5dc10c7a18f6a179
commit 8ebb0730388045570f22b89fe8672c860cd2ad1b
commit a5353acd85a9fd19370b3d74035d87b0f0bac230
Author: Petter Strandmark <petter.strandmark@gmail.com>
Date: Tue Aug 28 18:16:41 2012 -0700
Adding gflags include to test_util.cc
test_util seems to need gflags.
Change-Id: I0c4757960f8ac69ad599c138aea58e3c88a4ea28
commit 87ca1b2ba28ec512752bbcf5fc994ce1434eb765
Author: Petter Strandmark <petter.strandmark@gmail.com>
Date: Tue Aug 28 18:05:20 2012 -0700
Changing random.h to use cstdlib for Windows compability.
As discussed with Sameer today.
Change-Id: If3d0284830c6591c71cc77b8400cafb45c0da61f
commit aeb00a07323808a0a1816e733ad18a87d5109ea3
Author: Petter Strandmark <strandmark@google.com>
Date: Mon Aug 27 22:22:57 2012 -0700
Removing gomp for Visual Studio
Linking currently fails in Visual Studio due to a missing library
"gomp.lib". This is not needed in Visual Studio. OpenMP works
without it.
Change-Id: I39e204a8dd4f1b7425df7d4b222d86a8bb961432
commit 6f362464ba99b800494d2f15c27768a342ddaa68
Author: Markus Moll <markus.moll@esat.kuleuven.be>
Date: Tue Aug 28 01:03:38 2012 +0200
Add some tests for DoglegStrategy.
Not necessarily a complete set.
Change-Id: I14eb3a38c6fe976c8212f3934655411b6d1e0aa4
commit 122cf836a6dc9726489ce2fbecc6143bddc1caaf
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Aug 24 16:28:27 2012 -0700
Documentation update.
Change-Id: I0a3c5ae4bc981a8f5bdd5a8905f923dc5f09a024
commit 69081719f73da8de2935774a42d237837a91952a
Author: Keir Mierle <mierle@gmail.com>
Date: Mon Apr 30 23:09:08 2012 -0700
Date: Mon Aug 27 13:28:56 2012 -0700
Initial commit of Ceres Solver.
Remove unnecessary overload for hash<>
The overload for pointers in hash tables was applied in normal
usage of schur_ordering.cc. However, the tests did not include the
overload since they only included collections_port.h. As a result,
the routines in schur_ordering.cc were using a different hash
function than that inside the tests.
The fix is to remove the specialization. If this breaks one of the
compiler configurations, we will find a workaround at that time.
Change-Id: Idbf60415d5e2aec0c865b514ad0c577d21b91405
commit 1762420b6ed76b1c4d30b913b2cac1927b666534
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Aug 22 10:01:31 2012 -0700
Update changelog.
Change-Id: Idf5af69d5a9dbe35f58e30a8afcbfcd29bb7ebfe
commit 976ab7aca908309b8282cb40bc080ca859136854
Author: Keir Mierle <mierle@gmail.com>
Date: Thu Aug 23 18:21:36 2012 -0700
Remove Google-era vestigial unit test.
Change-Id: Ia7a295a5c759a17c1675a3055d287d3e40e9e0fe
commit 6ad6257de0e2152ac5e77dc003758de45187d6ea
Author: Keir Mierle <mierle@gmail.com>
Date: Wed Aug 22 11:10:31 2012 -0700
Add a workaround for an Android NDK compiler bug.
On certain NDK build configurations, one of the innermost
parts of the Schur eliminator would get compiled
incorrectly. The compiler changed a -= to a +=.
The normal Ceres unit tests caught the problem; however,
since it is not possible to build the tests with the NDK
(only with the standalone toolchain) this was difficult to
track down. Finding the issue involved pasting the schur
eliminator unit test inside of solver_impl.cc and other such
hacks.
Change-Id: Ie91bb545d74fe39f0c8cbd1a6eb69ee4d8b25fb2
commit aecb2dc92b4aa7f3bf77a1ac918e62953602392b
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Aug 22 10:08:17 2012 -0700
Fix relative path bug in bibtex call.
Change-Id: I0d31786564320a6831259bcdf4c75a6b665c43ad
commit 1e2892009e591804df6286caebd5c960e7e3b099
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Aug 21 18:00:54 2012 -0700
Update Summary::FullReport to report dogleg type.
Change-Id: I0b4be8d7486c1c4b36b299693b3fe8b0d3426537
commit 295ade1122a86b83e1ea605d5ca394f315874717
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Aug 22 06:51:22 2012 -0700
Fix Eigen3 Row/Column Major storage issue.
Eigen3 does not allow column vectors to be stored in row-major
format. NumericDiffCostFunction by default stores its Jacobian
matrices in row-major format. This works fine if the residual
contains more than one variable. But if the residual block
depends on one variable and has more than one residuals, the
resulting Jacobian matrix is a column matrix in row-major format
resulting in a compile time error.
The fix is to check the template parameters and switch to column-major
storage as needed.
Thanks to Lena Gieseke for reporting this.
Change-Id: Icc51c5b38e1f3609e0e1ecb3c4e4a02aecd72c3b
commit 9ad27e8e9fb1bbd2054e2f6ae37623e01428f1c0
Author: Arnaud Gelas <arnaudgelas@gmail.com>
Date: Tue Aug 21 09:56:30 2012 +0200
Add one uninstall target to remove all installed files
Change-Id: Ifcf89a6c27b25f28403d95a50e29c093a525298f
commit 0c3a748ee49e04fe334f8f5a433649d18003d550
Author: Markus Moll <markus.moll@esat.kuleuven.be>
Date: Tue Aug 21 14:44:59 2012 +0200
Allow equal lower and upper bound for diagonal scaling.
This way, setting the lower and upper bound both to 1.0, one can disable
the automatic trust region scaling.
Change-Id: Ifa317a6911b813a89c1cf7fdfde25af603705319
commit 3d644b76adefac6475b91dc53c3ae5e01c4f4d66
Author: Arnaud Gelas <arnaudgelas@gmail.com>
Date: Thu Aug 16 17:33:21 2012 +0200
Install headers, libraries and pdf
Headers are installed in ${CMAKE_INSTALL_PREFIX}/include/ceres
Libraries are installed in ${CMAKE_INSTALL_PREFIX}/lib
pdf is installed in ${CMAKE_INSTALL_PREFIX}/share/ceres/docs
Change-Id: Ic175f2c2f5fa86820a1e8c64c2ed171f4a302a68
commit d2fb5adea4d8c2aeb43c4289c6976798a54d3cf1
Author: Arnaud Gelas <arnaudgelas@gmail.com>
Date: Fri Aug 17 10:11:02 2012 +0200
Configure gerrit hook at CMake time
If the source directory is a clone, at CMake time the commit-msg hook gets
downloaded and installed in the right location.
Change-Id: I5fee17d050ca22d8b92a49fdcc2a1cd6659f209b
commit 73166098fc4b1072adc30321c666188a3909c43c
Author: Arnaud Gelas <arnaudgelas@gmail.com>
Date: Mon Aug 20 15:40:41 2012 +0200
Add one CMake option to build the examples.
Currently the examples are always built. For external projects, it is useful
not to compile the examples.
Change-Id: I41d3bde19c7e742818e60f78222d39c43992ca8b
commit 86d4f1ba41ef14eb1b6b61a7936af83387b35eb2
Author: Keir Mierle <mierle@gmail.com>
Date: Mon Aug 20 11:52:04 2012 -0700
Add missing return statement.
Change-Id: I5eaf718318e27040e3c97e32ee46cf0a11176a37
commit 51eb229da34187a4e8ce73ed9cc0e731998bb2be
Author: Keir Mierle <mierle@gmail.com>
Date: Mon Aug 20 11:46:12 2012 -0700
Add Program::ToString() to aid debugging.
Change-Id: I0ab37ed2fe0947ca87a152919d4e7dc9b56dedc6
commit bcc7100635e2047dc2b77df19a4ded8a6ab4d4b9
Author: Keir Mierle <mierle@gmail.com>
Date: Mon Aug 20 11:45:04 2012 -0700
Ignore minted.sty.
Change-Id: I2467a6f801812b9007b51bf14b00757f026e4322
commit 9705a736dd3d6fbead0d8a6ff77102c69bbcdc08
Author: Keir Mierle <mierle@gmail.com>
Date: Mon Aug 20 11:24:05 2012 -0700
Add ParameterBlock::ToString() to aid debugging.
Change-Id: Id3f5cb27b855c536dd65a986f345bd8eb2799dfa
commit 0c714a70e6123ceb68e5cfcd3cfbee0d09deb1db
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Aug 20 11:18:16 2012 -0700
Fix blanks before private in loss_function.h
Change-Id: I068bed6431bc7c9b7958af391655df61499000b2
commit 51cf7cbe3bac45c6807c2703a2fc3175d76a1b47
Author: Markus Moll <markus.moll@esat.kuleuven.be>
Date: Mon Aug 20 20:10:20 2012 +0200
Add the two-dimensional subspace search to DoglegStrategy
Change-Id: I5163744c100cdf07dd93343d0734ffe0e80364f3
commit ad1f7b772e559a911ac3a3b078b0aee1836fe785
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Aug 20 11:10:34 2012 -0700
Add ArcTanLoss, TolerantLoss and ComposedLossFunction.
Based on work by James Roseborough.
Change-Id: Idc4e0b099028f67702bfc7fe3e43dbd96b6f9256
commit 05292bf8fc5208b86b4a13544615b584f6efa936
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Aug 20 07:40:45 2012 -0700
Add a TrustRegionStrategy::Summary object.
Change-Id: I7caee35a3408ee4a0ec16ba407410d822929340d
commit b12b906c4d21c3949f0dce62c4c0d083c8edecf1
Author: Arnaud Gelas <arnaudgelas@gmail.com>
Date: Wed Aug 15 16:27:38 2012 +0200
Add one option to generate the PDF from CMake at build time
Make sure pygmentize is installed
Change-Id: I068ba45c33a8e96acc906a464b12d10d58b3e231
commit b9f15a59361c609ffc4a328aea9be3d265b5da81
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sat Aug 18 13:06:19 2012 -0700
Add a dense Cholesky factorization based linear solver.
For problems with a small number of variables, but a large
number of residuals, it is sometimes beneficial to use the
Cholesky factorization on the normal equations, instead of
the dense QR factorization of the Jacobian, even though the QR
factorization is numerically the better thing to do.
Change-Id: I3506b006195754018deec964e6e190b7e8c9ac8f
commit b3fa009435acf476cd373052e62988f6437970b1
Author: Arnaud Gelas <arnaudgelas@gmail.com>
Date: Fri Aug 17 10:31:41 2012 +0200
Set CMAKE_*_OUTPUT_DIRECTORY
Gather
* all executables in ${CMAKE_BINARY_DIR}/bin
* all libraries (static and dynamic) in ${CMAKE_BINARY_DIR}/lib
Change-Id: Ibc2fa1adfb6f0aea65d66d570259b79546bf3b07
commit 1b8a4d5d11671ed83cf6077e363dd95333f08ef8
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Aug 17 16:49:11 2012 -0700
Fix a minor bug in detect_structure logging.
Change-Id: I117f7745e4c67595b3ff9244cde82b5b5b34ee4b
commit 31c1e784ab2cb9294c6e05414cf06aae2b3766de
Author: Keir Mierle <mierle@gmail.com>
Date: Fri Aug 17 16:16:32 2012 -0700
Minor cleanups.
Change-Id: Ida4866997deeaa1bc2cebd6b69313a05ac82e457
commit e83f7879a8b21c6976e116958caf35bcdcf41cb0
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Aug 17 15:34:42 2012 -0700
Fix SuiteSparse3 UFConfig.h detection really.
Change-Id: Id187102e755b7d778dff4363f22f9a4697ed12dd
commit 96f25dc57658d296ee6b6633818b4f1e51d7d587
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Aug 17 15:34:42 2012 -0700
Fix SuiteSparse3 UFConfig.h detection.
Change-Id: Ia59aefdb0ad7f713f76ed79692f2db4fa2821e5b
commit c497bd6cd9aa944f518aa491d3bc645851ff9594
Author: Markus Moll <markus.moll@esat.kuleuven.be>
Date: Fri Aug 17 14:40:13 2012 +0200
Add UFconfig and/or SuiteSparse_config test to CMakeLists.txt
SuiteSparse 4 requires linking to libsuitesparseconfig.a.
Both SuiteSparse 3 and SuiteSparse 4 require an additional header
(either UFconfig.h or SuiteSparse_config.h) that is not found if it is
in a separate path. Therefore, add explicit checks.
Change-Id: I699902b5db4f1b7f17134b5a54f9aa681445e294
commit 383c04f4236d92801c7c674892814362dedf7ad6
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Aug 17 10:14:04 2012 -0700
Fix QuaternionToAngleAxis to ensure rotations are between -pi and pi.
Thanks to Guoxuan Zhang for reporting this.
Change-Id: I2831ca3a04d5dc6467849c290461adbe23faaea3
commit dd2b17d7dd9750801ba4720bdece2062e59b7ae3
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Aug 16 19:34:57 2012 -0700
CERES_DONT_HAVE_PROTOCOL_BUFFERS -> CERES_NO_PROTOCOL_BUFFERS.
Change-Id: I6c9f50e4c006faf4e75a8f417455db18357f3187
commit 8b4cb7aa2c74a0da62c638b2023566aa242af995
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Aug 16 19:26:55 2012 -0700
Fix sparse linear algebra library logging in Summary::FullReport.
Change-Id: Id2c902dc86c00954fde7749c7b4a67dd94215a31
commit 47d26bcd3b38b5ff53b34768c33b499d47b26bd0
Author: Markus Moll <markus.moll@esat.kuleuven.be>
Date: Thu Aug 16 00:23:38 2012 +0200
Do not implicitly negate the step in the TrustRegionMinimizer.
In the TrustRegionMinimizer, the step is currently implicitly negated.
This is done so that the linearized residual is |r - J*step|^2, which
corresponds to J*step = r, so neither J nor r have to be modified.
However, it leads to the rather unintuitive situation that the strategy
returns a step in positive gradient direction, which you would expect to
increase the function value. One way is to rename the "step" parameter in
the strategy to "negative_step" and document it.
This patch instead moves the negation inside the strategy, just around
the linear solver call, so that it is done in a local context and easier
to document.
Change-Id: Idb258149a01f61c64e22128ea221c5a30cd89c89
commit 51da590c8457e6664f76fe9813425a0c71351497
Author: Markus Moll <markus.moll@esat.kuleuven.be>
Date: Fri Aug 17 12:56:09 2012 +0200
Remove tmp file
Change-Id: I07496fafae7b0c5c12cc26ae336e0db3b5592735
commit 7006a1f2b1701b8d89b8d1525fc0101943802221
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Aug 16 18:04:22 2012 -0700
Correct example code in Powell's function example.
Thanks to Petter Strandmark for pointing this out.
Change-Id: I967632235dccdb481396e94904bb911c9a1efe1e
commit 57a44b27bc6fc95b4e70fdc25c25c9925a2072a0
Author: Keir Mierle <mierle@gmail.com>
Date: Thu Aug 16 17:04:50 2012 -0700
Remove unnecessary flags in NDK build.
Change-Id: Ib5b4d0b7f2d898671252734978c789b8171d96a8
commit f21bee247251a8b2e836c215a84c4668c31d75cd
Author: Keir Mierle <mierle@gmail.com>
Date: Thu Aug 16 16:27:10 2012 -0700
Fix for fpclassify.h NDK porting work.
Change-Id: I69df1b4caf2941ed96a53e35e43ec54073f84f59
commit 8ceb02cb75b66602de44a35e413225386cb21c27
Author: Keir Mierle <mierle@gmail.com>
Date: Thu Aug 16 14:23:47 2012 -0700
Add Android NDK build files.
This adds an Android.mk build that builds a Ceres static library
suitable for embedding in larger Android applications. This is
useful when needing to build Ceres without GPL'd components, since
the standalone toolchain (needed for the CMake Android build) does
not work with STLPort.
Change-Id: I8d857237f6f82658741017d161b2e31d9a20e5a7

@ -20,9 +20,13 @@ defs.append('CERES_HAVE_PTHREAD')
defs.append('CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {')
defs.append('CERES_HASH_NAMESPACE_END=}}')
defs.append('CERES_NO_SUITESPARSE')
defs.append('CERES_DONT_HAVE_PROTOCOL_BUFFERS')
defs.append('CERES_NO_CXSPARSE')
defs.append('CERES_NO_PROTOCOL_BUFFERS')
defs.append('CERES_RESTRICT_SCHUR_SPECIALIZATION')
if 'Mac OS X 10.5' in env['MACOSX_SDK_CHECK']:
defs.append('CERES_NO_TR1')
incs = '. ../../ ../../../Eigen3 ./include ./internal ../gflags'
# work around broken hashtable in 10.5 SDK

@ -1,6 +1,5 @@
#!/bin/sh
if false; then
if [ "x$1" = "x--i-really-know-what-im-doing" ] ; then
echo Proceeding as requested by command line ...
else
@ -8,16 +7,22 @@ else
exit 1
fi
repo="https://code.google.com/p/ceres-solver/"
branch="windows"
repo="https://ceres-solver.googlesource.com/ceres-solver"
branch="master"
tag="1.3.0"
tmp=`mktemp -d`
checkout="$tmp/ceres"
GIT="git --git-dir $tmp/ceres/.git --work-tree $tmp/ceres"
GIT="git --git-dir $tmp/ceres/.git --work-tree $checkout"
git clone $repo $tmp/ceres
git clone $repo $checkout
if [ $branch != "master" ]; then
$GIT checkout -t remotes/origin/$branch
else
if [ "x$tag" != "x" ]; then
$GIT checkout $tag
fi
fi
$GIT log -n 50 > ChangeLog
@ -37,8 +42,6 @@ done
rm -rf $tmp
fi
sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//\t/' | grep -v -E 'schur_eliminator_[0-9]_[0-9]_[0-9d].cc' | sort -d`
generated_sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//#\t\t/' | grep -E 'schur_eliminator_[0-9]_[0-9]_[0-9d].cc' | sort -d`
headers=`find ./include ./internal -type f -iname '*.h' | sed -r 's/^\.\//\t/' | sort -d`
@ -142,7 +145,7 @@ if(WIN32)
if(NOT MINGW)
list(APPEND INC
third_party/msinttypes
../msinttypes
)
endif()
else()
@ -153,13 +156,32 @@ endif()
add_definitions(
-DCERES_HAVE_PTHREAD
-D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {"
-D"CERES_HASH_NAMESPACE_END=}}"
-DCERES_NO_SUITESPARSE
-DCERES_DONT_HAVE_PROTOCOL_BUFFERS
-DCERES_NO_CXSPARSE
-DCERES_NO_PROTOCOL_BUFFERS
-DCERES_RESTRICT_SCHUR_SPECIALIZATION
)
if(MSVC10)
add_definitions(
-D"CERES_HASH_NAMESPACE_START=namespace std {"
-D"CERES_HASH_NAMESPACE_END=}"
)
else()
add_definitions(
-D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {"
-D"CERES_HASH_NAMESPACE_END=}}"
)
endif()
if(APPLE)
if(CMAKE_OSX_DEPLOYMENT_TARGET STREQUAL "10.5")
add_definitions(
-DCERES_NO_TR1
)
endif()
endif()
blender_add_lib(extern_ceres "\${SRC}" "\${INC}" "\${INC_SYS}")
EOF
@ -186,11 +208,20 @@ defs.append('CERES_HAVE_PTHREAD')
defs.append('CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {')
defs.append('CERES_HASH_NAMESPACE_END=}}')
defs.append('CERES_NO_SUITESPARSE')
defs.append('CERES_DONT_HAVE_PROTOCOL_BUFFERS')
defs.append('CERES_NO_CXSPARSE')
defs.append('CERES_NO_PROTOCOL_BUFFERS')
defs.append('CERES_RESTRICT_SCHUR_SPECIALIZATION')
if 'Mac OS X 10.5' in env['MACOSX_SDK_CHECK']:
defs.append('CERES_NO_TR1')
incs = '. ../../ ../../../Eigen3 ./include ./internal ../gflags'
# work around broken hashtable in 10.5 SDK
if env['OURPLATFORM'] == 'darwin' and env['WITH_BF_BOOST']:
incs += ' ' + env['BF_BOOST_INC']
defs.append('CERES_HASH_BOOST')
if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'linuxcross', 'win64-vc', 'win64-mingw'):
if env['OURPLATFORM'] in ('win32-vc', 'win64-vc'):
incs += ' ../msinttypes'

@ -2,6 +2,8 @@ include/ceres/autodiff_cost_function.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/cost_function.h
include/ceres/crs_matrix.h
include/ceres/fpclassify.h
include/ceres/internal/autodiff.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
@ -20,6 +22,8 @@ include/ceres/rotation.h
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/types.h
internal/ceres/array_utils.cc
internal/ceres/array_utils.h
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobian_writer.cc
@ -52,13 +56,19 @@ internal/ceres/conjugate_gradients_solver.cc
internal/ceres/conjugate_gradients_solver.h
internal/ceres/corrector.cc
internal/ceres/corrector.h
internal/ceres/cxsparse.cc
internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
internal/ceres/dense_normal_cholesky_solver.cc
internal/ceres/dense_normal_cholesky_solver.h
internal/ceres/dense_qr_solver.cc
internal/ceres/dense_qr_solver.h
internal/ceres/dense_sparse_matrix.cc
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.cc
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.cc
internal/ceres/dogleg_strategy.h
internal/ceres/evaluator.cc
internal/ceres/evaluator.h
internal/ceres/file.cc
@ -79,6 +89,7 @@ internal/ceres/generated/schur_eliminator_4_4_3.cc
internal/ceres/generated/schur_eliminator_4_4_4.cc
internal/ceres/generated/schur_eliminator_4_4_d.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
internal/ceres/generate_eliminator_specialization.py
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/gradient_checking_cost_function.h
internal/ceres/graph_algorithms.h
@ -88,8 +99,8 @@ internal/ceres/implicit_schur_complement.h
internal/ceres/integral_types.h
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/levenberg_marquardt.cc
internal/ceres/levenberg_marquardt.h
internal/ceres/levenberg_marquardt_strategy.cc
internal/ceres/levenberg_marquardt_strategy.h
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.cc
@ -106,6 +117,8 @@ internal/ceres/normal_prior.cc
internal/ceres/parameter_block.h
internal/ceres/partitioned_matrix_view.cc
internal/ceres/partitioned_matrix_view.h
internal/ceres/polynomial_solver.cc
internal/ceres/polynomial_solver.h
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/problem_impl.h
@ -136,6 +149,7 @@ internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.cc
internal/ceres/sparse_normal_cholesky_solver.h
internal/ceres/split.cc
internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.cc
internal/ceres/stringprintf.h
@ -143,6 +157,10 @@ internal/ceres/suitesparse.cc
internal/ceres/suitesparse.h
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/triplet_sparse_matrix.h
internal/ceres/trust_region_minimizer.cc
internal/ceres/trust_region_minimizer.h
internal/ceres/trust_region_strategy.cc
internal/ceres/trust_region_strategy.h
internal/ceres/types.cc
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility_based_preconditioner.h

@ -163,7 +163,7 @@ class AutoDiffCostFunction :
explicit AutoDiffCostFunction(CostFunctor* functor)
: functor_(functor) {
CHECK_NE(M, DYNAMIC) << "Can't run the fixed-size constructor if the "
<< "number of residuals is set to ceres::DYNAMIC.";
<< "number of residuals is set to ceres::DYNAMIC.";
}
// Takes ownership of functor. Ignores the template-provided number of
@ -174,7 +174,7 @@ class AutoDiffCostFunction :
AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
: functor_(functor) {
CHECK_EQ(M, DYNAMIC) << "Can't run the dynamic-size constructor if the "
<< "number of residuals is not ceres::DYNAMIC.";
<< "number of residuals is not ceres::DYNAMIC.";
SizedCostFunction<M, N0, N1, N2, N3, N4, N5>::set_num_residuals(num_residuals);
}

@ -119,7 +119,7 @@ class CostFunction {
// number of outputs (residuals).
vector<int16> parameter_block_sizes_;
int num_residuals_;
DISALLOW_COPY_AND_ASSIGN(CostFunction);
CERES_DISALLOW_COPY_AND_ASSIGN(CostFunction);
};
} // namespace ceres

@ -0,0 +1,65 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_CRS_MATRIX_H_
#define CERES_PUBLIC_CRS_MATRIX_H_
#include <vector>
#include "ceres/internal/port.h"
namespace ceres {
// A compressed row sparse matrix used primarily for communicating the
// Jacobian matrix to the user.
struct CRSMatrix {
CRSMatrix() : num_rows(0), num_cols(0) {}
int num_rows;
int num_cols;
// A compressed row matrix stores its contents in three arrays.
// The non-zero pattern of the i^th row is given by
//
// cols[rows[i] ... rows[i + 1]]
//
// and the corresponding values by
//
// values[rows[i] ... rows[i + 1]]
//
// Thus, rows is a vector of size num_rows + 1, and cols and values
// have as many entries as number of non-zeros in the matrix.
vector<int> cols;
vector<int> rows;
vector<double> values;
};
} // namespace ceres
#endif // CERES_PUBLIC_CRS_MATRIX_H_

@ -0,0 +1,88 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
//
// Portable floating point classification. The names are picked such that they
// do not collide with macros. For example, "isnan" in C99 is a macro and hence
// does not respect namespaces.
//
// TODO(keir): Finish porting!
#ifndef CERES_PUBLIC_FPCLASSIFY_H_
#define CERES_PUBLIC_FPCLASSIFY_H_
#if defined(_MSC_VER)
#include <float.h>
#endif
namespace ceres {
#if defined(_MSC_VER)
// MSVC has no C99/C++11 classification functions; use the CRT's
// _finite/_isnan/_fpclass equivalents instead.
inline bool IsFinite (double x) { return _finite(x); }
inline bool IsInfinite(double x) { return !_finite(x) && !_isnan(x); }
inline bool IsNaN (double x) { return _isnan(x); }
inline bool IsNormal (double x) {
int classification = _fpclass(x);
return classification == _FPCLASS_NN ||
classification == _FPCLASS_PN;
}
#elif defined(ANDROID)
// On Android when using the GNU STL, the C++ fpclassify functions are not
// available. Strictly speaking, the std functions are not standard until
// C++11. Instead use the C99 macros on Android.
inline bool IsNaN (double x) { return isnan(x); }
inline bool IsNormal (double x) { return isnormal(x); }
// On Android NDK r6, when using STLPort, the isinf and isfinite functions are
// not available, so reimplement them.
# if defined(_STLPORT_VERSION)
inline bool IsInfinite(double x) {
return x == std::numeric_limits<double>::infinity() ||
x == -std::numeric_limits<double>::infinity();
}
inline bool IsFinite(double x) {
return !isnan(x) && !IsInfinite(x);
}
# else
inline bool IsFinite (double x) { return isfinite(x); }
inline bool IsInfinite(double x) { return isinf(x); }
# endif // defined(_STLPORT_VERSION)
#else
// These definitions are for the normal Unix suspects.
// TODO(keir): Test the "else" with more platforms.
inline bool IsFinite (double x) { return std::isfinite(x); }
inline bool IsInfinite(double x) { return std::isinf(x); }
inline bool IsNaN (double x) { return std::isnan(x); }
inline bool IsNormal (double x) { return std::isnormal(x); }
#endif
} // namespace ceres
#endif // CERES_PUBLIC_FPCLASSIFY_H_

@ -34,6 +34,8 @@
#include <cstddef>
#include <glog/logging.h>
#include "Eigen/Core"
#include "ceres/internal/macros.h"
#include "ceres/internal/manual_constructor.h"
namespace ceres {
@ -136,7 +138,6 @@ class FixedArray {
// and T must be the same, otherwise callers' assumptions about use
// of this code will be broken.
struct InnerContainer {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
T element;
};

@ -43,11 +43,13 @@
//
// For disallowing only assign or copy, write the code directly, but declare
// the intent in a comment, for example:
// void operator=(const TypeName&); // DISALLOW_ASSIGN
// Note, that most uses of DISALLOW_ASSIGN and DISALLOW_COPY are broken
// semantically, one should either use disallow both or neither. Try to
// avoid these in new code.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
//
// void operator=(const TypeName&); // _DISALLOW_ASSIGN
// Note, that most uses of CERES_DISALLOW_ASSIGN and CERES_DISALLOW_COPY
// are broken semantically, one should either use disallow both or
// neither. Try to avoid these in new code.
#define CERES_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
@ -57,9 +59,9 @@
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
#define CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName(); \
DISALLOW_COPY_AND_ASSIGN(TypeName)
CERES_DISALLOW_COPY_AND_ASSIGN(TypeName)
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
@ -151,4 +153,19 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
#define MUST_USE_RESULT
#endif
// Platform independent macros to get aligned memory allocations.
// For example
//
// MyFoo my_foo CERES_ALIGN_ATTRIBUTE(16);
//
// Gives us an instance of MyFoo which is aligned at a 16 byte
// boundary.
#if defined(_MSC_VER)
#define CERES_ALIGN_ATTRIBUTE(n) __declspec(align(n))
#define CERES_ALIGN_OF(T) __alignof(T)
#elif defined(__GNUC__)
#define CERES_ALIGN_ATTRIBUTE(n) __attribute__((aligned(n)))
#define CERES_ALIGN_OF(T) __alignof(T)
#endif
#endif // CERES_PUBLIC_INTERNAL_MACROS_H_

@ -45,60 +45,49 @@
namespace ceres {
namespace internal {
// ------- Define ALIGNED_CHAR_ARRAY --------------------------------
// ------- Define CERES_ALIGNED_CHAR_ARRAY --------------------------------
#ifndef ALIGNED_CHAR_ARRAY
#ifndef CERES_ALIGNED_CHAR_ARRAY
// Because MSVC and older GCCs require that the argument to their alignment
// construct to be a literal constant integer, we use a template instantiated
// at all the possible powers of two.
template<int alignment, int size> struct AlignType { };
template<int size> struct AlignType<0, size> { typedef char result[size]; };
#if defined(_MSC_VER)
#define BASE_PORT_H_ALIGN_ATTRIBUTE(X) __declspec(align(X))
#define BASE_PORT_H_ALIGN_OF(T) __alignof(T)
#elif defined(__GNUC__)
#define BASE_PORT_H_ALIGN_ATTRIBUTE(X) __attribute__((aligned(X)))
#define BASE_PORT_H_ALIGN_OF(T) __alignof__(T)
#endif
#if defined(BASE_PORT_H_ALIGN_ATTRIBUTE)
#if !defined(CERES_ALIGN_ATTRIBUTE)
#define CERES_ALIGNED_CHAR_ARRAY you_must_define_CERES_ALIGNED_CHAR_ARRAY_for_your_compiler
#else // !defined(CERES_ALIGN_ATTRIBUTE)
#define BASE_PORT_H_ALIGNTYPE_TEMPLATE(X) \
#define CERES_ALIGN_TYPE_TEMPLATE(X) \
template<int size> struct AlignType<X, size> { \
typedef BASE_PORT_H_ALIGN_ATTRIBUTE(X) char result[size]; \
typedef CERES_ALIGN_ATTRIBUTE(X) char result[size]; \
}
BASE_PORT_H_ALIGNTYPE_TEMPLATE(1);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(2);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(4);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(8);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(16);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(32);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(64);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(128);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(256);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(512);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(1024);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(2048);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(4096);
BASE_PORT_H_ALIGNTYPE_TEMPLATE(8192);
CERES_ALIGN_TYPE_TEMPLATE(1);
CERES_ALIGN_TYPE_TEMPLATE(2);
CERES_ALIGN_TYPE_TEMPLATE(4);
CERES_ALIGN_TYPE_TEMPLATE(8);
CERES_ALIGN_TYPE_TEMPLATE(16);
CERES_ALIGN_TYPE_TEMPLATE(32);
CERES_ALIGN_TYPE_TEMPLATE(64);
CERES_ALIGN_TYPE_TEMPLATE(128);
CERES_ALIGN_TYPE_TEMPLATE(256);
CERES_ALIGN_TYPE_TEMPLATE(512);
CERES_ALIGN_TYPE_TEMPLATE(1024);
CERES_ALIGN_TYPE_TEMPLATE(2048);
CERES_ALIGN_TYPE_TEMPLATE(4096);
CERES_ALIGN_TYPE_TEMPLATE(8192);
// Any larger and MSVC++ will complain.
#define ALIGNED_CHAR_ARRAY(T, Size) \
typename AlignType<BASE_PORT_H_ALIGN_OF(T), sizeof(T) * Size>::result
#undef CERES_ALIGN_TYPE_TEMPLATE
#undef BASE_PORT_H_ALIGNTYPE_TEMPLATE
#undef BASE_PORT_H_ALIGN_ATTRIBUTE
#define CERES_ALIGNED_CHAR_ARRAY(T, Size) \
typename AlignType<CERES_ALIGN_OF(T), sizeof(T) * Size>::result
#else // defined(BASE_PORT_H_ALIGN_ATTRIBUTE)
#define ALIGNED_CHAR_ARRAY you_must_define_ALIGNED_CHAR_ARRAY_for_your_compiler
#endif // defined(BASE_PORT_H_ALIGN_ATTRIBUTE)
#endif // !defined(CERES_ALIGN_ATTRIBUTE)
#undef BASE_PORT_H_ALIGNTYPE_TEMPLATE
#undef BASE_PORT_H_ALIGN_ATTRIBUTE
#endif // ALIGNED_CHAR_ARRAY
#endif // CERES_ALIGNED_CHAR_ARRAY
template <typename Type>
class ManualConstructor {
@ -203,10 +192,10 @@ class ManualConstructor {
}
private:
ALIGNED_CHAR_ARRAY(Type, 1) space_;
CERES_ALIGNED_CHAR_ARRAY(Type, 1) space_;
};
#undef ALIGNED_CHAR_ARRAY
#undef CERES_ALIGNED_CHAR_ARRAY
} // namespace internal
} // namespace ceres

@ -31,6 +31,8 @@
#ifndef CERES_PUBLIC_INTERNAL_PORT_H_
#define CERES_PUBLIC_INTERNAL_PORT_H_
#include <string>
namespace ceres {
// It is unfortunate that this import of the entire standard namespace is
@ -39,6 +41,10 @@ namespace ceres {
// things outside of the Ceres optimization package.
using namespace std;
// This is necessary to properly handle the case that there is a different
// "string" implementation in the global namespace.
using std::string;
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_PORT_H_

@ -28,8 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// When an iteration callback is specified, Ceres calls the callback after each
// optimizer step and pass it an IterationSummary object, defined below.
// When an iteration callback is specified, Ceres calls the callback
// after each minimizer step (if the minimizer has not converged) and
// passes it an IterationSummary object, defined below.
#ifndef CERES_PUBLIC_ITERATION_CALLBACK_H_
#define CERES_PUBLIC_ITERATION_CALLBACK_H_
@ -44,7 +45,15 @@ struct IterationSummary {
// Current iteration number.
int32 iteration;
// Step was numerically valid, i.e., all values are finite and the
// step reduces the value of the linearized model.
//
// Note: step_is_valid is false when iteration = 0.
bool step_is_valid;
// Whether or not the algorithm made progress in this iteration.
//
// Note: step_is_successful is false when iteration = 0.
bool step_is_successful;
// Value of the objective function.
@ -66,9 +75,10 @@ struct IterationSummary {
// cost and the change in the cost of the linearized approximation.
double relative_decrease;
// Value of the regularization parameter for Levenberg-Marquardt
// algorithm at the end of the current iteration.
double mu;
// Size of the trust region at the end of the current iteration. For
// the Levenberg-Marquardt algorithm, the regularization parameter
// mu = 1.0 / trust_region_radius.
double trust_region_radius;
// For the inexact step Levenberg-Marquardt algorithm, this is the
// relative accuracy with which the Newton(LM) step is solved. This
@ -81,13 +91,15 @@ struct IterationSummary {
// Newton step.
int linear_solver_iterations;
// TODO(sameeragarwal): Change to use a higher precision timer using
// clock_gettime.
// Time (in seconds) spent inside the linear least squares solver.
int iteration_time_sec;
// Time (in seconds) spent inside the minimizer loop in the current
// iteration.
double iteration_time_in_seconds;
// Time (in seconds) spent inside the linear least squares solver.
int linear_solver_time_sec;
// Time (in seconds) spent inside the trust region step solver.
double step_solver_time_in_seconds;
// Time (in seconds) since the user called Solve().
double cumulative_time_in_seconds;
};
// Interface for specifying callbacks that are executed at the end of
@ -133,7 +145,7 @@ struct IterationSummary {
// summary.gradient_max_norm,
// summary.step_norm,
// summary.relative_decrease,
// summary.mu,
// summary.trust_region_radius,
// summary.eta,
// summary.linear_solver_iterations);
// if (log_to_stdout_) {

@ -162,16 +162,7 @@
#include <string>
#include "Eigen/Core"
// Visual Studio 2012 or older version
#if defined(_MSC_VER) && _MSC_VER <= 1700
namespace std {
inline bool isfinite(double x) { return _finite(x); }
inline bool isinf (double x) { return !_finite(x) && !_isnan(x); }
inline bool isnan (double x) { return _isnan(x); }
inline bool isnormal(double x) { return _finite(x) && x != 0.0; }
} // namespace std
#endif
#include "ceres/fpclassify.h"
namespace ceres {
@ -184,7 +175,9 @@ struct Jet {
// (where T is a Jet<T, N>). This usually only happens in opt mode. Note that
// the C++ standard mandates that e.g. default constructed doubles are
// initialized to 0.0; see sections 8.5 of the C++03 standard.
Jet() : a() {}
Jet() : a() {
v.setZero();
}
// Constructor from scalar: a + 0.
explicit Jet(const T& value) {
@ -199,18 +192,6 @@ struct Jet {
v[k] = T(1.0);
}
/*
// Construct from an array where the first element is the scalar.
// This is templated to support converting from other data types.
template<typename D>
Jet(const D* scalar_and_derivatives) {
a = T(scalar_and_derivatives[0]);
v = Eigen::Map<const Eigen::Matrix<D, N, 1> >(
scalar_and_derivatives + 1, N).cast<T>();
}
*/
// Compound operators
Jet<T, N>& operator+=(const Jet<T, N> &y) {
*this = *this + y;
@ -232,8 +213,25 @@ struct Jet {
return *this;
}
T a; // The scalar part.
Eigen::Matrix<T, N, 1> v; // The infinitesimal part.
// The scalar part.
T a;
// The infinitesimal part.
//
// Note the Eigen::DontAlign bit is needed here because this object
// gets allocated on the stack and as part of other arrays and
// structs. Forcing the right alignment there is the source of much
// pain and suffering. Even if that works, passing Jets around to
// functions by value has problems because the C++ ABI does not
// guarantee alignment for function arguments.
//
// Setting the DontAlign bit prevents Eigen from using SSE for the
// various operations on Jets. This is a small performance penalty
// since the AutoDiff code will still expose much of the code as
// statically sized loops to the compiler. But given the subtle
// issues that arise due to alignment, especially when dealing with
// multiple platforms, it seems to be a trade off worth making.
Eigen::Matrix<T, N, 1, Eigen::DontAlign> v;
};
// Unary +
@ -411,10 +409,6 @@ inline double cos (double x) { return std::cos(x); }
inline double acos (double x) { return std::acos(x); }
inline double sin (double x) { return std::sin(x); }
inline double asin (double x) { return std::asin(x); }
inline bool isfinite(double x) { return std::isfinite(x); }
inline bool isinf (double x) { return std::isinf(x); }
inline bool isnan (double x) { return std::isnan(x); }
inline bool isnormal(double x) { return std::isnormal(x); }
inline double pow (double x, double y) { return std::pow(x, y); }
inline double atan2(double y, double x) { return std::atan2(y, x); }
@ -492,22 +486,23 @@ Jet<T, N> asin(const Jet<T, N>& f) {
}
// Jet Classification. It is not clear what the appropriate semantics are for
// these classifications. This picks that isfinite and isnormal are "all"
// operations, i.e. all elements of the jet must be finite for the jet itself to
// be finite (or normal). For isnan and isinf, the answer is less clear. This
// takes a "any" approach for isnan and isinf such that if any part of a jet is
// nan or inf, then the entire jet is nan or inf. This leads to strange
// situations like a jet can be both isinf and isnan, but in practice the "any"
// semantics are the most useful for e.g. checking that derivatives are sane.
// these classifications. This picks that IsFinite and isnormal are "all"
// operations, i.e. all elements of the jet must be finite for the jet itself
// to be finite (or normal). For IsNaN and IsInfinite, the answer is less
// clear. This takes a "any" approach for IsNaN and IsInfinite such that if any
// part of a jet is nan or inf, then the entire jet is nan or inf. This leads
// to strange situations like a jet can be both IsInfinite and IsNaN, but in
// practice the "any" semantics are the most useful for e.g. checking that
// derivatives are sane.
// The jet is finite if all parts of the jet are finite.
template <typename T, int N> inline
bool isfinite(const Jet<T, N>& f) {
if (!isfinite(f.a)) {
bool IsFinite(const Jet<T, N>& f) {
if (!IsFinite(f.a)) {
return false;
}
for (int i = 0; i < N; ++i) {
if (!isfinite(f.v[i])) {
if (!IsFinite(f.v[i])) {
return false;
}
}
@ -516,12 +511,12 @@ bool isfinite(const Jet<T, N>& f) {
// The jet is infinite if any part of the jet is infinite.
template <typename T, int N> inline
bool isinf(const Jet<T, N>& f) {
if (isinf(f.a)) {
bool IsInfinite(const Jet<T, N>& f) {
if (IsInfinite(f.a)) {
return true;
}
for (int i = 0; i < N; i++) {
if (isinf(f.v[i])) {
if (IsInfinite(f.v[i])) {
return true;
}
}
@ -530,12 +525,12 @@ bool isinf(const Jet<T, N>& f) {
// The jet is NaN if any part of the jet is NaN.
template <typename T, int N> inline
bool isnan(const Jet<T, N>& f) {
if (isnan(f.a)) {
bool IsNaN(const Jet<T, N>& f) {
if (IsNaN(f.a)) {
return true;
}
for (int i = 0; i < N; ++i) {
if (isnan(f.v[i])) {
if (IsNaN(f.v[i])) {
return true;
}
}
@ -544,12 +539,12 @@ bool isnan(const Jet<T, N>& f) {
// The jet is normal if all parts of the jet are normal.
template <typename T, int N> inline
bool isnormal(const Jet<T, N>& f) {
if (!isnormal(f.a)) {
bool IsNormal(const Jet<T, N>& f) {
if (!IsNormal(f.a)) {
return false;
}
for (int i = 0; i < N; ++i) {
if (!isnormal(f.v[i])) {
if (!IsNormal(f.v[i])) {
return false;
}
}
@ -650,78 +645,6 @@ inline std::ostream &operator<<(std::ostream &s, const Jet<T, N>& z) {
return s << "[" << z.a << " ; " << z.v.transpose() << "]";
}
// A jet traits class to make it easier to work with mixed auto / numeric diff.
template<typename T>
struct JetOps {
static bool IsScalar() {
return true;
}
static T GetScalar(const T& t) {
return t;
}
static void SetScalar(const T& scalar, T* t) {
*t = scalar;
}
static void ScaleDerivative(double scale_by, T *value) {
// For double, there is no derivative to scale.
}
};
template<typename T, int N>
struct JetOps<Jet<T, N> > {
static bool IsScalar() {
return false;
}
static T GetScalar(const Jet<T, N>& t) {
return t.a;
}
static void SetScalar(const T& scalar, Jet<T, N>* t) {
t->a = scalar;
}
static void ScaleDerivative(double scale_by, Jet<T, N> *value) {
value->v *= scale_by;
}
};
template<typename FunctionType, int kNumArgs, typename ArgumentType>
struct Chain {
static ArgumentType Rule(const FunctionType &f,
const FunctionType dfdx[kNumArgs],
const ArgumentType x[kNumArgs]) {
// In the default case of scalars, there's nothing to do since there are no
// derivatives to propagate.
return f;
}
};
// XXX Add documentation here!
template<typename FunctionType, int kNumArgs, typename T, int N>
struct Chain<FunctionType, kNumArgs, Jet<T, N> > {
static Jet<T, N> Rule(const FunctionType &f,
const FunctionType dfdx[kNumArgs],
const Jet<T, N> x[kNumArgs]) {
// x is itself a function of another variable ("z"); what this function
// needs to return is "f", but with the derivative with respect to z
// attached to the jet. So combine the derivative part of x's jets to form
// a Jacobian matrix between x and z (i.e. dx/dz).
Eigen::Matrix<T, kNumArgs, N> dxdz;
for (int i = 0; i < kNumArgs; ++i) {
dxdz.row(i) = x[i].v.transpose();
}
// Map the input gradient dfdx into an Eigen row vector.
Eigen::Map<const Eigen::Matrix<FunctionType, 1, kNumArgs> >
vector_dfdx(dfdx, 1, kNumArgs);
// Now apply the chain rule to obtain df/dz. Combine the derivative with
// the scalar part to obtain f with full derivative information.
Jet<T, N> jet_f;
jet_f.a = f;
jet_f.v = vector_dfdx.template cast<T>() * dxdz; // Also known as dfdz.
return jet_f;
}
};
} // namespace ceres
namespace Eigen {

@ -175,6 +175,7 @@ class HuberLoss : public LossFunction {
public:
explicit HuberLoss(double a) : a_(a), b_(a * a) { }
virtual void Evaluate(double, double*) const;
private:
const double a_;
// b = a^2.
@ -190,6 +191,7 @@ class SoftLOneLoss : public LossFunction {
public:
explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
private:
// b = a^2.
const double b_;
@ -206,6 +208,7 @@ class CauchyLoss : public LossFunction {
public:
explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
private:
// b = a^2.
const double b_;
@ -213,6 +216,78 @@ class CauchyLoss : public LossFunction {
const double c_;
};
// Loss that is capped beyond a certain level using the arc-tangent function.
// The scaling parameter 'a' determines the level where falloff occurs.
// For costs much smaller than 'a', the loss function is linear and behaves like
// TrivialLoss, and for values much larger than 'a' the value asymptotically
// approaches the constant value of a * PI / 2.
//
// rho(s) = a atan(s / a).
//
// At s = 0: rho = [0, 1, 0].
class ArctanLoss : public LossFunction {
public:
explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) { }
virtual void Evaluate(double, double*) const;
private:
const double a_;
// b = 1 / a^2.
const double b_;
};
// Loss function that maps to approximately zero cost in a range around the
// origin, and reverts to linear in error (quadratic in cost) beyond this range.
// The tolerance parameter 'a' sets the nominal point at which the
// transition occurs, and the transition size parameter 'b' sets the nominal
// distance over which most of the transition occurs. Both a and b must be
// greater than zero, and typically b will be set to a fraction of a.
// The slope rho'[s] varies smoothly from about 0 at s <= a - b to
// about 1 at s >= a + b.
//
// The term is computed as:
//
// rho(s) = b log(1 + exp((s - a) / b)) - c0.
//
// where c0 is chosen so that rho(0) == 0
//
// c0 = b log(1 + exp(-a / b)
//
// This has the following useful properties:
//
// rho(s) == 0 for s = 0
// rho'(s) ~= 0 for s << a - b
// rho'(s) ~= 1 for s >> a + b
// rho''(s) > 0 for all s
//
// In addition, all derivatives are continuous, and the curvature is
// concentrated in the range a - b to a + b.
//
// At s = 0: rho = [0, ~0, ~0].
class TolerantLoss : public LossFunction {
public:
explicit TolerantLoss(double a, double b);
virtual void Evaluate(double, double*) const;
private:
const double a_, b_, c_;
};
// Composition of two loss functions. The error is the result of first
// evaluating g followed by f to yield the composition f(g(s)).
// The loss functions must not be NULL.
class ComposedLoss : public LossFunction {
public:
explicit ComposedLoss(const LossFunction* f, Ownership ownership_f,
const LossFunction* g, Ownership ownership_g);
virtual ~ComposedLoss();
virtual void Evaluate(double, double*) const;
private:
internal::scoped_ptr<const LossFunction> f_, g_;
const Ownership ownership_f_, ownership_g_;
};
// The discussion above has to do with length scaling: it affects the space
// in which s is measured. Sometimes you want to simply scale the output
// value of the robustifier. For example, you might want to weight
@ -249,7 +324,7 @@ class ScaledLoss : public LossFunction {
internal::scoped_ptr<const LossFunction> rho_;
const double a_;
const Ownership ownership_;
DISALLOW_COPY_AND_ASSIGN(ScaledLoss);
CERES_DISALLOW_COPY_AND_ASSIGN(ScaledLoss);
};
// Sometimes after the optimization problem has been constructed, we
@ -314,7 +389,7 @@ class LossFunctionWrapper : public LossFunction {
private:
internal::scoped_ptr<const LossFunction> rho_;
Ownership ownership_;
DISALLOW_COPY_AND_ASSIGN(LossFunctionWrapper);
CERES_DISALLOW_COPY_AND_ASSIGN(LossFunctionWrapper);
};
} // namespace ceres

@ -93,11 +93,13 @@ struct Differencer {
using Eigen::Map;
using Eigen::Matrix;
using Eigen::RowMajor;
using Eigen::ColMajor;
typedef Matrix<double, num_residuals, 1> ResidualVector;
typedef Matrix<double, parameter_block_size, 1> ParameterVector;
typedef Matrix<double, num_residuals, parameter_block_size, RowMajor>
JacobianMatrix;
typedef Matrix<double, num_residuals, parameter_block_size,
(parameter_block_size == 1 &&
num_residuals > 1) ? ColMajor : RowMajor> JacobianMatrix;
Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
num_residuals,

@ -50,13 +50,13 @@ namespace ceres {
class CostFunction;
class LossFunction;
class LocalParameterization;
class Solver;
namespace internal {
class Preprocessor;
class ProblemImpl;
class ParameterBlock;
class ResidualBlock;
class SolverImpl;
} // namespace internal
// A ResidualBlockId is a handle clients can use to delete residual
@ -255,9 +255,9 @@ class Problem {
int NumResiduals() const;
private:
friend class internal::SolverImpl;
friend class Solver;
internal::scoped_ptr<internal::ProblemImpl> problem_impl_;
DISALLOW_COPY_AND_ASSIGN(Problem);
CERES_DISALLOW_COPY_AND_ASSIGN(Problem);
};
} // namespace ceres

@ -47,6 +47,7 @@
#include <algorithm>
#include <cmath>
#include "glog/logging.h"
namespace ceres {
@ -145,18 +146,11 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]);
// --- IMPLEMENTATION
// Duplicate rather than decorate every use of cmath with _USE_MATH_CONSTANTS.
// Necessitated by Windows.
#ifndef M_PI
#define M_PI 3.14159265358979323846
#define CERES_NEED_M_PI_UNDEF
#endif
template<typename T>
inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
const T &a0 = angle_axis[0];
const T &a1 = angle_axis[1];
const T &a2 = angle_axis[2];
const T& a0 = angle_axis[0];
const T& a1 = angle_axis[1];
const T& a2 = angle_axis[2];
const T theta_squared = a0 * a0 + a1 * a1 + a2 * a2;
// For points not at the origin, the full conversion is numerically stable.
@ -183,16 +177,35 @@ inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
template<typename T>
inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
const T &q1 = quaternion[1];
const T &q2 = quaternion[2];
const T &q3 = quaternion[3];
const T sin_squared = q1 * q1 + q2 * q2 + q3 * q3;
const T& q1 = quaternion[1];
const T& q2 = quaternion[2];
const T& q3 = quaternion[3];
const T sin_squared_theta = q1 * q1 + q2 * q2 + q3 * q3;
// For quaternions representing non-zero rotation, the conversion
// is numerically stable.
if (sin_squared > T(0.0)) {
const T sin_theta = sqrt(sin_squared);
const T k = T(2.0) * atan2(sin_theta, quaternion[0]) / sin_theta;
if (sin_squared_theta > T(0.0)) {
const T sin_theta = sqrt(sin_squared_theta);
const T& cos_theta = quaternion[0];
// If cos_theta is negative, theta is greater than pi/2, which
// means that angle for the angle_axis vector which is 2 * theta
// would be greater than pi.
//
// While this will result in the correct rotation, it does not
// result in a normalized angle-axis vector.
//
// In that case we observe that 2 * theta ~ 2 * theta - 2 * pi,
// which is equivalent saying
//
// theta - pi = atan(sin(theta - pi), cos(theta - pi))
// = atan(-sin(theta), -cos(theta))
//
const T two_theta =
T(2.0) * ((cos_theta < 0.0)
? atan2(-sin_theta, -cos_theta)
: atan2(sin_theta, cos_theta));
const T k = two_theta / sin_theta;
angle_axis[0] = q1 * k;
angle_axis[1] = q2 * k;
angle_axis[2] = q3 * k;
@ -259,7 +272,7 @@ inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) {
// Case 2: theta ~ 0, means sin(theta) ~ theta to a good
// approximation.
if (costheta > 0) {
if (costheta > 0.0) {
const T kHalf = T(0.5);
for (int i = 0; i < 3; ++i) {
angle_axis[i] *= kHalf;
@ -284,8 +297,8 @@ inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) {
// angle_axis[i] should be positive, otherwise negative.
for (int i = 0; i < 3; ++i) {
angle_axis[i] = theta * sqrt((R[i*4] - costheta) * inv_one_minus_costheta);
if (((sintheta < 0) && (angle_axis[i] > 0)) ||
((sintheta > 0) && (angle_axis[i] < 0))) {
if (((sintheta < 0.0) && (angle_axis[i] > 0.0)) ||
((sintheta > 0.0) && (angle_axis[i] < 0.0))) {
angle_axis[i] = -angle_axis[i];
}
}
@ -334,7 +347,8 @@ template <typename T>
inline void EulerAnglesToRotationMatrix(const T* euler,
const int row_stride,
T* R) {
const T degrees_to_radians(M_PI / 180.0);
const double kPi = 3.14159265358979323846;
const T degrees_to_radians(kPi / 180.0);
const T pitch(euler[0] * degrees_to_radians);
const T roll(euler[1] * degrees_to_radians);
@ -517,10 +531,4 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
} // namespace ceres
// Clean define pollution.
#ifdef CERES_NEED_M_PI_UNDEF
#undef CERES_NEED_M_PI_UNDEF
#undef M_PI
#endif
#endif // CERES_PUBLIC_ROTATION_H_

@ -34,10 +34,10 @@
#include <cmath>
#include <string>
#include <vector>
#include "ceres/iteration_callback.h"
#include "ceres/crs_matrix.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/types.h"
namespace ceres {
@ -57,24 +57,47 @@ class Solver {
struct Options {
// Default constructor that sets up a generic sparse problem.
Options() {
minimizer_type = LEVENBERG_MARQUARDT;
trust_region_strategy_type = LEVENBERG_MARQUARDT;
dogleg_type = TRADITIONAL_DOGLEG;
use_nonmonotonic_steps = false;
max_consecutive_nonmonotonic_steps = 5;
max_num_iterations = 50;
max_solver_time_sec = 1.0e9;
max_solver_time_in_seconds = 1e9;
num_threads = 1;
tau = 1e-4;
initial_trust_region_radius = 1e4;
max_trust_region_radius = 1e16;
min_trust_region_radius = 1e-32;
min_relative_decrease = 1e-3;
lm_min_diagonal = 1e-6;
lm_max_diagonal = 1e32;
max_num_consecutive_invalid_steps = 5;
function_tolerance = 1e-6;
gradient_tolerance = 1e-10;
parameter_tolerance = 1e-8;
#ifndef CERES_NO_SUITESPARSE
linear_solver_type = SPARSE_NORMAL_CHOLESKY;
#else
#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
linear_solver_type = DENSE_QR;
#endif // CERES_NO_SUITESPARSE
#else
linear_solver_type = SPARSE_NORMAL_CHOLESKY;
#endif
preconditioner_type = JACOBI;
sparse_linear_algebra_library = SUITE_SPARSE;
#if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE)
sparse_linear_algebra_library = CX_SPARSE;
#endif
num_linear_solver_threads = 1;
num_eliminate_blocks = 0;
ordering_type = NATURAL;
#if defined(CERES_NO_SUITESPARSE)
use_block_amd = false;
#else
use_block_amd = true;
#endif
linear_solver_min_num_iterations = 1;
linear_solver_max_num_iterations = 500;
eta = 1e-1;
@ -82,10 +105,13 @@ class Solver {
logging_type = PER_MINIMIZER_ITERATION;
minimizer_progress_to_stdout = false;
return_initial_residuals = false;
return_initial_gradient = false;
return_initial_jacobian = false;
return_final_residuals = false;
return_final_gradient = false;
return_final_jacobian = false;
lsqp_dump_directory = "/tmp";
lsqp_dump_format_type = TEXTFILE;
crash_and_dump_lsqp_on_failure = false;
check_gradients = false;
gradient_check_relative_precision = 1e-8;
numeric_derivative_relative_step_size = 1e-6;
@ -94,27 +120,78 @@ class Solver {
// Minimizer options ----------------------------------------
MinimizerType minimizer_type;
TrustRegionStrategyType trust_region_strategy_type;
// Type of dogleg strategy to use.
DoglegType dogleg_type;
// The classical trust region methods are descent methods, in that
// they only accept a point if it strictly reduces the value of
// the objective function.
//
// Relaxing this requirement allows the algorithm to be more
// efficient in the long term at the cost of some local increase
// in the value of the objective function.
//
// This is because allowing for non-decreasing objective function
// values in a princpled manner allows the algorithm to "jump over
// boulders" as the method is not restricted to move into narrow
// valleys while preserving its convergence properties.
//
// Setting use_nonmonotonic_steps to true enables the
// non-monotonic trust region algorithm as described by Conn,
// Gould & Toint in "Trust Region Methods", Section 10.1.
//
// The parameter max_consecutive_nonmonotonic_steps controls the
// window size used by the step selection algorithm to accept
// non-monotonic steps.
//
// Even though the value of the objective function may be larger
// than the minimum value encountered over the course of the
// optimization, the final parameters returned to the user are the
// ones corresponding to the minimum cost over all iterations.
bool use_nonmonotonic_steps;
int max_consecutive_nonmonotonic_steps;
// Maximum number of iterations for the minimizer to run for.
int max_num_iterations;
// Maximum time for which the minimizer should run for.
double max_solver_time_sec;
double max_solver_time_in_seconds;
// Number of threads used by Ceres for evaluating the cost and
// jacobians.
int num_threads;
// For Levenberg-Marquardt, the initial value for the
// regularizer. This is the inversely related to the size of the
// initial trust region.
double tau;
// Trust region minimizer settings.
double initial_trust_region_radius;
double max_trust_region_radius;
// For trust region methods, this is lower threshold for the
// relative decrease before a step is accepted.
// Minimizer terminates when the trust region radius becomes
// smaller than this value.
double min_trust_region_radius;
// Lower bound for the relative decrease before a step is
// accepted.
double min_relative_decrease;
// For the Levenberg-Marquadt algorithm, the scaled diagonal of
// the normal equations J'J is used to control the size of the
// trust region. Extremely small and large values along the
// diagonal can make this regularization scheme
// fail. lm_max_diagonal and lm_min_diagonal, clamp the values of
// diag(J'J) from above and below. In the normal course of
// operation, the user should not have to modify these parameters.
double lm_min_diagonal;
double lm_max_diagonal;
// Sometimes due to numerical conditioning problems or linear
// solver flakiness, the trust region strategy may return a
// numerically invalid step that can be fixed by reducing the
// trust region size. So the TrustRegionMinimizer allows for a few
// successive invalid steps before it declares NUMERICAL_FAILURE.
int max_num_consecutive_invalid_steps;
// Minimizer terminates when
//
// (new_cost - old_cost) < function_tolerance * old_cost;
@ -141,6 +218,12 @@ class Solver {
// Type of preconditioner to use with the iterative linear solvers.
PreconditionerType preconditioner_type;
// Ceres supports using multiple sparse linear algebra libraries
// for sparse matrix ordering and factorizations. Currently,
// SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
// whether they are linked into Ceres at build time.
SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
// Number of threads used by Ceres to solve the Newton
// step. Currently only the SPARSE_SCHUR solver is capable of
// using this setting.
@ -170,6 +253,19 @@ class Solver {
// non-empty.
vector<double*> ordering;
// By virtue of the modeling layer in Ceres being block oriented,
// all the matrices used by Ceres are also block oriented. When
// doing sparse direct factorization of these matrices (for
// SPARSE_NORMAL_CHOLESKY, SPARSE_SCHUR and ITERATIVE in
// conjunction with CLUSTER_TRIDIAGONAL AND CLUSTER_JACOBI
// preconditioners), the fill-reducing ordering algorithms can
// either be run on the block or the scalar form of these matrices.
// Running it on the block form exposes more of the super-nodal
// structure of the matrix to the factorization routines. Setting
// this parameter to true runs the ordering algorithms in block
// form. Currently this option only makes sense with
// sparse_linear_algebra_library = SUITE_SPARSE.
bool use_block_amd;
// Minimum number of iterations for which the linear solver should
// run, even if the convergence criterion is satisfied.
@ -206,7 +302,12 @@ class Solver {
bool minimizer_progress_to_stdout;
bool return_initial_residuals;
bool return_initial_gradient;
bool return_initial_jacobian;
bool return_final_residuals;
bool return_final_gradient;
bool return_final_jacobian;
// List of iterations at which the optimizer should dump the
// linear least squares problem to disk. Useful for testing and
@ -217,15 +318,6 @@ class Solver {
string lsqp_dump_directory;
DumpFormatType lsqp_dump_format_type;
// Dump the linear least squares problem to disk if the minimizer
// fails due to NUMERICAL_FAILURE and crash the process. This flag
// is useful for generating debugging information. The problem is
// dumped in a file whose name is determined by
// Solver::Options::lsqp_dump_format.
//
// Note: This requires a version of Ceres built with protocol buffers.
bool crash_and_dump_lsqp_on_failure;
// Finite differences options ----------------------------------------------
// Check all jacobians computed by each residual block with finite
@ -273,16 +365,25 @@ class Solver {
bool update_state_every_iteration;
// Callbacks that are executed at the end of each iteration of the
// Minimizer. They are executed in the order that they are
// specified in this vector. By default, parameter blocks are
// updated only at the end of the optimization, i.e when the
// Minimizer terminates. This behaviour is controlled by
// Minimizer. An iteration may terminate midway, either due to
// numerical failures or because one of the convergence tests has
// been satisfied. In this case none of the callbacks are
// executed.
// Callbacks are executed in the order that they are specified in
// this vector. By default, parameter blocks are updated only at
// the end of the optimization, i.e when the Minimizer
// terminates. This behaviour is controlled by
// update_state_every_variable. If the user wishes to have access
// to the update parameter blocks when his/her callbacks are
// executed, then set update_state_every_iteration to true.
//
// The solver does NOT take ownership of these pointers.
vector<IterationCallback*> callbacks;
// If non-empty, a summary of the execution of the solver is
// recorded to this file.
string solver_log;
};
struct Summary {
@ -313,20 +414,74 @@ class Solver {
// blocks that they depend on were fixed.
double fixed_cost;
// Residuals before and after the optimization. Each vector
// contains problem.NumResiduals() elements. Residuals are in the
// same order in which they were added to the problem object when
// constructing this problem.
// Vectors of residuals before and after the optimization. The
// entries of these vectors are in the order in which
// ResidualBlocks were added to the Problem object.
//
// Whether the residual vectors are populated with values is
// controlled by Solver::Options::return_initial_residuals and
// Solver::Options::return_final_residuals respectively.
vector<double> initial_residuals;
vector<double> final_residuals;
// Gradient vectors, before and after the optimization. The rows
// are in the same order in which the ParameterBlocks were added
// to the Problem object.
//
// NOTE: Since AddResidualBlock adds ParameterBlocks to the
// Problem automatically if they do not already exist, if you wish
// to have explicit control over the ordering of the vectors, then
// use Problem::AddParameterBlock to explicitly add the
// ParameterBlocks in the order desired.
//
// Whether the vectors are populated with values is controlled by
// Solver::Options::return_initial_gradient and
// Solver::Options::return_final_gradient respectively.
vector<double> initial_gradient;
vector<double> final_gradient;
// Jacobian matrices before and after the optimization. The rows
// of these matrices are in the same order in which the
// ResidualBlocks were added to the Problem object. The columns
// are in the same order in which the ParameterBlocks were added
// to the Problem object.
//
// NOTE: Since AddResidualBlock adds ParameterBlocks to the
// Problem automatically if they do not already exist, if you wish
// to have explicit control over the column ordering of the
// matrix, then use Problem::AddParameterBlock to explicitly add
// the ParameterBlocks in the order desired.
//
// The Jacobian matrices are stored as compressed row sparse
// matrices. Please see ceres/crs_matrix.h for more details of the
// format.
//
// Whether the Jacboan matrices are populated with values is
// controlled by Solver::Options::return_initial_jacobian and
// Solver::Options::return_final_jacobian respectively.
CRSMatrix initial_jacobian;
CRSMatrix final_jacobian;
vector<IterationSummary> iterations;
int num_successful_steps;
int num_unsuccessful_steps;
// When the user calls Solve, before the actual optimization
// occurs, Ceres performs a number of preprocessing steps. These
// include error checks, memory allocations, and reorderings. This
// time is accounted for as preprocessing time.
double preprocessor_time_in_seconds;
// Time spent in the TrustRegionMinimizer.
double minimizer_time_in_seconds;
// After the Minimizer is finished, some time is spent in
// re-evaluating residuals etc. This time is accounted for in the
// postprocessor time.
double postprocessor_time_in_seconds;
// Some total of all time spent inside Ceres when Solve is called.
double total_time_in_seconds;
// Preprocessor summary.
@ -354,6 +509,10 @@ class Solver {
PreconditionerType preconditioner_type;
OrderingType ordering_type;
TrustRegionStrategyType trust_region_strategy_type;
DoglegType dogleg_type;
SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
};
// Once a least squares problem has been built, this function takes

@ -59,14 +59,18 @@ enum LinearSolverType {
// normal equations A'A x = A'b. They are direct solvers and do not
// assume any special problem structure.
// Solve the normal equations using a sparse cholesky solver; based
// on CHOLMOD.
SPARSE_NORMAL_CHOLESKY,
// Solve the normal equations using a dense Cholesky solver; based
// on Eigen.
DENSE_NORMAL_CHOLESKY,
// Solve the normal equations using a dense QR solver; based on
// Eigen.
DENSE_QR,
// Solve the normal equations using a sparse cholesky solver; requires
// SuiteSparse or CXSparse.
SPARSE_NORMAL_CHOLESKY,
// Specialized solvers, specific to problems with a generalized
// bi-partitite structure.
@ -110,6 +114,15 @@ enum PreconditionerType {
CLUSTER_TRIDIAGONAL
};
enum SparseLinearAlgebraLibraryType {
// High performance sparse Cholesky factorization and approximate
// minimum degree ordering.
SUITE_SPARSE,
// A lightweight replacment for SuiteSparse.
CX_SPARSE
};
enum LinearSolverTerminationType {
// Termination criterion was met. For factorization based solvers
// the tolerance is assumed to be zero. Any user provided values are
@ -149,8 +162,47 @@ enum LoggingType {
PER_MINIMIZER_ITERATION
};
enum MinimizerType {
LEVENBERG_MARQUARDT
// Ceres supports different strategies for computing the trust region
// step.
enum TrustRegionStrategyType {
// The default trust region strategy is to use the step computation
// used in the Levenberg-Marquardt algorithm. For more details see
// levenberg_marquardt_strategy.h
LEVENBERG_MARQUARDT,
// Powell's dogleg algorithm interpolates between the Cauchy point
// and the Gauss-Newton step. It is particularly useful if the
// LEVENBERG_MARQUARDT algorithm is making a large number of
// unsuccessful steps. For more details see dogleg_strategy.h.
//
// NOTES:
//
// 1. This strategy has not been experimented with or tested as
// extensively as LEVENBERG_MARQUARDT, and therefore it should be
// considered EXPERIMENTAL for now.
//
// 2. For now this strategy should only be used with exact
// factorization based linear solvers, i.e., SPARSE_SCHUR,
// DENSE_SCHUR, DENSE_QR and SPARSE_NORMAL_CHOLESKY.
DOGLEG
};
// Ceres supports two different dogleg strategies.
// The "traditional" dogleg method by Powell and the
// "subspace" method described in
// R. H. Byrd, R. B. Schnabel, and G. A. Shultz,
// "Approximate solution of the trust region problem by minimization
// over two-dimensional subspaces", Mathematical Programming,
// 40 (1988), pp. 247--263
enum DoglegType {
// The traditional approach constructs a dogleg path
// consisting of two line segments and finds the furthest
// point on that path that is still inside the trust region.
TRADITIONAL_DOGLEG,
// The subspace approach finds the exact minimum of the model
// constrained to the subspace spanned by the dogleg path.
SUBSPACE_DOGLEG
};
enum SolverTerminationType {
@ -246,11 +298,15 @@ enum DimensionType {
const char* LinearSolverTypeToString(LinearSolverType type);
const char* PreconditionerTypeToString(PreconditionerType type);
const char* SparseLinearAlgebraLibraryTypeToString(
SparseLinearAlgebraLibraryType type);
const char* LinearSolverTerminationTypeToString(
LinearSolverTerminationType type);
const char* OrderingTypeToString(OrderingType type);
const char* SolverTerminationTypeToString(SolverTerminationType type);
const char* SparseLinearAlgebraLibraryTypeToString(
SparseLinearAlgebraLibraryType type);
const char* TrustRegionStrategyTypeToString( TrustRegionStrategyType type);
bool IsSchurType(LinearSolverType type);
} // namespace ceres

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@ -27,39 +27,41 @@
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Implmentation of Levenberg Marquardt algorithm based on "Methods for
// Nonlinear Least Squares" by K. Madsen, H.B. Nielsen and
// O. Tingleff. Available to download from
//
// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
//
#ifndef CERES_INTERNAL_LEVENBERG_MARQUARDT_H_
#define CERES_INTERNAL_LEVENBERG_MARQUARDT_H_
#include "ceres/array_utils.h"
#include "ceres/minimizer.h"
#include "ceres/solver.h"
#include <cmath>
#include <cstddef>
#include "ceres/fpclassify.h"
namespace ceres {
namespace internal {
class Evaluator;
class LinearSolver;
// It is a near impossibility that user code generates this exact
// value in normal operation, thus we will use it to fill arrays
// before passing them to user code. If on return an element of the
// array still contains this value, we will assume that the user code
// did not write to that memory location.
const double kImpossibleValue = 1e302;
class LevenbergMarquardt : public Minimizer {
public:
virtual ~LevenbergMarquardt();
bool IsArrayValid(const int size, const double* x) {
if (x != NULL) {
for (int i = 0; i < size; ++i) {
if (!IsFinite(x[i]) || (x[i] == kImpossibleValue)) {
return false;
}
}
}
return true;
}
virtual void Minimize(const Minimizer::Options& options,
Evaluator* evaluator,
LinearSolver* linear_solver,
const double* initial_parameters,
double* final_parameters,
Solver::Summary* summary);
};
void InvalidateArray(const int size, double* x) {
if (x != NULL) {
for (int i = 0; i < size; ++i) {
x[i] = kImpossibleValue;
}
}
}
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_LEVENBERG_MARQUARDT_H_

@ -0,0 +1,65 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Utility routines for validating arrays.
//
// These are useful for detecting two common class of errors.
//
// 1. Uninitialized memory - where the user for some reason did not
// compute part of an array, but the code expects it.
//
// 2. Numerical failure while computing the cost/residual/jacobian,
// e.g. NaN, infinities etc. This is particularly useful since the
// automatic differentiation code does computations that are not
// evident to the user and can silently generate hard to debug errors.
#ifndef CERES_INTERNAL_ARRAY_UTILS_H_
#define CERES_INTERNAL_ARRAY_UTILS_H_
#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
// Fill the array x with an impossible value that the user code is
// never expected to compute.
void InvalidateArray(int size, double* x);
// Check if all the entries of the array x are valid, i.e. all the
// values in the array should be finite and none of them should be
// equal to the "impossible" value used by InvalidateArray.
bool IsArrayValid(int size, const double* x);
extern const double kImpossibleValue;
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_ARRAY_UTILS_H_

@ -40,16 +40,26 @@
namespace ceres {
namespace internal {
void BlockEvaluatePreparer::Init(int** jacobian_layout) {
void BlockEvaluatePreparer::Init(int const* const* jacobian_layout,
int max_derivatives_per_residual_block) {
jacobian_layout_ = jacobian_layout;
scratch_evaluate_preparer_.Init(max_derivatives_per_residual_block);
}
// Point the jacobian blocks directly into the block sparse matrix.
void BlockEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
int residual_block_index,
SparseMatrix* jacobian,
double** jacobians) const {
CHECK(jacobian != NULL);
double** jacobians) {
// If the overall jacobian is not available, use the scratch space.
if (jacobian == NULL) {
scratch_evaluate_preparer_.Prepare(residual_block,
residual_block_index,
jacobian,
jacobians);
return;
}
double* jacobian_values =
down_cast<BlockSparseMatrix*>(jacobian)->mutable_values();

@ -36,6 +36,8 @@
#ifndef CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
#define CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
#include "ceres/scratch_evaluate_preparer.h"
namespace ceres {
namespace internal {
@ -47,18 +49,26 @@ class BlockEvaluatePreparer {
// Using Init() instead of a constructor allows for allocating this structure
// with new[]. This is because C++ doesn't allow passing arguments to objects
// constructed with new[] (as opposed to plain 'new').
void Init(int** jacobian_layout);
void Init(int const* const* jacobian_layout,
int max_derivatives_per_residual_block);
// EvaluatePreparer interface
// Point the jacobian blocks directly into the block sparse matrix.
// Point the jacobian blocks directly into the block sparse matrix, if
// jacobian is non-null. Otherwise, uses an internal per-thread buffer to
// store the jacobians temporarily.
void Prepare(const ResidualBlock* residual_block,
int residual_block_index,
SparseMatrix* jacobian,
double** jacobians) const;
double** jacobians);
private:
int const* const* jacobian_layout_;
// For the case that the overall jacobian is not available, but the
// individual jacobians are requested, use a pass-through scratch evaluate
// preparer.
ScratchEvaluatePreparer scratch_evaluate_preparer_;
};
} // namespace internal

@ -40,11 +40,10 @@
namespace ceres {
namespace internal {
BlockJacobiPreconditioner::BlockJacobiPreconditioner(
const LinearOperator& A)
: block_structure_(
*(down_cast<const BlockSparseMatrix*>(&A)->block_structure())),
num_rows_(A.num_rows()) {
BlockJacobiPreconditioner::BlockJacobiPreconditioner(const LinearOperator& A)
: num_rows_(A.num_rows()),
block_structure_(
*(down_cast<const BlockSparseMatrix*>(&A)->block_structure())) {
// Calculate the amount of storage needed.
int storage_needed = 0;
for (int c = 0; c < block_structure_.cols.size(); ++c) {

@ -136,9 +136,12 @@ BlockJacobianWriter::BlockJacobianWriter(const Evaluator::Options& options,
// makes the final Write() a nop.
BlockEvaluatePreparer* BlockJacobianWriter::CreateEvaluatePreparers(
int num_threads) {
int max_derivatives_per_residual_block =
program_->MaxDerivativesPerResidualBlock();
BlockEvaluatePreparer* preparers = new BlockEvaluatePreparer[num_threads];
for (int i = 0; i < num_threads; i++) {
preparers[i].Init(&jacobian_layout_[0]);
preparers[i].Init(&jacobian_layout_[0], max_derivatives_per_residual_block);
}
return preparers;
}

@ -28,10 +28,10 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "glog/logging.h"
#include "ceres/block_random_access_dense_matrix.h"
#include <vector>
#include <glog/logging.h>
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"

@ -89,7 +89,7 @@ class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
vector<int> block_layout_;
scoped_array<double> values_;
DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDenseMatrix);
CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDenseMatrix);
};
} // namespace internal

@ -77,7 +77,7 @@ namespace internal {
//
// if (cell != NULL) {
// MatrixRef m(cell->values, row_stride, col_stride);
// MutexLock l(&cell->m);
// CeresMutexLock l(&cell->m);
// m.block(row, col, row_block_size, col_block_size) = ...
// }

@ -28,17 +28,17 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "glog/logging.h"
#include "ceres/block_random_access_sparse_matrix.h"
#include <algorithm>
#include <set>
#include <utility>
#include <vector>
#include <glog/logging.h>
#include "ceres/mutex.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/mutex.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
namespace ceres {

@ -38,6 +38,7 @@
#include "ceres/block_random_access_matrix.h"
#include "ceres/collections_port.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/integral_types.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
@ -84,11 +85,11 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
private:
long int IntPairToLong(int a, int b) {
int64 IntPairToLong(int a, int b) {
return a * kMaxRowBlocks + b;
}
const int kMaxRowBlocks;
const int64 kMaxRowBlocks;
// row/column block sizes.
const vector<int> blocks_;
@ -100,7 +101,8 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
// The underlying matrix object which actually stores the cells.
scoped_ptr<TripletSparseMatrix> tsm_;
DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessSparseMatrix);
friend class BlockRandomAccessSparseMatrixTest;
CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessSparseMatrix);
};
} // namespace internal

@ -33,11 +33,11 @@
#include <cstddef>
#include <algorithm>
#include <vector>
#include <glog/logging.h>
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/matrix_proto.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
@ -81,7 +81,7 @@ BlockSparseMatrix::BlockSparseMatrix(
CHECK_NOTNULL(values_.get());
}
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
BlockSparseMatrix::BlockSparseMatrix(const SparseMatrixProto& outer_proto) {
CHECK(outer_proto.has_block_matrix());
@ -244,7 +244,7 @@ const CompressedRowBlockStructure* BlockSparseMatrix::block_structure()
return block_structure_.get();
}
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
void BlockSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
outer_proto->Clear();

@ -74,7 +74,7 @@ class BlockSparseMatrixBase : public SparseMatrix {
virtual const double* RowBlockValues(int row_block_index) const = 0;
private:
DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrixBase);
CERES_DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrixBase);
};
// This class implements the SparseMatrix interface for storing and
@ -96,7 +96,7 @@ class BlockSparseMatrix : public BlockSparseMatrixBase {
explicit BlockSparseMatrix(CompressedRowBlockStructure* block_structure);
// Construct a block sparse matrix from a protocol buffer.
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
explicit BlockSparseMatrix(const SparseMatrixProto& proto);
#endif
@ -110,7 +110,7 @@ class BlockSparseMatrix : public BlockSparseMatrixBase {
virtual void SquaredColumnNorm(double* x) const;
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
virtual void ToProto(SparseMatrixProto* proto) const;
#endif
virtual void ToTextFile(FILE* file) const;
@ -135,7 +135,7 @@ class BlockSparseMatrix : public BlockSparseMatrixBase {
int num_nonzeros_;
scoped_array<double> values_;
scoped_ptr<CompressedRowBlockStructure> block_structure_;
DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrix);
CERES_DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrix);
};
} // namespace internal

@ -38,7 +38,7 @@ bool CellLessThan(const Cell& lhs, const Cell& rhs) {
return (lhs.block_id < rhs.block_id);
}
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
void ProtoToBlockStructure(const BlockStructureProto &proto,
CompressedRowBlockStructure *block_structure) {
// Decode the column blocks.

@ -31,11 +31,11 @@
#include "ceres/canonical_views_clustering.h"
#include <glog/logging.h>
#include "ceres/graph.h"
#include "ceres/collections_port.h"
#include "ceres/map_util.h"
#include "ceres/graph.h"
#include "ceres/internal/macros.h"
#include "ceres/map_util.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
@ -75,7 +75,7 @@ class CanonicalViewsClustering {
IntMap view_to_canonical_view_;
// Maps a view to its similarity to its current cluster center.
HashMap<int, double> view_to_canonical_view_similarity_;
DISALLOW_COPY_AND_ASSIGN(CanonicalViewsClustering);
CERES_DISALLOW_COPY_AND_ASSIGN(CanonicalViewsClustering);
};
void ComputeCanonicalViewsClustering(

@ -57,7 +57,7 @@ class CgnrSolver : public LinearSolver {
private:
const LinearSolver::Options options_;
scoped_ptr<BlockJacobiPreconditioner> jacobi_preconditioner_;
DISALLOW_COPY_AND_ASSIGN(CgnrSolver);
CERES_DISALLOW_COPY_AND_ASSIGN(CgnrSolver);
};
} // namespace internal

@ -33,31 +33,49 @@
#ifndef CERES_INTERNAL_COLLECTIONS_PORT_H_
#define CERES_INTERNAL_COLLECTIONS_PORT_H_
#ifdef CERES_HASH_BOOST
#include <boost/tr1/unordered_map.hpp>
#include <boost/tr1/unordered_set.hpp>
#if defined(CERES_NO_TR1)
# include <map>
# include <set>
#else
#if defined(_MSC_VER) && _MSC_VER <= 1700
#include <unordered_map>
#include <unordered_set>
#else
#include <tr1/unordered_map>
#include <tr1/unordered_set>
# if defined(_MSC_VER) && _MSC_VER <= 1600
# include <unordered_map>
# include <unordered_set>
# else
# include <tr1/unordered_map>
# include <tr1/unordered_set>
# endif
#endif
#endif
#include <utility>
#include "ceres/integral_types.h"
#include "ceres/internal/port.h"
// Some systems don't have access to TR1. In that case, substitute the hash
// map/set with normal map/set. The price to pay is slightly slower speed for
// some operations.
#if defined(CERES_NO_TR1)
namespace ceres {
namespace internal {
template<typename K, typename V>
struct HashMap : tr1::unordered_map<K, V> {};
struct HashMap : map<K, V> {};
template<typename K>
struct HashSet : tr1::unordered_set<K> {};
struct HashSet : set<K> {};
} // namespace internal
} // namespace ceres
#else
namespace ceres {
namespace internal {
template<typename K, typename V>
struct HashMap : std::tr1::unordered_map<K, V> {};
template<typename K>
struct HashSet : std::tr1::unordered_set<K> {};
#if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__)
#define GG_LONGLONG(x) x##I64
@ -124,11 +142,7 @@ CERES_HASH_NAMESPACE_START
// Hasher for STL pairs. Requires hashers for both members to be defined.
template<typename T>
#ifdef CERES_HASH_BOOST
struct hash {
#else
struct hash<pair<T, T> > {
#endif
size_t operator()(const pair<T, T>& p) const {
size_t h1 = hash<T>()(p.first);
size_t h2 = hash<T>()(p.second);
@ -148,4 +162,6 @@ struct hash<pair<T, T> > {
CERES_HASH_NAMESPACE_END
#endif // CERES_NO_TR1
#endif // CERES_INTERNAL_COLLECTIONS_PORT_H_

@ -130,8 +130,24 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
}
row_pos += num_residuals;
}
CHECK_EQ(num_jacobian_nonzeros, rows[total_num_residuals]);
// Populate the row and column block vectors for use by block
// oriented ordering algorithms. This is useful when
// Solver::Options::use_block_amd = true.
const vector<ParameterBlock*>& parameter_blocks = program_->parameter_blocks();
vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
col_blocks.resize(parameter_blocks.size());
for (int i = 0; i < parameter_blocks.size(); ++i) {
col_blocks[i] = parameter_blocks[i]->LocalSize();
}
vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
row_blocks.resize(residual_blocks.size());
for (int i = 0; i < residual_blocks.size(); ++i) {
row_blocks[i] = residual_blocks[i]->NumResiduals();
}
return jacobian;
}

@ -32,15 +32,22 @@
#include <algorithm>
#include <vector>
#include "ceres/matrix_proto.h"
#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/matrix_proto.h"
namespace ceres {
namespace internal {
namespace {
// Helper functor used by the constructor for reordering the contents
// of a TripletSparseMatrix.
// of a TripletSparseMatrix. This comparator assumes thay there are no
// duplicates in the pair of arrays rows and cols, i.e., there is no
// indices i and j (not equal to each other) s.t.
//
// rows[i] == rows[j] && cols[i] == cols[j]
//
// If this is the case, this functor will not be a StrictWeakOrdering.
struct RowColLessThan {
RowColLessThan(const int* rows, const int* cols)
: rows(rows), cols(cols) {
@ -128,7 +135,7 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(
CHECK_EQ(num_nonzeros(), m.num_nonzeros());
}
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
CompressedRowSparseMatrix::CompressedRowSparseMatrix(
const SparseMatrixProto& outer_proto) {
CHECK(outer_proto.has_compressed_row_matrix());
@ -241,7 +248,7 @@ void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
}
}
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
void CompressedRowSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
CHECK_NOTNULL(outer_proto);
@ -330,5 +337,18 @@ void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
}
}
void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
matrix->num_rows = num_rows();
matrix->num_cols = num_cols();
matrix->rows.resize(matrix->num_rows + 1);
matrix->cols.resize(num_nonzeros());
matrix->values.resize(num_nonzeros());
copy(rows_.get(), rows_.get() + matrix->num_rows + 1, matrix->rows.begin());
copy(cols_.get(), cols_.get() + num_nonzeros(), matrix->cols.begin());
copy(values_.get(), values_.get() + num_nonzeros(), matrix->values.begin());
}
} // namespace internal
} // namespace ceres

@ -31,14 +31,19 @@
#ifndef CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
#include <vector>
#include <glog/logging.h>
#include "ceres/sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
namespace ceres {
class CRSMatrix;
namespace internal {
class SparseMatrixProto;
@ -52,7 +57,7 @@ class CompressedRowSparseMatrix : public SparseMatrix {
//
// We assume that m does not have any repeated entries.
explicit CompressedRowSparseMatrix(const TripletSparseMatrix& m);
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
explicit CompressedRowSparseMatrix(const SparseMatrixProto& proto);
#endif
@ -85,7 +90,7 @@ class CompressedRowSparseMatrix : public SparseMatrix {
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
virtual void ToProto(SparseMatrixProto* proto) const;
#endif
virtual void ToTextFile(FILE* file) const;
@ -103,6 +108,8 @@ class CompressedRowSparseMatrix : public SparseMatrix {
// have the same number of columns as this matrix.
void AppendRows(const CompressedRowSparseMatrix& m);
void ToCRSMatrix(CRSMatrix* matrix) const;
// Low level access methods that expose the structure of the matrix.
const int* cols() const { return cols_.get(); }
int* mutable_cols() { return cols_.get(); }
@ -110,6 +117,12 @@ class CompressedRowSparseMatrix : public SparseMatrix {
const int* rows() const { return rows_.get(); }
int* mutable_rows() { return rows_.get(); }
const vector<int>& row_blocks() const { return row_blocks_; }
vector<int>* mutable_row_blocks() { return &row_blocks_; }
const vector<int>& col_blocks() const { return col_blocks_; }
vector<int>* mutable_col_blocks() { return &col_blocks_; }
private:
scoped_array<int> cols_;
scoped_array<int> rows_;
@ -117,10 +130,17 @@ class CompressedRowSparseMatrix : public SparseMatrix {
int num_rows_;
int num_cols_;
int max_num_nonzeros_;
DISALLOW_COPY_AND_ASSIGN(CompressedRowSparseMatrix);
// If the matrix has an underlying block structure, then it can also
// carry with it row and column block sizes. This is auxilliary and
// optional information for use by algorithms operating on the
// matrix. The class itself does not make use of this information in
// any way.
vector<int> row_blocks_;
vector<int> col_blocks_;
CERES_DISALLOW_COPY_AND_ASSIGN(CompressedRowSparseMatrix);
};
} // namespace internal

@ -34,10 +34,10 @@
#include <cstddef>
#include <glog/logging.h>
#include "ceres/stl_util.h"
#include "ceres/internal/eigen.h"
#include "ceres/stl_util.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {

@ -41,18 +41,18 @@
#include <cmath>
#include <cstddef>
#include <glog/logging.h>
#include "ceres/linear_operator.h"
#include "ceres/fpclassify.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_operator.h"
#include "ceres/types.h"
#include "ceres/jet.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
namespace {
bool IsZeroOrInfinity(double x) {
return ((x == 0.0) || (isinf(x)));
return ((x == 0.0) || (IsInfinite(x)));
}
// Constant used in the MATLAB implementation ~ 2 * eps.
@ -115,7 +115,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
for (summary.num_iterations = 1;
summary.num_iterations < options_.max_num_iterations;
++summary.num_iterations) {
VLOG(2) << "cg iteration " << summary.num_iterations;
VLOG(3) << "cg iteration " << summary.num_iterations;
// Apply preconditioner
if (per_solve_options.preconditioner != NULL) {
@ -151,14 +151,14 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
A->RightMultiply(p.data(), q.data());
double pq = p.dot(q);
if ((pq <= 0) || isinf(pq)) {
if ((pq <= 0) || IsInfinite(pq)) {
LOG(ERROR) << "Numerical failure. pq = " << pq;
summary.termination_type = FAILURE;
break;
}
double alpha = rho / pq;
if (isinf(alpha)) {
if (IsInfinite(alpha)) {
LOG(ERROR) << "Numerical failure. alpha " << alpha;
summary.termination_type = FAILURE;
break;
@ -202,13 +202,13 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
// 1. Stephen G. Nash & Ariela Sofer, Assessing A Search
// Direction Within A Truncated Newton Method, Operation
// Research Letters 9(1990) 219-221.
//
//
// 2. Stephen G. Nash, A Survey of Truncated Newton Methods,
// Journal of Computational and Applied Mathematics,
// 124(1-2), 45-59, 2000.
//
double zeta = summary.num_iterations * (Q1 - Q0) / Q1;
VLOG(2) << "Q termination: zeta " << zeta
VLOG(3) << "Q termination: zeta " << zeta
<< " " << per_solve_options.q_tolerance;
if (zeta < per_solve_options.q_tolerance) {
summary.termination_type = TOLERANCE;
@ -218,7 +218,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
// Residual based termination.
norm_r = r. norm();
VLOG(2) << "R termination: norm_r " << norm_r
VLOG(3) << "R termination: norm_r " << norm_r
<< " " << tol_r;
if (norm_r <= tol_r) {
summary.termination_type = TOLERANCE;

@ -65,7 +65,7 @@ class ConjugateGradientsSolver : public LinearSolver {
private:
const LinearSolver::Options options_;
DISALLOW_COPY_AND_ASSIGN(ConjugateGradientsSolver);
CERES_DISALLOW_COPY_AND_ASSIGN(ConjugateGradientsSolver);
};
} // namespace internal

@ -32,8 +32,8 @@
#include <cstddef>
#include <cmath>
#include <glog/logging.h>
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {

@ -0,0 +1,130 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: strandmark@google.com (Petter Strandmark)
#ifndef CERES_NO_CXSPARSE
#include "ceres/cxsparse.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
CXSparse::CXSparse() : scratch_size_(0), scratch_(NULL) {
}
CXSparse::~CXSparse() {
if (scratch_size_ > 0) {
cs_free(scratch_);
}
}
bool CXSparse::SolveCholesky(cs_di* A,
cs_dis* symbolic_factorization,
double* b) {
// Make sure we have enough scratch space available.
if (scratch_size_ < A->n) {
if (scratch_size_ > 0) {
cs_free(scratch_);
}
scratch_ = reinterpret_cast<CS_ENTRY*>(cs_malloc(A->n, sizeof(CS_ENTRY)));
}
// Solve using Cholesky factorization
csn* numeric_factorization = cs_chol(A, symbolic_factorization);
if (numeric_factorization == NULL) {
LOG(WARNING) << "Cholesky factorization failed.";
return false;
}
// When the Cholesky factorization succeeded, these methods are guaranteed to
// succeeded as well. In the comments below, "x" refers to the scratch space.
//
// Set x = P * b.
cs_ipvec(symbolic_factorization->pinv, b, scratch_, A->n);
// Set x = L \ x.
cs_lsolve(numeric_factorization->L, scratch_);
// Set x = L' \ x.
cs_ltsolve(numeric_factorization->L, scratch_);
// Set b = P' * x.
cs_pvec(symbolic_factorization->pinv, scratch_, b, A->n);
// Free Cholesky factorization.
cs_nfree(numeric_factorization);
return true;
}
cs_dis* CXSparse::AnalyzeCholesky(cs_di* A) {
// order = 1 for Cholesky factorization.
return cs_schol(1, A);
}
cs_di CXSparse::CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A) {
cs_di At;
At.m = A->num_cols();
At.n = A->num_rows();
At.nz = -1;
At.nzmax = A->num_nonzeros();
At.p = A->mutable_rows();
At.i = A->mutable_cols();
At.x = A->mutable_values();
return At;
}
cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) {
cs_di_sparse tsm_wrapper;
tsm_wrapper.nzmax = tsm->num_nonzeros();;
tsm_wrapper.nz = tsm->num_nonzeros();;
tsm_wrapper.m = tsm->num_rows();
tsm_wrapper.n = tsm->num_cols();
tsm_wrapper.p = tsm->mutable_cols();
tsm_wrapper.i = tsm->mutable_rows();
tsm_wrapper.x = tsm->mutable_values();
return cs_compress(&tsm_wrapper);
}
void CXSparse::Free(cs_di* factor) {
cs_free(factor);
}
void CXSparse::Free(cs_dis* factor) {
cs_sfree(factor);
}
} // namespace internal
} // namespace ceres
#endif // CERES_NO_CXSPARSE

@ -0,0 +1,90 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: strandmark@google.com (Petter Strandmark)
#ifndef CERES_INTERNAL_CXSPARSE_H_
#define CERES_INTERNAL_CXSPARSE_H_
#ifndef CERES_NO_CXSPARSE
#include "cs.h"
namespace ceres {
namespace internal {
class CompressedRowSparseMatrix;
class TripletSparseMatrix;
// This object provides access to solving linear systems using Cholesky
// factorization with a known symbolic factorization. This feature does not
// explicitly exist in CXSparse. The methods in the class are nonstatic because
// the class manages internal scratch space.
class CXSparse {
 public:
  CXSparse();
  ~CXSparse();

  // Solves a symmetric linear system A * x = b using Cholesky factorization.
  //  A                      - The system matrix.
  //  symbolic_factorization - The symbolic factorization of A. This is
  //                           obtained from AnalyzeCholesky.
  //  b                      - The right hand side of the linear equation. This
  //                           array will also receive the solution.
  // Returns false if Cholesky factorization of A fails.
  bool SolveCholesky(cs_di* A, cs_dis* symbolic_factorization, double* b);

  // Creates a compressed-column view of the transpose of A. No memory is
  // allocated or copied; the returned struct aliases arrays owned by A,
  // so it must not outlive A and must not be passed to Free.
  cs_di CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);

  // Creates a new matrix from a triplet form. Deallocate the returned matrix
  // with Free. May return NULL if the compression or allocation fails.
  cs_di* CreateSparseMatrix(TripletSparseMatrix* A);

  // Computes a symbolic factorization of A that can be used in SolveCholesky.
  // The returned factorization should be deallocated with Free when no
  // longer needed.
  cs_dis* AnalyzeCholesky(cs_di* A);

  // Deallocates a matrix obtained from CreateSparseMatrix.
  void Free(cs_di* factor);
  // Deallocates a symbolic factorization obtained from AnalyzeCholesky.
  void Free(cs_dis* factor);

 private:
  // Cached scratch space used by SolveCholesky.
  CS_ENTRY* scratch_;
  int scratch_size_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_NO_CXSPARSE
#endif // CERES_INTERNAL_CXSPARSE_H_

@ -0,0 +1,86 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/dense_normal_cholesky_solver.h"
#include <cstddef>
#include "Eigen/Dense"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/linear_solver.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
namespace ceres {
namespace internal {
// Stores the solver options; no work is performed until SolveImpl is
// called.
DenseNormalCholeskySolver::DenseNormalCholeskySolver(
    const LinearSolver::Options& options)
    : options_(options) {}
// Solves the least squares problem min |Ax - b|^2 by forming the normal
// equations A'A x = A'b and factorizing them with Eigen's LDLT. When
// per_solve_options.D is non-NULL, the system is augmented with the
// regularizer diag(D), i.e. lhs becomes A'A + diag(D)^2.
LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl(
    DenseSparseMatrix* A,
    const double* b,
    const LinearSolver::PerSolveOptions& per_solve_options,
    double* x) {
  const int num_rows = A->num_rows();
  const int num_cols = A->num_cols();
  ConstAlignedMatrixRef Amat = A->matrix();

  // rhs = A'b.
  Vector rhs = Amat.transpose() * ConstVectorRef(b, num_rows);

  // lhs = A'A, accumulated into the upper triangle only; rankUpdate
  // exploits the symmetry of the product instead of doing a full GEMM.
  Matrix lhs = Matrix::Zero(num_cols, num_cols);
  lhs.selfadjointView<Eigen::Upper>().rankUpdate(Amat.transpose());

  // Optional regularization: lhs += diag(D)^2.
  if (per_solve_options.D != NULL) {
    ConstVectorRef D(per_solve_options.D, num_cols);
    lhs += D.array().square().matrix().asDiagonal();
  }

  VectorRef(x, num_cols) =
      lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs);

  // LDLT always produces a solution; judging its quality is left to the
  // caller, so a single TOLERANCE iteration is reported.
  LinearSolver::Summary summary;
  summary.num_iterations = 1;
  summary.termination_type = TOLERANCE;
  return summary;
}
} // namespace internal
} // namespace ceres

@ -0,0 +1,95 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Solve dense rectangular systems Ax = b by forming the normal
// equations and solving them using the Cholesky factorization.
#ifndef CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
#define CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
#include "ceres/linear_solver.h"
#include "ceres/internal/macros.h"
namespace ceres {
namespace internal {
class DenseSparseMatrix;
// This class implements the LinearSolver interface for solving
// rectangular/unsymmetric (well constrained) linear systems of the
// form
//
// Ax = b
//
// Since there does not usually exist a solution that satisfies these
// equations, the solver instead solves the linear least squares
// problem
//
// min_x |Ax - b|^2
//
// Setting the gradient of the above optimization problem to zero
// gives us the normal equations
//
// A'Ax = A'b
//
// A'A is a positive definite matrix (hopefully), and the resulting
// linear system can be solved using Cholesky factorization.
//
// If the PerSolveOptions struct has a non-null array D, then the
// augmented/regularized linear system
//
// [ A ]x = [b]
// [ diag(D) ] [0]
//
// is solved.
//
// This class uses the LDLT factorization routines from the Eigen
// library. This solver always returns a solution, it is the user's
// responsibility to judge if the solution is good enough for their
// purposes.
// Dense least-squares solver via the normal equations; see the comment
// block above for the mathematical contract.
class DenseNormalCholeskySolver: public DenseSparseMatrixSolver {
 public:
  explicit DenseNormalCholeskySolver(const LinearSolver::Options& options);

 private:
  // Forms A'A (optionally augmented by diag(D)^2 when
  // per_solve_options.D is set) and solves with Eigen's LDLT; always
  // returns a solution.
  virtual LinearSolver::Summary SolveImpl(
      DenseSparseMatrix* A,
      const double* b,
      const LinearSolver::PerSolveOptions& per_solve_options,
      double* x);

  const LinearSolver::Options options_;
  CERES_DISALLOW_COPY_AND_ASSIGN(DenseNormalCholeskySolver);
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_

@ -33,8 +33,8 @@
#include <cstddef>
#include "Eigen/Dense"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/linear_solver.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
@ -62,7 +62,7 @@ LinearSolver::Summary DenseQRSolver::SolveImpl(
}
// rhs = [b;0] to account for the additional rows in the lhs.
Vector rhs(num_rows + ((per_solve_options.D !=NULL) ? num_cols : 0));
Vector rhs(num_rows + ((per_solve_options.D != NULL) ? num_cols : 0));
rhs.setZero();
rhs.head(num_rows) = ConstVectorRef(b, num_rows);

@ -28,7 +28,7 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Solve dense rectangular systems Ax = b using the QR factoriztion.
// Solve dense rectangular systems Ax = b using the QR factorization.
#ifndef CERES_INTERNAL_DENSE_QR_SOLVER_H_
#define CERES_INTERNAL_DENSE_QR_SOLVER_H_
@ -90,7 +90,7 @@ class DenseQRSolver: public DenseSparseMatrixSolver {
double* x);
const LinearSolver::Options options_;
DISALLOW_COPY_AND_ASSIGN(DenseQRSolver);
CERES_DISALLOW_COPY_AND_ASSIGN(DenseQRSolver);
};
} // namespace internal

@ -67,7 +67,7 @@ DenseSparseMatrix::DenseSparseMatrix(const Matrix& m)
has_diagonal_reserved_(false) {
}
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
DenseSparseMatrix::DenseSparseMatrix(const SparseMatrixProto& outer_proto)
: m_(Eigen::MatrixXd::Zero(
outer_proto.dense_matrix().num_rows(),
@ -108,7 +108,7 @@ void DenseSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
*dense_matrix = m_;
}
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
void DenseSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
CHECK(!has_diagonal_appended_) << "Not supported.";
outer_proto->Clear();
@ -183,7 +183,7 @@ void DenseSparseMatrix::ToTextFile(FILE* file) const {
CHECK_NOTNULL(file);
const int active_rows =
(has_diagonal_reserved_ && !has_diagonal_appended_)
? (m_.rows() - m_.cols())
? (m_.rows() - m_.cols())
: m_.rows();
for (int r = 0; r < active_rows; ++r) {

@ -52,7 +52,7 @@ class DenseSparseMatrix : public SparseMatrix {
// m. This assumes that m does not have any repeated entries.
explicit DenseSparseMatrix(const TripletSparseMatrix& m);
explicit DenseSparseMatrix(const Matrix& m);
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
explicit DenseSparseMatrix(const SparseMatrixProto& proto);
#endif
@ -67,7 +67,7 @@ class DenseSparseMatrix : public SparseMatrix {
virtual void SquaredColumnNorm(double* x) const;
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
#ifndef CERES_NO_PROTOCOL_BUFFERS
virtual void ToProto(SparseMatrixProto* proto) const;
#endif
virtual void ToTextFile(FILE* file) const;

@ -28,9 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include <glog/logging.h>
#include "ceres/detect_structure.h"
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
@ -60,10 +60,10 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
*row_block_size = row.block.size;
} else if (*row_block_size != Eigen::Dynamic &&
*row_block_size != row.block.size) {
*row_block_size = Eigen::Dynamic;
VLOG(2) << "Dynamic row block size because the block size changed from "
<< *row_block_size << " to "
<< row.block.size;
*row_block_size = Eigen::Dynamic;
}
@ -71,10 +71,10 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
*e_block_size = bs.cols[e_block_id].size;
} else if (*e_block_size != Eigen::Dynamic &&
*e_block_size != bs.cols[e_block_id].size) {
*e_block_size = Eigen::Dynamic;
VLOG(2) << "Dynamic e block size because the block size changed from "
<< *e_block_size << " to "
<< bs.cols[e_block_id].size;
*e_block_size = Eigen::Dynamic;
}
if (*f_block_size == 0) {
@ -85,11 +85,11 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
} else if (*f_block_size != Eigen::Dynamic) {
for (int c = 1; c < row.cells.size(); ++c) {
if (*f_block_size != bs.cols[row.cells[c].block_id].size) {
*f_block_size = Eigen::Dynamic;
VLOG(2) << "Dynamic f block size because the block size "
<< "changed from " << *f_block_size << " to "
<< bs.cols[row.cells[c].block_id].size;
break;
<< "changed from " << *f_block_size << " to "
<< bs.cols[row.cells[c].block_id].size;
*f_block_size = Eigen::Dynamic;
break;
}
}
}

@ -49,7 +49,7 @@ namespace internal {
// is known as compile time.
//
// For more details about e_blocks and f_blocks, see
// schur_complement.h. This information is used to initialized an
// schur_eliminator.h. This information is used to initialized an
// appropriate template specialization of SchurEliminator.
void DetectStructure(const CompressedRowBlockStructure& bs,
const int num_eliminate_blocks,

@ -0,0 +1,691 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/dogleg_strategy.h"
#include <cmath>
#include "Eigen/Dense"
#include "ceres/array_utils.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_solver.h"
#include "ceres/polynomial_solver.h"
#include "ceres/sparse_matrix.h"
#include "ceres/trust_region_strategy.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
namespace {
const double kMaxMu = 1.0;
const double kMinMu = 1e-8;
}
// Initializes the strategy from the user supplied options. mu_ is the
// multiplier applied to the regularizing diagonal of the Gauss-Newton
// system; it starts at its minimum and is adapted in
// ComputeGaussNewtonStep.
DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
    : linear_solver_(options.linear_solver),
      radius_(options.initial_radius),
      max_radius_(options.max_radius),
      min_diagonal_(options.lm_min_diagonal),
      max_diagonal_(options.lm_max_diagonal),
      mu_(kMinMu),
      min_mu_(kMinMu),
      max_mu_(kMaxMu),
      mu_increase_factor_(10.0),
      increase_threshold_(0.75),
      decrease_threshold_(0.25),
      dogleg_step_norm_(0.0),
      reuse_(false),
      dogleg_type_(options.dogleg_type) {
  // Sanity-check the configuration: a non-positive or inverted diagonal
  // clamp, or a non-positive radius, would make the strategy meaningless.
  CHECK_NOTNULL(linear_solver_);
  CHECK_GT(min_diagonal_, 0.0);
  CHECK_LE(min_diagonal_, max_diagonal_);
  CHECK_GT(max_radius_, 0.0);
}
// If the reuse_ flag is not set, then the Cauchy point (scaled
// gradient) and the new Gauss-Newton step are computed from
// scratch. The Dogleg step is then computed as interpolation of these
// two vectors.
TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
    const TrustRegionStrategy::PerSolveOptions& per_solve_options,
    SparseMatrix* jacobian,
    const double* residuals,
    double* step) {
  CHECK_NOTNULL(jacobian);
  CHECK_NOTNULL(residuals);
  CHECK_NOTNULL(step);

  const int n = jacobian->num_cols();
  if (reuse_) {
    // Gauss-Newton and gradient vectors are always available, only a
    // new interpolant need to be computed. For the subspace case,
    // the subspace and the two-dimensional model are also still valid.
    switch(dogleg_type_) {
      case TRADITIONAL_DOGLEG:
        ComputeTraditionalDoglegStep(step);
        break;
      case SUBSPACE_DOGLEG:
        ComputeSubspaceDoglegStep(step);
        break;
    }
    // Reused quantities cost no linear solver iterations.
    TrustRegionStrategy::Summary summary;
    summary.num_iterations = 0;
    summary.termination_type = TOLERANCE;
    return summary;
  }

  reuse_ = true;
  // Check that we have the storage needed to hold the various
  // temporary vectors.
  if (diagonal_.rows() != n) {
    diagonal_.resize(n, 1);
    gradient_.resize(n, 1);
    gauss_newton_step_.resize(n, 1);
  }

  // Vector used to form the diagonal matrix that is used to
  // regularize the Gauss-Newton solve and that defines the
  // elliptical trust region
  //
  //   || D * step || <= radius_ .
  //
  jacobian->SquaredColumnNorm(diagonal_.data());
  // Clamp each squared column norm into [min_diagonal_, max_diagonal_]
  // before taking square roots, so D stays well conditioned.
  for (int i = 0; i < n; ++i) {
    diagonal_[i] = min(max(diagonal_[i], min_diagonal_), max_diagonal_);
  }
  diagonal_ = diagonal_.array().sqrt();

  ComputeGradient(jacobian, residuals);
  ComputeCauchyPoint(jacobian);

  LinearSolver::Summary linear_solver_summary =
      ComputeGaussNewtonStep(jacobian, residuals);

  TrustRegionStrategy::Summary summary;
  summary.residual_norm = linear_solver_summary.residual_norm;
  summary.num_iterations = linear_solver_summary.num_iterations;
  summary.termination_type = linear_solver_summary.termination_type;

  if (linear_solver_summary.termination_type != FAILURE) {
    switch(dogleg_type_) {
      // Interpolate the Cauchy point and the Gauss-Newton step.
      case TRADITIONAL_DOGLEG:
        ComputeTraditionalDoglegStep(step);
        break;

      // Find the minimum in the subspace defined by the
      // Cauchy point and the (Gauss-)Newton step.
      case SUBSPACE_DOGLEG:
        if (!ComputeSubspaceModel(jacobian)) {
          summary.termination_type = FAILURE;
          break;
        }
        ComputeSubspaceDoglegStep(step);
        break;
    }
  }

  return summary;
}
// The trust region is elliptical, defined by the diagonal scaling
// matrix D built from sqrt(diagonal_). Internally this is handled by
// the substitution step' = D * step, which makes the trust region for
// step' spherical; the gradient, Gauss-Newton step, Cauchy point and
// all Jacobian products are expressed in these scaled coordinates.
void DoglegStrategy::ComputeGradient(
    SparseMatrix* jacobian,
    const double* residuals) {
  // g = D^-1 * J' * r in the scaled coordinate system.
  gradient_.setZero();
  jacobian->LeftMultiply(residuals, gradient_.data());
  gradient_ = gradient_.cwiseQuotient(diagonal_);
}
// Computes the step length alpha_ of the Cauchy point, the global
// minimizer of the quadratic model along the one-dimensional subspace
// spanned by the gradient. The Cauchy point itself is
// alpha_ * -gradient_.
void DoglegStrategy::ComputeCauchyPoint(SparseMatrix* jacobian) {
  // The Jacobian is scaled implicitly: computing J * (D^-1 * (D^-1 * g))
  // avoids ever forming the scaled Jacobian J * D^-1 explicitly.
  Vector scaled_gradient =
      (gradient_.array() / diagonal_.array()).matrix();
  Vector jacobian_times_gradient = Vector::Zero(jacobian->num_rows());
  jacobian->RightMultiply(scaled_gradient.data(),
                          jacobian_times_gradient.data());
  alpha_ = gradient_.squaredNorm() / jacobian_times_gradient.squaredNorm();
}
// The dogleg step is defined as the intersection of the trust region
// boundary with the piecewise linear path from the origin to the Cauchy
// point and then from there to the Gauss-Newton point (global minimizer
// of the model function). The Gauss-Newton point is taken if it lies
// within the trust region. The step length (in the scaled space) is
// reported through dogleg_step_norm_; the step itself is converted back
// to the unscaled parameter space by dividing by diagonal_.
void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
  VectorRef dogleg_step(dogleg, gradient_.rows());

  // Case 1. The Gauss-Newton step lies inside the trust region, and
  // is therefore the optimal solution to the trust-region problem.
  const double gradient_norm = gradient_.norm();
  const double gauss_newton_norm = gauss_newton_step_.norm();
  if (gauss_newton_norm <= radius_) {
    dogleg_step = gauss_newton_step_;
    dogleg_step_norm_ = gauss_newton_norm;
    // Undo the implicit diagonal scaling of the trust region.
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // Case 2. The Cauchy point and the Gauss-Newton steps lie outside
  // the trust region. Rescale the Cauchy point to the trust region
  // and return.
  if (gradient_norm * alpha_ >= radius_) {
    dogleg_step = -(radius_ / gradient_norm) * gradient_;
    dogleg_step_norm_ = radius_;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "Cauchy step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // Case 3. The Cauchy point is inside the trust region and the
  // Gauss-Newton step is outside. Compute the line joining the two
  // points and the point on it which intersects the trust region
  // boundary.
  // a = alpha * -gradient
  // b = gauss_newton_step
  const double b_dot_a = -alpha_ * gradient_.dot(gauss_newton_step_);
  const double a_squared_norm = pow(alpha_ * gradient_norm, 2.0);
  const double b_minus_a_squared_norm =
      a_squared_norm - 2 * b_dot_a + pow(gauss_newton_norm, 2);

  // c = a' (b - a)
  //   = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
  const double c = b_dot_a - a_squared_norm;
  // Discriminant of |a + beta (b - a)|^2 = radius^2, solved for beta.
  const double d = sqrt(c * c + b_minus_a_squared_norm *
                        (pow(radius_, 2.0) - a_squared_norm));

  // The two branches are algebraically equivalent; the choice on the
  // sign of c picks the form that avoids cancellation.
  double beta =
      (c <= 0)
      ? (d - c) /  b_minus_a_squared_norm
      : (radius_ * radius_ - a_squared_norm) / (d + c);
  dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
      + beta * gauss_newton_step_;
  dogleg_step_norm_ = dogleg_step.norm();
  dogleg_step.array() /= diagonal_.array();
  VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
          << " radius: " << radius_;
}
// The subspace method finds the minimum of the two-dimensional problem
//
//   min. 1/2 x' B' H B x + g' B x
//   s.t. || B x ||^2 <= r^2
//
// where r is the trust region radius and B is the matrix with unit columns
// spanning the subspace defined by the steepest descent and Newton direction.
// This subspace by definition includes the Gauss-Newton point, which is
// therefore taken if it lies within the trust region.
void DoglegStrategy::ComputeSubspaceDoglegStep(double* dogleg) {
  VectorRef dogleg_step(dogleg, gradient_.rows());

  // The Gauss-Newton point is inside the trust region if |GN| <= radius_.
  // This test is valid even though radius_ is a length in the two-dimensional
  // subspace while gauss_newton_step_ is expressed in the (scaled)
  // higher dimensional original space. This is because
  //
  //   1. gauss_newton_step_ by definition lies in the subspace, and
  //   2. the subspace basis is orthonormal.
  //
  // As a consequence, the norm of the gauss_newton_step_ in the subspace is
  // the same as its norm in the original space.
  const double gauss_newton_norm = gauss_newton_step_.norm();
  if (gauss_newton_norm <= radius_) {
    dogleg_step = gauss_newton_step_;
    dogleg_step_norm_ = gauss_newton_norm;
    // Undo the implicit diagonal scaling of the trust region.
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // The optimum lies on the boundary of the trust region. The above problem
  // therefore becomes
  //
  //   min. 1/2 x^T B^T H B x + g^T B x
  //   s.t. || B x ||^2 = r^2
  //
  // Notice the equality in the constraint.
  //
  // This can be solved by forming the Lagrangian, solving for x(y), where
  // y is the Lagrange multiplier, using the gradient of the objective, and
  // putting x(y) back into the constraint. This results in a fourth order
  // polynomial in y, which can be solved using e.g. the companion matrix.
  // See the description of MakePolynomialForBoundaryConstrainedProblem for
  // details. The result is up to four real roots y*, not all of which
  // correspond to feasible points. The feasible points x(y*) have to be
  // tested for optimality.
  if (subspace_is_one_dimensional_) {
    // The subspace is one-dimensional, so both the gradient and
    // the Gauss-Newton step point towards the same direction.
    // In this case, we move along the gradient until we reach the trust
    // region boundary.
    dogleg_step = -(radius_ / gradient_.norm()) * gradient_;
    dogleg_step_norm_ = radius_;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "Dogleg subspace step size (1D): " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  Vector2d minimum(0.0, 0.0);
  if (!FindMinimumOnTrustRegionBoundary(&minimum)) {
    // For the positive semi-definite case, a traditional dogleg step
    // is taken in this case.
    LOG(WARNING) << "Failed to compute polynomial roots. "
                 << "Taking traditional dogleg step instead.";
    ComputeTraditionalDoglegStep(dogleg);
    return;
  }

  // Test first order optimality at the minimum.
  // The first order KKT conditions state that the minimum x*
  // has to satisfy either || x* ||^2 < r^2 (i.e. has to lie within
  // the trust region), or
  //
  //   (B x* + g) + y x* = 0
  //
  // for some positive scalar y.
  // Here, as it is already known that the minimum lies on the boundary, the
  // latter condition is tested. To allow for small imprecisions, we test if
  // the angle between (B x* + g) and -x* is smaller than acos(0.99).
  // The exact value of the cosine is arbitrary but should be close to 1.
  //
  // This condition should not be violated. If it is, the minimum was not
  // correctly determined.
  const double kCosineThreshold = 0.99;
  const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
  const double cosine_angle = -minimum.dot(grad_minimum) /
      (minimum.norm() * grad_minimum.norm());
  if (cosine_angle < kCosineThreshold) {
    LOG(WARNING) << "First order optimality seems to be violated "
                 << "in the subspace method!\n"
                 << "Cosine of angle between x and B x + g is "
                 << cosine_angle << ".\n"
                 << "Taking a regular dogleg step instead.\n"
                 << "Please consider filing a bug report if this "
                 << "happens frequently or consistently.\n";
    ComputeTraditionalDoglegStep(dogleg);
    return;
  }

  // Create the full step from the optimal 2d solution.
  dogleg_step = subspace_basis_ * minimum;
  dogleg_step_norm_ = radius_;
  dogleg_step.array() /= diagonal_.array();
  VLOG(3) << "Dogleg subspace step size: " << dogleg_step_norm_
          << " radius: " << radius_;
}
// Build the polynomial that defines the optimal Lagrange multipliers.
// Let the Lagrangian be
//
// L(x, y) = 0.5 x^T B x + x^T g + y (0.5 x^T x - 0.5 r^2). (1)
//
// Stationary points of the Lagrangian are given by
//
// 0 = d L(x, y) / dx = Bx + g + y x (2)
// 0 = d L(x, y) / dy = 0.5 x^T x - 0.5 r^2 (3)
//
// For any given y, we can solve (2) for x as
//
// x(y) = -(B + y I)^-1 g . (4)
//
// As B + y I is 2x2, we form the inverse explicitly:
//
// (B + y I)^-1 = (1 / det(B + y I)) adj(B + y I) (5)
//
// where adj() denotes adjugation. This should be safe, as B is positive
// semi-definite and y is necessarily positive, so (B + y I) is indeed
// invertible.
// Plugging (5) into (4) and the result into (3), then dividing by 0.5 we
// obtain
//
// 0 = (1 / det(B + y I))^2 g^T adj(B + y I)^T adj(B + y I) g - r^2
// (6)
//
// or
//
// det(B + y I)^2 r^2 = g^T adj(B + y I)^T adj(B + y I) g (7a)
// = g^T adj(B)^T adj(B) g
// + 2 y g^T adj(B)^T g + y^2 g^T g (7b)
//
// as
//
// adj(B + y I) = adj(B) + y I = adj(B)^T + y I . (8)
//
// The left hand side can be expressed explicitly using
//
// det(B + y I) = det(B) + y tr(B) + y^2 . (9)
//
// So (7) is a polynomial in y of degree four.
// Bringing everything back to the left hand side, the coefficients can
// be read off as
//
// y^4 r^2
// + y^3 2 r^2 tr(B)
// + y^2 (r^2 tr(B)^2 + 2 r^2 det(B) - g^T g)
// + y^1 (2 r^2 det(B) tr(B) - 2 g^T adj(B)^T g)
// + y^0 (r^2 det(B)^2 - g^T adj(B)^T adj(B) g)
//
// Assembles the degree-four polynomial in the Lagrange multiplier y
// whose real roots are the candidates for the boundary-constrained
// subspace minimum; the full derivation is in the comment block above.
// Coefficients are returned highest degree first.
Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
  const double determinant = subspace_B_.determinant();
  const double trace = subspace_B_.trace();
  const double radius2 = radius_ * radius_;

  // Explicit adjugate of the 2x2 matrix subspace_B_.
  Matrix2d adjugate;
  adjugate <<  subspace_B_(1, 1), -subspace_B_(0, 1),
              -subspace_B_(1, 0),  subspace_B_(0, 0);

  Vector coefficients(5);
  coefficients(0) = radius2;
  coefficients(1) = 2.0 * radius2 * trace;
  coefficients(2) =
      radius2 * (trace * trace + 2.0 * determinant) -
      subspace_g_.squaredNorm();
  coefficients(3) = -2.0 * (subspace_g_.transpose() * adjugate * subspace_g_
      - radius2 * determinant * trace);
  coefficients(4) =
      radius2 * determinant * determinant -
      (adjugate * subspace_g_).squaredNorm();
  return coefficients;
}
// Given a Lagrange multiplier y corresponding to a stationary point of
// the Lagrangian L(x, y), recovers the matching x from
//
//   0 = d L(x, y) / dx = (B + y I) x + g,
//
// i.e. x(y) = -(B + y I)^-1 g, using a partial-pivoting LU
// decomposition of the shifted 2x2 matrix.
DoglegStrategy::Vector2d DoglegStrategy::ComputeSubspaceStepFromRoot(
    double y) const {
  const Matrix2d shifted_B = subspace_B_ + y * Matrix2d::Identity();
  return -shifted_B.partialPivLu().solve(subspace_g_);
}
// Evaluates the two-dimensional quadratic model
//
//   m(x) = 1/2 x' B x + g' x
//
// at a point x expressed in the subspace spanned by subspace_basis_.
double DoglegStrategy::EvaluateSubspaceModel(const Vector2d& x) const {
  const double quadratic_term = x.dot(subspace_B_ * x);
  const double linear_term = subspace_g_.dot(x);
  return 0.5 * quadratic_term + linear_term;
}
// This function attempts to solve the boundary-constrained subspace problem
//
//   min. 1/2 x^T B^T H B x + g^T B x
//   s.t. || B x ||^2 = r^2
//
// where B is an orthonormal subspace basis and r is the trust-region radius.
//
// This is done by finding the roots of a fourth degree polynomial. If the
// root finding fails, the function returns false and minimum will be set
// to (0, 0). If it succeeds, true is returned.
//
// In the failure case, another step should be taken, such as the traditional
// dogleg step.
bool DoglegStrategy::FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const {
  CHECK_NOTNULL(minimum);

  // Return (0, 0) in all error cases.
  minimum->setZero();

  // Create the fourth-degree polynomial that is a necessary condition for
  // optimality.
  const Vector polynomial = MakePolynomialForBoundaryConstrainedProblem();

  // Find the real parts y_i of its roots (not only the real roots).
  Vector roots_real;
  if (!FindPolynomialRoots(polynomial, &roots_real, NULL)) {
    // Failed to find the roots of the polynomial, i.e. the candidate
    // solutions of the constrained problem. Report this back to the caller.
    return false;
  }

  // For each root y, compute B x(y) and check for feasibility.
  // Notice that there should always be four roots, as the leading term of
  // the polynomial is r^2 and therefore non-zero. However, as some roots
  // may be complex, the real parts are not necessarily unique.
  double minimum_value = std::numeric_limits<double>::max();
  bool valid_root_found = false;
  for (int i = 0; i < roots_real.size(); ++i) {
    const Vector2d x_i = ComputeSubspaceStepFromRoot(roots_real(i));

    // Not all roots correspond to points on the trust region boundary.
    // There are at most four candidate solutions. As we are interested
    // in the minimum, it is safe to consider all of them after projecting
    // them onto the trust region boundary.
    if (x_i.norm() > 0) {
      const double f_i = EvaluateSubspaceModel((radius_ / x_i.norm()) * x_i);
      valid_root_found = true;
      if (f_i < minimum_value) {
        minimum_value = f_i;
        *minimum = x_i;
      }
    }
  }

  return valid_root_found;
}
// Solve the (regularized) Gauss-Newton normal equations, retrying with
// progressively stronger regularization if the linear solver fails.
// On success the result, scaled back to the original variables, is left
// in gauss_newton_step_.
LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
    SparseMatrix* jacobian,
    const double* residuals) {
  const int n = jacobian->num_cols();
  LinearSolver::Summary linear_solver_summary;
  linear_solver_summary.termination_type = FAILURE;

  // The Jacobian matrix is often quite poorly conditioned. Thus it is
  // necessary to add a diagonal matrix at the bottom to prevent the
  // linear solver from failing.
  //
  // We do this by computing the same diagonal matrix as the one used
  // by Levenberg-Marquardt (other choices are possible), and scaling
  // it by a small constant (independent of the trust region radius).
  //
  // If the solve fails, the multiplier to the diagonal is increased
  // up to max_mu_ by a factor of mu_increase_factor_ every time. If
  // the linear solver is still not successful, the strategy returns
  // with FAILURE.
  //
  // Next time when a new Gauss-Newton step is requested, the
  // multiplier starts out from the last successful solve.
  //
  // When a step is declared successful, the multiplier is decreased
  // by half of mu_increase_factor_.
  while (mu_ < max_mu_) {
    // Dogleg, as far as I (sameeragarwal) understand it, requires a
    // reasonably good estimate of the Gauss-Newton step. This means
    // that we need to solve the normal equations more or less
    // exactly. This is reflected in the values of the tolerances set
    // below.
    //
    // For now, this strategy should only be used with exact
    // factorization based solvers, for which these tolerances are
    // automatically satisfied.
    //
    // The right way to combine inexact solves with trust region
    // methods is to use Stiehaug's method.
    LinearSolver::PerSolveOptions solve_options;
    solve_options.q_tolerance = 0.0;
    solve_options.r_tolerance = 0.0;

    // Regularizing diagonal: sqrt(mu) * diagonal_, appended to the
    // Jacobian by the linear solver via solve_options.D.
    lm_diagonal_ = diagonal_ * std::sqrt(mu_);
    solve_options.D = lm_diagonal_.data();

    // As in the LevenbergMarquardtStrategy, solve Jy = r instead
    // of Jx = -r and later set x = -y to avoid having to modify
    // either jacobian or residuals.
    InvalidateArray(n, gauss_newton_step_.data());
    linear_solver_summary = linear_solver_->Solve(jacobian,
                                                  residuals,
                                                  solve_options,
                                                  gauss_newton_step_.data());

    if (linear_solver_summary.termination_type == FAILURE ||
        !IsArrayValid(n, gauss_newton_step_.data())) {
      // Solve failed or produced non-finite values: strengthen the
      // regularization and retry (bounded by the mu_ < max_mu_ test).
      mu_ *= mu_increase_factor_;
      VLOG(2) << "Increasing mu " << mu_;
      linear_solver_summary.termination_type = FAILURE;
      continue;
    }
    break;
  }

  if (linear_solver_summary.termination_type != FAILURE) {
    // The scaled Gauss-Newton step is D * GN:
    //
    //     - (D^-1 J^T J D^-1)^-1 (D^-1 g)
    //   = - D (J^T J)^-1 D D^-1 g
    //   = D -(J^T J)^-1 g
    //
    gauss_newton_step_.array() *= -diagonal_.array();
  }

  return linear_solver_summary;
}
// Adjusts the trust-region radius and the regularization multiplier
// after the user accepted the last step.
void DoglegStrategy::StepAccepted(double step_quality) {
  CHECK_GT(step_quality, 0.0);

  // Shrink the trust region when the model only poorly predicted the
  // actual cost decrease; expand it when the prediction was very good.
  if (step_quality < decrease_threshold_) {
    radius_ = 0.5 * radius_;
  }
  if (step_quality > increase_threshold_) {
    radius_ = max(radius_, 3.0 * dogleg_step_norm_);
  }

  // Reduce the regularization multiplier, in the hope that whatever
  // was causing the rank deficiency has gone away and we can return
  // to doing a pure Gauss-Newton solve.
  mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_);

  // A fresh Jacobian is expected, so the cached steps cannot be reused.
  reuse_ = false;
}
// The last step was rejected: shrink the trust region. The Jacobian is
// unchanged, so the Gauss-Newton and Cauchy steps can be reused by the
// next call to ComputeStep.
void DoglegStrategy::StepRejected(double step_quality) {
  radius_ = 0.5 * radius_;
  reuse_ = true;
}
// The last step was numerically invalid: strengthen the regularization
// and force a fresh Gauss-Newton solve on the next ComputeStep call.
void DoglegStrategy::StepIsInvalid() {
  mu_ = mu_ * mu_increase_factor_;
  reuse_ = false;
}
// Returns the current trust-region radius.
double DoglegStrategy::Radius() const {
  return radius_;
}
// Builds the two-dimensional subspace model of the trust-region problem
// from the gradient and the Gauss-Newton step. Returns false only for
// basis ranks (0 or >2) that indicate a bug; rank 1 degenerates to a
// one-dimensional (gradient-only) model.
bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
  // Compute an orthogonal basis for the subspace using QR decomposition.
  Matrix basis_vectors(jacobian->num_cols(), 2);
  basis_vectors.col(0) = gradient_;
  basis_vectors.col(1) = gauss_newton_step_;
  Eigen::ColPivHouseholderQR<Matrix> basis_qr(basis_vectors);

  switch (basis_qr.rank()) {
    case 0:
      // This should never happen, as it implies that both the gradient
      // and the Gauss-Newton step are zero. In this case, the minimizer should
      // have stopped due to the gradient being too small.
      LOG(ERROR) << "Rank of subspace basis is 0. "
                 << "This means that the gradient at the current iterate is "
                 << "zero but the optimization has not been terminated. "
                 << "You may have found a bug in Ceres.";
      return false;

    case 1:
      // Gradient and Gauss-Newton step coincide, so we lie on one of the
      // major axes of the quadratic problem. In this case, we simply move
      // along the gradient until we reach the trust region boundary.
      subspace_is_one_dimensional_ = true;
      return true;

    case 2:
      subspace_is_one_dimensional_ = false;
      break;

    default:
      LOG(ERROR) << "Rank of the subspace basis matrix is reported to be "
                 << "greater than 2. As the matrix contains only two "
                 << "columns this cannot be true and is indicative of "
                 << "a bug.";
      return false;
  }

  // The subspace is two-dimensional, so compute the subspace model.
  // Given the basis U, this is
  //
  //   subspace_g_ = g_scaled^T U
  //
  // and
  //
  //   subspace_B_ = U^T (J_scaled^T J_scaled) U
  //
  // As J_scaled = J * D^-1, the latter becomes
  //
  //   subspace_B_ = ((U^T D^-1) J^T) (J (D^-1 U))
  //               = (J (D^-1 U))^T (J (D^-1 U))

  // Extract the first two columns of Q as the orthonormal basis U.
  subspace_basis_ =
      basis_qr.householderQ() * Matrix::Identity(jacobian->num_cols(), 2);

  subspace_g_ = subspace_basis_.transpose() * gradient_;

  // Jb.row(i) will hold J * (D^-1 * U.col(i)).
  Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
      Jb(2, jacobian->num_rows());
  // NOTE(review): the setZero() suggests RightMultiply accumulates into
  // its output rather than overwriting it — confirm against SparseMatrix.
  Jb.setZero();

  Vector tmp;
  tmp = (subspace_basis_.col(0).array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(tmp.data(), Jb.row(0).data());
  tmp = (subspace_basis_.col(1).array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(tmp.data(), Jb.row(1).data());

  subspace_B_ = Jb * Jb.transpose();

  return true;
}
} // namespace internal
} // namespace ceres

@ -0,0 +1,163 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_INTERNAL_DOGLEG_STRATEGY_H_
#define CERES_INTERNAL_DOGLEG_STRATEGY_H_
#include "ceres/linear_solver.h"
#include "ceres/trust_region_strategy.h"
namespace ceres {
namespace internal {
// Dogleg step computation and trust region sizing strategy based on
// on "Methods for Nonlinear Least Squares" by K. Madsen, H.B. Nielsen
// and O. Tingleff. Available to download from
//
// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
//
// One minor modification is that instead of computing the pure
// Gauss-Newton step, we compute a regularized version of it. This is
// because the Jacobian is often rank-deficient and in such cases
// using a direct solver leads to numerical failure.
//
// If SUBSPACE is passed as the type argument to the constructor, the
// DoglegStrategy follows the approach by Shultz, Schnabel, Byrd.
// This finds the exact optimum over the two-dimensional subspace
// spanned by the two Dogleg vectors.
class DoglegStrategy : public TrustRegionStrategy {
 public:
  DoglegStrategy(const TrustRegionStrategy::Options& options);
  virtual ~DoglegStrategy() {}

  // TrustRegionStrategy interface
  virtual Summary ComputeStep(const PerSolveOptions& per_solve_options,
                              SparseMatrix* jacobian,
                              const double* residuals,
                              double* step);
  virtual void StepAccepted(double step_quality);
  virtual void StepRejected(double step_quality);
  virtual void StepIsInvalid();

  virtual double Radius() const;

  // These functions are predominantly for testing.
  Vector gradient() const { return gradient_; }
  Vector gauss_newton_step() const { return gauss_newton_step_; }
  Matrix subspace_basis() const { return subspace_basis_; }
  Vector subspace_g() const { return subspace_g_; }
  Matrix subspace_B() const { return subspace_B_; }

 private:
  // Fixed-size 2D types for the subspace model. DontAlign avoids Eigen's
  // alignment requirements on these class members.
  typedef Eigen::Matrix<double, 2, 1, Eigen::DontAlign> Vector2d;
  typedef Eigen::Matrix<double, 2, 2, Eigen::DontAlign> Matrix2d;

  // Helpers for computing the (regularized) Gauss-Newton step, the
  // Cauchy point, and the two flavors of the dogleg step.
  LinearSolver::Summary ComputeGaussNewtonStep(SparseMatrix* jacobian,
                                               const double* residuals);
  void ComputeCauchyPoint(SparseMatrix* jacobian);
  void ComputeGradient(SparseMatrix* jacobian, const double* residuals);
  void ComputeTraditionalDoglegStep(double* step);
  bool ComputeSubspaceModel(SparseMatrix* jacobian);
  void ComputeSubspaceDoglegStep(double* step);

  // Helpers for the two-dimensional (SUBSPACE) boundary problem.
  bool FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const;
  Vector MakePolynomialForBoundaryConstrainedProblem() const;
  Vector2d ComputeSubspaceStepFromRoot(double lambda) const;
  double EvaluateSubspaceModel(const Vector2d& x) const;

  LinearSolver* linear_solver_;
  double radius_;
  const double max_radius_;

  const double min_diagonal_;
  const double max_diagonal_;

  // mu is used to scale the diagonal matrix used to make the
  // Gauss-Newton solve full rank. In each solve, the strategy starts
  // out with mu = min_mu, and tries values upto max_mu. If the user
  // reports an invalid step, the value of mu_ is increased so that
  // the next solve starts with a stronger regularization.
  //
  // If a successful step is reported, then the value of mu_ is
  // decreased with a lower bound of min_mu_.
  double mu_;
  const double min_mu_;
  const double max_mu_;
  const double mu_increase_factor_;
  const double increase_threshold_;
  const double decrease_threshold_;

  Vector diagonal_;  // sqrt(diag(J^T J))
  Vector lm_diagonal_;

  Vector gradient_;
  Vector gauss_newton_step_;

  // cauchy_step = alpha * gradient
  double alpha_;
  double dogleg_step_norm_;

  // When, ComputeStep is called, reuse_ indicates whether the
  // Gauss-Newton and Cauchy steps from the last call to ComputeStep
  // can be reused or not.
  //
  // If the user called StepAccepted, then it is expected that the
  // user has recomputed the Jacobian matrix and new Gauss-Newton
  // solve is needed and reuse is set to false.
  //
  // If the user called StepRejected, then it is expected that the
  // user wants to solve the trust region problem with the same matrix
  // but a different trust region radius and the Gauss-Newton and
  // Cauchy steps can be reused to compute the Dogleg, thus reuse is
  // set to true.
  //
  // If the user called StepIsInvalid, then there was a numerical
  // problem with the step computed in the last call to ComputeStep,
  // and the regularization used to do the Gauss-Newton solve is
  // increased and a new solve should be done when ComputeStep is
  // called again, thus reuse is set to false.
  bool reuse_;

  // The dogleg type determines how the minimum of the local
  // quadratic model is found.
  DoglegType dogleg_type_;

  // If the type is SUBSPACE_DOGLEG, the two-dimensional
  // model 1/2 x^T B x + g^T x has to be computed and stored.
  bool subspace_is_one_dimensional_;
  Matrix subspace_basis_;
  Vector2d subspace_g_;
  Matrix2d subspace_B_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_DOGLEG_STRATEGY_H_

@ -28,14 +28,18 @@
//
// Author: keir@google.com (Keir Mierle)
#include <glog/logging.h>
#include "ceres/evaluator.h"
#include <vector>
#include "ceres/block_evaluate_preparer.h"
#include "ceres/block_jacobian_writer.h"
#include "ceres/compressed_row_jacobian_writer.h"
#include "ceres/scratch_evaluate_preparer.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/crs_matrix.h"
#include "ceres/dense_jacobian_writer.h"
#include "ceres/evaluator.h"
#include "ceres/internal/port.h"
#include "ceres/program_evaluator.h"
#include "ceres/scratch_evaluate_preparer.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
@ -47,6 +51,7 @@ Evaluator* Evaluator::Create(const Evaluator::Options& options,
string* error) {
switch (options.linear_solver_type) {
case DENSE_QR:
case DENSE_NORMAL_CHOLESKY:
return new ProgramEvaluator<ScratchEvaluatePreparer,
DenseJacobianWriter>(options,
program);
@ -67,5 +72,76 @@ Evaluator* Evaluator::Create(const Evaluator::Options& options,
}
}
// Convenience entry point: evaluate cost, and optionally residuals,
// gradient and Jacobian, of a Program at the values currently held in
// the parameter blocks' user-state pointers. Returns false if the
// evaluator could not be created or evaluation failed.
bool Evaluator::Evaluate(Program* program,
                         int num_threads,
                         double* cost,
                         vector<double>* residuals,
                         vector<double>* gradient,
                         CRSMatrix* output_jacobian) {
  CHECK_GE(num_threads, 1)
      << "This is a Ceres bug; please contact the developers!";
  CHECK_NOTNULL(cost);

  // Setup the Parameter indices and offsets before an evaluator can
  // be constructed and used.
  program->SetParameterOffsetsAndIndex();

  Evaluator::Options evaluator_options;
  // SPARSE_NORMAL_CHOLESKY selects the compressed-row Jacobian writer
  // path; no linear solve actually happens here.
  evaluator_options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
  evaluator_options.num_threads = num_threads;

  string error;
  scoped_ptr<Evaluator> evaluator(
      Evaluator::Create(evaluator_options, program, &error));
  if (evaluator.get() == NULL) {
    LOG(ERROR) << "Unable to create an Evaluator object. "
               << "Error: " << error
               << "This is a Ceres bug; please contact the developers!";
    return false;
  }

  // Size the optional outputs before evaluation so their storage can be
  // passed directly to the evaluator.
  if (residuals !=NULL) {
    residuals->resize(evaluator->NumResiduals());
  }

  if (gradient != NULL) {
    gradient->resize(evaluator->NumEffectiveParameters());
  }

  scoped_ptr<CompressedRowSparseMatrix> jacobian;
  if (output_jacobian != NULL) {
    jacobian.reset(
        down_cast<CompressedRowSparseMatrix*>(evaluator->CreateJacobian()));
  }

  // Point the state pointers to the user state pointers. This is
  // needed so that we can extract a parameter vector which is then
  // passed to Evaluator::Evaluate.
  program->SetParameterBlockStatePtrsToUserStatePtrs();

  // Copy the value of the parameter blocks into a vector, since the
  // Evaluate::Evaluate method needs its input as such. The previous
  // call to SetParameterBlockStatePtrsToUserStatePtrs ensures that
  // these values are the ones corresponding to the actual state of
  // the parameter blocks, rather than the temporary state pointer
  // used for evaluation.
  Vector parameters(program->NumParameters());
  program->ParameterBlocksToStateVector(parameters.data());
  if (!evaluator->Evaluate(parameters.data(),
                           cost,
                           residuals != NULL ? &(*residuals)[0] : NULL,
                           gradient != NULL ? &(*gradient)[0] : NULL,
                           jacobian.get())) {
    return false;
  }

  // Convert the internal sparse Jacobian into the public CRS format.
  if (output_jacobian != NULL) {
    jacobian->ToCRSMatrix(output_jacobian);
  }

  return true;
}
} // namespace internal
} // namespace ceres

@ -33,10 +33,14 @@
#define CERES_INTERNAL_EVALUATOR_H_
#include <string>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/types.h"
namespace ceres {
class CRSMatrix;
namespace internal {
class Program;
@ -65,6 +69,32 @@ class Evaluator {
Program* program,
string* error);
// This is used for computing the cost, residual and Jacobian for
// returning to the user. For actually solving the optimization
// problem, the optimization algorithm uses the ProgramEvaluator
// objects directly.
//
// The residual, gradients and jacobian pointers can be NULL, in
// which case they will not be evaluated. cost cannot be NULL.
//
// The parallelism of the evaluator is controlled by num_threads; it
// should be at least 1.
//
// Note: That this function does not take a parameter vector as
// input. The parameter blocks are evaluated on the values contained
// in the arrays pointed to by their user_state pointers.
//
// Also worth noting is that this function mutates program by
// calling Program::SetParameterOffsetsAndIndex() on it so that an
// evaluator object can be constructed.
static bool Evaluate(Program* program,
int num_threads,
double* cost,
vector<double>* residuals,
vector<double>* gradient,
CRSMatrix* jacobian);
// Build and return a sparse matrix for storing and working with the Jacobian
// of the objective function. The jacobian has dimensions
// NumEffectiveParameters() by NumParameters(), and is typically extremely
@ -95,6 +125,7 @@ class Evaluator {
virtual bool Evaluate(const double* state,
double* cost,
double* residuals,
double* gradient,
SparseMatrix* jacobian) = 0;
// Make a change delta (of size NumEffectiveParameters()) to state (of size

@ -31,7 +31,8 @@
// Really simple file IO.
#include <cstdio>
#include <glog/logging.h>
#include "file.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
@ -48,7 +49,7 @@ void WriteStringToFileOrDie(const string &data, const string &filename) {
}
void ReadFileToStringOrDie(const string &filename, string *data) {
FILE* file_descriptor = file_descriptor = fopen(filename.c_str(), "r");
FILE* file_descriptor = fopen(filename.c_str(), "r");
if (!file_descriptor) {
LOG(FATAL) << "Couldn't read file: " << filename;

@ -0,0 +1,186 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#
# Copyright 2011 Google Inc. All Rights Reserved.
# Author: sameeragarwal@google.com (Sameer Agarwal)
#
# Script for explicitly generating template specialization of the
# SchurEliminator class. It is a rather large class
# and the number of explicit instantiations is also large. Explicitly
# generating these instantiations in separate .cc files breaks the
# compilation into separate compilation unit rather than one large cc
# file which takes 2+GB of RAM to compile.
#
# This script creates two sets of files.
#
# 1. schur_eliminator_x_x_x.cc
# where, the x indicates the template parameters and
#
# 2. schur_eliminator.cc
#
# that contains a factory function for instantiating these classes
# based on runtime parameters.
#
# The list of tuples, specializations indicates the set of
# specializations that is generated.
# Set of template specializations to generate
#
# Each tuple is (row_block_size, e_block_size, f_block_size); the string
# "Dynamic" marks a dimension that is only known at run time.
SPECIALIZATIONS = [(2, 2, 2),
                   (2, 2, 3),
                   (2, 2, 4),
                   (2, 2, "Dynamic"),
                   (2, 3, 3),
                   (2, 3, 4),
                   (2, 3, 9),
                   (2, 3, "Dynamic"),
                   (2, 4, 3),
                   (2, 4, 4),
                   (2, 4, "Dynamic"),
                   (4, 4, 2),
                   (4, 4, 3),
                   (4, 4, 4),
                   (4, 4, "Dynamic"),
                   ("Dynamic", "Dynamic", "Dynamic")]
SPECIALIZATION_FILE = """// Copyright 2011 Google Inc. All Rights Reserved.
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Template specialization of SchurEliminator.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_eliminator_specializations.py.
// Editing it manually is not recommended.
#include "ceres/schur_eliminator_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class SchurEliminator<%s, %s, %s>;
} // namespace internal
} // namespace ceres
"""
FACTORY_FILE_HEADER = """// Copyright 2011 Google Inc. All Rights Reserved.
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_template_specializations.py.
// Editing it manually is not recommended.
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
SchurEliminatorBase*
SchurEliminatorBase::Create(const LinearSolver::Options& options) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
FACTORY_CONDITIONAL = """ if ((options.row_block_size == %s) &&
(options.e_block_size == %s) &&
(options.f_block_size == %s)) {
return new SchurEliminator<%s, %s, %s>(options);
}
"""
FACTORY_FOOTER = """
#endif
VLOG(1) << "Template specializations not found for <"
<< options.row_block_size << ","
<< options.e_block_size << ","
<< options.f_block_size << ">";
return new SchurEliminator<Dynamic, Dynamic, Dynamic>(options);
}
} // namespace internal
} // namespace ceres
"""
def SuffixForSize(size):
  # Map a template block size to its filename suffix: the sentinel
  # "Dynamic" becomes "d", numeric sizes keep their decimal value.
  return "d" if size == "Dynamic" else str(size)
def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
  # Build e.g. "prefix_2_3_d" by joining the prefix with one suffix per
  # block-size template parameter.
  suffixes = [SuffixForSize(size)
              for size in (row_block_size, e_block_size, f_block_size)]
  return "_".join([prefix] + suffixes)
def Specialize():
  """
  Generate specialization code and the conditionals to instantiate it.

  Writes one schur_eliminator_<r>_<e>_<f>.cc file per entry of
  SPECIALIZATIONS, plus schur_eliminator.cc containing the run-time
  factory that selects among them.
  """
  with open("schur_eliminator.cc", "w") as factory:
    factory.write(FACTORY_FILE_HEADER)
    for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
      # One .cc file per specialization keeps compilation units small.
      filename = SpecializationFilename("generated/schur_eliminator",
                                        row_block_size,
                                        e_block_size,
                                        f_block_size) + ".cc"
      with open(filename, "w") as specialization:
        specialization.write(SPECIALIZATION_FILE % (row_block_size,
                                                    e_block_size,
                                                    f_block_size))
      factory.write(FACTORY_CONDITIONAL % (row_block_size,
                                           e_block_size,
                                           f_block_size,
                                           row_block_size,
                                           e_block_size,
                                           f_block_size))
    factory.write(FACTORY_FOOTER)
if __name__ == "__main__":
Specialize()

@ -36,18 +36,18 @@
#include <string>
#include <vector>
#include <glog/logging.h>
#include "ceres/cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/problem.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/runtime_numeric_diff_cost_function.h"
#include "ceres/stringprintf.h"
#include "ceres/cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/problem.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {

@ -129,7 +129,7 @@ class Graph {
HashMap<Vertex, HashSet<Vertex> > edges_;
HashMap<pair<Vertex, Vertex>, double> edge_weights_;
DISALLOW_COPY_AND_ASSIGN(Graph);
CERES_DISALLOW_COPY_AND_ASSIGN(Graph);
};
} // namespace internal

@ -30,22 +30,20 @@
#include "ceres/implicit_schur_complement.h"
#include <glog/logging.h>
#include "Eigen/Dense"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
ImplicitSchurComplement::ImplicitSchurComplement(int num_eliminate_blocks,
bool constant_sparsity,
bool preconditioner)
: num_eliminate_blocks_(num_eliminate_blocks),
constant_sparsity_(constant_sparsity),
preconditioner_(preconditioner),
A_(NULL),
D_(NULL),
@ -62,7 +60,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrixBase& A,
const double* b) {
// Since initialization is reasonably heavy, perhaps we can save on
// constructing a new object everytime.
if ((A_ == NULL) || !constant_sparsity_) {
if (A_ == NULL) {
A_.reset(new PartitionedMatrixView(A, num_eliminate_blocks_));
}
@ -71,7 +69,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrixBase& A,
// Initialize temporary storage and compute the block diagonals of
// E'E and F'E.
if ((!constant_sparsity_) || (block_diagonal_EtE_inverse_ == NULL)) {
if (block_diagonal_EtE_inverse_ == NULL) {
block_diagonal_EtE_inverse_.reset(A_->CreateBlockDiagonalEtE());
if (preconditioner_) {
block_diagonal_FtF_inverse_.reset(A_->CreateBlockDiagonalFtF());
@ -92,17 +90,10 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrixBase& A,
// The block diagonals of the augmented linear system contain
// contributions from the diagonal D if it is non-null. Add that to
// the block diagonals and invert them.
if (D_ != NULL) {
AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
if (preconditioner_) {
AddDiagonalAndInvert(D_ + A_->num_cols_e(),
block_diagonal_FtF_inverse_.get());
}
} else {
AddDiagonalAndInvert(NULL, block_diagonal_EtE_inverse_.get());
if (preconditioner_) {
AddDiagonalAndInvert(NULL, block_diagonal_FtF_inverse_.get());
}
AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
if (preconditioner_) {
AddDiagonalAndInvert((D_ == NULL) ? NULL : D_ + A_->num_cols_e(),
block_diagonal_FtF_inverse_.get());
}
// Compute the RHS of the Schur complement system.

@ -91,20 +91,13 @@ class ImplicitSchurComplement : public LinearOperator {
// num_eliminate_blocks is the number of E blocks in the matrix
// A.
//
// constant_sparsity indicates if across calls to Init, the sparsity
// structure of the matrix A remains constant or not. This makes for
// significant savings across multiple matrices A, e.g. when used in
// conjunction with an optimization algorithm.
//
// preconditioner indicates whether the inverse of the matrix F'F
// should be computed or not as a preconditioner for the Schur
// Complement.
//
// TODO(sameeragarwal): Get rid of the two bools below and replace
// them with enums.
ImplicitSchurComplement(int num_eliminate_blocks,
bool constant_sparsity,
bool preconditioner);
ImplicitSchurComplement(int num_eliminate_blocks, bool preconditioner);
virtual ~ImplicitSchurComplement();
// Initialize the Schur complement for a linear least squares
@ -151,7 +144,6 @@ class ImplicitSchurComplement : public LinearOperator {
void UpdateRhs();
int num_eliminate_blocks_;
bool constant_sparsity_;
bool preconditioner_;
scoped_ptr<PartitionedMatrixView> A_;

@ -33,22 +33,18 @@
#include <algorithm>
#include <cstring>
#include <vector>
#include <glog/logging.h>
#include "Eigen/Dense"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/conjugate_gradients_solver.h"
#include "ceres/implicit_schur_complement.h"
#include "ceres/linear_solver.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/visibility_based_preconditioner.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "ceres/visibility_based_preconditioner.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
@ -69,10 +65,9 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
CHECK_NOTNULL(A->block_structure());
// Initialize a ImplicitSchurComplement object.
if ((schur_complement_ == NULL) || (!options_.constant_sparsity)) {
if (schur_complement_ == NULL) {
schur_complement_.reset(
new ImplicitSchurComplement(options_.num_eliminate_blocks,
options_.constant_sparsity,
options_.preconditioner_type == JACOBI));
}
schur_complement_->Init(*A, per_solve_options.D, b);
@ -119,7 +114,7 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
new VisibilityBasedPreconditioner(*A->block_structure(), options_));
}
is_preconditioner_good =
visibility_based_preconditioner_->Compute(*A, per_solve_options.D);
visibility_based_preconditioner_->Update(*A, per_solve_options.D);
cg_per_solve_options.preconditioner =
visibility_based_preconditioner_.get();
break;

@ -1,574 +0,0 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Implementation of a simple LM algorithm which uses the step sizing
// rule of "Methods for Nonlinear Least Squares" by K. Madsen,
// H.B. Nielsen and O. Tingleff. Available to download from
//
// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
//
// The basic algorithm described in this note is an exact step
// algorithm that depends on the Newton(LM) step being solved exactly
// in each iteration. When a suitable iterative solver is available to
// solve the Newton(LM) step, the algorithm will automatically switch
// to an inexact step solution method. This trades some slowdown in
// convergence for significant savings in solve time and memory
// usage. Our implementation of the Truncated Newton algorithm follows
// the discussion and recommendataions in "Stephen G. Nash, A Survey
// of Truncated Newton Methods, Journal of Computational and Applied
// Mathematics, 124(1-2), 45-59, 2000.
#include "ceres/levenberg_marquardt.h"
#include <algorithm>
#include <cstdlib>
#include <cmath>
#include <cstring>
#include <string>
#include <vector>
#include <glog/logging.h>
#include "Eigen/Core"
#include "ceres/evaluator.h"
#include "ceres/file.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/linear_solver.h"
#include "ceres/matrix_proto.h"
#include "ceres/sparse_matrix.h"
#include "ceres/stringprintf.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
namespace ceres {
namespace internal {
namespace {
// Numbers for clamping the size of the LM diagonal. The size of these
// numbers is heuristic. We will probably be adjusting them in the
// future based on more numerical experience. With jacobi scaling
// enabled, these numbers should be all but redundant.
const double kMinLevenbergMarquardtDiagonal = 1e-6;
const double kMaxLevenbergMarquardtDiagonal = 1e32;

// Small constant for various floating point issues (division guards,
// clamping model_cost_change away from zero, gradient-norm floor).
const double kEpsilon = 1e-12;

// Number of times the linear solver should be retried in case of
// numerical failure. The retries are done by exponentially scaling up
// mu at each retry. This leads to stronger and stronger
// regularization making the linear least squares problem better
// conditioned at each retry.
const int kMaxLinearSolverRetries = 5;
// Computes the Jacobi scaling vector D = 1/sqrt(diag(J^T * J)), i.e.
// the reciprocal of each column's Euclidean norm. kEpsilon keeps the
// division well defined for (near-)zero columns. D must be non-NULL
// and have room for jacobian.num_cols() entries.
void EstimateScale(const SparseMatrix& jacobian, double* D) {
  CHECK_NOTNULL(D);
  jacobian.SquaredColumnNorm(D);
  const int num_cols = jacobian.num_cols();
  for (int col = 0; col < num_cols; ++col) {
    D[col] = 1.0 / (kEpsilon + sqrt(D[col]));
  }
}
// Computes the Levenberg-Marquardt regularization diagonal
// D = diag(J^T * J), with each entry clamped to the range
// [kMinLevenbergMarquardtDiagonal, kMaxLevenbergMarquardtDiagonal] to
// guard against degenerate (zero or huge) column norms. D must be
// non-NULL and have room for jacobian.num_cols() entries.
void LevenbergMarquardtDiagonal(const SparseMatrix& jacobian,
                                double* D) {
  CHECK_NOTNULL(D);
  jacobian.SquaredColumnNorm(D);
  const int num_cols = jacobian.num_cols();
  for (int col = 0; col < num_cols; ++col) {
    double value = D[col];
    if (value < kMinLevenbergMarquardtDiagonal) {
      value = kMinLevenbergMarquardtDiagonal;
    } else if (value > kMaxLevenbergMarquardtDiagonal) {
      value = kMaxLevenbergMarquardtDiagonal;
    }
    D[col] = value;
  }
}
// Invokes a single user callback with the current iteration state and
// translates its return status into "should the minimizer continue?".
//
// Returns true iff the callback asked for the solve to continue. For
// SOLVER_TERMINATE_SUCCESSFULLY / SOLVER_ABORT the corresponding
// termination type is recorded in *summary and false is returned. An
// unknown status is a fatal error.
bool RunCallback(IterationCallback* callback,
                 const IterationSummary& iteration_summary,
                 Solver::Summary* summary) {
  const CallbackReturnType status = (*callback)(iteration_summary);
  switch (status) {
    case SOLVER_TERMINATE_SUCCESSFULLY:
      summary->termination_type = USER_SUCCESS;
      VLOG(1) << "Terminating on USER_SUCCESS.";
      return false;
    case SOLVER_ABORT:
      summary->termination_type = USER_ABORT;
      VLOG(1) << "Terminating on USER_ABORT.";
      return false;
    case SOLVER_CONTINUE:
      return true;
    default:
      LOG(FATAL) << "Unknown status returned by callback: "
                 << status;
      // LOG(FATAL) aborts; this return only silences compiler warnings.
      // Was "return NULL;" — a pointer constant in a bool-returning
      // function; use a proper bool instead.
      return false;
  }
}
} // namespace
// Out-of-line empty destructor; the class owns no resources that need
// explicit cleanup here.
LevenbergMarquardt::~LevenbergMarquardt() {}
// Runs the Levenberg-Marquardt minimization loop described in the file
// header (step sizing rule of Madsen, Nielsen & Tingleff).
//
// On entry, final_parameters is overwritten with initial_parameters and
// then used as the working iterate, so the caller's buffer always holds
// the best parameters found so far. The reason for termination, step
// counts and (optionally) per-iteration statistics are recorded in
// *summary. The loop stops on gradient/parameter/function tolerance,
// iteration or wall-clock budget, user callback request, or numerical
// failure.
void LevenbergMarquardt::Minimize(const Minimizer::Options& options,
                                  Evaluator* evaluator,
                                  LinearSolver* linear_solver,
                                  const double* initial_parameters,
                                  double* final_parameters,
                                  Solver::Summary* summary) {
  time_t start_time = time(NULL);
  const int num_parameters = evaluator->NumParameters();
  const int num_effective_parameters = evaluator->NumEffectiveParameters();
  const int num_residuals = evaluator->NumResiduals();

  summary->termination_type = NO_CONVERGENCE;
  summary->num_successful_steps = 0;
  summary->num_unsuccessful_steps = 0;

  // Allocate the various vectors needed by the algorithm.
  memcpy(final_parameters, initial_parameters,
         num_parameters * sizeof(*initial_parameters));

  // x aliases final_parameters, so every accepted step is immediately
  // visible in the caller's output buffer.
  VectorRef x(final_parameters, num_parameters);
  Vector x_new(num_parameters);
  Vector lm_step(num_effective_parameters);
  Vector gradient(num_effective_parameters);
  Vector scaled_gradient(num_effective_parameters);
  // Jacobi scaling vector
  Vector scale(num_effective_parameters);

  Vector f_model(num_residuals);
  Vector f(num_residuals);
  Vector f_new(num_residuals);
  Vector D(num_parameters);
  Vector muD(num_parameters);

  // Ask the Evaluator to create the jacobian matrix. The sparsity
  // pattern of this matrix is going to remain constant, so we only do
  // this once and then re-use this matrix for all subsequent Jacobian
  // computations.
  scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());

  double x_norm = x.norm();

  double cost = 0.0;
  D.setOnes();
  f.setZero();

  // Do initial cost and Jacobian evaluation.
  if (!evaluator->Evaluate(x.data(), &cost, f.data(), jacobian.get())) {
    LOG(WARNING) << "Failed to compute residuals and Jacobian. "
                 << "Terminating.";
    summary->termination_type = NUMERICAL_FAILURE;
    return;
  }

  if (options.jacobi_scaling) {
    EstimateScale(*jacobian, scale.data());
    jacobian->ScaleColumns(scale.data());
  } else {
    scale.setOnes();
  }

  // This is a poor way to do this computation. Even if fixed_cost is
  // zero, because we are subtracting two possibly large numbers, we
  // are depending on exact cancellation to give us a zero here. But
  // initial_cost and cost have been computed by two different
  // evaluators. One which runs on the whole problem (in
  // solver_impl.cc) in single threaded mode and another which runs
  // here on the reduced problem, so fixed_cost can (and does) contain
  // some numerical garbage with a relative magnitude of 1e-14.
  //
  // The right way to do this, would be to compute the fixed cost on
  // just the set of residual blocks which are held constant and were
  // removed from the original problem when the reduced problem was
  // constructed.
  summary->fixed_cost = summary->initial_cost - cost;

  double model_cost = f.squaredNorm() / 2.0;
  double total_cost = summary->fixed_cost + cost;

  // Gradient of the (scaled) problem: J^T f, un-scaled back by the
  // Jacobi scale factors.
  scaled_gradient.setZero();
  jacobian->LeftMultiply(f.data(), scaled_gradient.data());
  gradient = scaled_gradient.array() / scale.array();

  double gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
  // We need the max here to guard against the gradient being zero.
  const double gradient_max_norm_0 = max(gradient_max_norm, kEpsilon);
  double gradient_tolerance = options.gradient_tolerance * gradient_max_norm_0;

  // mu is the LM regularization strength; nu is the factor by which mu
  // is grown after consecutive unsuccessful steps.
  double mu = options.tau;
  double nu = 2.0;
  int iteration = 0;
  double actual_cost_change = 0.0;
  double step_norm = 0.0;
  double relative_decrease = 0.0;

  // Insane steps are steps which are not sane, i.e. there is some
  // numerical kookiness going on with them. There are various reasons
  // for this kookiness, some easier to diagnose then others. From the
  // point of view of the non-linear solver, they are steps which
  // cannot be used. We return with NUMERICAL_FAILURE after
  // kMaxLinearSolverRetries consecutive insane steps.
  bool step_is_sane = false;
  int num_consecutive_insane_steps = 0;

  // Whether the step resulted in a sufficient decrease in the
  // objective function when compared to the decrease in the value of
  // the linearization.
  bool step_is_successful = false;

  // Parse the iterations for which to dump the linear problem.
  vector<int> iterations_to_dump = options.lsqp_iterations_to_dump;
  sort(iterations_to_dump.begin(), iterations_to_dump.end());

  // Record the state at iteration 0, before any step is taken.
  IterationSummary iteration_summary;
  iteration_summary.iteration = iteration;
  iteration_summary.step_is_successful = false;
  iteration_summary.cost = total_cost;
  iteration_summary.cost_change = actual_cost_change;
  iteration_summary.gradient_max_norm = gradient_max_norm;
  iteration_summary.step_norm = step_norm;
  iteration_summary.relative_decrease = relative_decrease;
  iteration_summary.mu = mu;
  iteration_summary.eta = options.eta;
  iteration_summary.linear_solver_iterations = 0;
  iteration_summary.linear_solver_time_sec = 0.0;
  iteration_summary.iteration_time_sec = (time(NULL) - start_time);
  if (options.logging_type >= PER_MINIMIZER_ITERATION) {
    summary->iterations.push_back(iteration_summary);
  }

  // Check if the starting point is an optimum.
  VLOG(2) << "Gradient max norm: " << gradient_max_norm
          << " tolerance: " << gradient_tolerance
          << " ratio: " << gradient_max_norm / gradient_max_norm_0
          << " tolerance: " << options.gradient_tolerance;
  if (gradient_max_norm <= gradient_tolerance) {
    summary->termination_type = GRADIENT_TOLERANCE;
    VLOG(1) << "Terminating on GRADIENT_TOLERANCE. "
            << "Relative gradient max norm: "
            << gradient_max_norm / gradient_max_norm_0
            << " <= " << options.gradient_tolerance;
    return;
  }

  // Call the various callbacks.
  for (int i = 0; i < options.callbacks.size(); ++i) {
    if (!RunCallback(options.callbacks[i], iteration_summary, summary)) {
      return;
    }
  }

  // We only need the LM diagonal if we are actually going to do at
  // least one iteration of the optimization. So we wait to do it
  // until now.
  LevenbergMarquardtDiagonal(*jacobian, D.data());

  while ((iteration < options.max_num_iterations) &&
         (time(NULL) - start_time) <= options.max_solver_time_sec) {
    time_t iteration_start_time = time(NULL);
    step_is_sane = false;
    step_is_successful = false;

    IterationSummary iteration_summary;
    // The while loop here is just to provide an easily breakable
    // control structure. We are guaranteed to always exit this loop
    // at the end of one iteration or before.
    while (1) {
      // Regularize the linear system with sqrt(mu * D) before solving
      // for the LM step.
      muD = (mu * D).array().sqrt();
      LinearSolver::PerSolveOptions solve_options;
      solve_options.D = muD.data();
      solve_options.q_tolerance = options.eta;
      // Disable r_tolerance checking. Since we only care about
      // termination via the q_tolerance. As Nash and Sofer show,
      // r_tolerance based termination is essentially useless in
      // Truncated Newton methods.
      solve_options.r_tolerance = -1.0;

      const time_t linear_solver_start_time = time(NULL);
      LinearSolver::Summary linear_solver_summary =
          linear_solver->Solve(jacobian.get(),
                               f.data(),
                               solve_options,
                               lm_step.data());
      iteration_summary.linear_solver_time_sec =
          (time(NULL) - linear_solver_start_time);
      iteration_summary.linear_solver_iterations =
          linear_solver_summary.num_iterations;

      // Optionally dump this iteration's linear least squares problem
      // for offline debugging.
      if (binary_search(iterations_to_dump.begin(),
                        iterations_to_dump.end(),
                        iteration)) {
        CHECK(DumpLinearLeastSquaresProblem(options.lsqp_dump_directory,
                                            iteration,
                                            options.lsqp_dump_format_type,
                                            jacobian.get(),
                                            muD.data(),
                                            f.data(),
                                            lm_step.data(),
                                            options.num_eliminate_blocks))
            << "Tried writing linear least squares problem: "
            << options.lsqp_dump_directory
            << " but failed.";
      }

      // We ignore the case where the linear solver did not converge,
      // since the partial solution computed by it still maybe of use,
      // and there is no reason to ignore it, especially since we
      // spent so much time computing it.
      if ((linear_solver_summary.termination_type != TOLERANCE) &&
          (linear_solver_summary.termination_type != MAX_ITERATIONS)) {
        VLOG(1) << "Linear solver failure: retrying with a higher mu";
        break;
      }

      step_norm = (lm_step.array() * scale.array()).matrix().norm();

      // Check step length based convergence. If the step length is
      // too small, then we are done.
      const double step_size_tolerance = options.parameter_tolerance *
          (x_norm + options.parameter_tolerance);
      VLOG(2) << "Step size: " << step_norm
              << " tolerance: " << step_size_tolerance
              << " ratio: " << step_norm / step_size_tolerance
              << " tolerance: " << options.parameter_tolerance;
      if (step_norm <= options.parameter_tolerance *
          (x_norm + options.parameter_tolerance)) {
        summary->termination_type = PARAMETER_TOLERANCE;
        VLOG(1) << "Terminating on PARAMETER_TOLERANCE."
                << "Relative step size: " << step_norm / step_size_tolerance
                << " <= " << options.parameter_tolerance;
        return;
      }

      // Undo the Jacobi scaling and negate: the solver returns the
      // step for the scaled problem.
      Vector delta = -(lm_step.array() * scale.array()).matrix();
      if (!evaluator->Plus(x.data(), delta.data(), x_new.data())) {
        LOG(WARNING) << "Failed to compute Plus(x, delta, x_plus_delta). "
                     << "Terminating.";
        summary->termination_type = NUMERICAL_FAILURE;
        return;
      }

      double cost_new = 0.0;
      if (!evaluator->Evaluate(x_new.data(), &cost_new, NULL, NULL)) {
        LOG(WARNING) << "Failed to compute the value of the objective "
                     << "function. Terminating.";
        summary->termination_type = NUMERICAL_FAILURE;
        return;
      }

      // Cost predicted by the linear model: |f - J * step|^2 / 2.
      f_model.setZero();
      jacobian->RightMultiply(lm_step.data(), f_model.data());
      const double model_cost_new =
          (f.segment(0, num_residuals) - f_model).squaredNorm() / 2;

      actual_cost_change = cost - cost_new;
      double model_cost_change = model_cost - model_cost_new;

      VLOG(2) << "[Model cost] current: " << model_cost
              << " new : " << model_cost_new
              << " change: " << model_cost_change;

      VLOG(2) << "[Nonlinear cost] current: " << cost
              << " new : " << cost_new
              << " change: " << actual_cost_change
              << " relative change: " << fabs(actual_cost_change) / cost
              << " tolerance: " << options.function_tolerance;

      // In exact arithmetic model_cost_change should never be
      // negative. But due to numerical precision issues, we may end up
      // with a small negative number. model_cost_change which are
      // negative and large in absolute value are indicative of a
      // numerical failure in the solver.
      if (model_cost_change < -kEpsilon) {
        VLOG(1) << "Model cost change is negative.\n"
                << "Current : " << model_cost
                << " new : " << model_cost_new
                << " change: " << model_cost_change << "\n";
        break;
      }

      // If we have reached this far, then we are willing to trust the
      // numerical quality of the step.
      step_is_sane = true;
      num_consecutive_insane_steps = 0;

      // Check function value based convergence.
      if (fabs(actual_cost_change) < options.function_tolerance * cost) {
        VLOG(1) << "Termination on FUNCTION_TOLERANCE."
                << " Relative cost change: " << fabs(actual_cost_change) / cost
                << " tolerance: " << options.function_tolerance;
        summary->termination_type = FUNCTION_TOLERANCE;
        return;
      }

      // Clamp model_cost_change at kEpsilon from below.
      if (model_cost_change < kEpsilon) {
        VLOG(1) << "Clamping model cost change " << model_cost_change
                << " to " << kEpsilon;
        model_cost_change = kEpsilon;
      }

      relative_decrease = actual_cost_change / model_cost_change;
      VLOG(2) << "actual_cost_change / model_cost_change = "
              << relative_decrease;

      if (relative_decrease < options.min_relative_decrease) {
        VLOG(2) << "Unsuccessful step.";
        break;
      }

      VLOG(2) << "Successful step.";

      // Accept the step and refresh cost, residuals, Jacobian, scaling,
      // LM diagonal and gradient at the new iterate.
      ++summary->num_successful_steps;
      x = x_new;
      x_norm = x.norm();

      if (!evaluator->Evaluate(x.data(), &cost, f.data(), jacobian.get())) {
        LOG(WARNING) << "Failed to compute residuals and jacobian. "
                     << "Terminating.";
        summary->termination_type = NUMERICAL_FAILURE;
        return;
      }

      if (options.jacobi_scaling) {
        jacobian->ScaleColumns(scale.data());
      }

      model_cost = f.squaredNorm() / 2.0;
      LevenbergMarquardtDiagonal(*jacobian, D.data());
      scaled_gradient.setZero();
      jacobian->LeftMultiply(f.data(), scaled_gradient.data());
      gradient = scaled_gradient.array() / scale.array();
      gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();

      // Check gradient based convergence.
      VLOG(2) << "Gradient max norm: " << gradient_max_norm
              << " tolerance: " << gradient_tolerance
              << " ratio: " << gradient_max_norm / gradient_max_norm_0
              << " tolerance: " << options.gradient_tolerance;
      if (gradient_max_norm <= gradient_tolerance) {
        summary->termination_type = GRADIENT_TOLERANCE;
        VLOG(1) << "Terminating on GRADIENT_TOLERANCE. "
                << "Relative gradient max norm: "
                << gradient_max_norm / gradient_max_norm_0
                << " <= " << options.gradient_tolerance
                << " (tolerance).";
        return;
      }

      // Shrink mu (Madsen-Nielsen-Tingleff update; see the file header)
      // and reset the growth factor nu after a successful step.
      mu = mu * max(1.0 / 3.0, 1 - pow(2 * relative_decrease - 1, 3));
      nu = 2.0;
      step_is_successful = true;
      break;
    }

    if (!step_is_sane) {
      ++num_consecutive_insane_steps;
    }

    if (num_consecutive_insane_steps == kMaxLinearSolverRetries) {
      summary->termination_type = NUMERICAL_FAILURE;
      VLOG(1) << "Too many consecutive retries; ending with numerical fail.";

      if (!options.crash_and_dump_lsqp_on_failure) {
        return;
      }

      // Dump debugging information to disk.
      CHECK(options.lsqp_dump_format_type == TEXTFILE ||
            options.lsqp_dump_format_type == PROTOBUF)
          << "Dumping the linear least squares problem on crash "
          << "requires Solver::Options::lsqp_dump_format_type to be "
          << "PROTOBUF or TEXTFILE.";
      if (DumpLinearLeastSquaresProblem(options.lsqp_dump_directory,
                                        iteration,
                                        options.lsqp_dump_format_type,
                                        jacobian.get(),
                                        muD.data(),
                                        f.data(),
                                        lm_step.data(),
                                        options.num_eliminate_blocks)) {
        LOG(FATAL) << "Linear least squares problem saved to: "
                   << options.lsqp_dump_directory
                   << ". Please provide this to the Ceres developers for "
                   << " debugging along with the v=2 log.";
      } else {
        LOG(FATAL) << "Tried writing linear least squares problem: "
                   << options.lsqp_dump_directory
                   << " but failed.";
      }
    }

    if (!step_is_successful) {
      // Either the step did not lead to a decrease in cost or there
      // was numerical failure. In either case we will scale mu up and
      // retry. If it was a numerical failure, we hope that the
      // stronger regularization will make the linear system better
      // conditioned. If it was numerically sane, but there was no
      // decrease in cost, then increasing mu reduces the size of the
      // trust region and we look for a decrease closer to the
      // linearization point.
      ++summary->num_unsuccessful_steps;
      mu = mu * nu;
      nu = 2 * nu;
    }

    ++iteration;

    // Record this iteration's statistics and invoke user callbacks.
    total_cost = summary->fixed_cost + cost;
    iteration_summary.iteration = iteration;
    iteration_summary.step_is_successful = step_is_successful;
    iteration_summary.cost = total_cost;
    iteration_summary.cost_change = actual_cost_change;
    iteration_summary.gradient_max_norm = gradient_max_norm;
    iteration_summary.step_norm = step_norm;
    iteration_summary.relative_decrease = relative_decrease;
    iteration_summary.mu = mu;
    iteration_summary.eta = options.eta;
    iteration_summary.iteration_time_sec = (time(NULL) - iteration_start_time);
    if (options.logging_type >= PER_MINIMIZER_ITERATION) {
      summary->iterations.push_back(iteration_summary);
    }

    // Call the various callbacks.
    for (int i = 0; i < options.callbacks.size(); ++i) {
      if (!RunCallback(options.callbacks[i], iteration_summary, summary)) {
        return;
      }
    }
  }
}
} // namespace internal
} // namespace ceres
