Merged changes in the trunk up to revision 54992.

Resolved conflicts:
release/scripts/startup/bl_ui/space_view3d.py
Tamito Kajiyama 2013-03-03 15:07:49 +00:00
commit 66a2b84897
516 changed files with 26540 additions and 23570 deletions

@ -1971,20 +1971,27 @@ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
# strange, clang complains these are not supported, but then uses them.
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ALL -Wall)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_DECLARATION_AFTER_STATEMENT -Werror=declaration-after-statement)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_IMPLICIT_FUNCTION_DECLARATION -Werror=implicit-function-declaration)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_ERROR_RETURN_TYPE -Werror=return-type)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_AUTOLOGICAL_COMPARE -Wno-tautological-compare)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_STRICT_PROTOTYPES -Wstrict-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_MISSING_PROTOTYPES -Wmissing-prototypes)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNUSED_MACROS -Wunused-macros)
ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNUSED_PARAMETER -Wunused-parameter)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_ALL -Wall)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_AUTOLOGICAL_COMPARE -Wno-tautological-compare)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_UNKNOWN_PRAGMAS -Wno-unknown-pragmas)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_CHAR_SUBSCRIPTS -Wno-char-subscripts)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_OVERLOADED_VIRTUAL -Wno-overloaded-virtual) # we get a lot of these, if its a problem a dev needs to look into it.
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_SIGN_COMPARE -Wno-sign-compare)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_NO_INVALID_OFFSETOF -Wno-invalid-offsetof)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNUSED_MACROS -Wunused-macros)
# gives too many unfixable warnings
# ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNUSED_MACROS -Wunused-macros)
# ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNUSED_MACROS -Wunused-macros)
elseif(CMAKE_C_COMPILER_ID MATCHES "Intel")

@ -528,11 +528,13 @@ if B.targets != ['cudakernels']:
data_to_c_simple("release/datafiles/preview_cycles.blend")
# --- glsl ---
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_fixed_fragment.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_fixed_vertex.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_material.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_material.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_vertex.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_sep_gaussian_blur_frag.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_sep_gaussian_blur_vert.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_material.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_vertex.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_vsm_store_frag.glsl")
data_to_c_simple("source/blender/gpu/shaders/gpu_shader_vsm_store_vert.glsl")

@ -796,6 +796,11 @@ compile_OIIO() {
cmake_d="$cmake_d -D CMAKE_PREFIX_PATH=$_inst"
cmake_d="$cmake_d -D CMAKE_INSTALL_PREFIX=$_inst"
cmake_d="$cmake_d -D BUILDSTATIC=ON"
# Optional tests and cmd tools
cmake_d="$cmake_d -D USE_QT=OFF"
cmake_d="$cmake_d -D OIIO_BUILD_TOOLS=OFF"
cmake_d="$cmake_d -D OIIO_BUILD_TESTS=OFF"
# linking statically could give issues on Debian/Ubuntu (and probably other distros
# which don't like static linking) when linking shared oiio library due to missing

@ -143,7 +143,9 @@ macro(blender_source_group
foreach(_SRC ${sources})
get_filename_component(_SRC_EXT ${_SRC} EXT)
if((${_SRC_EXT} MATCHES ".h") OR (${_SRC_EXT} MATCHES ".hpp"))
if((${_SRC_EXT} MATCHES ".h") OR
(${_SRC_EXT} MATCHES ".hpp") OR
(${_SRC_EXT} MATCHES ".hh"))
source_group("Header Files" FILES ${_SRC})
else()
source_group("Source Files" FILES ${_SRC})
@ -499,6 +501,7 @@ macro(remove_strict_flags)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
remove_cc_flag("-Wunused-parameter")
remove_cc_flag("-Wunused-variable")
remove_cc_flag("-Werror=[^ ]+")
remove_cc_flag("-Werror")
endif()

@ -7,12 +7,9 @@
1. Introduction
2. Obtaining CMake
3. Obtaining Dependencies
4. Deciding on a Build Environment
5. Configuring the build for the first time
6. Configuring the build after SVN updates
7. Specify alternate Python library versions and locations
3. Building Blender
4. Generic Setup
5. Configuring the build after SVN updates
1. Introduction
---------------
@ -28,36 +25,17 @@
The website also contains some documentation on CMake usage but I found
the man page alone pretty helpful.
3. Obtaining Dependencies
-------------------------
3. Building Blender
-------------------
Check from the page
http://www.blender.org/cms/Getting_Dependencies.135.0.html that you
have all dependencies needed for building Blender. Note that for
windows many of these dependencies already come in the lib/windows
module from SVN.
Building Blender requires obtaining a compiler, library dependencies,
and correct setup depending on the system. For details on how to set
up a build on various operating systems, see the wiki documentation:
4. Deciding on a Build Environment
----------------------------------
http://wiki.blender.org/index.php/Dev:Doc/Building_Blender
To build Blender with the CMake scripts you first need to decide which
build environment you feel comfortable with. This decision will also be
influenced by the platform you are developing on. The current implementation
has been successfully used to generate build files for the following
environments:
1. Microsoft Visual Studio 2008. There is a free version available
at http://www.microsoft.com/visualstudio/en-us/products/2008-editions/express
2. Xcode on Mac OSX
3. Unix Makefiles (On Linux and Mac OSX): CMake actually creates make
files which generate nicely color-coded output and a percentage
progress indicator.
5. Configuring the build for the first time
-------------------------------------------
4. Generic Setup
----------------
CMake allows one to generate the build project files and binary objects
outside the source tree which can be pretty handy in working and experimenting
@ -113,42 +91,14 @@
It is also possible to use the commandline of 'cmake' to override certain
of these settings.
6. Configuring the build after SVN updates
5. Configuring the build after SVN updates
------------------------------------------
The $BLENDERBUILD directory maintains a file called CMakeCache.txt which
remembers the initial run's settings for subsequent generation runs. After
every SVN update it may be a good idea to rerun the generation before building
Blender again. Just rerun the original 'cmake' run to do this, the settings
will be remembered. For the example above the following will do after every
'svn up':
% cmake -G Xcode $BLENDERSOURCE
7. Specify alternate Python library versions and locations
----------------------------------------------------------
The commandline can be used to override detected/default settings, e.g:
On Unix:
cmake -D PYTHON_LIBRARY=/usr/local/lib/python3.2/config/libpython3.2.so -D PYTHON_INCLUDE_DIR=/usr/local/include/python3.2 ../blender
On Macs:
cmake -D PYTHON_INCLUDE_DIRS=/System/Library/Frameworks/Python.framework/Versions/3.2/include/python3.2 -G Xcode ../blender
Note that this should only be needed once per build directory generation because it will keep the overrides in CMakeCache.txt for subsequent runs.
To be continued...
TODO's
------
1. Get CMake to create proper distribution directories for the various platforms
like scons does.
2. Investigate the viability of using CPack to package installs automatically.
3. Refine this document and write detailed developer's document.
4. Make sure all options (ffmpeg, openexr, quicktime) have proper CMake support
on the various platforms.
After SVN updates that contain changes to the build system, rebuilding Blender will
automatically invoke CMake to regenerate the CMakeCache.txt and other files
as needed.
/Jacques Beaurain (jbinto)

@ -127,7 +127,7 @@ Variables
.. data:: joysticks
A list of attached joysticks. The list size it he maximum number of supported joysticks. If no joystick is available for a given slot, the slot is set to None.
A list of attached :class:`~bge.types.SCA_PythonJoystick`s. The list size is the maximum number of supported joysticks. If no joystick is available for a given slot, the slot is set to None.
*****************
General functions

@ -91,7 +91,7 @@ if(WITH_BOOST)
-DCARVE_SYSTEM_BOOST
)
list(APPEND INC
list(APPEND INC_SYS
\${BOOST_INCLUDE_DIR}
)
endif()

extern/carve/files.txt (vendored, 192 changed lines)

@ -1,107 +1,109 @@
include/carve/polyhedron_decl.hpp
include/carve/geom2d.hpp
include/carve/exact.hpp
include/carve/triangulator_impl.hpp
include/carve/collection.hpp
include/carve/pointset.hpp
include/carve/djset.hpp
include/carve/kd_node.hpp
include/carve/polyline.hpp
include/carve/polyline_iter.hpp
include/carve/geom3d.hpp
include/carve/edge_decl.hpp
include/carve/face_decl.hpp
include/carve/vertex_impl.hpp
include/carve/aabb_impl.hpp
include/carve/colour.hpp
include/carve/pointset_iter.hpp
include/carve/polyline_decl.hpp
include/carve/rescale.hpp
include/carve/mesh_impl.hpp
include/carve/classification.hpp
include/carve/util.hpp
include/carve/triangulator.hpp
include/carve/polyhedron_base.hpp
include/carve/rtree.hpp
include/carve/math.hpp
include/carve/math_constants.hpp
include/carve/octree_decl.hpp
include/carve/input.hpp
include/carve/mesh_ops.hpp
include/carve/debug_hooks.hpp
include/carve/mesh_simplify.hpp
include/carve/interpolator.hpp
include/carve/poly_decl.hpp
include/carve/csg.hpp
include/carve/pointset_iter.hpp
include/carve/debug_hooks.hpp
include/carve/mesh.hpp
include/carve/carve.hpp
include/carve/gnu_cxx.h
include/carve/polyhedron_impl.hpp
include/carve/poly_impl.hpp
include/carve/aabb.hpp
include/carve/convex_hull.hpp
include/carve/vertex_decl.hpp
include/carve/win32.h
include/carve/edge_impl.hpp
include/carve/tag.hpp
include/carve/tree.hpp
include/carve/heap.hpp
include/carve/matrix.hpp
include/carve/poly.hpp
include/carve/vector.hpp
include/carve/intersection.hpp
include/carve/faceloop.hpp
include/carve/geom_impl.hpp
include/carve/octree_impl.hpp
include/carve/spacetree.hpp
include/carve/collection/unordered/std_impl.hpp
include/carve/triangulator_impl.hpp
include/carve/edge_decl.hpp
include/carve/collection/unordered.hpp
include/carve/collection/unordered/tr1_impl.hpp
include/carve/collection/unordered/fallback_impl.hpp
include/carve/collection/unordered/std_impl.hpp
include/carve/collection/unordered/vcpp_impl.hpp
include/carve/collection/unordered/libstdcpp_impl.hpp
include/carve/collection/unordered/boost_impl.hpp
include/carve/collection/unordered/vcpp_impl.hpp
include/carve/collection/unordered/fallback_impl.hpp
include/carve/collection/unordered.hpp
include/carve/face_impl.hpp
include/carve/pointset_impl.hpp
include/carve/cbrt.h
include/carve/vcpp_config.h
include/carve/convex_hull.hpp
include/carve/geom.hpp
include/carve/vertex_impl.hpp
include/carve/polyline_impl.hpp
include/carve/pointset_decl.hpp
include/carve/timing.hpp
include/carve/csg_triangulator.hpp
include/carve/iobj.hpp
include/carve/collection_types.hpp
lib/carve.cpp
lib/mesh.cpp
lib/intersect_group.cpp
lib/intersect_classify_common.hpp
lib/intersect_classify_edge.cpp
lib/intersect_classify_group.cpp
lib/csg_data.hpp
lib/polyhedron.cpp
lib/csg_collector.hpp
lib/geom3d.cpp
lib/polyline.cpp
lib/csg_collector.cpp
lib/triangulator.cpp
lib/intersect_face_division.cpp
lib/intersect_half_classify_group.cpp
lib/edge.cpp
lib/math.cpp
lib/geom2d.cpp
lib/tag.cpp
include/carve/cbrt.h
include/carve/util.hpp
include/carve/iobj.hpp
include/carve/polyline_decl.hpp
include/carve/polyline_impl.hpp
include/carve/win32.h
include/carve/edge_impl.hpp
include/carve/carve.hpp
include/carve/polyline.hpp
include/carve/config.h
include/carve/face_decl.hpp
include/carve/matrix.hpp
include/carve/classification.hpp
include/carve/geom_impl.hpp
include/carve/faceloop.hpp
include/carve/mesh_ops.hpp
include/carve/tree.hpp
include/carve/geom2d.hpp
include/carve/face_impl.hpp
include/carve/polyhedron_decl.hpp
include/carve/interpolator.hpp
include/carve/poly_decl.hpp
include/carve/mesh_impl.hpp
include/carve/gnu_cxx.h
include/carve/mesh_simplify.hpp
include/carve/triangulator.hpp
include/carve/pointset_impl.hpp
include/carve/rtree.hpp
include/carve/math_constants.hpp
include/carve/vector.hpp
include/carve/octree_impl.hpp
include/carve/pointset.hpp
include/carve/math.hpp
include/carve/intersection.hpp
include/carve/colour.hpp
include/carve/kd_node.hpp
include/carve/input.hpp
include/carve/geom3d.hpp
include/carve/exact.hpp
include/carve/rescale.hpp
include/carve/polyhedron_base.hpp
include/carve/heap.hpp
include/carve/spacetree.hpp
include/carve/polyhedron_impl.hpp
include/carve/vcpp_config.h
include/carve/aabb.hpp
include/carve/polyline_iter.hpp
include/carve/djset.hpp
include/carve/vertex_decl.hpp
include/carve/csg_triangulator.hpp
include/carve/poly.hpp
include/carve/external/boost/random.hpp
include/carve/timing.hpp
include/carve/octree_decl.hpp
include/carve/pointset_decl.hpp
include/carve/tag.hpp
include/carve/collection.hpp
include/carve/poly_impl.hpp
lib/intersection.cpp
lib/convex_hull.cpp
lib/intersect_common.hpp
lib/intersect_classify_common_impl.hpp
lib/csg.cpp
lib/intersect.cpp
lib/csg_detail.hpp
lib/face.cpp
lib/pointset.cpp
lib/timing.cpp
lib/octree.cpp
lib/aabb.cpp
lib/triangulator.cpp
lib/intersect_debug.hpp
lib/csg_collector.hpp
lib/csg_data.hpp
lib/convex_hull.cpp
lib/intersect_classify_common.hpp
lib/intersect_common.hpp
lib/polyhedron.cpp
lib/polyline.cpp
lib/pointset.cpp
lib/geom2d.cpp
lib/math.cpp
lib/intersect_half_classify_group.cpp
lib/intersect_face_division.cpp
lib/tag.cpp
lib/aabb.cpp
lib/intersect_classify_group.cpp
lib/csg_detail.hpp
lib/mesh.cpp
lib/timing.cpp
lib/geom3d.cpp
lib/intersect_group.cpp
lib/carve.cpp
lib/intersect_classify_edge.cpp
lib/csg.cpp
lib/face.cpp
lib/csg_collector.cpp
lib/intersect_debug.cpp
lib/edge.cpp
lib/intersect_classify_common_impl.hpp
lib/octree.cpp

@ -174,7 +174,7 @@ namespace carve {
double scoreQuad(edge_map_t::iterator i, edge_map_t &edge_map) {
if (!(*i).second.first || !(*i).second.second) return -1;
return 0;
return -1;
}
carve::mesh::MeshSet<3>::face_t *mergeQuad(edge_map_t::iterator i, edge_map_t &edge_map) {

@ -206,9 +206,22 @@ namespace carve {
* * +1, if a is ordered after b around, rotating about direction.
*/
inline int compareAngles(const Vector &direction, const Vector &base, const Vector &a, const Vector &b) {
const double d1 = carve::geom3d::orient3d(carve::geom::VECTOR(0,0,0), direction, a, b);
const double d2 = carve::geom3d::orient3d(carve::geom::VECTOR(0,0,0), direction, base, a);
const double d3 = carve::geom3d::orient3d(carve::geom::VECTOR(0,0,0), direction, base, b);
// double d1 = carve::geom3d::orient3d(carve::geom::VECTOR(0,0,0), direction, a, b);
// double d2 = carve::geom3d::orient3d(carve::geom::VECTOR(0,0,0), direction, base, a);
// double d3 = carve::geom3d::orient3d(carve::geom::VECTOR(0,0,0), direction, base, b);
#if defined(CARVE_USE_EXACT_PREDICATES)
// which is equivalent to the following (which eliminates a
// vector subtraction):
double d1 = carve::geom3d::orient3d(direction, b, a, carve::geom::VECTOR(0,0,0));
double d2 = carve::geom3d::orient3d(direction, a, base, carve::geom::VECTOR(0,0,0));
double d3 = carve::geom3d::orient3d(direction, b, base, carve::geom::VECTOR(0,0,0));
#else
// dotcross = a . (b x c)
double d1 = carve::geom::dotcross(direction, b, a );
double d2 = carve::geom::dotcross(direction, a, base);
double d3 = carve::geom::dotcross(direction, b, base);
#endif
// CASE: a and b are coplanar wrt. direction.
if (d1 == 0.0) {

@ -396,7 +396,7 @@ namespace carve {
// Compute a . (b x c)
return
(a.x * b.y * c.z + a.y * b.z * c.x + a.z * b.x * c.y) -
(a.x * b.z * c.y + a.y * b.x * c.z + a.z * b.y * c.x);
(a.x * c.y * b.z + a.y * c.z * b.x + a.z * c.x * b.y);
}
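For reference, the removed and added expressions above compute the same scalar triple product; only the grouping of the factors changes (presumably to keep the evaluation order consistent with the dotcross() helper used elsewhere, though the patch does not state the motivation). The identity being implemented is:
a \cdot (b \times c)
  = \det\begin{pmatrix} a_x & a_y & a_z \\ b_x & b_y & b_z \\ c_x & c_y & c_z \end{pmatrix}
  = a_x (b_y c_z - b_z c_y) + a_y (b_z c_x - b_x c_z) + a_z (b_x c_y - b_y c_x),
which expands to exactly the six products that appear in the code.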

@ -17,6 +17,9 @@
#pragma once
#include <map>
#include <string>
#include <carve/carve.hpp>
#include <carve/poly.hpp>
#include <carve/mesh.hpp>
@ -28,6 +31,50 @@
namespace carve {
namespace input {
typedef std::map<std::string, std::string> Options;
static inline Options opts() {
return Options();
}
static inline Options opts(const char **kv) {
Options r;
for (size_t i = 0; kv[i] != NULL; i += 2) {
r[kv[i]] = kv[i+1];
}
return r;
}
static inline Options opts(const std::string &k1, const std::string &v1) {
Options r;
r[k1] = v1;
return r;
}
static inline Options opts(const std::string &k1, const std::string &v1,
const std::string &k2, const std::string &v2) {
Options r;
r[k1] = v1;
r[k2] = v2;
return r;
}
static inline Options opts(const std::string &k1, const std::string &v1,
const std::string &k2, const std::string &v2,
const std::string &k3, const std::string &v3) {
Options r;
r[k1] = v1;
r[k2] = v2;
r[k3] = v3;
return r;
}
static inline bool _bool(const std::string &str, bool _default = false) {
if (str == "true") return true;
if (str == "false") return false;
return _default;
}
struct Data {
Data() {
}
@ -126,12 +173,18 @@ namespace carve {
faceCount = 0;
}
carve::poly::Polyhedron *create() const {
carve::poly::Polyhedron *create(const Options &options) const {
return new carve::poly::Polyhedron(points, faceCount, faceIndices);
}
carve::mesh::MeshSet<3> *createMesh() const {
return new carve::mesh::MeshSet<3>(points, faceCount, faceIndices);
carve::mesh::MeshSet<3> *createMesh(const Options &options) const {
Options::const_iterator i;
carve::mesh::MeshOptions opts;
i = options.find("avoid_cavities");
if (i != options.end()) {
opts.avoid_cavities(_bool((*i).second));
}
return new carve::mesh::MeshSet<3>(points, faceCount, faceIndices, opts);
}
};
@ -159,7 +212,7 @@ namespace carve {
polylines.back().second.push_back(idx);
}
carve::line::PolylineSet *create() const {
carve::line::PolylineSet *create(const Options &options) const {
carve::line::PolylineSet *p = new carve::line::PolylineSet(points);
for (std::list<polyline_data_t>::const_iterator i = polylines.begin();
@ -181,7 +234,7 @@ namespace carve {
virtual ~PointSetData() {
}
carve::point::PointSet *create() const {
carve::point::PointSet *create(const Options &options) const {
carve::point::PointSet *p = new carve::point::PointSet(points);
return p;
}
@ -214,37 +267,37 @@ namespace carve {
}
template<typename T>
static inline T *create(Data *d) {
static inline T *create(Data *d, const Options &options = Options()) {
return NULL;
}
};
template<>
inline carve::mesh::MeshSet<3> *Input::create(Data *d) {
inline carve::mesh::MeshSet<3> *Input::create(Data *d, const Options &options) {
PolyhedronData *p = dynamic_cast<PolyhedronData *>(d);
if (p == NULL) return NULL;
return p->createMesh();
return p->createMesh(options);
}
template<>
inline carve::poly::Polyhedron *Input::create(Data *d) {
inline carve::poly::Polyhedron *Input::create(Data *d, const Options &options) {
PolyhedronData *p = dynamic_cast<PolyhedronData *>(d);
if (p == NULL) return NULL;
return p->create();
return p->create(options);
}
template<>
inline carve::line::PolylineSet *Input::create(Data *d) {
inline carve::line::PolylineSet *Input::create(Data *d, const Options &options) {
PolylineSetData *p = dynamic_cast<PolylineSetData *>(d);
if (p == NULL) return NULL;
return p->create();
return p->create(options);
}
template<>
inline carve::point::PointSet *Input::create(Data *d) {
inline carve::point::PointSet *Input::create(Data *d, const Options &options) {
PointSetData *p = dynamic_cast<PointSetData *>(d);
if (p == NULL) return NULL;
return p->create();
return p->create(options);
}
}
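As a usage illustration only (the helper below is hypothetical; it relies solely on the opts() overloads and the Options-taking createMesh() added in this hunk, with the input population omitted):
#include <carve/input.hpp>
#include <carve/mesh.hpp>

// Hypothetical caller: build a MeshSet from already-populated input data,
// asking the face stitcher to avoid creating cavities.
carve::mesh::MeshSet<3> *buildMesh(const carve::input::PolyhedronData &data) {
  // "avoid_cavities" is parsed by _bool(); any value other than "true"
  // leaves the option at its default (false).
  return data.createMesh(carve::input::opts("avoid_cavities", "true"));
}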

@ -464,8 +464,27 @@ namespace carve {
struct MeshOptions {
bool opt_avoid_cavities;
MeshOptions() :
opt_avoid_cavities(false) {
}
MeshOptions &avoid_cavities(bool val) {
opt_avoid_cavities = val;
return *this;
}
};
namespace detail {
class FaceStitcher {
FaceStitcher();
FaceStitcher(const FaceStitcher &);
FaceStitcher &operator=(const FaceStitcher &);
typedef Vertex<3> vertex_t;
typedef Edge<3> edge_t;
typedef Face<3> face_t;
@ -475,6 +494,8 @@ namespace carve {
typedef std::unordered_map<vpair_t, edgelist_t, carve::mesh::hash_vertex_pair> edge_map_t;
typedef std::unordered_map<const vertex_t *, std::set<const vertex_t *> > edge_graph_t;
MeshOptions opts;
edge_map_t edges;
edge_map_t complex_edges;
@ -570,6 +591,8 @@ namespace carve {
void build(iter_t begin, iter_t end, std::vector<Mesh<3> *> &meshes);
public:
FaceStitcher(const MeshOptions &_opts);
template<typename iter_t>
void create(iter_t begin, iter_t end, std::vector<Mesh<3> *> &meshes);
};
@ -623,7 +646,7 @@ namespace carve {
~Mesh();
template<typename iter_t>
static void create(iter_t begin, iter_t end, std::vector<Mesh<ndim> *> &meshes);
static void create(iter_t begin, iter_t end, std::vector<Mesh<ndim> *> &meshes, const MeshOptions &opts);
aabb_t getAABB() const {
return aabb_t(faces.begin(), faces.end());
@ -692,7 +715,7 @@ namespace carve {
MeshSet &operator=(const MeshSet &);
template<typename iter_t>
void _init_from_faces(iter_t begin, iter_t end);
void _init_from_faces(iter_t begin, iter_t end, const MeshOptions &opts);
public:
typedef Vertex<ndim> vertex_t;
@ -781,13 +804,16 @@ namespace carve {
MeshSet(const std::vector<typename vertex_t::vector_t> &points,
size_t n_faces,
const std::vector<int> &face_indices);
const std::vector<int> &face_indices,
const MeshOptions &opts = MeshOptions());
// Construct a mesh set from a set of disconnected faces. Takes
// posession of the face pointers.
MeshSet(std::vector<face_t *> &faces);
MeshSet(std::vector<face_t *> &faces,
const MeshOptions &opts = MeshOptions());
MeshSet(std::list<face_t *> &faces);
MeshSet(std::list<face_t *> &faces,
const MeshOptions &opts = MeshOptions());
MeshSet(std::vector<vertex_t> &_vertex_storage,
std::vector<mesh_t *> &_meshes);
@ -817,6 +843,8 @@ namespace carve {
void collectVertices();
void canonicalize();
void separateMeshes();
};
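A minimal sketch of constructing a MeshSet with the new MeshOptions argument; the tetrahedron data and the count-prefixed face_indices layout are illustrative assumptions, not taken from this patch:
#include <carve/mesh.hpp>
#include <vector>

int main() {
  // Four vertices of a tetrahedron.
  std::vector<carve::geom::vector<3> > points;
  points.push_back(carve::geom::VECTOR(0.0, 0.0, 0.0));
  points.push_back(carve::geom::VECTOR(1.0, 0.0, 0.0));
  points.push_back(carve::geom::VECTOR(0.0, 1.0, 0.0));
  points.push_back(carve::geom::VECTOR(0.0, 0.0, 1.0));

  // Assumed layout: for each face, a vertex count followed by that many indices.
  int raw[] = { 3, 0, 1, 2,   3, 0, 3, 1,   3, 1, 3, 2,   3, 2, 3, 0 };
  std::vector<int> face_indices(raw, raw + sizeof(raw) / sizeof(raw[0]));

  // The trailing argument is the MeshOptions parameter added by this patch.
  carve::mesh::MeshSet<3> mesh(points, 4, face_indices,
                               carve::mesh::MeshOptions().avoid_cavities(true));
  return 0;
}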

@ -676,7 +676,7 @@ namespace carve {
template<unsigned ndim>
template<typename iter_t>
void Mesh<ndim>::create(iter_t begin, iter_t end, std::vector<Mesh<ndim> *> &meshes) {
void Mesh<ndim>::create(iter_t begin, iter_t end, std::vector<Mesh<ndim> *> &meshes, const MeshOptions &opts) {
meshes.clear();
}
@ -684,15 +684,15 @@ namespace carve {
template<>
template<typename iter_t>
void Mesh<3>::create(iter_t begin, iter_t end, std::vector<Mesh<3> *> &meshes) {
detail::FaceStitcher().create(begin, end, meshes);
void Mesh<3>::create(iter_t begin, iter_t end, std::vector<Mesh<3> *> &meshes, const MeshOptions &opts) {
detail::FaceStitcher(opts).create(begin, end, meshes);
}
template<unsigned ndim>
template<typename iter_t>
void MeshSet<ndim>::_init_from_faces(iter_t begin, iter_t end) {
void MeshSet<ndim>::_init_from_faces(iter_t begin, iter_t end, const MeshOptions &opts) {
typedef std::unordered_map<const vertex_t *, size_t> map_t;
map_t vmap;
@ -723,7 +723,7 @@ namespace carve {
} while (e != f->edge);
}
mesh_t::create(begin, end, meshes);
mesh_t::create(begin, end, meshes, opts);
for (size_t i = 0; i < meshes.size(); ++i) {
meshes[i]->meshset = this;
@ -735,7 +735,8 @@ namespace carve {
template<unsigned ndim>
MeshSet<ndim>::MeshSet(const std::vector<typename MeshSet<ndim>::vertex_t::vector_t> &points,
size_t n_faces,
const std::vector<int> &face_indices) {
const std::vector<int> &face_indices,
const MeshOptions &opts) {
vertex_storage.reserve(points.size());
std::vector<face_t *> faces;
faces.reserve(n_faces);
@ -755,7 +756,7 @@ namespace carve {
faces.push_back(new face_t(v.begin(), v.end()));
}
CARVE_ASSERT(p == face_indices.size());
mesh_t::create(faces.begin(), faces.end(), meshes);
mesh_t::create(faces.begin(), faces.end(), meshes, opts);
for (size_t i = 0; i < meshes.size(); ++i) {
meshes[i]->meshset = this;
@ -765,15 +766,15 @@ namespace carve {
template<unsigned ndim>
MeshSet<ndim>::MeshSet(std::vector<face_t *> &faces) {
_init_from_faces(faces.begin(), faces.end());
MeshSet<ndim>::MeshSet(std::vector<face_t *> &faces, const MeshOptions &opts) {
_init_from_faces(faces.begin(), faces.end(), opts);
}
template<unsigned ndim>
MeshSet<ndim>::MeshSet(std::list<face_t *> &faces) {
_init_from_faces(faces.begin(), faces.end());
MeshSet<ndim>::MeshSet(std::list<face_t *> &faces, const MeshOptions &opts) {
_init_from_faces(faces.begin(), faces.end(), opts);
}
@ -1010,5 +1011,41 @@ namespace carve {
vertex_storage.swap(vout);
}
template<unsigned ndim>
void MeshSet<ndim>::separateMeshes() {
size_t n;
typedef std::unordered_map<std::pair<mesh_t *, vertex_t *>, vertex_t *> vmap_t;
vmap_t vmap;
typename vmap_t::iterator vmap_iter;
for (face_iter i = faceBegin(); i != faceEnd(); ++i) {
face_t *f = *i;
for (typename face_t::edge_iter_t j = f->begin(); j != f->end(); ++j) {
edge_t &e = *j;
vmap[std::make_pair(f->mesh, e.vert)] = e.vert;
}
}
std::vector<vertex_t> vout;
vout.reserve(vmap.size());
for (n = 0, vmap_iter = vmap.begin(); vmap_iter != vmap.end(); ++vmap_iter, ++n) {
vout.push_back(*(*vmap_iter).second);
(*vmap_iter).second = & vout.back();
}
for (face_iter i = faceBegin(); i != faceEnd(); ++i) {
face_t *f = *i;
for (typename face_t::edge_iter_t j = f->begin(); j != f->end(); ++j) {
edge_t &e = *j;
e.vert = vmap[std::make_pair(f->mesh, e.vert)];
}
}
vertex_storage.swap(vout);
}
}
}

@ -1320,8 +1320,8 @@ void carve::csg::CSG::calc(meshset_t *a,
}
#endif
checkFaceLoopIntegrity(a_face_loops);
checkFaceLoopIntegrity(b_face_loops);
// checkFaceLoopIntegrity(a_face_loops);
// checkFaceLoopIntegrity(b_face_loops);
#if defined(CARVE_DEBUG)
std::cerr << "classify" << std::endl;

@ -1110,8 +1110,7 @@ namespace {
}
// copy up to the end of the path.
if (pos < e1_1)
std::copy(base_loop.begin() + pos, base_loop.begin() + e1_1, std::back_inserter(out));
std::copy(base_loop.begin() + pos, base_loop.begin() + e1_1, std::back_inserter(out));
CARVE_ASSERT(base_loop[e1_1] == p1.back());
std::copy(p1.rbegin(), p1.rend() - 1, std::back_inserter(out));

@ -243,15 +243,20 @@ namespace carve {
bool FaceStitcher::EdgeOrderData::Cmp::operator()(const EdgeOrderData &a, const EdgeOrderData &b) const {
int v = carve::geom3d::compareAngles(edge_dir, base_dir, a.face_dir, b.face_dir);
double da = carve::geom3d::antiClockwiseAngle(base_dir, a.face_dir, edge_dir);
double db = carve::geom3d::antiClockwiseAngle(base_dir, b.face_dir, edge_dir);
int v0 = v;
v = 0;
if (da < db) v = -1;
if (db < da) v = +1;
if (v0 != v) {
std::cerr << "v0= " << v0 << " v= " << v << " da= " << da << " db= " << db << " " << edge_dir << " " << base_dir << " " << a.face_dir << b.face_dir << std::endl;
#if defined(CARVE_DEBUG)
{
double da = carve::geom3d::antiClockwiseAngle(base_dir, a.face_dir, edge_dir);
double db = carve::geom3d::antiClockwiseAngle(base_dir, b.face_dir, edge_dir);
int v_cmp = 0;
if (da < db) v_cmp = -1;
if (db < da) v_cmp = +1;
if (v_cmp != v) {
std::cerr << "v= " << v << " v_cmp= " << v_cmp << " da= " << da << " db= " << db << " edge_dir=" << edge_dir << " base_dir=" << base_dir << " a=" << a.face_dir << " b=" << b.face_dir << std::endl;
}
}
#endif
if (v < 0) return true;
if (v == 0) {
if (a.is_reversed && !b.is_reversed) return true;
@ -327,9 +332,14 @@ namespace carve {
CARVE_ASSERT(erev[0][i]->v2() == erev[j][i]->v2());
}
std::sort(result[i].begin(),
result[i].end(),
EdgeOrderData::Cmp(base->v2()->v - base->v1()->v, result[i][0].face_dir));
geom::vector<3> sort_dir;
if (opts.opt_avoid_cavities) {
sort_dir = base->v1()->v - base->v2()->v;
} else {
sort_dir = base->v2()->v - base->v1()->v;
}
std::sort(result[i].begin(), result[i].end(), EdgeOrderData::Cmp(sort_dir, result[i][0].face_dir));
}
}
@ -751,11 +761,15 @@ namespace carve {
}
}
}
FaceStitcher::FaceStitcher(const MeshOptions &_opts) : opts(_opts) {
}
}
}
// construct a MeshSet from a Polyhedron, maintaining on the
// connectivity information in the Polyhedron.
mesh::MeshSet<3> *meshFromPolyhedron(const poly::Polyhedron *poly, int manifold_id) {

@ -233,7 +233,7 @@ namespace carve {
}
std::vector<mesh::Mesh<3> *> meshes;
mesh::Mesh<3>::create(mesh_faces.begin(), mesh_faces.end(), meshes);
mesh::Mesh<3>::create(mesh_faces.begin(), mesh_faces.end(), meshes, mesh::MeshOptions());
mesh::MeshSet<3> *meshset = new mesh::MeshSet<3>(vertex_storage, meshes);
manifold_is_closed.resize(meshset->meshes.size());

@ -1,4 +1,3 @@
strict_flags.patch
includes.patch
win32.patch
mesh_iterator.patch

@ -1,22 +0,0 @@
diff -r 47dfdaff1dd5 include/carve/csg_triangulator.hpp
--- a/include/carve/csg_triangulator.hpp Thu Jan 12 15:49:04 2012 -0500
+++ b/include/carve/csg_triangulator.hpp Fri Jan 13 03:13:32 2012 +0600
@@ -174,6 +174,7 @@
double scoreQuad(edge_map_t::iterator i, edge_map_t &edge_map) {
if (!(*i).second.first || !(*i).second.second) return -1;
+ return 0;
}
carve::mesh::MeshSet<3>::face_t *mergeQuad(edge_map_t::iterator i, edge_map_t &edge_map) {
diff -r 47dfdaff1dd5 lib/selfintersect.cpp
--- a/lib/selfintersect.cpp Thu Jan 12 15:49:04 2012 -0500
+++ b/lib/selfintersect.cpp Fri Jan 13 03:13:32 2012 +0600
@@ -465,6 +465,7 @@
// returns true if no intersection, based upon edge^a_i and edge^b_j separating axis.
bool sat_edge(const vec3 tri_a[3], const vec3 tri_b[3], unsigned i, unsigned j) {
+ return false;
}

@ -28,14 +28,11 @@
set(INC
.
../colamd/Include
third_party/ceres/include
)
set(INC_SYS
../Eigen3
third_party/ssba
third_party/ldl/Include
${PNG_INCLUDE_DIR}
${ZLIB_INCLUDE_DIRS}
)
@ -62,7 +59,6 @@ set(SRC
libmv/simple_pipeline/pipeline.cc
libmv/simple_pipeline/reconstruction.cc
libmv/simple_pipeline/resect.cc
libmv/simple_pipeline/rigid_registration.cc
libmv/simple_pipeline/tracks.cc
libmv/tracking/brute_region_tracker.cc
libmv/tracking/esm_region_tracker.cc
@ -83,9 +79,6 @@ set(SRC
third_party/gflags/gflags.cc
third_party/gflags/gflags_completions.cc
third_party/gflags/gflags_reporting.cc
third_party/ldl/Source/ldl.c
third_party/ssba/Geometry/v3d_metricbundle.cpp
third_party/ssba/Math/v3d_optimization.cpp
libmv-capi.h
libmv/base/id_generator.h
@ -105,6 +98,7 @@ set(SRC
libmv/multiview/homography.h
libmv/multiview/homography_parameterization.h
libmv/multiview/nviewtriangulation.h
libmv/multiview/panography.h
libmv/multiview/projection.h
libmv/multiview/resection.h
libmv/multiview/triangulation.h
@ -123,7 +117,6 @@ set(SRC
libmv/simple_pipeline/pipeline.h
libmv/simple_pipeline/reconstruction.h
libmv/simple_pipeline/resect.h
libmv/simple_pipeline/rigid_registration.h
libmv/simple_pipeline/tracks.h
libmv/tracking/brute_region_tracker.h
libmv/tracking/esm_region_tracker.h
@ -143,16 +136,8 @@ set(SRC
third_party/gflags/gflags/gflags.h
third_party/gflags/mutex.h
third_party/gflags/util.h
third_party/ldl/Include/ldl.h
third_party/msinttypes/inttypes.h
third_party/msinttypes/stdint.h
third_party/ssba/Geometry/v3d_cameramatrix.h
third_party/ssba/Geometry/v3d_distortion.h
third_party/ssba/Geometry/v3d_metricbundle.h
third_party/ssba/Math/v3d_linear.h
third_party/ssba/Math/v3d_linear_utils.h
third_party/ssba/Math/v3d_mathutilities.h
third_party/ssba/Math/v3d_optimization.h
)
if(WIN32)
@ -230,7 +215,6 @@ else()
endif()
add_definitions(
-DV3DLIB_ENABLE_SUITESPARSE
-DGOOGLE_GLOG_DLL_DECL=
)

extern/libmv/ChangeLog (vendored, 430 changed lines)

@ -1,3 +1,256 @@
commit 473996468a4e67e7c860169181a4ff31ce9b8c80
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Mar 1 17:44:54 2013 +0600
Fixed incorrect order of arguments passing
to EXPECT_EQ in keyframe selection tests.
commit d38ebb74693fdf5b8f0fecf62a3d8c9c53b0b84a
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Mar 1 17:40:38 2013 +0600
Modal (aka tripod) solver rework
Several major things are done in this commit:
- First of all, logic of modal solver was changed.
We do not rely on only minimizer to take care of
guessing rotation for frame, but we're using
analytical rotation computation for point clouds
to obtain initial rotation.
Then this rotation is being refined using Ceres
minimizer and now instead of minimizing average
distance between points of point of two clouds,
minimization of reprojection error of point
cloud onto frame happens.
This gives quite a bit of precision improvement.
- Second bigger improvement here is using bundle
adjustment for a result of first step when we're
only estimating rotation between neighbor images
and reprojecting markers.
This averages error across the image sequence
avoiding error accumulation. Also, this will
tweak bundles themselves a bit for better match.
- And last bigger improvement here is support of
camera intrinsics refinement.
This allowed significantly improving the solution
for real-life footage, and results after such
refinement are much more usable than they were before.
Thanks to Keir for the help and code review!
commit 5d6c2e7a27bdd1a1b23bf289d70a9b8f62514c9a
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Mar 1 17:37:35 2013 +0600
Increase verbosity level for reprojected markers info
This information is useful, but in cases when you, say,
working on a bundler it's annoying to scroll all the
information up.
commit ac252bb1250b3028b9c94736b644e7ab4e7b14b8
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Mar 1 17:36:19 2013 +0600
Move radial distortion code to own templated function
This shall not lead to any functional changes, just
avoids radial distortion code duplicated in camera
intrinsics and bundling code.
For fancier bundle adjustment support of different
distortion models this is not actually enough and
would need to make some bigger changes, but this
change makes the code a bit easier to maintain already.
commit c253b794612dd529e1d3a9bd7a7c41c32c9a9abb
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Mar 1 17:33:27 2013 +0600
Use threaded cost function, jacobian and linear solver
computation, so bundling is as fast as it could be with
current parameter block structure.
commit 931fe37a10212b91b525d4f6eb753990a338b471
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Mar 1 17:29:21 2013 +0600
Fixed comment for euclidean bundling,
which now supports radial bundling independently
from other intrinsics.
commit 217d8e6edc3de1a853fb84275d2d2dd898e7529c
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Tue Feb 26 18:19:01 2013 +0600
Allow K1,K2 refinement combination
It is now possible to refine only radial distortion
with new Ceres based bundler and this new combination
is already used in Blender.
commit d8850addc944d400f7a9c358396c437d9e4acc70
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Tue Feb 26 18:17:09 2013 +0600
Switch euclidean intersection code to use Ceres
Would not expect any significant changes in solver
behavior, but it could be more accurate in some cases.
Switching projective intersection to ceres is marked
as a TODO for now.
commit 6990b7946ec96b3cb2dcfc8a1beaaba9538b0802
Author: Keir Mierle <mierle@gmail.com>
Date: Mon Feb 25 20:00:48 2013 +0000
Switch motion tracker bundle adjustment to Ceres.
Patch originally written by me, then finished by Sergey. Big
thanks to Sergey for troopering through and fixing the many issues
with my original (not compilable) patch.
The Ceres implementation uses 2 parameter blocks for each camera
(1 for rotation and 1 for translation), 1 parameter block for
common intrinsics (focal length etc) and 1 parameter block for
each track (e.g. bundle or 3D point).
We turn on some fancy optimizer options to get better performance,
in particular:
options.preconditioner_type = ceres::SCHUR_JACOBI;
options.linear_solver_type = ceres::ITERATIVE_SCHUR;
options.use_inner_iterations = true;
options.use_nonmonotonic_steps = true;
options.max_num_iterations = 100;
Special thanks to Sameer Agarwal of Ceres fame for splitting out
the SCHUR_JACOBI preconditioner so that it didn't depend on
CHOLMOD. Previously we could not use that preconditioner in
Blender because CHOLMOD is too large of a dependency for Blender.
BundleIntrinsicsLogMessage:
- Moved bunch of if(foo) LG << "bar" into this function, to make
EuclideanBundleCommonIntrinsics a little bit easier to follow.
EuclideanBundle:
- Fix RMSE logging.
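For context, a minimal sketch of a Ceres solve configured with the options quoted above (the problem construction is omitted; this is not the libmv bundling code itself):
#include <ceres/ceres.h>

// Run a solve with the optimizer options listed in the commit message.
void SolveWithTrackerOptions(ceres::Problem *problem) {
  ceres::Solver::Options options;
  options.preconditioner_type = ceres::SCHUR_JACOBI;
  options.linear_solver_type = ceres::ITERATIVE_SCHUR;
  options.use_inner_iterations = true;
  options.use_nonmonotonic_steps = true;
  options.max_num_iterations = 100;

  ceres::Solver::Summary summary;
  ceres::Solve(options, problem, &summary);
}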
commit 1696342954614b54133780d74d6ee0fbcbe224f0
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Tue Feb 26 18:10:33 2013 +0600
Upgrade ceres to latest upstream version
This is needed because of some features of new Ceres
for further development.
commit 575336f794841ada90aacd783285014081b8318c
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Mon Jan 7 15:58:40 2013 +0600
Fixes for keyframe selection
- Calculate residuals for GRIC in pixel space rather than
in normalized space.
This seems to be how it's intended to be used.
Algebraic H and F will still use normalized coordinates which
are more stable, after this matrices are converted to pixel
space and Ceres refinement happens in pixel space.
- Standard deviation calculation was wrong in GRIC. It shouldn't
be deviation of residuals, but as per Torr it should be deviation
of measurement error, which is constant (in our case 0.1)
Not sure if using squared cost function is correct for GRIC,
but cost function is indeed squared and in most papers cost
function is used for GRIC. After some further tests we could
switch GRIC residuals to non-squared distance.
- Bring back rho part of GRIC, in unit tests it doesn't make
sense whether it's enabled or not, lets see how it'll behave
in real-life footage.
- Added one more unit test based on elevator scene and manual
keyframe selection.
commit 24117f3c3fc5531beb6497d79bb6f1780a998081
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Sun Jan 6 19:07:06 2013 +0600
Added test for keyframe selection based on manual selection
Additional changes:
- Reduce minimal correspondence to match real-world manually
tracked footage
- Returned back squares to SymmetricEpipolarDistance and
SymmetricGeometricDistance -- this is actually a cost
functions, not distances and they shall be squared.
commit 770eb0293b881c4c419c587a6cdb062c47ab6e44
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Dec 21 00:43:30 2012 +0600
Improvements for keyframe selection
- Changed the main keyframe selection cycle, so in cases where no
further keyframe for the current keyframe could be found in the
image sequence, the current keyframe is moved forward and the
search continues.
This helps in cases when there's poor motion in the beginning
of the sequence and markers then become occluded. There could be
good keyframes in the middle of the shot still.
- Extended keyframe_selection_test with real world cases.
- Moved correspondences constraint to the top, so no H/F estimation
happens if there's bad correspondence. Makes algorithm go a bit
faster.
Strangely, using non-squared distances makes the neighbor frames
test fail, while using squared distances makes this test pass.
However, using non-squared distances seems to work better
in the real tests I've been doing. So this requires more investigation.
commit 7415c62fbda36c5bd1c291bc94d535a66da896d0
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Dec 20 18:46:09 2012 +0600
Cosmetic change to correspondences reports in keyframe selection
commit ceaf80c987ec0338e7e83965bc808411453eb755
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Dec 20 18:08:03 2012 +0600
Various fixes:
- There was a typo in the symmetric geometric cost functor, which
computed inverse distance in a wrong way.
- Fixed compilation of unit tests
- Added simple test for keyframe selection. Currently only
covers case that neighbor frames with only translation
(homography should be better than fundamental) are not
considered a keyframes.
It still needs to be investigated why it only works if tracks
are in pixel space and why it doesn't work in normalized space.
commit cfabdfe48df2add3d1f30cf4370efd0b31990ab0
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Dec 20 05:46:53 2012 +0600
@ -545,180 +798,3 @@ Date: Thu Apr 12 13:56:02 2012 +0600
It projects markers onto sphere and uses rigid registration of rotation to
find rotation angles which makes bundles from previous and current frame be
as closest as it's possible.
commit fa3842e472e3b9c789e47bf6d8f592aa40a84f16
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Apr 12 12:32:48 2012 +0600
implementation of some basic algorithms for point cloud orientation:
- Implementation of rigid registration algorithm which searches transformation
from one point cloud to another assuming that points in these clouds are
already paired (points with the same index in different clouds belong to
the same pair) which minimizes average distance between points in pairs.
Algorithm uses Levenberg-Marquardt solver to find such transformation.
Supports registration of rotation-scale-transform (which is probably most
common usage) and rotation only (which might be useful for basic modal
tripod solver).
- Implementation of Iterative-Point-Clouds algorithm which searches
transformation from one arbitrary point cloud to another making
points as close to each other as possible.
This algorithm doesn't require points to be initially paired, but for
a good result the clouds should have a rough initial orientation. If they're
arbitrarily oriented from the very beginning, the algorithm might fail
to produce a good result.
Iteration is based on building pairs of closest to each other points
and registering rigid transformation between them which incrementally
constructs final result.
TODO: building pairs might be sped up a lot using data structures like
AABB trees, K-D trees or so.
commit 9618d9a1d48bb3c28da605d9027f57a74f462785
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Apr 11 14:17:14 2012 +0600
Added configuration file for glog to compile smooth on Hurd platform.
Patch by Pino Toscano <pino@debian.org>, thanks!
commit 63b2bd20237c8599fa73ce42556e4fb99b9f7cca
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Thu Mar 22 17:03:34 2012 +0600
Trackers refactoring:
- Deduplicate pattern sampling used in esm and lmicklt trackers
and move SamplePattern to image/sample.h
- Move computation of Pearson product-moment correlation into
own function in new file image/correlation.h so all trackers
can use it to check final correlation.
- Remove SAD tracker. It's almost the same as brute tracker,
with only two differences:
1. It does brute search of affine transformation which in some cases
helps to track rotating features
2. It didn't use common tracker api which probably gave some speed
advantage, but led to a real headache when using it together with
other trackers, leading to duplicated code in 3rd-party software.
commit 9fe49c32e990f28c83f2bbb1d18057aed8879af7
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Mon Mar 12 09:36:15 2012 +0600
Code cleanup: convert line endings to Unix style (native on my platform) so it
wouldn't confuse other versioning systems used for projects where libmv is bundled into.
Also change mode to +x for glog's windows-related script.
commit fe74ae2b53769389b0ed9d7e604c8e60be81077d
Author: Sergey I. Sharybin <g.ulairi@gmail.com>
Date: Sun Mar 11 20:34:15 2012 +0600
Replace "third_party/glog/src/glog/logging.h" with <glog/logging.h>
It was needed because of how the build system is set up in Blender, but I think
this will be a helpful change for other applications too because it makes
it easier to move libraries around and even use libraries installed
on the operating system.
commit 37fc726701479f2d321d6af878fa93f3176278d5
Author: Sergey I. Sharybin <g.ulairi@gmail.com>
Date: Sun Mar 11 19:27:41 2012 +0600
Upgrade gflags and glog libraries - stage 2 (final)
Changes to upstream code which are needed to make libmv compile smooth on all platforms
* Replace <gflags/gflags.h> with "third_party/gflags/gflags/gflags.h" which is easier
to setup things in libmv and also helps with setting up building libmv into
external applications.
* Replace "glog/logging.h" and "glog/logging.h" with <glog/logging.h> and <glog/logging.h>
which is needed on Windows platform because otherwise files like logging.cc will be using
relative path which points to headers used by linux instead of headers need to be used
on Windows.
* Replace _asm int 3 with __debugbreak(). Such assembler code is obsolete and doesn't work
with 64bit versions of MSVC compilers.
* Do not use stacktrace for MinGW and FreeBSD because it leads to issues accessing
some specific data on these platforms.
* Define HAVE_LIB_GFLAGS for Windows builds.
* Do not define __declspec(dllimport) for MinGW platforms.
* Setup proper includes and datatypes for int32, uint32, int64 and uint64 for MinGW
* Do not define va_copy for MinGW platforms (it's already defined there).
* Patch localtime_r to be working fine with MinGW, disable strerror_r for MinGW because
of lack of needed functions.
commit 8ed07abfa49d1e0511752021c972e0715e5a1383
Author: Sergey I. Sharybin <g.ulairi@gmail.com>
Date: Sun Mar 11 19:06:33 2012 +0600
Upgrade gflags and glog libraries - stage 1
This commit copies sources from latest original release of gflags and glog
over currently bundled versions of this libraries without any modifications.
This revision can't be compiled; all changes needed to make the new libraries work
fine will be done with the next commit, to make it clear which changes were necessary
for easier bundling of further newer versions and extracting patches to send to the
gflags/glog upstream repos.
Such upgrade of libraries is needed to make it able to compile libmv
with clang compilers. Currently used versions:
- gflags is version 2.0
- glog is version 0.3.2
commit 75b9af405964ff2c7d3f0a44500e27e63b37c91b
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Feb 17 23:29:11 2012 +0600
_USE_MATH_DEFINES is needed to define constants like M_E when building with msvc
It was accidentally removed, but a comment about this is now added so hopefully it
won't be removed again.
commit f85b1232a9b929f69443b5eed6e7a39908cd6551
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Feb 17 21:34:40 2012 +0600
Picky edit: corrected mode for ssba readme file.
commit f8c2b223f01551fd81a85f6d5221646165147035
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Feb 17 21:32:05 2012 +0600
Picky edits: corrected EOL
commit 3f2a4205ec5adadcdfa306b161c705c868a7be93
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Feb 17 21:30:07 2012 +0600
Fixed incorrect access to ucontext on linux. Caused by incorrect merge conflict resolve.
commit d360a21a5aa125cf9e83dd26b302508688ff7007
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Feb 17 20:54:13 2012 +0600
More Windows -> Unix EOL conversions
commit 18aeda58bec9556140ba617724e31ada6f5b67c0
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Feb 17 20:15:42 2012 +0600
Looks like this debug output was removed accidentally.
commit 189dc0cacdee3c1eab68c43263ecb038ed244c09
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Feb 17 20:11:56 2012 +0600
Made V3D verbose again by default
commit 8b3422d3eec5e450d76243886bf07fb0a3e83a81
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Fri Feb 17 20:08:01 2012 +0600
SAD tracker can now deal with patterns of any size.
Very quick implementation came from Blender before Hybrid tracker was added.
Better to be replaced with brute tracker.

@ -11,7 +11,6 @@ Import('env')
defs = []
defs.append('V3DLIB_ENABLE_SUITESPARSE')
defs.append('GOOGLE_GLOG_DLL_DECL=')
src = env.Glob("*.cpp")
@ -22,9 +21,6 @@ src += env.Glob('libmv/simple_pipeline/*.cc')
src += env.Glob('libmv/tracking/*.cc')
src += env.Glob('third_party/fast/*.c')
src += env.Glob('third_party/gflags/*.cc')
src += env.Glob('third_party/ldl/Source/*.c')
src += env.Glob('third_party/ssba/Geometry/*.cpp')
src += env.Glob('third_party/ssba/Math/*.cpp')
incs = '. ../Eigen3 third_party/ceres/include'
incs += ' ' + env['BF_PNG_INC']
@ -41,8 +37,6 @@ else:
src += env.Glob("third_party/glog/src/*.cc")
incs += ' ./third_party/glog/src'
incs += ' ./third_party/ssba ./third_party/ldl/Include ../colamd/Include'
env.BlenderLib ( libname = 'extern_libmv', sources=src, includes=Split(incs), defines=defs, libtype=['extern', 'player'], priority=[20,137] )
SConscript(['third_party/SConscript'])

@ -124,14 +124,11 @@ cat > CMakeLists.txt << EOF
set(INC
.
../colamd/Include
third_party/ceres/include
)
set(INC_SYS
../Eigen3
third_party/ssba
third_party/ldl/Include
\${PNG_INCLUDE_DIR}
\${ZLIB_INCLUDE_DIRS}
)
@ -197,7 +194,6 @@ ${third_glog_headers}
endif()
add_definitions(
-DV3DLIB_ENABLE_SUITESPARSE
-DGOOGLE_GLOG_DLL_DECL=
)
@ -220,7 +216,6 @@ Import('env')
defs = []
defs.append('V3DLIB_ENABLE_SUITESPARSE')
defs.append('GOOGLE_GLOG_DLL_DECL=')
src = env.Glob("*.cpp")
@ -241,8 +236,6 @@ else:
src += env.Glob("third_party/glog/src/*.cc")
incs += ' ./third_party/glog/src'
incs += ' ./third_party/ssba ./third_party/ldl/Include ../colamd/Include'
env.BlenderLib ( libname = 'extern_libmv', sources=src, includes=Split(incs), defines=defs, libtype=['extern', 'player'], priority=[20,137] )
SConscript(['third_party/SConscript'])

@ -21,6 +21,7 @@ libmv/multiview/homography.cc
libmv/multiview/homography.h
libmv/multiview/homography_parameterization.h
libmv/multiview/nviewtriangulation.h
libmv/multiview/panography.h
libmv/multiview/projection.cc
libmv/multiview/projection.h
libmv/multiview/resection.h
@ -53,8 +54,6 @@ libmv/simple_pipeline/reconstruction.cc
libmv/simple_pipeline/reconstruction.h
libmv/simple_pipeline/resect.cc
libmv/simple_pipeline/resect.h
libmv/simple_pipeline/rigid_registration.cc
libmv/simple_pipeline/rigid_registration.h
libmv/simple_pipeline/tracks.cc
libmv/simple_pipeline/tracks.h
libmv/tracking/brute_region_tracker.cc
@ -142,25 +141,6 @@ third_party/glog/src/windows/glog/vlog_is_on.h
third_party/glog/src/windows/port.cc
third_party/glog/src/windows/port.h
third_party/glog/src/windows/preprocess.sh
third_party/ldl/CMakeLists.txt
third_party/ldl/Doc/ChangeLog
third_party/ldl/Doc/lesser.txt
third_party/ldl/Include/ldl.h
third_party/ldl/README.libmv
third_party/ldl/README.txt
third_party/ldl/Source/ldl.c
third_party/msinttypes/inttypes.h
third_party/msinttypes/README.libmv
third_party/msinttypes/stdint.h
third_party/ssba/COPYING.TXT
third_party/ssba/Geometry/v3d_cameramatrix.h
third_party/ssba/Geometry/v3d_distortion.h
third_party/ssba/Geometry/v3d_metricbundle.cpp
third_party/ssba/Geometry/v3d_metricbundle.h
third_party/ssba/Math/v3d_linear.h
third_party/ssba/Math/v3d_linear_utils.h
third_party/ssba/Math/v3d_mathutilities.h
third_party/ssba/Math/v3d_optimization.cpp
third_party/ssba/Math/v3d_optimization.h
third_party/ssba/README.libmv
third_party/ssba/README.TXT

@ -34,21 +34,8 @@
#include "libmv-capi.h"
#include "third_party/gflags/gflags/gflags.h"
#include "glog/logging.h"
#include "libmv/logging/logging.h"
#include "Math/v3d_optimization.h"
#include "libmv/numeric/numeric.h"
#include "libmv/tracking/esm_region_tracker.h"
#include "libmv/tracking/brute_region_tracker.h"
#include "libmv/tracking/hybrid_region_tracker.h"
#include "libmv/tracking/klt_region_tracker.h"
#include "libmv/tracking/trklt_region_tracker.h"
#include "libmv/tracking/lmicklt_region_tracker.h"
#include "libmv/tracking/pyramid_region_tracker.h"
#include "libmv/tracking/track_region.h"
#include "libmv/simple_pipeline/callbacks.h"
@ -58,7 +45,6 @@
#include "libmv/simple_pipeline/detect.h"
#include "libmv/simple_pipeline/pipeline.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/rigid_registration.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include <stdlib.h>
@ -96,7 +82,6 @@ void libmv_initLogging(const char *argv0)
google::SetCommandLineOption("v", "0");
google::SetCommandLineOption("stderrthreshold", "7");
google::SetCommandLineOption("minloglevel", "7");
V3D::optimizerVerbosenessLevel = 0;
}
void libmv_startDebugLogging(void)
@ -105,7 +90,6 @@ void libmv_startDebugLogging(void)
google::SetCommandLineOption("v", "2");
google::SetCommandLineOption("stderrthreshold", "1");
google::SetCommandLineOption("minloglevel", "0");
V3D::optimizerVerbosenessLevel = 1;
}
void libmv_setLoggingVerbosity(int verbosity)
@ -114,54 +98,9 @@ void libmv_setLoggingVerbosity(int verbosity)
snprintf(val, sizeof(val), "%d", verbosity);
google::SetCommandLineOption("v", val);
V3D::optimizerVerbosenessLevel = verbosity;
}
/* ************ RegionTracker ************ */
libmv_RegionTracker *libmv_pyramidRegionTrackerNew(int max_iterations, int pyramid_level, int half_window_size, double minimum_correlation)
{
libmv::EsmRegionTracker *esm_region_tracker = new libmv::EsmRegionTracker;
esm_region_tracker->half_window_size = half_window_size;
esm_region_tracker->max_iterations = max_iterations;
esm_region_tracker->min_determinant = 1e-4;
esm_region_tracker->minimum_correlation = minimum_correlation;
libmv::PyramidRegionTracker *pyramid_region_tracker =
new libmv::PyramidRegionTracker(esm_region_tracker, pyramid_level);
return (libmv_RegionTracker *)pyramid_region_tracker;
}
libmv_RegionTracker *libmv_hybridRegionTrackerNew(int max_iterations, int half_window_size, double minimum_correlation)
{
libmv::EsmRegionTracker *esm_region_tracker = new libmv::EsmRegionTracker;
esm_region_tracker->half_window_size = half_window_size;
esm_region_tracker->max_iterations = max_iterations;
esm_region_tracker->min_determinant = 1e-4;
esm_region_tracker->minimum_correlation = minimum_correlation;
libmv::BruteRegionTracker *brute_region_tracker = new libmv::BruteRegionTracker;
brute_region_tracker->half_window_size = half_window_size;
/* do not use correlation check for brute checker itself,
* this check will happen in esm tracker */
brute_region_tracker->minimum_correlation = 0.0;
libmv::HybridRegionTracker *hybrid_region_tracker =
new libmv::HybridRegionTracker(brute_region_tracker, esm_region_tracker);
return (libmv_RegionTracker *)hybrid_region_tracker;
}
libmv_RegionTracker *libmv_bruteRegionTrackerNew(int half_window_size, double minimum_correlation)
{
libmv::BruteRegionTracker *brute_region_tracker = new libmv::BruteRegionTracker;
brute_region_tracker->half_window_size = half_window_size;
brute_region_tracker->minimum_correlation = minimum_correlation;
return (libmv_RegionTracker *)brute_region_tracker;
}
/* ************ Utility ************ */
static void floatBufToImage(const float *buf, int width, int height, int channels, libmv::FloatImage *image)
{
@ -303,7 +242,7 @@ static void saveBytesImage(const char *prefix, unsigned char *data, int width, i
}
{
static int a= 0;
static int a = 0;
char buf[128];
snprintf(buf, sizeof(buf), "%s_%02d.png", prefix, ++a);
savePNGImage(row_pointers, width, height, 8, PNG_COLOR_TYPE_RGBA, buf);
@ -316,43 +255,6 @@ static void saveBytesImage(const char *prefix, unsigned char *data, int width, i
}
#endif
int libmv_regionTrackerTrack(libmv_RegionTracker *libmv_tracker, const float *ima1, const float *ima2,
int width, int height, double x1, double y1, double *x2, double *y2)
{
libmv::RegionTracker *region_tracker = (libmv::RegionTracker *)libmv_tracker;
libmv::FloatImage old_patch, new_patch;
floatBufToImage(ima1, width, height, 1, &old_patch);
floatBufToImage(ima2, width, height, 1, &new_patch);
#if !defined(DUMP_FAILURE) && !defined(DUMP_ALWAYS)
return region_tracker->Track(old_patch, new_patch, x1, y1, x2, y2);
#else
{
/* double sx2 = *x2, sy2 = *y2; */
int result = region_tracker->Track(old_patch, new_patch, x1, y1, x2, y2);
#if defined(DUMP_ALWAYS)
{
#else
if (!result) {
#endif
saveImage("old_patch", old_patch, x1, y1);
saveImage("new_patch", new_patch, *x2, *y2);
}
return result;
}
#endif
}
void libmv_regionTrackerDestroy(libmv_RegionTracker *libmv_tracker)
{
libmv::RegionTracker *region_tracker= (libmv::RegionTracker *)libmv_tracker;
delete region_tracker;
}
/* ************ Planar tracker ************ */
/* TrackRegion (new planar tracker) */
@ -523,12 +425,15 @@ int libmv_refineParametersAreValid(int parameters) {
LIBMV_REFINE_RADIAL_DISTORTION_K1 |
LIBMV_REFINE_RADIAL_DISTORTION_K2)) ||
(parameters == (LIBMV_REFINE_FOCAL_LENGTH |
LIBMV_REFINE_RADIAL_DISTORTION_K1));
LIBMV_REFINE_RADIAL_DISTORTION_K1)) ||
(parameters == (LIBMV_REFINE_RADIAL_DISTORTION_K1 |
LIBMV_REFINE_RADIAL_DISTORTION_K2));
}
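An illustrative check of the newly accepted flag combination (assuming the LIBMV_REFINE_* defines and this function are declared in libmv-capi.h):
#include <assert.h>
#include "libmv-capi.h"

// Refining only the two radial distortion coefficients is now considered valid.
static void check_new_refine_combination(void)
{
  int parameters = LIBMV_REFINE_RADIAL_DISTORTION_K1 |
                   LIBMV_REFINE_RADIAL_DISTORTION_K2;

  assert(libmv_refineParametersAreValid(parameters));
}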
static void libmv_solveRefineIntrinsics(libmv::Tracks *tracks, libmv::CameraIntrinsics *intrinsics,
libmv::EuclideanReconstruction *reconstruction, int refine_intrinsics,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata)
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata,
int bundle_constraints = libmv::BUNDLE_NO_CONSTRAINTS)
{
/* only a few combinations are supported but trust the caller */
int libmv_refine_flags = 0;
@ -549,43 +454,84 @@ static void libmv_solveRefineIntrinsics(libmv::Tracks *tracks, libmv::CameraIntr
progress_update_callback(callback_customdata, 1.0, "Refining solution");
libmv::EuclideanBundleCommonIntrinsics(*(libmv::Tracks *)tracks, libmv_refine_flags,
reconstruction, intrinsics);
reconstruction, intrinsics, bundle_constraints);
}
libmv_Reconstruction *libmv_solveReconstruction(libmv_Tracks *tracks, int keyframe1, int keyframe2,
int refine_intrinsics, double focal_length, double principal_x, double principal_y,
double k1, double k2, double k3, struct libmv_reconstructionOptions *options,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata)
static void cameraIntrinsicsFromOptions(libmv::CameraIntrinsics *camera_intrinsics,
libmv_cameraIntrinsicsOptions *camera_intrinsics_options)
{
camera_intrinsics->SetFocalLength(camera_intrinsics_options->focal_length,
camera_intrinsics_options->focal_length);
camera_intrinsics->SetPrincipalPoint(camera_intrinsics_options->principal_point_x,
camera_intrinsics_options->principal_point_y);
camera_intrinsics->SetRadialDistortion(camera_intrinsics_options->k1,
camera_intrinsics_options->k2,
camera_intrinsics_options->k3);
camera_intrinsics->SetImageSize(camera_intrinsics_options->image_width,
camera_intrinsics_options->image_height);
}
static libmv::Tracks getNormalizedTracks(libmv::Tracks *tracks, libmv::CameraIntrinsics *camera_intrinsics)
{
libmv::vector<libmv::Marker> markers = tracks->AllMarkers();
for (int i = 0; i < markers.size(); ++i) {
camera_intrinsics->InvertIntrinsics(markers[i].x, markers[i].y,
&(markers[i].x), &(markers[i].y));
}
return libmv::Tracks(markers);
}
static void finishReconstruction(libmv::Tracks *tracks, libmv::CameraIntrinsics *camera_intrinsics,
libmv_Reconstruction *libmv_reconstruction,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata)
{
/* Invert the camera intrinsics. */
libmv::vector<libmv::Marker> markers = ((libmv::Tracks*)tracks)->AllMarkers();
libmv_Reconstruction *libmv_reconstruction = new libmv_Reconstruction();
libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
libmv::CameraIntrinsics *intrinsics = &libmv_reconstruction->intrinsics;
libmv::ReconstructionOptions reconstruction_options;
/* reprojection error calculation */
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = *tracks;
libmv_reconstruction->error = libmv::EuclideanReprojectionError(*tracks, *reconstruction, *camera_intrinsics);
}
libmv_Reconstruction *libmv_solveReconstruction(libmv_Tracks *libmv_tracks,
libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options,
libmv_reconstructionOptions *libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata)
{
libmv_Reconstruction *libmv_reconstruction = new libmv_Reconstruction();
libmv::Tracks *tracks = ((libmv::Tracks *) libmv_tracks);
libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
libmv::CameraIntrinsics *camera_intrinsics = &libmv_reconstruction->intrinsics;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
intrinsics->SetFocalLength(focal_length, focal_length);
intrinsics->SetPrincipalPoint(principal_x, principal_y);
intrinsics->SetRadialDistortion(k1, k2, k3);
cameraIntrinsicsFromOptions(camera_intrinsics, libmv_camera_intrinsics_options);
reconstruction_options.success_threshold = options->success_threshold;
reconstruction_options.use_fallback_reconstruction = options->use_fallback_reconstruction;
/* Invert the camera intrinsics */
libmv::Tracks normalized_tracks = getNormalizedTracks(tracks, camera_intrinsics);
for (int i = 0; i < markers.size(); ++i) {
intrinsics->InvertIntrinsics(markers[i].x,
markers[i].y,
&(markers[i].x),
&(markers[i].y));
}
/* actual reconstruction */
libmv::ReconstructionOptions reconstruction_options;
reconstruction_options.success_threshold = libmv_reconstruction_options->success_threshold;
reconstruction_options.use_fallback_reconstruction = libmv_reconstruction_options->use_fallback_reconstruction;
libmv::Tracks normalized_tracks(markers);
int keyframe1 = libmv_reconstruction_options->keyframe1,
keyframe2 = libmv_reconstruction_options->keyframe2;
LG << "frames to init from: " << keyframe1 << " " << keyframe2;
libmv::vector<libmv::Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
LG << "number of markers for init: " << keyframe_markers.size();
update_callback.invoke(0, "Initial reconstruction");
@ -595,49 +541,61 @@ libmv_Reconstruction *libmv_solveReconstruction(libmv_Tracks *tracks, int keyfra
libmv::EuclideanCompleteReconstruction(reconstruction_options, normalized_tracks,
reconstruction, &update_callback);
if (refine_intrinsics) {
libmv_solveRefineIntrinsics((libmv::Tracks *)tracks, intrinsics, reconstruction,
refine_intrinsics, progress_update_callback, callback_customdata);
/* refinement */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics((libmv::Tracks *)tracks, camera_intrinsics, reconstruction,
libmv_reconstruction_options->refine_intrinsics,
progress_update_callback, callback_customdata);
}
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = *(libmv::Tracks *)tracks;
libmv_reconstruction->error = libmv::EuclideanReprojectionError(*(libmv::Tracks *)tracks, *reconstruction, *intrinsics);
/* finish reconstruction */
finishReconstruction(tracks, camera_intrinsics, libmv_reconstruction,
progress_update_callback, callback_customdata);
return (libmv_Reconstruction *)libmv_reconstruction;
}
struct libmv_Reconstruction *libmv_solveModal(struct libmv_Tracks *tracks, double focal_length,
double principal_x, double principal_y, double k1, double k2, double k3,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata)
struct libmv_Reconstruction *libmv_solveModal(struct libmv_Tracks *libmv_tracks,
libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options,
libmv_reconstructionOptions *libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata)
{
/* Invert the camera intrinsics. */
libmv::vector<libmv::Marker> markers = ((libmv::Tracks*)tracks)->AllMarkers();
libmv_Reconstruction *libmv_reconstruction = new libmv_Reconstruction();
libmv::Tracks *tracks = ((libmv::Tracks *) libmv_tracks);
libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
libmv::CameraIntrinsics *intrinsics = &libmv_reconstruction->intrinsics;
libmv::CameraIntrinsics *camera_intrinsics = &libmv_reconstruction->intrinsics;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
intrinsics->SetFocalLength(focal_length, focal_length);
intrinsics->SetPrincipalPoint(principal_x, principal_y);
intrinsics->SetRadialDistortion(k1, k2, k3);
cameraIntrinsicsFromOptions(camera_intrinsics, libmv_camera_intrinsics_options);
for (int i = 0; i < markers.size(); ++i) {
intrinsics->InvertIntrinsics(markers[i].x,
markers[i].y,
&(markers[i].x),
&(markers[i].y));
}
libmv::Tracks normalized_tracks(markers);
/* Invert the camera intrinsics. */
libmv::Tracks normalized_tracks = getNormalizedTracks(tracks, camera_intrinsics);
/* Actual reconstruction. */
libmv::ModalSolver(normalized_tracks, reconstruction, &update_callback);
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = *(libmv::Tracks *)tracks;
libmv_reconstruction->error = libmv::EuclideanReprojectionError(*(libmv::Tracks *)tracks, *reconstruction, *intrinsics);
libmv::CameraIntrinsics empty_intrinsics;
libmv::EuclideanBundleCommonIntrinsics(normalized_tracks,
libmv::BUNDLE_NO_INTRINSICS,
reconstruction,
&empty_intrinsics,
libmv::BUNDLE_NO_TRANSLATION);
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics((libmv::Tracks *)tracks, camera_intrinsics, reconstruction,
libmv_reconstruction_options->refine_intrinsics,
progress_update_callback, callback_customdata,
libmv::BUNDLE_NO_TRANSLATION);
}
/* Finish reconstruction. */
finishReconstruction(tracks, camera_intrinsics, libmv_reconstruction,
progress_update_callback, callback_customdata);
return (libmv_Reconstruction *)libmv_reconstruction;
}
@ -863,17 +821,13 @@ struct libmv_CameraIntrinsics *libmv_ReconstructionExtractIntrinsics(struct libm
return (struct libmv_CameraIntrinsics *)&libmv_Reconstruction->intrinsics;
}
struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsNew(double focal_length, double principal_x, double principal_y,
double k1, double k2, double k3, int width, int height)
struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsNew(libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options)
{
libmv::CameraIntrinsics *intrinsics= new libmv::CameraIntrinsics();
libmv::CameraIntrinsics *camera_intrinsics = new libmv::CameraIntrinsics();
intrinsics->SetFocalLength(focal_length, focal_length);
intrinsics->SetPrincipalPoint(principal_x, principal_y);
intrinsics->SetRadialDistortion(k1, k2, k3);
intrinsics->SetImageSize(width, height);
cameraIntrinsicsFromOptions(camera_intrinsics, libmv_camera_intrinsics_options);
return (struct libmv_CameraIntrinsics *) intrinsics;
return (struct libmv_CameraIntrinsics *) camera_intrinsics;
}
struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsCopy(struct libmv_CameraIntrinsics *libmvIntrinsics)
@ -891,40 +845,63 @@ void libmv_CameraIntrinsicsDestroy(struct libmv_CameraIntrinsics *libmvIntrinsic
delete intrinsics;
}
void libmv_CameraIntrinsicsUpdate(struct libmv_CameraIntrinsics *libmvIntrinsics, double focal_length,
double principal_x, double principal_y, double k1, double k2, double k3, int width, int height)
void libmv_CameraIntrinsicsUpdate(struct libmv_CameraIntrinsics *libmv_intrinsics,
libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options)
{
libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
if (intrinsics->focal_length() != focal_length)
intrinsics->SetFocalLength(focal_length, focal_length);
double focal_length = libmv_camera_intrinsics_options->focal_length;
double principal_x = libmv_camera_intrinsics_options->principal_point_x;
double principal_y = libmv_camera_intrinsics_options->principal_point_y;
double k1 = libmv_camera_intrinsics_options->k1;
double k2 = libmv_camera_intrinsics_options->k2;
double k3 = libmv_camera_intrinsics_options->k3;
int image_width = libmv_camera_intrinsics_options->image_width;
int image_height = libmv_camera_intrinsics_options->image_height;
if (intrinsics->principal_point_x() != principal_x || intrinsics->principal_point_y() != principal_y)
intrinsics->SetPrincipalPoint(principal_x, principal_y);
/* try to avoid unnecessary updates so pre-computed distortion grids are not freed */
if (intrinsics->k1() != k1 || intrinsics->k2() != k2 || intrinsics->k3() != k3)
intrinsics->SetRadialDistortion(k1, k2, k3);
if (camera_intrinsics->focal_length() != focal_length)
camera_intrinsics->SetFocalLength(focal_length, focal_length);
if (intrinsics->image_width() != width || intrinsics->image_height() != height)
intrinsics->SetImageSize(width, height);
if (camera_intrinsics->principal_point_x() != principal_x ||
camera_intrinsics->principal_point_y() != principal_y)
{
camera_intrinsics->SetPrincipalPoint(principal_x, principal_y);
}
if (camera_intrinsics->k1() != k1 ||
camera_intrinsics->k2() != k2 ||
camera_intrinsics->k3() != k3)
{
camera_intrinsics->SetRadialDistortion(k1, k2, k3);
}
if (camera_intrinsics->image_width() != image_width ||
camera_intrinsics->image_height() != image_height)
{
camera_intrinsics->SetImageSize(image_width, image_height);
}
}
void libmv_CameraIntrinsicsExtract(struct libmv_CameraIntrinsics *libmvIntrinsics, double *focal_length,
double *principal_x, double *principal_y, double *k1, double *k2, double *k3, int *width, int *height) {
libmv::CameraIntrinsics *intrinsics= (libmv::CameraIntrinsics *) libmvIntrinsics;
*focal_length = intrinsics->focal_length();
*principal_x = intrinsics->principal_point_x();
*principal_y = intrinsics->principal_point_y();
*k1 = intrinsics->k1();
*k2 = intrinsics->k2();
void libmv_CameraIntrinsicsExtract(struct libmv_CameraIntrinsics *libmv_intrinsics, double *focal_length,
double *principal_x, double *principal_y, double *k1, double *k2, double *k3, int *width, int *height)
{
libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
*focal_length = camera_intrinsics->focal_length();
*principal_x = camera_intrinsics->principal_point_x();
*principal_y = camera_intrinsics->principal_point_y();
*k1 = camera_intrinsics->k1();
*k2 = camera_intrinsics->k2();
}
void libmv_CameraIntrinsicsUndistortByte(struct libmv_CameraIntrinsics *libmvIntrinsics,
void libmv_CameraIntrinsicsUndistortByte(struct libmv_CameraIntrinsics *libmv_intrinsics,
unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels)
{
libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
intrinsics->Undistort(src, dst, width, height, overscan, channels);
camera_intrinsics->Undistort(src, dst, width, height, overscan, channels);
}
void libmv_CameraIntrinsicsUndistortFloat(struct libmv_CameraIntrinsics *libmvIntrinsics,
@ -950,139 +927,32 @@ void libmv_CameraIntrinsicsDistortFloat(struct libmv_CameraIntrinsics *libmvIntr
intrinsics->Distort(src, dst, width, height, overscan, channels);
}
/* ************ distortion ************ */
void libmv_undistortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels)
{
libmv::CameraIntrinsics intrinsics;
intrinsics.SetFocalLength(focal_length, focal_length);
intrinsics.SetPrincipalPoint(principal_x, principal_y);
intrinsics.SetRadialDistortion(k1, k2, k3);
intrinsics.Undistort(src, dst, width, height, overscan, channels);
}
void libmv_undistortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
float *src, float *dst, int width, int height, float overscan, int channels)
{
libmv::CameraIntrinsics intrinsics;
intrinsics.SetFocalLength(focal_length, focal_length);
intrinsics.SetPrincipalPoint(principal_x, principal_y);
intrinsics.SetRadialDistortion(k1, k2, k3);
intrinsics.Undistort(src, dst, width, height, overscan, channels);
}
void libmv_distortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels)
{
libmv::CameraIntrinsics intrinsics;
intrinsics.SetFocalLength(focal_length, focal_length);
intrinsics.SetPrincipalPoint(principal_x, principal_y);
intrinsics.SetRadialDistortion(k1, k2, k3);
intrinsics.Distort(src, dst, width, height, overscan, channels);
}
void libmv_distortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
float *src, float *dst, int width, int height, float overscan, int channels)
{
libmv::CameraIntrinsics intrinsics;
intrinsics.SetFocalLength(focal_length, focal_length);
intrinsics.SetPrincipalPoint(principal_x, principal_y);
intrinsics.SetRadialDistortion(k1, k2, k3);
intrinsics.Distort(src, dst, width, height, overscan, channels);
}
/* ************ utils ************ */
void libmv_applyCameraIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
double x, double y, double *x1, double *y1)
void libmv_applyCameraIntrinsics(libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options,
double x, double y, double *x1, double *y1)
{
libmv::CameraIntrinsics intrinsics;
libmv::CameraIntrinsics camera_intrinsics;
intrinsics.SetFocalLength(focal_length, focal_length);
intrinsics.SetPrincipalPoint(principal_x, principal_y);
intrinsics.SetRadialDistortion(k1, k2, k3);
cameraIntrinsicsFromOptions(&camera_intrinsics, libmv_camera_intrinsics_options);
if(focal_length) {
if (libmv_camera_intrinsics_options->focal_length) {
/* only do a lens undistortion if the focal length is non-zero */
intrinsics.ApplyIntrinsics(x, y, x1, y1);
camera_intrinsics.ApplyIntrinsics(x, y, x1, y1);
}
}
void libmv_InvertIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
double x, double y, double *x1, double *y1)
void libmv_InvertIntrinsics(libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options,
double x, double y, double *x1, double *y1)
{
libmv::CameraIntrinsics intrinsics;
libmv::CameraIntrinsics camera_intrinsics;
intrinsics.SetFocalLength(focal_length, focal_length);
intrinsics.SetPrincipalPoint(principal_x, principal_y);
intrinsics.SetRadialDistortion(k1, k2, k3);
cameraIntrinsicsFromOptions(&camera_intrinsics, libmv_camera_intrinsics_options);
if(focal_length) {
if (libmv_camera_intrinsics_options->focal_length) {
/* only do a lens distortion if the focal length is non-zero */
intrinsics.InvertIntrinsics(x, y, x1, y1);
camera_intrinsics.InvertIntrinsics(x, y, x1, y1);
}
}
/* ************ point clouds ************ */
static void libmvTransformToMat4(libmv::Mat3 &R, libmv::Vec3 &S, libmv::Vec3 &t, double M[4][4])
{
for (int j = 0; j < 3; ++j)
for (int k = 0; k < 3; ++k)
M[j][k] = R(k, j) * S(j);
for (int i = 0; i < 3; ++i) {
M[3][0] = t(0);
M[3][1] = t(1);
M[3][2] = t(2);
M[0][3] = M[1][3] = M[2][3] = 0;
}
M[3][3] = 1.0;
}
void libmv_rigidRegistration(float (*reference_points)[3], float (*points)[3], int total_points,
int use_scale, int use_translation, double M[4][4])
{
libmv::Mat3 R;
libmv::Vec3 S;
libmv::Vec3 t;
libmv::vector<libmv::Vec3> reference_points_vector, points_vector;
for (int i = 0; i < total_points; i++) {
reference_points_vector.push_back(libmv::Vec3(reference_points[i][0],
reference_points[i][1],
reference_points[i][2]));
points_vector.push_back(libmv::Vec3(points[i][0],
points[i][1],
points[i][2]));
}
if (use_scale && use_translation) {
libmv::RigidRegistration(reference_points_vector, points_vector, R, S, t);
}
else if (use_translation) {
S = libmv::Vec3(1.0, 1.0, 1.0);
libmv::RigidRegistration(reference_points_vector, points_vector, R, t);
}
else {
S = libmv::Vec3(1.0, 1.0, 1.0);
t = libmv::Vec3::Zero();
libmv::RigidRegistration(reference_points_vector, points_vector, R);
}
libmvTransformToMat4(R, S, t, M);
}

@ -31,7 +31,6 @@
extern "C" {
#endif
struct libmv_RegionTracker;
struct libmv_Tracks;
struct libmv_Reconstruction;
struct libmv_Features;
@ -42,14 +41,6 @@ void libmv_initLogging(const char *argv0);
void libmv_startDebugLogging(void);
void libmv_setLoggingVerbosity(int verbosity);
/* RegionTracker */
struct libmv_RegionTracker *libmv_pyramidRegionTrackerNew(int max_iterations, int pyramid_level, int half_window_size, double minimum_correlation);
struct libmv_RegionTracker *libmv_hybridRegionTrackerNew(int max_iterations, int half_window_size, double minimum_correlation);
struct libmv_RegionTracker *libmv_bruteRegionTrackerNew(int half_window_size, double minimum_correlation);
int libmv_regionTrackerTrack(struct libmv_RegionTracker *libmv_tracker, const float *ima1, const float *ima2,
int width, int height, double x1, double y1, double *x2, double *y2);
void libmv_regionTrackerDestroy(struct libmv_RegionTracker *libmv_tracker);
/* TrackRegion (new planar tracker) */
struct libmv_trackRegionOptions {
int motion_model;
@ -86,28 +77,42 @@ void libmv_tracksInsert(struct libmv_Tracks *libmv_tracks, int image, int track,
void libmv_tracksDestroy(struct libmv_Tracks *libmv_tracks);
/* Reconstruction solver */
#define LIBMV_REFINE_FOCAL_LENGTH (1<<0)
#define LIBMV_REFINE_PRINCIPAL_POINT (1<<1)
#define LIBMV_REFINE_RADIAL_DISTORTION_K1 (1<<2)
#define LIBMV_REFINE_RADIAL_DISTORTION_K2 (1<<4)
/* TODO: make keyframes/distortion model a part of options? */
struct libmv_reconstructionOptions {
#define LIBMV_REFINE_FOCAL_LENGTH (1 << 0)
#define LIBMV_REFINE_PRINCIPAL_POINT (1 << 1)
#define LIBMV_REFINE_RADIAL_DISTORTION_K1 (1 << 2)
#define LIBMV_REFINE_RADIAL_DISTORTION_K2 (1 << 4)
typedef struct libmv_cameraIntrinsicsOptions {
double focal_length;
double principal_point_x, principal_point_y;
double k1, k2, k3;
double p1, p2;
int image_width, image_height;
} libmv_cameraIntrinsicsOptions;
typedef struct libmv_reconstructionOptions {
int keyframe1, keyframe2;
int refine_intrinsics;
double success_threshold;
int use_fallback_reconstruction;
};
} libmv_reconstructionOptions;
typedef void (*reconstruct_progress_update_cb) (void *customdata, double progress, const char *message);
int libmv_refineParametersAreValid(int parameters);
struct libmv_Reconstruction *libmv_solveReconstruction(struct libmv_Tracks *tracks, int keyframe1, int keyframe2,
int refine_intrinsics, double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
struct libmv_reconstructionOptions *options, reconstruct_progress_update_cb progress_update_callback,
struct libmv_Reconstruction *libmv_solveReconstruction(struct libmv_Tracks *libmv_tracks,
libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options,
libmv_reconstructionOptions *libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata);
struct libmv_Reconstruction *libmv_solveModal(struct libmv_Tracks *libmv_tracks,
libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options,
libmv_reconstructionOptions *libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata);
struct libmv_Reconstruction *libmv_solveModal(struct libmv_Tracks *tracks, double focal_length,
double principal_x, double principal_y, double k1, double k2, double k3,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata);
int libmv_reporojectionPointForTrack(struct libmv_Reconstruction *libmv_reconstruction, int track, double pos[3]);
double libmv_reporojectionErrorForTrack(struct libmv_Reconstruction *libmv_reconstruction, int track);
double libmv_reporojectionErrorForImage(struct libmv_Reconstruction *libmv_reconstruction, int image);
@ -127,52 +132,36 @@ void libmv_destroyFeatures(struct libmv_Features *libmv_features);
/* camera intrinsics */
struct libmv_CameraIntrinsics *libmv_ReconstructionExtractIntrinsics(struct libmv_Reconstruction *libmv_Reconstruction);
struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsNew(double focal_length, double principal_x, double principal_y,
double k1, double k2, double k3, int width, int height);
struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsNew(libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options);
struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsCopy(struct libmv_CameraIntrinsics *libmvIntrinsics);
struct libmv_CameraIntrinsics *libmv_CameraIntrinsicsCopy(struct libmv_CameraIntrinsics *libmv_intrinsics);
void libmv_CameraIntrinsicsDestroy(struct libmv_CameraIntrinsics *libmvIntrinsics);
void libmv_CameraIntrinsicsDestroy(struct libmv_CameraIntrinsics *libmv_intrinsics);
void libmv_CameraIntrinsicsUpdate(struct libmv_CameraIntrinsics *libmvIntrinsics, double focal_length,
double principal_x, double principal_y, double k1, double k2, double k3, int width, int height);
void libmv_CameraIntrinsicsUpdate(struct libmv_CameraIntrinsics *libmv_intrinsics,
libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options);
void libmv_CameraIntrinsicsExtract(struct libmv_CameraIntrinsics *libmvIntrinsics, double *focal_length,
void libmv_CameraIntrinsicsExtract(struct libmv_CameraIntrinsics *libmv_intrinsics, double *focal_length,
double *principal_x, double *principal_y, double *k1, double *k2, double *k3, int *width, int *height);
void libmv_CameraIntrinsicsUndistortByte(struct libmv_CameraIntrinsics *libmvIntrinsics,
void libmv_CameraIntrinsicsUndistortByte(struct libmv_CameraIntrinsics *libmv_intrinsics,
unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels);
void libmv_CameraIntrinsicsUndistortFloat(struct libmv_CameraIntrinsics *libmvIntrinsics,
void libmv_CameraIntrinsicsUndistortFloat(struct libmv_CameraIntrinsics *libmv_intrinsics,
float *src, float *dst, int width, int height, float overscan, int channels);
void libmv_CameraIntrinsicsDistortByte(struct libmv_CameraIntrinsics *libmvIntrinsics,
void libmv_CameraIntrinsicsDistortByte(struct libmv_CameraIntrinsics *libmv_intrinsics,
unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels);
void libmv_CameraIntrinsicsDistortFloat(struct libmv_CameraIntrinsics *libmvIntrinsics,
float *src, float *dst, int width, int height, float overscan, int channels);
/* distortion */
void libmv_undistortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels);
void libmv_undistortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
float *src, float *dst, int width, int height, float overscan, int channels);
void libmv_distortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels);
void libmv_distortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
void libmv_CameraIntrinsicsDistortFloat(struct libmv_CameraIntrinsics *libmv_intrinsics,
float *src, float *dst, int width, int height, float overscan, int channels);
/* utils */
void libmv_applyCameraIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
void libmv_applyCameraIntrinsics(libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options,
double x, double y, double *x1, double *y1);
void libmv_InvertIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
void libmv_InvertIntrinsics(libmv_cameraIntrinsicsOptions *libmv_camera_intrinsics_options,
double x, double y, double *x1, double *y1);
/* point clouds */
void libmv_rigidRegistration(float (*reference_points)[3], float (*points)[3], int total_points,
int use_scale, int use_translation, double M[4][4]);
#ifdef __cplusplus
}
#endif
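A minimal caller-side sketch of the options-based API declared above, assuming a libmv_Tracks that has already been filled via libmv_tracksInsert; the names solve_example, example_progress_cb and the numeric values are hypothetical, and only the libmv_* entry points declared in this header are used.
/* Hypothetical usage sketch, not part of the patch. */
#include <stdio.h>
static void example_progress_cb(void *customdata, double progress, const char *message)
{
	(void) customdata;
	printf("%s (%d%%)\n", message, (int) (progress * 100));
}
static void solve_example(struct libmv_Tracks *tracks)
{
	libmv_cameraIntrinsicsOptions intrinsics_options = {0};
	libmv_reconstructionOptions reconstruction_options = {0};
	struct libmv_Reconstruction *reconstruction;
	/* Assumed footage: 1920x1080 with a rough focal length guess in pixels. */
	intrinsics_options.focal_length = 1500.0;
	intrinsics_options.principal_point_x = 960.0;
	intrinsics_options.principal_point_y = 540.0;
	intrinsics_options.image_width = 1920;
	intrinsics_options.image_height = 1080;
	reconstruction_options.keyframe1 = 1;
	reconstruction_options.keyframe2 = 30;
	reconstruction_options.refine_intrinsics = LIBMV_REFINE_FOCAL_LENGTH | LIBMV_REFINE_RADIAL_DISTORTION_K1;
	reconstruction_options.success_threshold = 1e-3;
	reconstruction_options.use_fallback_reconstruction = 1;
	reconstruction = libmv_solveReconstruction(tracks, &intrinsics_options, &reconstruction_options,
	                                           example_progress_cb, NULL);
	printf("track 0 reprojection error: %f\n",
	       libmv_reporojectionErrorForTrack(reconstruction, 0));
}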

@ -653,8 +653,8 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
// Finally, with all three solutions, select the (R, t) with the best RMSE.
VLOG(2) << "RMSE for solution 0: " << rmse(0);
VLOG(2) << "RMSE for solution 1: " << rmse(0);
VLOG(2) << "RMSE for solution 2: " << rmse(0);
VLOG(2) << "RMSE for solution 1: " << rmse(1);
VLOG(2) << "RMSE for solution 2: " << rmse(2);
size_t n = 0;
if (rmse(1) < rmse(0)) {
n = 1;

@ -254,8 +254,8 @@ double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
Vec3 Ft_y = F.transpose() * y;
double y_F_x = y.dot(F_x);
return y_F_x * ( 1 / F_x.head<2>().norm()
+ 1 / Ft_y.head<2>().norm());
return Square(y_F_x) * ( 1 / F_x.head<2>().squaredNorm()
+ 1 / Ft_y.head<2>().squaredNorm());
}
// HZ 9.6 pag 257 (formula 9.12)

@ -264,4 +264,19 @@ bool Homography3DFromCorrespondencesLinear(const Mat &x1,
return false;
}
}
double SymmetricGeometricDistance(Mat3 &H, Vec2 &x1, Vec2 &x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
Vec3 H_x = H * x;
Vec3 Hinv_y = H.inverse() * y;
H_x /= H_x(2);
Hinv_y /= Hinv_y(2);
return (H_x.head<2>() - y.head<2>()).squaredNorm() +
(Hinv_y.head<2>() - x.head<2>()).squaredNorm();
}
} // namespace libmv

@ -79,6 +79,14 @@ bool Homography3DFromCorrespondencesLinear(const Mat &x1,
Mat4 *H,
double expected_precision =
EigenDouble::dummy_precision());
/**
* Calculate symmetric geometric cost:
*
* D(H * x1, x2)^2 + D(H^-1 * x2, x1)^2
*/
double SymmetricGeometricDistance(Mat3 &H, Vec2 &x1, Vec2 &x2);
} // namespace libmv
#endif // LIBMV_MULTIVIEW_HOMOGRAPHY_H_
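A small sanity-check sketch for SymmetricGeometricDistance: with a pure translation homography the cost above reduces to twice the squared translation error, so the expected value can be computed by hand. The check_symmetric_distance name and the numbers are hypothetical.
// Hypothetical sketch, not part of the patch.
#include "libmv/multiview/homography.h"
#include "libmv/logging/logging.h"
static void check_symmetric_distance() {
  libmv::Mat3 H;
  H << 1, 0, 2,   // shift x by 2
       0, 1, 1,   // shift y by 1
       0, 0, 1;
  libmv::Vec2 x1(3.0, 4.0);
  libmv::Vec2 x2(3.0, 4.0);  // deliberately not moved, so the full error shows up
  // D(H * x1, x2)^2 = 2^2 + 1^2 = 5 and the inverse mapping contributes the
  // same amount, so the expected result is 10.
  LG << "distance: " << libmv::SymmetricGeometricDistance(H, x1, x2);
}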

@ -0,0 +1,181 @@
// Copyright (c) 2009 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
#ifndef LIBMV_MULTIVIEW_PANOGRAPHY_H
#define LIBMV_MULTIVIEW_PANOGRAPHY_H
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
#include "libmv/base/vector.h"
namespace libmv {
static bool Build_Minimal2Point_PolynomialFactor(
const Mat & x1, const Mat & x2,
double * P) //P must be a double[4]
{
assert(2 == x1.rows());
assert(2 == x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
// Setup the variable of the input problem:
Vec xx1 = (x1.col(0)).transpose();
Vec yx1 = (x1.col(1)).transpose();
double a12 = xx1.dot(yx1);
Vec xx2 = (x2.col(0)).transpose();
Vec yx2 = (x2.col(1)).transpose();
double b12 = xx2.dot(yx2);
double a1 = xx1.squaredNorm();
double a2 = yx1.squaredNorm();
double b1 = xx2.squaredNorm();
double b2 = yx2.squaredNorm();
// Build the 3rd degree polynomial in F^2.
//
// f^6 * p + f^4 * q + f^2 * r + s = 0;
//
// Coefficients in ascending powers of alpha, i.e. P[N]*x^N.
// Run panography_coeffs.py to get the below coefficients.
P[0] = b1*b2*a12*a12-a1*a2*b12*b12;
P[1] = -2*a1*a2*b12+2*a12*b1*b2+b1*a12*a12+b2*a12*a12-a1*b12*b12-a2*b12*b12;
P[2] = b1*b2-a1*a2-2*a1*b12-2*a2*b12+2*a12*b1+2*a12*b2+a12*a12-b12*b12;
P[3] = b1+b2-2*b12-a1-a2+2*a12;
// If P[3] is equal to 0 we get ill-conditioned data
return (P[3] != 0.0);
}
// This implements a minimal solution (2 points) for panoramic stitching:
//
// http://www.cs.ubc.ca/~mbrown/minimal/minimal.html
//
// [1] M. Brown and R. Hartley and D. Nister. Minimal Solutions for Panoramic
// Stitching. CVPR07.
//
// The 2-point algorithm solves for the rotation of the camera with a single
// focal length (4 degrees of freedom).
//
// Compute from 1 to 3 possible focal lengths for 2 point correspondences.
// Suppose that the cameras share the same optical center and focal lengths:
//
// Image 1 => H*x = x' => Image 2
// x (u1j) x' (u2j)
// a (u11) a' (u21)
// b (u12) b' (u22)
//
// The return values are 1 to 3 possible values for the focal lengths such
// that:
//
// [f 0 0]
// K = [0 f 0]
// [0 0 1]
//
static void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
vector<double> *fs) {
// Build Polynomial factor to get squared focal value.
double P[4];
Build_Minimal2Point_PolynomialFactor(x1, x2, &P[0]);
// Solve it by using F = f^2 and a Cubic polynomial solver
//
// F^3 * p + F^2 * q + F^1 * r + s = 0
//
double roots[3];
int num_roots = SolveCubicPolynomial(P, roots);
for (int i = 0; i < num_roots; ++i) {
if (roots[i] > 0.0) {
fs->push_back(sqrt(roots[i]));
}
}
}
// Compute the 3x3 rotation matrix that fits two 3D point clouds in the least
// square sense. The method is from:
//
// K. Arun, T. Huang and D. Blostein. Least-squares fitting of two 3-D point
// sets. IEEE Transactions on Pattern Analysis and Machine Intelligence,
// 9:698-700, 1987.
//
// Given the calibration matrices K1, K2 solve for the rotation from
// corresponding image rays.
//
// R = min || X2 - R * x1 ||.
//
// In the case of panography, where the cameras share the same camera
// center,
//
// H = K2 * R * K1.inverse();
//
// For the full explanation, see Section 8, Solving for Rotation from [1].
//
// Parameters:
//
// x1 : Point cloud A (3D coords)
// x2 : Point cloud B (3D coords)
//
// [f 0 0]
// K1 = [0 f 0]
// [0 0 1]
//
// K2 (the same form as K1, but may have different f)
//
// Returns: A rotation matrix that minimizes
//
// R = arg min || X2 - R * x1 ||
//
static void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
const double focal,
Mat3 *R) {
assert(3 == x1.rows());
assert(2 <= x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
// Build simplified K matrix
Mat3 K(Mat3::Identity() * 1.0 / focal);
K(2, 2) = 1.0;
// Build the correlation matrix; equation (22) in [1].
Mat3 C = Mat3::Zero();
for(int i = 0; i < x1.cols(); ++i) {
Mat r1i = (K * x1.col(i)).normalized();
Mat r2i = (K * x2.col(i)).normalized();
C += r2i * r1i.transpose();
}
// Solve for rotation. Equations (24) and (25) in [1].
Eigen::JacobiSVD<Mat> svd(C, Eigen::ComputeThinU | Eigen::ComputeThinV);
Mat3 scale = Mat3::Identity();
scale(2,2) = ((svd.matrixU() * svd.matrixV().transpose()).determinant() > 0.0)
? 1.0
: -1.0;
(*R) = svd.matrixU() * scale * svd.matrixV().transpose();
}
} // namespace libmv
#endif // LIBMV_MULTIVIEW_PANOGRAPHY_H
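A sketch of how the two helpers above could be combined: estimate candidate focal lengths from two correspondences, then fit the rotation with the first candidate. The estimate_panography name and the stacking of a row of ones to form homogeneous coordinates are assumptions for illustration only.
// Hypothetical sketch, not part of the patch.
#include "libmv/multiview/panography.h"
#include "libmv/logging/logging.h"
static void estimate_panography(const libmv::Mat &x1,   // 2x2: two points in image 1
                                const libmv::Mat &x2) { // 2x2: the same points in image 2
  libmv::vector<double> focals;
  libmv::F_FromCorrespondance_2points(x1, x2, &focals);
  if (focals.size() == 0) {
    return;  // no positive root; the correspondences were degenerate
  }
  // Lift the correspondences to homogeneous coordinates for the rotation fit.
  libmv::Mat x1_h(3, 2), x2_h(3, 2);
  x1_h << x1, libmv::Mat::Ones(1, 2);
  x2_h << x2, libmv::Mat::Ones(1, 2);
  libmv::Mat3 R;
  libmv::GetR_FixedCameraCenter(x1_h, x2_h, focals[0], &R);
  LG << "focal: " << focals[0] << ", rotation:\n" << R;
}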

@ -1,4 +1,4 @@
// Copyright (c) 2011 libmv authors.
// Copyright (c) 2011, 2012, 2013 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
@ -18,10 +18,11 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#define V3DLIB_ENABLE_SUITESPARSE 1
#include <map>
#include "ceres/ceres.h"
#include "ceres/rotation.h"
#include "libmv/base/scoped_ptr.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/fundamental.h"
@ -31,13 +32,195 @@
#include "libmv/simple_pipeline/bundle.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
#include "third_party/ssba/Geometry/v3d_cameramatrix.h"
#include "third_party/ssba/Geometry/v3d_metricbundle.h"
#include "third_party/ssba/Math/v3d_linear.h"
#include "third_party/ssba/Math/v3d_linear_utils.h"
#ifdef _OPENMP
# include <omp.h>
#endif
namespace libmv {
// The intrinsics need to get combined into a single parameter block; use these
// enums to index instead of numeric constants.
enum {
OFFSET_FOCAL_LENGTH,
OFFSET_PRINCIPAL_POINT_X,
OFFSET_PRINCIPAL_POINT_Y,
OFFSET_K1,
OFFSET_K2,
OFFSET_K3,
OFFSET_P1,
OFFSET_P2,
};
namespace {
struct OpenCVReprojectionError {
OpenCVReprojectionError(double observed_x, double observed_y)
: observed_x(observed_x), observed_y(observed_y) {}
template <typename T>
bool operator()(const T* const intrinsics,
const T* const R, // Rotation 3x3 column-major.
const T* const t, // Translation 3x1.
const T* const X, // Point coordinates 3x1.
T* residuals) const {
// Unpack the intrinsics.
const T& focal_length = intrinsics[OFFSET_FOCAL_LENGTH];
const T& principal_point_x = intrinsics[OFFSET_PRINCIPAL_POINT_X];
const T& principal_point_y = intrinsics[OFFSET_PRINCIPAL_POINT_Y];
const T& k1 = intrinsics[OFFSET_K1];
const T& k2 = intrinsics[OFFSET_K2];
const T& k3 = intrinsics[OFFSET_K3];
const T& p1 = intrinsics[OFFSET_P1];
const T& p2 = intrinsics[OFFSET_P2];
// Compute projective coordinates: x = RX + t.
T x[3];
x[0] = R[0]*X[0] + R[3]*X[1] + R[6]*X[2] + t[0];
x[1] = R[1]*X[0] + R[4]*X[1] + R[7]*X[2] + t[1];
x[2] = R[2]*X[0] + R[5]*X[1] + R[8]*X[2] + t[2];
// Compute normalized coordinates: x /= x[2].
T xn = x[0] / x[2];
T yn = x[1] / x[2];
T predicted_x, predicted_y;
// EuclideanBundle uses empty intrinsics, which breaks undistortion code;
// so use an implied focal length of 1.0 if the focal length is exactly
// zero.
// TODO(keir): Figure out a better way to do this.
if (focal_length != T(0)) {
// Apply distortion to the normalized points to get (xd, yd).
// TODO(keir): Do early bailouts for zero distortion; these are expensive
// jet operations.
ApplyRadialDistortionCameraIntrinsics(focal_length,
focal_length,
principal_point_x,
principal_point_y,
k1, k2, k3,
p1, p2,
xn, yn,
&predicted_x,
&predicted_y);
} else {
predicted_x = xn;
predicted_y = yn;
}
// The error is the difference between the predicted and observed position.
residuals[0] = predicted_x - T(observed_x);
residuals[1] = predicted_y - T(observed_y);
return true;
}
double observed_x;
double observed_y;
};
// TODO(keir): Get rid of the parameterization! Ceres will work much faster if
// the rotation block is angle-axis and also the translation is merged into a
// single parameter block.
struct RotationMatrixPlus {
template<typename T>
bool operator()(const T* R_array, // Rotation 3x3 col-major.
const T* delta, // Angle-axis delta
T* R_plus_delta_array) const {
T angle_axis[3];
ceres::RotationMatrixToAngleAxis(R_array, angle_axis);
angle_axis[0] += delta[0];
angle_axis[1] += delta[1];
angle_axis[2] += delta[2];
ceres::AngleAxisToRotationMatrix(angle_axis, R_plus_delta_array);
return true;
}
};
// TODO(sergey): would be nice to have this in Ceres upstream
template<typename PlusFunctor, int kGlobalSize, int kLocalSize>
class AutodiffParameterization : public ceres::LocalParameterization {
public:
AutodiffParameterization(const PlusFunctor &plus_functor)
: plus_functor_(plus_functor) {}
virtual ~AutodiffParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const {
return plus_functor_(x, delta, x_plus_delta);
}
virtual bool ComputeJacobian(const double* x, double* jacobian) const {
double zero_delta[kLocalSize] = { 0.0 };
double x_plus_delta[kGlobalSize];
const double* parameters[2] = { x, zero_delta };
double* jacobians_array[2] = { NULL, jacobian };
Plus(x, zero_delta, x_plus_delta);
return ceres::internal::AutoDiff<PlusFunctor,
double,
kGlobalSize, kLocalSize>
::Differentiate(plus_functor_,
parameters,
kGlobalSize,
x_plus_delta,
jacobians_array);
}
virtual int GlobalSize() const { return kGlobalSize; }
virtual int LocalSize() const { return kLocalSize; }
private:
const PlusFunctor &plus_functor_;
};
void BundleIntrinsicsLogMessage(int bundle_intrinsics) {
if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
LG << "Bundling only camera positions.";
} else if (bundle_intrinsics == BUNDLE_FOCAL_LENGTH) {
LG << "Bundling f.";
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_PRINCIPAL_POINT)) {
LG << "Bundling f, px, py.";
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_PRINCIPAL_POINT |
BUNDLE_RADIAL)) {
LG << "Bundling f, px, py, k1, k2.";
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_PRINCIPAL_POINT |
BUNDLE_RADIAL |
BUNDLE_TANGENTIAL)) {
LG << "Bundling f, px, py, k1, k2, p1, p2.";
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_RADIAL |
BUNDLE_TANGENTIAL)) {
LG << "Bundling f, px, py, k1, k2, p1, p2.";
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_RADIAL)) {
LG << "Bundling f, k1, k2.";
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_RADIAL_K1)) {
LG << "Bundling f, k1.";
} else if (bundle_intrinsics == (BUNDLE_RADIAL_K1 |
BUNDLE_RADIAL_K2)) {
LG << "Bundling k1, k2.";
} else {
LOG(FATAL) << "Unsupported bundle combination.";
}
}
} // namespace
void EuclideanBundle(const Tracks &tracks,
EuclideanReconstruction *reconstruction) {
CameraIntrinsics intrinsics;
@ -50,199 +233,139 @@ void EuclideanBundle(const Tracks &tracks,
void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
int bundle_intrinsics,
EuclideanReconstruction *reconstruction,
CameraIntrinsics *intrinsics) {
CameraIntrinsics *intrinsics,
int bundle_constraints) {
LG << "Original intrinsics: " << *intrinsics;
vector<Marker> markers = tracks.AllMarkers();
// "index" in this context is the index that V3D's optimizer will see. The
// V3D index must be dense in that the cameras are numbered 0...n-1, which is
// not the case for the "image" numbering that arises from the tracks
// structure. The complicated mapping is necessary to convert between the two
// representations.
std::map<EuclideanCamera *, int> camera_to_index;
std::map<EuclideanPoint *, int> point_to_index;
vector<EuclideanCamera *> index_to_camera;
vector<EuclideanPoint *> index_to_point;
int num_cameras = 0;
int num_points = 0;
ceres::Problem::Options problem_options;
problem_options.local_parameterization_ownership =
ceres::DO_NOT_TAKE_OWNERSHIP;
ceres::Problem problem(problem_options);
// Residual blocks with 10 parameters are unwieldy with Ceres, so pack the
// intrinsics into a single block and rely on local parameterizations to
// control which intrinsics are allowed to vary.
double ceres_intrinsics[8];
ceres_intrinsics[OFFSET_FOCAL_LENGTH] = intrinsics->focal_length();
ceres_intrinsics[OFFSET_PRINCIPAL_POINT_X] = intrinsics->principal_point_x();
ceres_intrinsics[OFFSET_PRINCIPAL_POINT_Y] = intrinsics->principal_point_y();
ceres_intrinsics[OFFSET_K1] = intrinsics->k1();
ceres_intrinsics[OFFSET_K2] = intrinsics->k2();
ceres_intrinsics[OFFSET_K3] = intrinsics->k3();
ceres_intrinsics[OFFSET_P1] = intrinsics->p1();
ceres_intrinsics[OFFSET_P2] = intrinsics->p2();
RotationMatrixPlus rotation_matrix_plus;
AutodiffParameterization<RotationMatrixPlus, 9, 3>
rotation_parameterization(rotation_matrix_plus);
int num_residuals = 0;
for (int i = 0; i < markers.size(); ++i) {
const Marker &marker = markers[i];
EuclideanCamera *camera = reconstruction->CameraForImage(marker.image);
EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
if (camera && point) {
if (camera_to_index.find(camera) == camera_to_index.end()) {
camera_to_index[camera] = num_cameras;
index_to_camera.push_back(camera);
num_cameras++;
}
if (point_to_index.find(point) == point_to_index.end()) {
point_to_index[point] = num_points;
index_to_point.push_back(point);
num_points++;
}
}
}
// Convert libmv's K matrix to V3d's K matrix.
V3D::Matrix3x3d v3d_K;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
v3d_K[i][j] = intrinsics->K()(i, j);
}
}
// Convert libmv's distortion to v3d distortion.
V3D::StdDistortionFunction v3d_distortion;
v3d_distortion.k1 = intrinsics->k1();
v3d_distortion.k2 = intrinsics->k2();
v3d_distortion.p1 = intrinsics->p1();
v3d_distortion.p2 = intrinsics->p2();
// Convert libmv's cameras to V3D's cameras.
std::vector<V3D::CameraMatrix> v3d_cameras(index_to_camera.size());
for (int k = 0; k < index_to_camera.size(); ++k) {
V3D::Matrix3x3d R;
V3D::Vector3d t;
// Libmv's rotation matrix type.
const Mat3 &R_libmv = index_to_camera[k]->R;
const Vec3 &t_libmv = index_to_camera[k]->t;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
R[i][j] = R_libmv(i, j);
}
t[i] = t_libmv(i);
}
v3d_cameras[k].setIntrinsic(v3d_K);
v3d_cameras[k].setRotation(R);
v3d_cameras[k].setTranslation(t);
}
LG << "Number of cameras: " << index_to_camera.size();
// Convert libmv's points to V3D's points.
std::vector<V3D::Vector3d> v3d_points(index_to_point.size());
for (int i = 0; i < index_to_point.size(); i++) {
v3d_points[i][0] = index_to_point[i]->X(0);
v3d_points[i][1] = index_to_point[i]->X(1);
v3d_points[i][2] = index_to_point[i]->X(2);
}
LG << "Number of points: " << index_to_point.size();
// Convert libmv's measurements to v3d measurements.
int num_residuals = 0;
std::vector<V3D::Vector2d> v3d_measurements;
std::vector<int> v3d_camera_for_measurement;
std::vector<int> v3d_point_for_measurement;
for (int i = 0; i < markers.size(); ++i) {
EuclideanCamera *camera = reconstruction->CameraForImage(markers[i].image);
EuclideanPoint *point = reconstruction->PointForTrack(markers[i].track);
if (!camera || !point) {
continue;
}
V3D::Vector2d v3d_point;
v3d_point[0] = markers[i].x;
v3d_point[1] = markers[i].y;
v3d_measurements.push_back(v3d_point);
v3d_camera_for_measurement.push_back(camera_to_index[camera]);
v3d_point_for_measurement.push_back(point_to_index[point]);
problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
OpenCVReprojectionError, 2, 8, 9, 3, 3>(
new OpenCVReprojectionError(
marker.x,
marker.y)),
NULL,
ceres_intrinsics,
&camera->R(0, 0),
&camera->t(0),
&point->X(0));
// It's fine if the parameterization for one camera is set repeatedly.
problem.SetParameterization(&camera->R(0, 0),
&rotation_parameterization);
if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
problem.SetParameterBlockConstant(&camera->t(0));
}
num_residuals++;
}
LG << "Number of residuals: " << num_residuals;
// Convert from libmv's specification for which intrinsics to bundle to V3D's.
int v3d_bundle_intrinsics;
if(!num_residuals) {
LG << "Skipping running minimizer with zero residuals";
return;
}
BundleIntrinsicsLogMessage(bundle_intrinsics);
scoped_ptr<ceres::SubsetParameterization>
subset_parameterization(NULL);
if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
LG << "Bundling only camera positions.";
v3d_bundle_intrinsics = V3D::FULL_BUNDLE_METRIC;
} else if (bundle_intrinsics == BUNDLE_FOCAL_LENGTH) {
LG << "Bundling f.";
v3d_bundle_intrinsics = V3D::FULL_BUNDLE_FOCAL_LENGTH;
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_PRINCIPAL_POINT)) {
LG << "Bundling f, px, py.";
v3d_bundle_intrinsics = V3D::FULL_BUNDLE_FOCAL_LENGTH_PP;
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_PRINCIPAL_POINT |
BUNDLE_RADIAL)) {
LG << "Bundling f, px, py, k1, k2.";
v3d_bundle_intrinsics = V3D::FULL_BUNDLE_RADIAL;
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_PRINCIPAL_POINT |
BUNDLE_RADIAL |
BUNDLE_TANGENTIAL)) {
LG << "Bundling f, px, py, k1, k2, p1, p2.";
v3d_bundle_intrinsics = V3D::FULL_BUNDLE_RADIAL_TANGENTIAL;
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_RADIAL |
BUNDLE_TANGENTIAL)) {
LG << "Bundling f, px, py, k1, k2, p1, p2.";
v3d_bundle_intrinsics = V3D::FULL_BUNDLE_RADIAL_TANGENTIAL;
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_RADIAL)) {
LG << "Bundling f, k1, k2.";
v3d_bundle_intrinsics = V3D::FULL_BUNDLE_FOCAL_AND_RADIAL;
} else if (bundle_intrinsics == (BUNDLE_FOCAL_LENGTH |
BUNDLE_RADIAL_K1)) {
LG << "Bundling f, k1.";
v3d_bundle_intrinsics = V3D::FULL_BUNDLE_FOCAL_AND_RADIAL_K1;
// No camera intrinsics are being refined,
// so set the whole parameter block as constant for best performance.
problem.SetParameterBlockConstant(ceres_intrinsics);
} else {
LOG(FATAL) << "Unsupported bundle combination.";
}
// Set intrinsics that are not being bundled as constant.
// Ignore any outliers; assume supervised tracking.
double v3d_inlier_threshold = 500000.0;
// Finally, run the bundle adjustment.
V3D::CommonInternalsMetricBundleOptimizer opt(v3d_bundle_intrinsics,
v3d_inlier_threshold,
v3d_K,
v3d_distortion,
v3d_cameras,
v3d_points,
v3d_measurements,
v3d_camera_for_measurement,
v3d_point_for_measurement);
opt.maxIterations = 500;
opt.minimize();
if (opt.status == V3D::LEVENBERG_OPTIMIZER_TIMEOUT) {
LG << "Bundle status: Timed out.";
} else if (opt.status == V3D::LEVENBERG_OPTIMIZER_SMALL_UPDATE) {
LG << "Bundle status: Small update.";
} else if (opt.status == V3D::LEVENBERG_OPTIMIZER_CONVERGED) {
LG << "Bundle status: Converged.";
}
// Convert V3D's K matrix back to libmv's K matrix.
Mat3 K;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
K(i, j) = v3d_K[i][j];
std::vector<int> constant_intrinsics;
#define MAYBE_SET_CONSTANT(bundle_enum, offset) \
if (!(bundle_intrinsics & bundle_enum)) { \
constant_intrinsics.push_back(offset); \
}
}
intrinsics->SetK(K);
MAYBE_SET_CONSTANT(BUNDLE_FOCAL_LENGTH, OFFSET_FOCAL_LENGTH);
MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT, OFFSET_PRINCIPAL_POINT_X);
MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT, OFFSET_PRINCIPAL_POINT_Y);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K1, OFFSET_K1);
MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K2, OFFSET_K2);
MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P1, OFFSET_P1);
MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P2, OFFSET_P2);
#undef MAYBE_SET_CONSTANT
// Convert V3D's distortion back to libmv's distortion.
intrinsics->SetRadialDistortion(v3d_distortion.k1, v3d_distortion.k2, 0.0);
intrinsics->SetTangentialDistortion(v3d_distortion.p1, v3d_distortion.p2);
// Always set K3 constant, it's not used at the moment.
constant_intrinsics.push_back(OFFSET_K3);
// Convert V3D's cameras back to libmv's cameras.
for (int k = 0; k < num_cameras; k++) {
V3D::Matrix3x4d const Rt = v3d_cameras[k].getOrientation();
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
index_to_camera[k]->R(i, j) = Rt[i][j];
}
index_to_camera[k]->t(i) = Rt[i][3];
}
subset_parameterization.reset(
new ceres::SubsetParameterization(8, constant_intrinsics));
problem.SetParameterization(ceres_intrinsics, subset_parameterization.get());
}
// Convert V3D's points back to libmv's points.
for (int k = 0; k < num_points; k++) {
for (int i = 0; i < 3; ++i) {
index_to_point[k]->X(i) = v3d_points[k][i];
}
ceres::Solver::Options options;
options.use_nonmonotonic_steps = true;
options.preconditioner_type = ceres::SCHUR_JACOBI;
options.linear_solver_type = ceres::ITERATIVE_SCHUR;
options.use_inner_iterations = true;
options.max_num_iterations = 100;
#ifdef _OPENMP
options.num_threads = omp_get_max_threads();
options.num_linear_solver_threads = omp_get_max_threads();
#endif
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
LG << "Final report:\n" << summary.FullReport();
// Copy intrinsics back.
if (bundle_intrinsics != BUNDLE_NO_INTRINSICS) {
intrinsics->SetFocalLength(ceres_intrinsics[OFFSET_FOCAL_LENGTH],
ceres_intrinsics[OFFSET_FOCAL_LENGTH]);
intrinsics->SetPrincipalPoint(ceres_intrinsics[OFFSET_PRINCIPAL_POINT_X],
ceres_intrinsics[OFFSET_PRINCIPAL_POINT_Y]);
intrinsics->SetRadialDistortion(ceres_intrinsics[OFFSET_K1],
ceres_intrinsics[OFFSET_K2],
ceres_intrinsics[OFFSET_K3]);
intrinsics->SetTangentialDistortion(ceres_intrinsics[OFFSET_P1],
ceres_intrinsics[OFFSET_P2]);
}
LG << "Final intrinsics: " << *intrinsics;
}
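A brief sketch of how a caller inside the libmv namespace might use the new bundle_constraints argument: refine focal length and k1 while keeping camera translations fixed, as one would after a tripod-style solve. The RefineTripodSolveExample name is hypothetical, and tracks/reconstruction/intrinsics are assumed to come from an earlier solve.
// Hypothetical sketch, not part of the patch.
void RefineTripodSolveExample(const Tracks &tracks,
                              EuclideanReconstruction *reconstruction,
                              CameraIntrinsics *intrinsics) {
  // BUNDLE_NO_TRANSLATION marks every camera translation block constant,
  // so only rotations, points, f and k1 are adjusted.
  EuclideanBundleCommonIntrinsics(tracks,
                                  BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL_K1,
                                  reconstruction,
                                  intrinsics,
                                  BUNDLE_NO_TRANSLATION);
}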

@ -67,6 +67,11 @@ void EuclideanBundle(const Tracks &tracks,
BUNDLE_FOCAL_LENGTH | BUNDLE_PRINCIPAL_POINT
BUNDLE_FOCAL_LENGTH | BUNDLE_PRINCIPAL_POINT | BUNDLE_RADIAL
BUNDLE_FOCAL_LENGTH | BUNDLE_PRINCIPAL_POINT | BUNDLE_RADIAL | BUNDLE_TANGENTIAL
BUNDLE_RADIAL
Constraints denote which blocks to keep constant during bundling.
For example, it is useful to keep camera translations constant
when bundling tripod motion.
\note This assumes an outlier-free set of markers.
@ -83,10 +88,15 @@ enum BundleIntrinsics {
BUNDLE_TANGENTIAL_P2 = 32,
BUNDLE_TANGENTIAL = 48,
};
enum BundleConstraints {
BUNDLE_NO_CONSTRAINTS = 0,
BUNDLE_NO_TRANSLATION = 1,
};
void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
int bundle_intrinsics,
EuclideanReconstruction *reconstruction,
CameraIntrinsics *intrinsics);
CameraIntrinsics *intrinsics,
int bundle_constraints = BUNDLE_NO_CONSTRAINTS);
/*!
Refine camera poses and 3D coordinates using bundle adjustment.

@ -124,20 +124,16 @@ void CameraIntrinsics::ApplyIntrinsics(double normalized_x,
double normalized_y,
double *image_x,
double *image_y) const {
double x = normalized_x;
double y = normalized_y;
// Apply distortion to the normalized points to get (xd, yd).
double r2 = x*x + y*y;
double r4 = r2 * r2;
double r6 = r4 * r2;
double r_coeff = (1 + k1_*r2 + k2_*r4 + k3_*r6);
double xd = x * r_coeff + 2*p1_*x*y + p2_*(r2 + 2*x*x);
double yd = y * r_coeff + 2*p2_*x*y + p1_*(r2 + 2*y*y);
// Apply focal length and principal point to get the final image coordinates.
*image_x = focal_length_x() * xd + principal_point_x();
*image_y = focal_length_y() * yd + principal_point_y();
ApplyRadialDistortionCameraIntrinsics(focal_length_x(),
focal_length_y(),
principal_point_x(),
principal_point_y(),
k1(), k2(), k3(),
p1(), p2(),
normalized_x,
normalized_y,
image_x,
image_y);
}
struct InvertIntrinsicsCostFunction {

@ -159,6 +159,37 @@ class CameraIntrinsics {
std::ostream& operator <<(std::ostream &os,
const CameraIntrinsics &intrinsics);
// Apply camera intrinsics to the normalized point to get image coordinates.
// This applies the radial lens distortion to a point which is in normalized
// camera coordinates (i.e. the principal point is at (0, 0)) to get image
// coordinates in pixels. Templated for use with autodifferentiation.
template <typename T>
inline void ApplyRadialDistortionCameraIntrinsics(T focal_length_x,
T focal_length_y,
T principal_point_x,
T principal_point_y,
T k1, T k2, T k3,
T p1, T p2,
T normalized_x,
T normalized_y,
T *image_x,
T *image_y) {
T x = normalized_x;
T y = normalized_y;
// Apply distortion to the normalized points to get (xd, yd).
T r2 = x*x + y*y;
T r4 = r2 * r2;
T r6 = r4 * r2;
T r_coeff = (T(1) + k1*r2 + k2*r4 + k3*r6);
T xd = x * r_coeff + T(2)*p1*x*y + p2*(r2 + T(2)*x*x);
T yd = y * r_coeff + T(2)*p2*x*y + p1*(r2 + T(2)*y*y);
// Apply focal length and principal point to get the final image coordinates.
*image_x = focal_length_x * xd + principal_point_x;
*image_y = focal_length_y * yd + principal_point_y;
}
} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_CAMERA_INTRINSICS_H_
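A worked check of the template above: with all distortion coefficients at zero the mapping collapses to the pinhole model f * x + c, so the expected output can be computed by hand. The check_apply_distortion name and the numbers are hypothetical.
// Hypothetical sketch, not part of the patch.
#include "libmv/simple_pipeline/camera_intrinsics.h"
static void check_apply_distortion() {
  double image_x, image_y;
  // f = 1000, principal point (500, 400), zero radial and tangential distortion.
  libmv::ApplyRadialDistortionCameraIntrinsics(1000.0, 1000.0,
                                               500.0, 400.0,
                                               0.0, 0.0, 0.0,  // k1, k2, k3
                                               0.0, 0.0,       // p1, p2
                                               0.1, 0.2,       // normalized x, y
                                               &image_x, &image_y);
  // r_coeff is 1, so image_x = 1000 * 0.1 + 500 = 600 and
  // image_y = 1000 * 0.2 + 400 = 600.
}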

@ -18,6 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/simple_pipeline/intersect.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
@ -26,39 +28,41 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/levenberg_marquardt.h"
#include "libmv/simple_pipeline/intersect.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
#include "ceres/ceres.h"
namespace libmv {
namespace {
struct EuclideanIntersectCostFunction {
class EuclideanIntersectCostFunctor {
public:
typedef Vec FMatrixType;
typedef Vec3 XMatrixType;
EuclideanIntersectCostFunctor(const Marker &marker,
const EuclideanCamera &camera)
: marker_(marker), camera_(camera) {}
EuclideanIntersectCostFunction(const vector<Marker> &markers,
const EuclideanReconstruction &reconstruction)
: markers(markers),
reconstruction(reconstruction) {}
template<typename T>
bool operator()(const T *X, T *residuals) const {
typedef Eigen::Matrix<T, 3, 3> Mat3;
typedef Eigen::Matrix<T, 3, 1> Vec3;
Vec operator()(const Vec3 &X) const {
Vec residuals(2 * markers.size());
residuals.setZero();
for (int i = 0; i < markers.size(); ++i) {
const EuclideanCamera &camera =
*reconstruction.CameraForImage(markers[i].image);
Vec3 projected = camera.R * X + camera.t;
projected /= projected(2);
residuals[2*i + 0] = projected(0) - markers[i].x;
residuals[2*i + 1] = projected(1) - markers[i].y;
}
return residuals;
Vec3 x(X);
Mat3 R(camera_.R.cast<T>());
Vec3 t(camera_.t.cast<T>());
Vec3 projected = R * x + t;
projected /= projected(2);
residuals[0] = projected(0) - T(marker_.x);
residuals[1] = projected(1) - T(marker_.y);
return true;
}
const vector<Marker> &markers;
const EuclideanReconstruction &reconstruction;
const Marker &marker_;
const EuclideanCamera &camera_;
};
} // namespace
@ -95,13 +99,35 @@ bool EuclideanIntersect(const vector<Marker> &markers,
Xp /= Xp(3);
Vec3 X = Xp.head<3>();
typedef LevenbergMarquardt<EuclideanIntersectCostFunction> Solver;
ceres::Problem problem;
EuclideanIntersectCostFunction triangulate_cost(markers, *reconstruction);
Solver::SolverParameters params;
Solver solver(triangulate_cost);
for (int i = 0; i < markers.size(); ++i) {
const Marker &marker = markers[i];
const EuclideanCamera &camera =
*reconstruction->CameraForImage(marker.image);
Solver::Results results = solver.minimize(params, &X);
problem.AddResidualBlock(
new ceres::AutoDiffCostFunction<
EuclideanIntersectCostFunctor,
2, /* num_residuals */
3>(new EuclideanIntersectCostFunctor(marker, camera)),
NULL,
&X(0));
}
// Configure the solve.
ceres::Solver::Options solver_options;
solver_options.linear_solver_type = ceres::DENSE_NORMAL_CHOLESKY;
solver_options.max_num_iterations = 50;
solver_options.update_state_every_iteration = true;
solver_options.parameter_tolerance = 1e-16;
solver_options.function_tolerance = 1e-16;
// Run the solve.
ceres::Solver::Summary summary;
ceres::Solve(solver_options, &problem, &summary);
VLOG(1) << "Summary:\n" << summary.FullReport();
// Try projecting the point; make sure it's in front of everyone.
for (int i = 0; i < cameras.size(); ++i) {

@ -18,11 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/simple_pipeline/modal_solver.h"
#include <cstdio>
#include "ceres/ceres.h"
#include "ceres/rotation.h"
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include "libmv/simple_pipeline/rigid_registration.h"
#include "libmv/multiview/panography.h"
#ifdef _MSC_VER
# define snprintf _snprintf
@ -30,7 +33,8 @@
namespace libmv {
static void ProjectMarkerOnSphere(Marker &marker, Vec3 &X) {
namespace {
void ProjectMarkerOnSphere(Marker &marker, Vec3 &X) {
X(0) = marker.x;
X(1) = marker.y;
X(2) = 1.0;
@ -38,18 +42,60 @@ static void ProjectMarkerOnSphere(Marker &marker, Vec3 &X) {
X *= 5.0 / X.norm();
}
static void ModalSolverLogProress(ProgressUpdateCallback *update_callback,
double progress)
void ModalSolverLogProress(ProgressUpdateCallback *update_callback,
double progress)
{
if (update_callback) {
char message[256];
snprintf(message, sizeof(message), "Solving progress %d%%", (int)(progress * 100));
snprintf(message, sizeof(message), "Solving progress %d%%",
(int)(progress * 100));
update_callback->invoke(progress, message);
}
}
struct ModalReprojectionError {
ModalReprojectionError(double observed_x, double observed_y, Vec3 &bundle)
: observed_x(observed_x), observed_y(observed_y), bundle(bundle) { }
template <typename T>
bool operator()(const T* quaternion, // Rotation quaternion
T* residuals) const {
T R[9];
ceres::QuaternionToRotation(quaternion, R);
// Convert bundle position from double to T.
T X[3];
X[0] = T(bundle(0));
X[1] = T(bundle(1));
X[2] = T(bundle(2));
// Compute projective coordinates: x = RX.
T x[3];
x[0] = R[0]*X[0] + R[3]*X[1] + R[6]*X[2];
x[1] = R[1]*X[0] + R[4]*X[1] + R[7]*X[2];
x[2] = R[2]*X[0] + R[5]*X[1] + R[8]*X[2];
// Compute normalized coordinates: x /= x[2].
T xn = x[0] / x[2];
T yn = x[1] / x[2];
// The error is the difference between reprojected
// and observed marker position.
residuals[0] = xn - T(observed_x);
residuals[1] = yn - T(observed_y);
return true;
}
double observed_x;
double observed_y;
Vec3 bundle;
};
} // namespace
void ModalSolver(Tracks &tracks,
EuclideanReconstruction *reconstruction,
ProgressUpdateCallback *update_callback) {
@ -59,58 +105,132 @@ void ModalSolver(Tracks &tracks,
LG << "Max image: " << max_image;
LG << "Max track: " << max_track;
Mat3 R = Mat3::Identity();
// For minimization we're using quaternions.
Vec3 zero_rotation = Vec3::Zero();
Vec4 quaternion;
ceres::AngleAxisToQuaternion(&zero_rotation(0), &quaternion(0));
for (int image = 0; image <= max_image; ++image) {
vector<Marker> all_markers = tracks.MarkersInImage(image);
ModalSolverLogProress(update_callback, (float) image / max_image);
// Skip empty frames without doing anything
// Skip empty images without doing anything.
if (all_markers.size() == 0) {
LG << "Skipping frame: " << image;
LG << "Skipping image: " << image;
continue;
}
vector<Vec3> points, reference_points;
// Construct pairs of markers from the current and previous image,
// to reproject them and find the rigid transformation between
// the previous and current image
for (int track = 0; track <= max_track; ++track) {
EuclideanPoint *point = reconstruction->PointForTrack(track);
// STEP 1: Estimate rotation analytically.
Mat3 current_R;
ceres::QuaternionToRotation(&quaternion(0), &current_R(0, 0));
// Construct point clouds for the current and previous images,
// using markers appearing in the current image for which we know
// the 3D positions.
Mat x1, x2;
for (int i = 0; i < all_markers.size(); ++i) {
Marker &marker = all_markers[i];
EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
if (point) {
Marker marker = tracks.MarkerInImageForTrack(image, track);
Vec3 X;
ProjectMarkerOnSphere(marker, X);
if (marker.image == image) {
Vec3 X;
int last_column = x1.cols();
x1.conservativeResize(3, last_column + 1);
x2.conservativeResize(3, last_column + 1);
LG << "Use track " << track << " for rigid registration between image " <<
image - 1 << " and " << image;
ProjectMarkerOnSphere(marker, X);
points.push_back(point->X);
reference_points.push_back(X);
}
x1.col(last_column) = current_R * point->X;
x2.col(last_column) = X;
}
}
if (points.size()) {
// Find rigid delta transformation to current image
RigidRegistration(reference_points, points, R);
if (x1.cols() >= 2) {
Mat3 delta_R;
// Compute the delta rotation matrix for the two point clouds.
// Could be a bit confusing at first glance, but the order
// of the clouds is indeed correct.
GetR_FixedCameraCenter(x2, x1, 1.0, &delta_R);
// Convert the delta rotation from matrix form to the final image
// rotation, stored as a quaternion.
Vec3 delta_angle_axis;
ceres::RotationMatrixToAngleAxis(&delta_R(0, 0), &delta_angle_axis(0));
Vec3 current_angle_axis;
ceres::QuaternionToAngleAxis(&quaternion(0), &current_angle_axis(0));
Vec3 angle_axis = current_angle_axis + delta_angle_axis;
ceres::AngleAxisToQuaternion(&angle_axis(0), &quaternion(0));
LG << "Analytically computed quaternion "
<< quaternion.transpose();
}
// STEP 2: Refine rotation with Ceres.
ceres::Problem problem;
ceres::LocalParameterization* quaternion_parameterization =
new ceres::QuaternionParameterization;
int num_residuals = 0;
for (int i = 0; i < all_markers.size(); ++i) {
Marker &marker = all_markers[i];
EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
if (point) {
problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
ModalReprojectionError,
2, /* num_residuals */
4>(new ModalReprojectionError(marker.x, marker.y,
point->X)),
NULL,
&quaternion(0));
num_residuals++;
problem.SetParameterization(&quaternion(0),
quaternion_parameterization);
}
}
LG << "Number of residuals: " << num_residuals;
if (num_residuals) {
// Configure the solve.
ceres::Solver::Options solver_options;
solver_options.linear_solver_type = ceres::DENSE_NORMAL_CHOLESKY;
solver_options.max_num_iterations = 50;
solver_options.update_state_every_iteration = true;
solver_options.gradient_tolerance = 1e-36;
solver_options.parameter_tolerance = 1e-36;
solver_options.function_tolerance = 1e-36;
// Run the solve.
ceres::Solver::Summary summary;
ceres::Solve(solver_options, &problem, &summary);
LG << "Summary:\n" << summary.FullReport();
LG << "Refined quaternion " << quaternion.transpose();
}
// Convert quaternion to rotation matrix.
Mat3 R;
ceres::QuaternionToRotation(&quaternion(0), &R(0, 0));
reconstruction->InsertCamera(image, R, Vec3::Zero());
// Review if there are new tracks for which a position might be reconstructed
// STEP 3: reproject all new markers that appeared at this image.
// Check whether new markers appeared on the current image
// and reproject them onto the sphere to obtain a 3D position.
for (int track = 0; track <= max_track; ++track) {
if (!reconstruction->PointForTrack(track)) {
Marker marker = tracks.MarkerInImageForTrack(image, track);
if (marker.image == image) {
// New track appeared on this image, project its position onto sphere
// New track appeared on this image,
// project its position onto sphere.
LG << "Projecting track " << track << " at image " << image;

@ -300,7 +300,7 @@ double InternalReprojectionError(const Tracks &image_tracks,
ex,
ey,
sqrt(ex*ex + ey*ey));
LG << line;
VLOG(1) << line;
total_error += sqrt(ex*ex + ey*ey);
}

@ -1,182 +0,0 @@
// Copyright (c) 2012 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/simple_pipeline/rigid_registration.h"
#include "libmv/numeric/levenberg_marquardt.h"
namespace libmv {
template<class RigidTransformation>
struct RigidRegistrationCostFunction {
public:
typedef Vec FMatrixType;
typedef RigidTransformation XMatrixType;
RigidRegistrationCostFunction(const vector<Vec3> &reference_points,
const vector<Vec3> &points):
reference_points_(reference_points),
points_(points) {}
Vec CalculateResiduals(const Mat3 &R,
const Vec3 &S,
const Vec3 &t) const {
Vec residuals(points_.size());
residuals.setZero();
// Convert scale vector to matrix
Mat3 SMat = Mat3::Identity();
SMat(0, 0) *= S(0);
SMat(1, 1) *= S(1);
SMat(2, 2) *= S(2);
for (int i = 0; i < points_.size(); i++) {
Vec3 transformed_point = R * SMat * points_[i] + t;
double norm = (transformed_point - reference_points_[i]).norm();
residuals(i) = norm * norm;
}
return residuals;
}
Vec operator()(const Vec9 &RSt) const {
Mat3 R = RotationFromEulerVector(RSt.head<3>());
Vec3 S = RSt.segment<3>(3);
Vec3 t = RSt.tail<3>();
return CalculateResiduals(R, S, t);
}
Vec operator()(const Vec3 &euler) const {
Mat3 R = RotationFromEulerVector(euler);
return CalculateResiduals(R, Vec3(1.0, 1.0, 1.0), Vec3::Zero());
}
Vec operator()(const Vec6 &Rt) const {
Mat3 R = RotationFromEulerVector(Rt.head<3>());
Vec3 t = Rt.tail<3>();
return CalculateResiduals(R, Vec3(1.0, 1.0, 1.0), t);
}
private:
vector<Vec3> reference_points_;
vector<Vec3> points_;
};
static double RigidRegistrationError(const vector<Vec3> &reference_points,
const vector<Vec3> &points,
const Mat3 &R,
const Vec3 &S,
const Vec3 &t) {
double error = 0.0;
Mat3 SMat = Mat3::Identity();
SMat(0, 0) *= S(0);
SMat(1, 1) *= S(1);
SMat(2, 2) *= S(2);
for (int i = 0; i < points.size(); i++) {
Vec3 new_point = R * SMat * points[i] + t;
double norm = (new_point - reference_points[i]).norm();
error += norm * norm;
}
error /= points.size();
return error;
}
double RigidRegistration(const vector<Vec3> &reference_points,
const vector<Vec3> &points,
Mat3 &R,
Vec3 &S,
Vec3 &t) {
typedef LevenbergMarquardt<RigidRegistrationCostFunction <Vec9> > Solver;
RigidRegistrationCostFunction<Vec9> rigidregistration_cost(reference_points, points);
Solver solver(rigidregistration_cost);
Vec9 RSt = Vec9::Zero();
RSt(3) = RSt(4) = RSt(5) = 1.0;
Solver::SolverParameters params;
/*Solver::Results results = */ solver.minimize(params, &RSt);
/* TODO(sergey): better error handling here */
LG << "Rigid registration completed, rotation is:" << RSt.head<3>().transpose()
<< ", scale is " << RSt.segment<3>(3).transpose()
<< ", translation is " << RSt.tail<3>().transpose();
// Decompose individual rotation and translation
R = RotationFromEulerVector(RSt.head<3>());
S = RSt.segment<3>(3);
t = RSt.tail<3>();
return RigidRegistrationError(reference_points, points, R, S, t);
}
double RigidRegistration(const vector<Vec3> &reference_points,
const vector<Vec3> &points,
Mat3 &R,
Vec3 &t) {
typedef LevenbergMarquardt<RigidRegistrationCostFunction <Vec6> > Solver;
RigidRegistrationCostFunction<Vec6> rigidregistration_cost(reference_points, points);
Solver solver(rigidregistration_cost);
Vec6 Rt = Vec6::Zero();
Solver::SolverParameters params;
/*Solver::Results results = */solver.minimize(params, &Rt);
/* TODO(sergey): better error handling here */
LG << "Rigid registration completed, rotation is:" << Rt.head<3>().transpose()
<< ", translation is " << Rt.tail<3>().transpose();
R = RotationFromEulerVector(Rt.head<3>());
t = Rt.tail<3>();
return RigidRegistrationError(reference_points, points, R, Vec3(1.0, 1.0, 1.0), t);
}
double RigidRegistration(const vector<Vec3> &reference_points,
const vector<Vec3> &points,
Mat3 &R) {
typedef LevenbergMarquardt<RigidRegistrationCostFunction <Vec3> > Solver;
RigidRegistrationCostFunction<Vec3> rigidregistration_cost(reference_points, points);
Solver solver(rigidregistration_cost);
Vec3 euler = Vec3::Zero();
Solver::SolverParameters params;
/*Solver::Results results = */solver.minimize(params, &euler);
/* TODO(sergey): better error handling here */
LG << "Rigid registration completed, rotation is:" << euler.transpose();
R = RotationFromEulerVector(euler);
return RigidRegistrationError(reference_points, points, R, Vec3(1.0, 1.0, 1.0), Vec3::Zero());
}
} // namespace libmv

@ -1,68 +0,0 @@
// Copyright (c) 2012 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_SIMPLE_PIPELINE_RIGID_REGISTRATION_H_
#define LIBMV_SIMPLE_PIPELINE_RIGID_REGISTRATION_H_
#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
/*!
Searches for an affine transformation of a rigid 3D object, defined by its
vertex positions, from its current state (called points) to its desired
state (called reference points).
Returns the rotation matrix, per-component scale vector and translation which
transform points to the state closest to reference_points.
The return value is the average squared distance between the reference state
and the transformed one.
Minimization of the distance between point pairs is used to register such a
rigid transformation, and the algorithm assumes that pairs of points are
defined by the point's index in a vector, so points with the same index
belong to the same pair.
*/
double RigidRegistration(const vector<Vec3> &reference_points,
const vector<Vec3> &points,
Mat3 &R,
Vec3 &S,
Vec3 &t);
/*!
* Same as RigidRegistration but provides registration of rotation and translation only
*/
double RigidRegistration(const vector<Vec3> &reference_points,
const vector<Vec3> &points,
Mat3 &R,
Vec3 &t);
/*!
* Same as RigidRegistration but provides registration of rotation only
*/
double RigidRegistration(const vector<Vec3> &reference_points,
const vector<Vec3> &points,
Mat3 &R);
} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_RIGID_REGISTRATION_H_
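// Editor's note: the LevenbergMarquardt-based registration removed here is
// replaced, in the modal solver above, by the closed-form
// GetR_FixedCameraCenter() from libmv/multiview/panography.h. For context, the
// rotation-only problem also has a classic closed-form solution (Kabsch /
// orthogonal Procrustes); the sketch below is an Eigen-only illustration of
// that idea, not the libmv implementation.
#include <vector>
#include <Eigen/Dense>

// Find R minimizing sum ||R * points[i] - reference_points[i]||^2, with both
// point sets expressed about a fixed origin (as in the fixed-camera-center
// case, so no centering step is needed).
Eigen::Matrix3d RotationBetweenPointSets(
    const std::vector<Eigen::Vector3d> &reference_points,
    const std::vector<Eigen::Vector3d> &points) {
  Eigen::Matrix3d covariance = Eigen::Matrix3d::Zero();
  for (size_t i = 0; i < points.size(); ++i) {
    covariance += reference_points[i] * points[i].transpose();
  }
  Eigen::JacobiSVD<Eigen::Matrix3d> svd(
      covariance, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix3d R = svd.matrixU() * svd.matrixV().transpose();
  // Guard against a reflection (det = -1) by flipping the last singular
  // direction.
  if (R.determinant() < 0.0) {
    Eigen::Matrix3d flip = Eigen::Matrix3d::Identity();
    flip(2, 2) = -1.0;
    R = svd.matrixU() * flip * svd.matrixV().transpose();
  }
  return R;
}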

@ -1 +0,0 @@
v3d_verbosity.patch

@ -1,12 +0,0 @@
diff --git a/src/libmv/simple_pipeline/bundle.cc b/src/libmv/simple_pipeline/bundle.cc
index fa0b6cc..d382cd5 100644
--- a/src/libmv/simple_pipeline/bundle.cc
+++ b/src/libmv/simple_pipeline/bundle.cc
@@ -194,7 +194,6 @@ void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
double v3d_inlier_threshold = 500000.0;
// Finally, run the bundle adjustment.
- V3D::optimizerVerbosenessLevel = 1;
V3D::CommonInternalsMetricBundleOptimizer opt(v3d_bundle_intrinsics,
v3d_inlier_threshold,
v3d_K,

@ -54,6 +54,7 @@ set(SRC
internal/ceres/compressed_row_sparse_matrix.cc
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/coordinate_descent_minimizer.cc
internal/ceres/corrector.cc
internal/ceres/cxsparse.cc
internal/ceres/dense_normal_cholesky_solver.cc
@ -71,11 +72,18 @@ set(SRC
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_operator.cc
internal/ceres/linear_solver.cc
internal/ceres/line_search.cc
internal/ceres/line_search_direction.cc
internal/ceres/line_search_minimizer.cc
internal/ceres/local_parameterization.cc
internal/ceres/loss_function.cc
internal/ceres/low_rank_inverse_hessian.cc
internal/ceres/minimizer.cc
internal/ceres/normal_prior.cc
internal/ceres/parameter_block_ordering.cc
internal/ceres/partitioned_matrix_view.cc
internal/ceres/polynomial_solver.cc
internal/ceres/polynomial.cc
internal/ceres/preconditioner.cc
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/program.cc
@ -84,7 +92,7 @@ set(SRC
internal/ceres/runtime_numeric_diff_cost_function.cc
internal/ceres/schur_complement_solver.cc
internal/ceres/schur_eliminator.cc
internal/ceres/schur_ordering.cc
internal/ceres/schur_jacobi_preconditioner.cc
internal/ceres/scratch_evaluate_preparer.cc
internal/ceres/solver.cc
internal/ceres/solver_impl.cc
@ -99,26 +107,34 @@ set(SRC
internal/ceres/types.cc
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility.cc
internal/ceres/wall_time.cc
include/ceres/autodiff_cost_function.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/cost_function.h
include/ceres/cost_function_to_functor.h
include/ceres/crs_matrix.h
include/ceres/dynamic_autodiff_cost_function.h
include/ceres/fpclassify.h
include/ceres/gradient_checker.h
include/ceres/internal/autodiff.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
include/ceres/internal/macros.h
include/ceres/internal/manual_constructor.h
include/ceres/internal/numeric_diff.h
include/ceres/internal/port.h
include/ceres/internal/scoped_ptr.h
include/ceres/internal/variadic_evaluate.h
include/ceres/iteration_callback.h
include/ceres/jet.h
include/ceres/local_parameterization.h
include/ceres/loss_function.h
include/ceres/normal_prior.h
include/ceres/numeric_diff_cost_function.h
include/ceres/numeric_diff_functor.h
include/ceres/ordered_groups.h
include/ceres/problem.h
include/ceres/rotation.h
include/ceres/sized_cost_function.h
@ -141,6 +157,7 @@ set(SRC
internal/ceres/compressed_row_jacobian_writer.h
internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/conjugate_gradients_solver.h
internal/ceres/coordinate_descent_minimizer.h
internal/ceres/corrector.h
internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
@ -150,6 +167,7 @@ set(SRC
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.h
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.h
internal/ceres/gradient_checking_cost_function.h
internal/ceres/graph_algorithms.h
@ -161,13 +179,19 @@ set(SRC
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.h
internal/ceres/linear_solver.h
internal/ceres/line_search_direction.h
internal/ceres/line_search.h
internal/ceres/line_search_minimizer.h
internal/ceres/low_rank_inverse_hessian.h
internal/ceres/map_util.h
internal/ceres/matrix_proto.h
internal/ceres/minimizer.h
internal/ceres/mutex.h
internal/ceres/parameter_block.h
internal/ceres/parameter_block_ordering.h
internal/ceres/partitioned_matrix_view.h
internal/ceres/polynomial_solver.h
internal/ceres/polynomial.h
internal/ceres/preconditioner.h
internal/ceres/problem_impl.h
internal/ceres/program_evaluator.h
internal/ceres/program.h
@ -178,7 +202,7 @@ set(SRC
internal/ceres/schur_complement_solver.h
internal/ceres/schur_eliminator.h
internal/ceres/schur_eliminator_impl.h
internal/ceres/schur_ordering.h
internal/ceres/schur_jacobi_preconditioner.h
internal/ceres/scratch_evaluate_preparer.h
internal/ceres/solver_impl.h
internal/ceres/sparse_matrix.h
@ -192,6 +216,7 @@ set(SRC
internal/ceres/trust_region_strategy.h
internal/ceres/visibility_based_preconditioner.h
internal/ceres/visibility.h
internal/ceres/wall_time.h
)
#if(FALSE)
@ -236,8 +261,15 @@ add_definitions(
-DCERES_NO_CXSPARSE
-DCERES_NO_PROTOCOL_BUFFERS
-DCERES_RESTRICT_SCHUR_SPECIALIZATION
-DCERES_HAVE_RWLOCK
)
if(WITH_OPENMP)
add_definitions(
-DCERES_USE_OPENMP
)
endif()
if(MSVC10)
add_definitions(
-D"CERES_HASH_NAMESPACE_START=namespace std {"

File diff suppressed because it is too large.

@ -23,6 +23,10 @@ defs.append('CERES_NO_SUITESPARSE')
defs.append('CERES_NO_CXSPARSE')
defs.append('CERES_NO_PROTOCOL_BUFFERS')
defs.append('CERES_RESTRICT_SCHUR_SPECIALIZATION')
defs.append('CERES_HAVE_RWLOCK')
if env['WITH_BF_OPENMP']:
defs.append('CERES_USE_OPENMP')
if 'Mac OS X 10.5' in env['MACOSX_SDK_CHECK']:
defs.append('CERES_NO_TR1')

@ -9,7 +9,8 @@ fi
repo="https://ceres-solver.googlesource.com/ceres-solver"
branch="master"
tag="1.3.0"
#tag="1.4.0"
tag=""
tmp=`mktemp -d`
checkout="$tmp/ceres"
@ -120,7 +121,7 @@ set(INC
include
internal
../gflags
../..
../../
)
set(INC_SYS
@ -161,8 +162,15 @@ add_definitions(
-DCERES_NO_CXSPARSE
-DCERES_NO_PROTOCOL_BUFFERS
-DCERES_RESTRICT_SCHUR_SPECIALIZATION
-DCERES_HAVE_RWLOCK
)
if(WITH_OPENMP)
add_definitions(
-DCERES_USE_OPENMP
)
endif()
if(MSVC10)
add_definitions(
-D"CERES_HASH_NAMESPACE_START=namespace std {"
@ -212,6 +220,10 @@ defs.append('CERES_NO_SUITESPARSE')
defs.append('CERES_NO_CXSPARSE')
defs.append('CERES_NO_PROTOCOL_BUFFERS')
defs.append('CERES_RESTRICT_SCHUR_SPECIALIZATION')
defs.append('CERES_HAVE_RWLOCK')
if env['WITH_BF_OPENMP']:
defs.append('CERES_USE_OPENMP')
if 'Mac OS X 10.5' in env['MACOSX_SDK_CHECK']:
defs.append('CERES_NO_TR1')

@ -2,21 +2,28 @@ include/ceres/autodiff_cost_function.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/cost_function.h
include/ceres/cost_function_to_functor.h
include/ceres/crs_matrix.h
include/ceres/dynamic_autodiff_cost_function.h
include/ceres/fpclassify.h
include/ceres/gradient_checker.h
include/ceres/internal/autodiff.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
include/ceres/internal/macros.h
include/ceres/internal/manual_constructor.h
include/ceres/internal/numeric_diff.h
include/ceres/internal/port.h
include/ceres/internal/scoped_ptr.h
include/ceres/internal/variadic_evaluate.h
include/ceres/iteration_callback.h
include/ceres/jet.h
include/ceres/local_parameterization.h
include/ceres/loss_function.h
include/ceres/normal_prior.h
include/ceres/numeric_diff_cost_function.h
include/ceres/numeric_diff_functor.h
include/ceres/ordered_groups.h
include/ceres/problem.h
include/ceres/rotation.h
include/ceres/sized_cost_function.h
@ -54,6 +61,8 @@ internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/conjugate_gradients_solver.h
internal/ceres/coordinate_descent_minimizer.cc
internal/ceres/coordinate_descent_minimizer.h
internal/ceres/corrector.cc
internal/ceres/corrector.h
internal/ceres/cxsparse.cc
@ -71,6 +80,7 @@ internal/ceres/dogleg_strategy.cc
internal/ceres/dogleg_strategy.h
internal/ceres/evaluator.cc
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.cc
internal/ceres/file.h
internal/ceres/generated/schur_eliminator_2_2_2.cc
@ -107,18 +117,31 @@ internal/ceres/linear_operator.cc
internal/ceres/linear_operator.h
internal/ceres/linear_solver.cc
internal/ceres/linear_solver.h
internal/ceres/line_search.cc
internal/ceres/line_search_direction.cc
internal/ceres/line_search_direction.h
internal/ceres/line_search.h
internal/ceres/line_search_minimizer.cc
internal/ceres/line_search_minimizer.h
internal/ceres/local_parameterization.cc
internal/ceres/loss_function.cc
internal/ceres/low_rank_inverse_hessian.cc
internal/ceres/low_rank_inverse_hessian.h
internal/ceres/map_util.h
internal/ceres/matrix_proto.h
internal/ceres/minimizer.cc
internal/ceres/minimizer.h
internal/ceres/mutex.h
internal/ceres/normal_prior.cc
internal/ceres/parameter_block.h
internal/ceres/parameter_block_ordering.cc
internal/ceres/parameter_block_ordering.h
internal/ceres/partitioned_matrix_view.cc
internal/ceres/partitioned_matrix_view.h
internal/ceres/polynomial_solver.cc
internal/ceres/polynomial_solver.h
internal/ceres/polynomial.cc
internal/ceres/polynomial.h
internal/ceres/preconditioner.cc
internal/ceres/preconditioner.h
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/problem_impl.h
@ -137,8 +160,8 @@ internal/ceres/schur_complement_solver.h
internal/ceres/schur_eliminator.cc
internal/ceres/schur_eliminator.h
internal/ceres/schur_eliminator_impl.h
internal/ceres/schur_ordering.cc
internal/ceres/schur_ordering.h
internal/ceres/schur_jacobi_preconditioner.cc
internal/ceres/schur_jacobi_preconditioner.h
internal/ceres/scratch_evaluate_preparer.cc
internal/ceres/scratch_evaluate_preparer.h
internal/ceres/solver.cc
@ -166,3 +189,5 @@ internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility_based_preconditioner.h
internal/ceres/visibility.cc
internal/ceres/visibility.h
internal/ceres/wall_time.cc
internal/ceres/wall_time.h

@ -28,10 +28,10 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Helpers for making CostFunctions as needed by the least squares framework,
// with Jacobians computed via automatic differentiation. For more information
// on automatic differentiation, see the wikipedia article at
// http://en.wikipedia.org/wiki/Automatic_differentiation
// Create CostFunctions as needed by the least squares framework, with
// Jacobians computed via automatic differentiation. For more
// information on automatic differentiation, see the wikipedia article
// at http://en.wikipedia.org/wiki/Automatic_differentiation
//
// To get an auto differentiated cost function, you must define a class with a
// templated operator() (a functor) that computes the cost function in terms of
@ -57,8 +57,8 @@
// To write an auto-differentiable cost function for the above model, first
// define the object
//
// class MyScalarCostFunction {
// MyScalarCostFunction(double k): k_(k) {}
// class MyScalarCostFunctor {
// MyScalarCostFunctor(double k): k_(k) {}
//
// template <typename T>
// bool operator()(const T* const x , const T* const y, T* e) const {
@ -80,32 +80,32 @@
// it can be constructed as follows.
//
// CostFunction* cost_function
// = new AutoDiffCostFunction<MyScalarCostFunction, 1, 2, 2>(
// new MyScalarCostFunction(1.0)); ^ ^ ^
// | | |
// Dimension of residual ------+ | |
// Dimension of x ----------------+ |
// Dimension of y -------------------+
// = new AutoDiffCostFunction<MyScalarCostFunctor, 1, 2, 2>(
// new MyScalarCostFunctor(1.0)); ^ ^ ^
// | | |
// Dimension of residual -----+ | |
// Dimension of x ---------------+ |
// Dimension of y ------------------+
//
// In this example, there is usually an instance for each measurement of k.
//
// In the instantiation above, the template parameters following
// "MyScalarCostFunction", "1, 2, 2", describe the functor as computing a
// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing a
// 1-dimensional output from two arguments, both 2-dimensional.
//
// The autodiff cost function also supports cost functions with a
// runtime-determined number of residuals. For example:
//
// CostFunction* cost_function
// = new AutoDiffCostFunction<MyScalarCostFunction, DYNAMIC, 2, 2>(
// new CostFunctionWithDynamicNumResiduals(1.0), ^ ^ ^
// runtime_number_of_residuals); <----+ | | |
// | | | |
// | | | |
// Actual number of residuals ------+ | | |
// Indicate dynamic number of residuals ---------+ | |
// Dimension of x -------------------------------------+ |
// Dimension of y ----------------------------------------+
// = new AutoDiffCostFunction<MyScalarCostFunctor, DYNAMIC, 2, 2>(
// new CostFunctorWithDynamicNumResiduals(1.0), ^ ^ ^
// runtime_number_of_residuals); <----+ | | |
// | | | |
// | | | |
// Actual number of residuals ------+ | | |
// Indicate dynamic number of residuals --------+ | |
// Dimension of x ------------------------------------+ |
// Dimension of y ---------------------------------------+
//
// The framework can currently accommodate cost functions of up to 6 independent
// variables, and there is no limit on the dimensionality of each of them.
@ -119,7 +119,7 @@
// functions is to get the sizing wrong. In particular, there is a tendency to
// set the template parameters to (dimension of residual, number of parameters)
// instead of passing a dimension parameter for *every parameter*. In the
// example above, that would be <MyScalarCostFunction, 1, 2>, which is missing
// example above, that would be <MyScalarCostFunctor, 1, 2>, which is missing
// the last '2' argument. Please be careful when setting the size parameters.
#ifndef CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_
@ -154,16 +154,21 @@ template <typename CostFunctor,
int N2 = 0, // Number of parameters in block 2.
int N3 = 0, // Number of parameters in block 3.
int N4 = 0, // Number of parameters in block 4.
int N5 = 0> // Number of parameters in block 5.
class AutoDiffCostFunction :
public SizedCostFunction<M, N0, N1, N2, N3, N4, N5> {
int N5 = 0, // Number of parameters in block 5.
int N6 = 0, // Number of parameters in block 6.
int N7 = 0, // Number of parameters in block 7.
int N8 = 0, // Number of parameters in block 8.
int N9 = 0> // Number of parameters in block 9.
class AutoDiffCostFunction : public SizedCostFunction<M,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9> {
public:
// Takes ownership of functor. Uses the template-provided value for the
// number of residuals ("M").
explicit AutoDiffCostFunction(CostFunctor* functor)
: functor_(functor) {
CHECK_NE(M, DYNAMIC) << "Can't run the fixed-size constructor if the "
<< "number of residuals is set to ceres::DYNAMIC.";
<< "number of residuals is set to ceres::DYNAMIC.";
}
// Takes ownership of functor. Ignores the template-provided number of
@ -174,8 +179,9 @@ class AutoDiffCostFunction :
AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
: functor_(functor) {
CHECK_EQ(M, DYNAMIC) << "Can't run the dynamic-size constructor if the "
<< "number of residuals is not ceres::DYNAMIC.";
SizedCostFunction<M, N0, N1, N2, N3, N4, N5>::set_num_residuals(num_residuals);
<< "number of residuals is not ceres::DYNAMIC.";
SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
::set_num_residuals(num_residuals);
}
virtual ~AutoDiffCostFunction() {}
@ -190,14 +196,15 @@ class AutoDiffCostFunction :
double** jacobians) const {
if (!jacobians) {
return internal::VariadicEvaluate<
CostFunctor, double, N0, N1, N2, N3, N4, N5>
CostFunctor, double, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
::Call(*functor_, parameters, residuals);
}
return internal::AutoDiff<CostFunctor, double,
N0, N1, N2, N3, N4, N5>::Differentiate(
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Differentiate(
*functor_,
parameters,
SizedCostFunction<M, N0, N1, N2, N3, N4, N5>::num_residuals(),
SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
::num_residuals(),
residuals,
jacobians);
}
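// Editor's note: a complete, compilable instance of the two-block example from
// the header comment above might look as follows. This is only a sketch: the
// functor and its dimensions come straight from the documentation, while the
// surrounding main() is illustrative.
#include "ceres/ceres.h"

class MyScalarCostFunctor {
 public:
  explicit MyScalarCostFunctor(double k) : k_(k) {}

  template <typename T>
  bool operator()(const T* const x, const T* const y, T* e) const {
    // e = k - x' * y, with x and y both 2-dimensional.
    e[0] = T(k_) - (x[0] * y[0] + x[1] * y[1]);
    return true;
  }

 private:
  double k_;
};

int main() {
  double x[2] = { 1.0, 1.0 };
  double y[2] = { 1.0, 1.0 };
  ceres::Problem problem;
  // 1 residual, two 2-dimensional parameter blocks; the problem takes
  // ownership of the cost function.
  problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<MyScalarCostFunctor, 1, 2, 2>(
          new MyScalarCostFunctor(10.0)),
      NULL, x, y);
  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  return 0;
}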

@ -34,12 +34,20 @@
#ifndef CERES_PUBLIC_CERES_H_
#define CERES_PUBLIC_CERES_H_
#define CERES_VERSION 1.5.0
#define CERES_ABI_VERSION 1.5.0
#include "ceres/autodiff_cost_function.h"
#include "ceres/cost_function.h"
#include "ceres/cost_function_to_functor.h"
#include "ceres/crs_matrix.h"
#include "ceres/iteration_callback.h"
#include "ceres/jet.h"
#include "ceres/local_parameterization.h"
#include "ceres/loss_function.h"
#include "ceres/numeric_diff_cost_function.h"
#include "ceres/numeric_diff_functor.h"
#include "ceres/ordered_groups.h"
#include "ceres/problem.h"
#include "ceres/sized_cost_function.h"
#include "ceres/solver.h"

@ -0,0 +1,752 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// CostFunctionToFunctor is an adapter class that allows users to use
// CostFunction objects in templated functors which are to be used for
// automatic differentiation. This allows the user to seamlessly mix
// analytic, numeric and automatic differentiation.
//
// For example, let us assume that
//
// class IntrinsicProjection : public SizedCostFunction<2, 5, 3> {
// public:
// IntrinsicProjection(const double* observations);
// virtual bool Evaluate(double const* const* parameters,
// double* residuals,
// double** jacobians) const;
// };
//
// is a cost function that implements the projection of a point in its
// local coordinate system onto its image plane and subtracts it from
// the observed point projection. It can compute its residual and,
// via either analytic or numerical differentiation, its
// jacobians.
//
// Now we would like to compose the action of this CostFunction with
// the action of camera extrinsics, i.e., rotation and
// translation. Say we have a templated function
//
// template<typename T>
// void RotateAndTranslatePoint(const T* rotation,
// const T* translation,
// const T* point,
// T* result);
//
// Then we can now do the following,
//
// struct CameraProjection {
// CameraProjection(double* observation) {
// intrinsic_projection_.reset(
// new CostFunctionToFunctor<2, 5, 3>(
// new IntrinsicProjection(observation_)));
// }
// template <typename T>
// bool operator()(const T* rotation,
// const T* translation,
// const T* intrinsics,
// const T* point,
// T* residual) const {
// T transformed_point[3];
// RotateAndTranslatePoint(rotation, translation, point, transformed_point);
//
// // Note that we call intrinsic_projection_, just like it was
// // any other templated functor.
//
// return (*intrinsic_projection_)(intrinsics, transformed_point, residual);
// }
//
// private:
// scoped_ptr<CostFunctionToFunctor<2,5,3> > intrinsic_projection_;
// };
#ifndef CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_
#define CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_
#include <numeric>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
namespace ceres {
template <int kNumResiduals,
int N0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
class CostFunctionToFunctor {
public:
explicit CostFunctionToFunctor(CostFunction* cost_function)
: cost_function_(cost_function) {
CHECK_NOTNULL(cost_function);
CHECK_GE(kNumResiduals, 0);
CHECK_EQ(cost_function->num_residuals(), kNumResiduals);
// This block breaks the 80 column rule to keep it somewhat readable.
CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0)))
<< "Zero block cannot precede a non-zero block. Block sizes are "
<< "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
<< N8 << ", " << N9;
const vector<int16>& parameter_block_sizes =
cost_function->parameter_block_sizes();
const int num_parameter_blocks =
(N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
(N5 > 0) + (N6 > 0) + (N7 > 0) + (N8 > 0) + (N9 > 0);
CHECK_EQ(parameter_block_sizes.size(), num_parameter_blocks);
CHECK_EQ(N0, parameter_block_sizes[0]);
if (parameter_block_sizes.size() > 1) CHECK_EQ(N1, parameter_block_sizes[1]); // NOLINT
if (parameter_block_sizes.size() > 2) CHECK_EQ(N2, parameter_block_sizes[2]); // NOLINT
if (parameter_block_sizes.size() > 3) CHECK_EQ(N3, parameter_block_sizes[3]); // NOLINT
if (parameter_block_sizes.size() > 4) CHECK_EQ(N4, parameter_block_sizes[4]); // NOLINT
if (parameter_block_sizes.size() > 5) CHECK_EQ(N5, parameter_block_sizes[5]); // NOLINT
if (parameter_block_sizes.size() > 6) CHECK_EQ(N6, parameter_block_sizes[6]); // NOLINT
if (parameter_block_sizes.size() > 7) CHECK_EQ(N7, parameter_block_sizes[7]); // NOLINT
if (parameter_block_sizes.size() > 8) CHECK_EQ(N8, parameter_block_sizes[8]); // NOLINT
if (parameter_block_sizes.size() > 9) CHECK_EQ(N9, parameter_block_sizes[9]); // NOLINT
CHECK_EQ(accumulate(parameter_block_sizes.begin(),
parameter_block_sizes.end(), 0),
N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9);
}
bool operator()(const double* x0, double* residuals) const {
CHECK_NE(N0, 0);
CHECK_EQ(N1, 0);
CHECK_EQ(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
return cost_function_->Evaluate(&x0, residuals, NULL);
}
bool operator()(const double* x0,
const double* x1,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_EQ(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(2);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(3);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(4);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(5);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(6);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(7);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
const double* x7,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(8);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
parameter_blocks[7] = x7;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
const double* x7,
const double* x8,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_NE(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const double*> parameter_blocks(9);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
parameter_blocks[7] = x7;
parameter_blocks[8] = x8;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
const double* x7,
const double* x8,
const double* x9,
double* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_NE(N8, 0);
CHECK_NE(N9, 0);
internal::FixedArray<const double*> parameter_blocks(10);
parameter_blocks[0] = x0;
parameter_blocks[1] = x1;
parameter_blocks[2] = x2;
parameter_blocks[3] = x3;
parameter_blocks[4] = x4;
parameter_blocks[5] = x5;
parameter_blocks[6] = x6;
parameter_blocks[7] = x7;
parameter_blocks[8] = x8;
parameter_blocks[9] = x9;
return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL);
}
template <typename JetT>
bool operator()(const JetT* x0, JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_EQ(N1, 0);
CHECK_EQ(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
return EvaluateWithJets(&x0, residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_EQ(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(2);
jets[0] = x0;
jets[1] = x1;
return EvaluateWithJets(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_EQ(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(3);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
return EvaluateWithJets(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_EQ(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(4);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
return EvaluateWithJets(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_EQ(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(5);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
return EvaluateWithJets(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_EQ(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(6);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
return EvaluateWithJets(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
const JetT* x6,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_EQ(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(7);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
jets[6] = x6;
return EvaluateWithJets(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
const JetT* x6,
const JetT* x7,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_EQ(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(8);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
jets[6] = x6;
jets[7] = x7;
return EvaluateWithJets(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
const JetT* x6,
const JetT* x7,
const JetT* x8,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_NE(N8, 0);
CHECK_EQ(N9, 0);
internal::FixedArray<const JetT*> jets(9);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
jets[6] = x6;
jets[7] = x7;
jets[8] = x8;
return EvaluateWithJets(jets.get(), residuals);
}
template <typename JetT>
bool operator()(const JetT* x0,
const JetT* x1,
const JetT* x2,
const JetT* x3,
const JetT* x4,
const JetT* x5,
const JetT* x6,
const JetT* x7,
const JetT* x8,
const JetT* x9,
JetT* residuals) const {
CHECK_NE(N0, 0);
CHECK_NE(N1, 0);
CHECK_NE(N2, 0);
CHECK_NE(N3, 0);
CHECK_NE(N4, 0);
CHECK_NE(N5, 0);
CHECK_NE(N6, 0);
CHECK_NE(N7, 0);
CHECK_NE(N8, 0);
CHECK_NE(N9, 0);
internal::FixedArray<const JetT*> jets(10);
jets[0] = x0;
jets[1] = x1;
jets[2] = x2;
jets[3] = x3;
jets[4] = x4;
jets[5] = x5;
jets[6] = x6;
jets[7] = x7;
jets[8] = x8;
jets[9] = x9;
return EvaluateWithJets(jets.get(), residuals);
}
private:
template <typename JetT>
bool EvaluateWithJets(const JetT** inputs, JetT* output) const {
const int kNumParameters = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
const vector<int16>& parameter_block_sizes =
cost_function_->parameter_block_sizes();
const int num_parameter_blocks = parameter_block_sizes.size();
const int num_residuals = cost_function_->num_residuals();
internal::FixedArray<double> parameters(kNumParameters);
internal::FixedArray<double*> parameter_blocks(num_parameter_blocks);
internal::FixedArray<double> jacobians(num_residuals * kNumParameters);
internal::FixedArray<double*> jacobian_blocks(num_parameter_blocks);
internal::FixedArray<double> residuals(num_residuals);
// Build a set of arrays to get the residuals and jacobians from
// the CostFunction wrapped by this functor.
double* parameter_ptr = parameters.get();
double* jacobian_ptr = jacobians.get();
for (int i = 0; i < num_parameter_blocks; ++i) {
parameter_blocks[i] = parameter_ptr;
jacobian_blocks[i] = jacobian_ptr;
for (int j = 0; j < parameter_block_sizes[i]; ++j) {
*parameter_ptr++ = inputs[i][j].a;
}
jacobian_ptr += num_residuals * parameter_block_sizes[i];
}
if (!cost_function_->Evaluate(parameter_blocks.get(),
residuals.get(),
jacobian_blocks.get())) {
return false;
}
// Now that we have the incoming Jets, which carry the
// partial derivatives of each of the inputs w.r.t. some other
// underlying parameters, the derivative of the outputs of the
// cost function w.r.t. the same underlying parameters can now
// be computed by applying the chain rule.
//
// d output[i] d output[i] d input[j]
// -------------- = sum_j ----------- * ------------
// d parameter[k] d input[j] d parameter[k]
//
// d input[j]
// -------------- = inputs[j], so
// d parameter[k]
//
// outputJet[i] = sum_k jacobian[i][k] * inputJet[k]
//
// The following loop, iterates over the residuals, computing one
// output jet at a time.
for (int i = 0; i < num_residuals; ++i) {
output[i].a = residuals[i];
output[i].v.setZero();
for (int j = 0; j < num_parameter_blocks; ++j) {
const int16 block_size = parameter_block_sizes[j];
for (int k = 0; k < parameter_block_sizes[j]; ++k) {
output[i].v +=
jacobian_blocks[j][i * block_size + k] * inputs[j][k].v;
}
}
}
return true;
}
private:
internal::scoped_ptr<CostFunction> cost_function_;
};
} // namespace ceres
#endif // CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_
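// Editor's note: filling in the CameraProjection example from the header
// comment, a usable version of the wrapper might look like this. It is a
// sketch: IntrinsicProjection and RotateAndTranslatePoint are the hypothetical
// types from the comment above, not real Ceres API.
struct CameraProjection {
  explicit CameraProjection(double* observation) {
    intrinsic_projection_.reset(
        new ceres::CostFunctionToFunctor<2, 5, 3>(
            new IntrinsicProjection(observation)));
  }

  template <typename T>
  bool operator()(const T* rotation,
                  const T* translation,
                  const T* intrinsics,
                  const T* point,
                  T* residual) const {
    T transformed_point[3];
    RotateAndTranslatePoint(rotation, translation, point, transformed_point);
    // The wrapped cost function is called like any other templated functor;
    // its Jacobians are chained through automatically via EvaluateWithJets().
    return (*intrinsic_projection_)(intrinsics, transformed_point, residual);
  }

 private:
  ceres::internal::scoped_ptr<ceres::CostFunctionToFunctor<2, 5, 3> >
      intrinsic_projection_;
};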

@ -44,17 +44,35 @@ struct CRSMatrix {
int num_rows;
int num_cols;
// A compressed row matrix stores its contents in three arrays.
// The non-zero pattern of the i^th row is given by
// A compressed row matrix stores its contents in three arrays,
// rows, cols and values.
//
// rows[cols[i] ... cols[i + 1]]
// rows is a num_rows + 1 sized array that points into the cols and
// values array. For each row i:
//
// and the corresponding values by
// cols[rows[i]] ... cols[rows[i + 1] - 1] are the indices of the
// non-zero columns of row i.
//
// values[cols[i] ... cols[i + 1]]
// values[rows[i]] .. values[rows[i + 1] - 1] are the values of the
// corresponding entries.
//
// Thus, cols is a vector of size num_cols + 1, and rows and values
// have as many entries as number of non-zeros in the matrix.
// cols and values contain as many entries as there are non-zeros in
// the matrix.
//
// e.g, consider the 3x4 sparse matrix
//
// [ 0 10 0 4 ]
// [ 0 2 -3 2 ]
// [ 1 2 0 0 ]
//
// The three arrays will be:
//
//
// -row0- ---row1--- -row2-
// rows = [ 0, 2, 5, 7]
// cols = [ 1, 3, 1, 2, 3, 0, 1]
// values = [10, 4, 2, -3, 2, 1, 2]
vector<int> cols;
vector<int> rows;
vector<double> values;
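// Editor's note: for concreteness, the 3x4 example above can be packed into a
// ceres::CRSMatrix directly, since it is a plain struct (a sketch, using
// C++03-style initialization).
#include "ceres/crs_matrix.h"

ceres::CRSMatrix MakeExampleMatrix() {
  ceres::CRSMatrix m;
  m.num_rows = 3;
  m.num_cols = 4;
  const int    rows[]   = { 0, 2, 5, 7 };
  const int    cols[]   = { 1, 3, 1, 2, 3, 0, 1 };
  const double values[] = { 10, 4, 2, -3, 2, 1, 2 };
  m.rows.assign(rows, rows + 4);
  m.cols.assign(cols, cols + 7);
  m.values.assign(values, values + 7);
  // Entries of row i live at indices m.rows[i] .. m.rows[i + 1] - 1 of
  // m.cols / m.values; e.g. row 1 is (col 1, 2), (col 2, -3), (col 3, 2).
  return m;
}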

@ -0,0 +1,215 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: mierle@gmail.com (Keir Mierle)
// sameeragarwal@google.com (Sameer Agarwal)
// thadh@gmail.com (Thad Hughes)
//
// This autodiff implementation differs from the one found in
// autodiff_cost_function.h by supporting autodiff on cost functions with
// variable numbers of parameters with variable sizes. With the other
// implementation, all the sizes (both the number of parameter blocks and the
// size of each block) must be fixed at compile time.
//
// The functor API differs slightly from the API for fixed size autodiff; the
// expected interface for the cost functors is:
//
// struct MyCostFunctor {
// template<typename T>
// bool operator()(T const* const* parameters, T* residuals) const {
// // Use parameters[i] to access the i'th parameter block.
// }
// }
//
// Since the sizing of the parameters is done at runtime, you must also specify
// the sizes after creating the dynamic autodiff cost function. For example:
//
// DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function(
// new MyCostFunctor());
// cost_function.AddParameterBlock(5);
// cost_function.AddParameterBlock(10);
// cost_function.SetNumResiduals(21);
//
// Under the hood, the implementation evaluates the cost function multiple
// times, computing a small set of the derivatives (four by default, controlled
// by the Stride template parameter) with each pass. There is a tradeoff with
// the size of the passes; you may want to experiment with the stride.
#ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
#include <cmath>
#include <numeric>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/jet.h"
#include "glog/logging.h"
namespace ceres {
template <typename CostFunctor, int Stride = 4>
class DynamicAutoDiffCostFunction : public CostFunction {
public:
explicit DynamicAutoDiffCostFunction(CostFunctor* functor)
: functor_(functor) {}
virtual ~DynamicAutoDiffCostFunction() {}
void AddParameterBlock(int size) {
mutable_parameter_block_sizes()->push_back(size);
}
void SetNumResiduals(int num_residuals) {
set_num_residuals(num_residuals);
}
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const {
CHECK_GT(num_residuals(), 0)
<< "You must call DynamicAutoDiffCostFunction::SetNumResiduals() "
<< "before DynamicAutoDiffCostFunction::Evaluate().";
if (jacobians == NULL) {
return (*functor_)(parameters, residuals);
}
// The difficulty with Jets, as implemented in Ceres, is that they were
// originally designed for strictly compile-sized use. At this point, there
// is a large body of code that assumes inside a cost functor it is
// acceptable to do e.g. T(1.5) and get an appropriately sized jet back.
//
// Unfortunately, it is impossible to communicate the expected size of a
// dynamically sized jet to the static instantiations that existing code
// depends on.
//
// To work around this issue, the solution here is to evaluate the
// jacobians in a series of passes, each one computing Stride *
// num_residuals() derivatives. This is done with small, fixed-size jets.
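// As a rough illustration (numbers made up): with active parameter blocks
// of sizes 5 and 10 and the default Stride of 4, there are 15 active
// parameters, so the functor is re-evaluated ceil(15 / 4) = 4 times, each
// pass filling in at most 4 columns of the jacobians.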
const int num_parameter_blocks = parameter_block_sizes().size();
const int num_parameters = std::accumulate(parameter_block_sizes().begin(),
parameter_block_sizes().end(),
0);
// Allocate scratch space for the strided evaluation.
vector<Jet<double, Stride> > input_jets(num_parameters);
vector<Jet<double, Stride> > output_jets(num_residuals());
// Make the parameter pack that is sent to the functor (reused).
vector<Jet<double, Stride>* > jet_parameters(num_parameter_blocks, NULL);
int num_active_parameters = 0;
int start_derivative_section = -1;
for (int i = 0, parameter_cursor = 0; i < num_parameter_blocks; ++i) {
jet_parameters[i] = &input_jets[parameter_cursor];
const int parameter_block_size = parameter_block_sizes()[i];
if (jacobians[i] != NULL) {
start_derivative_section =
(start_derivative_section == -1)
? parameter_cursor
: start_derivative_section;
num_active_parameters += parameter_block_size;
}
for (int j = 0; j < parameter_block_size; ++j, parameter_cursor++) {
input_jets[parameter_cursor].a = parameters[i][j];
}
}
// Evaluate all of the strides. Each stride is a chunk of the derivative to
// evaluate, typically some size proportional to the size of the SIMD
// registers of the CPU.
int num_strides = static_cast<int>(ceil(num_active_parameters /
static_cast<float>(Stride)));
for (int pass = 0; pass < num_strides; ++pass) {
// Set most of the jet components to zero, except for
// non-constant #Stride parameters.
int active_parameter_count = 0;
int end_derivative_section = start_derivative_section;
for (int i = 0, parameter_cursor = 0; i < num_parameter_blocks; ++i) {
for (int j = 0; j < parameter_block_sizes()[i];
++j, parameter_cursor++) {
input_jets[parameter_cursor].v.setZero();
if (parameter_cursor >= start_derivative_section &&
active_parameter_count < Stride) {
if (jacobians[i] != NULL) {
input_jets[parameter_cursor]
.v[parameter_cursor - start_derivative_section] = 1.0;
++active_parameter_count;
}
++end_derivative_section;
}
}
}
if (!(*functor_)(&jet_parameters[0], &output_jets[0])) {
return false;
}
// Copy the pieces of the jacobians into their final place.
active_parameter_count = 0;
for (int i = 0, parameter_cursor = 0; i < num_parameter_blocks; ++i) {
for (int j = 0; j < parameter_block_sizes()[i];
++j, parameter_cursor++) {
if (parameter_cursor >= start_derivative_section &&
active_parameter_count < Stride) {
if (jacobians[i] != NULL) {
for (int k = 0; k < num_residuals(); ++k) {
jacobians[i][k * parameter_block_sizes()[i] + j] =
output_jets[k].v[parameter_cursor -
start_derivative_section];
}
++active_parameter_count;
}
}
}
}
// Only copy the residuals over once (even though we compute them on
// every loop).
if (pass == num_strides - 1) {
for (int k = 0; k < num_residuals(); ++k) {
residuals[k] = output_jets[k].a;
}
}
start_derivative_section = end_derivative_section;
}
return true;
}
private:
internal::scoped_ptr<CostFunctor> functor_;
};
} // namespace ceres
#endif // CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_

@ -41,6 +41,8 @@
#include <float.h>
#endif
#include <limits>
namespace ceres {
#if defined(_MSC_VER)

@ -0,0 +1,222 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// Copyright 2007 Google Inc. All Rights Reserved.
//
// Author: wjr@google.com (William Rucklidge)
//
// This file contains a class that exercises a cost function, to make sure
// that it is computing reasonable derivatives. It compares the Jacobians
// computed by the cost function with those obtained by finite
// differences.
#ifndef CERES_PUBLIC_GRADIENT_CHECKER_H_
#define CERES_PUBLIC_GRADIENT_CHECKER_H_
#include <cstddef>
#include <algorithm>
#include <vector>
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/numeric_diff_cost_function.h"
#include "glog/logging.h"
namespace ceres {
// An object that exercises a cost function, to compare the answers that it
// gives with derivatives estimated using finite differencing.
//
// The only likely usage of this is for testing.
//
// How to use: Fill in an array of pointers to parameter blocks for your
// CostFunction, and then call Probe(). Check that the return value is
// 'true'. See prober_test.cc for an example.
//
// This is templated similarly to NumericDiffCostFunction, as it internally
// uses that.
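//
// A minimal sketch of a Probe() call (MyCostFunction, its sizes, and the
// parameter values below are made up for illustration):
//
//   double x[3] = {1.0, 2.0, 3.0};
//   double* parameters[] = { x };
//   typedef GradientChecker<MyCostFunction, 1, 3> Checker;
//   Checker::GradientCheckResults results;
//   MyCostFunction cost_function;
//   CHECK(Checker::Probe(parameters, 1e-9, &cost_function, &results));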
template <typename CostFunctionToProbe,
int M = 0, int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0>
class GradientChecker {
public:
// Here we stash some results from the probe, for later
// inspection.
struct GradientCheckResults {
// Computed cost.
Vector cost;
// The sizes of these matrices are dictated by the cost function's
// parameter and residual block sizes. Each vector's length will be
// term->parameter_block_sizes().size(), and each matrix is the
// Jacobian of the residual with respect to the corresponding parameter
// block.
// Derivatives as computed by the cost function.
vector<Matrix> term_jacobians;
// Derivatives as computed by finite differencing.
vector<Matrix> finite_difference_jacobians;
// Infinity-norm of term_jacobians - finite_difference_jacobians.
double error_jacobians;
};
// Checks the Jacobian computed by a cost function.
//
// probe_point: The parameter values at which to probe.
// error_tolerance: A threshold for the infinity-norm difference
// between the Jacobians. If the Jacobians differ by more than
// this amount, then the probe fails.
//
// term: The cost function to test. Not retained after this call returns.
//
// results: On return, the two Jacobians (and other information)
// will be stored here. May be NULL.
//
// Returns true if no problems are detected and the difference between the
// Jacobians is less than error_tolerance.
static bool Probe(double const* const* probe_point,
double error_tolerance,
CostFunctionToProbe *term,
GradientCheckResults* results) {
CHECK_NOTNULL(probe_point);
CHECK_NOTNULL(term);
LOG(INFO) << "-------------------- Starting Probe() --------------------";
// We need a GradientCheckResults object, whether or not the caller supplied one.
internal::scoped_ptr<GradientCheckResults> owned_results;
if (results == NULL) {
owned_results.reset(new GradientCheckResults);
results = owned_results.get();
}
// Do a consistency check between the term and the template parameters.
CHECK_EQ(M, term->num_residuals());
const int num_residuals = M;
const vector<int16>& block_sizes = term->parameter_block_sizes();
const int num_blocks = block_sizes.size();
CHECK_LE(num_blocks, 5) << "Unable to test functions that take more "
<< "than 5 parameter blocks";
if (N0) {
CHECK_EQ(N0, block_sizes[0]);
CHECK_GE(num_blocks, 1);
} else {
CHECK_LT(num_blocks, 1);
}
if (N1) {
CHECK_EQ(N1, block_sizes[1]);
CHECK_GE(num_blocks, 2);
} else {
CHECK_LT(num_blocks, 2);
}
if (N2) {
CHECK_EQ(N2, block_sizes[2]);
CHECK_GE(num_blocks, 3);
} else {
CHECK_LT(num_blocks, 3);
}
if (N3) {
CHECK_EQ(N3, block_sizes[3]);
CHECK_GE(num_blocks, 4);
} else {
CHECK_LT(num_blocks, 4);
}
if (N4) {
CHECK_EQ(N4, block_sizes[4]);
CHECK_GE(num_blocks, 5);
} else {
CHECK_LT(num_blocks, 5);
}
results->term_jacobians.clear();
results->term_jacobians.resize(num_blocks);
results->finite_difference_jacobians.clear();
results->finite_difference_jacobians.resize(num_blocks);
internal::FixedArray<double*> term_jacobian_pointers(num_blocks);
internal::FixedArray<double*>
finite_difference_jacobian_pointers(num_blocks);
for (int i = 0; i < num_blocks; i++) {
results->term_jacobians[i].resize(num_residuals, block_sizes[i]);
term_jacobian_pointers[i] = results->term_jacobians[i].data();
results->finite_difference_jacobians[i].resize(
num_residuals, block_sizes[i]);
finite_difference_jacobian_pointers[i] =
results->finite_difference_jacobians[i].data();
}
results->cost.resize(num_residuals, 1);
CHECK(term->Evaluate(probe_point, results->cost.data(),
term_jacobian_pointers.get()));
NumericDiffCostFunction<CostFunctionToProbe, CENTRAL, M, N0, N1, N2, N3, N4>
numeric_term(term, DO_NOT_TAKE_OWNERSHIP);
CHECK(numeric_term.Evaluate(probe_point, results->cost.data(),
finite_difference_jacobian_pointers.get()));
results->error_jacobians = 0;
for (int i = 0; i < num_blocks; i++) {
Matrix jacobian_difference = results->term_jacobians[i] -
results->finite_difference_jacobians[i];
results->error_jacobians =
std::max(results->error_jacobians,
jacobian_difference.lpNorm<Eigen::Infinity>());
}
LOG(INFO) << "========== term-computed derivatives ==========";
for (int i = 0; i < num_blocks; i++) {
LOG(INFO) << "term_computed block " << i;
LOG(INFO) << "\n" << results->term_jacobians[i];
}
LOG(INFO) << "========== finite-difference derivatives ==========";
for (int i = 0; i < num_blocks; i++) {
LOG(INFO) << "finite_difference block " << i;
LOG(INFO) << "\n" << results->finite_difference_jacobians[i];
}
LOG(INFO) << "========== difference ==========";
for (int i = 0; i < num_blocks; i++) {
LOG(INFO) << "difference block " << i;
LOG(INFO) << (results->term_jacobians[i] -
results->finite_difference_jacobians[i]);
}
LOG(INFO) << "||difference|| = " << results->error_jacobians;
return results->error_jacobians < error_tolerance;
}
private:
CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(GradientChecker);
};
} // namespace ceres
#endif // CERES_PUBLIC_GRADIENT_CHECKER_H_

@ -146,6 +146,7 @@
#include "ceres/jet.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/variadic_evaluate.h"
namespace ceres {
namespace internal {
@ -191,134 +192,71 @@ inline void Take1stOrderPart(const int M, const JetT *src, T *dst) {
DCHECK(src);
DCHECK(dst);
for (int i = 0; i < M; ++i) {
Eigen::Map<Eigen::Matrix<T, N, 1> >(dst + N * i, N) = src[i].v.template segment<N>(N0);
Eigen::Map<Eigen::Matrix<T, N, 1> >(dst + N * i, N) =
src[i].v.template segment<N>(N0);
}
}
// This block of quasi-repeated code calls the user-supplied functor, which may
// take a variable number of arguments. This is accomplished by specializing the
// struct based on the size of the trailing parameters; parameters with 0 size
// are assumed missing.
//
// Supporting variadic functions is the primary source of complexity in the
// autodiff implementation.
template<typename Functor, typename T,
int N0, int N1, int N2, int N3, int N4, int N5>
struct VariadicEvaluate {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
output);
}
};
template<typename Functor, typename T,
int N0, int N1, int N2, int N3, int N4>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
output);
}
};
template<typename Functor, typename T,
int N0, int N1, int N2, int N3>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
output);
}
};
template<typename Functor, typename T,
int N0, int N1, int N2>
struct VariadicEvaluate<Functor, T, N0, N1, N2, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
output);
}
};
template<typename Functor, typename T,
int N0, int N1>
struct VariadicEvaluate<Functor, T, N0, N1, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
output);
}
};
template<typename Functor, typename T, int N0>
struct VariadicEvaluate<Functor, T, N0, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
output);
}
};
// This is in a struct because default template parameters on a function are not
// supported in C++03 (though it is available in C++0x). N0 through N5 are the
// dimension of the input arguments to the user supplied functor.
// This is in a struct because default template parameters on a
// function are not supported in C++03 (though it is available in
// C++0x). N0 through N5 are the dimension of the input arguments to
// the user supplied functor.
template <typename Functor, typename T,
int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, int N5=0>
int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
struct AutoDiff {
static bool Differentiate(const Functor& functor,
T const *const *parameters,
int num_outputs,
T *function_value,
T **jacobians) {
typedef Jet<T, N0 + N1 + N2 + N3 + N4 + N5> JetT;
DCHECK_GT(N0, 0)
<< "Cost functions must have at least one parameter block.";
DCHECK((!N1 && !N2 && !N3 && !N4 && !N5) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0)))
// This block breaks the 80 column rule to keep it somewhat readable.
DCHECK_GT(num_outputs, 0);
CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0)))
<< "Zero block cannot precede a non-zero block. Block sizes are "
<< "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
<< N3 << ", " << N4 << ", " << N5;
DCHECK_GT(num_outputs, 0);
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
<< N8 << ", " << N9;
typedef Jet<T, N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9> JetT;
FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(
N0 + N1 + N2 + N3 + N4 + N5 + num_outputs);
N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9 + num_outputs);
// It's ugly, but it works.
const int jet0 = 0;
const int jet1 = N0;
const int jet2 = N0 + N1;
const int jet3 = N0 + N1 + N2;
const int jet4 = N0 + N1 + N2 + N3;
const int jet5 = N0 + N1 + N2 + N3 + N4;
const int jet6 = N0 + N1 + N2 + N3 + N4 + N5;
// These are the positions of the respective jets in the fixed array x.
const int jet0 = 0;
const int jet1 = N0;
const int jet2 = N0 + N1;
const int jet3 = N0 + N1 + N2;
const int jet4 = N0 + N1 + N2 + N3;
const int jet5 = N0 + N1 + N2 + N3 + N4;
const int jet6 = N0 + N1 + N2 + N3 + N4 + N5;
const int jet7 = N0 + N1 + N2 + N3 + N4 + N5 + N6;
const int jet8 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7;
const int jet9 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8;
const JetT *unpacked_parameters[6] = {
const JetT *unpacked_parameters[10] = {
x.get() + jet0,
x.get() + jet1,
x.get() + jet2,
x.get() + jet3,
x.get() + jet4,
x.get() + jet5,
x.get() + jet6,
x.get() + jet7,
x.get() + jet8,
x.get() + jet9,
};
JetT *output = x.get() + jet6;
JetT* output = x.get() + N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
#define CERES_MAKE_1ST_ORDER_PERTURBATION(i) \
if (N ## i) { \
@ -333,10 +271,14 @@ struct AutoDiff {
CERES_MAKE_1ST_ORDER_PERTURBATION(3);
CERES_MAKE_1ST_ORDER_PERTURBATION(4);
CERES_MAKE_1ST_ORDER_PERTURBATION(5);
CERES_MAKE_1ST_ORDER_PERTURBATION(6);
CERES_MAKE_1ST_ORDER_PERTURBATION(7);
CERES_MAKE_1ST_ORDER_PERTURBATION(8);
CERES_MAKE_1ST_ORDER_PERTURBATION(9);
#undef CERES_MAKE_1ST_ORDER_PERTURBATION
if (!VariadicEvaluate<Functor, JetT,
N0, N1, N2, N3, N4, N5>::Call(
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call(
functor, unpacked_parameters, output)) {
return false;
}
@ -359,6 +301,10 @@ struct AutoDiff {
CERES_TAKE_1ST_ORDER_PERTURBATION(3);
CERES_TAKE_1ST_ORDER_PERTURBATION(4);
CERES_TAKE_1ST_ORDER_PERTURBATION(5);
CERES_TAKE_1ST_ORDER_PERTURBATION(6);
CERES_TAKE_1ST_ORDER_PERTURBATION(7);
CERES_TAKE_1ST_ORDER_PERTURBATION(8);
CERES_TAKE_1ST_ORDER_PERTURBATION(9);
#undef CERES_TAKE_1ST_ORDER_PERTURBATION
return true;
}

@ -168,11 +168,11 @@ inline FixedArray<T, S>::FixedArray(typename FixedArray<T, S>::size_type n)
array_((n <= kInlineElements
? reinterpret_cast<InnerContainer*>(inline_space_)
: new InnerContainer[n])) {
DCHECK_GE(n, 0);
DCHECK_GE(n, size_t(0));
// Construct only the elements actually used.
if (array_ == reinterpret_cast<InnerContainer*>(inline_space_)) {
for (int i = 0; i != size_; ++i) {
for (size_t i = 0; i != size_; ++i) {
inline_space_[i].Init();
}
}
@ -183,7 +183,7 @@ inline FixedArray<T, S>::~FixedArray() {
if (array_ != reinterpret_cast<InnerContainer*>(inline_space_)) {
delete[] array_;
} else {
for (int i = 0; i != size_; ++i) {
for (size_t i = 0; i != size_; ++i) {
inline_space_[i].Destroy();
}
}

@ -132,16 +132,16 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
// - wan 2005-11-16
//
// Starting with Visual C++ 2005, WinNT.h includes ARRAYSIZE. However,
// the definition comes from the over-broad windows.h header that
// the definition comes from the over-broad windows.h header that
// introduces a macro, ERROR, that conflicts with the logging framework
// that Ceres uses. Instead, rename ARRAYSIZE to CERES_ARRAYSIZE.
#define CERES_ARRAYSIZE(a) \
((sizeof(a) / sizeof(*(a))) / \
#define CERES_ARRAYSIZE(a) \
((sizeof(a) / sizeof(*(a))) / \
static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
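// A minimal usage sketch (the array below is made up for illustration):
//
//   int primes[] = {2, 3, 5, 7};
//   const size_t kNumPrimes = CERES_ARRAYSIZE(primes);  // kNumPrimes == 4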
// Tell the compiler to warn about unused return values for functions declared
// with this macro. The macro should be used on function declarations
// following the argument list:
// Tell the compiler to warn about unused return values for functions
// declared with this macro. The macro should be used on function
// declarations following the argument list:
//
// Sprocket* AllocateSprocket() MUST_USE_RESULT;
//

@ -110,56 +110,61 @@ class ManualConstructor {
inline Type& operator*() { return *get(); }
inline const Type& operator*() const { return *get(); }
// This is needed to get around the strict aliasing warning GCC generates.
inline void* space() {
return reinterpret_cast<void*>(space_);
}
// You can pass up to eleven constructor arguments as arguments of Init().
inline void Init() {
new(space_) Type;
new(space()) Type;
}
template <typename T1>
inline void Init(const T1& p1) {
new(space_) Type(p1);
new(space()) Type(p1);
}
template <typename T1, typename T2>
inline void Init(const T1& p1, const T2& p2) {
new(space_) Type(p1, p2);
new(space()) Type(p1, p2);
}
template <typename T1, typename T2, typename T3>
inline void Init(const T1& p1, const T2& p2, const T3& p3) {
new(space_) Type(p1, p2, p3);
new(space()) Type(p1, p2, p3);
}
template <typename T1, typename T2, typename T3, typename T4>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4) {
new(space_) Type(p1, p2, p3, p4);
new(space()) Type(p1, p2, p3, p4);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5) {
new(space_) Type(p1, p2, p3, p4, p5);
new(space()) Type(p1, p2, p3, p4, p5);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
typename T6>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6) {
new(space_) Type(p1, p2, p3, p4, p5, p6);
new(space()) Type(p1, p2, p3, p4, p5, p6);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
typename T6, typename T7>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7) {
new(space_) Type(p1, p2, p3, p4, p5, p6, p7);
new(space()) Type(p1, p2, p3, p4, p5, p6, p7);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
typename T6, typename T7, typename T8>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7, const T8& p8) {
new(space_) Type(p1, p2, p3, p4, p5, p6, p7, p8);
new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
@ -167,7 +172,7 @@ class ManualConstructor {
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7, const T8& p8,
const T9& p9) {
new(space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9);
new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
@ -175,7 +180,7 @@ class ManualConstructor {
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7, const T8& p8,
const T9& p9, const T10& p10) {
new(space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
@ -184,7 +189,7 @@ class ManualConstructor {
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7, const T8& p8,
const T9& p9, const T10& p10, const T11& p11) {
new(space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11);
new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11);
}
inline void Destroy() {

@ -0,0 +1,199 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// mierle@gmail.com (Keir Mierle)
//
// Finite differencing routine used by NumericDiffCostFunction.
#ifndef CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
#define CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
#include <cstring>
#include "Eigen/Dense"
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/variadic_evaluate.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
// Helper templates that allow evaluation of a variadic functor or a
// CostFunction object.
template <typename CostFunctor,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9 >
bool EvaluateImpl(const CostFunctor* functor,
double const* const* parameters,
double* residuals,
const void* /* NOT USED */) {
return VariadicEvaluate<CostFunctor,
double,
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call(
*functor,
parameters,
residuals);
}
template <typename CostFunctor,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9 >
bool EvaluateImpl(const CostFunctor* functor,
double const* const* parameters,
double* residuals,
const CostFunction* /* NOT USED */) {
return functor->Evaluate(parameters, residuals, NULL);
}
// This is split from the main class because C++ doesn't allow partial template
// specializations for member functions. The alternative is to repeat the main
// class for differing numbers of parameters, which is also unfortunate.
template <typename CostFunctor,
NumericDiffMethod kMethod,
int kNumResiduals,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9,
int kParameterBlock,
int kParameterBlockSize>
struct NumericDiff {
// Mutates parameters but must restore them before return.
static bool EvaluateJacobianForParameterBlock(
const CostFunctor* functor,
double const* residuals_at_eval_point,
const double relative_step_size,
double **parameters,
double *jacobian) {
using Eigen::Map;
using Eigen::Matrix;
using Eigen::RowMajor;
using Eigen::ColMajor;
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
typedef Matrix<double, kNumResiduals, kParameterBlockSize,
(kParameterBlockSize == 1 &&
kNumResiduals > 1) ? ColMajor : RowMajor> JacobianMatrix;
Map<JacobianMatrix> parameter_jacobian(jacobian,
kNumResiduals,
kParameterBlockSize);
// Mutate 1 element at a time and then restore.
Map<ParameterVector> x_plus_delta(parameters[kParameterBlock],
kParameterBlockSize);
ParameterVector x(x_plus_delta);
ParameterVector step_size = x.array().abs() * relative_step_size;
// To handle cases where a parameter is exactly zero, instead use
// the mean step_size for the other dimensions. If all the
// parameters are zero, there's no good answer. Take
// relative_step_size as a guess and hope for the best.
const double fallback_step_size =
(step_size.sum() == 0)
? relative_step_size
: step_size.sum() / step_size.rows();
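// As a worked illustration (values made up): for a block x = (0, 2, 4) and
// relative_step_size = 1e-6, step_size is (0, 2e-6, 4e-6), so the zero
// entry falls back to (0 + 2e-6 + 4e-6) / 3 = 2e-6 as its delta below.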
// For each parameter in the parameter block, use finite differences to
// compute the derivative for that parameter.
for (int j = 0; j < kParameterBlockSize; ++j) {
const double delta =
(step_size(j) == 0.0) ? fallback_step_size : step_size(j);
x_plus_delta(j) = x(j) + delta;
double residuals[kNumResiduals]; // NOLINT
if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
functor, parameters, residuals, functor)) {
return false;
}
// Compute this column of the jacobian in 3 steps:
// 1. Store residuals for the forward part.
// 2. Subtract residuals for the backward (or 0) part.
// 3. Divide out the run.
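// In other words, for the central scheme this column evaluates to
//   (f(x + delta * e_j) - f(x - delta * e_j)) / (2 * delta),
// and for the forward scheme to
//   (f(x + delta * e_j) - f(x)) / delta,
// where e_j is the j-th unit vector.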
parameter_jacobian.col(j) =
Map<const ResidualVector>(residuals, kNumResiduals);
double one_over_delta = 1.0 / delta;
if (kMethod == CENTRAL) {
// Compute the function on the other side of x(j).
x_plus_delta(j) = x(j) - delta;
if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
functor, parameters, residuals, functor)) {
return false;
}
parameter_jacobian.col(j) -=
Map<ResidualVector>(residuals, kNumResiduals, 1);
one_over_delta /= 2;
} else {
// Forward difference only; reuse existing residuals evaluation.
parameter_jacobian.col(j) -=
Map<const ResidualVector>(residuals_at_eval_point, kNumResiduals);
}
x_plus_delta(j) = x(j); // Restore x_plus_delta.
// Divide out the run to get slope.
parameter_jacobian.col(j) *= one_over_delta;
}
return true;
}
};
template <typename CostFunctor,
NumericDiffMethod kMethod,
int kNumResiduals,
int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9,
int kParameterBlock>
struct NumericDiff<CostFunctor, kMethod, kNumResiduals,
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9,
kParameterBlock, 0> {
// Mutates parameters but must restore them before return.
static bool EvaluateJacobianForParameterBlock(
const CostFunctor* functor,
double const* residuals_at_eval_point,
const double relative_step_size,
double **parameters,
double *jacobian) {
LOG(FATAL) << "Control should never reach here.";
return true;
}
};
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_

@ -38,6 +38,7 @@
#include <assert.h>
#include <stdlib.h>
#include <cstddef>
#include <algorithm>
namespace ceres {
namespace internal {
@ -49,18 +50,17 @@ template <class C> class scoped_array;
template <class C>
scoped_ptr<C> make_scoped_ptr(C *);
// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T>
// automatically deletes the pointer it holds (if any). That is, scoped_ptr<T>
// owns the T object that it points to. Like a T*, a scoped_ptr<T> may hold
// either NULL or a pointer to a T object. Also like T*, scoped_ptr<T> is
// thread-compatible, and once you dereference it, you get the threadsafety
// guarantees of T.
// A scoped_ptr<T> is like a T*, except that the destructor of
// scoped_ptr<T> automatically deletes the pointer it holds (if
// any). That is, scoped_ptr<T> owns the T object that it points
// to. Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to
// a T object. Also like T*, scoped_ptr<T> is thread-compatible, and
// once you dereference it, you get the threadsafety guarantees of T.
//
// The size of a scoped_ptr is small: sizeof(scoped_ptr<C>) == sizeof(C*)
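//
// A minimal usage sketch (Foo and DoWork are made-up names):
//
//   {
//     scoped_ptr<Foo> foo(new Foo);
//     foo->DoWork();
//   }  // *foo is deleted automatically when foo goes out of scope.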
template <class C>
class scoped_ptr {
public:
// The element type
typedef C element_type;
@ -193,7 +193,6 @@ scoped_ptr<C> make_scoped_ptr(C *p) {
template <class C>
class scoped_array {
public:
// The element type
typedef C element_type;

@ -0,0 +1,182 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// mierle@gmail.com (Keir Mierle)
#ifndef CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_
#define CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_
#include <stddef.h>
#include <glog/logging.h>
#include "ceres/jet.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
namespace ceres {
namespace internal {
// This block of quasi-repeated code calls the user-supplied functor, which may
// take a variable number of arguments. This is accomplished by specializing the
// struct based on the size of the trailing parameters; parameters with 0 size
// are assumed missing.
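// For instance, with two parameter blocks (N2 through N9 all zero), the
// two-block specialization below is selected and Call() reduces to
// functor(input[0], input[1], output).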
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8, int N9>
struct VariadicEvaluate {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
input[6],
input[7],
input[8],
input[9],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7, int N8>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, N8, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
input[6],
input[7],
input[8],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5, int N6, int N7>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
input[6],
input[7],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5, int N6>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
input[6],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
int N5>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
input[4],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2, int N3>
struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, 0, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
input[3],
output);
}
};
template<typename Functor, typename T, int N0, int N1, int N2>
struct VariadicEvaluate<Functor, T, N0, N1, N2, 0, 0, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
input[2],
output);
}
};
template<typename Functor, typename T, int N0, int N1>
struct VariadicEvaluate<Functor, T, N0, N1, 0, 0, 0, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
input[1],
output);
}
};
template<typename Functor, typename T, int N0>
struct VariadicEvaluate<Functor, T, N0, 0, 0, 0, 0, 0, 0, 0, 0, 0> {
static bool Call(const Functor& functor, T const *const *input, T* output) {
return functor(input[0],
output);
}
};
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_

@ -42,6 +42,23 @@ namespace ceres {
// This struct describes the state of the optimizer after each
// iteration of the minimization.
struct IterationSummary {
IterationSummary()
: iteration(0),
step_is_valid(false),
step_is_nonmonotonic(false),
step_is_successful(false),
cost(0.0),
cost_change(0.0),
gradient_max_norm(0.0),
step_norm(0.0),
eta(0.0),
step_size(0.0),
line_search_function_evaluations(0),
linear_solver_iterations(0),
iteration_time_in_seconds(0.0),
step_solver_time_in_seconds(0.0),
cumulative_time_in_seconds(0.0) {}
// Current iteration number.
int32 iteration;
@ -51,7 +68,22 @@ struct IterationSummary {
// Note: step_is_valid is false when iteration = 0.
bool step_is_valid;
// Whether or not the algorithm made progress in this iteration.
// Step did not reduce the value of the objective function
// sufficiently, but it was accepted because of the relaxed
// acceptance criterion used by the non-monotonic trust region
// algorithm.
//
// Note: step_is_nonmonotonic is false when iteration = 0;
bool step_is_nonmonotonic;
// Whether or not the minimizer accepted this step. If the
// ordinary trust region algorithm is used, this means that the
// relative reduction in the objective function value was greater
// than Solver::Options::min_relative_decrease. However, if the
// non-monotonic trust region algorithm is used
// (Solver::Options:use_nonmonotonic_steps = true), then even if the
// relative decrease is not sufficient, the algorithm may accept the
// step and the step is declared successful.
//
// Note: step_is_successful is false when iteration = 0.
bool step_is_successful;
@ -60,8 +92,7 @@ struct IterationSummary {
double cost;
// Change in the value of the objective function in this
// iteration. This can be positive or negative. Negative change
// means that the step was not successful.
// iteration. This can be positive or negative.
double cost_change;
// Infinity norm of the gradient vector.
@ -87,6 +118,12 @@ struct IterationSummary {
// ignore it.
double eta;
// Step size computed by the line search algorithm.
double step_size;
// Number of function evaluations used by the line search algorithm.
int line_search_function_evaluations;
// Number of iterations taken by the linear solver to solve for the
// Newton step.
int linear_solver_iterations;

@ -27,19 +27,109 @@
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
// sameeragarwal@google.com (Sameer Agarwal)
//
// Create CostFunctions as needed by the least squares framework with jacobians
// computed via numeric (a.k.a. finite) differentiation. For more details see
// http://en.wikipedia.org/wiki/Numerical_differentiation.
//
// To get a numerically differentiated cost function, define a subclass of
// CostFunction such that the Evaluate() function ignores the jacobian
// parameter. The numeric differentiation wrapper will fill in the jacobian
// parameter if necessary by repeatedly calling the Evaluate() function with
// small changes to the appropriate parameters, and computing the slope. For
// performance, the numeric differentiation wrapper class is templated on the
// concrete cost function, even though it could be implemented only in terms of
// the virtual CostFunction interface.
// To get a numerically differentiated cost function, you must define
// a class with an operator() (a functor) that computes the residuals.
//
// The function must write the computed value in the last argument (the only
// non-const one) and return true to indicate success.
//
// For example, consider a scalar error e = k - x'y, where both x and y are
// two-dimensional column vector parameters, the prime sign indicates
// transposition, and k is a constant. The form of this error, which is the
// difference between a constant and an expression, is a common pattern in least
// squares problems. For example, the value x'y might be the model expectation
// for a series of measurements, where there is an instance of the cost function
// for each measurement k.
//
// The actual cost added to the total problem is e^2, or (k - x'y)^2; however,
// the squaring is implicitly done by the optimization framework.
//
// To write a numerically differentiated cost function for the above model, first
// define the object
//
// class MyScalarCostFunctor {
// MyScalarCostFunctor(double k): k_(k) {}
//
// bool operator()(const double* const x,
// const double* const y,
// double* residuals) const {
// residuals[0] = k_ - x[0] * y[0] - x[1] * y[1];
// return true;
// }
//
// private:
// double k_;
// };
//
// Note that in the declaration of operator() the input parameters x
// and y come first, and are passed as const pointers to arrays of
// doubles. If there were three input parameters, then the third input
// parameter would come after y. The output is always the last
// parameter, and is also a pointer to an array. In the example above,
// the residual is a scalar, so only residuals[0] is set.
//
// Then given this class definition, the numerically differentiated
// cost function with central differences used for computing the
// derivative can be constructed as follows.
//
//   CostFunction* cost_function
//     = new NumericDiffCostFunction<MyScalarCostFunctor, CENTRAL, 1, 2, 2>(
//       new MyScalarCostFunctor(1.0));                      ^     ^  ^  ^
//                                                           |     |  |  |
//            Finite Differencing Scheme --------------------+     |  |  |
//            Dimension of residual -------------------------------+  |  |
//            Dimension of x -----------------------------------------+  |
//            Dimension of y --------------------------------------------+
//
// In this example, there is usually an instance for each measurement of k.
//
// In the instantiation above, the template parameters following
// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing
// a 1-dimensional output from two arguments, both 2-dimensional.
//
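// The resulting cost function is added to a ceres::Problem in the usual way
// (the parameter arrays x and y below are made up for illustration):
//
//   double x[2] = {1.0, 2.0};
//   double y[2] = {3.0, 4.0};
//   Problem problem;
//   problem.AddResidualBlock(cost_function, NULL, x, y);
//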
// The framework can currently accommodate cost functions of up to 10
// independent variables, and there is no limit on the dimensionality
// of each of them.
//
// The central difference method is considerably more accurate, at the cost of
// twice as many function evaluations as forward difference. Consider using
// central differences to begin with, and only after that works, try forward
// difference to improve performance.
//
// TODO(sameeragarwal): Add support for dynamic number of residuals.
//
// WARNING #1: A common beginner's error when first using
// NumericDiffCostFunction is to get the sizing wrong. In particular,
// there is a tendency to set the template parameters to (dimension of
// residual, number of parameters) instead of passing a dimension
// parameter for *every parameter*. In the example above, that would
// be <MyScalarCostFunctor, 1, 2>, which is missing the last '2'
// argument. Please be careful when setting the size parameters.
//
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
//
// ALTERNATE INTERFACE
//
// For a variety of reasons, including compatibility with legacy code,
// NumericDiffCostFunction can also take CostFunction objects as
// input. The following describes how.
//
// To get a numerically differentiated cost function, define a
// subclass of CostFunction such that the Evaluate() function ignores
// the jacobian parameter. The numeric differentiation wrapper will
// fill in the jacobian parameter if necessary by repeatedly calling
// the Evaluate() function with small changes to the appropriate
// parameters, and computing the slope. For performance, the numeric
// differentiation wrapper class is templated on the concrete cost
// function, even though it could be implemented only in terms of the
// virtual CostFunction interface.
//
// The numerically differentiated version of a cost function
// can be constructed as follows:
@ -51,233 +141,153 @@
// where MyCostFunction has 1 residual and 2 parameter blocks with sizes 4 and 8
// respectively. Look at the tests for a more detailed example.
//
// The central difference method is considerably more accurate, at the cost of
// twice as many function evaluations as forward difference. Consider using
// central differences to begin with, and only after that works, try forward
// difference to improve performance.
//
// TODO(keir): Characterize accuracy; mention pitfalls; provide alternatives.
#ifndef CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_
#include <cstring>
#include <glog/logging.h>
#include "Eigen/Dense"
#include "ceres/cost_function.h"
#include "ceres/internal/numeric_diff.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/sized_cost_function.h"
#include "ceres/types.h"
namespace ceres {
enum NumericDiffMethod {
CENTRAL,
FORWARD
};
// This is split from the main class because C++ doesn't allow partial template
// specializations for member functions. The alternative is to repeat the main
// class for differing numbers of parameters, which is also unfortunate.
template <typename CostFunctionNoJacobian,
int num_residuals,
int parameter_block_size,
int parameter_block,
NumericDiffMethod method>
struct Differencer {
// Mutates parameters but must restore them before return.
static bool EvaluateJacobianForParameterBlock(
const CostFunctionNoJacobian *function,
double const* residuals_at_eval_point,
double **parameters,
double **jacobians) {
using Eigen::Map;
using Eigen::Matrix;
using Eigen::RowMajor;
using Eigen::ColMajor;
typedef Matrix<double, num_residuals, 1> ResidualVector;
typedef Matrix<double, parameter_block_size, 1> ParameterVector;
typedef Matrix<double, num_residuals, parameter_block_size,
(parameter_block_size == 1 &&
num_residuals > 1) ? ColMajor : RowMajor> JacobianMatrix;
Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
num_residuals,
parameter_block_size);
// Mutate 1 element at a time and then restore.
Map<ParameterVector> x_plus_delta(parameters[parameter_block],
parameter_block_size);
ParameterVector x(x_plus_delta);
// TODO(keir): Pick a smarter number! In theory a good choice is sqrt(eps) *
// x, which for doubles means about 1e-8 * x. However, I have found this
// number too optimistic. This number should be exposed for users to change.
const double kRelativeStepSize = 1e-6;
ParameterVector step_size = x.array().abs() * kRelativeStepSize;
// To handle cases where a parameter is exactly zero, instead use the mean
// step_size for the other dimensions.
double fallback_step_size = step_size.sum() / step_size.rows();
if (fallback_step_size == 0.0) {
// If all the parameters are zero, there's no good answer. Take
// kRelativeStepSize as a guess and hope for the best.
fallback_step_size = kRelativeStepSize;
}
// For each parameter in the parameter block, use finite differences to
// compute the derivative for that parameter.
for (int j = 0; j < parameter_block_size; ++j) {
if (step_size(j) == 0.0) {
// The parameter is exactly zero, so compromise and use the mean
// step_size from the other parameters. This can break in many cases,
// but it's hard to pick a good number without problem specific
// knowledge.
step_size(j) = fallback_step_size;
}
x_plus_delta(j) = x(j) + step_size(j);
double residuals[num_residuals]; // NOLINT
if (!function->Evaluate(parameters, residuals, NULL)) {
// Something went wrong; bail.
return false;
}
// Compute this column of the jacobian in 3 steps:
// 1. Store residuals for the forward part.
// 2. Subtract residuals for the backward (or 0) part.
// 3. Divide out the run.
parameter_jacobian.col(j) =
Map<const ResidualVector>(residuals, num_residuals);
double one_over_h = 1 / step_size(j);
if (method == CENTRAL) {
// Compute the function on the other side of x(j).
x_plus_delta(j) = x(j) - step_size(j);
if (!function->Evaluate(parameters, residuals, NULL)) {
// Something went wrong; bail.
return false;
}
parameter_jacobian.col(j) -=
Map<ResidualVector>(residuals, num_residuals, 1);
one_over_h /= 2;
} else {
// Forward difference only; reuse existing residuals evaluation.
parameter_jacobian.col(j) -=
Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
}
x_plus_delta(j) = x(j); // Restore x_plus_delta.
// Divide out the run to get slope.
parameter_jacobian.col(j) *= one_over_h;
}
return true;
}
};
// Prevent invalid instantiations.
template <typename CostFunctionNoJacobian,
int num_residuals,
int parameter_block,
NumericDiffMethod method>
struct Differencer<CostFunctionNoJacobian,
num_residuals,
0 /* parameter_block_size */,
parameter_block,
method> {
static bool EvaluateJacobianForParameterBlock(
const CostFunctionNoJacobian *function,
double const* residuals_at_eval_point,
double **parameters,
double **jacobians) {
LOG(FATAL) << "Shouldn't get here.";
return true;
}
};
template <typename CostFunctionNoJacobian,
NumericDiffMethod method = CENTRAL, int M = 0,
int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, int N5 = 0>
template <typename CostFunctor,
NumericDiffMethod method = CENTRAL,
int kNumResiduals = 0, // Number of residuals, or ceres::DYNAMIC
int N0 = 0, // Number of parameters in block 0.
int N1 = 0, // Number of parameters in block 1.
int N2 = 0, // Number of parameters in block 2.
int N3 = 0, // Number of parameters in block 3.
int N4 = 0, // Number of parameters in block 4.
int N5 = 0, // Number of parameters in block 5.
int N6 = 0, // Number of parameters in block 6.
int N7 = 0, // Number of parameters in block 7.
int N8 = 0, // Number of parameters in block 8.
int N9 = 0> // Number of parameters in block 9.
class NumericDiffCostFunction
: public SizedCostFunction<M, N0, N1, N2, N3, N4, N5> {
: public SizedCostFunction<kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9> {
public:
NumericDiffCostFunction(CostFunctionNoJacobian* function,
Ownership ownership)
: function_(function), ownership_(ownership) {}
NumericDiffCostFunction(CostFunctor* functor,
const double relative_step_size = 1e-6)
:functor_(functor),
ownership_(TAKE_OWNERSHIP),
relative_step_size_(relative_step_size) {}
virtual ~NumericDiffCostFunction() {
NumericDiffCostFunction(CostFunctor* functor,
Ownership ownership,
const double relative_step_size = 1e-6)
: functor_(functor),
ownership_(ownership),
relative_step_size_(relative_step_size) {}
~NumericDiffCostFunction() {
if (ownership_ != TAKE_OWNERSHIP) {
function_.release();
functor_.release();
}
}
virtual bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const {
using internal::FixedArray;
using internal::NumericDiff;
const int kNumParameters = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
const int kNumParameterBlocks =
(N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
(N5 > 0) + (N6 > 0) + (N7 > 0) + (N8 > 0) + (N9 > 0);
// Get the function value (residuals) at the point to evaluate.
bool success = function_->Evaluate(parameters, residuals, NULL);
if (!success) {
// Something went wrong; ignore the jacobian.
if (!internal::EvaluateImpl<CostFunctor,
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
functor_.get(),
parameters,
residuals,
functor_.get())) {
return false;
}
if (!jacobians) {
// Nothing to do; just forward.
return true;
}
// Create a copy of the parameters which will get mutated.
const int kParametersSize = N0 + N1 + N2 + N3 + N4 + N5;
double parameters_copy[kParametersSize];
double *parameters_references_copy[6];
parameters_references_copy[0] = &parameters_copy[0];
parameters_references_copy[1] = &parameters_copy[0] + N0;
parameters_references_copy[2] = &parameters_copy[0] + N0 + N1;
parameters_references_copy[3] = &parameters_copy[0] + N0 + N1 + N2;
parameters_references_copy[4] = &parameters_copy[0] + N0 + N1 + N2 + N3;
parameters_references_copy[5] =
&parameters_copy[0] + N0 + N1 + N2 + N3 + N4;
FixedArray<double> parameters_copy(kNumParameters);
FixedArray<double*> parameters_reference_copy(kNumParameterBlocks);
parameters_reference_copy[0] = parameters_copy.get();
if (N1) parameters_reference_copy[1] = parameters_reference_copy[0] + N0;
if (N2) parameters_reference_copy[2] = parameters_reference_copy[1] + N1;
if (N3) parameters_reference_copy[3] = parameters_reference_copy[2] + N2;
if (N4) parameters_reference_copy[4] = parameters_reference_copy[3] + N3;
if (N5) parameters_reference_copy[5] = parameters_reference_copy[4] + N4;
if (N6) parameters_reference_copy[6] = parameters_reference_copy[5] + N5;
if (N7) parameters_reference_copy[7] = parameters_reference_copy[6] + N6;
if (N8) parameters_reference_copy[8] = parameters_reference_copy[7] + N7;
if (N9) parameters_reference_copy[9] = parameters_reference_copy[8] + N8;
#define COPY_PARAMETER_BLOCK(block) \
if (N ## block) memcpy(parameters_reference_copy[block], \
parameters[block], \
sizeof(double) * N ## block); // NOLINT
#define COPY_PARAMETER_BLOCK(block) \
if (N ## block) memcpy(parameters_references_copy[block], \
parameters[block], \
sizeof(double) * N ## block); // NOLINT
COPY_PARAMETER_BLOCK(0);
COPY_PARAMETER_BLOCK(1);
COPY_PARAMETER_BLOCK(2);
COPY_PARAMETER_BLOCK(3);
COPY_PARAMETER_BLOCK(4);
COPY_PARAMETER_BLOCK(5);
COPY_PARAMETER_BLOCK(6);
COPY_PARAMETER_BLOCK(7);
COPY_PARAMETER_BLOCK(8);
COPY_PARAMETER_BLOCK(9);
#undef COPY_PARAMETER_BLOCK
#define EVALUATE_JACOBIAN_FOR_BLOCK(block) \
if (N ## block && jacobians[block]) { \
if (!Differencer<CostFunctionNoJacobian, /* NOLINT */ \
M, \
N ## block, \
block, \
method>::EvaluateJacobianForParameterBlock( \
function_.get(), \
residuals, \
parameters_references_copy, \
jacobians)) { \
return false; \
} \
#define EVALUATE_JACOBIAN_FOR_BLOCK(block) \
if (N ## block && jacobians[block] != NULL) { \
if (!NumericDiff<CostFunctor, \
method, \
kNumResiduals, \
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9, \
block, \
N ## block >::EvaluateJacobianForParameterBlock( \
functor_.get(), \
residuals, \
relative_step_size_, \
parameters_reference_copy.get(), \
jacobians[block])) { \
return false; \
} \
}
EVALUATE_JACOBIAN_FOR_BLOCK(0);
EVALUATE_JACOBIAN_FOR_BLOCK(1);
EVALUATE_JACOBIAN_FOR_BLOCK(2);
EVALUATE_JACOBIAN_FOR_BLOCK(3);
EVALUATE_JACOBIAN_FOR_BLOCK(4);
EVALUATE_JACOBIAN_FOR_BLOCK(5);
EVALUATE_JACOBIAN_FOR_BLOCK(6);
EVALUATE_JACOBIAN_FOR_BLOCK(7);
EVALUATE_JACOBIAN_FOR_BLOCK(8);
EVALUATE_JACOBIAN_FOR_BLOCK(9);
#undef EVALUATE_JACOBIAN_FOR_BLOCK
return true;
}
private:
internal::scoped_ptr<CostFunctionNoJacobian> function_;
internal::scoped_ptr<CostFunctor> functor_;
Ownership ownership_;
const double relative_step_size_;
};
} // namespace ceres

@ -0,0 +1,346 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// A wrapper class that takes a variadic functor evaluating a
// function, numerically differentiates it and makes it available as a
// templated functor so that it can be easily used as part of Ceres'
// automatic differentiation framework.
//
// For example, let us assume that
//
// struct IntrinsicProjection {
// IntrinsicProjection(const double* observations);
// bool operator()(const double* calibration,
// const double* point,
// double* residuals);
// };
//
// is a functor that implements the projection of a point in its local
// coordinate system onto its image plane and subtracts it from the
// observed point projection.
//
// Now we would like to compose the action of this functor with the
// action of camera extrinsics, i.e., rotation and translation, which
// is given by the following templated function
//
// template<typename T>
// void RotateAndTranslatePoint(const T* rotation,
// const T* translation,
// const T* point,
// T* result);
//
// To compose the extrinsics and intrinsics, we can construct a
// CameraProjection functor as follows.
//
// struct CameraProjection {
// typedef NumericDiffFunctor<IntrinsicProjection, CENTRAL, 2, 5, 3>
// IntrinsicProjectionFunctor;
//
// CameraProjection(double* observation) {
// intrinsic_projection_.reset(
// new IntrinsicProjectionFunctor(observation));
// }
//
// template <typename T>
// bool operator()(const T* rotation,
// const T* translation,
// const T* intrinsics,
// const T* point,
// T* residuals) const {
// T transformed_point[3];
// RotateAndTranslatePoint(rotation, translation, point, transformed_point);
// return (*intrinsic_projection_)(intrinsics, transformed_point, residuals);
// }
//
// private:
// scoped_ptr<IntrinsicProjectionFunctor> intrinsic_projection_;
// };
//
// Here, we made the choice of using CENTRAL differences to compute
// the jacobian of IntrinsicProjection.
//
// Now, we are ready to construct an automatically differentiated cost
// function as
//
// CostFunction* cost_function =
// new AutoDiffCostFunction<CameraProjection, 2, 3, 3, 5, 3>(
// new CameraProjection(observations));
//
// cost_function now seamlessly integrates automatic differentiation
// of RotateAndTranslatePoint with a numerically differentiated
// version of IntrinsicProjection.
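//
// As a further minimal sketch (not part of the example above; the
// functor name "AffineResidual" is made up), note that the double
// overloads of operator() simply evaluate the wrapped functor, so
// residuals can also be computed directly:
//
// struct AffineResidual {
// bool operator()(const double* x, double* residual) const {
// residual[0] = 2.0 * x[0] - 1.0;
// return true;
// }
// };
//
// typedef NumericDiffFunctor<AffineResidual, CENTRAL, 1, 1> AffineFunctor;
// AffineFunctor affine; // differentiated numerically when used with autodiff
// double x = 3.0, r = 0.0;
// affine(&x, &r); // r == 5.0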
#ifndef CERES_PUBLIC_NUMERIC_DIFF_FUNCTOR_H_
#define CERES_PUBLIC_NUMERIC_DIFF_FUNCTOR_H_
#include "ceres/numeric_diff_cost_function.h"
#include "ceres/types.h"
#include "ceres/cost_function_to_functor.h"
namespace ceres {
template<typename Functor,
NumericDiffMethod kMethod = CENTRAL,
int kNumResiduals = 0,
int N0 = 0, int N1 = 0 , int N2 = 0, int N3 = 0, int N4 = 0,
int N5 = 0, int N6 = 0 , int N7 = 0, int N8 = 0, int N9 = 0>
class NumericDiffFunctor {
public:
// relative_step_size controls the step size used by the numeric
// differentiation process.
explicit NumericDiffFunctor(double relative_step_size = 1e-6)
: functor_(
new NumericDiffCostFunction<Functor,
kMethod,
kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9>(new Functor,
relative_step_size)) {
}
NumericDiffFunctor(Functor* functor, double relative_step_size = 1e-6)
: functor_(new NumericDiffCostFunction<Functor,
kMethod,
kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9>(
functor, relative_step_size)) {
}
bool operator()(const double* x0, double* residuals) const {
return functor_(x0, residuals);
}
bool operator()(const double* x0,
const double* x1,
double* residuals) const {
return functor_(x0, x1, residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
double* residuals) const {
return functor_(x0, x1, x2, residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
double* residuals) const {
return functor_(x0, x1, x2, x3, residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
double* residuals) const {
return functor_(x0, x1, x2, x3, x4, residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
double* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
double* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, x6, residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
const double* x7,
double* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, x6, x7, residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
const double* x7,
const double* x8,
double* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, residuals);
}
bool operator()(const double* x0,
const double* x1,
const double* x2,
const double* x3,
const double* x4,
const double* x5,
const double* x6,
const double* x7,
const double* x8,
const double* x9,
double* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, residuals);
}
template <typename T>
bool operator()(const T* x0, T* residuals) const {
return functor_(x0, residuals);
}
template <typename T>
bool operator()(const T* x0,
const T* x1,
T* residuals) const {
return functor_(x0, x1, residuals);
}
template <typename T>
bool operator()(const T* x0,
const T* x1,
const T* x2,
T* residuals) const {
return functor_(x0, x1, x2, residuals);
}
template <typename T>
bool operator()(const T* x0,
const T* x1,
const T* x2,
const T* x3,
T* residuals) const {
return functor_(x0, x1, x2, x3, residuals);
}
template <typename T>
bool operator()(const T* x0,
const T* x1,
const T* x2,
const T* x3,
const T* x4,
T* residuals) const {
return functor_(x0, x1, x2, x3, x4, residuals);
}
template <typename T>
bool operator()(const T* x0,
const T* x1,
const T* x2,
const T* x3,
const T* x4,
const T* x5,
T* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, residuals);
}
template <typename T>
bool operator()(const T* x0,
const T* x1,
const T* x2,
const T* x3,
const T* x4,
const T* x5,
const T* x6,
T* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, x6, residuals);
}
template <typename T>
bool operator()(const T* x0,
const T* x1,
const T* x2,
const T* x3,
const T* x4,
const T* x5,
const T* x6,
const T* x7,
T* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, x6, x7, residuals);
}
template <typename T>
bool operator()(const T* x0,
const T* x1,
const T* x2,
const T* x3,
const T* x4,
const T* x5,
const T* x6,
const T* x7,
const T* x8,
T* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, residuals);
}
template <typename T>
bool operator()(const T* x0,
const T* x1,
const T* x2,
const T* x3,
const T* x4,
const T* x5,
const T* x6,
const T* x7,
const T* x8,
const T* x9,
T* residuals) const {
return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, residuals);
}
private:
CostFunctionToFunctor<kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9> functor_;
};
} // namespace ceres
#endif // CERES_PUBLIC_NUMERIC_DIFF_FUNCTOR_H_

@ -0,0 +1,176 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_ORDERED_GROUPS_H_
#define CERES_PUBLIC_ORDERED_GROUPS_H_
#include <map>
#include <set>
#include "ceres/internal/port.h"
namespace ceres {
// A class for storing and manipulating an ordered collection of
// groups/sets with the following semantics:
//
// Group ids are non-negative integer values. Elements are any type
// that can serve as a key in a map or an element of a set.
//
// An element can only belong to one group at a time. A group may
// contain an arbitrary number of elements.
//
// Groups are ordered by their group id.
template <typename T>
class OrderedGroups {
public:
// Add an element to a group. If a group with this id does not
// exist, one is created. This method can be called any number of
// times for the same element. Group ids should be non-negative
// numbers.
//
// Return value indicates if adding the element was a success.
bool AddElementToGroup(const T element, const int group) {
if (group < 0) {
return false;
}
typename map<T, int>::const_iterator it = element_to_group_.find(element);
if (it != element_to_group_.end()) {
if (it->second == group) {
// Element is already in the right group, nothing to do.
return true;
}
group_to_elements_[it->second].erase(element);
if (group_to_elements_[it->second].size() == 0) {
group_to_elements_.erase(it->second);
}
}
element_to_group_[element] = group;
group_to_elements_[group].insert(element);
return true;
}
void Clear() {
group_to_elements_.clear();
element_to_group_.clear();
}
// Remove the element, no matter what group it is in. If the element
// is not a member of any group, this method does nothing.
//
// Return value indicates if the element was actually removed.
bool Remove(const T element) {
const int current_group = GroupId(element);
if (current_group < 0) {
return false;
}
group_to_elements_[current_group].erase(element);
if (group_to_elements_[current_group].size() == 0) {
// If the group is empty, then get rid of it.
group_to_elements_.erase(current_group);
}
element_to_group_.erase(element);
return true;
}
// Reverse the order of the groups in place.
void Reverse() {
typename map<int, set<T> >::reverse_iterator it =
group_to_elements_.rbegin();
map<int, set<T> > new_group_to_elements;
new_group_to_elements[it->first] = it->second;
int new_group_id = it->first + 1;
for (++it; it != group_to_elements_.rend(); ++it) {
for (typename set<T>::const_iterator element_it = it->second.begin();
element_it != it->second.end();
++element_it) {
element_to_group_[*element_it] = new_group_id;
}
new_group_to_elements[new_group_id] = it->second;
new_group_id++;
}
group_to_elements_.swap(new_group_to_elements);
}
// Return the group id for the element. If the element is not a
// member of any group, return -1.
int GroupId(const T element) const {
typename map<T, int>::const_iterator it = element_to_group_.find(element);
if (it == element_to_group_.end()) {
return -1;
}
return it->second;
}
bool IsMember(const T element) const {
typename map<T, int>::const_iterator it = element_to_group_.find(element);
return (it != element_to_group_.end());
}
// This function always succeeds, i.e., implicitly there exists a
// group for every integer.
int GroupSize(const int group) const {
typename map<int, set<T> >::const_iterator it =
group_to_elements_.find(group);
return (it == group_to_elements_.end()) ? 0 : it->second.size();
}
int NumElements() const {
return element_to_group_.size();
}
// Number of groups with one or more elements.
int NumGroups() const {
return group_to_elements_.size();
}
const map<int, set<T> >& group_to_elements() const {
return group_to_elements_;
}
private:
map<int, set<T> > group_to_elements_;
map<T, int> element_to_group_;
};
// Typedef for the most commonly used version of OrderedGroups.
typedef OrderedGroups<double*> ParameterBlockOrdering;
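// A short usage sketch (the parameter block pointers below are
// illustrative stand-ins for the user's own blocks):
//
// ParameterBlockOrdering ordering;
// double point[3];
// double camera[9];
// ordering.AddElementToGroup(point, 0); // group 0 is eliminated first
// ordering.AddElementToGroup(camera, 1);
// // ordering.NumGroups() == 2, ordering.GroupId(point) == 0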
} // namespace ceres
#endif // CERES_PUBLIC_ORDERED_GROUPS_H_

@ -39,11 +39,12 @@
#include <set>
#include <vector>
#include <glog/logging.h>
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
@ -51,6 +52,7 @@ class CostFunction;
class LossFunction;
class LocalParameterization;
class Solver;
struct CRSMatrix;
namespace internal {
class Preprocessor;
@ -59,10 +61,9 @@ class ParameterBlock;
class ResidualBlock;
} // namespace internal
// A ResidualBlockId is a handle clients can use to delete residual
// blocks after creating them. They are opaque for any purposes other
// than that.
typedef const internal::ResidualBlock* ResidualBlockId;
// A ResidualBlockId is an opaque handle clients can use to remove residual
// blocks from a Problem after adding them.
typedef internal::ResidualBlock* ResidualBlockId;
// A class to represent non-linear least squares problems. Such
// problems have a cost function that is a sum of error terms (known
@ -122,7 +123,9 @@ class Problem {
Options()
: cost_function_ownership(TAKE_OWNERSHIP),
loss_function_ownership(TAKE_OWNERSHIP),
local_parameterization_ownership(TAKE_OWNERSHIP) {}
local_parameterization_ownership(TAKE_OWNERSHIP),
enable_fast_parameter_block_removal(false),
disable_all_safety_checks(false) {}
// These flags control whether the Problem object owns the cost
// functions, loss functions, and parameterizations passed into
@ -134,6 +137,29 @@ class Problem {
Ownership cost_function_ownership;
Ownership loss_function_ownership;
Ownership local_parameterization_ownership;
// If true, trades memory for a faster RemoveParameterBlock() operation.
//
// RemoveParameterBlock() takes time proportional to the size of the entire
// Problem. If you only remove parameter blocks from the Problem
// occasionally, this may be acceptable. However, if you are modifying the
// Problem frequently, and have memory to spare, then flip this switch to
// make RemoveParameterBlock() take time proportional to the number of
// residual blocks that depend on it. The increase in memory usage is an
// additional hash set per parameter block containing all the residuals that
// depend on the parameter block.
bool enable_fast_parameter_block_removal;
// By default, Ceres performs a variety of safety checks when constructing
// the problem. There is a small but measurable performance penalty to
// these checks, typically around 5% of construction time. If you are sure
// your problem construction is correct, and 5% of the problem construction
// time is truly an overhead you want to avoid, then you can set
// disable_all_safety_checks to true.
//
// WARNING: Do not set this to true, unless you are absolutely sure of what
// you are doing.
bool disable_all_safety_checks;
};
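// For instance (a sketch, not taken from this header), to make
// RemoveParameterBlock() cheap on a problem that is edited often:
//
// Problem::Options options;
// options.enable_fast_parameter_block_removal = true;
// Problem problem(options);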
// The default constructor is equivalent to the
@ -208,6 +234,27 @@ class Problem {
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5,
double* x6);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5,
double* x6, double* x7);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5,
double* x6, double* x7, double* x8);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0, double* x1, double* x2,
double* x3, double* x4, double* x5,
double* x6, double* x7, double* x8,
double* x9);
// Add a parameter block with appropriate size to the problem.
// Repeated calls with the same arguments are ignored. Repeated
@ -223,6 +270,33 @@ class Problem {
int size,
LocalParameterization* local_parameterization);
// Remove a parameter block from the problem. The parameterization of the
// parameter block, if it exists, will persist until the deletion of the
// problem (similar to cost/loss functions in residual block removal). Any
// residual blocks that depend on the parameter are also removed, as
// described above in RemoveResidualBlock().
//
// If Problem::Options::enable_fast_parameter_block_removal is true, then the
// removal is fast (almost constant time). Otherwise, removing a parameter
// block will incur a scan of the entire Problem object.
//
// WARNING: Removing a residual or parameter block will destroy the implicit
// ordering, rendering the jacobian or residuals returned from the solver
// uninterpretable. If you depend on the evaluated jacobian, do not use
// remove! This may change in a future release.
void RemoveParameterBlock(double* values);
// Remove a residual block from the problem. Any parameters that the residual
// block depends on are not removed. The cost and loss functions for the
// residual block will not get deleted immediately; deletion happens when
// the problem itself is deleted.
//
// WARNING: Removing a residual or parameter block will destroy the implicit
// ordering, rendering the jacobian or residuals returned from the solver
// uninterpretable. If you depend on the evaluated jacobian, do not use
// remove! This may change in a future release.
void RemoveResidualBlock(ResidualBlockId residual_block);
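// e.g. (a sketch; MyCostFunction is a placeholder for a user-defined
// cost function):
//
// double x[3];
// ResidualBlockId id =
// problem.AddResidualBlock(new MyCostFunction, NULL, x);
// ...
// problem.RemoveResidualBlock(id); // cost function freed with the problem
// problem.RemoveParameterBlock(x); // also drops residual blocks using x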
// Hold the indicated parameter block constant during optimization.
void SetParameterBlockConstant(double* values);
@ -254,6 +328,76 @@ class Problem {
// sizes of all of the residual blocks.
int NumResiduals() const;
// Options struct to control Problem::Evaluate.
struct EvaluateOptions {
EvaluateOptions()
: num_threads(1) {
}
// The set of parameter blocks for which evaluation should be
// performed. This vector determines the order that parameter
// blocks occur in the gradient vector and in the columns of the
// jacobian matrix. If parameter_blocks is empty, then it is
// assumed to be equal to a vector containing ALL the parameter
// blocks. Generally speaking the parameter blocks will occur in
// the order in which they were added to the problem. But, this
// may change if the user removes any parameter blocks from the
// problem.
//
// NOTE: This vector should contain the same pointers as the ones
// used to add parameter blocks to the Problem. These parameter
// blocks should NOT point to new memory locations. Bad things will
// happen otherwise.
vector<double*> parameter_blocks;
// The set of residual blocks to evaluate. This vector determines
// the order in which the residuals occur, and how the rows of the
// jacobian are ordered. If residual_blocks is empty, then it is
// assumed to be equal to a vector containing ALL the residual
// blocks. Generally speaking the residual blocks will occur in the
// order in which they were added to the problem. But, this may change if the
// user removes any residual blocks from the problem.
vector<ResidualBlockId> residual_blocks;
int num_threads;
};
// Evaluate Problem. Any of the output pointers can be NULL. Which
// residual blocks and parameter blocks are used is controlled by
// the EvaluateOptions struct above.
//
// Note 1: The evaluation will use the values stored in the memory
// locations pointed to by the parameter block pointers used at the
// time of the construction of the problem. i.e.,
//
// Problem problem;
// double x = 1;
// problem.Add(new MyCostFunction, NULL, &x);
//
// double cost = 0.0;
// problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL);
//
// The cost is evaluated at x = 1. If you wish to evaluate the
// problem at x = 2, then
//
// x = 2;
// problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL);
//
// is the way to do so.
//
// Note 2: If no local parameterizations are used, then the size of
// the gradient vector (and the number of columns in the jacobian)
// is the sum of the sizes of all the parameter blocks. If a
// parameter block has a local parameterization, then it contributes
// "LocalSize" entries to the gradient vector (and the number of
// columns in the jacobian).
bool Evaluate(const EvaluateOptions& options,
double* cost,
vector<double>* residuals,
vector<double>* gradient,
CRSMatrix* jacobian);
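// For instance, to also retrieve the residuals and the jacobian at the
// current parameter values (a sketch):
//
// double cost = 0.0;
// vector<double> residuals;
// CRSMatrix jacobian;
// problem.Evaluate(Problem::EvaluateOptions(),
// &cost, &residuals, NULL, &jacobian);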
private:
friend class Solver;
internal::scoped_ptr<internal::ProblemImpl> problem_impl_;

@ -51,6 +51,31 @@
namespace ceres {
// Trivial wrapper to index linear arrays as matrices, given a fixed
// column and row stride. When an array "T* array" is wrapped by a
//
// (const) MatrixAdapter<T, row_stride, col_stride> M
//
// the expression M(i, j) is equivalent to
//
// array[i * row_stride + j * col_stride]
//
// Conversion functions to and from rotation matrices accept
// MatrixAdapters to permit using row-major and column-major layouts,
// and rotation matrices embedded in larger matrices (such as a 3x4
// projection matrix).
template <typename T, int row_stride, int col_stride>
struct MatrixAdapter;
// Convenience functions to create a MatrixAdapter that treats the
// array pointed to by "pointer" as a 3x3 (contiguous) column-major or
// row-major matrix.
template <typename T>
MatrixAdapter<T, 1, 3> ColumnMajorAdapter3x3(T* pointer);
template <typename T>
MatrixAdapter<T, 3, 1> RowMajorAdapter3x3(T* pointer);
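// For example (a sketch; P and angle_axis are illustrative), the
// rotation part of a row-major 3x4 projection matrix P can be written
// in place by using a row stride of 4:
//
// double P[12];
// double angle_axis[3] = {0.0, 0.0, 1.57};
// AngleAxisToRotationMatrix(angle_axis, MatrixAdapter<double, 4, 1>(P));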
// Convert a value in combined axis-angle representation to a quaternion.
// The value angle_axis is a triple whose norm is an angle in radians,
// and whose direction is aligned with the axis of rotation,
@ -73,9 +98,20 @@ void QuaternionToAngleAxis(T const* quaternion, T* angle_axis);
// axis-angle rotation representations. Templated for use with
// autodifferentiation.
template <typename T>
void RotationMatrixToAngleAxis(T const * R, T * angle_axis);
void RotationMatrixToAngleAxis(T const* R, T* angle_axis);
template <typename T, int row_stride, int col_stride>
void RotationMatrixToAngleAxis(
const MatrixAdapter<const T, row_stride, col_stride>& R,
T* angle_axis);
template <typename T>
void AngleAxisToRotationMatrix(T const * angle_axis, T * R);
void AngleAxisToRotationMatrix(T const* angle_axis, T* R);
template <typename T, int row_stride, int col_stride>
void AngleAxisToRotationMatrix(
T const* angle_axis,
const MatrixAdapter<T, row_stride, col_stride>& R);
// Conversions between 3x3 rotation matrix (in row major order) and
// Euler angle (in degrees) rotation representations.
@ -86,6 +122,11 @@ void AngleAxisToRotationMatrix(T const * angle_axis, T * R);
template <typename T>
void EulerAnglesToRotationMatrix(const T* euler, int row_stride, T* R);
template <typename T, int row_stride, int col_stride>
void EulerAnglesToRotationMatrix(
const T* euler,
const MatrixAdapter<T, row_stride, col_stride>& R);
// Convert a 4-vector to a 3x3 scaled rotation matrix.
//
// The choice of rotation is such that the quaternion [1 0 0 0] goes to an
@ -108,11 +149,21 @@ void EulerAnglesToRotationMatrix(const T* euler, int row_stride, T* R);
template <typename T> inline
void QuaternionToScaledRotation(const T q[4], T R[3 * 3]);
template <typename T, int row_stride, int col_stride> inline
void QuaternionToScaledRotation(
const T q[4],
const MatrixAdapter<T, row_stride, col_stride>& R);
// Same as above except that the rotation matrix is normalized by the
// Frobenius norm, so that R * R' = I (and det(R) = 1).
template <typename T> inline
void QuaternionToRotation(const T q[4], T R[3 * 3]);
template <typename T, int row_stride, int col_stride> inline
void QuaternionToRotation(
const T q[4],
const MatrixAdapter<T, row_stride, col_stride>& R);
// Rotates a point pt by a quaternion q:
//
// result = R(q) * pt
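//
// e.g. (a sketch): rotating the point (1, 0, 0) by 90 degrees about the
// z-axis with the unit quaternion q = [cos(45deg), 0, 0, sin(45deg)]
// yields approximately (0, 1, 0):
//
// double q[4] = { 0.70710678, 0.0, 0.0, 0.70710678 };
// double pt[3] = { 1.0, 0.0, 0.0 };
// double result[3];
// UnitQuaternionRotatePoint(q, pt, result);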
@ -146,6 +197,28 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]);
// --- IMPLEMENTATION
template<typename T, int row_stride, int col_stride>
struct MatrixAdapter {
T* pointer_;
explicit MatrixAdapter(T* pointer)
: pointer_(pointer)
{}
T& operator()(int r, int c) const {
return pointer_[r * row_stride + c * col_stride];
}
};
template <typename T>
MatrixAdapter<T, 1, 3> ColumnMajorAdapter3x3(T* pointer) {
return MatrixAdapter<T, 1, 3>(pointer);
}
template <typename T>
MatrixAdapter<T, 3, 1> RowMajorAdapter3x3(T* pointer) {
return MatrixAdapter<T, 3, 1>(pointer);
}
template<typename T>
inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
const T& a0 = angle_axis[0];
@ -228,17 +301,24 @@ inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
// to not perform division by a small number.
template <typename T>
inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) {
RotationMatrixToAngleAxis(ColumnMajorAdapter3x3(R), angle_axis);
}
template <typename T, int row_stride, int col_stride>
void RotationMatrixToAngleAxis(
const MatrixAdapter<const T, row_stride, col_stride>& R,
T * angle_axis) {
// x = k * 2 * sin(theta), where k is the axis of rotation.
angle_axis[0] = R[5] - R[7];
angle_axis[1] = R[6] - R[2];
angle_axis[2] = R[1] - R[3];
angle_axis[0] = R(2, 1) - R(1, 2);
angle_axis[1] = R(0, 2) - R(2, 0);
angle_axis[2] = R(1, 0) - R(0, 1);
static const T kOne = T(1.0);
static const T kTwo = T(2.0);
// Since the right hand side may give numbers just above 1.0 or
// below -1.0 leading to atan misbehaving, we threshold.
T costheta = std::min(std::max((R[0] + R[4] + R[8] - kOne) / kTwo,
T costheta = std::min(std::max((R(0, 0) + R(1, 1) + R(2, 2) - kOne) / kTwo,
T(-1.0)),
kOne);
@ -296,7 +376,7 @@ inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) {
// with the sign of sin(theta). If they are the same, then
// angle_axis[i] should be positive, otherwise negative.
for (int i = 0; i < 3; ++i) {
angle_axis[i] = theta * sqrt((R[i*4] - costheta) * inv_one_minus_costheta);
angle_axis[i] = theta * sqrt((R(i, i) - costheta) * inv_one_minus_costheta);
if (((sintheta < 0.0) && (angle_axis[i] > 0.0)) ||
((sintheta > 0.0) && (angle_axis[i] < 0.0))) {
angle_axis[i] = -angle_axis[i];
@ -306,6 +386,13 @@ inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) {
template <typename T>
inline void AngleAxisToRotationMatrix(const T * angle_axis, T * R) {
AngleAxisToRotationMatrix(angle_axis, ColumnMajorAdapter3x3(R));
}
template <typename T, int row_stride, int col_stride>
void AngleAxisToRotationMatrix(
const T * angle_axis,
const MatrixAdapter<T, row_stride, col_stride>& R) {
static const T kOne = T(1.0);
const T theta2 = DotProduct(angle_axis, angle_axis);
if (theta2 > 0.0) {
@ -320,33 +407,41 @@ inline void AngleAxisToRotationMatrix(const T * angle_axis, T * R) {
const T costheta = cos(theta);
const T sintheta = sin(theta);
R[0] = costheta + wx*wx*(kOne - costheta);
R[1] = wz*sintheta + wx*wy*(kOne - costheta);
R[2] = -wy*sintheta + wx*wz*(kOne - costheta);
R[3] = wx*wy*(kOne - costheta) - wz*sintheta;
R[4] = costheta + wy*wy*(kOne - costheta);
R[5] = wx*sintheta + wy*wz*(kOne - costheta);
R[6] = wy*sintheta + wx*wz*(kOne - costheta);
R[7] = -wx*sintheta + wy*wz*(kOne - costheta);
R[8] = costheta + wz*wz*(kOne - costheta);
R(0, 0) = costheta + wx*wx*(kOne - costheta);
R(1, 0) = wz*sintheta + wx*wy*(kOne - costheta);
R(2, 0) = -wy*sintheta + wx*wz*(kOne - costheta);
R(0, 1) = wx*wy*(kOne - costheta) - wz*sintheta;
R(1, 1) = costheta + wy*wy*(kOne - costheta);
R(2, 1) = wx*sintheta + wy*wz*(kOne - costheta);
R(0, 2) = wy*sintheta + wx*wz*(kOne - costheta);
R(1, 2) = -wx*sintheta + wy*wz*(kOne - costheta);
R(2, 2) = costheta + wz*wz*(kOne - costheta);
} else {
// At zero, we switch to using the first order Taylor expansion.
R[0] = kOne;
R[1] = -angle_axis[2];
R[2] = angle_axis[1];
R[3] = angle_axis[2];
R[4] = kOne;
R[5] = -angle_axis[0];
R[6] = -angle_axis[1];
R[7] = angle_axis[0];
R[8] = kOne;
R(0, 0) = kOne;
R(1, 0) = -angle_axis[2];
R(2, 0) = angle_axis[1];
R(0, 1) = angle_axis[2];
R(1, 1) = kOne;
R(2, 1) = -angle_axis[0];
R(0, 2) = -angle_axis[1];
R(1, 2) = angle_axis[0];
R(2, 2) = kOne;
}
}
template <typename T>
inline void EulerAnglesToRotationMatrix(const T* euler,
const int row_stride,
const int row_stride_parameter,
T* R) {
CHECK_EQ(row_stride_parameter, 3);
EulerAnglesToRotationMatrix(euler, RowMajorAdapter3x3(R));
}
template <typename T, int row_stride, int col_stride>
void EulerAnglesToRotationMatrix(
const T* euler,
const MatrixAdapter<T, row_stride, col_stride>& R) {
const double kPi = 3.14159265358979323846;
const T degrees_to_radians(kPi / 180.0);
@ -361,26 +456,28 @@ inline void EulerAnglesToRotationMatrix(const T* euler,
const T c3 = cos(pitch);
const T s3 = sin(pitch);
// Rows of the rotation matrix.
T* R1 = R;
T* R2 = R1 + row_stride;
T* R3 = R2 + row_stride;
R(0, 0) = c1*c2;
R(0, 1) = -s1*c3 + c1*s2*s3;
R(0, 2) = s1*s3 + c1*s2*c3;
R1[0] = c1*c2;
R1[1] = -s1*c3 + c1*s2*s3;
R1[2] = s1*s3 + c1*s2*c3;
R(1, 0) = s1*c2;
R(1, 1) = c1*c3 + s1*s2*s3;
R(1, 2) = -c1*s3 + s1*s2*c3;
R2[0] = s1*c2;
R2[1] = c1*c3 + s1*s2*s3;
R2[2] = -c1*s3 + s1*s2*c3;
R3[0] = -s2;
R3[1] = c2*s3;
R3[2] = c2*c3;
R(2, 0) = -s2;
R(2, 1) = c2*s3;
R(2, 2) = c2*c3;
}
template <typename T> inline
void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) {
QuaternionToScaledRotation(q, RowMajorAdapter3x3(R));
}
template <typename T, int row_stride, int col_stride> inline
void QuaternionToScaledRotation(
const T q[4],
const MatrixAdapter<T, row_stride, col_stride>& R) {
// Make convenient names for elements of q.
T a = q[0];
T b = q[1];
@ -399,21 +496,29 @@ void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) {
T cd = c * d;
T dd = d * d;
R[0] = aa + bb - cc - dd; R[1] = T(2) * (bc - ad); R[2] = T(2) * (ac + bd); // NOLINT
R[3] = T(2) * (ad + bc); R[4] = aa - bb + cc - dd; R[5] = T(2) * (cd - ab); // NOLINT
R[6] = T(2) * (bd - ac); R[7] = T(2) * (ab + cd); R[8] = aa - bb - cc + dd; // NOLINT
R(0, 0) = aa + bb - cc - dd; R(0, 1) = T(2) * (bc - ad); R(0, 2) = T(2) * (ac + bd); // NOLINT
R(1, 0) = T(2) * (ad + bc); R(1, 1) = aa - bb + cc - dd; R(1, 2) = T(2) * (cd - ab); // NOLINT
R(2, 0) = T(2) * (bd - ac); R(2, 1) = T(2) * (ab + cd); R(2, 2) = aa - bb - cc + dd; // NOLINT
}
template <typename T> inline
void QuaternionToRotation(const T q[4], T R[3 * 3]) {
QuaternionToRotation(q, RowMajorAdapter3x3(R));
}
template <typename T, int row_stride, int col_stride> inline
void QuaternionToRotation(const T q[4],
const MatrixAdapter<T, row_stride, col_stride>& R) {
QuaternionToScaledRotation(q, R);
T normalizer = q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3];
CHECK_NE(normalizer, T(0));
normalizer = T(1) / normalizer;
for (int i = 0; i < 9; ++i) {
R[i] *= normalizer;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
R(i, j) *= normalizer;
}
}
}
@ -433,7 +538,6 @@ void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
result[2] = T(2) * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2]; // NOLINT
}
template <typename T> inline
void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
// 'scale' is 1 / norm(q).

@ -45,25 +45,29 @@
namespace ceres {
template<int kNumResiduals,
int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, int N5 = 0>
int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
class SizedCostFunction : public CostFunction {
public:
SizedCostFunction() {
// Sanity checking.
CHECK(kNumResiduals > 0 || kNumResiduals == DYNAMIC)
<< "Cost functions must have at least one residual block.";
CHECK_GT(N0, 0)
<< "Cost functions must have at least one parameter block.";
CHECK((!N1 && !N2 && !N3 && !N4 && !N5) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0)))
// This block breaks the 80 column rule to keep it somewhat readable.
CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) ||
((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0)))
<< "Zero block cannot precede a non-zero block. Block sizes are "
<< "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
<< N3 << ", " << N4 << ", " << N5;
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
<< N8 << ", " << N9;
set_num_residuals(kNumResiduals);
@ -75,6 +79,10 @@ class SizedCostFunction : public CostFunction {
ADD_PARAMETER_BLOCK(N3);
ADD_PARAMETER_BLOCK(N4);
ADD_PARAMETER_BLOCK(N5);
ADD_PARAMETER_BLOCK(N6);
ADD_PARAMETER_BLOCK(N7);
ADD_PARAMETER_BLOCK(N8);
ADD_PARAMETER_BLOCK(N9);
#undef ADD_PARAMETER_BLOCK
}
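// A sketch (illustrative only) of a cost function that uses the
// extended parameter block list: two residuals and three parameter
// blocks of sizes 4, 3 and 3.
//
// class MyAnalyticCost : public SizedCostFunction<2, 4, 3, 3> {
// public:
// virtual bool Evaluate(double const* const* parameters,
// double* residuals,
// double** jacobians) const {
// // Fill residuals[0..1] and, when requested, jacobians[0..2].
// return true;
// }
// };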

@ -38,6 +38,7 @@
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/ordered_groups.h"
#include "ceres/types.h"
namespace ceres {
@ -57,6 +58,11 @@ class Solver {
struct Options {
// Default constructor that sets up a generic sparse problem.
Options() {
minimizer_type = TRUST_REGION;
line_search_direction_type = LBFGS;
line_search_type = ARMIJO;
nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
max_lbfgs_rank = 20;
trust_region_strategy_type = LEVENBERG_MARQUARDT;
dogleg_type = TRADITIONAL_DOGLEG;
use_nonmonotonic_steps = false;
@ -89,27 +95,21 @@ class Solver {
#endif
num_linear_solver_threads = 1;
num_eliminate_blocks = 0;
ordering_type = NATURAL;
#if defined(CERES_NO_SUITESPARSE)
use_block_amd = false;
#else
use_block_amd = true;
#endif
linear_solver_ordering = NULL;
use_inner_iterations = false;
inner_iteration_ordering = NULL;
linear_solver_min_num_iterations = 1;
linear_solver_max_num_iterations = 500;
eta = 1e-1;
jacobi_scaling = true;
logging_type = PER_MINIMIZER_ITERATION;
minimizer_progress_to_stdout = false;
return_initial_residuals = false;
return_initial_gradient = false;
return_initial_jacobian = false;
return_final_residuals = false;
return_final_gradient = false;
return_final_jacobian = false;
lsqp_dump_directory = "/tmp";
lsqp_dump_format_type = TEXTFILE;
check_gradients = false;
@ -118,8 +118,64 @@ class Solver {
update_state_every_iteration = false;
}
~Options();
// Minimizer options ----------------------------------------
// Ceres supports the two major families of optimization strategies -
// Trust Region and Line Search.
//
// 1. The line search approach first finds a descent direction
// along which the objective function will be reduced and then
// computes a step size that decides how far one should move along
// that direction. The descent direction can be computed by
// various methods, such as gradient descent, Newton's method and
// Quasi-Newton methods. The step size can be determined either
// exactly or inexactly.
//
// 2. The trust region approach approximates the objective
// function using a model function (often a quadratic) over
// a subset of the search space known as the trust region. If the
// model function succeeds in minimizing the true objective
// function, the trust region is expanded; otherwise it
// is contracted and the model optimization problem is solved
// again.
//
// Trust region methods are in some sense dual to line search methods:
// trust region methods first choose a step size (the size of the
// trust region) and then a step direction while line search methods
// first choose a step direction and then a step size.
MinimizerType minimizer_type;
LineSearchDirectionType line_search_direction_type;
LineSearchType line_search_type;
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
// The LBFGS Hessian approximation is a low rank approximation to
// the inverse of the Hessian matrix. The rank of the
// approximation determines (linearly) the space and time
// complexity of using the approximation. The higher the rank, the
// better the quality of the approximation. The increase in
// quality is, however, bounded for a number of reasons.
//
// 1. The method only uses secant information and not actual
// derivatives.
//
// 2. The Hessian approximation is constrained to be positive
// definite.
//
// So increasing this rank to a large number will cost time and
// space complexity without the corresponding increase in solution
// quality. There are no hard and fast rules for choosing the
// maximum rank. The best choice usually requires some problem
// specific experimentation.
//
// For more theoretical and implementation details of the LBFGS
// method, please see:
//
// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with
// Limited Storage". Mathematics of Computation 35 (151): 773-782.
int max_lbfgs_rank;
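// For example, to select a line search L-BFGS minimizer (a sketch):
//
// Solver::Options options;
// options.minimizer_type = LINE_SEARCH;
// options.line_search_direction_type = LBFGS;
// options.max_lbfgs_rank = 10;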
TrustRegionStrategyType trust_region_strategy_type;
// Type of dogleg strategy to use.
@ -229,29 +285,76 @@ class Solver {
// using this setting.
int num_linear_solver_threads;
// For Schur reduction based methods, the first 0 to num blocks are
// eliminated using the Schur reduction. For example, when solving
// traditional structure from motion problems where the parameters are in
// two classes (cameras and points) then num_eliminate_blocks would be the
// number of points.
// The order in which variables are eliminated in a linear solver
// can have a significant impact on the efficiency and accuracy
// of the method. e.g., when doing sparse Cholesky factorization,
// there are matrices for which a good ordering will give a
// Cholesky factor with O(n) storage, whereas a bad ordering will
// result in a completely dense factor.
//
// This parameter is used in conjunction with the ordering.
// Applies to: Preprocessor and linear least squares solver.
int num_eliminate_blocks;
// Internally Ceres reorders the parameter blocks to help the
// various linear solvers. This parameter allows the user to
// influence the re-ordering strategy used. For structure from
// motion problems use SCHUR, for other problems NATURAL (default)
// is a good choice. In case you wish to specify your own ordering
// scheme, for example in conjunction with num_eliminate_blocks,
// use USER.
OrderingType ordering_type;
// The ordering of the parameter blocks. The solver pays attention
// to it if the ordering_type is set to USER and the vector is
// non-empty.
vector<double*> ordering;
// Ceres allows the user to provide varying amounts of hints to
// the solver about the variable elimination ordering to use. This
// can range from no hints, where the solver is free to decide the
// best possible ordering based on the user's choices like the
// linear solver being used, to an exact order in which the
// variables should be eliminated, and a variety of possibilities
// in between.
//
// Instances of the ParameterBlockOrdering class are used to
// communicate this information to Ceres.
//
// Formally an ordering is an ordered partitioning of the
// parameter blocks, i.e, each parameter block belongs to exactly
// one group, and each group has a unique non-negative integer
// associated with it, that determines its order in the set of
// groups.
//
// Given such an ordering, Ceres ensures that the parameter blocks in
// the lowest numbered group are eliminated first, and then the
// parameter blocks in the next lowest numbered group and so on. Within
// each group, Ceres is free to order the parameter blocks as it
// chooses.
//
// If NULL, then all parameter blocks are assumed to be in the
// same group and the solver is free to decide the best
// ordering.
//
// e.g. Consider the linear system
//
// x + y = 3
// 2x + 3y = 7
//
// There are two ways in which it can be solved. First eliminating x
// from the two equations, solving for y and then back substituting
// for x, or first eliminating y, solving for x and back substituting
// for y. The user can construct three orderings here.
//
// {0: x}, {1: y} - eliminate x first.
// {0: y}, {1: x} - eliminate y first.
// {0: x, y} - Solver gets to decide the elimination order.
//
// Thus, to have Ceres determine the ordering automatically using
// heuristics, put all the variables in group 0 and to control the
// ordering for every variable, create groups 0..N-1, one per
// variable, in the desired order.
//
// Bundle Adjustment
// -----------------
//
// A particular case of interest is bundle adjustment, where the user
// has two options. The default is to not specify an ordering at all,
// the solver will see that the user wants to use a Schur type solver
// and figure out the right elimination ordering.
//
// But if the user already knows what parameter blocks are points and
// what are cameras, they can save preprocessing time by partitioning
// the parameter blocks into two groups, one for the points and one
// for the cameras, where the group containing the points has an id
// smaller than the group containing cameras.
//
// Once assigned, Solver::Options owns this pointer and will
// deallocate the memory when destroyed.
ParameterBlockOrdering* linear_solver_ordering;
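// e.g., for bundle adjustment (a sketch; point_blocks and
// camera_blocks are assumed to hold the user's parameter block
// pointers):
//
// ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
// for (int i = 0; i < point_blocks.size(); ++i) {
// ordering->AddElementToGroup(point_blocks[i], 0); // eliminated first
// }
// for (int i = 0; i < camera_blocks.size(); ++i) {
// ordering->AddElementToGroup(camera_blocks[i], 1);
// }
// options.linear_solver_ordering = ordering; // options takes ownership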
// By virtue of the modeling layer in Ceres being block oriented,
// all the matrices used by Ceres are also block oriented. When
@ -267,6 +370,77 @@ class Solver {
// sparse_linear_algebra_library = SUITE_SPARSE.
bool use_block_amd;
// Some non-linear least squares problems have additional
// structure in the way the parameter blocks interact that it is
// beneficial to modify the way the trust region step is computed.
//
// e.g., consider the following regression problem
//
// y = a_1 exp(b_1 x) + a_2 exp(b_2 x^2 + c_1)
//
// Given a set of pairs {(x_i, y_i)}, the user wishes to estimate
// a_1, a_2, b_1, b_2, and c_1.
//
// Notice here that the expression on the right is linear in a_1
// and a_2, and given any value for b_1, b_2 and c_1, it is
// possible to use linear regression to estimate the optimal
// values of a_1 and a_2. Indeed, it is possible to analytically
// eliminate the variables a_1 and a_2 from the problem
// altogether. Problems like these are known as separable least
// squares problems and the most famous algorithm for solving them
// is the Variable Projection algorithm invented by Golub &
// Pereyra.
//
// Similar structure can be found in the matrix factorization with
// missing data problem. There the corresponding algorithm is
// known as Wiberg's algorithm.
//
// Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares
// Problems, SIAM Reviews, 22(3), 1980) present an analysis of
// various algorithms for solving separable non-linear least
// squares problems and refer to "Variable Projection" as
// Algorithm I in their paper.
//
// Implementing Variable Projection is tedious and expensive, and
// they present a simpler algorithm, which they refer to as
// Algorithm II, where once the Newton/Trust Region step has been
// computed for the whole problem (a_1, a_2, b_1, b_2, c_1), an
// additional optimization step is performed to estimate a_1 and
// a_2 exactly.
//
// This idea can be generalized to cases where the residual is not
// linear in a_1 and a_2, i.e., Solve for the trust region step
// for the full problem, and then use it as the starting point to
// further optimize just a_1 and a_2. For the linear case, this
// amounts to doing a single linear least squares solve. For
// non-linear problems, any method for solving the a_1 and a_2
// optimization problems will do. The only constraint on a_1 and
// a_2 is that they do not co-occur in any residual block.
//
// This idea can be further generalized, by not just optimizing
// (a_1, a_2), but decomposing the graph corresponding to the
// Hessian matrix's sparsity structure in a collection of
// non-overlapping independent sets and optimizing each of them.
//
// Setting "use_inner_iterations" to true enables the use of this
// non-linear generalization of Ruhe & Wedin's Algorithm II. This
// version of Ceres has a higher iteration complexity, but also
// displays better convergence behaviour per iteration. Setting
// Solver::Options::num_threads to the maximum number possible is
// highly recommended.
bool use_inner_iterations;
// If use_inner_iterations is true, then the user has two choices.
//
// 1. Let the solver heuristically decide which parameter blocks
// to optimize in each inner iteration. To do this leave
// Solver::Options::inner_iteration_ordering untouched.
//
// 2. Specify a collection of ordered independent sets, where
// the lower numbered groups are optimized before the higher
// numbered groups. Each group must be an independent set.
ParameterBlockOrdering* inner_iteration_ordering;
// Minimum number of iterations for which the linear solver should
// run, even if the convergence criterion is satisfied.
int linear_solver_min_num_iterations;
@ -301,14 +475,6 @@ class Solver {
// is sent to STDOUT.
bool minimizer_progress_to_stdout;
bool return_initial_residuals;
bool return_initial_gradient;
bool return_initial_jacobian;
bool return_final_residuals;
bool return_final_gradient;
bool return_final_jacobian;
// List of iterations at which the optimizer should dump the
// linear least squares problem to disk. Useful for testing and
// benchmarking. If empty (default), no problems are dumped.
@ -398,6 +564,8 @@ class Solver {
string FullReport() const;
// Minimizer summary -------------------------------------------------
MinimizerType minimizer_type;
SolverTerminationType termination_type;
// If the solver did not run, or there was a failure, a
@ -414,54 +582,6 @@ class Solver {
// blocks that they depend on were fixed.
double fixed_cost;
// Vectors of residuals before and after the optimization. The
// entries of these vectors are in the order in which
// ResidualBlocks were added to the Problem object.
//
// Whether the residual vectors are populated with values is
// controlled by Solver::Options::return_initial_residuals and
// Solver::Options::return_final_residuals respectively.
vector<double> initial_residuals;
vector<double> final_residuals;
// Gradient vectors, before and after the optimization. The rows
// are in the same order in which the ParameterBlocks were added
// to the Problem object.
//
// NOTE: Since AddResidualBlock adds ParameterBlocks to the
// Problem automatically if they do not already exist, if you wish
// to have explicit control over the ordering of the vectors, then
// use Problem::AddParameterBlock to explicitly add the
// ParameterBlocks in the order desired.
//
// Whether the vectors are populated with values is controlled by
// Solver::Options::return_initial_gradient and
// Solver::Options::return_final_gradient respectively.
vector<double> initial_gradient;
vector<double> final_gradient;
// Jacobian matrices before and after the optimization. The rows
// of these matrices are in the same order in which the
// ResidualBlocks were added to the Problem object. The columns
// are in the same order in which the ParameterBlocks were added
// to the Problem object.
//
// NOTE: Since AddResidualBlock adds ParameterBlocks to the
// Problem automatically if they do not already exist, if you wish
// to have explicit control over the column ordering of the
// matrix, then use Problem::AddParameterBlock to explicitly add
// the ParameterBlocks in the order desired.
//
// The Jacobian matrices are stored as compressed row sparse
// matrices. Please see ceres/crs_matrix.h for more details of the
// format.
//
// Whether the Jacobian matrices are populated with values is
// controlled by Solver::Options::return_initial_jacobian and
// Solver::Options::return_final_jacobian respectively.
CRSMatrix initial_jacobian;
CRSMatrix final_jacobian;
vector<IterationSummary> iterations;
int num_successful_steps;
@ -484,6 +604,10 @@ class Solver {
// Some total of all time spent inside Ceres when Solve is called.
double total_time_in_seconds;
double linear_solver_time_in_seconds;
double residual_evaluation_time_in_seconds;
double jacobian_evaluation_time_in_seconds;
// Preprocessor summary.
int num_parameter_blocks;
int num_parameters;
@ -507,12 +631,23 @@ class Solver {
LinearSolverType linear_solver_type_given;
LinearSolverType linear_solver_type_used;
vector<int> linear_solver_ordering_given;
vector<int> linear_solver_ordering_used;
PreconditionerType preconditioner_type;
OrderingType ordering_type;
TrustRegionStrategyType trust_region_strategy_type;
DoglegType dogleg_type;
bool inner_iterations;
SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
LineSearchDirectionType line_search_direction_type;
LineSearchType line_search_type;
int max_lbfgs_rank;
vector<int> inner_iteration_ordering_given;
vector<int> inner_iteration_ordering_used;
};
// Once a least squares problem has been built, this function takes

@ -37,6 +37,8 @@
#ifndef CERES_PUBLIC_TYPES_H_
#define CERES_PUBLIC_TYPES_H_
#include "ceres/internal/port.h"
namespace ceres {
// Basic integer types. These typedefs are in the Ceres namespace to avoid
@ -99,8 +101,7 @@ enum PreconditionerType {
JACOBI,
// Block diagonal of the Schur complement. This preconditioner may
// only be used with the ITERATIVE_SCHUR solver. Requires
// SuiteSparse/CHOLMOD.
// only be used with the ITERATIVE_SCHUR solver.
SCHUR_JACOBI,
// Visibility clustering based preconditioners.
@ -143,18 +144,6 @@ enum LinearSolverTerminationType {
FAILURE
};
enum OrderingType {
// The order in which the parameter blocks were defined.
NATURAL,
// Use the ordering specified in the vector ordering.
USER,
// Automatically figure out the best ordering to use the schur
// complement based solver.
SCHUR
};
// Logging options
// The options get progressively noisier.
enum LoggingType {
@ -162,6 +151,55 @@ enum LoggingType {
PER_MINIMIZER_ITERATION
};
enum MinimizerType {
LINE_SEARCH,
TRUST_REGION
};
enum LineSearchDirectionType {
// Negative of the gradient.
STEEPEST_DESCENT,
// A generalization of the Conjugate Gradient method to non-linear
// functions. The generalization can be performed in a number of
// different ways, resulting in a variety of search directions. The
// precise choice of the non-linear conjugate gradient algorithm
// used is determined by NonlinearConjugateGradientType.
NONLINEAR_CONJUGATE_GRADIENT,
// A limited memory approximation to the inverse Hessian is
// maintained and used to compute a quasi-Newton step.
//
// For more details see
//
// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited
// Storage". Mathematics of Computation 35 (151): 773-782.
//
// Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994).
// "Representations of Quasi-Newton Matrices and their use in
// Limited Memory Methods". Mathematical Programming 63 (4):
// 129-156.
LBFGS,
};
// Nonlinear conjugate gradient methods are a generalization of the
// method of Conjugate Gradients for linear systems. The
// generalization can be carried out in a number of different ways
// leading to a number of different rules for computing the search
// direction. Ceres provides a number of different variants. For more
// details see Numerical Optimization by Nocedal & Wright.
enum NonlinearConjugateGradientType {
FLETCHER_REEVES,
POLAK_RIBIRERE,
HESTENES_STIEFEL,
};
enum LineSearchType {
// Backtracking line search with polynomial interpolation or
// bisection.
ARMIJO,
};
// Ceres supports different strategies for computing the trust region
// step.
enum TrustRegionStrategyType {
@ -296,18 +334,54 @@ enum DimensionType {
DYNAMIC = -1
};
enum NumericDiffMethod {
CENTRAL,
FORWARD
};
const char* LinearSolverTypeToString(LinearSolverType type);
bool StringToLinearSolverType(string value, LinearSolverType* type);
const char* PreconditionerTypeToString(PreconditionerType type);
bool StringToPreconditionerType(string value, PreconditionerType* type);
const char* SparseLinearAlgebraLibraryTypeToString(
SparseLinearAlgebraLibraryType type);
bool StringToSparseLinearAlgebraLibraryType(
string value,
SparseLinearAlgebraLibraryType* type);
const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type);
bool StringToTrustRegionStrategyType(string value,
TrustRegionStrategyType* type);
const char* DoglegTypeToString(DoglegType type);
bool StringToDoglegType(string value, DoglegType* type);
const char* MinimizerTypeToString(MinimizerType type);
bool StringToMinimizerType(string value, MinimizerType* type);
const char* LineSearchDirectionTypeToString(LineSearchDirectionType type);
bool StringToLineSearchDirectionType(string value,
LineSearchDirectionType* type);
const char* LineSearchTypeToString(LineSearchType type);
bool StringToLineSearchType(string value, LineSearchType* type);
const char* NonlinearConjugateGradientTypeToString(
NonlinearConjugateGradientType type);
bool StringToNonlinearConjugateGradientType(
string value, NonlinearConjugateGradientType* type);
const char* LinearSolverTerminationTypeToString(
LinearSolverTerminationType type);
const char* OrderingTypeToString(OrderingType type);
const char* SolverTerminationTypeToString(SolverTerminationType type);
const char* SparseLinearAlgebraLibraryTypeToString(
SparseLinearAlgebraLibraryType type);
const char* TrustRegionStrategyTypeToString( TrustRegionStrategyType type);
bool IsSchurType(LinearSolverType type);
bool IsSparseLinearAlgebraLibraryTypeAvailable(
SparseLinearAlgebraLibraryType type);
} // namespace ceres

@ -28,7 +28,7 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Utility routines for validating arrays.
// Utility routines for validating arrays.
//
// These are useful for detecting two common class of errors.
//

@ -40,10 +40,10 @@
namespace ceres {
namespace internal {
BlockJacobiPreconditioner::BlockJacobiPreconditioner(const LinearOperator& A)
BlockJacobiPreconditioner::BlockJacobiPreconditioner(
const BlockSparseMatrixBase& A)
: num_rows_(A.num_rows()),
block_structure_(
*(down_cast<const BlockSparseMatrix*>(&A)->block_structure())) {
block_structure_(*A.block_structure()) {
// Calculate the amount of storage needed.
int storage_needed = 0;
for (int c = 0; c < block_structure_.cols.size(); ++c) {
@ -64,11 +64,10 @@ BlockJacobiPreconditioner::BlockJacobiPreconditioner(const LinearOperator& A)
}
}
BlockJacobiPreconditioner::~BlockJacobiPreconditioner() {
}
BlockJacobiPreconditioner::~BlockJacobiPreconditioner() {}
void BlockJacobiPreconditioner::Update(const LinearOperator& matrix, const double* D) {
const BlockSparseMatrix& A = *(down_cast<const BlockSparseMatrix*>(&matrix));
bool BlockJacobiPreconditioner::Update(const BlockSparseMatrixBase& A,
const double* D) {
const CompressedRowBlockStructure* bs = A.block_structure();
// Compute the diagonal blocks by block inner products.
@ -107,16 +106,19 @@ void BlockJacobiPreconditioner::Update(const LinearOperator& matrix, const doubl
MatrixRef block(blocks_[c], size, size);
if (D != NULL) {
block.diagonal() += ConstVectorRef(D + position, size).array().square().matrix();
block.diagonal() +=
ConstVectorRef(D + position, size).array().square().matrix();
}
block = block.selfadjointView<Eigen::Upper>()
.ldlt()
.solve(Matrix::Identity(size, size));
}
return true;
}
void BlockJacobiPreconditioner::RightMultiply(const double* x, double* y) const {
void BlockJacobiPreconditioner::RightMultiply(const double* x,
double* y) const {
for (int c = 0; c < block_structure_.cols.size(); ++c) {
const int size = block_structure_.cols[c].size;
const int position = block_structure_.cols[c].position;

@ -32,38 +32,33 @@
#define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
#include <vector>
#include "ceres/linear_operator.h"
#include "ceres/preconditioner.h"
namespace ceres {
namespace internal {
class CompressedRowBlockStructure;
class BlockSparseMatrixBase;
struct CompressedRowBlockStructure;
class LinearOperator;
class SparseMatrix;
// A block Jacobi preconditioner. This is intended for use with conjugate
// gradients, or other iterative symmetric solvers. To use the preconditioner,
// create one by passing a BlockSparseMatrix as the linear operator "A" to the
// constructor. This fixes the sparsity pattern to the pattern of the matrix
// A^TA.
// A block Jacobi preconditioner. This is intended for use with
// conjugate gradients, or other iterative symmetric solvers. To use
// the preconditioner, create one by passing a BlockSparseMatrix "A"
// to the constructor. This fixes the sparsity pattern to the pattern
// of the matrix A^TA.
//
// Before each use of the preconditioner in a solve with conjugate gradients,
// update the matrix by running Update(A, D). The values of the matrix A are
// inspected to construct the preconditioner. The vector D is applied as the
// D^TD diagonal term.
class BlockJacobiPreconditioner : public LinearOperator {
class BlockJacobiPreconditioner : public Preconditioner {
public:
// A must remain valid while the BlockJacobiPreconditioner is.
BlockJacobiPreconditioner(const LinearOperator& A);
explicit BlockJacobiPreconditioner(const BlockSparseMatrixBase& A);
virtual ~BlockJacobiPreconditioner();
// Update the preconditioner with the values found in A. The sparsity pattern
// must match that of the A passed to the constructor. D is a vector that
// must have the same number of rows as A, and is applied as a diagonal in
// addition to the block diagonals of A.
void Update(const LinearOperator& A, const double* D);
// LinearOperator interface.
// Preconditioner interface
virtual bool Update(const BlockSparseMatrixBase& A, const double* D);
virtual void RightMultiply(const double* x, double* y) const;
virtual void LeftMultiply(const double* x, double* y) const;
virtual int num_rows() const { return num_rows_; }
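The comment above describes the intended call pattern; a minimal sketch, assuming A is a BlockSparseMatrix that outlives the preconditioner, D is either NULL or a per-column diagonal, and r/z are appropriately sized work vectors:
// Hedged sketch of the Preconditioner interface described above.
BlockJacobiPreconditioner preconditioner(A);  // fixes the sparsity pattern of A^T A
if (preconditioner.Update(A, D)) {            // refresh the block diagonal before each CG solve
  preconditioner.RightMultiply(r, z);         // apply the preconditioner: z = M^{-1} r
}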

@ -28,12 +28,12 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "glog/logging.h"
#include "ceres/block_random_access_dense_matrix.h"
#include <vector>
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {

@ -28,7 +28,6 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "glog/logging.h"
#include "ceres/block_random_access_sparse_matrix.h"
#include <algorithm>
@ -40,6 +39,7 @@
#include "ceres/mutex.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {

@ -43,16 +43,16 @@
#include <vector>
#include <glog/logging.h>
#include "ceres/collections_port.h"
#include "ceres/graph.h"
#include "ceres/map_util.h"
#include "ceres/internal/macros.h"
#include "ceres/map_util.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
class CanonicalViewsClusteringOptions;
struct CanonicalViewsClusteringOptions;
// Compute a partitioning of the vertices of the graph using the
// canonical views clustering algorithm.

@ -30,25 +30,28 @@
#include "ceres/cgnr_solver.h"
#include "glog/logging.h"
#include "ceres/linear_solver.h"
#include "ceres/block_jacobi_preconditioner.h"
#include "ceres/cgnr_linear_operator.h"
#include "ceres/conjugate_gradients_solver.h"
#include "ceres/block_jacobi_preconditioner.h"
#include "ceres/linear_solver.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
: options_(options),
jacobi_preconditioner_(NULL) {
preconditioner_(NULL) {
}
LinearSolver::Summary CgnrSolver::Solve(
LinearOperator* A,
LinearSolver::Summary CgnrSolver::SolveImpl(
BlockSparseMatrixBase* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x) {
EventLogger event_logger("CgnrSolver::Solve");
// Form z = Atb.
scoped_array<double> z(new double[A->num_cols()]);
std::fill(z.get(), z.get() + A->num_cols(), 0.0);
@ -57,11 +60,11 @@ LinearSolver::Summary CgnrSolver::Solve(
// Precondition if necessary.
LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options;
if (options_.preconditioner_type == JACOBI) {
if (jacobi_preconditioner_.get() == NULL) {
jacobi_preconditioner_.reset(new BlockJacobiPreconditioner(*A));
if (preconditioner_.get() == NULL) {
preconditioner_.reset(new BlockJacobiPreconditioner(*A));
}
jacobi_preconditioner_->Update(*A, per_solve_options.D);
cg_per_solve_options.preconditioner = jacobi_preconditioner_.get();
preconditioner_->Update(*A, per_solve_options.D);
cg_per_solve_options.preconditioner = preconditioner_.get();
} else if (options_.preconditioner_type != IDENTITY) {
LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners.";
}
@ -69,11 +72,14 @@ LinearSolver::Summary CgnrSolver::Solve(
// Solve (AtA + DtD)x = z (= Atb).
std::fill(x, x + A->num_cols(), 0.0);
CgnrLinearOperator lhs(*A, per_solve_options.D);
event_logger.AddEvent("Setup");
ConjugateGradientsSolver conjugate_gradient_solver(options_);
return conjugate_gradient_solver.Solve(&lhs,
z.get(),
cg_per_solve_options,
x);
LinearSolver::Summary summary =
conjugate_gradient_solver.Solve(&lhs, z.get(), cg_per_solve_options, x);
event_logger.AddEvent("Solve");
return summary;
}
} // namespace internal
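For reference, the two steps above ("form z = Atb", then "solve (AtA + DtD)x = z") are the standard normal-equations reformulation of the damped least squares problem; in LaTeX,
\min_x \;\|Ax - b\|^2 + \|Dx\|^2 \quad\Longleftrightarrow\quad (A^T A + D^T D)\,x = A^T b,
so z = A^T b is the right-hand side, and CgnrLinearOperator is assumed here to apply the left-hand side implicitly through products with A and A^T rather than forming A^T A.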

@ -37,6 +37,8 @@
namespace ceres {
namespace internal {
class Preconditioner;
class BlockJacobiPreconditioner;
// A conjugate gradients on the normal equations solver. This directly solves
@ -46,17 +48,18 @@ class BlockJacobiPreconditioner;
//
// as required for solving for x in the least squares sense. Currently only
// block diagonal preconditioning is supported.
class CgnrSolver : public LinearSolver {
class CgnrSolver : public BlockSparseMatrixBaseSolver {
public:
explicit CgnrSolver(const LinearSolver::Options& options);
virtual Summary Solve(LinearOperator* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x);
virtual Summary SolveImpl(
BlockSparseMatrixBase* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x);
private:
const LinearSolver::Options options_;
scoped_ptr<BlockJacobiPreconditioner> jacobi_preconditioner_;
scoped_ptr<Preconditioner> preconditioner_;
CERES_DISALLOW_COPY_AND_ASSIGN(CgnrSolver);
};

@ -37,7 +37,7 @@
# include <map>
# include <set>
#else
# if defined(_MSC_VER) && _MSC_VER <= 1600
# if defined(_MSC_VER)
# include <unordered_map>
# include <unordered_set>
# else

@ -135,7 +135,8 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
// Populate the row and column block vectors for use by block
// oriented ordering algorithms. This is useful when
// Solver::Options::use_block_amd = true.
const vector<ParameterBlock*>& parameter_blocks = program_->parameter_blocks();
const vector<ParameterBlock*>& parameter_blocks =
program_->parameter_blocks();
vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
col_blocks.resize(parameter_blocks.size());
for (int i = 0; i < parameter_blocks.size(); ++i) {

@ -32,17 +32,18 @@
#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
#include <vector>
#include <glog/logging.h>
#include "ceres/sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
class CRSMatrix;
struct CRSMatrix;
namespace internal {

@ -0,0 +1,236 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/coordinate_descent_minimizer.h"
#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif
#include <iterator>
#include <numeric>
#include <vector>
#include "ceres/evaluator.h"
#include "ceres/linear_solver.h"
#include "ceres/minimizer.h"
#include "ceres/ordered_groups.h"
#include "ceres/parameter_block.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/solver.h"
#include "ceres/solver_impl.h"
#include "ceres/trust_region_minimizer.h"
#include "ceres/trust_region_strategy.h"
namespace ceres {
namespace internal {
CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {
}
bool CoordinateDescentMinimizer::Init(
const Program& program,
const ProblemImpl::ParameterMap& parameter_map,
const ParameterBlockOrdering& ordering,
string* error) {
parameter_blocks_.clear();
independent_set_offsets_.clear();
independent_set_offsets_.push_back(0);
// Serialize the OrderedGroups into a vector of parameter block
// offsets for parallel access.
map<ParameterBlock*, int> parameter_block_index;
map<int, set<double*> > group_to_elements = ordering.group_to_elements();
for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
it != group_to_elements.end();
++it) {
for (set<double*>::const_iterator ptr_it = it->second.begin();
ptr_it != it->second.end();
++ptr_it) {
parameter_blocks_.push_back(parameter_map.find(*ptr_it)->second);
parameter_block_index[parameter_blocks_.back()] =
parameter_blocks_.size() - 1;
}
independent_set_offsets_.push_back(
independent_set_offsets_.back() + it->second.size());
}
// The ordering does not have to contain all parameter blocks, so
// assign zero offsets/empty independent sets to these parameter
// blocks.
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
for (int i = 0; i < parameter_blocks.size(); ++i) {
if (!ordering.IsMember(parameter_blocks[i]->mutable_user_state())) {
parameter_blocks_.push_back(parameter_blocks[i]);
independent_set_offsets_.push_back(independent_set_offsets_.back());
}
}
// Compute the set of residual blocks that depend on each parameter
// block.
residual_blocks_.resize(parameter_block_index.size());
const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
for (int i = 0; i < residual_blocks.size(); ++i) {
ResidualBlock* residual_block = residual_blocks[i];
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
const map<ParameterBlock*, int>::const_iterator it =
parameter_block_index.find(parameter_block);
if (it != parameter_block_index.end()) {
residual_blocks_[it->second].push_back(residual_block);
}
}
}
evaluator_options_.linear_solver_type = DENSE_QR;
evaluator_options_.num_eliminate_blocks = 0;
evaluator_options_.num_threads = 1;
return true;
}
void CoordinateDescentMinimizer::Minimize(
const Minimizer::Options& options,
double* parameters,
Solver::Summary* summary) {
// Set the state and mark all parameter blocks constant.
for (int i = 0; i < parameter_blocks_.size(); ++i) {
ParameterBlock* parameter_block = parameter_blocks_[i];
parameter_block->SetState(parameters + parameter_block->state_offset());
parameter_block->SetConstant();
}
scoped_array<LinearSolver*> linear_solvers(
new LinearSolver*[options.num_threads]);
LinearSolver::Options linear_solver_options;
linear_solver_options.type = DENSE_QR;
for (int i = 0; i < options.num_threads; ++i) {
linear_solvers[i] = LinearSolver::Create(linear_solver_options);
}
for (int i = 0; i < independent_set_offsets_.size() - 1; ++i) {
// No point paying the price for an OpenMP call if the set is of
// size zero.
if (independent_set_offsets_[i] == independent_set_offsets_[i + 1]) {
continue;
}
// The parameter blocks in each independent set can be optimized
// in parallel, since they do not co-occur in any residual block.
#pragma omp parallel for num_threads(options.num_threads)
for (int j = independent_set_offsets_[i];
j < independent_set_offsets_[i + 1];
++j) {
#ifdef CERES_USE_OPENMP
int thread_id = omp_get_thread_num();
#else
int thread_id = 0;
#endif
ParameterBlock* parameter_block = parameter_blocks_[j];
const int old_index = parameter_block->index();
const int old_delta_offset = parameter_block->delta_offset();
parameter_block->SetVarying();
parameter_block->set_index(0);
parameter_block->set_delta_offset(0);
Program inner_program;
inner_program.mutable_parameter_blocks()->push_back(parameter_block);
*inner_program.mutable_residual_blocks() = residual_blocks_[j];
// TODO(sameeragarwal): Better error handling. Right now we
// assume that this is not going to lead to problems of any
// sort. Basically we should be checking for numerical failure
// of some sort.
//
// On the other hand, if the optimization is a failure, that is in
// some ways fine, since it won't change the parameters.
Solver::Summary inner_summary;
Solve(&inner_program,
linear_solvers[thread_id],
parameters + parameter_block->state_offset(),
&inner_summary);
parameter_block->set_index(old_index);
parameter_block->set_delta_offset(old_delta_offset);
parameter_block->SetState(parameters + parameter_block->state_offset());
parameter_block->SetConstant();
}
}
for (int i = 0; i < parameter_blocks_.size(); ++i) {
parameter_blocks_[i]->SetVarying();
}
for (int i = 0; i < options.num_threads; ++i) {
delete linear_solvers[i];
}
}
// Solve the optimization problem for one parameter block.
void CoordinateDescentMinimizer::Solve(Program* program,
LinearSolver* linear_solver,
double* parameter,
Solver::Summary* summary) {
*summary = Solver::Summary();
summary->initial_cost = 0.0;
summary->fixed_cost = 0.0;
summary->final_cost = 0.0;
string error;
scoped_ptr<Evaluator> evaluator(
Evaluator::Create(evaluator_options_, program, &error));
CHECK_NOTNULL(evaluator.get());
scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
CHECK_NOTNULL(jacobian.get());
TrustRegionStrategy::Options trs_options;
trs_options.linear_solver = linear_solver;
scoped_ptr<TrustRegionStrategy> trust_region_strategy(
CHECK_NOTNULL(TrustRegionStrategy::Create(trs_options)));
Minimizer::Options minimizer_options;
minimizer_options.evaluator = evaluator.get();
minimizer_options.jacobian = jacobian.get();
minimizer_options.trust_region_strategy = trust_region_strategy.get();
TrustRegionMinimizer minimizer;
minimizer.Minimize(minimizer_options, parameter, summary);
}
} // namespace internal
} // namespace ceres

@ -0,0 +1,88 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_
#define CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_
#include <vector>
#include "ceres/evaluator.h"
#include "ceres/minimizer.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/solver.h"
namespace ceres {
namespace internal {
// Given a Program, and a ParameterBlockOrdering which partitions
// (non-exhaustively) the Hessian matrix into independent sets,
// perform coordinate descent on the parameter blocks in the
// ordering. The independent set structure allows for all parameter
// blocks in the same independent set to be optimized in parallel, and
// the order of the independent sets determines the order in which the
// parameter block groups are optimized.
//
// The minimizer assumes that none of the parameter blocks in the
// program are constant.
class CoordinateDescentMinimizer : public Minimizer {
public:
bool Init(const Program& program,
const ProblemImpl::ParameterMap& parameter_map,
const ParameterBlockOrdering& ordering,
string* error);
// Minimizer interface.
virtual ~CoordinateDescentMinimizer();
virtual void Minimize(const Minimizer::Options& options,
double* parameters,
Solver::Summary* summary);
private:
void Solve(Program* program,
LinearSolver* linear_solver,
double* parameters,
Solver::Summary* summary);
vector<ParameterBlock*> parameter_blocks_;
vector<vector<ResidualBlock*> > residual_blocks_;
// The optimization is performed in rounds. In each round all the
// parameter blocks that form one independent set are optimized in
// parallel. This array marks the boundaries of the independent
// sets in parameter_blocks_.
vector<int> independent_set_offsets_;
Evaluator::Options evaluator_options_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_
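A hedged driver sketch for the interface above; program, parameter_map, ordering, parameters and the summary storage are assumed to be set up elsewhere, and the option values are illustrative:
// Hypothetical inner-iteration driver built on the declarations above.
CoordinateDescentMinimizer inner_minimizer;
string error;
if (!inner_minimizer.Init(program, parameter_map, ordering, &error)) {
  LOG(ERROR) << "CoordinateDescentMinimizer::Init failed: " << error;
} else {
  Minimizer::Options minimizer_options;
  minimizer_options.num_threads = 4;  // each independent set is swept in parallel
  Solver::Summary summary;
  inner_minimizer.Minimize(minimizer_options, parameters, &summary);
}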

@ -113,12 +113,19 @@ void Corrector::CorrectJacobian(int nrow, int ncol,
double* residuals, double* jacobian) {
DCHECK(residuals != NULL);
DCHECK(jacobian != NULL);
ConstVectorRef r_ref(residuals, nrow);
MatrixRef j_ref(jacobian, nrow, ncol);
// Equation 11 in BANS.
j_ref = sqrt_rho1_ * (j_ref - alpha_sq_norm_ *
r_ref * (r_ref.transpose() * j_ref));
if (nrow == 1) {
// Specialization for the case where the residual is a scalar.
VectorRef j_ref(jacobian, ncol);
j_ref *= sqrt_rho1_ * (1.0 - alpha_sq_norm_ * pow(*residuals, 2));
} else {
ConstVectorRef r_ref(residuals, nrow);
MatrixRef j_ref(jacobian, nrow, ncol);
// Equation 11 in BANS.
j_ref = sqrt_rho1_ * (j_ref - alpha_sq_norm_ *
r_ref * (r_ref.transpose() * j_ref));
}
}
} // namespace internal
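The scalar branch above is just Equation 11 collapsed to one residual: writing alpha_sq_norm_ as \alpha_{sq}, the general update is
J \leftarrow \sqrt{\rho_1}\,\bigl(J - \alpha_{sq}\, r\,(r^T J)\bigr),
and for nrow == 1 the residual r is a scalar, so r\,(r^T J) = r^2 J and the update reduces to the in-place scaling J \leftarrow \sqrt{\rho_1}\,(1 - \alpha_{sq}\, r^2)\, J.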

@ -39,7 +39,7 @@
namespace ceres {
namespace internal {
CXSparse::CXSparse() : scratch_size_(0), scratch_(NULL) {
CXSparse::CXSparse() : scratch_(NULL), scratch_size_(0) {
}
CXSparse::~CXSparse() {
@ -116,12 +116,12 @@ cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) {
return cs_compress(&tsm_wrapper);
}
void CXSparse::Free(cs_di* factor) {
cs_free(factor);
void CXSparse::Free(cs_di* sparse_matrix) {
cs_di_spfree(sparse_matrix);
}
void CXSparse::Free(cs_dis* factor) {
cs_sfree(factor);
void CXSparse::Free(cs_dis* symbolic_factorization) {
cs_di_sfree(symbolic_factorization);
}
} // namespace internal

@ -72,9 +72,8 @@ class CXSparse {
// The returned matrix should be deallocated with Free when not used anymore.
cs_dis* AnalyzeCholesky(cs_di* A);
// Deallocates the memory of a matrix obtained from AnalyzeCholesky.
void Free(cs_di* factor);
void Free(cs_dis* factor);
void Free(cs_di* sparse_matrix);
void Free(cs_dis* symbolic_factorization);
private:
// Cached scratch space

@ -62,7 +62,8 @@ class DenseJacobianWriter {
SparseMatrix* CreateJacobian() const {
return new DenseSparseMatrix(program_->NumResiduals(),
program_->NumEffectiveParameters());
program_->NumEffectiveParameters(),
true);
}
void Write(int residual_id,
@ -87,10 +88,10 @@ class DenseJacobianWriter {
continue;
}
int parameter_block_size = parameter_block->LocalSize();
MatrixRef parameter_jacobian(jacobians[j],
num_residuals,
parameter_block_size);
const int parameter_block_size = parameter_block->LocalSize();
ConstMatrixRef parameter_jacobian(jacobians[j],
num_residuals,
parameter_block_size);
dense_jacobian->mutable_matrix().block(
residual_offset,

@ -34,10 +34,11 @@
#include "Eigen/Dense"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/linear_solver.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/types.h"
#include "ceres/wall_time.h"
namespace ceres {
namespace internal {
@ -51,6 +52,8 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl(
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x) {
EventLogger event_logger("DenseNormalCholeskySolver::Solve");
const int num_rows = A->num_rows();
const int num_cols = A->num_cols();
@ -58,6 +61,7 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl(
Matrix lhs(num_cols, num_cols);
lhs.setZero();
event_logger.AddEvent("Setup");
// lhs += A'A
//
// Using rankUpdate instead of GEMM exposes the fact that it's the
@ -73,12 +77,13 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl(
lhs += D.array().square().matrix().asDiagonal();
}
VectorRef(x, num_cols) =
lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs);
LinearSolver::Summary summary;
summary.num_iterations = 1;
summary.termination_type = TOLERANCE;
VectorRef(x, num_cols) =
lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs);
event_logger.AddEvent("Solve");
return summary;
}

@ -34,10 +34,11 @@
#include "Eigen/Dense"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/linear_solver.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/types.h"
#include "ceres/wall_time.h"
namespace ceres {
namespace internal {
@ -50,10 +51,10 @@ LinearSolver::Summary DenseQRSolver::SolveImpl(
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x) {
EventLogger event_logger("DenseQRSolver::Solve");
const int num_rows = A->num_rows();
const int num_cols = A->num_cols();
VLOG(2) << "DenseQRSolver: "
<< num_rows << " x " << num_cols << " system.";
if (per_solve_options.D != NULL) {
// Temporarily append a diagonal block to the A matrix, but undo
@ -62,18 +63,18 @@ LinearSolver::Summary DenseQRSolver::SolveImpl(
}
// rhs = [b;0] to account for the additional rows in the lhs.
Vector rhs(num_rows + ((per_solve_options.D != NULL) ? num_cols : 0));
rhs.setZero();
rhs.head(num_rows) = ConstVectorRef(b, num_rows);
const int augmented_num_rows =
num_rows + ((per_solve_options.D != NULL) ? num_cols : 0);
if (rhs_.rows() != augmented_num_rows) {
rhs_.resize(augmented_num_rows);
rhs_.setZero();
}
rhs_.head(num_rows) = ConstVectorRef(b, num_rows);
event_logger.AddEvent("Setup");
// Solve the system.
VectorRef(x, num_cols) = A->matrix().colPivHouseholderQr().solve(rhs);
VLOG(3) << "A:\n" << A->matrix();
VLOG(3) << "x:\n" << VectorRef(x, num_cols);
VLOG(3) << "b:\n" << rhs;
VLOG(3) << "error: " << (A->matrix() * VectorRef(x, num_cols) - rhs).norm();
VectorRef(x, num_cols) = A->matrix().colPivHouseholderQr().solve(rhs_);
event_logger.AddEvent("Solve");
if (per_solve_options.D != NULL) {
// Undo the modifications to the matrix A.
@ -86,6 +87,8 @@ LinearSolver::Summary DenseQRSolver::SolveImpl(
LinearSolver::Summary summary;
summary.num_iterations = 1;
summary.termination_type = TOLERANCE;
event_logger.AddEvent("TearDown");
return summary;
}

@ -33,6 +33,7 @@
#define CERES_INTERNAL_DENSE_QR_SOLVER_H_
#include "ceres/linear_solver.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/macros.h"
namespace ceres {
@ -90,6 +91,7 @@ class DenseQRSolver: public DenseSparseMatrixSolver {
double* x);
const LinearSolver::Options options_;
Vector rhs_;
CERES_DISALLOW_COPY_AND_ASSIGN(DenseQRSolver);
};

@ -47,6 +47,20 @@ DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols)
m_.setZero();
}
DenseSparseMatrix::DenseSparseMatrix(int num_rows,
int num_cols,
bool reserve_diagonal)
: has_diagonal_appended_(false),
has_diagonal_reserved_(reserve_diagonal) {
// Allocate enough space for the diagonal.
if (reserve_diagonal) {
m_.resize(num_rows + num_cols, num_cols);
} else {
m_.resize(num_rows, num_cols);
}
m_.setZero();
}
DenseSparseMatrix::DenseSparseMatrix(const TripletSparseMatrix& m)
: m_(Eigen::MatrixXd::Zero(m.num_rows(), m.num_cols())),
has_diagonal_appended_(false),
@ -105,7 +119,7 @@ void DenseSparseMatrix::ScaleColumns(const double* scale) {
}
void DenseSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
*dense_matrix = m_;
*dense_matrix = m_.block(0, 0, num_rows(), num_cols());
}
#ifndef CERES_NO_PROTOCOL_BUFFERS

@ -57,6 +57,7 @@ class DenseSparseMatrix : public SparseMatrix {
#endif
DenseSparseMatrix(int num_rows, int num_cols);
DenseSparseMatrix(int num_rows, int num_cols, bool reserve_diagonal);
virtual ~DenseSparseMatrix() {}

@ -35,7 +35,7 @@
#include "ceres/array_utils.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_solver.h"
#include "ceres/polynomial_solver.h"
#include "ceres/polynomial.h"
#include "ceres/sparse_matrix.h"
#include "ceres/trust_region_strategy.h"
#include "ceres/types.h"
@ -87,7 +87,7 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
// Gauss-Newton and gradient vectors are always available, only a
// new interpolant need to be computed. For the subspace case,
// the subspace and the two-dimensional model are also still valid.
switch(dogleg_type_) {
switch (dogleg_type_) {
case TRADITIONAL_DOGLEG:
ComputeTraditionalDoglegStep(step);
break;
@ -135,7 +135,7 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
summary.termination_type = linear_solver_summary.termination_type;
if (linear_solver_summary.termination_type != FAILURE) {
switch(dogleg_type_) {
switch (dogleg_type_) {
// Interpolate the Cauchy point and the Gauss-Newton step.
case TRADITIONAL_DOGLEG:
ComputeTraditionalDoglegStep(step);
@ -415,15 +415,15 @@ Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
const double trB = subspace_B_.trace();
const double r2 = radius_ * radius_;
Matrix2d B_adj;
B_adj << subspace_B_(1,1) , -subspace_B_(0,1),
-subspace_B_(1,0) , subspace_B_(0,0);
B_adj << subspace_B_(1, 1) , -subspace_B_(0, 1),
-subspace_B_(1, 0) , subspace_B_(0, 0);
Vector polynomial(5);
polynomial(0) = r2;
polynomial(1) = 2.0 * r2 * trB;
polynomial(2) = r2 * ( trB * trB + 2.0 * detB ) - subspace_g_.squaredNorm();
polynomial(3) = -2.0 * ( subspace_g_.transpose() * B_adj * subspace_g_
- r2 * detB * trB );
polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm();
polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_
- r2 * detB * trB);
polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();
return polynomial;
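One way to read the coefficients above: on the trust-region boundary the two-dimensional subspace step satisfies (B + \lambda I)h = -g with \|h\| = r, and for a 2x2 system h = -\operatorname{adj}(B + \lambda I)\,g / \det(B + \lambda I), where \operatorname{adj}(B + \lambda I) = B_{adj} + \lambda I. Squaring the norm constraint gives
r^2 \det(B + \lambda I)^2 - \|(B_{adj} + \lambda I)\,g\|^2 = 0,
and expanding in \lambda (highest degree first) reproduces polynomial(0) through polynomial(4) exactly.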
@ -598,7 +598,7 @@ void DoglegStrategy::StepAccepted(double step_quality) {
// Reduce the regularization multiplier, in the hope that whatever
// was causing the rank deficiency has gone away and we can return
// to doing a pure Gauss-Newton solve.
mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_ );
mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_);
reuse_ = false;
}

@ -53,8 +53,8 @@ namespace internal {
// This finds the exact optimum over the two-dimensional subspace
// spanned by the two Dogleg vectors.
class DoglegStrategy : public TrustRegionStrategy {
public:
DoglegStrategy(const TrustRegionStrategy::Options& options);
public:
explicit DoglegStrategy(const TrustRegionStrategy::Options& options);
virtual ~DoglegStrategy() {}
// TrustRegionStrategy interface

@ -32,14 +32,17 @@
#ifndef CERES_INTERNAL_EVALUATOR_H_
#define CERES_INTERNAL_EVALUATOR_H_
#include <map>
#include <string>
#include <vector>
#include "ceres/execution_summary.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
namespace ceres {
class CRSMatrix;
struct CRSMatrix;
namespace internal {
@ -152,6 +155,18 @@ class Evaluator {
// The number of residuals in the optimization problem.
virtual int NumResiduals() const = 0;
// The following two methods return copies instead of references so
// that the base class implementation does not have to worry about
// life time issues. Further, these calls are not expected to be
// frequent or performance sensitive.
virtual map<string, int> CallStatistics() const {
return map<string, int>();
}
virtual map<string, double> TimeStatistics() const {
return map<string, double>();
}
};
} // namespace internal

@ -0,0 +1,90 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_INTERNAL_EXECUTION_SUMMARY_H_
#define CERES_INTERNAL_EXECUTION_SUMMARY_H_
#include <map>
#include <string>
#include "ceres/internal/port.h"
#include "ceres/wall_time.h"
#include "ceres/mutex.h"
namespace ceres {
namespace internal {
// Struct used by various objects to report statistics and other
// information about their execution. e.g., ExecutionSummary::times
// can be used for reporting times associated with various activities.
class ExecutionSummary {
public:
void IncrementTimeBy(const string& name, const double value) {
CeresMutexLock l(&times_mutex_);
times_[name] += value;
}
void IncrementCall(const string& name) {
CeresMutexLock l(&calls_mutex_);
calls_[name] += 1;
}
const map<string, double>& times() const { return times_; }
const map<string, int>& calls() const { return calls_; }
private:
Mutex times_mutex_;
map<string, double> times_;
Mutex calls_mutex_;
map<string, int> calls_;
};
class ScopedExecutionTimer {
public:
ScopedExecutionTimer(const string& name, ExecutionSummary* summary)
: start_time_(WallTimeInSeconds()),
name_(name),
summary_(summary) {}
~ScopedExecutionTimer() {
summary_->IncrementTimeBy(name_, WallTimeInSeconds() - start_time_);
}
private:
const double start_time_;
const string name_;
ExecutionSummary* summary_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_EXECUTION_SUMMARY_H_
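A minimal usage sketch of the two helpers above, timing one named activity and counting its invocations (the key string is illustrative):
ExecutionSummary execution_summary;
{
  ScopedExecutionTimer timer("Evaluator::Residual", &execution_summary);
  execution_summary.IncrementCall("Evaluator::Residual");
  // ... the work being measured ...
}  // ~ScopedExecutionTimer adds the elapsed wall time under the same key
// execution_summary.times() and execution_summary.calls() now report the
// accumulated seconds and the call count for "Evaluator::Residual".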

@ -30,8 +30,9 @@
//
// Really simple file IO.
#include "ceres/file.h"
#include <cstdio>
#include "file.h"
#include "glog/logging.h"
namespace ceres {

@ -154,8 +154,8 @@ class GradientCheckingCostFunction : public CostFunction {
"Jacobian for " "block %d: (%ld by %ld)) "
"==========\n",
k,
term_jacobians[k].rows(),
term_jacobians[k].cols());
static_cast<long>(term_jacobians[k].rows()),
static_cast<long>(term_jacobians[k].cols()));
// The funny spacing creates appropriately aligned column headers.
m += " block row col user dx/dy num diff dx/dy "
"abs error relative error parameter residual\n";

@ -32,12 +32,13 @@
#define CERES_INTERNAL_GRAPH_H_
#include <limits>
#include <glog/logging.h>
#include <utility>
#include "ceres/integral_types.h"
#include "ceres/map_util.h"
#include "ceres/collections_port.h"
#include "ceres/internal/macros.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
@ -65,6 +66,28 @@ class Graph {
AddVertex(vertex, 1.0);
}
bool RemoveVertex(const Vertex& vertex) {
if (vertices_.find(vertex) == vertices_.end()) {
return false;
}
vertices_.erase(vertex);
vertex_weights_.erase(vertex);
const HashSet<Vertex>& sinks = edges_[vertex];
for (typename HashSet<Vertex>::const_iterator it = sinks.begin();
it != sinks.end(); ++it) {
if (vertex < *it) {
edge_weights_.erase(make_pair(vertex, *it));
} else {
edge_weights_.erase(make_pair(*it, vertex));
}
edges_[*it].erase(vertex);
}
edges_.erase(vertex);
return true;
}
// Add a weighted edge between the vertex1 and vertex2. Calling
// AddEdge on a pair of vertices which do not exist in the graph yet
// will result in undefined behavior.
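A small sketch of the vertex and edge calls above, assuming the class template parameter is the vertex type (as the iterators suggest) and that AddEdge takes the weighted form described in the comment:
Graph<int> graph;
graph.AddVertex(1);
graph.AddVertex(2);
graph.AddEdge(1, 2, 1.0);        // weighted edge between existing vertices
CHECK(graph.RemoveVertex(1));    // drops the vertex, its weight, and its incident edges
CHECK(!graph.RemoveVertex(1));   // removing a vertex that is no longer present reports failure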
