blender/extern/libmv/libmv-capi.cc

/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
*
* ***** END GPL LICENSE BLOCK *****
*/
#ifdef WITH_LIBMV
/* Define this to generate PNG images with the contents of search areas
 * for which tracking failed. */
#undef DUMP_FAILURE
/* Define this to generate PNG images with the contents of search areas
 * on every iteration of tracking. */
#undef DUMP_ALWAYS
#include "libmv-capi.h"
#include <cstdlib>
#include <cassert>
#if defined(DUMP_FAILURE) || defined (DUMP_ALWAYS)
# include <png.h>
#endif
#include "libmv-capi_intern.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/homography.h"
#include "libmv/tracking/track_region.h"
#include "libmv/simple_pipeline/callbacks.h"
#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/initialize_reconstruction.h"
#include "libmv/simple_pipeline/bundle.h"
#include "libmv/simple_pipeline/detect.h"
#include "libmv/simple_pipeline/pipeline.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include "libmv/simple_pipeline/reconstruction_scale.h"
#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/multiview/homography.h"
#include "libmv/image/convolve.h"
#ifdef _MSC_VER
# define snprintf _snprintf
#endif
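/* Opaque reconstruction result handed back to Blender: the Euclidean
 * reconstruction itself, a copy of the tracks it was solved from (kept for
 * per-track and per-image error queries later on), the possibly refined
 * camera intrinsics, and the average reprojection error. */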
struct libmv_Reconstruction {
libmv::EuclideanReconstruction reconstruction;
/* used for per-track average error calculation after reconstruction */
libmv::Tracks tracks;
libmv::CameraIntrinsics intrinsics;
double error;
};
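/* Detected features copied out of libmv's vector into a plain array plus a
 * count, so they can be iterated from the C side. */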
struct libmv_Features {
int count;
libmv::Feature *features;
};
/* ************ Logging ************ */
void libmv_initLogging(const char *argv0)
{
/* Make it so FATAL messages are always printed to the console. */
char severity_fatal[32];
snprintf(severity_fatal, sizeof(severity_fatal), "%d",
google::GLOG_FATAL);
google::InitGoogleLogging(argv0);
google::SetCommandLineOption("logtostderr", "1");
google::SetCommandLineOption("v", "0");
google::SetCommandLineOption("stderrthreshold", severity_fatal);
google::SetCommandLineOption("minloglevel", severity_fatal);
}
void libmv_startDebugLogging(void)
{
google::SetCommandLineOption("logtostderr", "1");
google::SetCommandLineOption("v", "2");
google::SetCommandLineOption("stderrthreshold", "1");
google::SetCommandLineOption("minloglevel", "0");
}
void libmv_setLoggingVerbosity(int verbosity)
{
char val[10];
snprintf(val, sizeof(val), "%d", verbosity);
google::SetCommandLineOption("v", val);
}
/* ************ Utility ************ */
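/* The conversion helpers below assume interleaved pixel buffers laid out row
 * by row with `channels` values per pixel; byte buffers are mapped to floats
 * in the [0, 1] range. */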
static void byteBufToImage(const unsigned char *buf, int width, int height, int channels, libmv::FloatImage *image)
{
int x, y, k, a = 0;
image->Resize(height, width, channels);
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
for (k = 0; k < channels; k++) {
(*image)(y, x, k) = (float)buf[a++] / 255.0f;
}
}
}
}
static void floatBufToImage(const float *buf, int width, int height, int channels, libmv::FloatImage *image)
{
int x, y, k, a = 0;
image->Resize(height, width, channels);
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
for (k = 0; k < channels; k++) {
(*image)(y, x, k) = buf[a++];
}
}
}
}
static void imageToFloatBuf(const libmv::FloatImage *image, int channels, float *buf)
{
int x, y, k, a = 0;
for (y = 0; y < image->Height(); y++) {
for (x = 0; x < image->Width(); x++) {
for (k = 0; k < channels; k++) {
buf[a++] = (*image)(y, x, k);
}
}
}
}
#if defined(DUMP_FAILURE) || defined (DUMP_ALWAYS)
static void savePNGImage(png_bytep *row_pointers, int width, int height, int depth, int color_type,
const char *file_name)
{
png_infop info_ptr;
png_structp png_ptr;
FILE *fp = fopen(file_name, "wb");
if (!fp)
return;
/* Initialize the PNG write and info structures. */
png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
info_ptr = png_create_info_struct(png_ptr);
if (setjmp(png_jmpbuf(png_ptr))) {
fclose(fp);
return;
}
png_init_io(png_ptr, fp);
/* write header */
if (setjmp(png_jmpbuf(png_ptr))) {
fclose(fp);
return;
}
png_set_IHDR(png_ptr, info_ptr, width, height,
depth, color_type, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
png_write_info(png_ptr, info_ptr);
/* write bytes */
if (setjmp(png_jmpbuf(png_ptr))) {
fclose(fp);
return;
}
png_write_image(png_ptr, row_pointers);
/* end write */
if (setjmp(png_jmpbuf(png_ptr))) {
fclose(fp);
return;
}
png_write_end(png_ptr, NULL);
fclose(fp);
}
static void saveImage(const char *prefix, libmv::FloatImage image, int x0, int y0)
{
int x, y;
png_bytep *row_pointers;
row_pointers = (png_bytep *) malloc(sizeof(png_bytep) * image.Height());
for (y = 0; y < image.Height(); y++) {
row_pointers[y] = (png_bytep) malloc(sizeof(png_byte) * 4 * image.Width());
for (x = 0; x < image.Width(); x++) {
if (x0 == x && image.Height() - y0 - 1 == y) {
row_pointers[y][x * 4 + 0] = 255;
row_pointers[y][x * 4 + 1] = 0;
row_pointers[y][x * 4 + 2] = 0;
row_pointers[y][x * 4 + 3] = 255;
}
else {
float pixel = image(image.Height() - y - 1, x, 0);
row_pointers[y][x * 4 + 0] = pixel * 255;
row_pointers[y][x * 4 + 1] = pixel * 255;
row_pointers[y][x * 4 + 2] = pixel * 255;
row_pointers[y][x * 4 + 3] = 255;
}
}
}
{
static int a = 0;
char buf[128];
snprintf(buf, sizeof(buf), "%s_%02d.png", prefix, ++a);
savePNGImage(row_pointers, image.Width(), image.Height(), 8, PNG_COLOR_TYPE_RGBA, buf);
}
for (y = 0; y < image.Height(); y++) {
free(row_pointers[y]);
}
free(row_pointers);
}
static void saveBytesImage(const char *prefix, unsigned char *data, int width, int height)
{
int x, y;
png_bytep *row_pointers;
row_pointers = (png_bytep *) malloc(sizeof(png_bytep) * height);
for (y = 0; y < height; y++) {
row_pointers[y] = (png_bytep) malloc(sizeof(png_byte) * 4 * width);
for (x = 0; x < width; x++) {
char pixel = data[width * y + x];
row_pointers[y][x * 4 + 0] = pixel;
row_pointers[y][x * 4 + 1] = pixel;
row_pointers[y][x * 4 + 2] = pixel;
row_pointers[y][x * 4 + 3] = 255;
}
}
{
static int a = 0;
char buf[128];
snprintf(buf, sizeof(buf), "%s_%02d.png", prefix, ++a);
savePNGImage(row_pointers, width, height, 8, PNG_COLOR_TYPE_RGBA, buf);
}
for (y = 0; y < height; y++) {
free(row_pointers[y]);
}
free(row_pointers);
}
#endif
/* ************ Planar tracker ************ */
/* TrackRegion */
int libmv_trackRegion(const libmv_TrackRegionOptions *options,
const float *image1, int image1_width, int image1_height,
const float *image2, int image2_width, int image2_height,
const double *x1, const double *y1,
libmv_TrackRegionResult *result,
double *x2, double *y2)
{
double xx1[5], yy1[5];
double xx2[5], yy2[5];
bool tracking_result = false;
/* Convert to doubles for the libmv api. The four corners and the center. */
for (int i = 0; i < 5; ++i) {
xx1[i] = x1[i];
yy1[i] = y1[i];
xx2[i] = x2[i];
yy2[i] = y2[i];
}
libmv::TrackRegionOptions track_region_options;
libmv::FloatImage image1_mask;
switch (options->motion_model) {
#define LIBMV_CONVERT(the_model) \
case libmv::TrackRegionOptions::the_model: \
track_region_options.mode = libmv::TrackRegionOptions::the_model; \
break;
LIBMV_CONVERT(TRANSLATION)
LIBMV_CONVERT(TRANSLATION_ROTATION)
LIBMV_CONVERT(TRANSLATION_SCALE)
LIBMV_CONVERT(TRANSLATION_ROTATION_SCALE)
LIBMV_CONVERT(AFFINE)
LIBMV_CONVERT(HOMOGRAPHY)
#undef LIBMV_CONVERT
}
track_region_options.minimum_correlation = options->minimum_correlation;
track_region_options.max_iterations = options->num_iterations;
track_region_options.sigma = options->sigma;
track_region_options.num_extra_points = 1;
track_region_options.image1_mask = NULL;
track_region_options.use_brute_initialization = options->use_brute;
/* TODO(keir): This will make some cases better, but may be a regression until
* the motion model is in. Since this is on trunk, enable it for now. */
track_region_options.attempt_refine_before_brute = true;
track_region_options.use_normalized_intensities = options->use_normalization;
if (options->image1_mask) {
floatBufToImage(options->image1_mask, image1_width, image1_height, 1, &image1_mask);
track_region_options.image1_mask = &image1_mask;
}
/* Convert from raw float buffers to libmv's FloatImage. */
libmv::FloatImage old_patch, new_patch;
floatBufToImage(image1, image1_width, image1_height, 1, &old_patch);
floatBufToImage(image2, image2_width, image2_height, 1, &new_patch);
libmv::TrackRegionResult track_region_result;
libmv::TrackRegion(old_patch, new_patch, xx1, yy1, track_region_options, xx2, yy2, &track_region_result);
/* Copy the tracked positions back to the output arrays for the Blender API. */
for (int i = 0; i < 5; ++i) {
x2[i] = xx2[i];
y2[i] = yy2[i];
}
/* TODO(keir): Update the termination string with failure details. */
if (track_region_result.termination == libmv::TrackRegionResult::CONVERGENCE ||
track_region_result.termination == libmv::TrackRegionResult::NO_CONVERGENCE)
{
tracking_result = true;
}
#if defined(DUMP_FAILURE) || defined(DUMP_ALWAYS)
#if defined(DUMP_ALWAYS)
{
#else
if (!tracking_result) {
#endif
saveImage("old_patch", old_patch, x1[4], y1[4]);
saveImage("new_patch", new_patch, x2[4], y2[4]);
if (options->image1_mask)
saveImage("mask", image1_mask, x2[4], y2[4]);
}
#endif
return tracking_result;
}
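/* A minimal usage sketch for libmv_trackRegion() (illustrative only; the
 * caller-side names are hypothetical):
 *
 *   libmv_TrackRegionOptions options;  // filled in by the caller
 *   libmv_TrackRegionResult result;
 *   double x1[5], y1[5];  // four corners plus center in the first image
 *   double x2[5], y2[5];  // initial guess, updated with the tracked result
 *   if (libmv_trackRegion(&options, image1, width, height,
 *                         image2, width, height,
 *                         x1, y1, &result, x2, y2)) {
 *     // x2/y2 now hold the tracked corners and center.
 *   }
 *
 * The return value is non-zero when the tracker terminated with
 * CONVERGENCE or NO_CONVERGENCE. */

/* Resample the planar region given by the quad xs/ys from the input image
 * into a num_samples_x by num_samples_y patch, optionally restricted by a
 * mask; the warped position of the tracked point is written to
 * warped_position_x/y. */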
void libmv_samplePlanarPatch(const float *image, int width, int height,
int channels, const double *xs, const double *ys,
int num_samples_x, int num_samples_y,
const float *mask, float *patch,
double *warped_position_x, double *warped_position_y)
{
libmv::FloatImage libmv_image, libmv_patch, libmv_mask;
libmv::FloatImage *libmv_mask_for_sample = NULL;
floatBufToImage(image, width, height, channels, &libmv_image);
if (mask) {
floatBufToImage(mask, width, height, 1, &libmv_mask);
libmv_mask_for_sample = &libmv_mask;
}
libmv::SamplePlanarPatch(libmv_image, xs, ys, num_samples_x, num_samples_y,
libmv_mask_for_sample, &libmv_patch,
warped_position_x, warped_position_y);
imageToFloatBuf(&libmv_patch, channels, patch);
}
/* ************ Tracks ************ */
struct libmv_Tracks *libmv_tracksNew(void)
{
libmv::Tracks *libmv_tracks = LIBMV_OBJECT_NEW(libmv::Tracks);
return (struct libmv_Tracks *)libmv_tracks;
}
void libmv_tracksDestroy(struct libmv_Tracks *libmv_tracks)
{
using libmv::Tracks;
LIBMV_OBJECT_DELETE(libmv_tracks, Tracks);
}
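/* Insert a marker for the given (image, track) pair at position (x, y).
 * The weight scales the marker's influence; markers with zero weight are
 * skipped when computing reprojection errors (see below). */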
void libmv_tracksInsert(struct libmv_Tracks *libmv_tracks, int image, int track, double x, double y, double weight)
{
((libmv::Tracks*) libmv_tracks)->Insert(image, track, x, y, weight);
}
/* ************ Reconstruction ************ */
class ReconstructUpdateCallback : public libmv::ProgressUpdateCallback {
public:
ReconstructUpdateCallback(reconstruct_progress_update_cb progress_update_callback, void *callback_customdata)
{
progress_update_callback_ = progress_update_callback;
callback_customdata_ = callback_customdata;
}
void invoke(double progress, const char *message)
{
if (progress_update_callback_) {
progress_update_callback_(callback_customdata_, progress, message);
}
}
protected:
reconstruct_progress_update_cb progress_update_callback_;
void *callback_customdata_;
};
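/* Translate the LIBMV_REFINE_* bit-flags requested by the caller into
 * libmv::BUNDLE_* flags and run a bundle adjustment pass that refines the
 * selected intrinsics. */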
static void libmv_solveRefineIntrinsics(const libmv::Tracks &tracks,
const int refine_intrinsics,
const int bundle_constraints,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata,
libmv::EuclideanReconstruction *reconstruction,
libmv::CameraIntrinsics *intrinsics)
{
/* only a few combinations are supported but trust the caller */
int bundle_intrinsics = 0;
if (refine_intrinsics & LIBMV_REFINE_FOCAL_LENGTH) {
bundle_intrinsics |= libmv::BUNDLE_FOCAL_LENGTH;
}
if (refine_intrinsics & LIBMV_REFINE_PRINCIPAL_POINT) {
bundle_intrinsics |= libmv::BUNDLE_PRINCIPAL_POINT;
}
if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K1) {
bundle_intrinsics |= libmv::BUNDLE_RADIAL_K1;
}
if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K2) {
bundle_intrinsics |= libmv::BUNDLE_RADIAL_K2;
}
progress_update_callback(callback_customdata, 1.0, "Refining solution");
libmv::EuclideanBundleCommonIntrinsics(tracks,
bundle_intrinsics,
bundle_constraints,
reconstruction,
intrinsics);
}
static void cameraIntrinsicsFromOptions(const libmv_CameraIntrinsicsOptions *camera_intrinsics_options,
libmv::CameraIntrinsics *camera_intrinsics)
{
camera_intrinsics->SetFocalLength(camera_intrinsics_options->focal_length,
camera_intrinsics_options->focal_length);
camera_intrinsics->SetPrincipalPoint(camera_intrinsics_options->principal_point_x,
camera_intrinsics_options->principal_point_y);
camera_intrinsics->SetRadialDistortion(camera_intrinsics_options->k1,
camera_intrinsics_options->k2,
camera_intrinsics_options->k3);
camera_intrinsics->SetImageSize(camera_intrinsics_options->image_width,
camera_intrinsics_options->image_height);
}
static libmv::Tracks getNormalizedTracks(const libmv::Tracks &tracks, const libmv::CameraIntrinsics &camera_intrinsics)
{
libmv::vector<libmv::Marker> markers = tracks.AllMarkers();
for (int i = 0; i < markers.size(); ++i) {
camera_intrinsics.InvertIntrinsics(markers[i].x, markers[i].y,
&(markers[i].x), &(markers[i].y));
}
return libmv::Tracks(markers);
}
static void finishReconstruction(const libmv::Tracks &tracks, const libmv::CameraIntrinsics &camera_intrinsics,
struct libmv_Reconstruction *libmv_reconstruction,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata)
{
libmv::EuclideanReconstruction &reconstruction = libmv_reconstruction->reconstruction;
/* reprojection error calculation */
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = tracks;
libmv_reconstruction->error = libmv::EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
}
static bool selectTwoKeyframesBasedOnGRICAndVariance(
libmv::Tracks &tracks,
libmv::Tracks &normalized_tracks,
libmv::CameraIntrinsics &camera_intrinsics,
int &keyframe1,
int &keyframe2)
{
libmv::vector<int> keyframes;
/* Get list of all keyframe candidates first. */
SelectKeyframesBasedOnGRICAndVariance(normalized_tracks,
camera_intrinsics,
keyframes);
if (keyframes.size() < 2) {
LG << "Not enough keyframes detected by GRIC";
return false;
}
else if (keyframes.size() == 2) {
keyframe1 = keyframes[0];
keyframe2 = keyframes[1];
return true;
}
/* Now choose the pair of keyframes with the least reprojection error after
 * solving an initial reconstruction from each candidate pair.
 *
 * In fact, currently libmv returns a single pair only, so this code will
 * not actually run. But in the future this could change, so let's stay
 * prepared.
 */
int previous_keyframe = keyframes[0];
double best_error = std::numeric_limits<double>::max();
for (int i = 1; i < keyframes.size(); i++) {
libmv::EuclideanReconstruction reconstruction;
int current_keyframe = keyframes[i];
libmv::vector<libmv::Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
current_keyframe);
libmv::Tracks keyframe_tracks(keyframe_markers);
/* get a solution from two keyframes only */
libmv::EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
libmv::EuclideanBundle(keyframe_tracks, &reconstruction);
libmv::EuclideanCompleteReconstruction(keyframe_tracks,
&reconstruction, NULL);
double current_error =
libmv::EuclideanReprojectionError(tracks,
reconstruction,
camera_intrinsics);
LG << "Error between " << previous_keyframe
<< " and " << current_keyframe
<< ": " << current_error;
if (current_error < best_error) {
best_error = current_error;
keyframe1 = previous_keyframe;
keyframe2 = current_keyframe;
}
previous_keyframe = current_keyframe;
}
return true;
}
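/* Full Euclidean reconstruction pipeline:
 *   1. Normalize the tracks by inverting the camera intrinsics.
 *   2. Pick two keyframes, either supplied by the caller or selected
 *      automatically based on GRIC and variance.
 *   3. Reconstruct from the two keyframes, bundle, then complete the
 *      reconstruction over the remaining frames.
 *   4. Optionally refine the camera intrinsics and rescale the scene to
 *      unity before reporting the final reprojection error. */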
struct libmv_Reconstruction *libmv_solveReconstruction(const struct libmv_Tracks *libmv_tracks,
const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
libmv_ReconstructionOptions *libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata)
{
struct libmv_Reconstruction *libmv_reconstruction = LIBMV_OBJECT_NEW(libmv_Reconstruction);
libmv::Tracks &tracks = *((libmv::Tracks *) libmv_tracks);
libmv::EuclideanReconstruction &reconstruction = libmv_reconstruction->reconstruction;
libmv::CameraIntrinsics &camera_intrinsics = libmv_reconstruction->intrinsics;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
/* Convert options from the C-API representation to the libmv one. */
cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, &camera_intrinsics);
/* Invert the camera intrinsics */
libmv::Tracks normalized_tracks = getNormalizedTracks(tracks, camera_intrinsics);
/* keyframe selection */
int keyframe1 = libmv_reconstruction_options->keyframe1,
keyframe2 = libmv_reconstruction_options->keyframe2;
if (libmv_reconstruction_options->select_keyframes) {
LG << "Using automatic keyframe selection";
update_callback.invoke(0, "Selecting keyframes");
selectTwoKeyframesBasedOnGRICAndVariance(tracks,
normalized_tracks,
camera_intrinsics,
keyframe1,
keyframe2);
/* So that keyframes in the interface will be updated. */
libmv_reconstruction_options->keyframe1 = keyframe1;
libmv_reconstruction_options->keyframe2 = keyframe2;
}
/* actual reconstruction */
LG << "frames to init from: " << keyframe1 << " " << keyframe2;
libmv::vector<libmv::Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
LG << "number of markers for init: " << keyframe_markers.size();
update_callback.invoke(0, "Initial reconstruction");
libmv::EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
libmv::EuclideanBundle(normalized_tracks, &reconstruction);
libmv::EuclideanCompleteReconstruction(normalized_tracks,
&reconstruction, &update_callback);
/* refinement */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_CONSTRAINTS,
progress_update_callback,
callback_customdata,
&reconstruction,
&camera_intrinsics);
}
/* set reconstruction scale to unity */
libmv::EuclideanScaleToUnity(&reconstruction);
/* finish reconstruction */
finishReconstruction(tracks, camera_intrinsics, libmv_reconstruction,
progress_update_callback, callback_customdata);
return (struct libmv_Reconstruction *)libmv_reconstruction;
}
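/* Modal ("tripod") solver: the camera is assumed not to translate, so the
 * solve and the subsequent bundle adjustment run with translation
 * constrained out (BUNDLE_NO_TRANSLATION). */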
struct libmv_Reconstruction *libmv_solveModal(const struct libmv_Tracks *libmv_tracks,
const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
const libmv_ReconstructionOptions *libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void *callback_customdata)
{
struct libmv_Reconstruction *libmv_reconstruction = LIBMV_OBJECT_NEW(libmv_Reconstruction);
libmv::Tracks &tracks = *((libmv::Tracks *) libmv_tracks);
libmv::EuclideanReconstruction &reconstruction = libmv_reconstruction->reconstruction;
libmv::CameraIntrinsics &camera_intrinsics = libmv_reconstruction->intrinsics;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, &camera_intrinsics);
/* Invert the camera intrinsics. */
libmv::Tracks normalized_tracks = getNormalizedTracks(tracks, camera_intrinsics);
/* Actual reconstruction. */
libmv::ModalSolver(normalized_tracks, &reconstruction, &update_callback);
libmv::CameraIntrinsics empty_intrinsics;
libmv::EuclideanBundleCommonIntrinsics(normalized_tracks,
libmv::BUNDLE_NO_INTRINSICS,
libmv::BUNDLE_NO_TRANSLATION,
&reconstruction,
&empty_intrinsics);
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_TRANSLATION,
progress_update_callback, callback_customdata,
&reconstruction,
&camera_intrinsics);
}
/* Finish reconstruction. */
finishReconstruction(tracks, camera_intrinsics, libmv_reconstruction,
progress_update_callback, callback_customdata);
return (struct libmv_Reconstruction *)libmv_reconstruction;
}
void libmv_reconstructionDestroy(struct libmv_Reconstruction *libmv_reconstruction)
{
LIBMV_OBJECT_DELETE(libmv_reconstruction, libmv_Reconstruction);
}
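/* Look up the reconstructed 3D position of the given track. The Y and Z
 * components are swapped on output, presumably to match Blender's axis
 * convention (the same swap appears in libmv_reprojectionCameraForImage
 * below). */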
int libmv_reprojectionPointForTrack(const struct libmv_Reconstruction *libmv_reconstruction, int track, double pos[3])
{
const libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
const libmv::EuclideanPoint *point = reconstruction->PointForTrack(track);
if (point) {
pos[0] = point->X[0];
pos[1] = point->X[2];
pos[2] = point->X[1];
return 1;
}
return 0;
}
static libmv::Marker ProjectMarker(const libmv::EuclideanPoint &point,
const libmv::EuclideanCamera &camera,
const libmv::CameraIntrinsics &intrinsics)
{
libmv::Vec3 projected = camera.R * point.X + camera.t;
projected /= projected(2);
libmv::Marker reprojected_marker;
intrinsics.ApplyIntrinsics(projected(0), projected(1), &reprojected_marker.x, &reprojected_marker.y);
reprojected_marker.image = camera.image;
reprojected_marker.track = point.track;
return reprojected_marker;
}
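/* Average reprojection error for a single track. Marker weights are taken
 * into account: zero-weight markers are skipped entirely and the residual
 * of the others is scaled by their weight. */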
double libmv_reprojectionErrorForTrack(const struct libmv_Reconstruction *libmv_reconstruction, int track)
{
const libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
const libmv::CameraIntrinsics *intrinsics = &libmv_reconstruction->intrinsics;
libmv::vector<libmv::Marker> markers = libmv_reconstruction->tracks.MarkersForTrack(track);
int num_reprojected = 0;
double total_error = 0.0;
for (int i = 0; i < markers.size(); ++i) {
double weight = markers[i].weight;
const libmv::EuclideanCamera *camera = reconstruction->CameraForImage(markers[i].image);
const libmv::EuclideanPoint *point = reconstruction->PointForTrack(markers[i].track);
if (!camera || !point || weight == 0.0) {
continue;
}
num_reprojected++;
libmv::Marker reprojected_marker = ProjectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * weight;
double ey = (reprojected_marker.y - markers[i].y) * weight;
total_error += sqrt(ex * ex + ey * ey);
}
return total_error / num_reprojected;
}
double libmv_reprojectionErrorForImage(const struct libmv_Reconstruction *libmv_reconstruction, int image)
{
const libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
const libmv::CameraIntrinsics *intrinsics = &libmv_reconstruction->intrinsics;
libmv::vector<libmv::Marker> markers = libmv_reconstruction->tracks.MarkersInImage(image);
const libmv::EuclideanCamera *camera = reconstruction->CameraForImage(image);
int num_reprojected = 0;
double total_error = 0.0;
if (!camera)
return 0;
for (int i = 0; i < markers.size(); ++i) {
const libmv::EuclideanPoint *point = reconstruction->PointForTrack(markers[i].track);
if (!point) {
continue;
}
num_reprojected++;
libmv::Marker reprojected_marker = ProjectMarker(*point, *camera, *intrinsics);
double ex = reprojected_marker.x - markers[i].x;
double ey = reprojected_marker.y - markers[i].y;
total_error += sqrt(ex * ex + ey * ey);
}
return total_error / num_reprojected;
}
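/* Build a 4x4 matrix for the camera of the given image: the rotation is
 * written with its second and third columns swapped and the third row
 * negated, and the optical center (also with Y and Z swapped) is used as
 * the translation. This appears to convert from libmv's camera convention
 * to Blender's. */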
int libmv_reprojectionCameraForImage(const struct libmv_Reconstruction *libmv_reconstruction,
int image, double mat[4][4])
{
const libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
const libmv::EuclideanCamera *camera = reconstruction->CameraForImage(image);
if (camera) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
int l = k;
if (k == 1) l = 2;
else if (k == 2) l = 1;
if (j == 2) mat[j][l] = -camera->R(j,k);
else mat[j][l] = camera->R(j,k);
}
mat[j][3] = 0.0;
}
libmv::Vec3 optical_center = -camera->R.transpose() * camera->t;
mat[3][0] = optical_center(0);
mat[3][1] = optical_center(2);
mat[3][2] = optical_center(1);
mat[3][3] = 1.0;
return 1;
}
return 0;
}
double libmv_reprojectionError(const struct libmv_Reconstruction *libmv_reconstruction)
{
return libmv_reconstruction->error;
}
struct libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(struct libmv_Reconstruction *libmv_reconstruction)
{
return (struct libmv_CameraIntrinsics *)&libmv_reconstruction->intrinsics;
}
/* ************ Feature detector ************ */
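/* Feature detection runs one of libmv's detectors (FAST, MORAVEC or HARRIS,
 * chosen via libmv_DetectOptions) on a byte or float buffer and copies the
 * result into a plain array owned by libmv_Features. */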
static libmv_Features *libmv_featuresFromVector(
const libmv::vector<libmv::Feature> &features)
{
struct libmv_Features *libmv_features = LIBMV_STRUCT_NEW(libmv_Features, 1);
int count = features.size();
if (count) {
libmv_features->features = LIBMV_STRUCT_NEW(libmv::Feature, count);
for (int i = 0; i < count; i++) {
libmv_features->features[i] = features.at(i);
}
}
else {
libmv_features->features = NULL;
}
libmv_features->count = count;
return libmv_features;
}
static void libmv_convertDetectorOptions(libmv_DetectOptions *options,
libmv::DetectOptions *detector_options)
{
switch (options->detector) {
#define LIBMV_CONVERT(the_detector) \
case LIBMV_DETECTOR_ ## the_detector: \
detector_options->type = libmv::DetectOptions::the_detector; \
break;
LIBMV_CONVERT(FAST)
LIBMV_CONVERT(MORAVEC)
LIBMV_CONVERT(HARRIS)
#undef LIBMV_CONVERT
}
detector_options->margin = options->margin;
detector_options->min_distance = options->min_distance;
detector_options->fast_min_trackness = options->fast_min_trackness;
detector_options->moravec_max_count = options->moravec_max_count;
detector_options->moravec_pattern = options->moravec_pattern;
detector_options->harris_threshold = options->harris_threshold;
}
struct libmv_Features *libmv_detectFeaturesByte(const unsigned char *image_buffer,
int width, int height, int channels,
libmv_DetectOptions *options)
{
// Prepare the image.
libmv::FloatImage image;
byteBufToImage(image_buffer, width, height, channels, &image);
// Configure detector.
libmv::DetectOptions detector_options;
libmv_convertDetectorOptions(options, &detector_options);
// Run the detector.
libmv::vector<libmv::Feature> detected_features;
libmv::Detect(image, detector_options, &detected_features);
// Convert result to C-API.
libmv_Features *result = libmv_featuresFromVector(detected_features);
return result;
}
struct libmv_Features *libmv_detectFeaturesFloat(const float *image_buffer,
int width, int height, int channels,
libmv_DetectOptions *options)
{
// Prepare the image.
libmv::FloatImage image;
floatBufToImage(image_buffer, width, height, channels, &image);
// Configure detector.
libmv::DetectOptions detector_options;
libmv_convertDetectorOptions(options, &detector_options);
// Run the detector.
libmv::vector<libmv::Feature> detected_features;
libmv::Detect(image, detector_options, &detected_features);
// Convert result to C-API.
libmv_Features *result = libmv_featuresFromVector(detected_features);
return result;
}
void libmv_featuresDestroy(struct libmv_Features *libmv_features)
{
if (libmv_features->features) {
LIBMV_STRUCT_DELETE(libmv_features->features);
}
LIBMV_STRUCT_DELETE(libmv_features);
}
int libmv_countFeatures(const struct libmv_Features *libmv_features)
{
return libmv_features->count;
}
void libmv_getFeature(const struct libmv_Features *libmv_features, int number, double *x, double *y, double *score, double *size)
{
libmv::Feature feature = libmv_features->features[number];
*x = feature.x;
*y = feature.y;
*score = feature.score;
*size = feature.size;
}
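/* A minimal usage sketch of the detector API (illustrative only; the buffer
 * name is hypothetical):
 *
 *   libmv_DetectOptions options;  // filled in by the caller
 *   libmv_Features *features =
 *       libmv_detectFeaturesByte(pixels, width, height, 1, &options);
 *   for (int i = 0; i < libmv_countFeatures(features); i++) {
 *     double x, y, score, size;
 *     libmv_getFeature(features, i, &x, &y, &score, &size);
 *     // use the feature ...
 *   }
 *   libmv_featuresDestroy(features);
 */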
/* ************ Camera intrinsics ************ */
struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsNewEmpty(void)
{
libmv::CameraIntrinsics *camera_intrinsics = LIBMV_OBJECT_NEW(libmv::CameraIntrinsics);
return (struct libmv_CameraIntrinsics *) camera_intrinsics;
}
struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options)
{
libmv::CameraIntrinsics *camera_intrinsics = LIBMV_OBJECT_NEW(libmv::CameraIntrinsics);
cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, camera_intrinsics);
return (struct libmv_CameraIntrinsics *) camera_intrinsics;
}
struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(const libmv_CameraIntrinsics *libmvIntrinsics)
{
libmv::CameraIntrinsics *orig_intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
libmv::CameraIntrinsics *new_intrinsics = LIBMV_OBJECT_NEW(libmv::CameraIntrinsics, *orig_intrinsics);
return (struct libmv_CameraIntrinsics *) new_intrinsics;
}
void libmv_cameraIntrinsicsDestroy(struct libmv_CameraIntrinsics *libmvIntrinsics)
{
using libmv::CameraIntrinsics;
LIBMV_OBJECT_DELETE(libmvIntrinsics, CameraIntrinsics);
}
void libmv_cameraIntrinsicsUpdate(const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
struct libmv_CameraIntrinsics *libmv_intrinsics)
{
libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
double focal_length = libmv_camera_intrinsics_options->focal_length;
double principal_x = libmv_camera_intrinsics_options->principal_point_x;
double principal_y = libmv_camera_intrinsics_options->principal_point_y;
double k1 = libmv_camera_intrinsics_options->k1;
double k2 = libmv_camera_intrinsics_options->k2;
double k3 = libmv_camera_intrinsics_options->k3;
int image_width = libmv_camera_intrinsics_options->image_width;
int image_height = libmv_camera_intrinsics_options->image_height;
/* Try to avoid unnecessary updates, so pre-computed distortion grids are not freed. */
if (camera_intrinsics->focal_length() != focal_length)
camera_intrinsics->SetFocalLength(focal_length, focal_length);
if (camera_intrinsics->principal_point_x() != principal_x ||
camera_intrinsics->principal_point_y() != principal_y)
{
camera_intrinsics->SetPrincipalPoint(principal_x, principal_y);
}
if (camera_intrinsics->k1() != k1 ||
camera_intrinsics->k2() != k2 ||
camera_intrinsics->k3() != k3)
{
camera_intrinsics->SetRadialDistortion(k1, k2, k3);
}
if (camera_intrinsics->image_width() != image_width ||
camera_intrinsics->image_height() != image_height)
{
camera_intrinsics->SetImageSize(image_width, image_height);
}
}
void libmv_cameraIntrinsicsSetThreads(struct libmv_CameraIntrinsics *libmv_intrinsics, int threads)
{
libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
camera_intrinsics->SetThreads(threads);
}
void libmv_cameraIntrinsicsExtract(const struct libmv_CameraIntrinsics *libmv_intrinsics, double *focal_length,
double *principal_x, double *principal_y, double *k1, double *k2, double *k3,
int *width, int *height)
{
libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
*focal_length = camera_intrinsics->focal_length();
*principal_x = camera_intrinsics->principal_point_x();
*principal_y = camera_intrinsics->principal_point_y();
*k1 = camera_intrinsics->k1();
*k2 = camera_intrinsics->k2();
*k3 = camera_intrinsics->k3();
*width = camera_intrinsics->image_width();
*height = camera_intrinsics->image_height();
}
void libmv_cameraIntrinsicsUndistortByte(const struct libmv_CameraIntrinsics *libmv_intrinsics,
unsigned char *src, unsigned char *dst, int width, int height,
float overscan, int channels)
{
libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
camera_intrinsics->Undistort(src, dst, width, height, overscan, channels);
}
void libmv_cameraIntrinsicsUndistortFloat(const struct libmv_CameraIntrinsics *libmvIntrinsics,
float *src, float *dst, int width, int height,
float overscan, int channels)
{
libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
intrinsics->Undistort(src, dst, width, height, overscan, channels);
}
void libmv_cameraIntrinsicsDistortByte(const struct libmv_CameraIntrinsics *libmvIntrinsics,
unsigned char *src, unsigned char *dst, int width, int height,
float overscan, int channels)
{
libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
intrinsics->Distort(src, dst, width, height, overscan, channels);
}
void libmv_cameraIntrinsicsDistortFloat(const struct libmv_CameraIntrinsics *libmvIntrinsics,
float *src, float *dst, int width, int height,
float overscan, int channels)
{
libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
intrinsics->Distort(src, dst, width, height, overscan, channels);
}
void libmv_cameraIntrinsicsApply(const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
double x, double y, double *x1, double *y1)
{
libmv::CameraIntrinsics camera_intrinsics;
cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, &camera_intrinsics);
if (libmv_camera_intrinsics_options->focal_length) {
/* Apply intrinsics (lens distortion) only when the focal length is non-zero. */
camera_intrinsics.ApplyIntrinsics(x, y, x1, y1);
}
}
void libmv_cameraIntrinsicsInvert(const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
double x, double y, double *x1, double *y1)
{
libmv::CameraIntrinsics camera_intrinsics;
cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, &camera_intrinsics);
if (libmv_camera_intrinsics_options->focal_length) {
/* Invert intrinsics (undistort) only when the focal length is non-zero. */
camera_intrinsics.InvertIntrinsics(x, y, x1, y1);
}
}
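/* Estimate a 3x3 homography mapping the 2D points x1 onto x2 from
 * num_points correspondences and copy the result into H. */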
void libmv_homography2DFromCorrespondencesEuc(double (*x1)[2], double (*x2)[2], int num_points, double H[3][3])
{
libmv::Mat x1_mat, x2_mat;
libmv::Mat3 H_mat;
x1_mat.resize(2, num_points);
x2_mat.resize(2, num_points);
for (int i = 0; i < num_points; i++) {
x1_mat.col(i) = libmv::Vec2(x1[i][0], x1[i][1]);
x2_mat.col(i) = libmv::Vec2(x2[i][0], x2[i][1]);
}
LG << "x1: " << x1_mat;
LG << "x2: " << x2_mat;
libmv::EstimateHomographyOptions options;
libmv::EstimateHomography2DFromCorrespondences(x1_mat, x2_mat, options, &H_mat);
LG << "H: " << H_mat;
memcpy(H, H_mat.data(), 9 * sizeof(double));
}
#endif