Camera tracking: merge hybrid tracker from tomato branch

Comment from Keir's commit:

Add a new hybrid region tracker for motion tracking to libmv, and
add it as an option (under "Hybrid") in the tracking settings. The
region tracker combines brute force tracking for coarse alignment
with refinement by the ESM/KLT algorithm already in libmv, which
gives excellent subpixel precision (typically 1/50th of a pixel).

This also adds a new "brute force" region tracker which does a
brute force search through every pixel position in the destination
for the pattern in the first frame. It leverages SSE if available,
similar to the SAD tracker, to do this quickly. Currently it does
some unnecessary conversions to/from floating point that will get
fixed later.

The hybrid tracker glues the two trackers (brute & ESM) together
to get an overall better tracker. The algorithm is simple:

1. Track from frame 1 to frame 2 with the brute force tracker.
   This tries every possible pixel position for the pattern from
   frame 1 in frame 2. The position with the smallest
   sum-of-absolute-differences is chosen. By definition, this
   position is only accurate up to 1 pixel or so.
2. Using the result from 1, initialize a track with ESM. This does
   a least-squares fit with subpixel precision.
3. If the ESM refinement shifted the position by more than 2 pixels,
   report failure.
4. If the ESM refinement shifted the position by less than 2 pixels,
   the track is good and we're done. The rationale is that if the
   refinement stage has to move the window by more than a couple of
   pixels, the brute force stage likely locked onto some random
   position that is not a good fit.

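For illustration, here is a minimal C++ sketch of the coarse-then-refine glue described in the steps above, assuming libmv's generic RegionTracker::Track() interface (the function name TrackCoarseThenRefine is hypothetical); the real implementation added by this commit is HybridRegionTracker::Track() in hybrid_region_tracker.cc below.

#include <cmath>

#include "libmv/tracking/region_tracker.h"  // RegionTracker, FloatImage

// Sketch only: glue a coarse (brute force) and a fine (ESM) tracker together.
static bool TrackCoarseThenRefine(const libmv::RegionTracker &brute,
                                  const libmv::RegionTracker &esm,
                                  const libmv::FloatImage &frame1,
                                  const libmv::FloatImage &frame2,
                                  double x1, double y1,
                                  double *x2, double *y2) {
  // Step 1: brute force search; accurate to roughly one pixel.
  double xc = *x2, yc = *y2;
  if (!brute.Track(frame1, frame2, x1, y1, &xc, &yc))
    return false;

  // Step 2: subpixel refinement starting from the coarse result.
  double xf = xc, yf = yc;
  if (!esm.Track(frame1, frame2, x1, y1, &xf, &yf))
    return false;

  // Steps 3 and 4: accept only if the refinement stayed within 2 pixels.
  double shift = std::sqrt((xf - xc) * (xf - xc) + (yf - yc) * (yf - yc));
  if (shift >= 2.0)
    return false;

  *x2 = xf;
  *y2 = yf;
  return true;
}
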
svn command used: svn merge -r 42375:42376 -r 42377:42379 ^/branches/soc-2011-tomato
Committed by Sergey Sharybin, 2011-12-04 13:26:11 +00:00, commit d261623800.
11 changed files with 527 additions and 13 deletions.

@@ -53,6 +53,8 @@ set(SRC
libmv/image/array_nd.cc
libmv/tracking/pyramid_region_tracker.cc
libmv/tracking/sad.cc
+libmv/tracking/brute_region_tracker.cc
+libmv/tracking/hybrid_region_tracker.cc
libmv/tracking/esm_region_tracker.cc
libmv/tracking/trklt_region_tracker.cc
libmv/tracking/klt_region_tracker.cc
@@ -100,6 +102,8 @@ set(SRC
libmv/image/sample.h
libmv/image/image.h
libmv/tracking/region_tracker.h
+libmv/tracking/brute_region_tracker.h
+libmv/tracking/hybrid_region_tracker.h
libmv/tracking/retrack_region_tracker.h
libmv/tracking/sad.h
libmv/tracking/pyramid_region_tracker.h

@@ -36,6 +36,8 @@
#include "Math/v3d_optimization.h"
#include "libmv/tracking/esm_region_tracker.h"
+#include "libmv/tracking/brute_region_tracker.h"
+#include "libmv/tracking/hybrid_region_tracker.h"
#include "libmv/tracking/klt_region_tracker.h"
#include "libmv/tracking/trklt_region_tracker.h"
#include "libmv/tracking/lmicklt_region_tracker.h"
@@ -109,18 +111,33 @@ void libmv_setLoggingVerbosity(int verbosity)
/* ************ RegionTracker ************ */
-libmv_RegionTracker *libmv_regionTrackerNew(int max_iterations, int pyramid_level, int half_window_size)
+libmv_RegionTracker *libmv_pyramidRegionTrackerNew(int max_iterations, int pyramid_level, int half_window_size)
{
-libmv::EsmRegionTracker *klt_region_tracker = new libmv::EsmRegionTracker;
-klt_region_tracker->half_window_size = half_window_size;
-klt_region_tracker->max_iterations = max_iterations;
-klt_region_tracker->min_determinant = 1e-4;
+libmv::EsmRegionTracker *esm_region_tracker = new libmv::EsmRegionTracker;
+esm_region_tracker->half_window_size = half_window_size;
+esm_region_tracker->max_iterations = max_iterations;
+esm_region_tracker->min_determinant = 1e-4;
-libmv::PyramidRegionTracker *region_tracker =
-new libmv::PyramidRegionTracker(klt_region_tracker, pyramid_level);
+libmv::PyramidRegionTracker *pyramid_region_tracker =
+new libmv::PyramidRegionTracker(esm_region_tracker, pyramid_level);
-return (libmv_RegionTracker *)region_tracker;
+return (libmv_RegionTracker *)pyramid_region_tracker;
}
+libmv_RegionTracker *libmv_hybridRegionTrackerNew(int max_iterations, int half_window_size)
+{
+libmv::EsmRegionTracker *esm_region_tracker = new libmv::EsmRegionTracker;
+esm_region_tracker->half_window_size = half_window_size;
+esm_region_tracker->max_iterations = max_iterations;
+esm_region_tracker->min_determinant = 1e-4;
+libmv::BruteRegionTracker *brute_region_tracker = new libmv::BruteRegionTracker;
+brute_region_tracker->half_window_size = half_window_size;
+libmv::HybridRegionTracker *hybrid_region_tracker =
+new libmv::HybridRegionTracker(brute_region_tracker, esm_region_tracker);
+return (libmv_RegionTracker *)hybrid_region_tracker;
+}
static void floatBufToImage(const float *buf, int width, int height, libmv::FloatImage *image)

@@ -43,7 +43,8 @@ void libmv_startDebugLogging(void);
void libmv_setLoggingVerbosity(int verbosity);
/* RegionTracker */
-struct libmv_RegionTracker *libmv_regionTrackerNew(int max_iterations, int pyramid_level, int half_window_size);
+struct libmv_RegionTracker *libmv_pyramidRegionTrackerNew(int max_iterations, int pyramid_level, int half_window_size);
+struct libmv_RegionTracker *libmv_hybridRegionTrackerNew(int max_iterations, int half_window_size);
int libmv_regionTrackerTrack(struct libmv_RegionTracker *libmv_tracker, const float *ima1, const float *ima2,
int width, int height, double x1, double y1, double *x2, double *y2);
void libmv_regionTrackerDestroy(struct libmv_RegionTracker *libmv_tracker);

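Not part of the patch, just a hedged usage sketch of the new C API declared above: create a hybrid tracker, run one track, and destroy it. The wrapper function and the half-window size of 5 are illustrative assumptions; max_iterations = 100 mirrors the call in tracking.c below.

#include "libmv-capi.h"

// Sketch: track a single marker from frame 1 to frame 2 with the hybrid tracker.
static int track_marker_hybrid(const float *frame1, const float *frame2,
                               int width, int height,
                               double x1, double y1,
                               double *x2, double *y2) {
  // 100 iterations as in tracking.c; a half window of 5 pixels is an example value.
  struct libmv_RegionTracker *tracker = libmv_hybridRegionTrackerNew(100, 5);

  *x2 = x1;
  *y2 = y1;
  int ok = libmv_regionTrackerTrack(tracker, frame1, frame2,
                                    width, height, x1, y1, x2, y2);

  libmv_regionTrackerDestroy(tracker);
  return ok;
}
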
@@ -0,0 +1,322 @@
// Copyright (c) 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/tracking/brute_region_tracker.h"
#ifdef __SSE2__
#include <emmintrin.h>
#endif
#ifndef __APPLE__
// Needed for memalign on Linux and _aligned_malloc on Windows.
#include <malloc.h>
#else
// Apple's malloc is 16-byte aligned, and does not have malloc.h, so include
// stdlib instead.
#include <cstdlib>
#endif
#include "libmv/image/image.h"
#include "libmv/image/convolve.h"
#include "libmv/image/sample.h"
#include "libmv/logging/logging.h"
namespace libmv {
namespace {
// TODO(keir): It's stupid that this is needed here. Push this somewhere else.
void *aligned_malloc(int size, int alignment) {
#ifdef _WIN32
return _aligned_malloc(size, alignment);
#elif __APPLE__
// On Mac OS X, both the heap and the stack are guaranteed 16-byte aligned so
// they work natively with SSE types with no further work.
CHECK_EQ(alignment, 16);
return malloc(size);
#else // This is for Linux.
return memalign(alignment, size);
#endif
}
void aligned_free(void *ptr) {
#ifdef _WIN32
_aligned_free(ptr);
#else
free(ptr);
#endif
}
bool RegionIsInBounds(const FloatImage &image1,
double x, double y,
int half_window_size) {
// Check the minimum coordinates.
int min_x = floor(x) - half_window_size - 1;
int min_y = floor(y) - half_window_size - 1;
if (min_x < 0.0 ||
min_y < 0.0) {
return false;
}
// Check the maximum coordinates.
int max_x = ceil(x) + half_window_size + 1;
int max_y = ceil(y) + half_window_size + 1;
if (max_x > image1.cols() ||
max_y > image1.rows()) {
return false;
}
// Ok, we're good.
return true;
}
#ifdef __SSE2__
// Compute the sum of absolute differences between the arrays "a" and "b".
// The array "a" is assumed to be 16-byte aligned, while "b" is not. The
// result is returned as the first and third elements of __m128i if
// interpreted as a 4-element 32-bit integer array. The SAD is the sum of the
// elements.
//
// The function requires 16 - (size % 16) valid extra elements at the end of both "a"
// and "b", since the SSE load instructions will pull in memory past the end
// of the arrays if their size is not a multiple of 16.
inline static __m128i SumOfAbsoluteDifferencesContiguousSSE(
const unsigned char *a, // aligned
const unsigned char *b, // not aligned
unsigned int size,
__m128i sad) {
// Do the bulk of the work as 16-way integer operations.
for(unsigned int j = 0; j < size / 16; j++) {
sad = _mm_add_epi32(sad, _mm_sad_epu8( _mm_load_si128 ((__m128i*)(a + 16 * j)),
_mm_loadu_si128((__m128i*)(b + 16 * j))));
}
// Handle the trailing end.
// TODO(keir): Benchmark to verify that the below SSE is a win compared to a
// hand-rolled loop. It's not clear that the hand rolled loop would be slower
// than the potential cache miss when loading the immediate table below.
//
// An alternative to this version is to take a packet of all 1's then do a
// 128-bit shift. The issue is that the shift instruction needs an immediate
// amount rather than a variable amount, so the branch instruction here must
// remain. See _mm_srli_si128 and _mm_slli_si128.
unsigned int remainder = size % 16u;
if (remainder) {
unsigned int j = size / 16;
__m128i a_trail = _mm_load_si128 ((__m128i*)(a + 16 * j));
__m128i b_trail = _mm_loadu_si128((__m128i*)(b + 16 * j));
__m128i mask;
switch (remainder) {
#define X 0xff
case 1: mask = _mm_setr_epi8(X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
case 2: mask = _mm_setr_epi8(X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
case 3: mask = _mm_setr_epi8(X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
case 4: mask = _mm_setr_epi8(X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
case 5: mask = _mm_setr_epi8(X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
case 6: mask = _mm_setr_epi8(X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
case 7: mask = _mm_setr_epi8(X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
case 8: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0); break;
case 9: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0); break;
case 10: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0); break;
case 11: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0); break;
case 12: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0); break;
case 13: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0); break;
case 14: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, X, 0, 0); break;
case 15: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, 0); break;
#undef X
}
sad = _mm_add_epi32(sad, _mm_sad_epu8(_mm_and_si128(mask, a_trail),
_mm_and_si128(mask, b_trail)));
}
return sad;
}
#endif
// Computes the sum of absolute differences between pattern and image. Pattern
// must be 16-byte aligned, and the stride must be a multiple of 16. The image
// pointer does not have to be aligned.
int SumOfAbsoluteDifferencesContiguousImage(
const unsigned char *pattern,
unsigned int pattern_width,
unsigned int pattern_height,
unsigned int pattern_stride,
const unsigned char *image,
unsigned int image_stride) {
#ifdef __SSE2__
// TODO(keir): Add interleaved accumulation, where accumulation is done into
// two or more SSE registers that then get combined at the end. This reduces
// instruction dependency; in Eigen's squared norm code, splitting the
// accumulation produces a ~2x speedup. It's not clear it will help here,
// where the number of SSE instructions in the inner loop is smaller.
__m128i sad = _mm_setzero_si128();
for (int r = 0; r < pattern_height; ++r) {
sad = SumOfAbsoluteDifferencesContiguousSSE(&pattern[pattern_stride * r],
&image[image_stride * r],
pattern_width,
sad);
}
return _mm_cvtsi128_si32(
_mm_add_epi32(sad,
_mm_shuffle_epi32(sad, _MM_SHUFFLE(3, 0, 1, 2))));
#else
int sad = 0;
for (int r = 0; r < pattern_height; ++r) {
for (int c = 0; c < pattern_width; ++c) {
sad += abs(pattern[pattern_stride * r + c] - image[image_stride * r + c]);
}
}
return sad;
#endif
}
// Sample a region of size width, height centered at x,y in image, converting
// from float to byte in the process. Samples from the first channel. Puts
// result into *pattern.
void SampleRectangularPattern(const FloatImage &image,
double x, double y,
int width,
int height,
int pattern_stride,
unsigned char *pattern) {
// There are two cases for width and height: even or odd. If it's odd, then
// the bounds [-width / 2, width / 2] works as expected. However, for even,
// this results in one extra access past the end. So use < instead of <= in
// the loops below, but increase the end limit by one in the odd case.
int end_width = (width / 2) + (width % 2);
int end_height = (height / 2) + (height % 2);
for (int r = -height / 2; r < end_height; ++r) {
for (int c = -width / 2; c < end_width; ++c) {
pattern[pattern_stride * (r + height / 2) + c + width / 2] =
SampleLinear(image, y + r, x + c, 0) * 255.0;
}
}
}
// Returns x rounded up to the nearest multiple of alignment.
inline int PadToAlignment(int x, int alignment) {
if (x % alignment != 0) {
x += alignment - (x % alignment);
}
return x;
}
// Sample a region centered at x,y in image with size extending by half_width
// from x. Samples from the first channel. The resulting array is placed in
// *pattern, and the stride, which will be a multiple of 16 if SSE is enabled,
// is returned in *pattern_stride.
//
// NOTE: Caller must free *pattern with aligned_free() from above.
void SampleSquarePattern(const FloatImage &image,
double x, double y,
int half_width,
unsigned char **pattern,
int *pattern_stride) {
int width = 2 * half_width + 1;
// Allocate an aligned block with padding on the end so each row of the
// pattern starts on a 16-byte boundary.
*pattern_stride = PadToAlignment(width, 16);
int pattern_size_bytes = *pattern_stride * width;
*pattern = static_cast<unsigned char *>(
aligned_malloc(pattern_size_bytes, 16));
SampleRectangularPattern(image, x, y, width, width,
*pattern_stride,
*pattern);
}
// NOTE: Caller must free *image with aligned_free() from above.
void FloatArrayToByteArrayWithPadding(const FloatImage &float_image,
unsigned char **image,
int *image_stride) {
// Allocate enough so that accessing 16 elements past the end is fine.
*image_stride = float_image.Width() + 16;
*image = static_cast<unsigned char *>(
aligned_malloc(*image_stride * float_image.Height(), 16));
for (int i = 0; i < float_image.Height(); ++i) {
for (int j = 0; j < float_image.Width(); ++j) {
(*image)[*image_stride * i + j] =
static_cast<unsigned char>(255.0 * float_image(i, j, 0));
}
}
}
} // namespace
// TODO(keir): Compare the "sharpness" of the peak around the best pixel. It's
// probably worth plotting a few examples to see what the histogram of SAD
// values for every hypothesis looks like.
//
// TODO(keir): Priority queue for multiple hypotheses.
bool BruteRegionTracker::Track(const FloatImage &image1,
const FloatImage &image2,
double x1, double y1,
double *x2, double *y2) const {
if (!RegionIsInBounds(image1, x1, y1, half_window_size)) {
LG << "Fell out of image1's window with x1=" << x1 << ", y1=" << y1
<< ", hw=" << half_window_size << ".";
return false;
}
int pattern_width = 2 * half_window_size + 1;
Array3Df image_and_gradient1;
Array3Df image_and_gradient2;
BlurredImageAndDerivativesChannels(image1, 0.9, &image_and_gradient1);
BlurredImageAndDerivativesChannels(image2, 0.9, &image_and_gradient2);
// Sample the pattern to get it aligned to an image grid.
unsigned char *pattern;
int pattern_stride;
SampleSquarePattern(image_and_gradient1, x1, y1, half_window_size,
&pattern,
&pattern_stride);
// Convert the search area directly to bytes without sampling.
unsigned char *search_area;
int search_area_stride;
FloatArrayToByteArrayWithPadding(image_and_gradient2, &search_area, &search_area_stride);
// Try all possible locations inside the search area. Yes, everywhere.
int best_i, best_j, best_sad = INT_MAX;
for (int i = 0; i < image2.Height() - pattern_width; ++i) {
for (int j = 0; j < image2.Width() - pattern_width; ++j) {
int sad = SumOfAbsoluteDifferencesContiguousImage(pattern,
pattern_width,
pattern_width,
pattern_stride,
search_area + search_area_stride * i + j,
search_area_stride);
if (sad < best_sad) {
best_i = i;
best_j = j;
best_sad = sad;
}
}
}
aligned_free(pattern);
aligned_free(search_area);
if (best_sad != INT_MAX) {
*x2 = best_j + half_window_size;
*y2 = best_i + half_window_size;
return true;
}
return false;
}
} // namespace libmv

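A side note on the SSE2 reduction at the end of SumOfAbsoluteDifferencesContiguousImage() above: _mm_sad_epu8 leaves one partial sum in each 64-bit half of the register (32-bit lanes 0 and 2), and the final shuffle/add collapses them into a single integer. A small stand-alone sketch of that step (the helper name is hypothetical):

#include <emmintrin.h>

// Reduce the accumulator built up from _mm_sad_epu8 into one integer.
// _mm_sad_epu8 stores one partial sum in 32-bit lane 0 and one in lane 2
// (lanes 1 and 3 stay zero), so rotating lane 2 down into lane 0 and adding
// leaves the total SAD in the low lane, which _mm_cvtsi128_si32 extracts.
static inline int HorizontalSumOfSad(__m128i sad) {
  __m128i rotated = _mm_shuffle_epi32(sad, _MM_SHUFFLE(3, 0, 1, 2));
  return _mm_cvtsi128_si32(_mm_add_epi32(sad, rotated));
}
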
@@ -0,0 +1,46 @@
// Copyright (c) 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_REGION_TRACKING_BRUTE_REGION_TRACKER_H_
#define LIBMV_REGION_TRACKING_BRUTE_REGION_TRACKER_H_
#include "libmv/image/image.h"
#include "libmv/tracking/region_tracker.h"
namespace libmv {
struct BruteRegionTracker : public RegionTracker {
BruteRegionTracker() : half_window_size(4) {}
virtual ~BruteRegionTracker() {}
// Tracker interface.
virtual bool Track(const FloatImage &image1,
const FloatImage &image2,
double x1, double y1,
double *x2, double *y2) const;
// No point in creating getters or setters.
int half_window_size;
};
} // namespace libmv
#endif // LIBMV_REGION_TRACKING_BRUTE_REGION_TRACKER_H_

@@ -0,0 +1,67 @@
// Copyright (c) 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include "libmv/tracking/hybrid_region_tracker.h"
#include "libmv/image/image.h"
#include "libmv/image/convolve.h"
#include "libmv/image/sample.h"
#include "libmv/logging/logging.h"
namespace libmv {
bool HybridRegionTracker::Track(const FloatImage &image1,
const FloatImage &image2,
double x1, double y1,
double *x2, double *y2) const {
double x2_coarse = *x2;
double y2_coarse = *y2;
if (!coarse_tracker_->Track(image1, image2, x1, y1, &x2_coarse, &y2_coarse)) {
LG << "Coarse tracker failed.";
return false;
}
double x2_fine = x2_coarse;
double y2_fine = y2_coarse;
if (!fine_tracker_->Track(image1, image2, x1, y1, &x2_fine, &y2_fine)) {
LG << "Fine tracker failed.";
return false;
}
// Calculate the shift done by the fine tracker.
double dx2 = x2_coarse - x2_fine;
double dy2 = y2_coarse - y2_fine;
double fine_shift = sqrt(dx2 * dx2 + dy2 * dy2);
LG << "Refinement: dx=" << dx2 << " dy=" << dy2 << ", d=" << fine_shift;
// If the fine tracker shifted the window by two pixels or more, then
// something bad probably happened and we should give up tracking.
if (fine_shift < 2.0) {
LG << "Refinement small enough; success.";
*x2 = x2_fine;
*y2 = y2_fine;
return true;
}
LG << "Refinement was too big; failing.";
return false;
}
} // namespace libmv

@@ -0,0 +1,52 @@
// Copyright (c) 2011 libmv authors.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_REGION_TRACKING_HYBRID_REGION_TRACKER_H_
#define LIBMV_REGION_TRACKING_HYBRID_REGION_TRACKER_H_
#include "libmv/image/image.h"
#include "libmv/base/scoped_ptr.h"
#include "libmv/tracking/region_tracker.h"
namespace libmv {
// TODO(keir): Documentation!
class HybridRegionTracker : public RegionTracker {
public:
HybridRegionTracker(RegionTracker *coarse_tracker,
RegionTracker *fine_tracker)
: coarse_tracker_(coarse_tracker),
fine_tracker_(fine_tracker) {}
virtual ~HybridRegionTracker() {}
// Tracker interface.
virtual bool Track(const FloatImage &image1,
const FloatImage &image2,
double x1, double y1,
double *x2, double *y2) const;
scoped_ptr<RegionTracker> coarse_tracker_;
scoped_ptr<RegionTracker> fine_tracker_;
};
} // namespace libmv
#endif // LIBMV_REGION_TRACKING_HYBRID_REGION_TRACKER_H_

@@ -129,7 +129,7 @@ class CLIP_PT_tools_marker(Panel):
if settings.default_tracker == 'KLT':
col.prop(settings, "default_pyramid_levels")
-else:
+elif settings.default_tracker == 'SAD':
col.prop(settings, "default_correlation_min")
col.separator()

@@ -791,7 +791,7 @@ MovieTrackingContext *BKE_tracking_context_new(MovieClip *clip, MovieClipUser *u
patx= (int)((track->pat_max[0]-track->pat_min[0])*width);
paty= (int)((track->pat_max[1]-track->pat_min[1])*height);
-if(track->tracker==TRACKER_KLT) {
+if(track->tracker==TRACKER_KLT || track->tracker==TRACKER_HYBRID) {
float search_size_x= (track->search_max[0]-track->search_min[0])*width;
float search_size_y= (track->search_max[1]-track->search_min[1])*height;
float pattern_size_x= (track->pat_max[0]-track->pat_min[0])*width;
@@ -809,7 +809,10 @@ MovieTrackingContext *BKE_tracking_context_new(MovieClip *clip, MovieClipUser *u
* than the search size */
int level= MIN2(track->pyramid_levels, max_pyramid_levels);
-track_context.region_tracker= libmv_regionTrackerNew(100, level, MAX2(wndx, wndy));
+if(track->tracker==TRACKER_KLT)
+track_context.region_tracker= libmv_pyramidRegionTrackerNew(100, level, MAX2(wndx, wndy));
+else
+track_context.region_tracker= libmv_hybridRegionTrackerNew(100, MAX2(wndx, wndy));
}
else if(track->tracker==TRACKER_SAD) {
track_context.pattern_size= MAX2(patx, paty);
@@ -1181,7 +1184,7 @@ int BKE_tracking_next(MovieTrackingContext *context)
marker->pos[1]<margin[1] || marker->pos[1]>1.0f-margin[1]) {
onbound= 1;
}
-else if(track->tracker==TRACKER_KLT) {
+else if(track->tracker==TRACKER_KLT || track->tracker==TRACKER_HYBRID) {
float *patch_new;
if(need_readjust) {

@@ -211,6 +211,7 @@ enum {
/* MovieTrackingTrack->tracker */
#define TRACKER_KLT 0
#define TRACKER_SAD 1
+#define TRACKER_HYBRID 2
/* MovieTrackingTrack->adjframes */
#define TRACK_MATCH_KEYFRAME 0

@@ -269,6 +269,7 @@ static MovieTrackingMarker *rna_trackingTrack_marker_find_frame(MovieTrackingTra
static EnumPropertyItem tracker_items[] = {
{TRACKER_KLT, "KLT", 0, "KLT", "Kanade-Lucas-Tomasi tracker which works with most of video clips, a bit slower than SAD"},
{TRACKER_SAD, "SAD", 0, "SAD", "Sum of Absolute Differences tracker which can be used when KLT tracker fails"},
+{TRACKER_HYBRID, "Hybrid", 0, "Hybrid", "A hybrid tracker that uses SAD for rough tracking, KLT for refinement."},
{0, NULL, 0, NULL, NULL}};
static EnumPropertyItem pattern_match_items[] = {