blender/extern/libmv/patches/detect.patch

Camera tracking integration
===========================

Committing camera tracking integration GSoC project into trunk.

This commit includes:

- Bundled version of the libmv library (with some changes against the official
  repo; re-sync with the libmv repo a bit later)

- New datatype ID called MovieClip which is optimized for working with movie
  clips (both movie files and image sequences) and for doing camera/motion
  tracking operations.

- New editor called Clip Editor which is currently used for motion/tracking
  stuff only, but which can be easily extended to work with masks too.

  This editor supports:
  * Loading movie files/image sequences
  * Building proxies of different sizes for the loaded movie clip; also supports
    building undistorted proxies to increase playback speed in undistorted mode
  * Manual lens distortion calibration using grid and grease pencil
  * Supervised 2D tracking using two different algorithms: KLT and SAD
  * Basic algorithm for feature detection
  * Camera motion solving and scene orientation

- New constraints to "link" scene objects with motions solved from the clip:
  * Follow Track (makes an object follow the 2D motion of the track with the
    given name, or parents the object to the reconstructed 3D position of the
    track)
  * Camera Solver (makes the camera move in the same way as the reconstructed
    camera)

This commit does NOT include changes from the tomato branch:

- New nodes (they'll be committed as a separate patch)

- Automatic image offset guessing for the image input node and image editor
  (need to do more tests and gather more feedback)

- Code cleanup in libmv-capi. It's not a critical cleanup, just increases the
  readability and understandability of the code. Better to make this change
  when Keir finishes his current patch.

More details about this project can be found on this page:
http://wiki.blender.org/index.php/User:Nazg-gul/GSoC-2011

Further development of small features will be done in trunk; bigger/experimental
features will first be implemented in the tomato branch.
2011-11-07 12:55:18 +00:00
diff --git a/src/libmv/simple_pipeline/detect.cc b/src/libmv/simple_pipeline/detect.cc
index 6fc0cdd..8ac42ab 100644
--- a/src/libmv/simple_pipeline/detect.cc
+++ b/src/libmv/simple_pipeline/detect.cc
@@ -23,15 +23,89 @@
****************************************************************************/
#include "libmv/simple_pipeline/detect.h"
+#include <third_party/fast/fast.h>
#include <stdlib.h>
-#include <string.h>
+#include <memory.h>
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
namespace libmv {
typedef unsigned int uint;
+int featurecmp(const void *a_v, const void *b_v)
+{
+ Feature *a = (Feature*)a_v;
+ Feature *b = (Feature*)b_v;
+
+ return b->score - a->score;
+}
+
+std::vector<Feature> DetectFAST(const unsigned char* data, int width, int height, int stride,
+ int min_trackness, int min_distance) {
+ std::vector<Feature> features;
+ // TODO(MatthiasF): Support targeting a feature count (binary search trackness)
+ int num_features;
+ xy* all = fast9_detect(data, width, height,
+ stride, min_trackness, &num_features);
+ if(num_features == 0) {
+ free(all);
+ return features;
+ }
+ int* scores = fast9_score(data, stride, all, num_features, min_trackness);
+ // TODO: merge with close feature suppression
+ xy* nonmax = nonmax_suppression(all, scores, num_features, &num_features);
+ free(all);
+ // Remove too close features
+ // TODO(MatthiasF): A resolution independent parameter would be better than distance
+ // e.g. a coefficient going from 0 (no minimal distance) to 1 (optimal circle packing)
+ // FIXME(MatthiasF): this method will not necessarily give all maximum markers
+ if(num_features) {
+ Feature *all_features = new Feature[num_features];
+
+ for(int i = 0; i < num_features; ++i) {
+ Feature a = { nonmax[i].x, nonmax[i].y, scores[i], 0 };
+ all_features[i] = a;
+ }
+
+ qsort((void *)all_features, num_features, sizeof(Feature), featurecmp);
+
+ features.reserve(num_features);
+
+ int prev_score = all_features[0].score;
+ for(int i = 0; i < num_features; ++i) {
+ bool ok = true;
+ Feature a = all_features[i];
+ if(a.score>prev_score)
+ abort();
+ prev_score = a.score;
+
+ // compare each feature against filtered set
+ for(int j = 0; j < features.size(); j++) {
+ Feature& b = features[j];
+ if ( (a.x-b.x)*(a.x-b.x)+(a.y-b.y)*(a.y-b.y) < min_distance*min_distance ) {
+ // already a nearby feature
+ ok = false;
+ break;
+ }
+ }
+
+ if(ok) {
+ // add the new feature
+ features.push_back(a);
+ }
+ }
+
+ delete [] all_features;
+ }
+ free(scores);
+ free(nonmax);
+ return features;
+}
+
#ifdef __SSE2__
-#include <emmintrin.h>
static uint SAD(const ubyte* imageA, const ubyte* imageB, int strideA, int strideB) {
__m128i a = _mm_setzero_si128();
for(int i = 0; i < 16; i++) {
@@ -52,7 +126,7 @@ static uint SAD(const ubyte* imageA, const ubyte* imageB, int strideA, int strid
}
#endif
-void Detect(ubyte* image, int stride, int width, int height, Feature* detected, int* count, int distance, ubyte* pattern) {
+void DetectMORAVEC(ubyte* image, int stride, int width, int height, Feature* detected, int* count, int distance, ubyte* pattern) {
unsigned short histogram[256];
memset(histogram,0,sizeof(histogram));
ubyte* scores = new ubyte[width*height];
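A minimal standalone sketch of the distance-based filtering step the detect.cc hunk above adds: corners are sorted by descending score, then accepted greedily only if no already-accepted corner lies within min_distance pixels. This is not part of the patch; the names Corner and FilterByDistance are illustrative and do not exist in libmv.

#include <algorithm>
#include <vector>

struct Corner { float x, y, score; };

// Keep the strongest corners while enforcing a minimal pairwise distance.
std::vector<Corner> FilterByDistance(std::vector<Corner> corners, float min_distance) {
  // Strongest corners first, so weaker neighbours are the ones rejected.
  std::sort(corners.begin(), corners.end(),
            [](const Corner &a, const Corner &b) { return a.score > b.score; });
  std::vector<Corner> kept;
  for (const Corner &c : corners) {
    bool ok = true;
    for (const Corner &k : kept) {
      float dx = c.x - k.x, dy = c.y - k.y;
      if (dx * dx + dy * dy < min_distance * min_distance) {
        ok = false;  // a stronger corner was already kept nearby
        break;
      }
    }
    if (ok) kept.push_back(c);
  }
  return kept;
}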
diff --git a/src/libmv/simple_pipeline/detect.h b/src/libmv/simple_pipeline/detect.h
index 23b239b..bbe7aed 100644
--- a/src/libmv/simple_pipeline/detect.h
+++ b/src/libmv/simple_pipeline/detect.h
@@ -25,27 +25,52 @@
#ifndef LIBMV_SIMPLE_PIPELINE_DETECT_H_
#define LIBMV_SIMPLE_PIPELINE_DETECT_H_
-#ifdef __cplusplus
+#include <vector>
+
namespace libmv {
-#endif
typedef unsigned char ubyte;
/*!
- \a Feature is the 2D location of a detected feature in an image.
+ A Feature is the 2D location of a detected feature in an image.
- \a x, \a y is the position of the center in pixels (from image top-left).
- \a score is an estimate of how well the pattern will be tracked.
- \a size can be used as an initial size to track the pattern.
+ \a x, \a y is the position of the feature in pixels from the top left corner.
+ \a score is an estimate of how well the feature will be tracked.
+ \a size can be used as an initial pattern size to track the feature.
\sa Detect
*/
struct Feature {
+ /// Position in pixels (from top-left corner)
+ /// \note libmv might eventually support subpixel precision.
float x, y;
+ /// Trackness of the feature
float score;
+ /// Size of the feature in pixels
float size;
};
- //radius for non maximal suppression
+
+/*!
+ Detect features in an image.
+
+ You need to input a single channel 8-bit image using a pointer to the image \a data,
+ \a width, \a height and \a stride (i.e. bytes per line).
+
+ You can tweak the count of detected features using \a min_trackness, which is
+ the minimum score to add a feature, and \a min_distance, which is the minimal
+ distance accepted between two features.
+
+ \note You can binary search over \a min_trackness to get a given feature count.
+
+ \note A way to get a uniform distribution of a given feature count is:
+ \a min_distance = \a width * \a height / desired_feature_count ^ 2
+
+ \return All detected features matching the given parameters
+*/
+std::vector<Feature> DetectFAST(const unsigned char* data, int width, int height,
+ int stride, int min_trackness = 128,
+ int min_distance = 120);
+
/*!
Detect features in an image.
@@ -63,10 +88,8 @@ struct Feature {
\note \a You can crop the image (to avoid detecting markers near the borders) without copying:
image += marginY*stride+marginX, width -= 2*marginX, height -= 2*marginY;
*/
-void Detect(ubyte* image, int stride, int width, int height, Feature* detected, int* count, int distance /*=32*/, ubyte* pattern /*=0*/);
+void DetectMORAVEC(ubyte* image, int stride, int width, int height, Feature* detected, int* count, int distance /*=32*/, ubyte* pattern /*=0*/);
-#ifdef __cplusplus
}
-#endif
#endif
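
A hedged usage sketch of the two detector entry points declared in detect.h above. The image buffer, array size and parameter values are placeholder assumptions; the treatment of *count as both the desired maximum (in) and the detected count (out) is an assumption carried over from the pre-existing Detect() API, which this patch only renames.

#include <vector>
#include "libmv/simple_pipeline/detect.h"

void DetectExample(libmv::ubyte *grayscale, int width, int height, int stride) {
  // FAST detection: returns corners with trackness >= min_trackness, spaced at
  // least min_distance pixels apart (the declaration defaults these to 128 and 120).
  std::vector<libmv::Feature> fast =
      libmv::DetectFAST(grayscale, width, height, stride,
                        /*min_trackness=*/128, /*min_distance=*/120);

  // MORAVEC detection fills a caller-provided array. *count: assumed in/out,
  // desired maximum on input, number of detected features on output.
  libmv::Feature moravec[256];
  int count = 256;
  libmv::DetectMORAVEC(grayscale, stride, width, height, moravec, &count,
                       /*distance=*/32, /*pattern=*/0);
  (void)fast;
}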