Split tracking.c into several files

File tracking.c became rather huge and annoying to
maintain and it really contains several independent
areas of motrack pipeline.

Now we've got:

* tracking.c: general-purpose functions which are used
  by blender, clip editor, RNA and so on.

* tracking_detect.c: feature detection functions
  (blender-side, logic is still in libmv).

* tracking_region_tracker.c: blender-side 2D tracking logic.

* tracking_plane_tracker.c: plane track tracker.

* tracking_solver.c: functions for camera solving.

* tracking_stabilize.c: 2D stabilization functions.

* tracking_util.c: utility functions for all those files
  and which shouldn't be public.
This commit is contained in:
Sergey Sharybin 2013-12-30 17:03:59 +06:00
parent 5933b2455c
commit 2785e8e73d
10 changed files with 2780 additions and 2416 deletions

@ -74,6 +74,7 @@ void BKE_tracking_clipboard_paste_tracks(struct MovieTracking *tracking, struct
/* **** Track **** */
struct MovieTrackingTrack *BKE_tracking_track_add(struct MovieTracking *tracking, struct ListBase *tracksbase,
float x, float y, int framenr, int width, int height);
struct MovieTrackingTrack *BKE_tracking_track_duplicate(struct MovieTrackingTrack *track);
void BKE_tracking_track_unique_name(struct ListBase *tracksbase, struct MovieTrackingTrack *track);
void BKE_tracking_track_free(struct MovieTrackingTrack *track);

@ -154,6 +154,12 @@ set(SRC
intern/text.c
intern/texture.c
intern/tracking.c
intern/tracking_detect.c
intern/tracking_plane_tracker.c
intern/tracking_region_tracker.c
intern/tracking_solver.c
intern/tracking_stabilize.c
intern/tracking_util.c
intern/treehash.c
intern/unit.c
intern/world.c

File diff suppressed because it is too large Load Diff

@ -0,0 +1,181 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
* Keir Mierle
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/tracking_detect.c
* \ingroup bke
*
* This file contains blender-side implementation of feature detection.
*/
#include "MEM_guardedalloc.h"
#include "DNA_gpencil_types.h"
#include "DNA_movieclip_types.h"
#include "DNA_object_types.h" /* SELECT */
#include "BLI_utildefines.h"
#include "BKE_tracking.h"
#include "IMB_imbuf_types.h"
#include "IMB_imbuf.h"
#include "libmv-capi.h"
/* Check whether point (x, y) is inside the polygon formed by the
 * grease pencil stroke, using the even-odd (ray casting) rule.
 *
 * Self-intersecting strokes give unreliable results, which is
 * considered acceptable here.
 */
static bool check_point_in_stroke(bGPDstroke *stroke, float x, float y)
{
	bGPDspoint *points = stroke->points;
	int num_crossings = 0;
	int index;
	int prev_index = stroke->totpoints - 1;

	/* Cast a horizontal ray from the point and count how many stroke
	 * edges it crosses: an odd count means the point is inside.
	 */
	for (index = 0; index < stroke->totpoints; index++) {
		const bGPDspoint *curr = &points[index];
		const bGPDspoint *prev = &points[prev_index];

		if ((curr->y < y) != (prev->y < y)) {
			const float fac = (y - curr->y) / (prev->y - curr->y);
			if (curr->x + fac * (prev->x - curr->x) < x) {
				num_crossings++;
			}
		}

		prev_index = index;
	}

	return (num_crossings % 2) != 0;
}
/* Check whether point (x, y) is inside any stroke of any frame of the
 * given grease pencil layer.
 */
static bool check_point_in_layer(bGPDlayer *layer, float x, float y)
{
	bGPDframe *frame;

	for (frame = layer->frames.first; frame != NULL; frame = frame->next) {
		bGPDstroke *stroke;
		for (stroke = frame->strokes.first; stroke != NULL; stroke = stroke->next) {
			if (check_point_in_stroke(stroke, x, y)) {
				return true;
			}
		}
	}

	return false;
}
/* Get features detected by libmv and create selected tracks on the clip
 * for them, skipping features filtered out by the optional grease pencil
 * layer (inside or outside it, depending on place_outside_layer).
 */
static void detect_retrieve_libmv_features(MovieTracking *tracking, ListBase *tracksbase,
                                           struct libmv_Features *features, int framenr, int width, int height,
                                           bGPDlayer *layer, bool place_outside_layer)
{
	const int num_features = libmv_countFeatures(features);
	int feature_index;

	for (feature_index = num_features - 1; feature_index >= 0; feature_index--) {
		double x, y, size, score;
		float xu, yu;
		bool use_feature = true;

		libmv_getFeature(features, feature_index, &x, &y, &score, &size);

		/* Convert from pixel coordinates to normalized frame space. */
		xu = x / width;
		yu = y / height;

		if (layer) {
			use_feature = check_point_in_layer(layer, xu, yu) != place_outside_layer;
		}

		if (use_feature) {
			MovieTrackingTrack *track =
			        BKE_tracking_track_add(tracking, tracksbase, xu, yu, framenr, width, height);
			track->flag |= SELECT;
			track->pat_flag |= SELECT;
			track->search_flag |= SELECT;
		}
	}
}
/* Build a gray-scale unsigned char buffer from the given image buffer,
 * which is used as the input for feature detection.
 *
 * Uses Rec. 709 luma weights; caller is responsible for freeing the
 * returned buffer with MEM_freeN().
 */
static unsigned char *detect_get_frame_ucharbuf(ImBuf *ibuf)
{
	unsigned char *pixels = MEM_callocN(ibuf->x * ibuf->y * sizeof(unsigned char), "tracking ucharBuf");
	unsigned char *cp = pixels;
	int x, y;

	for (y = 0; y < ibuf->y; y++) {
		for (x = 0; x < ibuf->x; x++) {
			const int pixel = ibuf->x * y + x;

			if (ibuf->rect_float) {
				const float *rrgbf = ibuf->rect_float + pixel * 4;
				const float gray_f = 0.2126f * rrgbf[0] + 0.7152f * rrgbf[1] + 0.0722f * rrgbf[2];
				*cp = FTOCHAR(gray_f);
			}
			else {
				const unsigned char *rrgb = (unsigned char *)ibuf->rect + pixel * 4;
				*cp = 0.2126f * rrgb[0] + 0.7152f * rrgb[1] + 0.0722f * rrgb[2];
			}

			cp++;
		}
	}

	return pixels;
}
/* Detect features on the given frame using libmv's FAST detector and
 * create tracks for them on the clip.
 */
void BKE_tracking_detect_fast(MovieTracking *tracking, ListBase *tracksbase, ImBuf *ibuf,
                              int framenr, int margin, int min_trackness, int min_distance, bGPDlayer *layer,
                              bool place_outside_layer)
{
	unsigned char *pixels = detect_get_frame_ucharbuf(ibuf);
	struct libmv_Features *features = libmv_detectFeaturesFAST(pixels, ibuf->x, ibuf->y, ibuf->x,
	                                                           margin, min_trackness, min_distance);

	/* Gray-scale buffer is only needed for detection itself. */
	MEM_freeN(pixels);

	detect_retrieve_libmv_features(tracking, tracksbase, features,
	                               framenr, ibuf->x, ibuf->y, layer,
	                               place_outside_layer);

	libmv_featuresDestroy(features);
}

@ -0,0 +1,244 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
* Keir Mierle
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/tracking_plane_tracker.c
* \ingroup bke
*
* This file contains implementation of plane tracker.
*/
#include "MEM_guardedalloc.h"
#include "DNA_movieclip_types.h"
#include "BLI_utildefines.h"
#include "BLI_math.h"
#include "BKE_tracking.h"
#include "libmv-capi.h"
typedef double Vec2[2];
/* Gather point track correspondences between two frames.
 *
 * Fills x1_r and x2_r with newly allocated arrays of positions of point
 * tracks which have got exact markers at both frame1 and frame2, and
 * returns the number of correspondences written.
 *
 * NOTE: both arrays are always allocated (sized for the worst case),
 * even when no correspondences are found, so the caller must always
 * free them with MEM_freeN().
 */
static int point_markers_correspondences_on_both_image(MovieTrackingPlaneTrack *plane_track, int frame1, int frame2,
                                                       Vec2 **x1_r, Vec2 **x2_r)
{
	int i, correspondence_index;
	Vec2 *x1, *x2;

	*x1_r = x1 = MEM_mallocN(sizeof(*x1) * plane_track->point_tracksnr, "point correspondences x1");
	*x2_r = x2 = MEM_mallocN(sizeof(*x2) * plane_track->point_tracksnr, "point correspondences x2");

	for (i = 0, correspondence_index = 0; i < plane_track->point_tracksnr; i++) {
		MovieTrackingTrack *point_track = plane_track->point_tracks[i];
		MovieTrackingMarker *point_marker1, *point_marker2;

		point_marker1 = BKE_tracking_marker_get_exact(point_track, frame1);
		point_marker2 = BKE_tracking_marker_get_exact(point_track, frame2);

		/* Only tracks with markers on both frames contribute a correspondence. */
		if (point_marker1 != NULL && point_marker2 != NULL) {
			/* Here conversion from float to double happens. */
			x1[correspondence_index][0] = point_marker1->pos[0];
			x1[correspondence_index][1] = point_marker1->pos[1];
			x2[correspondence_index][0] = point_marker2->pos[0];
			x2[correspondence_index][1] = point_marker2->pos[1];

			correspondence_index++;
		}
	}

	return correspondence_index;
}
/* Track the plane track in the given direction starting from start_frame,
 * propagating its corners by homographies estimated from the motion of
 * the underlying point tracks.
 *
 * direction > 0 tracks forward, otherwise backwards; when retrack is true
 * the blend towards the next keyframe (auto-key mode) is skipped.
 *
 * NOTE: frame number should be in clip space, not scene space */
static void track_plane_from_existing_motion(MovieTrackingPlaneTrack *plane_track, int start_frame,
                                             int direction, bool retrack)
{
	MovieTrackingPlaneMarker *start_plane_marker = BKE_tracking_plane_marker_get(plane_track, start_frame);
	MovieTrackingPlaneMarker *keyframe_plane_marker = NULL;
	MovieTrackingPlaneMarker new_plane_marker;
	int current_frame, frame_delta = direction > 0 ? 1 : -1;

	if (plane_track->flag & PLANE_TRACK_AUTOKEY) {
		/* Find a keyframe in given direction, i.e. the first non-tracked
		 * plane marker in the contiguous run of markers ahead of us. */
		for (current_frame = start_frame; ; current_frame += frame_delta) {
			MovieTrackingPlaneMarker *next_plane_marker =
			        BKE_tracking_plane_marker_get_exact(plane_track, current_frame + frame_delta);

			if (next_plane_marker == NULL) {
				break;
			}

			if ((next_plane_marker->flag & PLANE_MARKER_TRACKED) == 0) {
				keyframe_plane_marker = next_plane_marker;
				break;
			}
		}
	}
	else {
		/* Without auto-keying the starting marker itself becomes tracked. */
		start_plane_marker->flag |= PLANE_MARKER_TRACKED;
	}

	/* Working copy of the marker which is advanced frame by frame. */
	new_plane_marker = *start_plane_marker;
	new_plane_marker.flag |= PLANE_MARKER_TRACKED;

	for (current_frame = start_frame; ; current_frame += frame_delta) {
		MovieTrackingPlaneMarker *next_plane_marker =
		        BKE_tracking_plane_marker_get_exact(plane_track, current_frame + frame_delta);
		Vec2 *x1, *x2;
		int i, num_correspondences;
		double H_double[3][3];
		float H[3][3];

		/* As soon as we meet keyframed plane, we stop updating the sequence. */
		if (next_plane_marker && (next_plane_marker->flag & PLANE_MARKER_TRACKED) == 0) {
			/* Don't override keyframes if track is in auto-keyframe mode */
			if (plane_track->flag & PLANE_TRACK_AUTOKEY) {
				break;
			}
		}

		num_correspondences =
		        point_markers_correspondences_on_both_image(plane_track, current_frame, current_frame + frame_delta,
		                                                    &x1, &x2);

		/* A homography needs at least 4 correspondences; stop here otherwise.
		 * The arrays were allocated regardless, so free them. */
		if (num_correspondences < 4) {
			MEM_freeN(x1);
			MEM_freeN(x2);
			break;
		}

		libmv_homography2DFromCorrespondencesEuc(x1, x2, num_correspondences, H_double);

		copy_m3_m3d(H, H_double);

		/* Propagate every corner through the estimated homography. */
		for (i = 0; i < 4; i++) {
			float vec[3] = {0.0f, 0.0f, 1.0f}, vec2[3];
			copy_v2_v2(vec, new_plane_marker.corners[i]);

			/* Apply homography */
			mul_v3_m3v3(vec2, H, vec);

			/* Normalize. */
			vec2[0] /= vec2[2];
			vec2[1] /= vec2[2];

			copy_v2_v2(new_plane_marker.corners[i], vec2);
		}

		new_plane_marker.framenr = current_frame + frame_delta;

		/* In auto-key mode, smoothly blend towards the next keyframe using
		 * a smoothstep-weighted interpolation of the corners. */
		if (!retrack && keyframe_plane_marker &&
		    next_plane_marker &&
		    (plane_track->flag & PLANE_TRACK_AUTOKEY))
		{
			float fac = ((float) next_plane_marker->framenr - start_plane_marker->framenr) /
			            ((float) keyframe_plane_marker->framenr - start_plane_marker->framenr);

			/* Smoothstep: 3*fac^2 - 2*fac^3. */
			fac = 3 * fac * fac - 2 * fac * fac * fac;

			for (i = 0; i < 4; i++) {
				interp_v2_v2v2(new_plane_marker.corners[i], new_plane_marker.corners[i],
				               next_plane_marker->corners[i], fac);
			}
		}

		BKE_tracking_plane_marker_insert(plane_track, &new_plane_marker);

		MEM_freeN(x1);
		MEM_freeN(x2);
	}
}
/* Track the plane track in both directions from start_frame using the
 * motion of its underlying point tracks.
 *
 * NOTE: frame number should be in clip space, not scene space */
void BKE_tracking_track_plane_from_existing_motion(MovieTrackingPlaneTrack *plane_track, int start_frame)
{
	const int directions[2] = {1, -1};
	int i;

	for (i = 0; i < 2; i++) {
		track_plane_from_existing_motion(plane_track, start_frame, directions[i], false);
	}
}
/* Find a keyframed plane marker (one without the PLANE_MARKER_TRACKED
 * flag) walking from the marker at start_frame in the given direction.
 *
 * Returns NULL when the walk runs off the markers array without finding
 * a keyframe.
 */
static MovieTrackingPlaneMarker *find_plane_keyframe(MovieTrackingPlaneTrack *plane_track,
                                                     int start_frame, int direction)
{
	MovieTrackingPlaneMarker *plane_marker = BKE_tracking_plane_marker_get(plane_track, start_frame);
	int index = plane_marker - plane_track->markers;
	const int frame_delta = direction > 0 ? 1 : -1;

	while (index >= 0 && index < plane_track->markersnr) {
		plane_marker = &plane_track->markers[index];

		if ((plane_marker->flag & PLANE_MARKER_TRACKED) == 0) {
			return plane_marker;
		}

		/* Advance the index together with the marker: the original code only
		 * advanced the pointer, so the bounds check never failed and the loop
		 * walked past the array.
		 */
		index += frame_delta;
	}

	return NULL;
}
/* Re-track the plane within the segment around start_frame, bounded by
 * the surrounding keyframed plane markers (when present).
 */
void BKE_tracking_retrack_plane_from_existing_motion_at_segment(MovieTrackingPlaneTrack *plane_track, int start_frame)
{
	MovieTrackingPlaneMarker *keyframe_before = find_plane_keyframe(plane_track, start_frame, -1);
	MovieTrackingPlaneMarker *keyframe_after = find_plane_keyframe(plane_track, start_frame, 1);

	if (keyframe_before != NULL && keyframe_after != NULL) {
		/* First we track from left keyframe to the right one without any blending. */
		track_plane_from_existing_motion(plane_track, keyframe_before->framenr, 1, true);

		/* And then we track from the right keyframe to the left one, so shape blends in nicely */
		track_plane_from_existing_motion(plane_track, keyframe_after->framenr, -1, false);
	}
	else if (keyframe_before != NULL) {
		track_plane_from_existing_motion(plane_track, keyframe_before->framenr, 1, true);
	}
	else if (keyframe_after != NULL) {
		track_plane_from_existing_motion(plane_track, keyframe_after->framenr, -1, true);
	}
}
/* Convert four float 2D corners into double precision. */
BLI_INLINE void float_corners_to_double(/*const*/ float corners[4][2], double double_corners[4][2])
{
	int corner_index;

	for (corner_index = 0; corner_index < 4; corner_index++) {
		copy_v2db_v2fl(double_corners[corner_index], corners[corner_index]);
	}
}
/* Compute the homography H (float 3x3 matrix) mapping the quad given by
 * reference_corners onto the quad given by corners.
 *
 * Both inputs are arrays of four 2D corners; the estimation runs in
 * double precision inside libmv and the result is converted to floats.
 */
void BKE_tracking_homography_between_two_quads(/*const*/ float reference_corners[4][2], /*const*/ float corners[4][2], float H[3][3])
{
	Vec2 x1[4], x2[4];
	double H_double[3][3];

	float_corners_to_double(reference_corners, x1);
	float_corners_to_double(corners, x2);

	libmv_homography2DFromCorrespondencesEuc(x1, x2, 4, H_double);
	copy_m3_m3d(H, H_double);
}

@ -0,0 +1,801 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
* Keir Mierle
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/tracking_region_tracker.c
* \ingroup bke
*
* This file contains implementation of blender-side region tracker
* which is used for 2D feature tracking.
*/
#include "MEM_guardedalloc.h"
#include "DNA_movieclip_types.h"
#include "DNA_object_types.h" /* SELECT */
#include "BLI_utildefines.h"
#include "BLI_math.h"
#include "BLI_ghash.h"
#include "BLI_threads.h"
#include "BKE_tracking.h"
#include "BKE_movieclip.h"
#include "IMB_imbuf_types.h"
#include "IMB_imbuf.h"
#include "libmv-capi.h"
#include "tracking_private.h"
/* Per-track state used while 2D tracking is running. */
typedef struct TrackContext {
	/* the reference marker and cutout search area */
	MovieTrackingMarker reference_marker;

	/* keyframed patch. This is the search area: a gray-scale float buffer
	 * extracted from the reference frame (see track_get_search_floatbuf). */
	float *search_area;
	int search_area_height;
	int search_area_width;

	/* NOTE(review): not referenced anywhere in this file -- presumably the
	 * frame the reference patch was taken from; confirm before relying on it. */
	int framenr;

	/* Tracking mask, only filled in when the track has
	 * TRACK_ALGORITHM_FLAG_USE_MASK enabled. */
	float *mask;
} TrackContext;
/* Context describing a whole 2D tracking session; holds copies of the
 * data needed so the clip can be modified while tracking runs. */
typedef struct MovieTrackingContext {
	/* Copy of the clip user; render size/flag are forced to full
	 * resolution in BKE_tracking_context_new. */
	MovieClipUser user;
	MovieClip *clip;
	/* Subset of clip->flag (timecode/proxy bits) passed to get_buffer functions. */
	int clip_flag;

	/* NOTE(review): `frames` is not referenced in this file; first_frame is
	 * the user frame number tracking started from. */
	int frames, first_frame;
	/* True until the first tracking step has been performed. */
	bool first_time;

	MovieTrackingSettings settings;
	TracksMap *tracks_map;

	/* Tracking direction, and whether this context runs as a sequence job. */
	bool backwards, sequence;
	/* Frame number the clip editor should be synced to (see
	 * BKE_tracking_context_sync_user). */
	int sync_frame;
} MovieTrackingContext;
static void track_context_free(void *customdata)
{
TrackContext *track_context = (TrackContext *)customdata;
if (track_context->search_area)
MEM_freeN(track_context->search_area);
if (track_context->mask)
MEM_freeN(track_context->mask);
}
/* Create context for motion 2D tracking, copies all data needed
 * for thread-safe tracking, allowing clip modifications during
 * tracking.
 */
MovieTrackingContext *BKE_tracking_context_new(MovieClip *clip, MovieClipUser *user, short backwards, short sequence)
{
	MovieTrackingContext *context = MEM_callocN(sizeof(MovieTrackingContext), "trackingContext");
	MovieTracking *tracking = &clip->tracking;
	MovieTrackingSettings *settings = &tracking->settings;
	ListBase *tracksbase = BKE_tracking_get_active_tracks(tracking);
	MovieTrackingTrack *track;
	MovieTrackingObject *object = BKE_tracking_object_get_active(tracking);
	int num_tracks = 0;

	context->clip = clip;
	context->settings = *settings;
	context->backwards = backwards;
	context->sync_frame = user->framenr;
	context->first_time = true;
	context->first_frame = user->framenr;
	context->sequence = sequence;

	/* count selected, visible and unlocked tracks which have an enabled
	 * marker at the current frame */
	track = tracksbase->first;
	while (track) {
		if (TRACK_SELECTED(track) && (track->flag & (TRACK_LOCKED | TRACK_HIDDEN)) == 0) {
			int framenr = BKE_movieclip_remap_scene_to_clip_frame(clip, user->framenr);
			MovieTrackingMarker *marker = BKE_tracking_marker_get(track, framenr);

			if ((marker->flag & MARKER_DISABLED) == 0)
				num_tracks++;
		}

		track = track->next;
	}

	/* create tracking contexts for all tracks which would be tracked */
	if (num_tracks) {
		int width, height;

		context->tracks_map = tracks_map_new(object->name, object->flag & TRACKING_OBJECT_CAMERA,
		                                     num_tracks, sizeof(TrackContext));

		BKE_movieclip_get_size(clip, user, &width, &height);

		/* create tracking data */
		track = tracksbase->first;
		while (track) {
			if (TRACK_SELECTED(track) && (track->flag & (TRACK_HIDDEN | TRACK_LOCKED)) == 0) {
				int framenr = BKE_movieclip_remap_scene_to_clip_frame(clip, user->framenr);
				MovieTrackingMarker *marker = BKE_tracking_marker_get(track, framenr);

				if ((marker->flag & MARKER_DISABLED) == 0) {
					/* Zero-initialized per-track context; filled lazily while tracking. */
					TrackContext track_context;
					memset(&track_context, 0, sizeof(TrackContext));
					tracks_map_insert(context->tracks_map, track, &track_context);
				}
			}

			track = track->next;
		}
	}

	/* store needed clip flags passing to get_buffer functions
	 * - MCLIP_USE_PROXY is needed because timecode affects on movie clip
	 *   only in case Proxy/Timecode flag is set, so store this flag to use
	 *   timecodes properly but reset render size to SIZE_FULL so correct resolution
	 *   would be used for images
	 * - MCLIP_USE_PROXY_CUSTOM_DIR is needed because proxy/timecode files might
	 *   be stored in a different location
	 * ignore all the rest possible flags for now
	 */
	context->clip_flag = clip->flag & MCLIP_TIMECODE_FLAGS;

	context->user = *user;
	context->user.render_size = MCLIP_PROXY_RENDER_SIZE_FULL;
	context->user.render_flag = 0;

	/* Sequence jobs manage threaded malloc themselves. */
	if (!sequence)
		BLI_begin_threaded_malloc();

	return context;
}
/* Free the tracking context and all per-track data it owns. */
void BKE_tracking_context_free(MovieTrackingContext *context)
{
	if (context->sequence == false)
		BLI_end_threaded_malloc();

	tracks_map_free(context->tracks_map, track_context_free);

	MEM_freeN(context);
}
/* Synchronize tracks between clip editor and tracking context,
 * by merging them together so all new created tracks and tracked
 * ones presents in the movie clip.
 */
void BKE_tracking_context_sync(MovieTrackingContext *context)
{
	MovieTracking *tracking = &context->clip->tracking;
	const int newframe = context->backwards ? context->user.framenr + 1
	                                        : context->user.framenr - 1;

	tracks_map_merge(context->tracks_map, tracking);

	context->sync_frame = newframe;

	BKE_tracking_dopesheet_tag_update(tracking);
}
/* Synchronize clip user's frame number with a frame number from tracking context,
 * used to update current frame displayed in the clip editor while tracking.
 */
void BKE_tracking_context_sync_user(const MovieTrackingContext *context, MovieClipUser *user)
{
	user->framenr = context->sync_frame;
}
/* **** utility functions for tracking **** */
/* Convert float RGBA pixels to gray-scale using the given per-channel
 * weights; alpha is ignored.
 */
static void float_rgba_to_gray(const float *rgba, float *gray, int num_pixels,
                               float weight_red, float weight_green, float weight_blue)
{
	int pixel_index;

	for (pixel_index = 0; pixel_index < num_pixels; pixel_index++) {
		const float *rgba_pixel = &rgba[pixel_index * 4];

		gray[pixel_index] = weight_red * rgba_pixel[0] +
		                    weight_green * rgba_pixel[1] +
		                    weight_blue * rgba_pixel[2];
	}
}
/* Convert byte RGBA pixels to float gray-scale in [0, 1] using the given
 * per-channel weights; alpha is ignored.
 */
static void uint8_rgba_to_float_gray(const unsigned char *rgba, float *gray, int num_pixels,
                                     float weight_red, float weight_green, float weight_blue)
{
	int pixel_index;

	for (pixel_index = 0; pixel_index < num_pixels; pixel_index++) {
		const unsigned char *rgba_pixel = &rgba[pixel_index * 4];
		const float weighted_sum = weight_red * rgba_pixel[0] +
		                           weight_green * rgba_pixel[1] +
		                           weight_blue * rgba_pixel[2];

		gray[pixel_index] = weighted_sum / 255.0f;
	}
}
/* Get a gray-scale float search area buffer for the given marker and frame.
 *
 * Returns NULL (and zero dimensions) when the search image buffer could
 * not be obtained; otherwise the caller owns the returned buffer.
 */
static float *track_get_search_floatbuf(ImBuf *ibuf, MovieTrackingTrack *track, MovieTrackingMarker *marker,
                                        int *width_r, int *height_r)
{
	ImBuf *searchibuf = BKE_tracking_get_search_imbuf(ibuf, track, marker, false, true);
	float *gray_pixels;
	int num_pixels;

	if (searchibuf == NULL) {
		*width_r = 0;
		*height_r = 0;
		return NULL;
	}

	*width_r = searchibuf->x;
	*height_r = searchibuf->y;

	num_pixels = searchibuf->x * searchibuf->y;
	gray_pixels = MEM_callocN(num_pixels * sizeof(float), "tracking floatBuf");

	/* Rec. 709 luma weights for both float and byte source buffers. */
	if (searchibuf->rect_float) {
		float_rgba_to_gray(searchibuf->rect_float, gray_pixels, num_pixels,
		                   0.2126f, 0.7152f, 0.0722f);
	}
	else {
		uint8_rgba_to_float_gray((unsigned char *)searchibuf->rect, gray_pixels, num_pixels,
		                         0.2126f, 0.7152f, 0.0722f);
	}

	IMB_freeImBuf(searchibuf);

	return gray_pixels;
}
/* Get image buffer for a given frame, skipping the movie clip cache.
 *
 * Frame is in clip space.
 */
static ImBuf *tracking_context_get_frame_ibuf(MovieClip *clip, MovieClipUser *user, int clip_flag, int framenr)
{
	MovieClipUser new_user = *user;

	new_user.framenr = BKE_movieclip_remap_clip_to_scene_frame(clip, framenr);

	return BKE_movieclip_get_ibuf_flag(clip, &new_user, clip_flag, MOVIECLIP_CACHE_SKIP);
}
/* Get the previous keyframed marker of the track relative to curfra,
 * walking away from curfra against the tracking direction.
 *
 * A marker is considered keyframed when it is enabled and does not have
 * MARKER_TRACKED set. When no real keyframe exists, falls back to the
 * first marker of the current tracked segment (the enabled marker right
 * before a disabled one). May return NULL.
 */
static MovieTrackingMarker *tracking_context_get_keyframed_marker(MovieTrackingTrack *track,
                                                                  int curfra, bool backwards)
{
	MovieTrackingMarker *marker_keyed = NULL;
	MovieTrackingMarker *marker_keyed_fallback = NULL;
	/* Index of the marker at (or nearest to) curfra in the markers array. */
	int a = BKE_tracking_marker_get(track, curfra) - track->markers;

	while (a >= 0 && a < track->markersnr) {
		/* When tracking backwards the reference lies at higher frames,
		 * hence a higher array index. */
		int next = backwards ? a + 1 : a - 1;
		bool is_keyframed = false;
		MovieTrackingMarker *cur_marker = &track->markers[a];
		MovieTrackingMarker *next_marker = NULL;

		if (next >= 0 && next < track->markersnr)
			next_marker = &track->markers[next];

		if ((cur_marker->flag & MARKER_DISABLED) == 0) {
			/* If it'll happen so we didn't find a real keyframe marker,
			 * fallback to the first marker in current tracked segment
			 * as a keyframe.
			 */
			if (next_marker && next_marker->flag & MARKER_DISABLED) {
				if (marker_keyed_fallback == NULL)
					marker_keyed_fallback = cur_marker;
			}

			is_keyframed |= (cur_marker->flag & MARKER_TRACKED) == 0;
		}

		if (is_keyframed) {
			marker_keyed = cur_marker;
			break;
		}

		a = next;
	}

	if (marker_keyed == NULL)
		marker_keyed = marker_keyed_fallback;

	return marker_keyed;
}
/* Get image buffer for the previous keyframed marker of the track,
 * storing the keyframed marker itself in marker_keyed_r.
 *
 * Returns NULL when no keyframed marker could be found.
 */
static ImBuf *tracking_context_get_keyframed_ibuf(MovieClip *clip, MovieClipUser *user, int clip_flag,
                                                  MovieTrackingTrack *track, int curfra, bool backwards,
                                                  MovieTrackingMarker **marker_keyed_r)
{
	MovieTrackingMarker *marker_keyed = tracking_context_get_keyframed_marker(track, curfra, backwards);

	if (marker_keyed == NULL) {
		return NULL;
	}

	*marker_keyed_r = marker_keyed;

	return tracking_context_get_frame_ibuf(clip, user, clip_flag, marker_keyed->framenr);
}
/* Get the image buffer which is used as reference for the track, and the
 * marker at the reference position (either the keyframed marker or the
 * marker at the current frame, depending on the track's matching mode).
 */
static ImBuf *tracking_context_get_reference_ibuf(MovieClip *clip, MovieClipUser *user, int clip_flag,
                                                  MovieTrackingTrack *track, int curfra, bool backwards,
                                                  MovieTrackingMarker **reference_marker)
{
	if (track->pattern_match == TRACK_MATCH_KEYFRAME) {
		return tracking_context_get_keyframed_ibuf(clip, user, clip_flag, track, curfra, backwards, reference_marker);
	}

	/* Use the current marker as the keyframed position. */
	*reference_marker = BKE_tracking_marker_get(track, curfra);

	return tracking_context_get_frame_ibuf(clip, user, clip_flag, curfra);
}
/* Update track's reference patch (patch from which track is tracking from)
 *
 * Returns false if reference image buffer failed to load.
 */
static bool track_context_update_reference(MovieTrackingContext *context, TrackContext *track_context,
                                           MovieTrackingTrack *track, MovieTrackingMarker *marker, int curfra,
                                           int frame_width, int frame_height)
{
	MovieTrackingMarker *reference_marker = NULL;
	ImBuf *reference_ibuf = NULL;
	int width, height;

	/* calculate patch for keyframed position */
	reference_ibuf = tracking_context_get_reference_ibuf(context->clip, &context->user, context->clip_flag,
	                                                     track, curfra, context->backwards, &reference_marker);
	if (!reference_ibuf)
		return false;

	track_context->reference_marker = *reference_marker;

	/* Replace any previously cached search area with the one extracted
	 * from the reference frame. */
	if (track_context->search_area) {
		MEM_freeN(track_context->search_area);
	}

	track_context->search_area = track_get_search_floatbuf(reference_ibuf, track, reference_marker, &width, &height);
	track_context->search_area_height = height;
	track_context->search_area_width = width;

	/* Refresh the tracking mask when the track uses one. */
	if ((track->algorithm_flag & TRACK_ALGORITHM_FLAG_USE_MASK) != 0) {
		if (track_context->mask)
			MEM_freeN(track_context->mask);

		track_context->mask = BKE_tracking_track_get_mask(frame_width, frame_height, track, marker);
	}

	IMB_freeImBuf(reference_ibuf);

	return true;
}
/* Fill in the libmv tracker options structure with settings taken from
 * the track which are needed to perform the track.
 */
static void tracking_configure_tracker(const MovieTrackingTrack *track, float *mask,
                                       libmv_TrackRegionOptions *options)
{
	const bool use_mask = (track->algorithm_flag & TRACK_ALGORITHM_FLAG_USE_MASK) != 0;

	options->motion_model = track->motion_model;
	options->use_brute = (track->algorithm_flag & TRACK_ALGORITHM_FLAG_USE_BRUTE) != 0;
	options->use_normalization = (track->algorithm_flag & TRACK_ALGORITHM_FLAG_USE_NORMALIZATION) != 0;
	options->num_iterations = 50;
	options->minimum_correlation = track->minimum_correlation;
	options->sigma = 0.9;
	options->image1_mask = use_mask ? mask : NULL;
}
/* Returns false if the marker crossed the margin area from frame bounds,
 * i.e. it is too close to the frame boundary to be tracked safely.
 */
static bool tracking_check_marker_margin(MovieTrackingTrack *track, MovieTrackingMarker *marker,
                                         int frame_width, int frame_height)
{
	float pat_min[2], pat_max[2];
	float margin_left, margin_top, margin_right, margin_bottom;
	float normalized_track_margin[2];

	/* margin from frame boundaries */
	BKE_tracking_marker_pattern_minmax(marker, pat_min, pat_max);
	normalized_track_margin[0] = (float)track->margin / frame_width;
	normalized_track_margin[1] = (float)track->margin / frame_height;

	/* Effective margin on each side is the larger of the track's margin
	 * setting and the pattern extent on that side. The min side of the
	 * pattern bound is negated -- presumably pattern min/max are relative
	 * to the marker position (min being negative); confirm against
	 * BKE_tracking_marker_pattern_minmax. */
	margin_left = max_ff(-pat_min[0], normalized_track_margin[0]);
	margin_top = max_ff( pat_max[1], normalized_track_margin[1]);
	margin_right = max_ff( pat_max[0], normalized_track_margin[0]);
	margin_bottom = max_ff(-pat_min[1], normalized_track_margin[1]);

	/* do not track markers which are too close to boundary */
	if (marker->pos[0] < margin_left || marker->pos[0] > 1.0f - margin_right ||
	    marker->pos[1] < margin_bottom || marker->pos[1] > 1.0f - margin_top)
	{
		return false;
	}

	return true;
}
/* Scale the search area of the marker based on the scale change of the
 * pattern area.
 *
 * TODO(sergey): currently based on pattern bounding box scale change,
 * smarter approach here is welcome.
 */
static void tracking_scale_marker_search(const MovieTrackingMarker *old_marker, MovieTrackingMarker *new_marker)
{
	float old_min[2], old_max[2];
	float new_min[2], new_max[2];
	float scale[2];
	int axis;

	BKE_tracking_marker_pattern_minmax(old_marker, old_min, old_max);
	BKE_tracking_marker_pattern_minmax(new_marker, new_min, new_max);

	/* Per-axis scale factor of the pattern bounding box. */
	for (axis = 0; axis < 2; axis++) {
		scale[axis] = (new_max[axis] - new_min[axis]) / (old_max[axis] - old_min[axis]);
	}

	new_marker->search_min[0] *= scale[0];
	new_marker->search_min[1] *= scale[1];
	new_marker->search_max[0] *= scale[0];
	new_marker->search_max[1] *= scale[1];
}
/* Insert new marker which was tracked from old_marker to a new image,
 * will also ensure tracked segment is surrounded by disabled markers.
 */
static void tracking_insert_new_marker(MovieTrackingContext *context, MovieTrackingTrack *track,
                                       const MovieTrackingMarker *old_marker, int curfra, bool tracked,
                                       int frame_width, int frame_height,
                                       double dst_pixel_x[5], double dst_pixel_y[5])
{
	MovieTrackingMarker new_marker;
	int frame_delta = context->backwards ? -1 : 1;
	int nextfra = curfra + frame_delta;

	new_marker = *old_marker;

	if (tracked) {
		/* Convert the tracker result (pixel space) back into the marker's
		 * normalized coordinates. */
		tracking_set_marker_coords_from_tracking(frame_width, frame_height, &new_marker, dst_pixel_x, dst_pixel_y);
		new_marker.flag |= MARKER_TRACKED;
		new_marker.framenr = nextfra;

		/* Keep the search area proportional to the pattern's scale change. */
		tracking_scale_marker_search(old_marker, &new_marker);

		if (context->first_time) {
			/* check if there's no keyframe/tracked markers before tracking marker.
			 * if so -- create disabled marker before currently tracking "segment"
			 */
			tracking_marker_insert_disabled(track, old_marker, !context->backwards, false);
		}

		/* insert currently tracked marker */
		BKE_tracking_marker_insert(track, &new_marker);

		/* make currently tracked segment be finished with disabled marker */
		tracking_marker_insert_disabled(track, &new_marker, context->backwards, false);
	}
	else {
		/* Tracking failed: insert a disabled marker at the next frame so the
		 * failure is visible and the segment ends there. */
		new_marker.framenr = nextfra;
		new_marker.flag |= MARKER_DISABLED;

		BKE_tracking_marker_insert(track, &new_marker);
	}
}
/* Perform tracking from a reference_marker to destination_ibuf.
 * Uses marker as an initial position guess.
 *
 * Returns true if the tracker returned success, putting the result
 * into dst_pixel_x and dst_pixel_y.
 */
static bool configure_and_run_tracker(ImBuf *destination_ibuf, MovieTrackingTrack *track,
                                      MovieTrackingMarker *reference_marker, MovieTrackingMarker *marker,
                                      float *reference_search_area, int reference_search_area_width,
                                      int reference_search_area_height, float *mask,
                                      double dst_pixel_x[5], double dst_pixel_y[5])
{
	/* To convert to the x/y split array format for libmv. */
	double src_pixel_x[5], src_pixel_y[5];

	/* Settings for the tracker */
	libmv_TrackRegionOptions options = {0};
	libmv_TrackRegionResult result;

	float *patch_new;

	int new_search_area_width, new_search_area_height;
	int frame_width, frame_height;

	bool tracked;

	frame_width = destination_ibuf->x;
	frame_height = destination_ibuf->y;

	/* for now track to the same search area dimension as marker has got for current frame
	 * will make all tracked markers in currently tracked segment have the same search area
	 * size, but it's quite close to what is actually needed
	 */
	patch_new = track_get_search_floatbuf(destination_ibuf, track, marker,
	                                      &new_search_area_width, &new_search_area_height);

	/* Bail out as soon as either patch is missing. Checking before the rest
	 * of the setup (and freeing the destination patch) fixes a leak in the
	 * original code, where patch_new was leaked when only the reference
	 * search area was NULL.
	 */
	if (patch_new == NULL || reference_search_area == NULL) {
		if (patch_new != NULL) {
			MEM_freeN(patch_new);
		}
		return false;
	}

	/* configure the tracker */
	tracking_configure_tracker(track, mask, &options);

	/* convert the marker corners and center into pixel coordinates in the search/destination images. */
	tracking_get_marker_coords_for_tracking(frame_width, frame_height, reference_marker, src_pixel_x, src_pixel_y);
	tracking_get_marker_coords_for_tracking(frame_width, frame_height, marker, dst_pixel_x, dst_pixel_y);

	/* run the tracker! */
	tracked = libmv_trackRegion(&options,
	                            reference_search_area,
	                            reference_search_area_width,
	                            reference_search_area_height,
	                            patch_new,
	                            new_search_area_width,
	                            new_search_area_height,
	                            src_pixel_x, src_pixel_y,
	                            &result,
	                            dst_pixel_x, dst_pixel_y);

	MEM_freeN(patch_new);

	return tracked;
}
/* Track all the tracks from context one more frame,
 * returns false if nothing was tracked.
 */
bool BKE_tracking_context_step(MovieTrackingContext *context)
{
	ImBuf *destination_ibuf;
	int frame_delta = context->backwards ? -1 : 1;
	int curfra = BKE_movieclip_remap_scene_to_clip_frame(context->clip, context->user.framenr);
	int a, map_size;
	bool ok = false;

	int frame_width, frame_height;

	map_size = tracks_map_get_size(context->tracks_map);

	/* Nothing to track, avoid unneeded frames reading to save time and memory. */
	if (!map_size)
		return false;

	/* Get an image buffer for frame we're tracking to. */
	context->user.framenr += frame_delta;

	destination_ibuf = BKE_movieclip_get_ibuf_flag(context->clip, &context->user,
	                                               context->clip_flag, MOVIECLIP_CACHE_SKIP);
	if (!destination_ibuf)
		return false;

	frame_width = destination_ibuf->x;
	frame_height = destination_ibuf->y;

	/* Every track is tracked independently, so the loop can run in
	 * parallel; writes to shared tracking data happen in the critical
	 * section below. */
#pragma omp parallel for private(a) shared(destination_ibuf, ok) if (map_size > 1)
	for (a = 0; a < map_size; a++) {
		TrackContext *track_context = NULL;
		MovieTrackingTrack *track;
		MovieTrackingMarker *marker;

		tracks_map_get_indexed_element(context->tracks_map, a, &track, (void **)&track_context);

		/* Only track forward from an enabled marker which exists exactly
		 * at the current frame. */
		marker = BKE_tracking_marker_get_exact(track, curfra);

		if (marker && (marker->flag & MARKER_DISABLED) == 0) {
			bool tracked = false, need_readjust;
			double dst_pixel_x[5], dst_pixel_y[5];

			/* With keyframe pattern matching the reference is set up once
			 * per tracked segment, otherwise it is re-acquired from the
			 * previous frame on every step. */
			if (track->pattern_match == TRACK_MATCH_KEYFRAME)
				need_readjust = context->first_time;
			else
				need_readjust = true;

			/* do not track markers which are too close to boundary */
			if (tracking_check_marker_margin(track, marker, frame_width, frame_height)) {
				if (need_readjust) {
					if (track_context_update_reference(context, track_context, track, marker,
					                                   curfra, frame_width, frame_height) == false)
					{
						/* happens when reference frame fails to be loaded */
						continue;
					}
				}

				tracked = configure_and_run_tracker(destination_ibuf, track,
				                                    &track_context->reference_marker, marker,
				                                    track_context->search_area,
				                                    track_context->search_area_width,
				                                    track_context->search_area_height,
				                                    track_context->mask,
				                                    dst_pixel_x, dst_pixel_y);
			}

			/* Marker insertion mutates shared track data, serialize it. */
#pragma omp critical
			{
				tracking_insert_new_marker(context, track, marker, curfra, tracked,
				                           frame_width, frame_height, dst_pixel_x, dst_pixel_y);
			}

			ok = true;
		}
	}

	IMB_freeImBuf(destination_ibuf);

	context->first_time = false;
	context->frames++;

	return ok;
}
/* Finish a tracking session: for every plane track which is not
 * auto-keyed, re-evaluate its motion if any of the point tracks it
 * uses were tracked by this context.
 */
void BKE_tracking_context_finish(MovieTrackingContext *context)
{
	MovieClip *clip = context->clip;
	ListBase *plane_tracks_base = BKE_tracking_get_active_plane_tracks(&clip->tracking);
	MovieTrackingPlaneTrack *plane_track;
	int map_size = tracks_map_get_size(context->tracks_map);

	for (plane_track = plane_tracks_base->first;
	     plane_track;
	     plane_track = plane_track->next)
	{
		if ((plane_track->flag & PLANE_TRACK_AUTOKEY) == 0) {
			int i;
			for (i = 0; i < map_size; i++) {
				TrackContext *track_context = NULL;
				MovieTrackingTrack *track, *old_track;
				bool do_update = false;
				int j;

				tracks_map_get_indexed_element(context->tracks_map, i, &track, (void **)&track_context);

				/* Map the context's track copy back to the original track,
				 * which is what plane tracks reference. */
				old_track = BLI_ghash_lookup(context->tracks_map->hash, track);
				for (j = 0; j < plane_track->point_tracksnr; j++) {
					if (plane_track->point_tracks[j] == old_track) {
						do_update = true;
						break;
					}
				}

				/* A single affected point track is enough to know this
				 * plane needs re-evaluation. */
				if (do_update) {
					BKE_tracking_track_plane_from_existing_motion(plane_track, context->first_frame);
					break;
				}
			}
		}
	}
}
/* Refine marker's position using previously known keyframe.
 * Direction of searching for a keyframe depends on backwards flag,
 * which means if backwards is false, previous keyframe will be as
 * reference.
 *
 * NOTE: previous code leaked reference_ibuf when the found reference
 * marker was the marker being refined itself.
 */
void BKE_tracking_refine_marker(MovieClip *clip, MovieTrackingTrack *track, MovieTrackingMarker *marker, bool backwards)
{
	MovieTrackingMarker *reference_marker = NULL;
	ImBuf *reference_ibuf, *destination_ibuf;
	float *search_area, *mask = NULL;
	int frame_width, frame_height;
	int search_area_height, search_area_width;
	int clip_flag = clip->flag & MCLIP_TIMECODE_FLAGS;
	int reference_framenr;
	MovieClipUser user = {0};
	double dst_pixel_x[5], dst_pixel_y[5];
	bool tracked;

	/* Construct a temporary clip user, used to acquire image buffers. */
	user.framenr = BKE_movieclip_remap_clip_to_scene_frame(clip, marker->framenr);

	BKE_movieclip_get_size(clip, &user, &frame_width, &frame_height);

	/* Get an image buffer for reference frame, also gets reference marker.
	 *
	 * Usually tracking_context_get_reference_ibuf will return current frame
	 * if marker is keyframed, which is correct for normal tracking. But here
	 * we'll want to have next/previous frame in such cases. So let's use small
	 * magic with original frame number used to get reference frame for.
	 */
	reference_framenr = backwards ? marker->framenr + 1 : marker->framenr - 1;
	reference_ibuf = tracking_context_get_reference_ibuf(clip, &user, clip_flag, track, reference_framenr,
	                                                     backwards, &reference_marker);
	if (reference_ibuf == NULL) {
		return;
	}

	/* Could not refine with self. */
	if (reference_marker == marker) {
		/* Release the reference buffer which was acquired above. */
		IMB_freeImBuf(reference_ibuf);
		return;
	}

	/* Destination image buffer has got frame number corresponding to refining marker. */
	destination_ibuf = BKE_movieclip_get_ibuf_flag(clip, &user, clip_flag, MOVIECLIP_CACHE_SKIP);
	if (destination_ibuf == NULL) {
		IMB_freeImBuf(reference_ibuf);
		return;
	}

	/* Get search area from reference image. */
	search_area = track_get_search_floatbuf(reference_ibuf, track, reference_marker,
	                                        &search_area_width, &search_area_height);

	/* If needed, compute track's mask. */
	if ((track->algorithm_flag & TRACK_ALGORITHM_FLAG_USE_MASK) != 0)
		mask = BKE_tracking_track_get_mask(frame_width, frame_height, track, marker);

	/* Run the tracker from reference frame to current one. */
	tracked = configure_and_run_tracker(destination_ibuf, track, reference_marker, marker,
	                                    search_area, search_area_width, search_area_height,
	                                    mask, dst_pixel_x, dst_pixel_y);

	/* Refine current marker's position if track was successful. */
	if (tracked) {
		tracking_set_marker_coords_from_tracking(frame_width, frame_height, marker, dst_pixel_x, dst_pixel_y);
		marker->flag |= MARKER_TRACKED;
	}

	/* Free memory used for refining */
	MEM_freeN(search_area);
	if (mask)
		MEM_freeN(mask);
	IMB_freeImBuf(reference_ibuf);
	IMB_freeImBuf(destination_ibuf);
}

@ -0,0 +1,619 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
* Keir Mierle
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/tracking_solver.c
* \ingroup bke
*
* This file contains blender-side implementation of camera solver.
*/
#include <limits.h>
#include "MEM_guardedalloc.h"
#include "DNA_anim_types.h"
#include "DNA_movieclip_types.h"
#include "BLI_utildefines.h"
#include "BLI_math.h"
#include "BLI_listbase.h"
#include "BLI_string.h"
#include "BLF_translation.h"
#include "BKE_fcurve.h"
#include "BKE_tracking.h"
#include "BKE_movieclip.h"
#include "RNA_access.h"
#include "libmv-capi.h"
#include "tracking_private.h"
/* Context which is passed along to the camera/object solving job.
 *
 * All the data is copied from the movie clip at context creation time,
 * so editing the clip from the interface is safe while the
 * reconstruction job is in progress.
 */
typedef struct MovieReconstructContext {
	struct libmv_Tracks *tracks;  /* Tracks converted to libmv's representation. */
	bool select_keyframes;        /* Use automatic keyframe selection. */
	int keyframe1, keyframe2;     /* Keyframes used for reconstruction initialization. */
	int refine_flags;             /* LIBMV_REFINE_* bitfield of intrinsics to refine. */

	struct libmv_Reconstruction *reconstruction;  /* Solve result, owned by libmv. */

	char object_name[MAX_NAME];  /* Name of the tracking object being solved. */
	bool is_camera;              /* True for camera solving, false for object tracking. */
	short motion_flag;           /* Copy of the tracking settings' motion_flag. */

	/* Camera intrinsics snapshot taken when the context was created. */
	float focal_length;
	float principal_point[2];
	float k1, k2, k3;
	int width, height;

	float reprojection_error;  /* Average reprojection error after solving. */

	TracksMap *tracks_map;  /* Maps context track copies back to original tracks. */

	int sfra, efra;  /* Frame range covered by not-disabled markers. */
} MovieReconstructContext;
/* Data passed to the reconstruction progress callback. */
typedef struct ReconstructProgressData {
	short *stop;          /* Stop request flag (not read by the callback currently). */
	short *do_update;     /* Set when the interface needs to redraw progress. */
	float *progress;      /* Overall solve progress, may be NULL. */
	char *stats_message;  /* Buffer for a human-readable status string. */
	int message_size;     /* Size of stats_message buffer, in bytes. */
} ReconstructProgressData;
/* Build a libmv Tracks structure from blender's track list.
 *
 * Marker positions are converted to pixel space (track offset applied),
 * and per-marker weight is evaluated from the track's animated "weight"
 * F-Curve when one exists.
 */
static struct libmv_Tracks *libmv_tracks_new(MovieClip *clip, ListBase *tracksbase, int width, int height)
{
	struct libmv_Tracks *tracks = libmv_tracksNew();
	MovieTrackingTrack *track;
	int tracknr = 0;

	for (track = tracksbase->first; track; track = track->next, tracknr++) {
		FCurve *weight_fcurve = id_data_find_fcurve(&clip->id, track, &RNA_MovieTrackingTrack,
		                                            "weight", 0, NULL);
		int i;

		for (i = 0; i < track->markersnr; i++) {
			MovieTrackingMarker *marker = &track->markers[i];
			float weight;

			/* Disabled markers do not contribute to the solve. */
			if (marker->flag & MARKER_DISABLED) {
				continue;
			}

			weight = track->weight;
			if (weight_fcurve) {
				/* F-Curve is keyed in scene time, remap from clip time. */
				int scene_framenr =
					BKE_movieclip_remap_clip_to_scene_frame(clip, marker->framenr);
				weight = evaluate_fcurve(weight_fcurve, scene_framenr);
			}

			libmv_tracksInsert(tracks, marker->framenr, tracknr,
			                   (marker->pos[0] + track->offset[0]) * width,
			                   (marker->pos[1] + track->offset[1]) * height,
			                   weight);
		}
	}

	return tracks;
}
/* Retrieve refined camera intrinsics from libmv to blender. */
static void reconstruct_retrieve_libmv_intrinsics(MovieReconstructContext *context, MovieTracking *tracking)
{
	struct libmv_Reconstruction *libmv_reconstruction = context->reconstruction;
	struct libmv_CameraIntrinsics *libmv_intrinsics = libmv_reconstructionExtractIntrinsics(libmv_reconstruction);

	float aspy = 1.0f / tracking->camera.pixel_aspect;

	double focal_length, principal_x, principal_y, k1, k2, k3;
	int width, height;

	libmv_cameraIntrinsicsExtract(libmv_intrinsics, &focal_length, &principal_x, &principal_y,
	                              &k1, &k2, &k3, &width, &height);

	tracking->camera.focal = focal_length;

	tracking->camera.principal[0] = principal_x;
	/* Libmv works in square-pixel space; apply pixel aspect back on the
	 * y-coordinate of the principal point. */
	tracking->camera.principal[1] = principal_y / (double)aspy;

	tracking->camera.k1 = k1;
	tracking->camera.k2 = k2;
	tracking->camera.k3 = k3;
}
/* Retrieve reconstructed tracks from libmv to blender.
 * Actually, this also copies reconstructed cameras
 * from libmv to movie clip datablock.
 *
 * Returns true when every track got a bundle and every frame in the
 * solved range got a camera; false indicates partial reconstruction.
 */
static bool reconstruct_retrieve_libmv_tracks(MovieReconstructContext *context, MovieTracking *tracking)
{
	struct libmv_Reconstruction *libmv_reconstruction = context->reconstruction;
	MovieTrackingReconstruction *reconstruction = NULL;
	MovieReconstructedCamera *reconstructed;
	MovieTrackingTrack *track;
	ListBase *tracksbase = NULL;
	int tracknr = 0, a;
	bool ok = true;
	bool origin_set = false;
	int sfra = context->sfra, efra = context->efra;
	float imat[4][4];

	/* Pick tracks and reconstruction storage for either the camera or
	 * the named tracking object. */
	if (context->is_camera) {
		tracksbase = &tracking->tracks;
		reconstruction = &tracking->reconstruction;
	}
	else {
		MovieTrackingObject *object = BKE_tracking_object_get_named(tracking, context->object_name);

		tracksbase = &object->tracks;
		reconstruction = &object->reconstruction;
	}

	unit_m4(imat);

	/* Copy reconstructed 3D positions (bundles) to the tracks. Track order
	 * here matches the insertion order used in libmv_tracks_new. */
	track = tracksbase->first;
	while (track) {
		double pos[3];

		if (libmv_reprojectionPointForTrack(libmv_reconstruction, tracknr, pos)) {
			track->bundle_pos[0] = pos[0];
			track->bundle_pos[1] = pos[1];
			track->bundle_pos[2] = pos[2];

			track->flag |= TRACK_HAS_BUNDLE;
			track->error = libmv_reprojectionErrorForTrack(libmv_reconstruction, tracknr);
		}
		else {
			track->flag &= ~TRACK_HAS_BUNDLE;
			ok = false;

			printf("Unable to reconstruct position for track #%d '%s'\n", tracknr, track->name);
		}

		track = track->next;
		tracknr++;
	}

	/* Rebuild the reconstructed cameras array from scratch. */
	if (reconstruction->cameras)
		MEM_freeN(reconstruction->cameras);

	reconstruction->camnr = 0;
	reconstruction->cameras = NULL;
	reconstructed = MEM_callocN((efra - sfra + 1) * sizeof(MovieReconstructedCamera),
	                            "temp reconstructed camera");

	for (a = sfra; a <= efra; a++) {
		double matd[4][4];

		if (libmv_reprojectionCameraForImage(libmv_reconstruction, a, matd)) {
			int i, j;
			float mat[4][4];
			float error = libmv_reprojectionErrorForImage(libmv_reconstruction, a);

			/* Convert double matrix from libmv to blender's float matrix. */
			for (i = 0; i < 4; i++) {
				for (j = 0; j < 4; j++)
					mat[i][j] = matd[i][j];
			}

			/* Ensure first camera has got zero rotation and transform.
			 * This is essential for object tracking to work -- this way
			 * we'll always know object and environment are properly
			 * oriented.
			 *
			 * There's one weak part tho, which is requirement object
			 * motion starts at the same frame as camera motion does,
			 * otherwise it'd be a russian roulette whether object is
			 * aligned correct or not.
			 */
			if (!origin_set) {
				invert_m4_m4(imat, mat);
				unit_m4(mat);
				origin_set = true;
			}
			else {
				mul_m4_m4m4(mat, imat, mat);
			}

			copy_m4_m4(reconstructed[reconstruction->camnr].mat, mat);
			reconstructed[reconstruction->camnr].framenr = a;
			reconstructed[reconstruction->camnr].error = error;
			reconstruction->camnr++;
		}
		else {
			ok = false;
			printf("No camera for frame %d\n", a);
		}
	}

	/* Shrink-copy the temporary array to its actual used size. */
	if (reconstruction->camnr) {
		int size = reconstruction->camnr * sizeof(MovieReconstructedCamera);
		reconstruction->cameras = MEM_callocN(size, "reconstructed camera");
		memcpy(reconstruction->cameras, reconstructed, size);
	}

	/* Bring bundles into the same space as the re-origined cameras. */
	if (origin_set) {
		track = tracksbase->first;
		while (track) {
			if (track->flag & TRACK_HAS_BUNDLE)
				mul_v3_m4v3(track->bundle_pos, imat, track->bundle_pos);

			track = track->next;
		}
	}

	MEM_freeN(reconstructed);

	return ok;
}
/* Retrieve all the libmv data from context to blender's side data blocks.
 * Returns the tracks retrieval status; intrinsics retrieval cannot fail.
 */
static int reconstruct_retrieve_libmv(MovieReconstructContext *context, MovieTracking *tracking)
{
	/* take the intrinsics back from libmv */
	reconstruct_retrieve_libmv_intrinsics(context, tracking);

	return reconstruct_retrieve_libmv_tracks(context, tracking);
}
/* Convert blender's refinement flags to libmv's. */
static int reconstruct_refine_intrinsics_get_flags(MovieTracking *tracking, MovieTrackingObject *object)
{
	/* Table mapping blender-side refinement bits to libmv ones. */
	const struct {
		int refine_flag;
		int libmv_flag;
	} mapping[] = {
		{REFINE_FOCAL_LENGTH, LIBMV_REFINE_FOCAL_LENGTH},
		{REFINE_PRINCIPAL_POINT, LIBMV_REFINE_PRINCIPAL_POINT},
		{REFINE_RADIAL_DISTORTION_K1, LIBMV_REFINE_RADIAL_DISTORTION_K1},
		{REFINE_RADIAL_DISTORTION_K2, LIBMV_REFINE_RADIAL_DISTORTION_K2},
	};
	const int refine = tracking->settings.refine_camera_intrinsics;
	int flags = 0;
	int i;

	/* Intrinsics refinement only applies to the camera object. */
	if ((object->flag & TRACKING_OBJECT_CAMERA) == 0) {
		return 0;
	}

	for (i = 0; i < (int)(sizeof(mapping) / sizeof(mapping[0])); i++) {
		if (refine & mapping[i].refine_flag) {
			flags |= mapping[i].libmv_flag;
		}
	}

	return flags;
}
/* Count tracks which have an enabled marker at both reconstruction keyframes. */
static int reconstruct_count_tracks_on_both_keyframes(MovieTracking *tracking, MovieTrackingObject *object)
{
	ListBase *tracksbase = BKE_tracking_object_get_tracks(tracking, object);
	const int frame1 = object->keyframe1, frame2 = object->keyframe2;
	MovieTrackingTrack *track;
	int tot = 0;

	for (track = tracksbase->first; track; track = track->next) {
		if (BKE_tracking_track_has_enabled_marker_at_frame(track, frame1) &&
		    BKE_tracking_track_has_enabled_marker_at_frame(track, frame2))
		{
			tot++;
		}
	}

	return tot;
}
/* Perform early check on whether everything is fine to start reconstruction.
 * On failure a translatable message is copied into error_msg.
 */
bool BKE_tracking_reconstruction_check(MovieTracking *tracking, MovieTrackingObject *object,
                                       char *error_msg, int error_size)
{
	if (tracking->settings.motion_flag & TRACKING_MOTION_MODAL) {
		/* TODO: check for number of tracks? */
		return true;
	}
	else if ((tracking->settings.reconstruction_flag & TRACKING_USE_KEYFRAME_SELECTION) == 0) {
		/* Automatic keyframe selection does not require any pre-process
		 * checks, so the common-track count is only verified when the
		 * keyframes were set manually. */
		if (reconstruct_count_tracks_on_both_keyframes(tracking, object) < 8) {
			BLI_strncpy(error_msg,
			            N_("At least 8 common tracks on both of keyframes are needed for reconstruction"),
			            error_size);

			return false;
		}
	}

#ifndef WITH_LIBMV
	BLI_strncpy(error_msg, N_("Blender is compiled without motion tracking library"), error_size);
	return false;
#endif

	return true;
}
/* Create context for camera/object motion reconstruction.
 * Copies all data needed for reconstruction from movie
 * clip datablock, so editing this clip is safe during
 * reconstruction job is in progress.
 */
MovieReconstructContext *BKE_tracking_reconstruction_context_new(MovieClip *clip, MovieTrackingObject *object,
                                                                 int keyframe1, int keyframe2, int width, int height)
{
	MovieTracking *tracking = &clip->tracking;
	MovieReconstructContext *context = MEM_callocN(sizeof(MovieReconstructContext), "MovieReconstructContext data");
	MovieTrackingCamera *camera = &tracking->camera;
	ListBase *tracksbase = BKE_tracking_object_get_tracks(tracking, object);
	float aspy = 1.0f / tracking->camera.pixel_aspect;
	int num_tracks = BLI_countlist(tracksbase);
	int sfra = INT_MAX, efra = INT_MIN;
	MovieTrackingTrack *track;

	BLI_strncpy(context->object_name, object->name, sizeof(context->object_name));
	context->is_camera = object->flag & TRACKING_OBJECT_CAMERA;
	context->motion_flag = tracking->settings.motion_flag;

	context->select_keyframes =
		(tracking->settings.reconstruction_flag & TRACKING_USE_KEYFRAME_SELECTION) != 0;

	/* Principal point is stored with pixel aspect applied; libmv expects
	 * square-pixel space, so aspect is applied to the y-coordinate. */
	context->focal_length = camera->focal;
	context->principal_point[0] = camera->principal[0];
	context->principal_point[1] = camera->principal[1] * aspy;

	context->width = width;
	context->height = height;
	context->k1 = camera->k1;
	context->k2 = camera->k2;
	context->k3 = camera->k3;

	context->tracks_map = tracks_map_new(context->object_name, context->is_camera, num_tracks, 0);

	track = tracksbase->first;
	while (track) {
		int first = 0, last = track->markersnr - 1;
		MovieTrackingMarker *first_marker = &track->markers[0];
		MovieTrackingMarker *last_marker = &track->markers[track->markersnr - 1];

		/* find first not-disabled marker */
		while (first <= track->markersnr - 1 && first_marker->flag & MARKER_DISABLED) {
			first++;
			first_marker++;
		}

		/* find last not-disabled marker */
		while (last >= 0 && last_marker->flag & MARKER_DISABLED) {
			last--;
			last_marker--;
		}

		/* Extend frame range by the track's enabled markers.
		 *
		 * NOTE: previous code checked first < markersnr - 1 which skipped
		 * tracks whose only enabled marker is the last one, asymmetric
		 * with the last >= 0 check below.
		 */
		if (first < track->markersnr)
			sfra = min_ii(sfra, first_marker->framenr);

		if (last >= 0)
			efra = max_ii(efra, last_marker->framenr);

		tracks_map_insert(context->tracks_map, track, NULL);

		track = track->next;
	}

	context->sfra = sfra;
	context->efra = efra;

	context->tracks = libmv_tracks_new(clip, tracksbase, width, height * aspy);
	context->keyframe1 = keyframe1;
	context->keyframe2 = keyframe2;
	context->refine_flags = reconstruct_refine_intrinsics_get_flags(tracking, object);

	return context;
}
/* Free memory used by a reconstruction process. */
void BKE_tracking_reconstruction_context_free(MovieReconstructContext *context)
{
	/* Reconstruction result only exists after the solve actually ran. */
	if (context->reconstruction != NULL) {
		libmv_reconstructionDestroy(context->reconstruction);
	}

	libmv_tracksDestroy(context->tracks);

	tracks_map_free(context->tracks_map, NULL);

	MEM_freeN(context);
}
/* Callback which is called from libmv side to update progress in the interface. */
static void reconstruct_update_solve_cb(void *customdata, double progress, const char *message)
{
	ReconstructProgressData *progressdata = customdata;

	/* Progress pointer is optional; do_update is only touched together
	 * with it. */
	if (progressdata->progress) {
		*progressdata->progress = progress;
		*progressdata->do_update = TRUE;
	}

	BLI_snprintf(progressdata->stats_message, progressdata->message_size, "Solving camera | %s", message);
}
/* Fill in camera intrinsics structure from reconstruction context.
 *
 * NOTE(review): function name misspells "camera intrinsics"; kept as-is
 * here since it is file-local and renaming is a separate cleanup.
 */
static void camraIntrincicsOptionsFromContext(libmv_CameraIntrinsicsOptions *camera_intrinsics_options,
                                              MovieReconstructContext *context)
{
	camera_intrinsics_options->focal_length = context->focal_length;

	camera_intrinsics_options->principal_point_x = context->principal_point[0];
	camera_intrinsics_options->principal_point_y = context->principal_point[1];

	/* Radial distortion coefficients. */
	camera_intrinsics_options->k1 = context->k1;
	camera_intrinsics_options->k2 = context->k2;
	camera_intrinsics_options->k3 = context->k3;

	camera_intrinsics_options->image_width = context->width;
	camera_intrinsics_options->image_height = context->height;
}
/* Fill in reconstruction options structure from reconstruction context. */
static void reconstructionOptionsFromContext(libmv_ReconstructionOptions *reconstruction_options,
                                             MovieReconstructContext *context)
{
	reconstruction_options->select_keyframes = context->select_keyframes;

	/* When automatic selection is enabled these are starting values only;
	 * the solver writes the actually used keyframes back. */
	reconstruction_options->keyframe1 = context->keyframe1;
	reconstruction_options->keyframe2 = context->keyframe2;

	reconstruction_options->refine_intrinsics = context->refine_flags;
}
/* Solve camera/object motion and reconstruct 3D markers position
 * from a prepared reconstruction context.
 *
 * stop is not actually used at this moment, so reconstruction
 * job could not be stopped.
 *
 * do_update, progress and stat_message are set by reconstruction
 * callback in libmv side and passing to an interface.
 */
void BKE_tracking_reconstruction_solve(MovieReconstructContext *context, short *stop, short *do_update,
                                       float *progress, char *stats_message, int message_size)
{
	float error;

	ReconstructProgressData progressdata;

	libmv_CameraIntrinsicsOptions camera_intrinsics_options;
	libmv_ReconstructionOptions reconstruction_options;

	progressdata.stop = stop;
	progressdata.do_update = do_update;
	progressdata.progress = progress;
	progressdata.stats_message = stats_message;
	progressdata.message_size = message_size;

	/* Convert context data into option structures libmv understands. */
	camraIntrincicsOptionsFromContext(&camera_intrinsics_options, context);
	reconstructionOptionsFromContext(&reconstruction_options, context);

	if (context->motion_flag & TRACKING_MOTION_MODAL) {
		/* Modal motion uses the dedicated modal solver. */
		context->reconstruction = libmv_solveModal(context->tracks,
		                                           &camera_intrinsics_options,
		                                           &reconstruction_options,
		                                           reconstruct_update_solve_cb, &progressdata);
	}
	else {
		context->reconstruction = libmv_solveReconstruction(context->tracks,
		                                                    &camera_intrinsics_options,
		                                                    &reconstruction_options,
		                                                    reconstruct_update_solve_cb, &progressdata);

		if (context->select_keyframes) {
			/* store actual keyframes used for reconstruction to update them in the interface later */
			context->keyframe1 = reconstruction_options.keyframe1;
			context->keyframe2 = reconstruction_options.keyframe2;
		}
	}

	error = libmv_reprojectionError(context->reconstruction);

	context->reprojection_error = error;
}
/* Finish reconstruction process by copying reconstructed data
 * to an actual movie clip datablock.
 *
 * Returns false when reconstruction was only partial (missing
 * bundles or cameras).
 */
bool BKE_tracking_reconstruction_finish(MovieReconstructContext *context, MovieTracking *tracking)
{
	MovieTrackingObject *object;
	MovieTrackingReconstruction *reconstruction;

	tracks_map_merge(context->tracks_map, tracking);
	BKE_tracking_dopesheet_tag_update(tracking);

	object = BKE_tracking_object_get_named(tracking, context->object_name);

	reconstruction = context->is_camera ? &tracking->reconstruction : &object->reconstruction;

	/* update keyframe in the interface */
	if (context->select_keyframes) {
		object->keyframe1 = context->keyframe1;
		object->keyframe2 = context->keyframe2;
	}

	reconstruction->error = context->reprojection_error;
	reconstruction->flag |= TRACKING_RECONSTRUCTED;

	return reconstruct_retrieve_libmv(context, tracking) != 0;
}
/* Scale all reconstructed camera positions and track bundles, shifting
 * everything back so the first reconstructed camera keeps its place at
 * the origin after scaling.
 */
static void tracking_scale_reconstruction(ListBase *tracksbase, MovieTrackingReconstruction *reconstruction,
                                          float scale[3])
{
	MovieTrackingTrack *track;
	float delta[3] = {0.0f, 0.0f, 0.0f};
	int a;

	/* Offset introduced by scaling the first camera's location. */
	if (reconstruction->camnr > 0) {
		mul_v3_v3v3(delta, reconstruction->cameras[0].mat[3], scale);
	}

	for (a = 0; a < reconstruction->camnr; a++) {
		MovieReconstructedCamera *camera = &reconstruction->cameras[a];

		mul_v3_v3(camera->mat[3], scale);
		sub_v3_v3(camera->mat[3], delta);
	}

	for (track = tracksbase->first; track; track = track->next) {
		if ((track->flag & TRACK_HAS_BUNDLE) != 0) {
			mul_v3_v3(track->bundle_pos, scale);
			sub_v3_v3(track->bundle_pos, delta);
		}
	}
}
/* Apply scale on all reconstructed cameras and bundles,
 * used by camera scale apply operator.
 */
void BKE_tracking_reconstruction_scale(MovieTracking *tracking, float scale[3])
{
	MovieTrackingObject *object;

	/* Every tracking object owns its own reconstruction, scale them all. */
	for (object = tracking->objects.first; object; object = object->next) {
		ListBase *tracksbase = BKE_tracking_object_get_tracks(tracking, object);
		MovieTrackingReconstruction *reconstruction =
			BKE_tracking_object_get_reconstruction(tracking, object);

		tracking_scale_reconstruction(tracksbase, reconstruction, scale);
	}
}

@ -0,0 +1,445 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
* Keir Mierle
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/tracking_stabilize.c
* \ingroup bke
*
* This file contains implementation of 2D frame stabilization.
*/
#include <limits.h>
#include "MEM_guardedalloc.h"
#include "DNA_movieclip_types.h"
#include "BLI_utildefines.h"
#include "BLI_math.h"
#include "BKE_tracking.h"
#include "IMB_imbuf_types.h"
#include "IMB_imbuf.h"
/* Get the center of the bounding box of all markers belonging to tracks
 * flagged for 2D stabilization, at the given frame.
 *
 * Returns false when no track is flagged for stabilization.
 *
 * NOTE: frame number should be in clip space, not scene space
 */
static bool stabilization_median_point_get(MovieTracking *tracking, int framenr, float median[2])
{
	MovieTrackingTrack *track;
	float min[2], max[2];
	bool found = false;

	INIT_MINMAX2(min, max);

	for (track = tracking->tracks.first; track; track = track->next) {
		if ((track->flag & TRACK_USE_2D_STAB) != 0) {
			MovieTrackingMarker *marker = BKE_tracking_marker_get(track, framenr);

			minmax_v2v2_v2(min, max, marker->pos);

			found = true;
		}
	}

	median[0] = (max[0] + min[0]) / 2.0f;
	median[1] = (max[1] + min[1]) / 2.0f;

	return found;
}
/* Calculate stabilization data (translation, scale and rotation) from
 * given median of first and current frame medians, tracking data and
 * frame number.
 *
 * NOTE: frame number should be in clip space, not scene space
 */
static void stabilization_calculate_data(MovieTracking *tracking, int framenr, int width, int height,
                                         float firstmedian[2], float median[2],
                                         float translation[2], float *scale, float *angle)
{
	MovieTrackingStabilization *stab = &tracking->stabilization;

	/* Blend the stored scale towards 1.0 by the scale influence factor. */
	*scale = (stab->scale - 1.0f) * stab->scaleinf + 1.0f;
	*angle = 0.0f;

	/* Translation which brings the current median back to the first
	 * frame's one, in pixels, weighted by the location influence. */
	translation[0] = (firstmedian[0] - median[0]) * width * (*scale);
	translation[1] = (firstmedian[1] - median[1]) * height * (*scale);

	mul_v2_fl(translation, stab->locinf);

	if ((stab->flag & TRACKING_STABILIZE_ROTATION) && stab->rot_track && stab->rotinf) {
		MovieTrackingMarker *marker;
		float a[2], b[2];
		float x0 = (float)width / 2.0f, y0 = (float)height / 2.0f;
		float x = median[0] * width, y = median[1] * height;

		/* Pixel-space vector from the first frame median to the rotation
		 * track at frame 1... */
		marker = BKE_tracking_marker_get(stab->rot_track, 1);
		sub_v2_v2v2(a, marker->pos, firstmedian);
		a[0] *= width;
		a[1] *= height;

		/* ...and the same vector at the current frame: the angle between
		 * them is the rotation to compensate, scaled by influence. */
		marker = BKE_tracking_marker_get(stab->rot_track, framenr);
		sub_v2_v2v2(b, marker->pos, median);
		b[0] *= width;
		b[1] *= height;

		*angle = -atan2f(a[0] * b[1] - a[1] * b[0], a[0] * b[0] + a[1] * b[1]);
		*angle *= stab->rotinf;

		/* convert to rotation around image center */
		translation[0] -= (x0 + (x - x0) * cosf(*angle) - (y - y0) * sinf(*angle) - x) * (*scale);
		translation[1] -= (y0 + (x - x0) * sinf(*angle) + (y - y0) * cosf(*angle) - y) * (*scale);
	}
}
/* Calculate factor of a scale, which will eliminate black areas
 * appearing on the frame caused by frame translation.
 */
static float stabilization_calculate_autoscale_factor(MovieTracking *tracking, int width, int height)
{
	float firstmedian[2];
	MovieTrackingStabilization *stab = &tracking->stabilization;
	float aspect = tracking->camera.pixel_aspect;

	/* Early output if stabilization data is already up-to-date. */
	if (stab->ok)
		return stab->scale;

	/* See comment in BKE_tracking_stabilization_data_get about first frame. */
	if (stabilization_median_point_get(tracking, 1, firstmedian)) {
		int sfra = INT_MAX, efra = INT_MIN, cfra;
		float scale = 1.0f;
		MovieTrackingTrack *track;

		stab->scale = 1.0f;

		/* Calculate frame range of tracks used for stabilization. */
		track = tracking->tracks.first;
		while (track) {
			if (track->flag & TRACK_USE_2D_STAB ||
			    ((stab->flag & TRACKING_STABILIZE_ROTATION) && track == stab->rot_track))
			{
				sfra = min_ii(sfra, track->markers[0].framenr);
				efra = max_ii(efra, track->markers[track->markersnr - 1].framenr);
			}

			track = track->next;
		}

		/* For every frame we calculate scale factor needed to eliminate black
		 * areas and choose largest scale factor as final one.
		 */
		for (cfra = sfra; cfra <= efra; cfra++) {
			float median[2];
			float translation[2], angle, tmp_scale;
			int i;
			float mat[4][4];
			float points[4][2] = {{0.0f, 0.0f}, {0.0f, height}, {width, height}, {width, 0.0f}};
			float si, co;

			/* Stabilization transform of this frame, with scale of 1.0
			 * so the required compensation scale can be solved for. */
			stabilization_median_point_get(tracking, cfra, median);
			stabilization_calculate_data(tracking, cfra, width, height, firstmedian, median, translation, &tmp_scale, &angle);

			BKE_tracking_stabilization_data_to_mat4(width, height, aspect, translation, 1.0f, angle, mat);

			si = sinf(angle);
			co = cosf(angle);

			/* Check each transformed frame edge against each corner of the
			 * original frame rectangle. */
			for (i = 0; i < 4; i++) {
				int j;
				float a[3] = {0.0f, 0.0f, 0.0f}, b[3] = {0.0f, 0.0f, 0.0f};

				copy_v3_v3(a, points[i]);
				copy_v3_v3(b, points[(i + 1) % 4]);

				mul_m4_v3(mat, a);
				mul_m4_v3(mat, b);

				for (j = 0; j < 4; j++) {
					float point[3] = {points[j][0], points[j][1], 0.0f};
					float v1[3], v2[3];

					sub_v3_v3v3(v1, b, a);
					sub_v3_v3v3(v2, point, a);

					/* Corner lies outside the transformed edge: solve the
					 * scale needed to pull the edge past it. */
					if (cross_v2v2(v1, v2) >= 0.0f) {
						const float rotDx[4][2] = {{1.0f, 0.0f}, {0.0f, -1.0f}, {-1.0f, 0.0f}, {0.0f, 1.0f}};
						const float rotDy[4][2] = {{0.0f, 1.0f}, {1.0f, 0.0f}, {0.0f, -1.0f}, {-1.0f, 0.0f}};

						float dx = translation[0] * rotDx[j][0] + translation[1] * rotDx[j][1],
						      dy = translation[0] * rotDy[j][0] + translation[1] * rotDy[j][1];

						float w, h, E, F, G, H, I, J, K, S;

						if (j % 2) {
							w = (float)height / 2.0f;
							h = (float)width / 2.0f;
						}
						else {
							w = (float)width / 2.0f;
							h = (float)height / 2.0f;
						}

						E = -w * co + h * si;
						F = -h * co - w * si;

						if ((i % 2) == (j % 2)) {
							G = -w * co - h * si;
							H = h * co - w * si;
						}
						else {
							G = w * co + h * si;
							H = -h * co + w * si;
						}

						I = F - H;
						J = G - E;
						K = G * F - E * H;

						S = (-w * I - h * J) / (dx * I + dy * J + K);

						scale = max_ff(scale, S);
					}
				}
			}
		}

		stab->scale = scale;

		/* Respect the user-set upper limit for automatic scale. */
		if (stab->maxscale > 0.0f)
			stab->scale = min_ff(stab->scale, stab->maxscale);
	}
	else {
		stab->scale = 1.0f;
	}

	stab->ok = TRUE;

	return stab->scale;
}
/* Get stabilization data (translation, scaling and angle) for a given frame.
 *
 * NOTE: frame number should be in clip space, not scene space
 */
void BKE_tracking_stabilization_data_get(MovieTracking *tracking, int framenr, int width, int height,
                                         float translation[2], float *scale, float *angle)
{
	float firstmedian[2], median[2];
	MovieTrackingStabilization *stab = &tracking->stabilization;

	/* Early output if stabilization is disabled. */
	if ((stab->flag & TRACKING_2D_STABILIZATION) == 0) {
		zero_v2(translation);
		*scale = 1.0f;
		*angle = 0.0f;

		return;
	}

	/* Even if tracks does not start at frame 1, their position will
	 * be estimated at this frame, which will give reasonable result
	 * in most of cases.
	 *
	 * However, it's still better to replace this with real first
	 * frame number at which tracks are appearing.
	 */
	if (stabilization_median_point_get(tracking, 1, firstmedian)) {
		stabilization_median_point_get(tracking, framenr, median);

		if ((stab->flag & TRACKING_AUTOSCALE) == 0)
			stab->scale = 1.0f;

		if (!stab->ok) {
			/* Stabilization data is out of date: refresh the cached
			 * autoscale factor (when enabled) before evaluating. */
			if (stab->flag & TRACKING_AUTOSCALE)
				stabilization_calculate_autoscale_factor(tracking, width, height);

			stabilization_calculate_data(tracking, framenr, width, height, firstmedian, median,
			                             translation, scale, angle);

			stab->ok = TRUE;
		}
		else {
			stabilization_calculate_data(tracking, framenr, width, height, firstmedian, median,
			                             translation, scale, angle);
		}
	}
	else {
		/* No tracks flagged for 2D stabilization: identity transform. */
		zero_v2(translation);
		*scale = 1.0f;
		*angle = 0.0f;
	}
}
/* Stabilize given image buffer using stabilization data for
 * a specified frame number.
 *
 * Returns the input buffer unchanged when stabilization is disabled,
 * otherwise a newly allocated stabilized buffer. The applied
 * translation/scale/angle are written back through the optional
 * output pointers.
 *
 * NOTE: frame number should be in clip space, not scene space
 */
ImBuf *BKE_tracking_stabilize_frame(MovieTracking *tracking, int framenr, ImBuf *ibuf,
                                    float translation[2], float *scale, float *angle)
{
	float tloc[2], tscale, tangle;
	MovieTrackingStabilization *stab = &tracking->stabilization;
	ImBuf *tmpibuf;
	int width = ibuf->x, height = ibuf->y;
	float aspect = tracking->camera.pixel_aspect;
	float mat[4][4];
	int j, filter = tracking->stabilization.filter;
	void (*interpolation)(struct ImBuf *, struct ImBuf *, float, float, int, int) = NULL;
	int ibuf_flags;

	if (translation)
		copy_v2_v2(tloc, translation);

	if (scale)
		tscale = *scale;

	/* Perform early output if no stabilization is used. */
	if ((stab->flag & TRACKING_2D_STABILIZATION) == 0) {
		if (translation)
			zero_v2(translation);

		if (scale)
			*scale = 1.0f;

		if (angle)
			*angle = 0.0f;

		return ibuf;
	}

	/* Allocate frame for stabilization result, matching the input's
	 * byte/float buffer layout. */
	ibuf_flags = 0;
	if (ibuf->rect)
		ibuf_flags |= IB_rect;
	if (ibuf->rect_float)
		ibuf_flags |= IB_rectfloat;

	tmpibuf = IMB_allocImBuf(ibuf->x, ibuf->y, ibuf->planes, ibuf_flags);

	/* Calculate stabilization matrix. */
	BKE_tracking_stabilization_data_get(tracking, framenr, width, height, tloc, &tscale, &tangle);
	BKE_tracking_stabilization_data_to_mat4(ibuf->x, ibuf->y, aspect, tloc, tscale, tangle, mat);

	/* The matrix maps source into stabilized space; sampling below needs
	 * the inverse mapping. */
	invert_m4(mat);

	if (filter == TRACKING_FILTER_NEAREST)
		interpolation = nearest_interpolation;
	else if (filter == TRACKING_FILTER_BILINEAR)
		interpolation = bilinear_interpolation;
	else if (filter == TRACKING_FILTER_BICUBIC)
		interpolation = bicubic_interpolation;
	else
		/* fallback to default interpolation method */
		interpolation = nearest_interpolation;

	/* This function is only used for display in clip editor and
	 * sequencer only, which would only benefit of using threads
	 * here.
	 *
	 * But need to keep an eye on this if the function will be
	 * used in other cases.
	 */
#pragma omp parallel for if (tmpibuf->y > 128)
	for (j = 0; j < tmpibuf->y; j++) {
		int i;
		for (i = 0; i < tmpibuf->x; i++) {
			float vec[3] = {i, j, 0.0f};

			/* Map destination pixel back to the source image and sample
			 * it with the chosen interpolation. */
			mul_v3_m4v3(vec, mat, vec);

			interpolation(ibuf, tmpibuf, vec[0], vec[1], i, j);
		}
	}

	if (tmpibuf->rect_float)
		tmpibuf->userflags |= IB_RECT_INVALID;

	/* Report the stabilization data which was actually applied. */
	if (translation)
		copy_v2_v2(translation, tloc);

	if (scale)
		*scale = tscale;

	if (angle)
		*angle = tangle;

	return tmpibuf;
}
/* Build a 4x4 transformation matrix out of 2D stabilization data
 * (translation, uniform scale and rotation angle) for easy coordinate
 * transformation.
 *
 * NOTE: A 4x4 matrix (rather than 3x3) is used because the result is fed
 * directly into OpenGL drawing.
 */
void BKE_tracking_stabilization_data_to_mat4(int width, int height, float aspect,
                                             float translation[2], float scale, float angle,
                                             float mat[4][4])
{
	float mat_translation[4][4], mat_rotation[4][4], mat_scale[4][4];
	float mat_center[4][4], mat_center_inv[4][4];
	float mat_aspect[4][4], mat_aspect_inv[4][4];
	float scale_vec[3] = {scale, scale, scale};

	unit_m4(mat_translation);
	unit_m4(mat_rotation);
	unit_m4(mat_scale);
	unit_m4(mat_center);
	unit_m4(mat_aspect);

	/* Pixel aspect ratio correction matrix and its inverse. */
	mat_aspect[0][0] = 1.0f / aspect;
	invert_m4_m4(mat_aspect_inv, mat_aspect);

	/* Use the image center as rotation center.
	 *
	 * Rotation matrix is constructed in a way rotation happens around the
	 * image center, and it's then a matter of calculating translation in
	 * a way that applying translation after rotation makes the rotation
	 * happen around the median point of the tracks used for translation
	 * stabilization.
	 */
	mat_center[3][0] = (float)width / 2.0f;
	mat_center[3][1] = (float)height / 2.0f;
	invert_m4_m4(mat_center_inv, mat_center);

	size_to_mat4(mat_scale, scale_vec);          /* uniform scale matrix */
	add_v2_v2(mat_translation[3], translation);  /* translation matrix */
	rotate_m4(mat_rotation, 'Z', angle);         /* rotation matrix */

	/* Compose the final transformation matrix. */
	mul_serie_m4(mat, mat_translation, mat_center, mat_aspect, mat_rotation, mat_aspect_inv,
	             mat_scale, mat_center_inv, NULL);
}

@ -0,0 +1,379 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
* Keir Mierle
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/tracking_util.c
 * \ingroup bke
 *
 * This file contains implementation of functions which are used
 * by multiple tracking files but which should not be public.
 */
#include <stddef.h>
#include "MEM_guardedalloc.h"
#include "DNA_movieclip_types.h"
#include "BLI_utildefines.h"
#include "BLI_math.h"
#include "BLI_listbase.h"
#include "BLI_ghash.h"
#include "BLI_path_util.h"
#include "BLI_string.h"
#include "BLF_translation.h"
#include "BKE_tracking.h"
#include "tracking_private.h"
/*********************** Tracks map *************************/
/* Create a tracks map with room for a fixed number of tracks and,
 * optionally, customdata_size bytes of custom data per track. */
TracksMap *tracks_map_new(const char *object_name, bool is_camera, int num_tracks, int customdata_size)
{
	TracksMap *map = MEM_callocN(sizeof(TracksMap), "TrackingsMap");

	map->num_tracks = num_tracks;
	map->customdata_size = customdata_size;
	map->is_camera = is_camera;
	BLI_strncpy(map->object_name, object_name, sizeof(map->object_name));

	/* Pre-sized storage for the track copies and their custom data. */
	map->tracks = MEM_callocN(sizeof(MovieTrackingTrack) * num_tracks, "TrackingsMap tracks");

	if (customdata_size) {
		map->customdata = MEM_callocN(customdata_size * num_tracks, "TracksMap customdata");
	}

	map->hash = BLI_ghash_ptr_new("TracksMap hash");

	return map;
}
/* Return the number of tracks the map was created for (its capacity,
 * not the number of tracks inserted so far). */
int tracks_map_get_size(TracksMap *map)
{
	return map->num_tracks;
}
/* Fetch the track stored at the given index together with a pointer to
 * its custom data.
 *
 * NOTE: *customdata is left untouched when the map was created with
 * customdata_size == 0. */
void tracks_map_get_indexed_element(TracksMap *map, int index, MovieTrackingTrack **track, void **customdata)
{
	*track = &map->tracks[index];

	if (map->customdata)
		*customdata = &map->customdata[index * map->customdata_size];
}
/* Insert a copy of the given track (and optionally its custom data) into
 * the map, remembering the original track in the hash for later merging.
 *
 * The marker array is duplicated, so the map owns its own copy. */
void tracks_map_insert(TracksMap *map, MovieTrackingTrack *track, void *customdata)
{
	MovieTrackingTrack new_track = *track;

	/* Storage is pre-sized in tracks_map_new(); inserting more tracks
	 * than the map was created for would overrun the arrays.
	 * (BLI_assert comes from BLI_utildefines.h.) */
	BLI_assert(map->ptr < map->num_tracks);

	new_track.markers = MEM_dupallocN(new_track.markers);

	map->tracks[map->ptr] = new_track;

	if (customdata)
		memcpy(&map->customdata[map->ptr * map->customdata_size], customdata, map->customdata_size);

	/* Map the stored copy back to the original track. */
	BLI_ghash_insert(map->hash, &map->tracks[map->ptr], track);

	map->ptr++;
}
/* Merge the tracks stored in the map back into the tracking context they
 * came from (the camera tracks list or the named tracking object).
 *
 * Tracks which still exist in the context are updated in-place so that
 * pointers held elsewhere stay valid; tracks removed by the user in the
 * meantime are re-added as duplicates. */
void tracks_map_merge(TracksMap *map, MovieTracking *tracking)
{
	MovieTrackingTrack *track;
	ListBase tracks = {NULL, NULL}, new_tracks = {NULL, NULL};
	ListBase *old_tracks;
	int a;

	if (map->is_camera) {
		old_tracks = &tracking->tracks;
	}
	else {
		MovieTrackingObject *object = BKE_tracking_object_get_named(tracking, map->object_name);

		if (!object) {
			/* object was deleted by user, create new one */
			object = BKE_tracking_object_add(tracking, map->object_name);
		}

		old_tracks = &object->tracks;
	}

	/* duplicate currently operating tracks to temporary list.
	 * this is needed to keep names in unique state and it's faster to change names
	 * of currently operating tracks (if needed)
	 */
	for (a = 0; a < map->num_tracks; a++) {
		MovieTrackingTrack *old_track;
		bool mapped_to_old = false;

		track = &map->tracks[a];

		/* find original of operating track in list of previously displayed tracks */
		old_track = BLI_ghash_lookup(map->hash, track);
		if (old_track) {
			if (BLI_findindex(old_tracks, old_track) != -1) {
				BLI_remlink(old_tracks, old_track);

				/* Copy flags like selection back to the track map. */
				track->flag = old_track->flag;
				track->pat_flag = old_track->pat_flag;
				track->search_flag = old_track->search_flag;

				/* Copy all the rest settings back from the map to the actual tracks. */
				MEM_freeN(old_track->markers);
				*old_track = *track;
				/* The struct copy above aliased the map's marker array;
				 * duplicate it so the map can free its own copy later. */
				old_track->markers = MEM_dupallocN(old_track->markers);

				BLI_addtail(&tracks, old_track);

				mapped_to_old = true;
			}
		}

		if (mapped_to_old == false) {
			/* Original track is gone (deleted by user): add a fresh duplicate. */
			MovieTrackingTrack *new_track = BKE_tracking_track_duplicate(track);

			/* Update old-new track mapping */
			BLI_ghash_remove(map->hash, track, NULL, NULL);
			BLI_ghash_insert(map->hash, track, new_track);

			BLI_addtail(&tracks, new_track);
		}
	}

	/* move all tracks, which aren't operating */
	track = old_tracks->first;
	while (track) {
		MovieTrackingTrack *next = track->next;

		BLI_addtail(&new_tracks, track);

		track = next;
	}

	/* now move all tracks which are currently operating and keep their names unique */
	track = tracks.first;
	while (track) {
		MovieTrackingTrack *next = track->next;

		BLI_remlink(&tracks, track);

		track->next = track->prev = NULL;
		BLI_addtail(&new_tracks, track);

		BLI_uniquename(&new_tracks, track, CTX_DATA_(BLF_I18NCONTEXT_ID_MOVIECLIP, "Track"), '.',
		               offsetof(MovieTrackingTrack, name), sizeof(track->name));

		track = next;
	}

	*old_tracks = new_tracks;
}
/* Free the map, every stored track copy and, when a callback is given,
 * every per-track custom data element. */
void tracks_map_free(TracksMap *map, void (*customdata_free)(void *customdata))
{
	int i = 0;
	/* Loop-invariant: whether per-track custom data needs freeing. */
	const bool do_customdata_free = (map->customdata != NULL) && (customdata_free != NULL);

	BLI_ghash_free(map->hash, NULL, NULL);

	for (i = 0; i < map->num_tracks; i++) {
		if (do_customdata_free)
			customdata_free(&map->customdata[i * map->customdata_size]);

		BKE_tracking_track_free(&map->tracks[i]);
	}

	if (map->customdata)
		MEM_freeN(map->customdata);

	MEM_freeN(map->tracks);
	MEM_freeN(map);
}
/*********************** Space transformation functions *************************/
/* Three coordinate frames: Frame, Search, and Marker
* Two units: Pixels, Unified
* Notation: {coordinate frame}_{unit}; for example, "search_pixel" are search
* window relative coordinates in pixels, and "frame_unified" are unified 0..1
* coordinates relative to the entire frame.
*/
/* Scale unified (0..1, frame-relative) coordinates up to frame pixel
 * coordinates. */
static void unified_to_pixel(int frame_width, int frame_height,
                             const float unified_coords[2], float pixel_coords[2])
{
	int i;
	const int dimensions[2] = {frame_width, frame_height};

	for (i = 0; i < 2; i++)
		pixel_coords[i] = unified_coords[i] * dimensions[i];
}
/* Shift marker-relative unified coordinates into frame-relative unified
 * coordinates by adding the marker position. */
static void marker_to_frame_unified(const MovieTrackingMarker *marker, const float marker_unified_coords[2],
                                    float frame_unified_coords[2])
{
	int i;

	for (i = 0; i < 2; i++)
		frame_unified_coords[i] = marker_unified_coords[i] + marker->pos[i];
}
/* Convert a point given in marker-relative unified space into frame pixel
 * coordinates: first shift to frame unified space, then scale to pixels.
 *
 * NOTE: the output array is also used as scratch for the intermediate
 * frame-unified result. */
static void marker_unified_to_frame_pixel_coordinates(int frame_width, int frame_height,
                                                      const MovieTrackingMarker *marker,
                                                      const float marker_unified_coords[2],
                                                      float frame_pixel_coords[2])
{
	marker_to_frame_unified(marker, marker_unified_coords, frame_pixel_coords);
	unified_to_pixel(frame_width, frame_height, frame_pixel_coords, frame_pixel_coords);
}
/* Lower-left corner of the marker's search window, in frame pixel space,
 * snapped to the pixel grid. */
void tracking_get_search_origin_frame_pixel(int frame_width, int frame_height,
                                            const MovieTrackingMarker *marker,
                                            float frame_pixel[2])
{
	/* Get the lower left coordinate of the search window and snap to pixel coordinates */
	marker_unified_to_frame_pixel_coordinates(frame_width, frame_height, marker, marker->search_min, frame_pixel);
	/* NOTE(review): the (int) cast truncates toward zero, which matches
	 * "snap down to the pixel grid" only for non-negative coordinates --
	 * confirm search windows never extend past the frame origin. */
	frame_pixel[0] = (int)frame_pixel[0];
	frame_pixel[1] = (int)frame_pixel[1];
}
/* Normalize frame pixel coordinates down to unified (0..1, frame-relative)
 * coordinates. */
static void pixel_to_unified(int frame_width, int frame_height, const float pixel_coords[2], float unified_coords[2])
{
	int i;
	const int dimensions[2] = {frame_width, frame_height};

	for (i = 0; i < 2; i++)
		unified_coords[i] = pixel_coords[i] / dimensions[i];
}
/* Convert marker-relative unified coordinates into search-window-relative
 * pixel coordinates. */
static void marker_unified_to_search_pixel(int frame_width, int frame_height,
                                           const MovieTrackingMarker *marker,
                                           const float marker_unified[2], float search_pixel[2])
{
	float pixel_coords[2];
	float search_origin[2];

	/* Both the point and the search-window origin in frame pixel space;
	 * their difference is the search-window-relative position. */
	marker_unified_to_frame_pixel_coordinates(frame_width, frame_height, marker, marker_unified, pixel_coords);
	tracking_get_search_origin_frame_pixel(frame_width, frame_height, marker, search_origin);
	sub_v2_v2v2(search_pixel, pixel_coords, search_origin);
}
/* Inverse of marker_unified_to_search_pixel(): convert search-window-
 * relative pixel coordinates back into marker-relative unified
 * coordinates. */
static void search_pixel_to_marker_unified(int frame_width, int frame_height,
                                           const MovieTrackingMarker *marker,
                                           const float search_pixel[2], float marker_unified[2])
{
	float unified_coords[2];
	float search_origin[2];

	/* Back to frame pixel space, then normalize to frame unified space. */
	tracking_get_search_origin_frame_pixel(frame_width, frame_height, marker, search_origin);
	add_v2_v2v2(unified_coords, search_pixel, search_origin);
	pixel_to_unified(frame_width, frame_height, unified_coords, unified_coords);

	/* marker pos is in frame unified space; subtracting it makes the
	 * result marker-relative. */
	sub_v2_v2v2(marker_unified, unified_coords, marker->pos);
}
/* Each marker has 5 coordinates associated with it that get warped with
 * tracking: the four corners ("pattern_corners"), and the center ("pos").
 * This function puts those 5 points into the appropriate frame for
 * tracking (the "search" coordinate frame).
 */
void tracking_get_marker_coords_for_tracking(int frame_width, int frame_height,
                                             const MovieTrackingMarker *marker,
                                             double search_pixel_x[5], double search_pixel_y[5])
{
	float search_pixel[2];
	float origin_unified[2] = {0.0f, 0.0f};
	int i;

	/* The four pattern corners, converted to search space. */
	for (i = 0; i < 4; i++) {
		marker_unified_to_search_pixel(frame_width, frame_height, marker, marker->pattern_corners[i], search_pixel);
		search_pixel_x[i] = search_pixel[0] - 0.5f;
		search_pixel_y[i] = search_pixel[1] - 0.5f;
	}

	/* Fifth point is the center position (aka "pos"), which is the origin
	 * of marker-relative unified space. */
	marker_unified_to_search_pixel(frame_width, frame_height, marker, origin_unified, search_pixel);
	search_pixel_x[4] = search_pixel[0] - 0.5f;
	search_pixel_y[4] = search_pixel[1] - 0.5f;
}
/* Inverse of tracking_get_marker_coords_for_tracking(): write the five
 * tracked points (four pattern corners plus center) back into the
 * marker. */
void tracking_set_marker_coords_from_tracking(int frame_width, int frame_height, MovieTrackingMarker *marker,
                                              const double search_pixel_x[5], const double search_pixel_y[5])
{
	float marker_unified[2];
	float search_pixel[2];
	int i;

	/* Corners: search space back to marker-relative unified space. */
	for (i = 0; i < 4; i++) {
		search_pixel[0] = search_pixel_x[i] + 0.5;
		search_pixel[1] = search_pixel_y[i] + 0.5;
		search_pixel_to_marker_unified(frame_width, frame_height, marker, search_pixel, marker->pattern_corners[i]);
	}

	/* Center ("pos") is the origin of marker space, so its converted
	 * value is the shift of the whole patch. */
	search_pixel[0] = search_pixel_x[4] + 0.5;
	search_pixel[1] = search_pixel_y[4] + 0.5;
	search_pixel_to_marker_unified(frame_width, frame_height, marker, search_pixel, marker_unified);

	/* If the tracker tracked nothing, "marker_unified" is zero.
	 * Otherwise the entire patch shifted: the delta is removed from the
	 * corners (which stay center-relative) and absorbed into the center
	 * position. */
	for (i = 0; i < 4; i++) {
		marker->pattern_corners[i][0] -= marker_unified[0];
		marker->pattern_corners[i][1] -= marker_unified[1];
	}

	marker->pos[0] += marker_unified[0];
	marker->pos[1] += marker_unified[1];
}
/*********************** General purpose utility functions *************************/
/* Place a disabled marker before or after the specified ref_marker.
 *
 * When "before" is true the disabled marker goes one frame before the
 * reference one, otherwise one frame after it.
 *
 * If a marker already exists at the target frame, nothing happens unless
 * "overwrite" is true.
 */
void tracking_marker_insert_disabled(MovieTrackingTrack *track, const MovieTrackingMarker *ref_marker,
                                     bool before, bool overwrite)
{
	MovieTrackingMarker marker_new = *ref_marker;

	marker_new.flag &= ~MARKER_TRACKED;
	marker_new.flag |= MARKER_DISABLED;

	marker_new.framenr += before ? -1 : 1;

	if (overwrite || !BKE_tracking_track_has_marker_at_frame(track, marker_new.framenr))
		BKE_tracking_marker_insert(track, &marker_new);
}

@ -0,0 +1,84 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2011 Blender Foundation.
* All rights reserved.
*
* Contributor(s): Blender Foundation,
* Sergey Sharybin
* Keir Mierle
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/tracking_private.h
 * \ingroup bke
 *
 * This file contains declarations of functions which are used
 * by multiple tracking files but which should not be public.
 */
#ifndef __BKE_TRACKING_PRIVATE__
#define __BKE_TRACKING_PRIVATE__

struct GHash;
struct MovieTracking;
struct MovieTrackingMarker;
/* Forward declaration so the prototypes below do not introduce a
 * prototype-scoped struct declaration. */
struct MovieTrackingTrack;

/*********************** Tracks map *************************/

/* Accumulator for tracks (and optional per-track custom data) being
 * operated on, so the result can later be merged back into the
 * MovieTracking lists with tracks_map_merge().
 *
 * NOTE(review): the bare MovieTrackingTrack typedef in the struct assumes
 * DNA_movieclip_types.h is included before this header -- confirm all
 * includers do so. */
typedef struct TracksMap {
	char object_name[MAX_NAME];  /* tracking object the tracks belong to */
	bool is_camera;              /* when true, tracks come from (and merge into) tracking->tracks */

	int num_tracks;       /* capacity the map was created for */
	int customdata_size;  /* byte size of one custom data element, 0 when unused */

	char *customdata;            /* num_tracks * customdata_size bytes, NULL when unused */
	MovieTrackingTrack *tracks;  /* array of num_tracks copied tracks */

	struct GHash *hash;  /* maps a stored track copy to its original track */

	int ptr;  /* number of tracks inserted so far (next insertion index) */
} TracksMap;

struct TracksMap *tracks_map_new(const char *object_name, bool is_camera, int num_tracks, int customdata_size);
int tracks_map_get_size(struct TracksMap *map);
void tracks_map_get_indexed_element(struct TracksMap *map, int index, struct MovieTrackingTrack **track, void **customdata);
void tracks_map_insert(struct TracksMap *map, struct MovieTrackingTrack *track, void *customdata);
void tracks_map_free(struct TracksMap *map, void (*customdata_free)(void *customdata));
void tracks_map_merge(struct TracksMap *map, struct MovieTracking *tracking);

/*********************** Space transformation functions *************************/

/* Lower-left corner of the marker's search window in frame pixel space,
 * snapped to the pixel grid. */
void tracking_get_search_origin_frame_pixel(int frame_width, int frame_height,
                                            const struct MovieTrackingMarker *marker,
                                            float frame_pixel[2]);

/* Convert the marker's four pattern corners and center into
 * search-window pixel coordinates for the tracker. */
void tracking_get_marker_coords_for_tracking(int frame_width, int frame_height,
                                             const struct MovieTrackingMarker *marker,
                                             double search_pixel_x[5], double search_pixel_y[5]);

/* Inverse of the above: write tracked coordinates back into the marker. */
void tracking_set_marker_coords_from_tracking(int frame_width, int frame_height, struct MovieTrackingMarker *marker,
                                              const double search_pixel_x[5], const double search_pixel_y[5]);

/*********************** General purpose utility functions *************************/

/* Insert a disabled copy of ref_marker one frame before/after it,
 * optionally overwriting an existing marker at that frame. */
void tracking_marker_insert_disabled(struct MovieTrackingTrack *track, const struct MovieTrackingMarker *ref_marker,
                                     bool before, bool overwrite);

#endif  /* __BKE_TRACKING_PRIVATE__ */