Remove unused parts of the raskter module.

This commit is contained in:
Campbell Barton 2012-09-15 23:05:34 +00:00
parent d9788e4fa4
commit 518c80fc94
7 changed files with 69 additions and 2455 deletions

@ -33,11 +33,8 @@ set(INC_SYS
set(SRC
raskter.c
raskter_mt.c
raskter_kdtree.c
raskter.h
raskter_kdtree.h
)
blender_add_lib(bf_intern_raskter "${SRC}" "${INC}" "${INC_SYS}")

File diff suppressed because it is too large. [Load Diff]

@ -27,80 +27,14 @@
/** \file raskter.h
* \ingroup RASKTER
*/
/* from BLI_utildefines.h */
/* Minimum / maximum of two values. NOTE: arguments may be evaluated twice. */
#define MIN2(x, y) ( (x) < (y) ? (x) : (y) )
#define MAX2(x, y) ( (x) > (y) ? (x) : (y) )
/* Absolute value. NOTE: argument may be evaluated twice. */
#define ABS(a) ( (a) < 0 ? (-(a)) : (a) )

/* A polygon vertex in integer buffer (pixel) space. */
struct poly_vert {
	int x;
	int y;
};

/* One horizontal run of filled pixels on a single scan line. */
struct scan_line {
	int xstart;  /* first pixel of the span */
	int xend;    /* last pixel of the span */
};

/* A group of spans that starts at scan line ystart. */
struct scan_line_batch {
	int num;                   /* number of spans in slines */
	int ystart;                /* first scan line covered by this batch */
	struct scan_line *slines;  /* the spans themselves */
};

/* Active-edge bookkeeping for the scan-line rasterizer; X is stepped
 * incrementally per scan line using the drift/error fields below. */
struct e_status {
	int x;          /* current X intersection of this edge with the scan line */
	int ybeg;       /* first scan line on which this edge becomes active */
	int xshift;     /* whole-pixel X step applied every scan line */
	int xdir;       /* extra +/-1 step taken when the drift error overflows */
	int drift;      /* accumulated fractional error */
	int drift_inc;  /* error added per scan line */
	int drift_dec;  /* error removed when an xdir step is taken */
	int num;        /* remaining scan lines this edge spans */
	struct e_status *e_next;  /* next edge in the (X-sorted) list */
};

/* Output float buffer plus its dimensions and dirty bounds. */
struct r_buffer_stats {
	float *buf;  /* pixel buffer, sizex * sizey floats */
	int sizex;
	int sizey;
	int ymin;
	int ymax;
	int xmin;
	int xmax;
};

/* Per-fill working state for the rasterizer. */
struct r_fill_context {
	struct e_status *all_edges, *possible_edges;  /* pending / active edge lists */
	struct r_buffer_stats rb;
	struct scan_line *bounds;   /* span table (one entry per filled run) */
	void *kdo; //only used with kd tree
	void *kdi; //only used with kd tree
	int *bound_indexes;         /* per-scan-line index into bounds */
	int bounds_length;          /* total number of spans in bounds */
};

/* Precomputed layer data: inner/outer mask polygons plus span tables. */
struct layer_init_data {
	struct poly_vert *imask;
	struct poly_vert *omask;
	struct scan_line *bounds;
	int *bound_indexes;
	int bounds_length;
};
#ifdef __cplusplus
extern "C" {
#endif
void preprocess_all_edges(struct r_fill_context *ctx, struct poly_vert *verts, int num_verts, struct e_status *open_edge);

int PLX_init_base_data(struct layer_init_data *mlayer_data, float(*base_verts)[2], int num_base_verts,
                       float *buf, int buf_x, int buf_y);

int PLX_raskterize(float (*base_verts)[2], int num_base_verts,
                   float *buf, int buf_x, int buf_y, int do_mask_AA);

int PLX_raskterize_feather(float (*base_verts)[2], int num_base_verts,
                           float (*feather_verts)[2], int num_feather_verts,
                           float *buf, int buf_x, int buf_y);

/* FIX: removed a stray duplicated parameter-list line that followed this
 * prototype ("float *buf, int buf_x, int buf_y);") — it was a merge/diff
 * artifact and a syntax error. */
int PLX_antialias_buffer(float *buf, int buf_x, int buf_y);
#ifdef __cplusplus
}
#endif

@ -1,836 +0,0 @@
/*
This file is part of ``kdtree'', a library for working with kd-trees.
Copyright (C) 2007-2011 John Tsiombikas <nuclear@member.fsf.org>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
*/
/* single nearest neighbor search written by Tamas Nepusz <tamas@cs.rhul.ac.uk> */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "raskter_kdtree.h"
#if defined(WIN32) || defined(__WIN32__)
#include <malloc.h>
#endif
#ifdef USE_LIST_NODE_ALLOCATOR
#ifndef NO_PTHREADS
#include <pthread.h>
#else
#ifndef I_WANT_THREAD_BUGS
#error "You are compiling with the fast list node allocator, with pthreads disabled! This WILL break if used from multiple threads."
#endif /* I want thread bugs */
#endif /* pthread support */
#endif /* use list node allocator */
/* Axis-aligned bounding box in `dim` dimensions. */
struct kdhyperrect {
	int dim;
	double *min, *max;              /* minimum/maximum coords */
};

/* One node of the kd-tree. */
struct kdnode {
	double *pos;                    /* owned copy of the point's coordinates */
	int dir;                        /* splitting dimension at this node */
	void *data;                     /* caller-supplied payload */
	struct kdnode *left, *right;    /* negative/positive side */
};

/* Singly-linked result-list node; the list head is a dummy node. */
struct res_node {
	struct kdnode *item;
	double dist_sq;                 /* squared distance to the query point, -1.0 for unordered results */
	struct res_node *next;
};

struct kdtree {
	int dim;                        /* dimensionality of every point in this tree */
	struct kdnode *root;
	struct kdhyperrect *rect;       /* bounding box of all inserted points (NULL while empty) */
	void (*destr)(void*);           /* optional user-data destructor, see kd_data_destructor() */
};

/* An iterable set of query results. */
struct kdres {
	struct kdtree *tree;
	struct res_node *rlist, *riter; /* dummy-headed list and iteration cursor */
	int size;                       /* element count */
};

/* Squared value. NOTE: argument is evaluated twice. */
#define SQ(x) ((x) * (x))

static void clear_rec(struct kdnode *node, void (*destr)(void*));
static int insert_rec(struct kdnode **node, const double *pos, void *data, int dir, int dim);
static int rlist_insert(struct res_node *list, struct kdnode *item, double dist_sq);
static void clear_results(struct kdres *set);

static struct kdhyperrect* hyperrect_create(int dim, const double *min, const double *max);
static void hyperrect_free(struct kdhyperrect *rect);
static struct kdhyperrect* hyperrect_duplicate(const struct kdhyperrect *rect);
static void hyperrect_extend(struct kdhyperrect *rect, const double *pos);
static double hyperrect_dist_sq(struct kdhyperrect *rect, const double *pos);

#ifdef USE_LIST_NODE_ALLOCATOR
static struct res_node *alloc_resnode(void);
static void free_resnode(struct res_node*);
#else
/* Without the pooled allocator, result nodes come straight from the heap. */
#define alloc_resnode() malloc(sizeof(struct res_node))
#define free_resnode(n) free(n)
#endif
/* Allocate an empty k-dimensional tree. Returns NULL on allocation failure. */
struct kdtree *kd_create(int k)
{
	struct kdtree *tree = malloc(sizeof *tree);

	if (tree == NULL) {
		return NULL;
	}

	tree->dim = k;
	tree->root = NULL;
	tree->destr = NULL;
	tree->rect = NULL;
	return tree;
}
/* Destroy a tree created with kd_create(), releasing all nodes first.
 * Safe to call with NULL. */
void kd_free(struct kdtree *tree)
{
	if (tree == NULL) {
		return;
	}
	kd_clear(tree);
	free(tree);
}
/* Post-order recursive teardown of a subtree; destr (may be NULL) is
 * applied to each node's user data before the node itself is freed. */
static void clear_rec(struct kdnode *node, void (*destr)(void*))
{
	if (node == NULL) {
		return;
	}

	clear_rec(node->left, destr);
	clear_rec(node->right, destr);

	if (destr != NULL) {
		destr(node->data);
	}
	free(node->pos);
	free(node);
}
/* Remove every element from the tree and drop its bounding hyperrect. */
void kd_clear(struct kdtree *tree)
{
	clear_rec(tree->root, tree->destr);
	tree->root = NULL;

	if (tree->rect != NULL) {
		hyperrect_free(tree->rect);
		tree->rect = NULL;
	}
}
/* Register destr to be invoked on each node's user data when nodes are
 * removed from the tree (see clear_rec). Pass NULL to disable. */
void kd_data_destructor(struct kdtree *tree, void (*destr)(void*))
{
	tree->destr = destr;
}
/* Recursively insert (pos, data) below *nptr.
 * `dir` is the splitting dimension used if a new node is created here;
 * `dim` is the tree dimensionality.
 * Returns 0 on success, -1 on allocation failure. */
static int insert_rec(struct kdnode **nptr, const double *pos, void *data, int dir, int dim)
{
	int new_dir;
	struct kdnode *node;

	if(!*nptr) {
		/* Empty slot: create a leaf holding a private copy of pos. */
		if(!(node = malloc(sizeof *node))) {
			return -1;
		}
		if(!(node->pos = malloc(dim * sizeof *node->pos))) {
			free(node);
			return -1;
		}
		memcpy(node->pos, pos, dim * sizeof *node->pos);
		node->data = data;
		node->dir = dir;
		node->left = node->right = 0;
		*nptr = node;
		return 0;
	}

	node = *nptr;
	/* The splitting dimension cycles round-robin with depth. */
	new_dir = (node->dir + 1) % dim;
	if(pos[node->dir] < node->pos[node->dir]) {
		return insert_rec(&(*nptr)->left, pos, data, new_dir, dim);
	}
	return insert_rec(&(*nptr)->right, pos, data, new_dir, dim);
}
/* Insert `pos` (tree->dim doubles) with its payload `data` into the tree,
 * growing the tree's bounding hyperrect to cover the new point.
 * Returns 0 on success, -1 on allocation failure. */
int kd_insert(struct kdtree *tree, const double *pos, void *data)
{
	if (insert_rec(&tree->root, pos, data, 0, tree->dim)) {
		return -1;
	}

	if (tree->rect == 0) {
		/* First point: the bounding rect collapses onto it. */
		tree->rect = hyperrect_create(tree->dim, pos, pos);
	} else {
		hyperrect_extend(tree->rect, pos);
	}

	return 0;
}
/* Float variant of kd_insert(): widens `pos` to doubles in a scratch buffer.
 * dim <= 16 uses a stack buffer, dim <= 256 uses alloca() (unless NO_ALLOCA),
 * otherwise malloc()/free().
 * FIX: the scratch buffer was declared `static`, making this function
 * non-reentrant and unsafe to call from multiple threads; it is now
 * automatic storage (it is fully written before use, so behavior for a
 * single caller is unchanged).
 * Returns 0 on success, -1 on failure. */
int kd_insertf(struct kdtree *tree, const float *pos, void *data)
{
	double sbuf[16];
	double *bptr, *buf = 0;
	int res, dim = tree->dim;

	if(dim > 16) {
#ifndef NO_ALLOCA
		if(dim <= 256)
			bptr = buf = alloca(dim * sizeof *bptr);
		else
#endif
		if(!(bptr = buf = malloc(dim * sizeof *bptr))) {
			return -1;
		}
	} else {
		bptr = buf = sbuf;
	}

	/* Widen float coordinates to double. */
	while(dim-- > 0) {
		*bptr++ = *pos++;
	}

	res = kd_insert(tree, buf, data);
	/* Only the malloc() path above needs an explicit free. */
#ifndef NO_ALLOCA
	if(tree->dim > 256)
#else
	if(tree->dim > 16)
#endif
		free(buf);
	return res;
}
/* Convenience wrapper: insert a 3-D point given as separate doubles. */
int kd_insert3(struct kdtree *tree, double x, double y, double z, void *data)
{
	return kd_insert(tree, (double[3]){ x, y, z }, data);
}

/* Convenience wrapper: insert a 3-D point given as separate floats. */
int kd_insert3f(struct kdtree *tree, float x, float y, float z, void *data)
{
	return kd_insert(tree, (double[3]){ x, y, z }, data);
}
/* Recursively collect into `list` every node within `range` of `pos`.
 * When `ordered` is non-zero, results are kept sorted by squared distance;
 * otherwise they are tagged with dist_sq == -1.0.
 * Returns the number of nodes added, or -1 on allocation failure. */
static int find_nearest(struct kdnode *node, const double *pos, double range, struct res_node *list, int ordered, int dim)
{
	double dist_sq, dx;
	int i, ret, added_res = 0;

	if(!node) return 0;

	/* Squared distance from this node's point to the query point. */
	dist_sq = 0;
	for(i=0; i<dim; i++) {
		dist_sq += SQ(node->pos[i] - pos[i]);
	}
	if(dist_sq <= SQ(range)) {
		if(rlist_insert(list, node, ordered ? dist_sq : -1.0) == -1) {
			return -1;
		}
		added_res = 1;
	}

	/* Signed distance from the splitting plane decides which side is nearer. */
	dx = pos[node->dir] - node->pos[node->dir];

	ret = find_nearest(dx <= 0.0 ? node->left : node->right, pos, range, list, ordered, dim);
	if(ret >= 0 && fabs(dx) < range) {
		/* The search sphere crosses the splitting plane: scan the far side too. */
		added_res += ret;
		ret = find_nearest(dx <= 0.0 ? node->right : node->left, pos, range, list, ordered, dim);
	}
	if(ret == -1) {
		return -1;
	}
	added_res += ret;

	return added_res;
}
#if 0
/* Disabled draft of an N-nearest search over a result heap; the rheap_* API
 * it calls is not implemented anywhere in this module.
 * FIX (within the dead code): the draft contained `return =1;` (a syntax
 * error) instead of `return -1;`, used an undeclared `range_sq`, and fell
 * off the end of a non-void function; corrected so the draft reads as
 * valid C should it ever be revived. */
static int find_nearest_n(struct kdnode *node, const double *pos, double range, int num, struct rheap *heap, int dim)
{
	double dist_sq, dx;
	double range_sq = SQ(range); /* shrinks as the heap fills with closer hits */
	int i, ret, added_res = 0;

	if(!node) return 0;

	/* if the photon is close enough, add it to the result heap */
	dist_sq = 0;
	for(i=0; i<dim; i++) {
		dist_sq += SQ(node->pos[i] - pos[i]);
	}
	if(dist_sq <= range_sq) {
		if(heap->size >= num) {
			/* get furthest element */
			struct res_node *maxelem = rheap_get_max(heap);

			/* and check if the new one is closer than that */
			if(maxelem->dist_sq > dist_sq) {
				rheap_remove_max(heap);

				if(rheap_insert(heap, node, dist_sq) == -1) {
					return -1;
				}
				added_res = 1;

				range_sq = dist_sq;
			}
		} else {
			if(rheap_insert(heap, node, dist_sq) == -1) {
				return -1;
			}
			added_res = 1;
		}
	}

	/* find signed distance from the splitting plane */
	dx = pos[node->dir] - node->pos[node->dir];

	ret = find_nearest_n(dx <= 0.0 ? node->left : node->right, pos, range, num, heap, dim);
	if(ret >= 0 && fabs(dx) < range) {
		added_res += ret;
		ret = find_nearest_n(dx <= 0.0 ? node->right : node->left, pos, range, num, heap, dim);
	}
	if(ret == -1) {
		return -1;
	}
	added_res += ret;

	return added_res;
}
#endif
/* Recursive worker for kd_nearest(): keeps the best candidate found so far
 * in *result / *result_dist_sq, descending the nearer subtree first and
 * pruning the farther subtree whenever its (sliced) bounding hyperrect is
 * already farther away than the current best. `rect` is narrowed in place
 * while descending and restored before returning. */
static void kd_nearest_i(struct kdnode *node, const double *pos, struct kdnode **result, double *result_dist_sq, struct kdhyperrect* rect)
{
	int dir = node->dir;
	int i;
	double dummy, dist_sq;
	struct kdnode *nearer_subtree, *farther_subtree;
	double *nearer_hyperrect_coord, *farther_hyperrect_coord;

	/* Decide whether to go left or right in the tree */
	dummy = pos[dir] - node->pos[dir];
	if (dummy <= 0) {
		nearer_subtree = node->left;
		farther_subtree = node->right;
		nearer_hyperrect_coord = rect->max + dir;
		farther_hyperrect_coord = rect->min + dir;
	} else {
		nearer_subtree = node->right;
		farther_subtree = node->left;
		nearer_hyperrect_coord = rect->min + dir;
		farther_hyperrect_coord = rect->max + dir;
	}

	if (nearer_subtree) {
		/* Slice the hyperrect to get the hyperrect of the nearer subtree */
		dummy = *nearer_hyperrect_coord;
		*nearer_hyperrect_coord = node->pos[dir];
		/* Recurse down into nearer subtree */
		kd_nearest_i(nearer_subtree, pos, result, result_dist_sq, rect);
		/* Undo the slice */
		*nearer_hyperrect_coord = dummy;
	}

	/* Check the distance of the point at the current node, compare it
	 * with our best so far */
	dist_sq = 0;
	for(i=0; i < rect->dim; i++) {
		dist_sq += SQ(node->pos[i] - pos[i]);
	}
	if (dist_sq < *result_dist_sq) {
		*result = node;
		*result_dist_sq = dist_sq;
	}

	if (farther_subtree) {
		/* Get the hyperrect of the farther subtree */
		dummy = *farther_hyperrect_coord;
		*farther_hyperrect_coord = node->pos[dir];
		/* Check if we have to recurse down by calculating the closest
		 * point of the hyperrect and see if it's closer than our
		 * minimum distance in result_dist_sq. */
		if (hyperrect_dist_sq(rect, pos) < *result_dist_sq) {
			/* Recurse down into farther subtree */
			kd_nearest_i(farther_subtree, pos, result, result_dist_sq, rect);
		}
		/* Undo the slice on the hyperrect */
		*farther_hyperrect_coord = dummy;
	}
}
/* Find the single nearest neighbour of `pos`.
 * Returns a one-element result set (free with kd_res_free), or NULL on
 * allocation failure or when the tree is empty (no bounding rect yet). */
struct kdres *kd_nearest(struct kdtree *kd, const double *pos)
{
	struct kdhyperrect *rect;
	struct kdnode *result;
	struct kdres *rset;
	double dist_sq;
	int i;

	if (!kd) return 0;
	if (!kd->rect) return 0; /* nothing was ever inserted */

	/* Allocate result set */
	if(!(rset = malloc(sizeof *rset))) {
		return 0;
	}
	if(!(rset->rlist = alloc_resnode())) {
		free(rset);
		return 0;
	}
	rset->rlist->next = 0;
	rset->tree = kd;

	/* Duplicate the bounding hyperrectangle, we will work on the copy */
	if (!(rect = hyperrect_duplicate(kd->rect))) {
		kd_res_free(rset);
		return 0;
	}

	/* Our first guesstimate is the root node */
	result = kd->root;
	dist_sq = 0;
	for (i = 0; i < kd->dim; i++)
		dist_sq += SQ(result->pos[i] - pos[i]);

	/* Search for the nearest neighbour recursively */
	kd_nearest_i(kd->root, pos, &result, &dist_sq, rect);

	/* Free the copy of the hyperrect */
	hyperrect_free(rect);

	/* Store the result */
	if (result) {
		if (rlist_insert(rset->rlist, result, -1.0) == -1) {
			kd_res_free(rset);
			return 0;
		}
		rset->size = 1;
		kd_res_rewind(rset);
		return rset;
	} else {
		kd_res_free(rset);
		return 0;
	}
}
/* Float variant of kd_nearest(): widens `pos` to doubles in a scratch buffer
 * (stack for dim <= 16, alloca for dim <= 256 unless NO_ALLOCA, else malloc).
 * FIX: the scratch buffer was declared `static`, making this function
 * non-reentrant and unsafe to call from multiple threads; it is now
 * automatic storage. Returns NULL on error. */
struct kdres *kd_nearestf(struct kdtree *tree, const float *pos)
{
	double sbuf[16];
	double *bptr, *buf = 0;
	int dim = tree->dim;
	struct kdres *res;

	if(dim > 16) {
#ifndef NO_ALLOCA
		if(dim <= 256)
			bptr = buf = alloca(dim * sizeof *bptr);
		else
#endif
		if(!(bptr = buf = malloc(dim * sizeof *bptr))) {
			return 0;
		}
	} else {
		bptr = buf = sbuf;
	}

	/* Widen float coordinates to double. */
	while(dim-- > 0) {
		*bptr++ = *pos++;
	}

	res = kd_nearest(tree, buf);
	/* Only the malloc() path above needs an explicit free. */
#ifndef NO_ALLOCA
	if(tree->dim > 256)
#else
	if(tree->dim > 16)
#endif
		free(buf);
	return res;
}
/* Convenience wrapper: nearest neighbour of a 3-D point given as doubles. */
struct kdres *kd_nearest3(struct kdtree *tree, double x, double y, double z)
{
	return kd_nearest(tree, (double[3]){ x, y, z });
}

/* Convenience wrapper: nearest neighbour of a 3-D point given as floats. */
struct kdres *kd_nearest3f(struct kdtree *tree, float x, float y, float z)
{
	return kd_nearest(tree, (double[3]){ x, y, z });
}
/* ---- nearest N search ---- */
/*
static struct kdres *kd_nearest_n(struct kdtree *kd, const double *pos, int num)
{
int ret;
struct kdres *rset;
if(!(rset = malloc(sizeof *rset))) {
return 0;
}
if(!(rset->rlist = alloc_resnode())) {
free(rset);
return 0;
}
rset->rlist->next = 0;
rset->tree = kd;
if((ret = find_nearest_n(kd->root, pos, range, num, rset->rlist, kd->dim)) == -1) {
kd_res_free(rset);
return 0;
}
rset->size = ret;
kd_res_rewind(rset);
return rset;
}*/
/* Collect every point within `range` of `pos` into an (unordered) result
 * set. Returns NULL on allocation failure; otherwise the set holds zero or
 * more elements and must be freed with kd_res_free. */
struct kdres *kd_nearest_range(struct kdtree *kd, const double *pos, double range)
{
	int ret;
	struct kdres *rset;

	if(!(rset = malloc(sizeof *rset))) {
		return 0;
	}
	if(!(rset->rlist = alloc_resnode())) {
		free(rset);
		return 0;
	}
	rset->rlist->next = 0;
	rset->tree = kd;

	if((ret = find_nearest(kd->root, pos, range, rset->rlist, 0, kd->dim)) == -1) {
		kd_res_free(rset);
		return 0;
	}
	rset->size = ret;
	kd_res_rewind(rset);
	return rset;
}
/* Float variant of kd_nearest_range(): widens `pos` to doubles in a scratch
 * buffer (stack for dim <= 16, alloca for dim <= 256 unless NO_ALLOCA, else
 * malloc).
 * FIX: the scratch buffer was declared `static`, making this function
 * non-reentrant and unsafe to call from multiple threads; it is now
 * automatic storage. Returns NULL on error. */
struct kdres *kd_nearest_rangef(struct kdtree *kd, const float *pos, float range)
{
	double sbuf[16];
	double *bptr, *buf = 0;
	int dim = kd->dim;
	struct kdres *res;

	if(dim > 16) {
#ifndef NO_ALLOCA
		if(dim <= 256)
			bptr = buf = alloca(dim * sizeof *bptr);
		else
#endif
		if(!(bptr = buf = malloc(dim * sizeof *bptr))) {
			return 0;
		}
	} else {
		bptr = buf = sbuf;
	}

	/* Widen float coordinates to double. */
	while(dim-- > 0) {
		*bptr++ = *pos++;
	}

	res = kd_nearest_range(kd, buf, range);
	/* Only the malloc() path above needs an explicit free. */
#ifndef NO_ALLOCA
	if(kd->dim > 256)
#else
	if(kd->dim > 16)
#endif
		free(buf);
	return res;
}
/* Convenience wrapper: range query around a 3-D point given as doubles. */
struct kdres *kd_nearest_range3(struct kdtree *tree, double x, double y, double z, double range)
{
	return kd_nearest_range(tree, (double[3]){ x, y, z }, range);
}

/* Convenience wrapper: range query around a 3-D point given as floats. */
struct kdres *kd_nearest_range3f(struct kdtree *tree, float x, float y, float z, float range)
{
	return kd_nearest_range(tree, (double[3]){ x, y, z }, range);
}
/* Release a result set obtained from one of the kd_nearest* queries. */
void kd_res_free(struct kdres *rset)
{
	clear_results(rset);
	free_resnode(rset->rlist); /* the dummy head node */
	free(rset);
}

/* Number of elements held in the result set. */
int kd_res_size(struct kdres *set)
{
	return set->size;
}

/* Reset the result-set iterator back to the first element. */
void kd_res_rewind(struct kdres *rset)
{
	rset->riter = rset->rlist->next;
}

/* Non-zero once the iterator has moved past the last element. */
int kd_res_end(struct kdres *rset)
{
	return rset->riter == NULL;
}
/* Advance the result-set iterator.
 * Returns non-zero while a current element remains, zero at the end.
 * FIX: guarded against being called again after the iterator is already
 * exhausted — the original unconditionally dereferenced a NULL riter. */
int kd_res_next(struct kdres *rset)
{
	if (rset->riter) {
		rset->riter = rset->riter->next;
	}
	return rset->riter != 0;
}
/* Return the current element's data pointer and, when `pos` is non-NULL,
 * copy its coordinates (tree->dim doubles) into pos.
 * Returns NULL when the iterator is exhausted. */
void *kd_res_item(struct kdres *rset, double *pos)
{
	if (rset->riter == NULL) {
		return NULL;
	}
	if (pos != NULL) {
		memcpy(pos, rset->riter->item->pos, rset->tree->dim * sizeof(double));
	}
	return rset->riter->item->data;
}

/* Float variant of kd_res_item: coordinates are narrowed to float. */
void *kd_res_itemf(struct kdres *rset, float *pos)
{
	if (rset->riter == NULL) {
		return NULL;
	}
	if (pos != NULL) {
		int i;
		for (i = 0; i < rset->tree->dim; i++) {
			pos[i] = (float)rset->riter->item->pos[i];
		}
	}
	return rset->riter->item->data;
}
/* Fetch the current element's 3-D position into any non-NULL out-pointers
 * and return the element's data pointer (NULL when exhausted).
 * FIX: the original tested `if(*x)` — dereferencing the out-pointer (a
 * crash when the caller passes NULL, and a silent skip when the pointed-to
 * value happened to be 0.0) — instead of testing the pointer itself.
 * Also now returns the data pointer, consistent with kd_res_item(). */
void *kd_res_item3(struct kdres *rset, double *x, double *y, double *z)
{
	if(rset->riter) {
		if(x) *x = rset->riter->item->pos[0];
		if(y) *y = rset->riter->item->pos[1];
		if(z) *z = rset->riter->item->pos[2];
		return rset->riter->item->data;
	}
	return 0;
}

/* Float variant of kd_res_item3 (same NULL-pointer fix applied). */
void *kd_res_item3f(struct kdres *rset, float *x, float *y, float *z)
{
	if(rset->riter) {
		if(x) *x = (float)rset->riter->item->pos[0];
		if(y) *y = (float)rset->riter->item->pos[1];
		if(z) *z = (float)rset->riter->item->pos[2];
		return rset->riter->item->data;
	}
	return 0;
}
/* Shorthand for kd_res_item(set, NULL): data pointer only, no coord copy. */
void *kd_res_item_data(struct kdres *set)
{
	return kd_res_item(set, NULL);
}
/* ---- hyperrectangle helpers ---- */
/* Allocate a dim-dimensional hyperrect initialized to [min, max].
 * Both coordinate arrays are copied (dim doubles each).
 * Returns NULL on allocation failure. */
static struct kdhyperrect* hyperrect_create(int dim, const double *min, const double *max)
{
	size_t size = dim * sizeof(double);
	struct kdhyperrect* rect = 0;

	if (!(rect = malloc(sizeof(struct kdhyperrect)))) {
		return 0;
	}

	rect->dim = dim;
	if (!(rect->min = malloc(size))) {
		free(rect);
		return 0;
	}
	if (!(rect->max = malloc(size))) {
		free(rect->min);
		free(rect);
		return 0;
	}
	memcpy(rect->min, min, size);
	memcpy(rect->max, max, size);

	return rect;
}
/* Free a hyperrect and its coordinate arrays. */
static void hyperrect_free(struct kdhyperrect *rect)
{
	free(rect->min);
	free(rect->max);
	free(rect);
}

/* Deep-copy a hyperrect. Returns NULL on allocation failure. */
static struct kdhyperrect* hyperrect_duplicate(const struct kdhyperrect *rect)
{
	return hyperrect_create(rect->dim, rect->min, rect->max);
}

/* Grow the hyperrect just enough to enclose `pos`. */
static void hyperrect_extend(struct kdhyperrect *rect, const double *pos)
{
	int i;

	for (i = 0; i < rect->dim; i++) {
		const double v = pos[i];

		if (v < rect->min[i]) {
			rect->min[i] = v;
		}
		if (v > rect->max[i]) {
			rect->max[i] = v;
		}
	}
}

/* Squared distance from `pos` to the nearest point of the hyperrect
 * (zero when pos lies inside it). */
static double hyperrect_dist_sq(struct kdhyperrect *rect, const double *pos)
{
	double result = 0.0;
	int i;

	for (i = 0; i < rect->dim; i++) {
		if (pos[i] < rect->min[i]) {
			result += SQ(rect->min[i] - pos[i]);
		} else if (pos[i] > rect->max[i]) {
			result += SQ(rect->max[i] - pos[i]);
		}
	}
	return result;
}
/* ---- static helpers ---- */

#ifdef USE_LIST_NODE_ALLOCATOR
/* special list node allocators: freed res_nodes are kept on a global free
 * list and reused; access is serialized with a mutex unless NO_PTHREADS
 * is defined (in which case concurrent use is unsafe, see the #error in
 * the header section above). */
static struct res_node *free_nodes;

#ifndef NO_PTHREADS
static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

/* Pop a node from the free list, or malloc a fresh one when the list is
 * empty. NOTE(review): on the malloc path node->next is left
 * uninitialized — callers are expected to set it before use; confirm
 * before reusing this allocator elsewhere. */
static struct res_node *alloc_resnode(void)
{
	struct res_node *node;

#ifndef NO_PTHREADS
	pthread_mutex_lock(&alloc_mutex);
#endif

	if(!free_nodes) {
		node = malloc(sizeof *node);
	} else {
		node = free_nodes;
		free_nodes = free_nodes->next;
		node->next = 0;
	}

#ifndef NO_PTHREADS
	pthread_mutex_unlock(&alloc_mutex);
#endif

	return node;
}

/* Push a node back onto the global free list (it is never actually freed). */
static void free_resnode(struct res_node *node)
{
#ifndef NO_PTHREADS
	pthread_mutex_lock(&alloc_mutex);
#endif

	node->next = free_nodes;
	free_nodes = node;

#ifndef NO_PTHREADS
	pthread_mutex_unlock(&alloc_mutex);
#endif
}
#endif /* list node allocator or not */
/* inserts the item. if dist_sq is >= 0, then do an ordered insert */
/* TODO make the ordering code use heapsort */
/* `list` is the dummy head node of the result list.
 * Returns 0 on success, -1 on allocation failure. */
static int rlist_insert(struct res_node *list, struct kdnode *item, double dist_sq)
{
	struct res_node *rnode;

	if(!(rnode = alloc_resnode())) {
		return -1;
	}
	rnode->item = item;
	rnode->dist_sq = dist_sq;

	if(dist_sq >= 0.0) {
		/* Walk past every queued node that is closer than the new one. */
		while(list->next && list->next->dist_sq < dist_sq) {
			list = list->next;
		}
	}
	rnode->next = list->next;
	list->next = rnode;
	return 0;
}
/* Return every result node in the set to the allocator, leaving only the
 * (empty) dummy head. */
static void clear_results(struct kdres *rset)
{
	struct res_node *node = rset->rlist->next;

	while (node) {
		struct res_node *next = node->next;

		free_resnode(node);
		node = next;
	}
	rset->rlist->next = 0;
}

@ -1,129 +0,0 @@
/*
This file is part of ``kdtree'', a library for working with kd-trees.
Copyright (C) 2007-2011 John Tsiombikas <nuclear@member.fsf.org>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
*/
#ifndef _KDTREE_H_
#define _KDTREE_H_
#define USE_LIST_NODE_ALLOCATOR
#ifdef __cplusplus
extern "C" {
#endif
struct kdtree;
struct kdres;
/* create a kd-tree for "k"-dimensional data */
struct kdtree *kd_create(int k);
/* free the struct kdtree */
void kd_free(struct kdtree *tree);
/* remove all the elements from the tree */
void kd_clear(struct kdtree *tree);
/* if called with non-null 2nd argument, the function provided
* will be called on data pointers (see kd_insert) when nodes
* are to be removed from the tree.
*/
void kd_data_destructor(struct kdtree *tree, void (*destr)(void*));
/* insert a node, specifying its position, and optional data */
int kd_insert(struct kdtree *tree, const double *pos, void *data);
int kd_insertf(struct kdtree *tree, const float *pos, void *data);
int kd_insert3(struct kdtree *tree, double x, double y, double z, void *data);
int kd_insert3f(struct kdtree *tree, float x, float y, float z, void *data);
/* Find the nearest node from a given point.
*
* This function returns a pointer to a result set with at most one element.
*/
struct kdres *kd_nearest(struct kdtree *tree, const double *pos);
struct kdres *kd_nearestf(struct kdtree *tree, const float *pos);
struct kdres *kd_nearest3(struct kdtree *tree, double x, double y, double z);
struct kdres *kd_nearest3f(struct kdtree *tree, float x, float y, float z);
/* Find the N nearest nodes from a given point.
*
* This function returns a pointer to a result set, with at most N elements,
* which can be manipulated with the kd_res_* functions.
* The returned pointer can be null as an indication of an error. Otherwise
* a valid result set is always returned which may contain 0 or more elements.
* The result set must be deallocated with kd_res_free after use.
*/
/*
struct kdres *kd_nearest_n(struct kdtree *tree, const double *pos, int num);
struct kdres *kd_nearest_nf(struct kdtree *tree, const float *pos, int num);
struct kdres *kd_nearest_n3(struct kdtree *tree, double x, double y, double z);
struct kdres *kd_nearest_n3f(struct kdtree *tree, float x, float y, float z);
*/
/* Find any nearest nodes from a given point within a range.
*
* This function returns a pointer to a result set, which can be manipulated
* by the kd_res_* functions.
* The returned pointer can be null as an indication of an error. Otherwise
* a valid result set is always returned which may contain 0 or more elements.
* The result set must be deallocated with kd_res_free after use.
*/
struct kdres *kd_nearest_range(struct kdtree *tree, const double *pos, double range);
struct kdres *kd_nearest_rangef(struct kdtree *tree, const float *pos, float range);
struct kdres *kd_nearest_range3(struct kdtree *tree, double x, double y, double z, double range);
struct kdres *kd_nearest_range3f(struct kdtree *tree, float x, float y, float z, float range);
/* frees a result set returned by kd_nearest_range() */
void kd_res_free(struct kdres *set);
/* returns the size of the result set (in elements) */
int kd_res_size(struct kdres *set);
/* rewinds the result set iterator */
void kd_res_rewind(struct kdres *set);
/* returns non-zero if the set iterator reached the end after the last element */
int kd_res_end(struct kdres *set);
/* advances the result set iterator, returns non-zero on success, zero if
* there are no more elements in the result set.
*/
int kd_res_next(struct kdres *set);
/* returns the data pointer (can be null) of the current result set item
* and optionally sets its position to the pointers(s) if not null.
*/
void *kd_res_item(struct kdres *set, double *pos);
void *kd_res_itemf(struct kdres *set, float *pos);
void *kd_res_item3(struct kdres *set, double *x, double *y, double *z);
void *kd_res_item3f(struct kdres *set, float *x, float *y, float *z);
/* equivalent to kd_res_item(set, 0) */
void *kd_res_item_data(struct kdres *set);
#ifdef __cplusplus
}
#endif
#endif /* _KDTREE_H_ */

@ -1,290 +0,0 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2012 Blender Foundation.
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): Peter Larabell.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file raskter_mt.c
* \ingroup RASKTER
*/
#include <stdlib.h>
#include "raskter.h"
static int rast_scan_init(struct layer_init_data *mlayer_data, struct r_fill_context *ctx, struct poly_vert *verts, int num_verts) {
int x_curr; /* current pixel position in X */
int y_curr; /* current scan line being drawn */
int yp; /* y-pixel's position in frame buffer */
int swixd = 0; /* whether or not edges switched position in X */
int i=0; /* counter */
float *cpxl; /* pixel pointers... */
float *mpxl;
float *spxl;
struct e_status *e_curr; /* edge pointers... */
struct e_status *e_temp;
struct e_status *edgbuf;
struct e_status **edgec;
if(num_verts < 3) {
return(1);
}
if((edgbuf = (struct e_status *)(malloc(sizeof(struct e_status) * num_verts))) == NULL) {
return(0);
}
/* set initial bounds length to 0 */
mlayer_data->bounds_length=0;
/* round 1, count up all the possible spans in the base buffer */
preprocess_all_edges(ctx, verts, num_verts, edgbuf);
/* can happen with a zero area mask */
if (ctx->all_edges == NULL) {
free(edgbuf);
return(1);
}
ctx->possible_edges = NULL;
for(y_curr = ctx->all_edges->ybeg; (ctx->all_edges || ctx->possible_edges); y_curr++) {
for(edgec = &ctx->possible_edges; ctx->all_edges && (ctx->all_edges->ybeg == y_curr);) {
x_curr = ctx->all_edges->x; /* Set current X position. */
for(;;) { /* Start looping edges. Will break when edges run out. */
e_curr = *edgec; /* Set up a current edge pointer. */
if(!e_curr || (e_curr->x >= x_curr)) { /* If we have an no edge, or we need to skip some X-span, */
e_temp = ctx->all_edges->e_next; /* set a temp "next" edge to test. */
*edgec = ctx->all_edges; /* Add this edge to the list to be scanned. */
ctx->all_edges->e_next = e_curr; /* Set up the next edge. */
edgec = &ctx->all_edges->e_next; /* Set our list to the next edge's location in memory. */
ctx->all_edges = e_temp; /* Skip the NULL or bad X edge, set pointer to next edge. */
break; /* Stop looping edges (since we ran out or hit empty X span. */
} else {
edgec = &e_curr->e_next; /* Set the pointer to the edge list the "next" edge. */
}
}
}
yp = y_curr * ctx->rb.sizex;
spxl = ctx->rb.buf + (yp);
for(e_curr = ctx->possible_edges; e_curr; e_curr = e_curr->e_next) {
/* set up xmin and xmax bounds on this scan line */
cpxl = spxl + MAX2(e_curr->x, 0);
e_curr = e_curr->e_next;
mpxl = spxl + MIN2(e_curr->x, ctx->rb.sizex) - 1;
if((y_curr >= 0) && (y_curr < ctx->rb.sizey)) {
mlayer_data->bounds_length++;
}
}
for(edgec = &ctx->possible_edges; (e_curr = *edgec);) {
if(!(--(e_curr->num))) {
*edgec = e_curr->e_next;
} else {
e_curr->x += e_curr->xshift;
if((e_curr->drift += e_curr->drift_inc) > 0) {
e_curr->x += e_curr->xdir;
e_curr->drift -= e_curr->drift_dec;
}
edgec = &e_curr->e_next;
}
}
if(ctx->possible_edges) {
for(edgec = &ctx->possible_edges; (e_curr = *edgec)->e_next; edgec = &(*edgec)->e_next) {
/* if the current edge hits scan line at greater X than the next edge, we need to exchange the edges */
if(e_curr->x > e_curr->e_next->x) {
*edgec = e_curr->e_next;
/* exchange the pointers */
e_temp = e_curr->e_next->e_next;
e_curr->e_next->e_next = e_curr;
e_curr->e_next = e_temp;
/* set flag that we had at least one switch */
swixd = 1;
}
}
/* if we did have a switch, look for more (there will more if there was one) */
for(;;) {
/* reset exchange flag so it's only set if we encounter another one */
swixd = 0;
for(edgec = &ctx->possible_edges; (e_curr = *edgec)->e_next; edgec = &(*edgec)->e_next) {
/* again, if current edge hits scan line at higher X than next edge, exchange the edges and set flag */
if(e_curr->x > e_curr->e_next->x) {
*edgec = e_curr->e_next;
/* exchange the pointers */
e_temp = e_curr->e_next->e_next;
e_curr->e_next->e_next = e_curr;
e_curr->e_next = e_temp;
/* flip the exchanged flag */
swixd = 1;
}
}
/* if we had no exchanges, we're done reshuffling the pointers */
if(!swixd) {
break;
}
}
}
}
/*initialize index buffer and bounds buffers*/
//gets the +1 for dummy at the end
if((mlayer_data->bound_indexes = (int *)(malloc(sizeof(int) * ctx->rb.sizey+1)))==NULL) {
return(0);
}
//gets the +1 for dummy at the start
if((mlayer_data->bounds = (struct scan_line *)(malloc(sizeof(struct scan_line) * mlayer_data->bounds_length+1)))==NULL){
return(0);
}
//init all the indexes to zero (are they already zeroed from malloc???)
for(i=0;i<ctx->rb.sizey+1;i++){
mlayer_data->bound_indexes[i]=0;
}
/* round 2, fill in the full list of bounds, and create indexes to the list... */
preprocess_all_edges(ctx, verts, num_verts, edgbuf);
/* can happen with a zero area mask */
if (ctx->all_edges == NULL) {
free(edgbuf);
return(1);
}
ctx->possible_edges = NULL;
/* restart i as a counter for total span placement in buffer */
i=1;
for(y_curr = ctx->all_edges->ybeg; (ctx->all_edges || ctx->possible_edges); y_curr++) {
for(edgec = &ctx->possible_edges; ctx->all_edges && (ctx->all_edges->ybeg == y_curr);) {
x_curr = ctx->all_edges->x; /* Set current X position. */
for(;;) { /* Start looping edges. Will break when edges run out. */
e_curr = *edgec; /* Set up a current edge pointer. */
if(!e_curr || (e_curr->x >= x_curr)) { /* If we have an no edge, or we need to skip some X-span, */
e_temp = ctx->all_edges->e_next; /* set a temp "next" edge to test. */
*edgec = ctx->all_edges; /* Add this edge to the list to be scanned. */
ctx->all_edges->e_next = e_curr; /* Set up the next edge. */
edgec = &ctx->all_edges->e_next; /* Set our list to the next edge's location in memory. */
ctx->all_edges = e_temp; /* Skip the NULL or bad X edge, set pointer to next edge. */
break; /* Stop looping edges (since we ran out or hit empty X span. */
} else {
edgec = &e_curr->e_next; /* Set the pointer to the edge list the "next" edge. */
}
}
}
yp = y_curr * ctx->rb.sizex;
spxl = ctx->rb.buf + (yp);
if((y_curr >=0) && (y_curr < ctx->rb.sizey)){
ctx->bound_indexes[y_curr]=i;
}
for(e_curr = ctx->possible_edges; e_curr; e_curr = e_curr->e_next) {
/* set up xmin and xmax bounds on this scan line */
cpxl = spxl + MAX2(e_curr->x, 0);
e_curr = e_curr->e_next;
mpxl = spxl + MIN2(e_curr->x, ctx->rb.sizex) - 1;
if((y_curr >= 0) && (y_curr < ctx->rb.sizey)) {
mlayer_data->bounds[i].xstart=cpxl-spxl;
mlayer_data->bounds[i].xend=mpxl-spxl;
i++;
}
}
for(edgec = &ctx->possible_edges; (e_curr = *edgec);) {
if(!(--(e_curr->num))) {
*edgec = e_curr->e_next;
} else {
e_curr->x += e_curr->xshift;
if((e_curr->drift += e_curr->drift_inc) > 0) {
e_curr->x += e_curr->xdir;
e_curr->drift -= e_curr->drift_dec;
}
edgec = &e_curr->e_next;
}
}
if(ctx->possible_edges) {
for(edgec = &ctx->possible_edges; (e_curr = *edgec)->e_next; edgec = &(*edgec)->e_next) {
/* if the current edge hits scan line at greater X than the next edge, we need to exchange the edges */
if(e_curr->x > e_curr->e_next->x) {
*edgec = e_curr->e_next;
/* exchange the pointers */
e_temp = e_curr->e_next->e_next;
e_curr->e_next->e_next = e_curr;
e_curr->e_next = e_temp;
/* set flag that we had at least one switch */
swixd = 1;
}
}
/* if we did have a switch, look for more (there will more if there was one) */
for(;;) {
/* reset exchange flag so it's only set if we encounter another one */
swixd = 0;
for(edgec = &ctx->possible_edges; (e_curr = *edgec)->e_next; edgec = &(*edgec)->e_next) {
/* again, if current edge hits scan line at higher X than next edge, exchange the edges and set flag */
if(e_curr->x > e_curr->e_next->x) {
*edgec = e_curr->e_next;
/* exchange the pointers */
e_temp = e_curr->e_next->e_next;
e_curr->e_next->e_next = e_curr;
e_curr->e_next = e_temp;
/* flip the exchanged flag */
swixd = 1;
}
}
/* if we had no exchanges, we're done reshuffling the pointers */
if(!swixd) {
break;
}
}
}
}
free(edgbuf);
return 1;
}
/* Convert the normalized (0..1) float polygon in base_verts to integer
 * buffer-space coordinates and build the layer's span tables via
 * rast_scan_init(). Returns what the rasterizer returns: 1 on success
 * (including degenerate polygons), 0 on allocation failure. */
/* static */ int PLX_init_base_data(struct layer_init_data *mlayer_data, float(*base_verts)[2], int num_base_verts,
                                    float *buf, int buf_x, int buf_y) {
	int i;                 /* i: Loop counter. */
	struct poly_vert *ply; /* ply: Pointer to a list of integer buffer-space vertex coordinates. */
	struct r_fill_context ctx = {0};
	const float buf_x_f = (float)(buf_x);
	const float buf_y_f = (float)(buf_y);

	if((ply = (struct poly_vert *)(malloc(sizeof(struct poly_vert) * num_base_verts))) == NULL) {
		return(0);
	}

	ctx.rb.buf = buf;     /* Set the output buffer pointer. */
	ctx.rb.sizex = buf_x; /* Set the output buffer size in X. (width) */
	ctx.rb.sizey = buf_y; /* Set the output buffer size in Y. (height) */

	for(i = 0; i < num_base_verts; i++) {                      /* Loop over all base_verts. */
		ply[i].x = (int)((base_verts[i][0] * buf_x_f) + 0.5f); /* Range expand normalized X to integer buffer-space X. */
		ply[i].y = (int)((base_verts[i][1] * buf_y_f) + 0.5f); /* Range expand normalized Y to integer buffer-space Y. */
	}

	i = rast_scan_init(mlayer_data, &ctx, ply, num_base_verts); /* Call our rasterizer, passing in the integer coords for each vert. */
	free(ply);                                                  /* Free the memory allocated for the integer coordinate table. */
	return(i);                                                  /* Return the value returned by the rasterizer. */
}

@ -856,7 +856,7 @@ static void track_mask_gpencil_layer_rasterize(int frame_width, int frame_height
}
/* TODO: add an option to control whether AA is enabled or not */
PLX_raskterize((float (*)[2])mask_points, stroke->totpoints, mask, mask_width, mask_height, FALSE);
PLX_raskterize((float (*)[2])mask_points, stroke->totpoints, mask, mask_width, mask_height);
MEM_freeN(mask_points);
}