forked from bartvdbraak/blender
BMesh: hole support for intersect tool
Support cutting many outlines into a single face (creating edges between isolated regions).
This commit is contained in:
parent
bb4bff3576
commit
c593855b29
@ -29,11 +29,14 @@
|
||||
#include "MEM_guardedalloc.h"
|
||||
|
||||
#include "BLI_math.h"
|
||||
#include "BLI_memarena.h"
|
||||
#include "BLI_array.h"
|
||||
#include "BLI_alloca.h"
|
||||
#include "BLI_stackdefines.h"
|
||||
#include "BLI_linklist_stack.h"
|
||||
#include "BLI_sort.h"
|
||||
#include "BLI_sort_utils.h"
|
||||
#include "BLI_kdopbvh.h"
|
||||
|
||||
#include "BKE_customdata.h"
|
||||
|
||||
@ -580,3 +583,652 @@ bool BM_face_split_edgenet(
|
||||
#undef EDGE_NET
|
||||
|
||||
/** \} */
|
||||
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
/* Face Split Edge-Net Connect Islands */
|
||||
|
||||
/** \name BM_face_split_edgenet_connect_islands and helper functions.
|
||||
*
|
||||
* Connect isolated mesh 'islands' so they form legal regions from which we can create faces.
|
||||
*
|
||||
* Intended to be used as a pre-processing step for #BM_face_split_edgenet.
|
||||
*
|
||||
* \warning Currently this risks running out of stack memory (#alloca),
|
||||
* likely we'll pass in a memory arena (cleared each use) eventually.
|
||||
*
|
||||
* \{ */
|
||||
|
||||
#define VERT_IS_VALID BM_ELEM_INTERNAL_TAG
|
||||
|
||||
/* can be X or Y */
|
||||
#define SORT_AXIS 0
|
||||
|
||||
/**
 * Intersect edge \a e with the 2d segment (\a v_a -> \a v_b).
 *
 * \return true when the segments cross at a single point and \a e doesn't
 * share an end-point with either query vertex, writing the crossing
 * point to \a r_isect.
 */
BLI_INLINE bool edge_isect_verts_point_2d(
        const BMEdge *e, const BMVert *v_a, const BMVert *v_b,
        float r_isect[2])
{
	/* run the segment/segment test first so 'r_isect' is written
	 * exactly when the original expression would write it */
	if (isect_seg_seg_v2_point(v_a->co, v_b->co, e->v1->co, e->v2->co, r_isect) != 1) {
		return false;
	}
	/* reject edges touching either query vertex */
	return (e->v1 != v_a) && (e->v2 != v_a) &&
	       (e->v1 != v_b) && (e->v2 != v_b);
}
|
||||
|
||||
/**
 * Represents isolated edge-links,
 * each island owns contiguous slices of the vert array.
 * (edges remain in `edge_links`).
 */
struct EdgeGroupIsland {
	LinkNode edge_links;  /* keep first */
	/* number of unique verts / edges in this island */
	unsigned int vert_len, edge_len;

	/* Set the following vars once we have >1 groups */

	/* when an edge in a previous group connects to this one,
	 * so there's no need to create one pointing back. */
	unsigned int has_prev_edge : 1;

	/* verts in the group which has the lowest & highest values
	 * along #SORT_AXIS, the lower vertex is connected to the first edge */
	struct {
		BMVert *min, *max;
	} vert_span;
};
|
||||
|
||||
static int group_min_cmp_fn(const void *p1, const void *p2)
|
||||
{
|
||||
const struct EdgeGroupIsland *g1 = *(struct EdgeGroupIsland **)p1;
|
||||
const struct EdgeGroupIsland *g2 = *(struct EdgeGroupIsland **)p2;
|
||||
const float f1 = g1->vert_span.min->co[SORT_AXIS];
|
||||
const float f2 = g2->vert_span.min->co[SORT_AXIS];
|
||||
|
||||
if (f1 < f2) return -1;
|
||||
if (f1 > f2) return 1;
|
||||
else return 0;
|
||||
}
|
||||
|
||||
/** User-data for #bvhtree_test_edges_isect_2d_vert_cb (vert -> vert cast). */
struct Edges_VertVert_BVHTreeTest {
	/* full distance from v_origin to v_other, used to scale the
	 * intersection factor back into a ray distance */
	float dist_orig;
	BMEdge **edge_arr;

	BMVert *v_origin;
	BMVert *v_other;

	/* [start, end) indices of the verts in v_origin's own island */
	const unsigned int *vert_range;
};
|
||||
|
||||
/** User-data for #bvhtree_test_edges_isect_2d_ray_cb (vert -> direction cast). */
struct Edges_VertRay_BVHTreeTest {
	BMEdge **edge_arr;

	BMVert *v_origin;

	/* [start, end) indices of the verts in v_origin's own island */
	const unsigned int *vert_range;
};
|
||||
|
||||
/**
 * BVH ray-cast callback for the vert -> vert case:
 * accept an edge when it crosses the (v_origin -> v_other) segment,
 * is closer than the current hit, and belongs to a different island.
 */
static void bvhtree_test_edges_isect_2d_vert_cb(
        void *user_data, int index, const BVHTreeRay *UNUSED(ray), BVHTreeRayHit *hit)
{
	struct Edges_VertVert_BVHTreeTest *data = user_data;
	const BMEdge *e = data->edge_arr[index];
	const int v1_index = BM_elem_index_get(e->v1);
	float co_isect[2];

	if (edge_isect_verts_point_2d(e, data->v_origin, data->v_other, co_isect)) {
		/* convert the intersection factor along the segment into a ray distance */
		const float t = line_point_factor_v2(co_isect, data->v_origin->co, data->v_other->co);
		const float dist_new = data->dist_orig * t;
		/* avoid float precision issues, possible this is greater */
		if (LIKELY(dist_new < hit->dist)) {
			/* v1/v2 will both be in the same group,
			 * so testing v1 alone is enough to skip edges in the origin's own island */
			if (v1_index < (int)data->vert_range[0] ||
			    v1_index >= (int)data->vert_range[1])
			{
				hit->dist = dist_new;
				hit->index = index;
			}
		}
	}
}
|
||||
|
||||
/**
 * BVH ray-cast callback for the vert -> direction case:
 * accept an edge when the ray from v_origin crosses it,
 * it's closer than the current hit, not attached to v_origin,
 * and belongs to a different island.
 */
static void bvhtree_test_edges_isect_2d_ray_cb(
        void *user_data, int index, const BVHTreeRay *ray, BVHTreeRayHit *hit)
{
	struct Edges_VertRay_BVHTreeTest *data = user_data;
	const BMEdge *e = data->edge_arr[index];

	/* direction is normalized, so this will be the distance */
	float dist_new;
	if (isect_ray_seg_v2(data->v_origin->co, ray->direction, e->v1->co, e->v2->co, &dist_new, NULL)) {
		/* avoid float precision issues, possible this is greater */
		if (LIKELY(dist_new < hit->dist)) {
			/* unlike the vert callback, edges attached to the origin must be
			 * skipped explicitly here (the ray has no target vertex to exclude) */
			if (e->v1 != data->v_origin && e->v2 != data->v_origin) {
				const int v1_index = BM_elem_index_get(e->v1);
				/* v1/v2 will both be in the same group */
				if (v1_index < (int)data->vert_range[0] ||
				    v1_index >= (int)data->vert_range[1])
				{
					hit->dist = dist_new;
					hit->index = index;
				}
			}
		}
	}
}
|
||||
|
||||
/**
 * Store values for:
 * - #bm_face_split_edgenet_find_connection
 * - #test_edges_isect_2d
 * ... which don't change each call.
 */
struct EdgeGroup_FindConnection_Args {
	/* spatial lookup over 'edge_arr' (one leaf per edge, same indices) */
	BVHTree *bvhtree;
	BMEdge **edge_arr;
	unsigned int edge_arr_len;

	/* edges created while connecting islands; not in the BVH-tree,
	 * so they're scanned linearly (length grows during the search) */
	BMEdge **edge_arr_new;
	unsigned int edge_arr_new_len;

	/* [start, end) vert indices of the island currently being connected */
	const unsigned int *vert_range;
};
|
||||
|
||||
/**
 * Cast from \a v_origin towards \a v_other and return the first edge that
 * blocks the connection (from another island), or NULL when the path is clear.
 *
 * Checks the BVH-tree of original edges first, then linearly scans the
 * newly created connection edges (which are not in the tree).
 */
static BMEdge *test_edges_isect_2d_vert(
        const struct EdgeGroup_FindConnection_Args *args,
        BMVert *v_origin, BMVert *v_other)
{
	int index;

	BVHTreeRayHit hit = {0};
	float dir[3];

	sub_v2_v2v2(dir, v_other->co, v_origin->co);
	dir[2] = 0.0f;
	hit.index = -1;
	/* limit the cast to the segment length, so hits beyond v_other are ignored */
	hit.dist = normalize_v2(dir);

	struct Edges_VertVert_BVHTreeTest user_data = {0};
	user_data.dist_orig = hit.dist;
	user_data.edge_arr = args->edge_arr;
	user_data.v_origin = v_origin;
	user_data.v_other = v_other;
	user_data.vert_range = args->vert_range;

	index = BLI_bvhtree_ray_cast_ex(
	        args->bvhtree, v_origin->co, dir, 0.0f, &hit,
	        bvhtree_test_edges_isect_2d_vert_cb, &user_data, 0);

	BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;

	/* check existing connections (no spatial optimization here since we're continually adding). */
	if (LIKELY(index == -1)) {
		/* factor along (v_origin -> v_other); start at the far end-point */
		float t_best = 1.0f;
		for (unsigned int i = 0; i < args->edge_arr_new_len; i++) {
			float co_isect[2];
			if (UNLIKELY(edge_isect_verts_point_2d(args->edge_arr_new[i], v_origin, v_other, co_isect))) {
				const float t_test = line_point_factor_v2(co_isect, v_origin->co, v_other->co);
				if (t_test < t_best) {
					t_best = t_test;

					e_hit = args->edge_arr_new[i];
				}
			}
		}
	}

	return e_hit;
}
|
||||
|
||||
/**
 * Similar to #test_edges_isect_2d_vert but we're casting into a direction,
 * (not to a vertex)
 *
 * \return the nearest blocking edge, or NULL when nothing is hit.
 */
static BMEdge *test_edges_isect_2d_ray(
        const struct EdgeGroup_FindConnection_Args *args,
        BMVert *v_origin, const float dir[3])
{
	int index;
	BVHTreeRayHit hit = {0};

	BLI_ASSERT_UNIT_V2(dir);

	hit.index = -1;
	/* open-ended cast (unlike the vert variant which limits to the segment) */
	hit.dist = FLT_MAX;

	struct Edges_VertRay_BVHTreeTest user_data = {0};
	user_data.edge_arr = args->edge_arr;
	user_data.v_origin = v_origin;
	user_data.vert_range = args->vert_range;

	index = BLI_bvhtree_ray_cast_ex(
	        args->bvhtree, v_origin->co, dir, 0.0f, &hit,
	        bvhtree_test_edges_isect_2d_ray_cb, &user_data, 0);

	BMEdge *e_hit = (index != -1) ? args->edge_arr[index] : NULL;

	/* check existing connections (no spatial optimization here since we're continually adding). */
	/* NOTE(review): the sibling #test_edges_isect_2d_vert guards this fallback scan with
	 * (index == -1); here the scan only runs when the BVH *did* hit something — confirm
	 * this asymmetry is intended (a new edge could be the nearest when index == -1 too). */
	if (LIKELY(index != -1)) {
		for (unsigned int i = 0; i < args->edge_arr_new_len; i++) {
			BMEdge *e = args->edge_arr_new[i];
			float dist_new;
			if (isect_ray_seg_v2(v_origin->co, dir, e->v1->co, e->v2->co, &dist_new, NULL)) {
				if (e->v1 != v_origin && e->v2 != v_origin) {
					/* avoid float precision issues, possible this is greater */
					if (LIKELY(dist_new < hit.dist)) {
						hit.dist = dist_new;

						e_hit = args->edge_arr_new[i];
					}
				}
			}
		}
	}

	return e_hit;
}
|
||||
|
||||
static int bm_face_split_edgenet_find_connection(
|
||||
const struct EdgeGroup_FindConnection_Args *args,
|
||||
BMVert *v_origin,
|
||||
/* false = negative, true = positive */
|
||||
bool direction_sign)
|
||||
{
|
||||
/**
|
||||
* Method for finding connection is as follows:
|
||||
*
|
||||
* - Cast a ray along either the positive or negative directions.
|
||||
* - Take the hit-edge, and cast rays to their vertices checking those rays don't intersect a closer edge.
|
||||
* - Keep taking the hit-edge and testing its verts until a vertex is found which isn't blocked by an edge.
|
||||
*
|
||||
* \note It's possible none of the verts can be accessed (with self-intersecting lines).
|
||||
* In that case theres no right answer (without subdividing edges),
|
||||
* so return a fall-back vertex in that case.
|
||||
*/
|
||||
|
||||
const float dir[3] = {[SORT_AXIS] = direction_sign ? 1.0 : -1.0f};
|
||||
BMEdge *e_hit = test_edges_isect_2d_ray(args, v_origin, dir);
|
||||
BMVert *v_other = NULL;
|
||||
|
||||
if (e_hit) {
|
||||
BMVert *v_other_fallback = NULL;
|
||||
|
||||
BLI_SMALLSTACK_DECLARE(vert_search, BMVert *);
|
||||
|
||||
/* ensure we never add verts multiple times (not all that likely - but possible) */
|
||||
BLI_SMALLSTACK_DECLARE(vert_blacklist, BMVert *);
|
||||
|
||||
do {
|
||||
BMVert *v_pair[2];
|
||||
/* ensure the closest vertex is popped back off the stack first */
|
||||
if (len_squared_v2v2(v_origin->co, e_hit->v1->co) >
|
||||
len_squared_v2v2(v_origin->co, e_hit->v2->co))
|
||||
{
|
||||
ARRAY_SET_ITEMS(v_pair, e_hit->v1, e_hit->v2);
|
||||
}
|
||||
else {
|
||||
ARRAY_SET_ITEMS(v_pair, e_hit->v2, e_hit->v1);
|
||||
}
|
||||
|
||||
for (int j = 0; j < 2; j++) {
|
||||
BMVert *v_iter = v_pair[j];
|
||||
if (BM_elem_flag_test(v_iter, VERT_IS_VALID)) {
|
||||
if (direction_sign ? (v_iter->co[SORT_AXIS] >= v_origin->co[SORT_AXIS]) :
|
||||
(v_iter->co[SORT_AXIS] <= v_origin->co[SORT_AXIS]))
|
||||
{
|
||||
BLI_SMALLSTACK_PUSH(vert_search, v_iter);
|
||||
BLI_SMALLSTACK_PUSH(vert_blacklist, v_iter);
|
||||
BM_elem_flag_disable(v_iter, VERT_IS_VALID);
|
||||
}
|
||||
}
|
||||
}
|
||||
v_other_fallback = v_other;
|
||||
|
||||
} while ((v_other = BLI_SMALLSTACK_POP(vert_search)) &&
|
||||
(e_hit = test_edges_isect_2d_vert(args, v_origin, v_other)));
|
||||
|
||||
if (v_other == NULL) {
|
||||
printf("Using fallback\n");
|
||||
v_other = v_other_fallback;
|
||||
}
|
||||
|
||||
/* reset the blacklist flag, for future use */
|
||||
BMVert *v;
|
||||
while ((v = BLI_SMALLSTACK_POP(vert_blacklist))) {
|
||||
BM_elem_flag_enable(v, VERT_IS_VALID);
|
||||
}
|
||||
}
|
||||
|
||||
/* if we reach this line, v_other is either the best vertex or its NULL */
|
||||
return v_other ? BM_elem_index_get(v_other) : -1;
|
||||
}
|
||||
|
||||
/**
 * For when the edge-net has holes in it - this connects them.
 *
 * \param mem_arena: Avoids many small allocs & should be cleared after each use.
 * take care since \a r_edge_net_new is allocated from \a mem_arena.
 */
bool BM_face_split_edgenet_connect_islands(
        BMesh *bm,
        BMFace *f, BMEdge **edge_net_init, const unsigned int edge_net_init_len,
        MemArena *mem_arena,
        BMEdge ***r_edge_net_new, unsigned int *r_edge_net_new_len)
{
	/* -------------------------------------------------------------------- */
	/* This function has 2 main parts.
	 *
	 * - Check if there are any holes.
	 * - Connect the holes with edges (if any are found).
	 *
	 * Keep the first part fast since it will run very often for edge-nets that have no holes.
	 *
	 * \note Don't use the mem_arena unless we have holes to fill.
	 * (avoid thrashing the arena when the initial check isn't so intensive on the stack).
	 */

	/* all edges: the input net plus the face's own boundary loop */
	const unsigned int edge_arr_len = (unsigned int)edge_net_init_len + (unsigned int)f->len;
	BMEdge **edge_arr = BLI_array_alloca(edge_arr, edge_arr_len);
	bool ok = false;

	memcpy(edge_arr, edge_net_init, sizeof(*edge_arr) * (size_t)edge_net_init_len);

	/* _must_ clear on exit */
#define EDGE_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG
#define VERT_NOT_IN_STACK  BM_ELEM_INTERNAL_TAG

	/* append the face's boundary edges after the net edges */
	{
		unsigned int i = edge_net_init_len;
		BMLoop *l_iter, *l_first;
		l_iter = l_first = BM_FACE_FIRST_LOOP(f);
		do {
			edge_arr[i++] = l_iter->e;
		} while ((l_iter = l_iter->next) != l_first);
		BLI_assert(i == edge_arr_len);
	}

	/* tag everything as not-yet-visited for the flood-fill below */
	for (unsigned int i = 0; i < edge_arr_len; i++) {
		BM_elem_flag_enable(edge_arr[i], EDGE_NOT_IN_STACK);
		BM_elem_flag_enable(edge_arr[i]->v1, VERT_NOT_IN_STACK);
		BM_elem_flag_enable(edge_arr[i]->v2, VERT_NOT_IN_STACK);
	}

	/* Flood-fill connected edges into islands (groups). */
	unsigned int group_arr_len = 0;
	LinkNode *group_head = NULL;
	{
		/* scan 'edge_arr' backwards so the outer face boundary is handled first
		 * (since its likely to be the largest) */
		unsigned int edge_index = (edge_arr_len - 1);
		unsigned int edge_in_group_tot = 0;

		BLI_SMALLSTACK_DECLARE(vstack, BMVert *);

		while (true) {
			LinkNode *edge_links = NULL;
			unsigned int unique_verts_in_group = 0, unique_edges_in_group = 0;

			/* list of groups */
			BLI_assert(BM_elem_flag_test(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK));
			BLI_SMALLSTACK_PUSH(vstack, edge_arr[edge_index]->v1);
			BM_elem_flag_disable(edge_arr[edge_index]->v1, VERT_NOT_IN_STACK);

			/* walk the disk cycle of every reachable vert, collecting edges */
			BMVert *v_iter;
			while ((v_iter = BLI_SMALLSTACK_POP(vstack))) {
				unique_verts_in_group++;

				BMEdge *e_iter = v_iter->e;
				do {
					if (BM_elem_flag_test(e_iter, EDGE_NOT_IN_STACK)) {
						BM_elem_flag_disable(e_iter, EDGE_NOT_IN_STACK);
						unique_edges_in_group++;

						BLI_linklist_prepend_alloca(&edge_links, e_iter);

						BMVert *v_other = BM_edge_other_vert(e_iter, v_iter);
						if (BM_elem_flag_test(v_other, VERT_NOT_IN_STACK)) {
							BLI_SMALLSTACK_PUSH(vstack, v_other);
							BM_elem_flag_disable(v_other, VERT_NOT_IN_STACK);
						}
					}
				} while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v_iter)) != v_iter->e);
			}

			/* stack allocation: groups only live for the duration of this call */
			struct EdgeGroupIsland *g = alloca(sizeof(*g));
			g->vert_len = unique_verts_in_group;
			g->edge_len = unique_edges_in_group;
			edge_in_group_tot += unique_edges_in_group;

			/* 'g' doubles as the list node (LinkNode is its first member) */
			BLI_linklist_prepend_nlink(&group_head, edge_links, (LinkNode *)g);

			group_arr_len++;

			/* all edges accounted for - done */
			if (edge_in_group_tot == edge_arr_len) {
				break;
			}

			/* skip edges in the stack */
			while (BM_elem_flag_test(edge_arr[edge_index], EDGE_NOT_IN_STACK) == false) {
				BLI_assert(edge_index != 0);
				edge_index--;
			}
		}
	}

	/* single group - no holes */
	if (group_arr_len == 1) {
		goto finally;
	}


	/* -------------------------------------------------------------------- */
	/* Previous checks need to be kept fast, since they will run very often,
	 * now we know there are holes, so calculate a spatial lookup info and
	 * other per-group data.
	 */

	/* _must_ clear on exit (same bit as the *_NOT_IN_STACK flags, reused) */
#define VERT_IN_ARRAY BM_ELEM_INTERNAL_TAG

	struct EdgeGroupIsland **group_arr = BLI_memarena_alloc(mem_arena, sizeof(*group_arr) * group_arr_len);
	unsigned int vert_arr_len = 0;
	/* sort groups by lowest value vertex */
	{
		/* fill 'groups_arr' in reverse order so the boundary face is first */
		struct EdgeGroupIsland **group_arr_p = &group_arr[group_arr_len];

		for (struct EdgeGroupIsland *g = (void *)group_head; g; g = (struct EdgeGroupIsland *)g->edge_links.next) {
			LinkNode *edge_links = g->edge_links.link;

			/* init with *any* different verts */
			g->vert_span.min = ((BMEdge *)edge_links->link)->v1;
			g->vert_span.max = ((BMEdge *)edge_links->link)->v2;

			do {
				BMEdge *e = edge_links->link;
				BLI_assert(e->head.htype == BM_EDGE);

				for (int j = 0; j < 2; j++) {
					BMVert *v_iter = (&e->v1)[j];
					BLI_assert(v_iter->head.htype == BM_VERT);
					const float axis_value = v_iter->co[SORT_AXIS];

					if (axis_value < g->vert_span.min->co[SORT_AXIS]) {
						g->vert_span.min = v_iter;
					}
					if (axis_value > g->vert_span.max->co[SORT_AXIS]) {
						g->vert_span.max = v_iter;
					}
				}
			} while ((edge_links = edge_links->next));

			g->has_prev_edge = false;

			vert_arr_len += g->vert_len;

			*(--group_arr_p) = g;
		}
	}

	qsort(group_arr, group_arr_len, sizeof(*group_arr), group_min_cmp_fn);

	/* we don't know how many unique verts there are connecting the edges, so over-alloc */
	BMVert **vert_arr = BLI_memarena_alloc(mem_arena, sizeof(*vert_arr) * vert_arr_len);
	/* map vertex -> group index */
	unsigned int *verts_group_table = BLI_memarena_alloc(mem_arena, sizeof(*verts_group_table) * vert_arr_len);

	/* original 3d coordinates, restored before returning */
	float (*vert_coords_backup)[3] = BLI_memarena_alloc(mem_arena, sizeof(*vert_coords_backup) * vert_arr_len);

	/* Project all island verts into the face's 2d plane (in-place on v->co). */
	{
		float axis_mat[3][3];
		axis_dominant_v3_to_m3(axis_mat, f->no);
		/* relative location, for higher precision calculations */
		const float f_co_ref[3] = {UNPACK3(BM_FACE_FIRST_LOOP(f)->v->co)};

		int v_index = 0;  /* global vert index */
		for (unsigned int g_index = 0; g_index < group_arr_len; g_index++) {
			LinkNode *edge_links = group_arr[g_index]->edge_links.link;
			do {
				BMEdge *e = edge_links->link;
				for (int j = 0; j < 2; j++) {
					BMVert *v_iter = (&e->v1)[j];
					if (!BM_elem_flag_test(v_iter, VERT_IN_ARRAY)) {
						BM_elem_flag_enable(v_iter, VERT_IN_ARRAY);

						/* not nice, but alternatives arent much better :S */
						{
							copy_v3_v3(vert_coords_backup[v_index], v_iter->co);

							/* for higher precision */
							sub_v3_v3(v_iter->co, f_co_ref);

							float co_2d[2];
							mul_v2_m3v3(co_2d, axis_mat, v_iter->co);
							v_iter->co[0] = co_2d[0];
							v_iter->co[1] = co_2d[1];
							v_iter->co[2] = 0.0f;
						}

						BM_elem_index_set(v_iter, v_index);  /* set_dirty */

						vert_arr[v_index] = v_iter;
						verts_group_table[v_index] = g_index;
						v_index++;
					}
				}
			} while ((edge_links = edge_links->next));
		}
	}

	bm->elem_index_dirty |= BM_VERT;

	/* Now create bvh tree
	 * (edges stored as degenerate 2-point leaves, indexed as in 'edge_arr') */
	BVHTree *bvhtree = BLI_bvhtree_new(edge_arr_len, 0.0f, 8, 8);
	for (unsigned int i = 0; i < edge_arr_len; i++) {
		const float e_cos[2][3] = {
		    {UNPACK2(edge_arr[i]->v1->co), 0.0f},
		    {UNPACK2(edge_arr[i]->v2->co), 0.0f},
		};
		BLI_bvhtree_insert(bvhtree, i, (const float *)e_cos, 2);
	}
	BLI_bvhtree_balance(bvhtree);

	/* Create connections between groups */

	/* may be an over-alloc, but not by much
	 * (each island after the first adds at most 2 connecting edges) */
	unsigned int edge_net_new_len = (unsigned int)edge_net_init_len + ((group_arr_len - 1) * 2);
	BMEdge **edge_net_new = BLI_memarena_alloc(mem_arena, sizeof(*edge_net_new) * edge_net_new_len);
	memcpy(edge_net_new, edge_net_init, sizeof(*edge_net_new) * (size_t)edge_net_init_len);

	{
		unsigned int edge_net_new_index = edge_net_init_len;
		/* start-end of the verts in the current group */

		unsigned int vert_range[2];

		vert_range[0] = 0;
		vert_range[1] = group_arr[0]->vert_len;

		struct EdgeGroup_FindConnection_Args args = {
			.bvhtree = bvhtree,

			/* use the new edge array so we can scan edges which have been added */
			.edge_arr = edge_arr,
			.edge_arr_len = edge_arr_len,

			/* we only want to check newly created edges */
			.edge_arr_new = edge_net_new + edge_net_init_len,
			.edge_arr_new_len = 0,

			.vert_range = vert_range,
		};

		/* groups are sorted along SORT_AXIS, each connects back (min) and forward (max) */
		for (unsigned int g_index = 1; g_index < group_arr_len; g_index++) {
			struct EdgeGroupIsland *g = group_arr[g_index];

			/* the range of verts this group uses in 'verts_arr' (not including the last index) */
			vert_range[0] = vert_range[1];
			vert_range[1] += g->vert_len;

			if (g->has_prev_edge == false) {
				BMVert *v_origin = g->vert_span.min;

				const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, false);
				// BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);

				/* only for degenerate geometry */
				if (index_other != -1) {
					BMVert *v_end = vert_arr[index_other];

					edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
					edge_net_new_index++;
					args.edge_arr_new_len++;
				}
			}

			{
				BMVert *v_origin = g->vert_span.max;

				const int index_other = bm_face_split_edgenet_find_connection(&args, v_origin, true);
				// BLI_assert(index_other >= 0 && index_other < (int)vert_arr_len);

				/* only for degenerate geometry */
				if (index_other != -1) {
					BMVert *v_end = vert_arr[index_other];

					edge_net_new[edge_net_new_index] = BM_edge_create(bm, v_origin, v_end, NULL, 0);
					edge_net_new_index++;
					args.edge_arr_new_len++;

					/* tell the 'next' group it doesn't need to create its own back-link */
					unsigned int g_index_other = verts_group_table[index_other];
					group_arr[g_index_other]->has_prev_edge = true;
				}
			}

		}
		BLI_assert(edge_net_new_len >= edge_net_new_index);
		edge_net_new_len = edge_net_new_index;
	}

	BLI_bvhtree_free(bvhtree);

	*r_edge_net_new = edge_net_new;
	*r_edge_net_new_len = edge_net_new_len;
	ok = true;

	/* restore the original 3d coordinates */
	for (unsigned int i = 0; i < vert_arr_len; i++) {
		copy_v3_v3(vert_arr[i]->co, vert_coords_backup[i]);
	}

finally:
	/* clear the temporary flags (shared BM_ELEM_INTERNAL_TAG bit) */
	for (unsigned int i = 0; i < edge_arr_len; i++) {
		BM_elem_flag_disable(edge_arr[i], EDGE_NOT_IN_STACK);
		BM_elem_flag_disable(edge_arr[i]->v1, VERT_NOT_IN_STACK);
		BM_elem_flag_disable(edge_arr[i]->v2, VERT_NOT_IN_STACK);
	}

#undef VERT_IN_ARRAY
#undef VERT_NOT_IN_STACK
#undef EDGE_NOT_IN_STACK

	return ok;
}
|
||||
|
||||
#undef SORT_AXIS
|
||||
|
||||
/** \} */
|
||||
|
@ -30,5 +30,11 @@ bool BM_face_split_edgenet(
|
||||
BMEdge **edge_net, const int edge_net_len,
|
||||
BMFace ***r_face_arr, int *r_face_arr_len);
|
||||
|
||||
bool BM_face_split_edgenet_connect_islands(
|
||||
BMesh *bm,
|
||||
BMFace *f, BMEdge **edge_net_init, const unsigned int edge_net_init_len,
|
||||
struct MemArena *arena,
|
||||
BMEdge ***r_edge_net_new, unsigned int *r_edge_net_new_len)
|
||||
ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1, 2, 3, 5, 6, 7);
|
||||
|
||||
#endif /* __BMESH_POLYGON_EDGENET_H__ */
|
||||
|
@ -70,6 +70,8 @@
|
||||
#define USE_SEPARATE
|
||||
/* remove verts created by intersecting triangles */
|
||||
#define USE_DISSOLVE
|
||||
/* detect isolated holes and fill them */
|
||||
#define USE_NET_ISLAND_CONNECT
|
||||
|
||||
/* strict asserts that may fail in practice (handy for debugging cases which should succeed) */
|
||||
// #define USE_PARANOID
|
||||
@ -230,12 +232,13 @@ static void face_edges_add(
|
||||
|
||||
#ifdef USE_NET
|
||||
static void face_edges_split(
|
||||
BMesh *bm,
|
||||
BMFace *f,
|
||||
struct LinkBase *e_ls_base)
|
||||
BMesh *bm, BMFace *f, struct LinkBase *e_ls_base,
|
||||
bool use_island_connect,
|
||||
MemArena *mem_arena_edgenet)
|
||||
{
|
||||
unsigned int i;
|
||||
BMEdge **edge_arr = BLI_array_alloca(edge_arr, e_ls_base->list_len);
|
||||
unsigned int edge_arr_len = e_ls_base->list_len;
|
||||
BMEdge **edge_arr = BLI_array_alloca(edge_arr, edge_arr_len);
|
||||
LinkNode *node;
|
||||
BLI_assert(f->head.htype == BM_FACE);
|
||||
|
||||
@ -248,7 +251,28 @@ static void face_edges_split(
|
||||
printf("# %s: %d %u\n", __func__, BM_elem_index_get(f), e_ls_base->list_len);
|
||||
#endif
|
||||
|
||||
BM_face_split_edgenet(bm, f, edge_arr, (int)e_ls_base->list_len, NULL, NULL);
|
||||
#ifdef USE_NET_ISLAND_CONNECT
|
||||
if (use_island_connect) {
|
||||
unsigned int edge_arr_holes_len;
|
||||
BMEdge **edge_arr_holes;
|
||||
if (BM_face_split_edgenet_connect_islands(
|
||||
bm, f,
|
||||
edge_arr, edge_arr_len,
|
||||
mem_arena_edgenet,
|
||||
&edge_arr_holes, &edge_arr_holes_len))
|
||||
{
|
||||
/* newly created wire edges need to be tagged */
|
||||
for (i = edge_arr_len; i < edge_arr_holes_len; i++) {
|
||||
BM_elem_flag_enable(edge_arr_holes[i], BM_ELEM_TAG);
|
||||
}
|
||||
|
||||
edge_arr_len = edge_arr_holes_len;
|
||||
edge_arr = edge_arr_holes; /* owned by the arena */
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
BM_face_split_edgenet(bm, f, edge_arr, (int)edge_arr_len, NULL, NULL);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -777,13 +801,14 @@ static void bm_isect_tri_tri(
|
||||
* Intersect tessellated faces
|
||||
* leaving the resulting edges tagged.
|
||||
*
|
||||
* \param test_fn Return value: -1: skip, 0: tree_a, 1: tree_b (use_self == false)
|
||||
* \param test_fn: Return value: -1: skip, 0: tree_a, 1: tree_b (use_self == false)
|
||||
* \param use_island_connect: Create edges connecting face to isolated edge-regions so cuts can be made.
|
||||
*/
|
||||
bool BM_mesh_intersect(
|
||||
BMesh *bm,
|
||||
struct BMLoop *(*looptris)[3], const int looptris_tot,
|
||||
int (*test_fn)(BMFace *f, void *user_data), void *user_data,
|
||||
const bool use_self, const bool use_separate,
|
||||
const bool use_self, const bool use_separate, const bool use_island_connect,
|
||||
const float eps)
|
||||
{
|
||||
struct ISectState s;
|
||||
@ -978,7 +1003,7 @@ bool BM_mesh_intersect(
|
||||
}
|
||||
|
||||
#ifdef USE_DUMP
|
||||
printf("# SPLITTING EDGE: %d, %d\n", e_index, v_ls_base->list_len);
|
||||
printf("# SPLITTING EDGE: %d, %d\n", BM_elem_index_get(e), v_ls_base->list_len);
|
||||
#endif
|
||||
/* intersect */
|
||||
is_wire = BLI_gset_haskey(s.wire_edges, e);
|
||||
@ -1249,6 +1274,8 @@ bool BM_mesh_intersect(
|
||||
GHashIterator gh_iter;
|
||||
BMFace **faces;
|
||||
|
||||
MemArena *mem_arena_edgenet = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
|
||||
|
||||
faces = bm->ftable;
|
||||
|
||||
GHASH_ITER (gh_iter, s.face_edges) {
|
||||
@ -1265,8 +1292,12 @@ bool BM_mesh_intersect(
|
||||
|
||||
BLI_assert(BM_elem_index_get(f) == f_index);
|
||||
|
||||
face_edges_split(bm, f, e_ls_base);
|
||||
face_edges_split(bm, f, e_ls_base, use_island_connect, mem_arena_edgenet);
|
||||
|
||||
BLI_memarena_clear(mem_arena_edgenet);
|
||||
}
|
||||
|
||||
BLI_memarena_free(mem_arena_edgenet);
|
||||
}
|
||||
#endif /* USE_NET */
|
||||
(void)totface_orig;
|
||||
|
@ -29,7 +29,7 @@ bool BM_mesh_intersect(
|
||||
BMesh *bm,
|
||||
struct BMLoop *(*looptris)[3], const int looptris_tot,
|
||||
int (*test_fn)(BMFace *f, void *user_data), void *user_data,
|
||||
const bool use_self, const bool use_separate,
|
||||
const bool use_self, const bool use_separate, const bool use_island_connect,
|
||||
const float eps);
|
||||
|
||||
#endif /* __BMESH_INTERSECT_H__ */
|
||||
|
@ -123,7 +123,7 @@ static int edbm_intersect_exec(bContext *C, wmOperator *op)
|
||||
bm,
|
||||
em->looptris, em->tottri,
|
||||
test_fn, NULL,
|
||||
use_self, use_separate,
|
||||
use_self, use_separate, true,
|
||||
eps);
|
||||
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user