Refactor: Merge non-functional-change part of 'edit normals' 2017 GSoC.

This merges the internal, runtime-only changes to the existing custom
normals code. These changes make sense on their own, and merging them now
will make the diff of the soc branch lighter and easier to review.

In detail, it mostly changes two things:

* Smooth fans (a.k.a. MLoopNorSpace) can now store either loop
indices, or pointers to the BMLoops themselves. This makes sense since in
BMesh it is relatively easy to get the index of a BMElement, but nearly
impractical to go the other way around.

* The first change forces a second one: we can no longer rely on
`loops` being NULL in MLoopNorSpace to detect single-loop fans, so that
information is now stored in a new flag (see the sketch below the
commit metadata for how readers handle both cases).

Again, these are expected to be totally non-functional changes.
Author: Bastien Montagne
Date: 2018-03-01 16:54:21 +01:00
Parent: 7377d411b4
Commit: 1178518a68
5 changed files with 102 additions and 41 deletions

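To make the two bullet points above concrete, here is a minimal sketch (not part of the commit) of how a reader of a smooth fan has to interpret MLoopNorSpace.loops after this change. It assumes the declarations from BKE_mesh.h shown in the diff below; visit_index() and visit_bmloop() are hypothetical callbacks standing in for whatever per-loop work is needed:

/* Hypothetical callbacks, not part of Blender's API. */
void visit_index(const int loop_index);
void visit_bmloop(struct BMLoop *l);

static void lnor_space_visit_loops(const MLoopNorSpaceArray *lnors_spacearr,
                                   const MLoopNorSpace *lnor_space)
{
	const bool is_index = (lnors_spacearr->data_type == MLNOR_SPACEARR_LOOP_INDEX);

	if (lnor_space->flags & MLNOR_SPACE_IS_SINGLE) {
		/* Single-loop fan: the payload is stored directly in the 'loops' pointer,
		 * no LinkNode is allocated at all. */
		if (is_index) {
			visit_index(GET_INT_FROM_POINTER(lnor_space->loops));
		}
		else {
			visit_bmloop((struct BMLoop *)lnor_space->loops);
		}
	}
	else {
		/* Multi-loop fan: 'loops' is an actual linked list, one payload per node. */
		for (const LinkNode *lnode = lnor_space->loops; lnode; lnode = lnode->next) {
			if (is_index) {
				visit_index(GET_INT_FROM_POINTER(lnode->link));
			}
			else {
				visit_bmloop((struct BMLoop *)lnode->link);
			}
		}
	}
}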
@@ -195,6 +195,10 @@ void BKE_mesh_loop_tangents_ex(
struct ReportList *reports);
void BKE_mesh_loop_tangents(
struct Mesh *mesh, const char *uvmap, float (*r_looptangents)[4], struct ReportList *reports);
void BKE_mesh_loop_manifold_fan_around_vert_next(
const struct MLoop *mloops, const struct MPoly *mpolys,
const int *loop_to_poly, const int *e2lfan_curr, const uint mv_pivot_index,
const struct MLoop **r_mlfan_curr, int *r_mlfan_curr_index, int *r_mlfan_vert_index, int *r_mpfan_curr_index);
void BKE_edges_sharp_from_angle_set(
const struct MVert *mverts, const int numVerts,
@@ -212,17 +216,38 @@ typedef struct MLoopNorSpace {
float vec_ortho[3]; /* Third vector, orthogonal to vec_lnor and vec_ref. */
float ref_alpha; /* Reference angle, around vec_ortho, in ]0, pi] range (0.0 marks that space as invalid). */
float ref_beta; /* Reference angle, around vec_lnor, in ]0, 2pi] range (0.0 marks that space as invalid). */
struct LinkNode *loops; /* All indices (uint_in_ptr) of loops using this lnor space (i.e. smooth fan of loops). */
/* All loops using this lnor space (i.e. smooth fan of loops),
* as (depending on owning MLoopNorSpaceArray.data_type):
* - Indices (uint_in_ptr), or
* - BMLoop pointers. */
struct LinkNode *loops;
char flags;
} MLoopNorSpace;
/**
* MLoopNorSpace.flags
*/
enum {
MLNOR_SPACE_IS_SINGLE = 1 << 0,
};
/**
* Collection of #MLoopNorSpace basic storage & pre-allocation.
*/
typedef struct MLoopNorSpaceArray {
MLoopNorSpace **lspacearr; /* MLoop aligned array */
struct LinkNode *loops_pool; /* Allocated once, avoids having to call BLI_linklist_prepend_arena() for each loop! */
char data_type; /* Whether we store loop indices, or pointers to BMLoop. */
struct MemArena *mem;
} MLoopNorSpaceArray;
void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr, const int numLoops);
/**
* MLoopNorSpaceArray.data_type
*/
enum {
MLNOR_SPACEARR_LOOP_INDEX = 0,
MLNOR_SPACEARR_BMLOOP_PTR = 1,
};
void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr, const int numLoops, const char data_type);
void BKE_lnor_spacearr_clear(MLoopNorSpaceArray *lnors_spacearr);
void BKE_lnor_spacearr_free(MLoopNorSpaceArray *lnors_spacearr);
MLoopNorSpace *BKE_lnor_space_create(MLoopNorSpaceArray *lnors_spacearr);
@@ -230,7 +255,8 @@ void BKE_lnor_space_define(
MLoopNorSpace *lnor_space, const float lnor[3], float vec_ref[3], float vec_other[3],
struct BLI_Stack *edge_vectors);
void BKE_lnor_space_add_loop(
MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpace *lnor_space, const int ml_index, const bool add_to_list);
MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpace *lnor_space,
const int ml_index, void *bm_loop, const bool is_single);
void BKE_lnor_space_custom_data_to_normal(MLoopNorSpace *lnor_space, const short clnor_data[2], float r_custom_lnor[3]);
void BKE_lnor_space_custom_normal_to_data(MLoopNorSpace *lnor_space, const float custom_lnor[3], short r_clnor_data[2]);
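One subtlety of the new storage is worth spelling out: when MLNOR_SPACE_IS_SINGLE is set, the loop index (or BMLoop pointer) lives directly in the `loops` pointer, so for loop index 0 that pointer is legitimately NULL. This is exactly why single-loop fans need a flag rather than the old `loops == NULL` test. A hedged helper sketch (hypothetical, not part of the commit):

/* Count the loops in a fan under the new storage rules. */
static int lnor_space_loops_num(const MLoopNorSpace *lnor_space)
{
	if (lnor_space->flags & MLNOR_SPACE_IS_SINGLE) {
		/* The payload is stored in 'loops' itself and may even be NULL
		 * (loop index 0), so the flag is the authoritative test. */
		return 1;
	}
	int num = 0;
	for (const LinkNode *lnode = lnor_space->loops; lnode != NULL; lnode = lnode->next) {
		num++;
	}
	return num;
}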

@@ -2165,6 +2165,8 @@ static int split_faces_prepare_new_verts(
MLoop *ml = mloop;
MLoopNorSpace **lnor_space = lnors_spacearr->lspacearr;
BLI_assert(lnors_spacearr->data_type == MLNOR_SPACEARR_LOOP_INDEX);
for (int loop_idx = 0; loop_idx < num_loops; loop_idx++, ml++, lnor_space++) {
if (!BLI_BITMAP_TEST(done_loops, loop_idx)) {
const int vert_idx = ml->v;
@@ -2174,7 +2176,15 @@
BLI_assert(*lnor_space);
if ((*lnor_space)->loops) {
if ((*lnor_space)->flags & MLNOR_SPACE_IS_SINGLE) {
/* Single loop in this fan... */
BLI_assert(GET_INT_FROM_POINTER((*lnor_space)->loops) == loop_idx);
BLI_BITMAP_ENABLE(done_loops, loop_idx);
if (vert_used) {
ml->v = new_vert_idx;
}
}
else {
for (LinkNode *lnode = (*lnor_space)->loops; lnode; lnode = lnode->next) {
const int ml_fan_idx = GET_INT_FROM_POINTER(lnode->link);
BLI_BITMAP_ENABLE(done_loops, ml_fan_idx);
@@ -2183,13 +2193,6 @@
}
}
}
else {
/* Single loop in this fan... */
BLI_BITMAP_ENABLE(done_loops, loop_idx);
if (vert_used) {
ml->v = new_vert_idx;
}
}
if (!vert_used) {
BLI_BITMAP_ENABLE(verts_used, vert_idx);
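The hunk above follows a pattern used throughout these files: scan loops linearly, and use a bitmap so each smooth fan is processed exactly once, starting from the first of its loops reached by the scan. A condensed sketch of that pattern, assuming the MLNOR_SPACEARR_LOOP_INDEX mode asserted above (num_loops, lnors_spacearr and the per-fan work are assumed in scope):

BLI_bitmap *done_loops = BLI_BITMAP_NEW(num_loops, __func__);

for (int i = 0; i < num_loops; i++) {
	if (BLI_BITMAP_TEST(done_loops, i)) {
		continue; /* Already handled as part of an earlier fan. */
	}
	MLoopNorSpace *lnor_space = lnors_spacearr->lspacearr[i];
	if (lnor_space->flags & MLNOR_SPACE_IS_SINGLE) {
		BLI_BITMAP_ENABLE(done_loops, i); /* The fan is just this loop. */
	}
	else {
		for (LinkNode *lnode = lnor_space->loops; lnode; lnode = lnode->next) {
			BLI_BITMAP_ENABLE(done_loops, GET_INT_FROM_POINTER(lnode->link));
		}
	}
	/* ...per-fan work goes here... */
}

MEM_freeN(done_loops);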

@@ -444,7 +444,7 @@ cleanup:
MEM_freeN(fnors);
}
void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr, const int numLoops)
void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr, const int numLoops, const char data_type)
{
if (!(lnors_spacearr->lspacearr && lnors_spacearr->loops_pool)) {
MemArena *mem;
@@ -456,6 +456,8 @@ void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr, const int numLoo
lnors_spacearr->lspacearr = BLI_memarena_calloc(mem, sizeof(MLoopNorSpace *) * (size_t)numLoops);
lnors_spacearr->loops_pool = BLI_memarena_alloc(mem, sizeof(LinkNode) * (size_t)numLoops);
}
BLI_assert(ELEM(data_type, MLNOR_SPACEARR_BMLOOP_PTR, MLNOR_SPACEARR_LOOP_INDEX));
lnors_spacearr->data_type = data_type;
}
void BKE_lnor_spacearr_clear(MLoopNorSpaceArray *lnors_spacearr)
@@ -549,12 +551,32 @@ void BKE_lnor_space_define(MLoopNorSpace *lnor_space, const float lnor[3],
}
}
void BKE_lnor_space_add_loop(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpace *lnor_space, const int ml_index,
const bool do_add_loop)
/**
 * Add the given loop to the given lnor_space.
 * Depending on \a lnors_spacearr->data_type, \a bm_loop is expected to be a pointer to a BMLoop struct
 * (in case of BMLOOP_PTR), or NULL (in case of LOOP_INDEX), in which case the loop index is stored in the
 * pointer instead.
 * If \a is_single is set, the BMLoop or loop index is stored directly in the \a lnor_space->loops pointer
 * (since there is only one loop in this fan), otherwise it is added to the linked list of loops in the fan.
*/
void BKE_lnor_space_add_loop(
MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpace *lnor_space,
const int ml_index, void *bm_loop, const bool is_single)
{
BLI_assert((lnors_spacearr->data_type == MLNOR_SPACEARR_LOOP_INDEX && bm_loop == NULL) ||
(lnors_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR && bm_loop != NULL));
lnors_spacearr->lspacearr[ml_index] = lnor_space;
if (do_add_loop) {
BLI_linklist_prepend_nlink(&lnor_space->loops, SET_INT_IN_POINTER(ml_index), &lnors_spacearr->loops_pool[ml_index]);
if (bm_loop == NULL) {
bm_loop = SET_INT_IN_POINTER(ml_index);
}
if (is_single) {
BLI_assert(lnor_space->loops == NULL);
lnor_space->flags |= MLNOR_SPACE_IS_SINGLE;
lnor_space->loops = bm_loop;
}
else {
BLI_assert((lnor_space->flags & MLNOR_SPACE_IS_SINGLE) == 0);
BLI_linklist_prepend_nlink(&lnor_space->loops, bm_loop, &lnors_spacearr->loops_pool[ml_index]);
}
}
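The asserts above pin down the calling contract; schematically, the two call forms now look like this (a hedged fragment, with variable names as in the surrounding code):

/* Mesh code (MLNOR_SPACEARR_LOOP_INDEX): no BMLoop, the loop index is the payload. */
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, ml_index, NULL, is_single);

/* BMesh code (MLNOR_SPACEARR_BMLOOP_PTR): the BMLoop itself is the payload, but the
 * loop index is still required, to address lspacearr[] and loops_pool[]. */
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, BM_elem_index_get(l), l, is_single);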
@@ -841,7 +863,7 @@ void BKE_edges_sharp_from_angle_set(
MEM_freeN(loop_to_poly);
}
static void loop_manifold_fan_around_vert_next(
void BKE_mesh_loop_manifold_fan_around_vert_next(
const MLoop *mloops, const MPoly *mpolys,
const int *loop_to_poly, const int *e2lfan_curr, const uint mv_pivot_index,
const MLoop **r_mlfan_curr, int *r_mlfan_curr_index, int *r_mlfan_vert_index, int *r_mpfan_curr_index)
@@ -930,7 +952,7 @@ static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopS
BKE_lnor_space_define(lnor_space, *lnor, vec_curr, vec_prev, NULL);
/* We know there is only one loop in this space, no need to create a linklist in this case... */
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, ml_curr_index, false);
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, ml_curr_index, NULL, true);
if (clnors_data) {
BKE_lnor_space_custom_data_to_normal(lnor_space, clnors_data[ml_curr_index], *lnor);
@@ -1063,7 +1085,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
if (lnors_spacearr) {
/* Assign current lnor space to current 'vertex' loop. */
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, mlfan_vert_index, true);
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, mlfan_vert_index, NULL, false);
if (me_curr != me_org) {
/* We store here all edges-normalized vectors processed. */
BLI_stack_push(edge_vectors, vec_curr);
@@ -1081,7 +1103,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
copy_v3_v3(vec_prev, vec_curr);
/* Find next loop of the smooth fan. */
loop_manifold_fan_around_vert_next(
BKE_mesh_loop_manifold_fan_around_vert_next(
mloops, mpolys, loop_to_poly, e2lfan_curr, mv_pivot_index,
&mlfan_curr, &mlfan_curr_index, &mlfan_vert_index, &mpfan_curr_index);
@@ -1218,7 +1240,7 @@ static bool loop_split_generator_check_cyclic_smooth_fan(
while (true) {
/* Find next loop of the smooth fan. */
loop_manifold_fan_around_vert_next(
BKE_mesh_loop_manifold_fan_around_vert_next(
mloops, mpolys, loop_to_poly, e2lfan_curr, mv_pivot_index,
&mlfan_curr, &mlfan_curr_index, &mlfan_vert_index, &mpfan_curr_index);
@@ -1475,7 +1497,7 @@ void BKE_mesh_normals_loop_split(
r_lnors_spacearr = &_lnors_spacearr;
}
if (r_lnors_spacearr) {
BKE_lnor_spacearr_init(r_lnors_spacearr, numLoops);
BKE_lnor_spacearr_init(r_lnors_spacearr, numLoops, MLNOR_SPACEARR_LOOP_INDEX);
}
/* Init data common to all tasks. */
@@ -1589,6 +1611,8 @@ static void mesh_normals_loop_custom_set(
}
}
BLI_assert(lnors_spacearr.data_type == MLNOR_SPACEARR_LOOP_INDEX);
/* Now, check each current smooth fan (one lnor space per smooth fan!), and if all its matching custom lnors
* are not (enough) equal, add sharp edges as needed.
* This way, next time we run BKE_mesh_normals_loop_split(), we'll get lnor spacearr/smooth fans matching
@@ -1612,7 +1636,7 @@ static void mesh_normals_loop_custom_set(
if (!BLI_BITMAP_TEST(done_loops, i)) {
/* Notes:
* * In case of mono-loop smooth fan, loops is NULL, so everything is fine (we have nothing to do).
* * In case of mono-loop smooth fan, we have nothing to do.
* * Loops in this linklist are ordered (in reversed order compared to how they were discovered by
* BKE_mesh_normals_loop_split(), but this is not a problem). Which means if we find a
* mismatching clnor, we know all remaining loops will have to be in a new, different smooth fan/
@@ -1620,6 +1644,11 @@ static void mesh_normals_loop_custom_set(
* * In smooth fan case, we compare each clnor against a ref one, to avoid small differences adding
* up into a real big one in the end!
*/
if (lnors_spacearr.lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
BLI_BITMAP_ENABLE(done_loops, i);
continue;
}
LinkNode *loops = lnors_spacearr.lspacearr[i]->loops;
MLoop *prev_ml = NULL;
const float *org_nor = NULL;
@@ -1667,9 +1696,6 @@ static void mesh_normals_loop_custom_set(
medges[(prev_ml->e == mlp->e) ? prev_ml->e : ml->e].flag |= ME_SHARP;
}
}
/* For single loops, where lnors_spacearr.lspacearr[i]->loops is NULL. */
BLI_BITMAP_ENABLE(done_loops, i);
}
}
@@ -1699,7 +1725,15 @@ static void mesh_normals_loop_custom_set(
* computed 2D factors).
*/
LinkNode *loops = lnors_spacearr.lspacearr[i]->loops;
if (loops) {
if (lnors_spacearr.lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
BLI_assert(GET_INT_FROM_POINTER(loops) == i);
const int nidx = use_vertices ? (int)mloops[i].v : i;
float *nor = r_custom_loopnors[nidx];
BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], nor, r_clnors_data[i]);
BLI_BITMAP_DISABLE(done_loops, i);
}
else {
int nbr_nors = 0;
float avg_nor[3];
short clnor_data_tmp[2], *clnor_data;
@@ -1726,13 +1760,6 @@ static void mesh_normals_loop_custom_set(
clnor_data[1] = clnor_data_tmp[1];
}
}
else {
const int nidx = use_vertices ? (int)mloops[i].v : i;
float *nor = r_custom_loopnors[nidx];
BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], nor, r_clnors_data[i]);
BLI_BITMAP_DISABLE(done_loops, i);
}
}
}
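Both directions of the clnor encoding are exercised in this function: custom normals are stored as two shorts, interpreted relative to the lnor space of their fan. A hedged round-trip sketch (hypothetical values, lnor_space assumed valid and in scope):

float custom_nor[3] = {0.0f, 0.0f, 1.0f}; /* Hypothetical desired normal. */
short clnor_data[2];
float decoded[3];

/* Encode the custom normal as two shorts, relative to this fan's space... */
BKE_lnor_space_custom_normal_to_data(lnor_space, custom_nor, clnor_data);

/* ...and decode them back into an actual normal vector. */
BKE_lnor_space_custom_data_to_normal(lnor_space, clnor_data, decoded);

/* 'decoded' now approximates 'custom_nor', clamped to what the space can represent. */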

@@ -597,9 +597,11 @@ static void bm_mesh_edges_sharp_tag(
bm->elem_index_dirty &= ~BM_EDGE;
}
/* Check whether gievn loop is part of an unknown-so-far cyclic smooth fan, or not.
* Needed because cyclic smooth fans have no obvious 'entry point', and yet we need to walk them once, and only once. */
static bool bm_mesh_loop_check_cyclic_smooth_fan(BMLoop *l_curr)
/**
* Check whether given loop is part of an unknown-so-far cyclic smooth fan, or not.
* Needed because cyclic smooth fans have no obvious 'entry point', and yet we need to walk them once, and only once.
*/
bool BM_loop_check_cyclic_smooth_fan(BMLoop *l_curr)
{
BMLoop *lfan_pivot_next = l_curr;
BMEdge *e_next = l_curr->e;
@@ -665,7 +667,7 @@ static void bm_mesh_loops_calc_normals(
r_lnors_spacearr = &_lnors_spacearr;
}
if (r_lnors_spacearr) {
BKE_lnor_spacearr_init(r_lnors_spacearr, bm->totloop);
BKE_lnor_spacearr_init(r_lnors_spacearr, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);
edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
}
@@ -700,7 +702,7 @@ static void bm_mesh_loops_calc_normals(
* However, this would complicate the code, add more memory usage, and BM_vert_step_fan_loop()
* is quite cheap in term of CPU cycles, so really think it's not worth it. */
if (BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
(BM_elem_flag_test(l_curr, BM_ELEM_TAG) || !bm_mesh_loop_check_cyclic_smooth_fan(l_curr)))
(BM_elem_flag_test(l_curr, BM_ELEM_TAG) || !BM_loop_check_cyclic_smooth_fan(l_curr)))
{
}
else if (!BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
@@ -734,7 +736,7 @@ static void bm_mesh_loops_calc_normals(
BKE_lnor_space_define(lnor_space, r_lnos[l_curr_index], vec_curr, vec_prev, NULL);
/* We know there is only one loop in this space, no need to create a linklist in this case... */
BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, l_curr_index, false);
BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, l_curr_index, l_curr, true);
if (has_clnors) {
short (*clnor)[2] = clnors_data ? &clnors_data[l_curr_index] :
@@ -853,7 +855,7 @@ static void bm_mesh_loops_calc_normals(
if (r_lnors_spacearr) {
/* Assign current lnor space to current 'vertex' loop. */
BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, lfan_pivot_index, true);
BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, lfan_pivot_index, lfan_pivot, false);
if (e_next != e_org) {
/* We store here all edges-normalized vectors processed. */
BLI_stack_push(edge_vectors, vec_next);
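This file shows the asymmetry the commit message refers to: going from a BMesh element to its index is a cheap lookup, while recovering a BMLoop from a bare index would require building a full loop table. A hedged fragment (assuming loop indices are up to date, i.e. bm->elem_index_dirty has been handled by the caller):

/* BMLOOP_PTR mode, single-loop fan: the BMLoop is stored in 'loops' itself. */
struct BMLoop *l = (struct BMLoop *)lnor_space->loops;
const int l_index = BM_elem_index_get(l);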

@@ -52,6 +52,9 @@ void BM_loops_calc_normal_vcos(
const bool use_split_normals, const float split_angle, float (*r_lnos)[3],
struct MLoopNorSpaceArray *r_lnors_spacearr, short (*clnors_data)[2], const int cd_loop_clnors_offset);
bool BM_loop_check_cyclic_smooth_fan(BMLoop *l_curr);
void BM_edges_sharp_from_angle_set(BMesh *bm, const float split_angle);
void bmesh_edit_begin(BMesh *bm, const BMOpTypeFlag type_flag);