Build: replace Blender-specific DEBUG with standard NDEBUG

NDEBUG is part of the C standard and disables asserts. It is now the
only define used to decide whether asserts are enabled.

DEBUG was a Blender-specific define, which has now been removed.

_DEBUG is a Visual Studio define for builds in the Debug configuration;
Blender defines it on all platforms. It is still used in a few places
in the draw code, and in the external libraries Bullet and Mantaflow.
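
For illustration, a minimal sketch of the convention after this change
(hypothetical code, not taken from the Blender sources): the standard
assert() from <assert.h> is compiled out whenever NDEBUG is defined, so
debug-only blocks are now guarded with #ifndef NDEBUG instead of the
removed #ifdef DEBUG.

  /* Hypothetical example, not Blender code: NDEBUG gates both assert()
   * and explicitly guarded debug-only blocks. */
  #include <assert.h>
  #include <stdio.h>

  static void validate(int count)
  {
    assert(count >= 0); /* Compiled out when NDEBUG is defined. */

  #ifndef NDEBUG
    /* The replacement for the old Blender-specific `#ifdef DEBUG`. */
    printf("debug: count = %d\n", count);
  #endif
  }

  int main(void)
  {
    validate(3);
    return 0;
  }

Release-type configurations (Release, MinSizeRel, RelWithDebInfo) define
NDEBUG, as the CMake hunk below shows, so both the assert and the printf
above disappear there; Debug builds keep them.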

Pull Request: https://projects.blender.org/blender/blender/pulls/115774
Brecht Van Lommel 2023-12-04 15:13:06 +01:00 committed by Gitea
parent fc863c63e6
commit e06561a27a
87 changed files with 159 additions and 156 deletions

@@ -55,7 +55,9 @@ endif()
 # global compile definitions since add_definitions() adds for all.
 set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS
-  $<$<CONFIG:Debug>:DEBUG;_DEBUG>
+  # Visual Studio define for debug, enable on all platforms.
+  $<$<CONFIG:Debug>:_DEBUG>
+  # Standard C define to disable asserts.
   $<$<CONFIG:Release>:NDEBUG>
   $<$<CONFIG:MinSizeRel>:NDEBUG>
   $<$<CONFIG:RelWithDebInfo>:NDEBUG>

@@ -1311,7 +1311,7 @@ int curve_fit_cubic_to_points_refit_db(
 #ifdef USE_CORNER_DETECT
   if (use_corner) {
-#ifdef DEBUG
+#ifndef NDEBUG
     for (uint i = 0; i < knots_len; i++) {
       assert(knots[i].heap_node == NULL);
     }

@@ -206,7 +206,7 @@ static void rt_node_free(RangeTreeUInt *rt, Node *node);
 #ifdef USE_BTREE
-#ifdef DEBUG
+#ifndef NDEBUG
 static bool rb_is_balanced_root(const Node *root);
 #endif
@@ -238,7 +238,7 @@ static int key_cmp(uint key1, uint key2)
 /* removed from the tree */
 static void rb_node_invalidate(Node *node)
 {
-#ifdef DEBUG
+#ifndef NDEBUG
   node->left = NULL;
   node->right = NULL;
   node->color = false;
@@ -481,7 +481,7 @@ static Node *rb_get_or_lower_recursive(Node *n, const uint key)
   }
 }
-#ifdef DEBUG
+#ifndef NDEBUG
 static bool rb_is_balanced_recursive(const Node *node, int black)
 {
@@ -511,7 +511,7 @@ static bool rb_is_balanced_root(const Node *root)
   return rb_is_balanced_recursive(root, black);
 }
-#endif // DEBUG
+#endif // NDEBUG
 /* End BTree API */
@@ -703,7 +703,7 @@ RangeTreeUInt *range_tree_uint_alloc(uint min, uint max)
 void range_tree_uint_free(RangeTreeUInt *rt)
 {
-#ifdef DEBUG
+#ifndef NDEBUG
 #ifdef USE_BTREE
   assert(rb_is_balanced_root(rt->root));
 #endif

@@ -259,7 +259,7 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
 # else
   options.append("Wno-parentheses-equality -Wno-unused-value --hipcc-func-supp -O3 -ffast-math");
 # endif
-# ifdef _DEBUG
+# ifndef NDEBUG
   options.append(" -save-temps");
 # endif
   options.append(" --amdgpu-target=").append(arch);

@@ -545,7 +545,7 @@ void OneapiDevice::usm_free(void *usm_ptr)
 void OneapiDevice::check_usm(SyclQueue *queue_, const void *usm_ptr, bool allow_host = false)
 {
-# ifdef _DEBUG
+# ifndef NDEBUG
   sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_);
   sycl::info::device_type device_type =
       queue->get_device().get_info<sycl::info::device::device_type>();

@@ -10,7 +10,7 @@
 #pragma once
 #ifdef _MSC_VER
-# ifdef DEBUG
+# ifdef _DEBUG
 /* Suppress STL-MSVC debug info warning. */
 # pragma warning(disable : 4786)
 # endif

@@ -159,7 +159,7 @@ GHOST_TSuccess GHOST_DisplayManagerX11::setCurrentDisplaySetting(
     fprintf(stderr, "Error: XF86VidMode extension missing!\n");
     return GHOST_kFailure;
   }
-# ifdef DEBUG
+# ifndef NDEBUG
   printf("Using XFree86-VidModeExtension Version %d.%d\n", majorVersion, minorVersion);
 # endif
@@ -199,7 +199,7 @@ GHOST_TSuccess GHOST_DisplayManagerX11::setCurrentDisplaySetting(
   }
   if (best_fit != -1) {
-# ifdef DEBUG
+# ifndef NDEBUG
     printf("Switching to video mode %dx%d %dx%d %d\n",
            vidmodes[best_fit]->hdisplay,
            vidmodes[best_fit]->vdisplay,

@@ -345,7 +345,7 @@ int BLF_load_default(bool unique);
 int BLF_load_mono_default(bool unique);
 void BLF_load_font_stack(void);
-#ifdef DEBUG
+#ifndef NDEBUG
 void BLF_state_print(int fontid);
 #endif

@@ -1017,7 +1017,7 @@ float BLF_character_to_curves(int fontid,
   return blf_character_to_curves(font, unicode, nurbsbase, scale);
 }
-#ifdef DEBUG
+#ifndef NDEBUG
 void BLF_state_print(int fontid)
 {
   FontBLF *font = blf_get(fontid);

@@ -765,7 +765,7 @@ static FT_UInt blf_glyph_index_from_charcode(FontBLF **font, const uint charcode
     }
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   printf("Unicode character U+%04X not found in loaded fonts. \n", charcode);
 #endif

@@ -882,7 +882,7 @@ static void where_am_i(char *program_filepath,
   /* Remove "/./" and "/../" so string comparisons can be used on the path. */
   BLI_path_normalize_native(program_filepath);
-# if defined(DEBUG)
+# ifndef NDEBUG
   if (!STREQ(program_name, program_filepath)) {
     CLOG_INFO(&LOG, 2, "guessing '%s' == '%s'", program_name, program_filepath);
   }

@@ -681,7 +681,7 @@ void MutableAttributeAccessor::remove_anonymous()
 /**
  * Debug utility that checks whether the #finish function of an #AttributeWriter has been called.
  */
-#ifdef DEBUG
+#ifndef NDEBUG
 struct FinishCallChecker {
   std::string name;
   bool finish_called = false;
@@ -700,7 +700,7 @@ GAttributeWriter MutableAttributeAccessor::lookup_for_write(const AttributeIDRef
 {
   GAttributeWriter attribute = fn_->lookup_for_write(owner_, attribute_id);
   /* Check that the #finish method is called in debug builds. */
-#ifdef DEBUG
+#ifndef NDEBUG
   if (attribute) {
     auto checker = std::make_shared<FinishCallChecker>();
     checker->name = attribute_id.name();

@@ -1217,7 +1217,7 @@ BVHTree *BKE_bvhtree_from_mesh_get(BVHTreeFromMesh *data,
     bvhcache_insert(*bvh_cache_p, data->tree, bvh_cache_type);
     bvhcache_unlock(*bvh_cache_p, lock_started);
-#ifdef DEBUG
+#ifndef NDEBUG
   if (data->tree != nullptr) {
     if (BLI_bvhtree_get_tree_type(data->tree) != tree_type) {
       printf("tree_type %d obtained instead of %d\n",
@@ -1307,7 +1307,7 @@ BVHTree *BKE_bvhtree_from_editmesh_get(BVHTreeFromEditMesh *data,
     bvhcache_unlock(*bvh_cache_p, lock_started);
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   if (data->tree != nullptr) {
     if (BLI_bvhtree_get_tree_type(data->tree) != tree_type) {
       printf("tree_type %d obtained instead of %d\n",

@@ -72,7 +72,7 @@ CurvesGeometry::CurvesGeometry(const int point_num, const int curve_num)
         MEM_malloc_arrayN(this->curve_num + 1, sizeof(int), __func__));
     this->runtime->curve_offsets_sharing_info = implicit_sharing::info_for_mem_free(
         this->curve_offsets);
-#ifdef DEBUG
+#ifndef NDEBUG
     this->offsets_for_write().fill(-1);
 #endif
   /* Set common values for convenience. */

@@ -535,7 +535,7 @@ void BKE_mesh_face_offsets_ensure_alloc(Mesh *mesh)
   mesh->runtime->face_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
       mesh->face_offset_indices);
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Fill offsets with obviously bad values to simplify finding missing initialization. */
   mesh->face_offsets_for_write().fill(-1);
 #endif

@@ -74,7 +74,7 @@ Mesh *BKE_mesh_wrapper_from_editmesh(BMEditMesh *em,
   me->edit_mesh->is_shallow_copy = true;
   /* Make sure we crash if these are ever used. */
-#ifdef DEBUG
+#ifndef NDEBUG
   me->totvert = INT_MAX;
   me->totedge = INT_MAX;
   me->faces_num = INT_MAX;

@@ -512,7 +512,7 @@ class NodeTreeMainUpdater {
       result.interface_changed = true;
     }
-#ifdef DEBUG
+#ifndef NDEBUG
     /* Check the uniqueness of node identifiers. */
     Set<int32_t> node_identifiers;
     const Span<const bNode *> nodes = ntree.all_nodes();

@@ -1121,7 +1121,7 @@ bool BKE_paint_ensure(ToolSettings *ts, Paint **r_paint)
                   (Paint *)ts->uvsculpt,
                   (Paint *)ts->curves_sculpt,
                   (Paint *)&ts->imapaint));
-#ifdef DEBUG
+#ifndef NDEBUG
   Paint paint_test = **r_paint;
   BKE_paint_runtime_init(ts, *r_paint);
   /* Swap so debug doesn't hide errors when release fails. */

@@ -379,7 +379,7 @@ WorkSpaceLayout *BKE_workspace_layout_add(Main *bmain,
   WorkSpaceLayout *layout = MEM_cnew<WorkSpaceLayout>(__func__);
   BLI_assert(!workspaces_is_screen_used(bmain, screen));
-#ifndef DEBUG
+#ifdef NDEBUG
   UNUSED_VARS(bmain);
 #endif
   layout->screen = screen;

@@ -12,7 +12,7 @@
  */
 /* only validate array-bounds in debug mode */
-#ifdef DEBUG
+#ifndef NDEBUG
 # define STACK_DECLARE(stack) unsigned int _##stack##_index, _##stack##_num_alloc
 # define STACK_INIT(stack, stack_num) \
   ((void)stack, \

@@ -49,7 +49,7 @@
 /* Setting zero so we can catch bugs in BLI_task/KDOPBVH.
  * TODO(sergey): Deduplicate the limits with PBVH from BKE.
  */
-#ifdef DEBUG
+#ifndef NDEBUG
 # define KDOPBVH_THREAD_LEAF_THRESHOLD 0
 #else
 # define KDOPBVH_THREAD_LEAF_THRESHOLD 1024

@@ -22,7 +22,7 @@ namespace blender::index_mask {
 template<typename T> void build_reverse_map(const IndexMask &mask, MutableSpan<T> r_map)
 {
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Catch errors with asserts in debug builds. */
   r_map.fill(-1);
 #endif

@@ -37,7 +37,7 @@ struct KDTree {
   uint nodes_len;
   uint root;
   int max_node_index;
-#ifdef DEBUG
+#ifndef NDEBUG
   bool is_balanced; /* ensure we call balance first */
   uint nodes_len_capacity; /* max size of the tree */
 #endif
@@ -97,7 +97,7 @@ KDTree *BLI_kdtree_nd_(new)(uint nodes_len_capacity)
   tree->root = KD_NODE_ROOT_IS_INIT;
   tree->max_node_index = -1;
-#ifdef DEBUG
+#ifndef NDEBUG
   tree->is_balanced = false;
   tree->nodes_len_capacity = nodes_len_capacity;
 #endif
@@ -120,7 +120,7 @@ void BLI_kdtree_nd_(insert)(KDTree *tree, int index, const float co[KD_DIMS])
 {
   KDTreeNode *node = &tree->nodes[tree->nodes_len++];
-#ifdef DEBUG
+#ifndef NDEBUG
   BLI_assert(tree->nodes_len <= tree->nodes_len_capacity);
 #endif
@@ -133,7 +133,7 @@ void BLI_kdtree_nd_(insert)(KDTree *tree, int index, const float co[KD_DIMS])
   node->d = 0;
   tree->max_node_index = MAX2(tree->max_node_index, index);
-#ifdef DEBUG
+#ifndef NDEBUG
   tree->is_balanced = false;
 #endif
 }
@@ -205,7 +205,7 @@ void BLI_kdtree_nd_(balance)(KDTree *tree)
   tree->root = kdtree_balance(tree->nodes, tree->nodes_len, 0, 0);
-#ifdef DEBUG
+#ifndef NDEBUG
   tree->is_balanced = true;
 #endif
 }
@@ -236,7 +236,7 @@ int BLI_kdtree_nd_(find_nearest)(const KDTree *tree,
   float min_dist, cur_dist;
   uint stack_len_capacity, cur = 0;
-#ifdef DEBUG
+#ifndef NDEBUG
   BLI_assert(tree->is_balanced == true);
 #endif
@@ -346,7 +346,7 @@ int BLI_kdtree_nd_(find_nearest_cb)(
   float min_dist = FLT_MAX, cur_dist;
   uint stack_len_capacity, cur = 0;
-#ifdef DEBUG
+#ifndef NDEBUG
   BLI_assert(tree->is_balanced == true);
 #endif
@@ -487,7 +487,7 @@ int BLI_kdtree_nd_(find_nearest_n_with_len_squared_cb)(
   uint stack_len_capacity, cur = 0;
   uint i, nearest_len = 0;
-#ifdef DEBUG
+#ifndef NDEBUG
   BLI_assert(tree->is_balanced == true);
 #endif
@@ -652,7 +652,7 @@ int BLI_kdtree_nd_(range_search_with_len_squared_cb)(
   uint stack_len_capacity, cur = 0;
   uint nearest_len = 0, nearest_len_capacity = 0;
-#ifdef DEBUG
+#ifndef NDEBUG
   BLI_assert(tree->is_balanced == true);
 #endif
@@ -746,7 +746,7 @@ void BLI_kdtree_nd_(range_search_cb)(
   float range_sq = range * range, dist_sq;
   uint stack_len_capacity, cur = 0;
-#ifdef DEBUG
+#ifndef NDEBUG
   BLI_assert(tree->is_balanced == true);
 #endif
@@ -978,7 +978,7 @@ static int kdtree_node_cmp_deduplicate(const void *n0_p, const void *n1_p)
  */
 int BLI_kdtree_nd_(deduplicate)(KDTree *tree)
 {
-#ifdef DEBUG
+#ifndef NDEBUG
   tree->is_balanced = false;
 #endif
   qsort(tree->nodes, (size_t)tree->nodes_len, sizeof(*tree->nodes), kdtree_node_cmp_deduplicate);

@@ -3589,7 +3589,7 @@ static bool barycentric_weights(const float v1[3],
   wtot = w[0] + w[1] + w[2];
-#ifdef DEBUG /* Avoid floating point exception when debugging. */
+#ifndef NDEBUG /* Avoid floating point exception when debugging. */
   if (wtot != 0.0f)
 #endif
   {
@@ -3686,7 +3686,7 @@ bool barycentric_coords_v2(
   const float x3 = v3[0], y3 = v3[1];
   const float det = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3);
-#ifdef DEBUG /* Avoid floating point exception when debugging. */
+#ifndef NDEBUG /* Avoid floating point exception when debugging. */
   if (det != 0.0f)
 #endif
   {
@@ -3711,7 +3711,7 @@ void barycentric_weights_v2(
   w[2] = cross_tri_v2(v1, v2, co);
   wtot = w[0] + w[1] + w[2];
-#ifdef DEBUG /* Avoid floating point exception when debugging. */
+#ifndef NDEBUG /* Avoid floating point exception when debugging. */
   if (wtot != 0.0f)
 #endif
   {
@@ -3734,7 +3734,7 @@ void barycentric_weights_v2_clamped(
   w[2] = max_ff(cross_tri_v2(v1, v2, co), 0.0f);
   wtot = w[0] + w[1] + w[2];
-#ifdef DEBUG /* Avoid floating point exception when debugging. */
+#ifndef NDEBUG /* Avoid floating point exception when debugging. */
   if (wtot != 0.0f)
 #endif
   {
@@ -3757,7 +3757,7 @@ void barycentric_weights_v2_persp(
   w[2] = cross_tri_v2(v1, v2, co) / v3[3];
   wtot = w[0] + w[1] + w[2];
-#ifdef DEBUG /* Avoid floating point exception when debugging. */
+#ifndef NDEBUG /* Avoid floating point exception when debugging. */
   if (wtot != 0.0f)
 #endif
   {
@@ -3849,7 +3849,7 @@ void barycentric_weights_v2_quad(const float v1[2],
   wtot = w[0] + w[1] + w[2] + w[3];
-#ifdef DEBUG /* Avoid floating point exception when debugging. */
+#ifndef NDEBUG /* Avoid floating point exception when debugging. */
   if (wtot != 0.0f)
 #endif
   {

@@ -17,7 +17,7 @@
 /******************************** Quaternions ********************************/
 /* used to test is a quat is not normalized (only used for debug prints) */
-#ifdef DEBUG
+#ifndef NDEBUG
 # define QUAT_EPSILON 0.0001
 #endif
@@ -216,7 +216,7 @@ static void quat_to_mat3_no_error(float m[3][3], const float q[4])
 void quat_to_mat3(float m[3][3], const float q[4])
 {
-#ifdef DEBUG
+#ifndef NDEBUG
   float f;
   if (!((f = dot_qtqt(q, q)) == 0.0f || (fabsf(f - 1.0f) < (float)QUAT_EPSILON))) {
     fprintf(stderr,
@@ -232,7 +232,7 @@ void quat_to_mat4(float m[4][4], const float q[4])
 {
   double q0, q1, q2, q3, qda, qdb, qdc, qaa, qab, qac, qbb, qbc, qcc;
-#ifdef DEBUG
+#ifndef NDEBUG
   if (!((q0 = dot_qtqt(q, q)) == 0.0 || (fabs(q0 - 1.0) < QUAT_EPSILON))) {
     fprintf(stderr,
             "Warning! quat_to_mat4() called with non-normalized: size %.8f *** report a bug ***\n",
@@ -1065,7 +1065,7 @@ void quat_to_axis_angle(float axis[3], float *angle, const float q[4])
 {
   float ha, si;
-#ifdef DEBUG
+#ifndef NDEBUG
   if (!((ha = dot_qtqt(q, q)) == 0.0f || (fabsf(ha - 1.0f) < (float)QUAT_EPSILON))) {
     fprintf(stderr,
             "Warning! quat_to_axis_angle() called with non-normalized: size %.8f *** report a bug "

@@ -463,7 +463,7 @@ static void pf_coord_remove(PolyFill *pf, PolyIndex *pi)
   if (UNLIKELY(pf->indices == pi)) {
     pf->indices = pi->next;
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   pi->index = (uint32_t)-1;
   pi->next = pi->prev = NULL;
 #endif

@@ -842,7 +842,7 @@ uint BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const float n
   BLI_assert(!nor_proj || len_squared_v3(nor_proj) > FLT_EPSILON);
-#ifdef DEBUG
+#ifndef NDEBUG
   for (eve = sf_ctx->fillvertbase.first; eve; eve = eve->next) {
     /* these values used to be set,
      * however they should always be zero'd so check instead */
@@ -984,7 +984,7 @@ uint BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const float n
   }
   if (eed) {
     /* otherwise it's impossible to be sure you can clear vertices */
-#ifdef DEBUG
+#ifndef NDEBUG
     printf("No vertices with 250 edges allowed!\n");
 #endif
     return 0;
@@ -1027,7 +1027,7 @@ uint BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const float n
     eed->v1->edge_count++;
     eed->v2->edge_count++;
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   /* ensure we're right! */
   for (eed = sf_ctx->filledgebase.first; eed; eed = eed->next) {
     BLI_assert(eed->v1->edge_count != 1);

@@ -76,7 +76,7 @@ void BM_face_copy_shared(BMesh *bm, BMFace *f, BMLoopFilterFunc filter_fn, void
   BMLoop *l_first;
   BMLoop *l_iter;
-#ifdef DEBUG
+#ifndef NDEBUG
   l_iter = l_first = BM_FACE_FIRST_LOOP(f);
   do {
     BLI_assert(BM_ELEM_API_FLAG_TEST(l_iter, _FLAG_OVERLAP) == 0);

@@ -360,7 +360,7 @@ int BM_iter_mesh_count_flag(const char itype, BMesh *bm, const char hflag, const
  * allow adding but not removing, this isn't _totally_ safe since
  * you could add/remove within the same loop, but catches common cases
  */
-#ifdef DEBUG
+#ifndef NDEBUG
 # define USE_IMMUTABLE_ASSERT
 #endif

@@ -73,7 +73,7 @@ extern const char bm_iter_itype_htype_map[BM_ITYPE_MAX];
 /* a version of BM_ITER_MESH which keeps the next item in storage
  * so we can delete the current item, see bug #36923. */
-#ifdef DEBUG
+#ifndef NDEBUG
 # define BM_ITER_MESH_MUTABLE(ele, ele_next, iter, bm, itype) \
   for (BM_CHECK_TYPE_ELEM_ASSIGN(ele) = BM_iter_new(iter, bm, itype, NULL); \
        ele ? ((void)((iter)->count = BM_iter_mesh_count(itype, bm)), \

@@ -1156,7 +1156,7 @@ void BM_select_history_merge_from_targetmap(
     BMesh *bm, GHash *vert_map, GHash *edge_map, GHash *face_map, const bool use_chain)
 {
-#ifdef DEBUG
+#ifndef NDEBUG
   LISTBASE_FOREACH (BMEditSelection *, ese, &bm->selected) {
     BLI_assert(BM_ELEM_API_FLAG_TEST(ese->ele, _FLAG_OVERLAP) == 0);
   }

@@ -335,7 +335,7 @@ void bmesh_edit_end(BMesh *bm, BMOpTypeFlag type_flag)
 void BM_mesh_elem_index_ensure_ex(BMesh *bm, const char htype, int elem_offset[4])
 {
-#ifdef DEBUG
+#ifndef NDEBUG
   BM_ELEM_INDEX_VALIDATE(bm, "Should Never Fail!", __func__);
 #endif
@@ -514,7 +514,7 @@ void BM_mesh_elem_index_validate(
   }
 #if 0 /* mostly annoying, even in debug mode */
-# ifdef DEBUG
+# ifndef NDEBUG
   if (is_any_error == 0) {
     fprintf(stderr, "Valid Index Success: at %s, %s, '%s', '%s'\n", location, func, msg_a, msg_b);
   }

@@ -49,7 +49,7 @@ BLI_INLINE void bmesh_calc_tessellation_for_face_impl(BMLoop *(*looptris)[3],
                                                       MemArena **pf_arena_p,
                                                       const bool face_normal)
 {
-#ifdef DEBUG
+#ifndef NDEBUG
   /* The face normal is used for projecting faces into 2D space for tessellation.
    * Invalid normals may result in invalid tessellation.
    * Either `face_normal` should be true or normals should be updated first. */

@@ -9,7 +9,7 @@
  */
 /* debug builds only */
-#ifdef DEBUG
+#ifndef NDEBUG
 # include "BLI_map.hh"
 # include "BLI_ordered_edge.hh"

@@ -132,7 +132,7 @@ void BMO_op_init(BMesh *bm, BMOperator *op, const int flag, const char *opname)
 {
   int opcode = BMO_opcode_from_opname(opname);
-#ifdef DEBUG
+#ifndef NDEBUG
   BM_ELEM_INDEX_VALIDATE(bm, "pre bmo", opname);
 #else
   (void)bm;
@@ -185,7 +185,7 @@ void BMO_op_finish(BMesh *bm, BMOperator *op)
   BLI_memarena_free(op->arena);
-#ifdef DEBUG
+#ifndef NDEBUG
   BM_ELEM_INDEX_VALIDATE(bm, "post bmo", bmo_opdefines[op->type]->opname);
   /* avoid accidental re-use */

@@ -492,7 +492,7 @@ bool BM_face_split_edgenet(BMesh *bm,
   BLI_assert(BM_ELEM_API_FLAG_TEST(f, FACE_NET) == 0);
   BM_ELEM_API_FLAG_ENABLE(f, FACE_NET);
-#ifdef DEBUG
+#ifndef NDEBUG
   for (i = 0; i < edge_net_len; i++) {
     BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET) == 0);
    BLI_assert(BM_edge_in_face(edge_net[i], f) == false);
@@ -1637,7 +1637,7 @@ finally:
   if (use_partial_connect) {
     /* Sanity check: ensure we don't have connecting edges before splicing begins. */
-# ifdef DEBUG
+# ifndef NDEBUG
     {
       struct TempVertPair *tvp = temp_vert_pairs.list;
       do {

@@ -1828,7 +1828,7 @@ BMFace *BM_face_exists_overlap(BMVert **varr, const int len)
   BMFace *f_overlap = nullptr;
   LinkNode *f_lnk = nullptr;
-#ifdef DEBUG
+#ifndef NDEBUG
   /* check flag isn't already set */
   for (i = 0; i < len; i++) {
     BM_ITER_ELEM (f, &viter, varr[i], BM_FACES_OF_VERT) {
@@ -1866,7 +1866,7 @@ bool BM_face_exists_overlap_subset(BMVert **varr, const int len)
   bool is_overlap = false;
   LinkNode *f_lnk = nullptr;
-#ifdef DEBUG
+#ifndef NDEBUG
   /* check flag isn't already set */
   for (int i = 0; i < len; i++) {
     BLI_assert(BM_ELEM_API_FLAG_TEST(varr[i], _FLAG_OVERLAP) == 0);
@@ -2112,7 +2112,7 @@ int BM_mesh_calc_face_groups(BMesh *bm,
 {
   /* NOTE: almost duplicate of #BM_mesh_calc_edge_groups, keep in sync. */
-#ifdef DEBUG
+#ifndef NDEBUG
   int group_index_len = 1;
 #else
   int group_index_len = 32;
@@ -2266,7 +2266,7 @@ int BM_mesh_calc_edge_groups(BMesh *bm,
 {
   /* NOTE: almost duplicate of #BM_mesh_calc_face_groups, keep in sync. */
-#ifdef DEBUG
+#ifndef NDEBUG
   int group_index_len = 1;
 #else
   int group_index_len = 32;

@@ -475,7 +475,7 @@ static void bm_grid_fill(BMesh *bm,
   const uint ytot = uint(BM_edgeloop_length_get(estore_rail_a));
   // BMVert *v;
   uint i;
-#ifdef DEBUG
+#ifndef NDEBUG
   uint x, y;
 #endif
   LinkData *el;
@@ -523,7 +523,7 @@ static void bm_grid_fill(BMesh *bm,
   for (el = static_cast<LinkData *>(lb_rail_b->first), i = 0; el; el = el->next, i++) {
     v_grid[(xtot * i) + (xtot - 1)] = static_cast<BMVert *>(el->data);
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   for (x = 1; x < xtot - 1; x++) {
     for (y = 1; y < ytot - 1; y++) {
       BLI_assert(v_grid[(y * xtot) + x] == nullptr);

@@ -300,7 +300,7 @@ static bool apply_mesh_output_to_bmesh(BMesh *bm, IMesh &m_out, bool keep_hidden
   BMIter iter;
   BMFace *bmf = static_cast<BMFace *>(BM_iter_new(&iter, bm, BM_FACES_OF_MESH, nullptr));
   while (bmf != nullptr) {
-# ifdef DEBUG
+# ifndef NDEBUG
     iter.count = BM_iter_mesh_count(BM_FACES_OF_MESH, bm);
 # endif
     BMFace *bmf_next = static_cast<BMFace *>(BM_iter_step(&iter));
@@ -318,7 +318,7 @@ static bool apply_mesh_output_to_bmesh(BMesh *bm, IMesh &m_out, bool keep_hidden
   }
   BMVert *bmv = static_cast<BMVert *>(BM_iter_new(&iter, bm, BM_VERTS_OF_MESH, nullptr));
   while (bmv != nullptr) {
-# ifdef DEBUG
+# ifndef NDEBUG
     iter.count = BM_iter_mesh_count(BM_VERTS_OF_MESH, bm);
 # endif
     BMVert *bmv_next = static_cast<BMVert *>(BM_iter_step(&iter));

@@ -1086,7 +1086,7 @@ bool BM_mesh_intersect(BMesh *bm,
   const bool isect_tri_tri_no_shared = (boolean_mode != BMESH_ISECT_BOOLEAN_NONE);
   int flag = BVH_OVERLAP_USE_THREADING | BVH_OVERLAP_RETURN_PAIRS;
-# ifdef DEBUG
+# ifndef NDEBUG
   /* The overlap result must match that obtained in Release to succeed
    * in the `bmesh_boolean` test. */
   if (looptris_tot < 1024) {

@@ -237,7 +237,7 @@ float Light::point_radiance_get(const ::Light *la)
 void Light::debug_draw()
 {
-#ifdef DEBUG
+#ifndef NDEBUG
   drw_debug_sphere(_position, influence_radius_max, float4(0.8f, 0.3f, 0.0f, 1.0f));
 #endif
 }

@@ -52,7 +52,7 @@ ShaderModule::ShaderModule()
     shader = nullptr;
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Ensure all shader are described. */
   for (auto i : IndexRange(MAX_SHADER_TYPE)) {
     const char *name = static_shader_create_info_name_get(eShaderType(i));

@@ -345,7 +345,7 @@ GPU_SHADER_CREATE_INFO(eevee_surf_occupancy)
  * Variations that are only there to test shaders at compile time.
  * \{ */
-#ifdef DEBUG
+#ifndef NDEBUG
 /* Stub functions defined by the material evaluation. */
 GPU_SHADER_CREATE_INFO(eevee_material_stub)

@@ -38,7 +38,7 @@ ShaderModule::ShaderModule()
     shader = nullptr;
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Ensure all shader are described. */
   for (auto i : IndexRange(MAX_SHADER_TYPE)) {
     const char *name = static_shader_create_info_name_get(eShaderType(i));

@@ -88,7 +88,7 @@ struct SelectMap {
   /** Mapping between internal IDs and `object->runtime->select_id`. */
   Vector<uint> select_id_map;
-#ifdef DEBUG
+#ifndef NDEBUG
   /** Debug map containing a copy of the object name. */
   Vector<std::string> map_names;
 #endif
@@ -113,7 +113,7 @@ struct SelectMap {
     uint object_id = ob_ref.object->runtime->select_id;
     uint id = select_id_map.append_and_get_index(object_id | sub_object_id);
-#ifdef DEBUG
+#ifndef NDEBUG
     map_names.append(ob_ref.object->id.name);
 #endif
     return {id};
@@ -153,7 +153,7 @@ struct SelectMap {
     info_buf.push_update();
     select_id_map.clear();
-#ifdef DEBUG
+#ifndef NDEBUG
     map_names.clear();
 #endif
   }

@@ -641,7 +641,7 @@ static bool workbench_render_framebuffers_init()
   GPU_framebuffer_check_valid(dfbl->depth_only_fb, nullptr);
 }
-#ifdef DEBUG
+#ifdef _DEBUG
 /* This is just to ease GPU debugging when the frame delimiter is set to Finish */
 # define GPU_FINISH_DELIMITER() GPU_finish()
 #else

@@ -172,7 +172,7 @@ class UniformCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
 protected:
   GPUUniformBuf *ubo_;
-#ifdef DEBUG
+#ifndef NDEBUG
   const char *name_ = typeid(T).name();
 #else
   const char *name_ = "UniformBuffer";
@@ -215,7 +215,7 @@ class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
 protected:
   GPUStorageBuf *ssbo_;
-#ifdef DEBUG
+#ifndef NDEBUG
   const char *name_ = typeid(T).name();
 #else
   const char *name_ = "StorageBuffer";

@@ -916,7 +916,7 @@ void DRW_curve_batch_cache_create_requested(Object *ob, const Scene *scene)
   curve_render_data_free(rdata);
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Make sure all requested batches have been setup. */
   for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
     BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], (GPUPrimType)0));

@@ -1299,7 +1299,7 @@ static void drw_add_attributes_vbo(GPUBatch *batch,
   }
 }
-#ifdef DEBUG
+#ifndef NDEBUG
 /* Sanity check function to test if all requested batches are available. */
 static void drw_mesh_batch_cache_check_available(TaskGraph *task_graph, Mesh *me)
 {
@@ -1350,13 +1350,13 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph *task_graph,
   /* Early out */
   if (cache.batch_requested == 0) {
-#ifdef DEBUG
+#ifndef NDEBUG
     drw_mesh_batch_cache_check_available(task_graph, me);
 #endif
     return;
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Map the index of a buffer to a flag containing all batches that use it. */
   Map<int, DRWBatchFlag> batches_that_use_buffer_local;
@@ -1492,7 +1492,7 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph *task_graph,
   /* Second chance to early out */
   if ((batch_requested & ~cache.batch_ready) == 0) {
-#ifdef DEBUG
+#ifndef NDEBUG
     drw_mesh_batch_cache_check_available(task_graph, me);
 #endif
     return;
@@ -1815,7 +1815,7 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph *task_graph,
     DRW_vbo_request(cache.batch.surface_viewer_attribute, &mbuflist->vbo.attr_viewer);
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   auto assert_final_deps_valid = [&](const int buffer_index) {
     BLI_assert(batches_that_use_buffer(buffer_index) ==
                batches_that_use_buffer_local.lookup(buffer_index));
@@ -1940,7 +1940,7 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph *task_graph,
    * based on the mode the correct one will be updated. Other option is to look into using
    * drw_batch_cache_generate_requested_delayed. */
   BLI_task_graph_work_and_wait(task_graph);
-#ifdef DEBUG
+#ifndef NDEBUG
   drw_mesh_batch_cache_check_available(task_graph, me);
 #endif
 }

@@ -24,7 +24,7 @@
 #include <iomanip>
 #include <sstream>
-#if defined(DEBUG) || defined(WITH_DRAW_DEBUG)
+#if defined(_DEBUG) || defined(WITH_DRAW_DEBUG)
 # define DRAW_DEBUG
 #else
 /* Uncomment to forcibly enable debug draw in release mode. */

@@ -47,7 +47,7 @@ void Manager::begin_sync()
   acquired_textures.clear();
   layer_attributes.clear();
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Detect uninitialized data. */
   memset(matrix_buf.current().data(),
          0xF0,
@@ -132,7 +132,7 @@ void Manager::end_sync()
 void Manager::debug_bind()
 {
-#ifdef DEBUG
+#ifdef _DEBUG
   if (DST.debug == nullptr) {
     return;
   }

@@ -123,7 +123,7 @@ static void drw_state_prepare_clean_for_draw(DRWManager *dst)
  * where we don't re-use data by accident across different
  * draw calls.
  */
-#ifdef DEBUG
+#ifndef NDEBUG
 static void drw_state_ensure_not_reused(DRWManager *dst)
 {
   memset(dst, 0xff, offsetof(DRWManager, system_gpu_context));
@@ -645,7 +645,7 @@ static void drw_manager_exit(DRWManager *dst)
   }
   dst->vmempool = nullptr;
   dst->viewport = nullptr;
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Avoid accidental reuse. */
   drw_state_ensure_not_reused(dst);
 #endif

@@ -272,7 +272,7 @@ void View::compute_visibility(ObjectBoundsBuf &bounds, uint resource_len, bool d
     culling_freeze_[0] = static_cast<ViewCullingData>(culling_[0]);
     culling_freeze_.push_update();
   }
-#ifdef DEBUG
+#ifdef _DEBUG
   if (debug_freeze) {
     float4x4 persmat = data_freeze_[0].winmat * data_freeze_[0].viewmat;
     drw_debug_matrix_as_bbox(math::invert(persmat), float4(0, 1, 0, 1));

@@ -990,7 +990,7 @@ bool ED_operator_rv3d_user_region_poll(bContext *C);
  */
 void ED_view3d_init_mats_rv3d(const Object *ob, RegionView3D *rv3d);
 void ED_view3d_init_mats_rv3d_gl(const Object *ob, RegionView3D *rv3d);
-#ifdef DEBUG
+#ifndef NDEBUG
 /**
  * Ensure we correctly initialize.
  */

@@ -6382,7 +6382,7 @@ void UI_but_func_search_set(uiBut *but,
   search_but->arg_free_fn = search_arg_free_fn;
   if (search_exec_fn) {
-#ifdef DEBUG
+#ifndef NDEBUG
     if (but->func) {
       /* watch this, can be cause of much confusion, see: #47691 */
       printf("%s: warning, overwriting button callback with search function callback!\n",

@@ -9222,7 +9222,7 @@ static bool ui_handle_button_activate_by_type(bContext *C, ARegion *region, uiBu
     ui_handle_button_activate(C, region, but, BUTTON_ACTIVATE);
   }
   else {
-#ifdef DEBUG
+#ifndef NDEBUG
     printf("%s: error, unhandled type: %d\n", __func__, but->type);
 #endif
     return false;

@@ -595,7 +595,7 @@ uiBlock *ui_popup_block_refresh(bContext *C,
   BLI_assert(!handle->refresh || handle->can_refresh);
-#ifdef DEBUG
+#ifndef NDEBUG
   wmEvent *event_back = window->eventstate;
   wmEvent *event_last_back = window->event_last_handled;
 #endif
@@ -618,7 +618,7 @@ uiBlock *ui_popup_block_refresh(bContext *C,
   BLI_assert(!block->endblock);
   /* ensure we don't use mouse coords here! */
-#ifdef DEBUG
+#ifndef NDEBUG
   window->eventstate = nullptr;
 #endif
@@ -783,7 +783,7 @@ uiBlock *ui_popup_block_refresh(bContext *C,
   ED_region_update_rect(region);
-#ifdef DEBUG
+#ifndef NDEBUG
   window->eventstate = event_back;
   window->event_last_handled = event_last_back;
 #endif

@@ -1689,7 +1689,7 @@ void EDBM_update(Mesh *mesh, const EDBMUpdate_Params *params)
     em->bm->spacearr_dirty &= ~BM_SPACEARR_BMO_SET;
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   {
     LISTBASE_FOREACH (BMEditSelection *, ese, &em->bm->selected) {
       BLI_assert(BM_elem_flag_test(ese->ele, BM_ELEM_SELECT));

@@ -154,7 +154,7 @@ void ED_view3d_init_mats_rv3d_gl(const Object *ob, RegionView3D *rv3d)
   GPU_matrix_mul(ob->object_to_world);
 }
-#ifdef DEBUG
+#ifndef NDEBUG
 void ED_view3d_clear_mats_rv3d(RegionView3D *rv3d)
 {
   zero_m4(rv3d->viewmatob);

@@ -1029,7 +1029,7 @@ static void posttrans_mask_clean(Mask *mask)
     }
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   for (masklay_shape = static_cast<MaskLayerShape *>(masklay->splines_shapes.first);
        masklay_shape;
       masklay_shape = masklay_shape->next)
@@ -1064,7 +1064,7 @@ static void posttrans_gpd_clean(bGPdata *gpd)
     }
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   for (gpf = static_cast<bGPDframe *>(gpl->frames.first); gpf; gpf = gpf->next) {
     BLI_assert(!gpf->next || gpf->framenum < gpf->next->framenum);
   }

@@ -310,7 +310,7 @@ void SnapData::register_result(SnapObjectContext *sctx,
   sctx->ret.loc = math::transform_point(obmat, sctx->ret.loc);
   sctx->ret.no = math::normalize(math::transform_direction(obmat, sctx->ret.no));
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Make sure this is only called once. */
   r_nearest->index = -2;
 #endif
@@ -1206,7 +1206,7 @@ bool ED_transform_snap_object_project_ray_all(SnapObjectContext *sctx,
     return false;
   }
-#ifdef DEBUG
+#ifndef NDEBUG
   float ray_depth_prev = sctx->ret.ray_depth_max;
 #endif
   if (raycastObjects(sctx)) {
@@ -1214,7 +1214,7 @@ bool ED_transform_snap_object_project_ray_all(SnapObjectContext *sctx,
     BLI_listbase_sort(r_hit_list, hit_depth_cmp);
   }
   /* meant to be readonly for 'all' hits, ensure it is */
-#ifdef DEBUG
+#ifndef NDEBUG
   BLI_assert(ray_depth_prev == sctx->ret.ray_depth_max);
 #endif
   return true;

@@ -49,7 +49,7 @@
 #include <atomic>
 #include <thread>
-#ifdef DEBUG
+#ifndef NDEBUG
 # define FN_LAZY_FUNCTION_DEBUG_THREADS
 #endif

@@ -182,7 +182,7 @@ class ParamsBuilder {
   void assert_current_param_type(ParamType param_type, StringRef expected_name = "")
   {
     UNUSED_VARS_NDEBUG(param_type, expected_name);
-#ifdef DEBUG
+#ifndef NDEBUG
     int param_index = this->current_param_index();
     if (expected_name != "") {
@@ -198,7 +198,7 @@ class ParamsBuilder {
   void assert_current_param_name(StringRef expected_name)
   {
     UNUSED_VARS_NDEBUG(expected_name);
-#ifdef DEBUG
+#ifndef NDEBUG
     if (expected_name.is_empty()) {
       return;
     }
@@ -334,7 +334,7 @@ class Params {
   void assert_correct_param(int param_index, StringRef name, ParamType param_type)
   {
     UNUSED_VARS_NDEBUG(param_index, name, param_type);
-#ifdef DEBUG
+#ifndef NDEBUG
     BLI_assert(builder_->signature_->params[param_index].type == param_type);
     if (name.size() > 0) {
       BLI_assert(builder_->signature_->params[param_index].name == name);
@@ -345,7 +345,7 @@ class Params {
   void assert_correct_param(int param_index, StringRef name, ParamCategory category)
   {
     UNUSED_VARS_NDEBUG(param_index, name, category);
-#ifdef DEBUG
+#ifndef NDEBUG
     BLI_assert(builder_->signature_->params[param_index].type.category() == category);
     if (name.size() > 0) {
       BLI_assert(builder_->signature_->params[param_index].name == name);

@@ -927,7 +927,7 @@ class Executor {
     this->with_locked_node(
         node, node_state, current_task, local_data, [&](LockedNode &locked_node) {
-#ifdef DEBUG
+#ifndef NDEBUG
           if (node_needs_execution) {
             this->assert_expected_outputs_have_been_computed(locked_node, local_data);
           }
@@ -1128,7 +1128,7 @@ class Executor {
       const int input_index = target_socket->index();
       InputState &input_state = node_state.inputs[input_index];
      const bool is_last_target = target_socket == targets.last();
-#ifdef DEBUG
+#ifndef NDEBUG
       if (input_state.value != nullptr) {
         if (self_.logger_ != nullptr) {
           self_.logger_->dump_when_input_is_set_twice(*target_socket, from_socket, local_context);

@@ -300,7 +300,7 @@ bke::CurvesGeometry subdivide_curves(
    * Storing the leading zero is unnecessary but makes the array a bit simpler to use by avoiding
    * a check for the first segment, and because some existing utilities also use leading zeros. */
   Array<int> all_point_offset_data(src_curves.points_num() + src_curves.curves_num());
-#ifdef DEBUG
+#ifndef NDEBUG
   all_point_offset_data.fill(-1);
 #endif
   calculate_result_offsets(src_curves,

@@ -343,7 +343,7 @@ static void sample_interval_linear(const Span<T> src_data,
   else {
     dst_data[dst_index] = bke::attribute_math::mix2(
         end_point.parameter, src_data[end_point.index], src_data[end_point.next_index]);
-#ifdef DEBUG
+#ifndef NDEBUG
     ++dst_index;
 #endif
   }
@@ -381,7 +381,7 @@ static void sample_interval_catmull_rom(const Span<T> src_data,
   }
   else {
     dst_data[dst_index] = interpolate_catmull_rom(src_data, end_point, src_cyclic);
-#ifdef DEBUG
+#ifndef NDEBUG
     ++dst_index;
 #endif
   }
@@ -566,9 +566,9 @@ static void sample_interval_bezier(const Span<float3> src_positions,
     dst_positions[dst_index] = end_point_insert.position;
     dst_types_l[dst_index] = src_types_l[end_point.next_index];
     dst_types_r[dst_index] = src_types_r[end_point.next_index];
-#ifdef DEBUG
+#ifndef NDEBUG
     ++dst_index;
-#endif // DEBUG
+#endif
   }
   BLI_assert(dst_index == dst_range.one_after_last());
 }

@@ -129,7 +129,7 @@ typedef struct GPUVertBufRaw {
   uint stride;
   unsigned char *data;
   unsigned char *data_init;
-#ifdef DEBUG
+#ifndef NDEBUG
   /* Only for overflow check */
   unsigned char *_data_end;
 #endif
@@ -139,7 +139,7 @@ GPU_INLINE void *GPU_vertbuf_raw_step(GPUVertBufRaw *a)
 {
   unsigned char *data = a->data;
   a->data += a->stride;
-#ifdef DEBUG
+#ifndef NDEBUG
   BLI_assert(data < a->_data_end);
 #endif
   return (void *)data;

@@ -64,7 +64,7 @@ inline GPUAttachmentType &operator--(GPUAttachmentType &a)
 namespace blender {
 namespace gpu {
-#ifdef DEBUG
+#ifndef NDEBUG
 # define DEBUG_NAME_LEN 64
 #else
 # define DEBUG_NAME_LEN 16

@@ -512,7 +512,7 @@ void gpu_shader_create_info_init()
     info->builtins_ |= gpu_shader_dependency_get_builtins(info->geometry_source_);
     info->builtins_ |= gpu_shader_dependency_get_builtins(info->compute_source_);
-#ifdef DEBUG
+#ifndef NDEBUG
     /* Automatically amend the create info for ease of use of the debug feature. */
     if ((info->builtins_ & BuiltinBits::USE_DEBUG_DRAW) == BuiltinBits::USE_DEBUG_DRAW) {
       info->additional_info("draw_debug_draw");

@@ -109,7 +109,7 @@ struct GPUSource {
     if (source.find("'") != StringRef::not_found) {
       char_literals_preprocess();
     }
-#ifdef DEBUG
+#ifndef NDEBUG
     if (source.find("drw_print") != StringRef::not_found) {
       string_preprocess();
     }
@@ -205,7 +205,7 @@ struct GPUSource {
    */
   void check_no_quotes()
   {
-#ifdef DEBUG
+#ifndef NDEBUG
     int64_t pos = -1;
     do {
       pos = source.find('"', pos + 1);

@@ -18,7 +18,7 @@ namespace gpu {
 class VertBuf;
-#ifdef DEBUG
+#ifndef NDEBUG
 # define DEBUG_NAME_LEN 64
 #else
 # define DEBUG_NAME_LEN 8

@@ -68,7 +68,7 @@ enum eGPUSamplerFormat {
 ENUM_OPERATORS(eGPUSamplerFormat, GPU_SAMPLER_TYPE_UINT)
-#ifdef DEBUG
+#ifndef NDEBUG
 # define DEBUG_NAME_LEN 64
 #else
 # define DEBUG_NAME_LEN 8

@ -15,7 +15,7 @@ struct GPUUniformBuf;
namespace blender { namespace blender {
namespace gpu { namespace gpu {
#ifdef DEBUG #ifndef NDEBUG
# define DEBUG_NAME_LEN 64 # define DEBUG_NAME_LEN 64
#else #else
# define DEBUG_NAME_LEN 8 # define DEBUG_NAME_LEN 8
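
The same DEBUG_NAME_LEN redefinition appears in four GPU headers in this commit (64 bytes in debug builds, 16 or 8 otherwise), most likely so that resource labels shown by GPU debugging tools stay readable while release builds keep per-resource overhead small. The pattern reduced to a sketch:

    #ifndef NDEBUG
    #  define DEBUG_NAME_LEN 64 /* verbose labels for debug tooling */
    #else
    #  define DEBUG_NAME_LEN 8 /* minimal per-resource overhead */
    #endif

    struct Resource {
      char name_[DEBUG_NAME_LEN]; /* label, truncated to fit */
    };
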

@ -273,7 +273,7 @@ void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts_, uint a_idx, GPUVertBufRaw
access->stride = format->stride; access->stride = format->stride;
access->data = (uchar *)verts->data + a->offset; access->data = (uchar *)verts->data + a->offset;
access->data_init = access->data; access->data_init = access->data;
#ifdef DEBUG #ifndef NDEBUG
access->_data_end = access->data_init + size_t(verts->vertex_alloc * format->stride); access->_data_end = access->data_init + size_t(verts->vertex_alloc * format->stride);
#endif #endif
} }

@ -206,6 +206,7 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
BLI_assert(range_len > 0); BLI_assert(range_len > 0);
BLI_assert(mip_texture_base_level_ < texture_.mipmapLevelCount); BLI_assert(mip_texture_base_level_ < texture_.mipmapLevelCount);
BLI_assert(mip_texture_base_layer_ < max_slices); BLI_assert(mip_texture_base_layer_ < max_slices);
UNUSED_VARS_NDEBUG(max_slices);
mip_swizzle_view_ = [texture_ mip_swizzle_view_ = [texture_
newTextureViewWithPixelFormat:texture_view_pixel_format newTextureViewWithPixelFormat:texture_view_pixel_format
textureType:texture_view_texture_type textureType:texture_view_texture_type
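
The added UNUSED_VARS_NDEBUG(max_slices) is the flip side of assert-only variables: with NDEBUG defined, BLI_assert() expands to nothing, max_slices is never read, and the compiler warns about an unused variable. A hypothetical reconstruction of the macro's shape (Blender's real definition lives in BLI_utildefines.h and may differ in detail):

    /* Sketch only: expands to nothing where the asserts consume the
     * variables, and to a no-op reference where they do not. */
    template<typename... T> inline void unused_vars_sink(const T &.../*args*/) {}

    #ifndef NDEBUG
    #  define UNUSED_VARS_NDEBUG(...) /* asserts read the variables */
    #else
    #  define UNUSED_VARS_NDEBUG(...) unused_vars_sink(__VA_ARGS__)
    #endif
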

@ -61,7 +61,7 @@
#define ARG_LIST_CALL(...) VA_NARGS_CALL_OVERLOAD(_VA_ARG_LIST_CALL, __VA_ARGS__) #define ARG_LIST_CALL(...) VA_NARGS_CALL_OVERLOAD(_VA_ARG_LIST_CALL, __VA_ARGS__)
/* clang-format on */ /* clang-format on */
#ifdef DEBUG #ifndef NDEBUG
# define GL_CHECK_RESOURCES(info) debug::check_gl_resources(info) # define GL_CHECK_RESOURCES(info) debug::check_gl_resources(info)
#else #else
# define GL_CHECK_RESOURCES(info) # define GL_CHECK_RESOURCES(info)

@ -93,7 +93,7 @@ void GLStorageBuf::bind(int slot)
slot_ = slot; slot_ = slot;
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, slot_, ssbo_id_); glBindBufferBase(GL_SHADER_STORAGE_BUFFER, slot_, ssbo_id_);
#ifdef DEBUG #ifndef NDEBUG
BLI_assert(slot < 16); BLI_assert(slot < 16);
/* TODO */ /* TODO */
// GLContext::get()->bound_ssbo_slots |= 1 << slot; // GLContext::get()->bound_ssbo_slots |= 1 << slot;
@ -109,7 +109,7 @@ void GLStorageBuf::bind_as(GLenum target)
void GLStorageBuf::unbind() void GLStorageBuf::unbind()
{ {
#ifdef DEBUG #ifndef NDEBUG
/* NOTE: This only unbinds the last bound slot. */ /* NOTE: This only unbinds the last bound slot. */
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, slot_, 0); glBindBufferBase(GL_SHADER_STORAGE_BUFFER, slot_, 0);
/* Hope that the context did not change. */ /* Hope that the context did not change. */

@ -117,7 +117,7 @@ void GLUniformBuf::bind(int slot)
slot_ = slot; slot_ = slot;
glBindBufferBase(GL_UNIFORM_BUFFER, slot_, ubo_id_); glBindBufferBase(GL_UNIFORM_BUFFER, slot_, ubo_id_);
#ifdef DEBUG #ifndef NDEBUG
BLI_assert(slot < 16); BLI_assert(slot < 16);
GLContext::get()->bound_ubo_slots |= 1 << slot; GLContext::get()->bound_ubo_slots |= 1 << slot;
#endif #endif
@ -138,7 +138,7 @@ void GLUniformBuf::bind_as_ssbo(int slot)
void GLUniformBuf::unbind() void GLUniformBuf::unbind()
{ {
#ifdef DEBUG #ifndef NDEBUG
/* NOTE: This only unbinds the last bound slot. */ /* NOTE: This only unbinds the last bound slot. */
glBindBufferBase(GL_UNIFORM_BUFFER, slot_, 0); glBindBufferBase(GL_UNIFORM_BUFFER, slot_, 0);
/* Hope that the context did not change. */ /* Hope that the context did not change. */
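
The four GL buffer hunks share one debug-only bookkeeping scheme: each bind asserts the slot is in range and sets a bit in a per-context mask (still a TODO on the SSBO side). A self-contained sketch of the mechanism; the bit-clearing on unbind is an assumption, as the hunks cut off before it:

    #include <cassert>
    #include <cstdint>

    static std::uint32_t bound_ubo_slots = 0; /* one bit per binding slot */

    void ubo_bind(int slot)
    {
      /* real code: glBindBufferBase(GL_UNIFORM_BUFFER, slot, id); */
    #ifndef NDEBUG
      assert(slot < 16); /* the mask is only meaningful for 16 slots */
      bound_ubo_slots |= 1u << slot;
    #endif
    }

    void ubo_unbind(int slot)
    {
      /* real code unbinds only the last bound slot, as its NOTE warns */
    #ifndef NDEBUG
      bound_ubo_slots &= ~(1u << slot);
    #endif
    }
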

@ -92,7 +92,7 @@ static void warning_callback(const char *msg, void *client_data)
fprintf(stream, "[WARNING] %s", msg); fprintf(stream, "[WARNING] %s", msg);
} }
#ifdef DEBUG #ifndef NDEBUG
/** /**
* sample debug callback expecting no client object * sample debug callback expecting no client object
*/ */
@ -378,7 +378,7 @@ static ImBuf *imb_load_jp2_stream(opj_stream_t *stream,
/* configure the event callbacks (not required) */ /* configure the event callbacks (not required) */
opj_set_error_handler(codec, error_callback, stderr); opj_set_error_handler(codec, error_callback, stderr);
opj_set_warning_handler(codec, warning_callback, stderr); opj_set_warning_handler(codec, warning_callback, stderr);
#ifdef DEBUG /* too noisy */ #ifndef NDEBUG /* too noisy */
opj_set_info_handler(codec, info_callback, stderr); opj_set_info_handler(codec, info_callback, stderr);
#endif #endif
@ -1235,7 +1235,7 @@ bool imb_save_jp2_stream(ImBuf *ibuf, opj_stream_t *stream, int /*flags*/)
/* configure the event callbacks (not required) */ /* configure the event callbacks (not required) */
opj_set_error_handler(codec, error_callback, stderr); opj_set_error_handler(codec, error_callback, stderr);
opj_set_warning_handler(codec, warning_callback, stderr); opj_set_warning_handler(codec, warning_callback, stderr);
#ifdef DEBUG /* too noisy */ #ifndef NDEBUG /* too noisy */
opj_set_info_handler(codec, info_callback, stderr); opj_set_info_handler(codec, info_callback, stderr);
#endif #endif

@ -94,7 +94,7 @@ void importer_main(Main *bmain,
if (import_params.use_mesh_validate) { if (import_params.use_mesh_validate) {
bool verbose_validate = false; bool verbose_validate = false;
#ifdef DEBUG #ifndef NDEBUG
verbose_validate = true; verbose_validate = true;
#endif #endif
BKE_mesh_validate(mesh, verbose_validate, false); BKE_mesh_validate(mesh, verbose_validate, false);

@ -67,7 +67,7 @@ Object *MeshFromGeometry::create_mesh(Main *bmain,
if (import_params.validate_meshes || mesh_geometry_.has_invalid_faces_) { if (import_params.validate_meshes || mesh_geometry_.has_invalid_faces_) {
bool verbose_validate = false; bool verbose_validate = false;
#ifdef DEBUG #ifndef NDEBUG
verbose_validate = true; verbose_validate = true;
#endif #endif
BKE_mesh_validate(mesh, verbose_validate, false); BKE_mesh_validate(mesh, verbose_validate, false);
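
Both importer hunks use the same idiom: mesh validation always runs when requested, but reports verbosely only in builds with asserts enabled. Since NDEBUG is fixed at compile time, an equivalent constant-expression form would be (a sketch of an alternative, not what the commit does):

    #ifndef NDEBUG
    constexpr bool verbose_validate = true; /* report every fix-up */
    #else
    constexpr bool verbose_validate = false; /* validate silently */
    #endif
    /* BKE_mesh_validate(mesh, verbose_validate, false); */
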

@ -36,7 +36,7 @@
static CLG_LogRef LOG = {"rna.define"}; static CLG_LogRef LOG = {"rna.define"};
#ifdef DEBUG #ifndef NDEBUG
# define ASSERT_SOFT_HARD_LIMITS \ # define ASSERT_SOFT_HARD_LIMITS \
if (softmin < hardmin || softmax > hardmax) { \ if (softmin < hardmin || softmax > hardmax) { \
CLOG_ERROR(&LOG, "error with soft/hard limits: %s.%s", CONTAINER_RNA_ID(cont), identifier); \ CLOG_ERROR(&LOG, "error with soft/hard limits: %s.%s", CONTAINER_RNA_ID(cont), identifier); \
@ -1772,7 +1772,7 @@ void RNA_def_property_range(PropertyRNA *prop, double min, double max)
{ {
StructRNA *srna = DefRNA.laststruct; StructRNA *srna = DefRNA.laststruct;
#ifdef DEBUG #ifndef NDEBUG
if (min > max) { if (min > max) {
CLOG_ERROR(&LOG, "\"%s.%s\", min > max.", srna->identifier, prop->identifier); CLOG_ERROR(&LOG, "\"%s.%s\", min > max.", srna->identifier, prop->identifier);
DefRNA.error = true; DefRNA.error = true;
@ -4240,7 +4240,7 @@ PropertyRNA *RNA_def_float_percentage(StructOrFunctionRNA *cont_,
ASSERT_SOFT_HARD_LIMITS; ASSERT_SOFT_HARD_LIMITS;
#ifdef DEBUG #ifndef NDEBUG
/* Properties with PROP_PERCENTAGE should use a range like 0 to 100, unlike PROP_FACTOR. */ /* Properties with PROP_PERCENTAGE should use a range like 0 to 100, unlike PROP_FACTOR. */
if (hardmax < 2.0f) { if (hardmax < 2.0f) {
CLOG_WARN(&LOG, CLOG_WARN(&LOG,
@ -4559,7 +4559,7 @@ void RNA_enum_item_add(EnumPropertyItem **items, int *totitem, const EnumPropert
if (tot == 0) { if (tot == 0) {
*items = static_cast<EnumPropertyItem *>(MEM_callocN(sizeof(EnumPropertyItem[8]), __func__)); *items = static_cast<EnumPropertyItem *>(MEM_callocN(sizeof(EnumPropertyItem[8]), __func__));
/* Ensure we get crashes on missing calls to 'RNA_enum_item_end', see #74227. */ /* Ensure we get crashes on missing calls to 'RNA_enum_item_end', see #74227. */
#ifdef DEBUG #ifndef NDEBUG
memset(*items, 0xff, sizeof(EnumPropertyItem[8])); memset(*items, 0xff, sizeof(EnumPropertyItem[8]));
#endif #endif
} }
@ -4567,7 +4567,7 @@ void RNA_enum_item_add(EnumPropertyItem **items, int *totitem, const EnumPropert
/* Power of two > 8. */ /* Power of two > 8. */
*items = static_cast<EnumPropertyItem *>( *items = static_cast<EnumPropertyItem *>(
MEM_recallocN_id(*items, sizeof(EnumPropertyItem) * tot * 2, __func__)); MEM_recallocN_id(*items, sizeof(EnumPropertyItem) * tot * 2, __func__));
#ifdef DEBUG #ifndef NDEBUG
memset((*items) + tot, 0xff, sizeof(EnumPropertyItem) * tot); memset((*items) + tot, 0xff, sizeof(EnumPropertyItem) * tot);
#endif #endif
} }
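
The two memset(0xff) hunks are debug-build memory poisoning: MEM_callocN returns zeroed memory, and a fully zeroed entry looks like the terminator that RNA_enum_item_end() is supposed to write, so freshly allocated and freshly grown storage is filled with 0xff to turn a missing terminator into an immediate crash (see #74227 in the comment above). The idiom in isolation:

    #include <cstdlib>
    #include <cstring>

    struct EnumItem {
      const char *identifier; /* a zeroed entry terminates the array */
      int value;
    };

    EnumItem *alloc_enum_items(std::size_t n)
    {
      EnumItem *items = static_cast<EnumItem *>(std::calloc(n, sizeof(EnumItem)));
    #ifndef NDEBUG
      if (items != nullptr) {
        /* Poison the zeroed, terminator-looking memory: walking the array
         * before the real terminator is written now dereferences garbage
         * pointers and crashes loudly instead of reading silently. */
        std::memset(items, 0xff, n * sizeof(EnumItem));
      }
    #endif
      return items;
    }
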

@ -114,7 +114,7 @@ class GeoNodeExecParams {
return value_or_field_type.as_field(params_.try_get_input_data_ptr(index)); return value_or_field_type.as_field(params_.try_get_input_data_ptr(index));
} }
else { else {
#ifdef DEBUG #ifndef NDEBUG
this->check_input_access(identifier, &CPPType::get<T>()); this->check_input_access(identifier, &CPPType::get<T>());
#endif #endif
const int index = this->get_input_index(identifier); const int index = this->get_input_index(identifier);
@ -144,7 +144,7 @@ class GeoNodeExecParams {
return value_or_field.as_field(); return value_or_field.as_field();
} }
else { else {
#ifdef DEBUG #ifndef NDEBUG
this->check_input_access(identifier, &CPPType::get<T>()); this->check_input_access(identifier, &CPPType::get<T>());
#endif #endif
const int index = this->get_input_index(identifier); const int index = this->get_input_index(identifier);
@ -177,7 +177,7 @@ class GeoNodeExecParams {
}); });
} }
else { else {
#ifdef DEBUG #ifndef NDEBUG
const CPPType &type = CPPType::get<StoredT>(); const CPPType &type = CPPType::get<StoredT>();
this->check_output_access(identifier, type); this->check_output_access(identifier, type);
#endif #endif
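
All three GeoNodeExecParams hunks guard the same thing: the requested C++ type is validated against the declared socket only in debug builds, keeping the release lookup path lean. A standalone analogue with hypothetical names (CPPType and check_input_access are Blender APIs not reproduced here):

    #include <cassert>
    #include <typeinfo>

    template<typename T>
    T &get_checked(void *storage, const std::type_info &stored_type)
    {
    #ifndef NDEBUG
      assert(stored_type == typeid(T)); /* debug builds verify the request */
    #else
      (void)stored_type; /* release builds trust the caller */
    #endif
      return *static_cast<T *>(storage);
    }
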

@ -110,7 +110,7 @@ void GeoNodeExecParams::check_input_geometry_set(StringRef identifier,
void GeoNodeExecParams::check_output_geometry_set(const GeometrySet &geometry_set) const void GeoNodeExecParams::check_output_geometry_set(const GeometrySet &geometry_set) const
{ {
UNUSED_VARS_NDEBUG(geometry_set); UNUSED_VARS_NDEBUG(geometry_set);
#ifdef DEBUG #ifndef NDEBUG
if (const bke::CurvesEditHints *curve_edit_hints = geometry_set.get_curve_edit_hints()) { if (const bke::CurvesEditHints *curve_edit_hints = geometry_set.get_curve_edit_hints()) {
/* If this is not valid, it's likely that the number of stored deformed points does not match /* If this is not valid, it's likely that the number of stored deformed points does not match
* the number of points in the original data. */ * the number of points in the original data. */

@ -8829,7 +8829,7 @@ void pyrna_alloc_types()
* But keep running in debug mode so we get immediate notification of bad class hierarchy * But keep running in debug mode so we get immediate notification of bad class hierarchy
* or any errors in "bpy_types.py" at load time, so errors don't go unnoticed. */ * or any errors in "bpy_types.py" at load time, so errors don't go unnoticed. */
#ifdef DEBUG #ifndef NDEBUG
PyGILState_STATE gilstate; PyGILState_STATE gilstate;
PropertyRNA *prop; PropertyRNA *prop;
@ -8855,7 +8855,7 @@ void pyrna_alloc_types()
RNA_PROP_END; RNA_PROP_END;
PyGILState_Release(gilstate); PyGILState_Release(gilstate);
#endif /* DEBUG */ #endif /* NDEBUG */
} }
void pyrna_free_types() void pyrna_free_types()
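
Here the entire function body is debug-only: release builds compile pyrna_alloc_types() down to an empty function, while debug builds instance every RNA type at startup so class-hierarchy problems or errors in bpy_types.py surface immediately. The shape of the function, with the RNA iteration elided as it is in the hunk:

    #include <Python.h>

    void pyrna_alloc_types()
    {
    #ifndef NDEBUG
      PyGILState_STATE gilstate = PyGILState_Ensure();
      /* ... walk every registered RNA struct, instancing its Python
       * type so registration errors surface at startup ... */
      PyGILState_Release(gilstate);
    #endif
    }
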

@ -20,7 +20,7 @@ typedef unsigned short ushort;
/* matrix[row][col] == MATRIX_ITEM_INDEX(matrix, row, col) */ /* matrix[row][col] == MATRIX_ITEM_INDEX(matrix, row, col) */
#ifdef DEBUG #ifndef NDEBUG
# define MATRIX_ITEM_ASSERT(_mat, _row, _col) \ # define MATRIX_ITEM_ASSERT(_mat, _row, _col) \
(BLI_assert(_row < (_mat)->row_num && _col < (_mat)->col_num)) (BLI_assert(_row < (_mat)->row_num && _col < (_mat)->col_num))
#else #else
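
The hunk cuts off at the #else; presumably the release branch defines the macro to nothing (an assumption, as the body is not shown). The complete shape of such a bounds-check macro:

    #ifndef NDEBUG
    #  define MATRIX_ITEM_ASSERT(_mat, _row, _col) \
        (BLI_assert(_row < (_mat)->row_num && _col < (_mat)->col_num))
    #else
    #  define MATRIX_ITEM_ASSERT(_mat, _row, _col) /* assumed no-op */
    #endif
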

@ -53,7 +53,7 @@ struct wmMsgTypeInfo {
struct wmMsg { struct wmMsg {
unsigned int type; unsigned int type;
// #ifdef DEBUG // #ifndef NDEBUG
/* For debugging: '__func__:__LINE__'. */ /* For debugging: '__func__:__LINE__'. */
const char *id; const char *id;
// #endif // #endif