Clang Format: bump to version 17

Along with the 4.1 libraries upgrade, we are bumping the clang-format
version from 8-12 to 17. This affects quite a few files.

If you have not already done so, consider pointing your IDE to the
clang-format binary bundled with the Blender precompiled libraries.
This commit is contained in:
Brecht Van Lommel 2024-01-02 18:12:54 +01:00
parent 40953b0e39
commit d377ef2543
619 changed files with 2440 additions and 1596 deletions

@ -313,7 +313,8 @@ static void attr_create_generic(Scene *scene,
}
if (b_attr.domain == blender::bke::AttrDomain::Corner &&
meta_data.data_type == CD_PROP_BYTE_COLOR) {
meta_data.data_type == CD_PROP_BYTE_COLOR)
{
Attribute *attr = attributes.add(name, TypeRGBA, ATTR_ELEMENT_CORNER_BYTE);
if (is_render_color) {
attr->std = ATTR_STD_VERTEX_COLOR;
@ -422,7 +423,8 @@ static set<ustring> get_blender_uv_names(const ::Mesh &b_mesh)
b_mesh.attributes().for_all([&](const blender::bke::AttributeIDRef &id,
const blender::bke::AttributeMetaData meta_data) {
if (meta_data.domain == blender::bke::AttrDomain::Corner &&
meta_data.data_type == CD_PROP_FLOAT2) {
meta_data.data_type == CD_PROP_FLOAT2)
{
if (!id.is_anonymous()) {
uv_names.emplace(std::string_view(id.name()));
}
@ -1250,7 +1252,8 @@ void BlenderSync::sync_mesh_motion(BL::Depsgraph b_depsgraph,
if (new_attribute) {
/* In case of new attribute, we verify if there really was any motion. */
if (b_verts_num != numverts ||
memcmp(mP, &mesh->get_verts()[0], sizeof(float3) * numverts) == 0) {
memcmp(mP, &mesh->get_verts()[0], sizeof(float3) * numverts) == 0)
{
/* no motion, remove attributes again */
if (b_verts_num != numverts) {
VLOG_WARNING << "Topology differs, disabling motion blur for object " << ob_name;

@ -807,7 +807,8 @@ static PyObject *merge_func(PyObject * /*self*/, PyObject *args, PyObject *keywo
PyObject *pyinput, *pyoutput = NULL;
if (!PyArg_ParseTupleAndKeywords(
args, keywords, "OO", (char **)keyword_list, &pyinput, &pyoutput)) {
args, keywords, "OO", (char **)keyword_list, &pyinput, &pyoutput))
{
return NULL;
}

@ -187,7 +187,8 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
if (updated_geometry) {
BL::Object::particle_systems_iterator b_psys;
for (b_ob.particle_systems.begin(b_psys); b_psys != b_ob.particle_systems.end();
++b_psys) {
++b_psys)
{
particle_system_map.set_recalc(b_ob);
}
}
@ -405,7 +406,8 @@ void BlenderSync::sync_integrator(BL::ViewLayer &b_view_layer, bool background)
/* Only use scrambling distance in the viewport if user wants to. */
bool preview_scrambling_distance = get_boolean(cscene, "preview_scrambling_distance");
if ((preview && !preview_scrambling_distance) ||
sampling_pattern == SAMPLING_PATTERN_SOBOL_BURLEY) {
sampling_pattern == SAMPLING_PATTERN_SOBOL_BURLEY)
{
scrambling_distance = 1.0f;
}

@ -5,7 +5,7 @@
*
* Adapted code from Intel Corporation. */
//#define __KERNEL_SSE__
// #define __KERNEL_SSE__
#include "bvh/binning.h"

@ -23,7 +23,7 @@ CCL_NAMESPACE_BEGIN
metal_printf("%s\n", str.c_str()); \
}
//# define BVH_THROTTLE_DIAGNOSTICS
// # define BVH_THROTTLE_DIAGNOSTICS
# ifdef BVH_THROTTLE_DIAGNOSTICS
# define bvh_throttle_printf(...) printf("BVHMetalBuildThrottler::" __VA_ARGS__)
# else

@ -361,7 +361,8 @@ MetalKernelPipeline *ShaderCache::get_best_pipeline(DeviceKernel kernel, const M
thread_scoped_lock lock(cache_mutex);
for (auto &candidate : pipelines[kernel]) {
if (candidate->loaded &&
candidate->kernels_md5 == device->kernels_md5[candidate->pso_type]) {
candidate->kernels_md5 == device->kernels_md5[candidate->pso_type])
{
/* Replace existing match if candidate is more specialized. */
if (!best_match || candidate->pso_type > best_match->pso_type) {
best_match = candidate.get();
@ -795,7 +796,8 @@ void MetalKernelPipeline::compile()
if (ShaderCache::running) {
if (creating_new_archive || recreate_archive) {
if (![archive serializeToURL:[NSURL fileURLWithPath:@(metalbin_path.c_str())]
error:&error]) {
error:&error])
{
metal_printf("Failed to save binary archive to %s, error:\n%s\n",
metalbin_path.c_str(),
[[error localizedDescription] UTF8String]);

@ -652,7 +652,8 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
((MyDeviceMemory *)it.first)->device_copy_from__IntegratorQueueCounter();
if (IntegratorQueueCounter *queue_counter = (IntegratorQueueCounter *)
it.first->host_pointer) {
it.first->host_pointer)
{
for (int i = 0; i < DEVICE_KERNEL_INTEGRATOR_NUM; i++)
printf("%s%d", i == 0 ? "" : ",", int(queue_counter->num_queued[i]));
}

@ -154,7 +154,8 @@ void HdCyclesCurves::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
for (const auto &interpolation : interpolations) {
for (const HdPrimvarDescriptor &desc :
GetPrimvarDescriptors(sceneDelegate, interpolation.first)) {
GetPrimvarDescriptors(sceneDelegate, interpolation.first))
{
// Skip special primvars that are handled separately
if (desc.name == HdTokens->points || desc.name == HdTokens->widths) {
continue;
@ -172,7 +173,8 @@ void HdCyclesCurves::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
std = ATTR_STD_UV;
}
else if (desc.name == HdTokens->displayColor &&
interpolation.first == HdInterpolationConstant) {
interpolation.first == HdInterpolationConstant)
{
if (value.IsHolding<VtVec3fArray>() && value.GetArraySize() == 1) {
const GfVec3f color = value.UncheckedGet<VtVec3fArray>()[0];
_instances[0]->set_color(make_float3(color[0], color[1], color[2]));

@ -14,8 +14,7 @@ HDCYCLES_NAMESPACE_OPEN_SCOPE
class HdCyclesCurves final : public HdCyclesGeometry<PXR_NS::HdBasisCurves, CCL_NS::Hair> {
public:
HdCyclesCurves(
const PXR_NS::SdfPath &rprimId
HdCyclesCurves(const PXR_NS::SdfPath &rprimId
#if PXR_VERSION < 2102
,
const PXR_NS::SdfPath &instancerId = {}

@ -311,7 +311,8 @@ void HdCyclesMesh::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
for (const auto &interpolation : interpolations) {
for (const HdPrimvarDescriptor &desc :
GetPrimvarDescriptors(sceneDelegate, interpolation.first)) {
GetPrimvarDescriptors(sceneDelegate, interpolation.first))
{
// Skip special primvars that are handled separately
if (desc.name == HdTokens->points || desc.name == HdTokens->normals) {
continue;
@ -337,7 +338,8 @@ void HdCyclesMesh::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
}
}
else if (desc.name == HdTokens->displayColor &&
interpolation.first == HdInterpolationConstant) {
interpolation.first == HdInterpolationConstant)
{
if (value.IsHolding<VtVec3fArray>() && value.GetArraySize() == 1) {
const GfVec3f color = value.UncheckedGet<VtVec3fArray>()[0];
_instances[0]->set_color(make_float3(color[0], color[1], color[2]));

@ -15,8 +15,7 @@ HDCYCLES_NAMESPACE_OPEN_SCOPE
class HdCyclesMesh final : public HdCyclesGeometry<PXR_NS::HdMesh, CCL_NS::Mesh> {
public:
HdCyclesMesh(
const PXR_NS::SdfPath &rprimId
HdCyclesMesh(const PXR_NS::SdfPath &rprimId
#if PXR_VERSION < 2102
,
const PXR_NS::SdfPath &instancerId = {}

@ -154,7 +154,8 @@ void HdCyclesPoints::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
for (const auto &interpolation : interpolations) {
for (const HdPrimvarDescriptor &desc :
GetPrimvarDescriptors(sceneDelegate, interpolation.first)) {
GetPrimvarDescriptors(sceneDelegate, interpolation.first))
{
// Skip special primvars that are handled separately
if (desc.name == HdTokens->points || desc.name == HdTokens->widths) {
continue;
@ -180,7 +181,8 @@ void HdCyclesPoints::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
}
}
else if (desc.name == HdTokens->displayColor &&
interpolation.first == HdInterpolationConstant) {
interpolation.first == HdInterpolationConstant)
{
if (value.IsHolding<VtVec3fArray>() && value.GetArraySize() == 1) {
const GfVec3f color = value.UncheckedGet<VtVec3fArray>()[0];
_instances[0]->set_color(make_float3(color[0], color[1], color[2]));

@ -14,8 +14,7 @@ HDCYCLES_NAMESPACE_OPEN_SCOPE
class HdCyclesPoints final : public HdCyclesGeometry<PXR_NS::HdPoints, CCL_NS::PointCloud> {
public:
HdCyclesPoints(
const PXR_NS::SdfPath &rprimId
HdCyclesPoints(const PXR_NS::SdfPath &rprimId
#if PXR_VERSION < 2102
,
const PXR_NS::SdfPath &instancerId = {}

@ -14,8 +14,7 @@ HDCYCLES_NAMESPACE_OPEN_SCOPE
class HdCyclesVolume final : public HdCyclesGeometry<PXR_NS::HdVolume, CCL_NS::Volume> {
public:
HdCyclesVolume(
const PXR_NS::SdfPath &rprimId
HdCyclesVolume(const PXR_NS::SdfPath &rprimId
#if PXR_VERSION < 2102
,
const PXR_NS::SdfPath &instancerId = {}

@ -215,7 +215,8 @@ class OIDNDenoiseContext {
DCHECK(!oidn_pass.use_compositing);
if (denoise_params_.prefilter != DENOISER_PREFILTER_ACCURATE &&
!is_pass_scale_needed(oidn_pass)) {
!is_pass_scale_needed(oidn_pass))
{
/* Pass data is available as-is from the render buffers. */
return;
}

@ -196,7 +196,8 @@ bool PassAccessor::get_render_tile_pixels(const RenderBuffers *render_buffers,
/* Use alpha for colors passes. */
if (type == PASS_DIFFUSE_COLOR || type == PASS_GLOSSY_COLOR ||
type == PASS_TRANSMISSION_COLOR) {
type == PASS_TRANSMISSION_COLOR)
{
num_written_components = destination.num_components;
}
}
@ -206,7 +207,8 @@ bool PassAccessor::get_render_tile_pixels(const RenderBuffers *render_buffers,
get_pass_float3(render_buffers, buffer_params, destination);
}
else if (type == PASS_COMBINED || type == PASS_SHADOW_CATCHER ||
type == PASS_SHADOW_CATCHER_MATTE) {
type == PASS_SHADOW_CATCHER_MATTE)
{
/* Passes with transparency as 4th component. */
get_pass_combined(render_buffers, buffer_params, destination);
}

@ -127,7 +127,8 @@ void PathTraceDisplay::copy_pixels_to_texture(
const half4 *rgba_row = rgba_pixels;
half4 *mapped_rgba_row = mapped_rgba_pixels + texture_y * texture_width + texture_x;
for (int y = 0; y < pixels_height;
++y, rgba_row += pixels_width, mapped_rgba_row += texture_width) {
++y, rgba_row += pixels_width, mapped_rgba_row += texture_width)
{
memcpy(mapped_rgba_row, rgba_row, sizeof(half4) * pixels_width);
}
}

@ -134,13 +134,15 @@ void PathTraceWorkCPU::render_samples_full_pipeline(KernelGlobalsCPU *kernel_glo
if (has_bake) {
if (!kernels_.integrator_init_from_bake(
kernel_globals, state, &sample_work_tile, render_buffer)) {
kernel_globals, state, &sample_work_tile, render_buffer))
{
break;
}
}
else {
if (!kernels_.integrator_init_from_camera(
kernel_globals, state, &sample_work_tile, render_buffer)) {
kernel_globals, state, &sample_work_tile, render_buffer))
{
break;
}
}

@ -964,7 +964,8 @@ void PathTraceWorkGPU::copy_to_display_naive(PathTraceDisplay *display,
* change of the resolution divider. However, if the display becomes smaller, shrink the
* allocated memory as well. */
if (display_rgba_half_.data_width != final_width ||
display_rgba_half_.data_height != final_height) {
display_rgba_half_.data_height != final_height)
{
display_rgba_half_.alloc(final_width, final_height);
/* TODO(sergey): There should be a way to make sure device-side memory is allocated without
* transferring zeroes to the device. */

@ -971,7 +971,8 @@ bool RenderScheduler::work_need_denoise(bool &delayed, bool &ready_to_display)
/* Immediately denoise when we reach the start sample or last sample. */
if (num_samples_finished == denoiser_params_.start_sample ||
num_samples_finished == num_samples_) {
num_samples_finished == num_samples_)
{
return true;
}

@ -24,7 +24,8 @@ ccl_device
#else
ccl_device_inline
#endif
bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
bool
BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
ccl_private const Ray *ray,
ccl_private LocalIntersection *local_isect,
int local_object,

@ -26,7 +26,8 @@ ccl_device
#else
ccl_device_inline
#endif
bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
bool
BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
ccl_private const Ray *ray,
IntegratorShadowState state,
const uint visibility,

@ -24,7 +24,8 @@ ccl_device
#else
ccl_device_inline
#endif
bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
bool
BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
ccl_private const Ray *ray,
ccl_private Intersection *isect,
const uint visibility)

@ -24,7 +24,8 @@ ccl_device
#else
ccl_device_inline
#endif
uint BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
uint
BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
ccl_private const Ray *ray,
Intersection *isect_array,
const uint max_hits,

@ -747,7 +747,8 @@ ccl_device int bsdf_hair_huang_sample(const KernelGlobals kg,
const float T3 = 1.0f - R3;
if (cos_theta_t3 != 0.0f &&
microfacet_visible(wtr, -wtrt, make_float3(wmtr.x, 0.0f, wmtr.z), wh3)) {
microfacet_visible(wtr, -wtrt, make_float3(wmtr.x, 0.0f, wmtr.z), wh3))
{
TRT = bsdf->extra->TRT * TR * make_spectrum(T3) *
bsdf_Go(roughness2, cos_mi3, dot(wmtr, -wtrt));
}

@ -741,7 +741,8 @@ ccl_device_intersect bool kernel_embree_intersect(KernelGlobals kg,
rtcIntersect1(kernel_data.device_bvh, &ctx, &ray_hit);
#endif
if (ray_hit.hit.geomID == RTC_INVALID_GEOMETRY_ID ||
ray_hit.hit.primID == RTC_INVALID_GEOMETRY_ID) {
ray_hit.hit.primID == RTC_INVALID_GEOMETRY_ID)
{
return false;
}

@ -25,13 +25,12 @@ static OneAPIErrorCallback s_error_cb = nullptr;
static void *s_error_user_ptr = nullptr;
# ifdef WITH_EMBREE_GPU
static const RTCFeatureFlags CYCLES_ONEAPI_EMBREE_BASIC_FEATURES =
(const RTCFeatureFlags)(RTC_FEATURE_FLAG_TRIANGLE | RTC_FEATURE_FLAG_INSTANCE |
RTC_FEATURE_FLAG_FILTER_FUNCTION_IN_ARGUMENTS |
RTC_FEATURE_FLAG_POINT | RTC_FEATURE_FLAG_MOTION_BLUR);
static const RTCFeatureFlags CYCLES_ONEAPI_EMBREE_ALL_FEATURES =
(const RTCFeatureFlags)(CYCLES_ONEAPI_EMBREE_BASIC_FEATURES |
RTC_FEATURE_FLAG_ROUND_CATMULL_ROM_CURVE |
static const RTCFeatureFlags CYCLES_ONEAPI_EMBREE_BASIC_FEATURES = (const RTCFeatureFlags)(
RTC_FEATURE_FLAG_TRIANGLE | RTC_FEATURE_FLAG_INSTANCE |
RTC_FEATURE_FLAG_FILTER_FUNCTION_IN_ARGUMENTS | RTC_FEATURE_FLAG_POINT |
RTC_FEATURE_FLAG_MOTION_BLUR);
static const RTCFeatureFlags CYCLES_ONEAPI_EMBREE_ALL_FEATURES = (const RTCFeatureFlags)(
CYCLES_ONEAPI_EMBREE_BASIC_FEATURES | RTC_FEATURE_FLAG_ROUND_CATMULL_ROM_CURVE |
RTC_FEATURE_FLAG_FLAT_CATMULL_ROM_CURVE);
# endif

@ -136,14 +136,14 @@ ccl_device_noinline float subd_triangle_attribute_float(KernelGlobals kg,
}
else
#endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE)
{
if (desc.element == ATTR_ELEMENT_FACE) {
if (dx)
*dx = 0.0f;
if (dy)
*dy = 0.0f;
return kernel_data_fetch(attributes_float, desc.offset + subd_triangle_patch_face(kg, patch));
return kernel_data_fetch(attributes_float,
desc.offset + subd_triangle_patch_face(kg, patch));
}
else if (desc.element == ATTR_ELEMENT_VERTEX || desc.element == ATTR_ELEMENT_VERTEX_MOTION) {
float2 uv[3];
@ -277,14 +277,14 @@ ccl_device_noinline float2 subd_triangle_attribute_float2(KernelGlobals kg,
}
else
#endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE)
{
if (desc.element == ATTR_ELEMENT_FACE) {
if (dx)
*dx = make_float2(0.0f, 0.0f);
if (dy)
*dy = make_float2(0.0f, 0.0f);
return kernel_data_fetch(attributes_float2, desc.offset + subd_triangle_patch_face(kg, patch));
return kernel_data_fetch(attributes_float2,
desc.offset + subd_triangle_patch_face(kg, patch));
}
else if (desc.element == ATTR_ELEMENT_VERTEX || desc.element == ATTR_ELEMENT_VERTEX_MOTION) {
float2 uv[3];
@ -419,14 +419,14 @@ ccl_device_noinline float3 subd_triangle_attribute_float3(KernelGlobals kg,
}
else
#endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE)
{
if (desc.element == ATTR_ELEMENT_FACE) {
if (dx)
*dx = make_float3(0.0f, 0.0f, 0.0f);
if (dy)
*dy = make_float3(0.0f, 0.0f, 0.0f);
return kernel_data_fetch(attributes_float3, desc.offset + subd_triangle_patch_face(kg, patch));
return kernel_data_fetch(attributes_float3,
desc.offset + subd_triangle_patch_face(kg, patch));
}
else if (desc.element == ATTR_ELEMENT_VERTEX || desc.element == ATTR_ELEMENT_VERTEX_MOTION) {
float2 uv[3];
@ -566,14 +566,14 @@ ccl_device_noinline float4 subd_triangle_attribute_float4(KernelGlobals kg,
}
else
#endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE)
{
if (desc.element == ATTR_ELEMENT_FACE) {
if (dx)
*dx = zero_float4();
if (dy)
*dy = zero_float4();
return kernel_data_fetch(attributes_float4, desc.offset + subd_triangle_patch_face(kg, patch));
return kernel_data_fetch(attributes_float4,
desc.offset + subd_triangle_patch_face(kg, patch));
}
else if (desc.element == ATTR_ELEMENT_VERTEX || desc.element == ATTR_ELEMENT_VERTEX_MOTION) {
float2 uv[3];
@ -614,14 +614,14 @@ ccl_device_noinline float4 subd_triangle_attribute_float4(KernelGlobals kg,
float4 f0, f1, f2, f3;
if (desc.element == ATTR_ELEMENT_CORNER_BYTE) {
f0 = color_srgb_to_linear_v4(
color_uchar4_to_float4(kernel_data_fetch(attributes_uchar4, corners[0] + desc.offset)));
f1 = color_srgb_to_linear_v4(
color_uchar4_to_float4(kernel_data_fetch(attributes_uchar4, corners[1] + desc.offset)));
f2 = color_srgb_to_linear_v4(
color_uchar4_to_float4(kernel_data_fetch(attributes_uchar4, corners[2] + desc.offset)));
f3 = color_srgb_to_linear_v4(
color_uchar4_to_float4(kernel_data_fetch(attributes_uchar4, corners[3] + desc.offset)));
f0 = color_srgb_to_linear_v4(color_uchar4_to_float4(
kernel_data_fetch(attributes_uchar4, corners[0] + desc.offset)));
f1 = color_srgb_to_linear_v4(color_uchar4_to_float4(
kernel_data_fetch(attributes_uchar4, corners[1] + desc.offset)));
f2 = color_srgb_to_linear_v4(color_uchar4_to_float4(
kernel_data_fetch(attributes_uchar4, corners[2] + desc.offset)));
f3 = color_srgb_to_linear_v4(color_uchar4_to_float4(
kernel_data_fetch(attributes_uchar4, corners[3] + desc.offset)));
}
else {
f0 = kernel_data_fetch(attributes_float4, corners[0] + desc.offset);

@ -513,7 +513,8 @@ ccl_device_forceinline bool guiding_bsdf_init(KernelGlobals kg,
{
#if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 4
if (kg->opgl_surface_sampling_distribution->Init(
kg->opgl_guiding_field, guiding_point3f(P), rand)) {
kg->opgl_guiding_field, guiding_point3f(P), rand))
{
kg->opgl_surface_sampling_distribution->ApplyCosineProduct(guiding_point3f(N));
return true;
}
@ -576,7 +577,8 @@ ccl_device_forceinline bool guiding_phase_init(KernelGlobals kg,
}
if (kg->opgl_volume_sampling_distribution->Init(
kg->opgl_guiding_field, guiding_point3f(P), rand)) {
kg->opgl_guiding_field, guiding_point3f(P), rand))
{
kg->opgl_volume_sampling_distribution->ApplySingleLobeHenyeyGreensteinProduct(guiding_vec3f(D),
g);
return true;

@ -58,7 +58,8 @@ ccl_device void integrator_volume_stack_update_for_subsurface(KernelGlobals kg,
Intersection isect;
int step = 0;
while (step < 2 * volume_stack_size &&
scene_intersect_volume(kg, &volume_ray, &isect, visibility)) {
scene_intersect_volume(kg, &volume_ray, &isect, visibility))
{
/* Ignore self, SSS itself already enters and exits the object. */
if (isect.object != volume_ray.self.object) {
shader_setup_from_ray(kg, stack_sd, &volume_ray, &isect);

@ -110,7 +110,8 @@ ccl_device_inline void integrate_background(KernelGlobals kg,
float mis_weight = 1.0f;
/* Check if background light exists or if we should skip PDF. */
if (!(INTEGRATOR_STATE(state, path, flag) & PATH_RAY_MIS_SKIP) &&
kernel_data.background.use_mis) {
kernel_data.background.use_mis)
{
mis_weight = light_sample_mis_weight_forward_background(kg, state, path_flag);
}

@ -560,7 +560,8 @@ ccl_device_forceinline void integrate_surface_ao(KernelGlobals kg,
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
if (!(kernel_data.kernel_features & KERNEL_FEATURE_AO_ADDITIVE) &&
!(path_flag & PATH_RAY_CAMERA)) {
!(path_flag & PATH_RAY_CAMERA))
{
return;
}

@ -1212,7 +1212,8 @@ ccl_device void integrator_shade_volume(KernelGlobals kg,
# ifdef __SHADOW_LINKING__
if (shadow_linking_schedule_intersection_kernel<DEVICE_KERNEL_INTEGRATOR_SHADE_VOLUME>(kg,
state)) {
state))
{
return;
}
# endif /* __SHADOW_LINKING__ */

@ -167,7 +167,8 @@ ccl_device_inline void surface_shader_prepare_closures(KernelGlobals kg,
sc->sample_weight = 0.0f;
}
else if ((CLOSURE_IS_BSDF_TRANSPARENT(sc->type) &&
(filter_closures & FILTER_CLOSURE_TRANSPARENT))) {
(filter_closures & FILTER_CLOSURE_TRANSPARENT)))
{
sc->type = CLOSURE_HOLDOUT_ID;
sc->sample_weight = 0.0f;
sd->flag |= SD_HOLDOUT;

@ -55,7 +55,8 @@ ccl_device float3 background_map_sample(KernelGlobals kg, float2 rand, ccl_priva
int middle = first + step;
if (kernel_data_fetch(light_background_conditional_cdf, index_v * cdf_width + middle).y <
rand.x) {
rand.x)
{
first = middle + 1;
count -= step + 1;
}

@ -712,7 +712,8 @@ static bool set_attribute_int(int i, TypeDesc type, bool derivatives, void *val)
static bool set_attribute_string(ustring str, TypeDesc type, bool derivatives, void *val)
{
if (type.basetype == TypeDesc::STRING && type.aggregate == TypeDesc::SCALAR &&
type.arraylen == 0) {
type.arraylen == 0)
{
ustring *sval = (ustring *)val;
sval[0] = str;

@ -101,11 +101,9 @@ struct ShaderGlobals {
int backfacing;
};
struct OSLNoiseOptions {
};
struct OSLNoiseOptions {};
struct OSLTextureOptions {
};
struct OSLTextureOptions {};
#define OSL_TEXTURE_HANDLE_TYPE_IES ((uintptr_t)0x2 << 30)
#define OSL_TEXTURE_HANDLE_TYPE_SVM ((uintptr_t)0x1 << 30)

@ -12,7 +12,7 @@ CCL_NAMESPACE_BEGIN
/* Pseudo random numbers, uncomment this for debugging correlations. Only run
* this single threaded on a CPU for repeatable results. */
//#define __DEBUG_CORRELATION__
// #define __DEBUG_CORRELATION__
/*
* The `path_rng_*()` functions below use a shuffled scrambled Sobol

@ -341,7 +341,7 @@ enum PathRayMNEE {
#define SHADOW_CATCHER_VISIBILITY_SHIFT(visibility) ((visibility) << 16)
#define SHADOW_CATCHER_PATH_VISIBILITY(path_flag, visibility) \
(((path_flag)&PATH_RAY_SHADOW_CATCHER_PASS) ? SHADOW_CATCHER_VISIBILITY_SHIFT(visibility) : \
(((path_flag) & PATH_RAY_SHADOW_CATCHER_PASS) ? SHADOW_CATCHER_VISIBILITY_SHIFT(visibility) : \
(visibility))
#define SHADOW_CATCHER_OBJECT_VISIBILITY(is_shadow_catcher, visibility) \
@ -648,7 +648,8 @@ typedef enum PrimitiveType {
} PrimitiveType;
/* Convert type to index in range 0..PRIMITIVE_NUM-1. */
#define PRIMITIVE_INDEX(type) (bitscan((uint32_t)(type)) * 2 + (((type)&PRIMITIVE_MOTION) ? 1 : 0))
#define PRIMITIVE_INDEX(type) \
(bitscan((uint32_t)(type)) * 2 + (((type) & PRIMITIVE_MOTION) ? 1 : 0))
/* Pack segment into type value to save space. */
#define PRIMITIVE_PACK_SEGMENT(type, segment) ((segment << PRIMITIVE_NUM_BITS) | (type))

@ -236,8 +236,7 @@ template<uint32_t LOG2DIM> struct alignas(NANOVDB_DATA_ALIGNMENT) LeafFnBase {
/* LeafData<Fp16> */
class Fp16 {
};
class Fp16 {};
template<uint32_t LOG2DIM> struct alignas(NANOVDB_DATA_ALIGNMENT) LeafData<Fp16, LOG2DIM> {
using ValueType = float;
@ -254,8 +253,7 @@ template<uint32_t LOG2DIM> struct alignas(NANOVDB_DATA_ALIGNMENT) LeafData<Fp16,
/* LeafData<FpN> */
class FpN {
};
class FpN {};
template<uint32_t LOG2DIM> struct alignas(NANOVDB_DATA_ALIGNMENT) LeafData<FpN, LOG2DIM> {
using ValueType = float;

@ -17,7 +17,7 @@ CCL_NAMESPACE_BEGIN
# define PROFILING_INIT_FOR_SHADER(kg, event) \
ProfilingWithShaderHelper profiling_helper((ProfilingState *)&kg->profiler, event)
# define PROFILING_SHADER(object, shader) \
profiling_helper.set_shader(object, (shader)&SHADER_MASK);
profiling_helper.set_shader(object, (shader) & SHADER_MASK);
#else
# define PROFILING_INIT(kg, event)
# define PROFILING_EVENT(event)

@ -32,11 +32,9 @@ struct MatrixSamplesData {
};
/* Helpers to detect if some type is a `ccl::array`. */
template<typename> struct is_array : public std::false_type {
};
template<typename> struct is_array : public std::false_type {};
template<typename T> struct is_array<array<T>> : public std::true_type {
};
template<typename T> struct is_array<array<T>> : public std::true_type {};
/* Holds the data for a cache lookup at a given time, as well as information to
* help disambiguate successes or failures to get data from the cache. */

@ -390,7 +390,8 @@ void GeometryManager::update_attribute_element_offset(Geometry *geom,
if (geom->is_mesh()) {
Mesh *mesh = static_cast<Mesh *>(geom);
if (mesh->subdivision_type == Mesh::SUBDIVISION_CATMULL_CLARK &&
desc.flags & ATTR_SUBDIVIDED) {
desc.flags & ATTR_SUBDIVIDED)
{
/* Indices for subdivided attributes are retrieved
* from patch table so no need for correction here. */
}

@ -620,7 +620,8 @@ bool ImageManager::file_load_image(Image *img, int texture_limit)
}
if (img->metadata.colorspace != u_colorspace_raw &&
img->metadata.colorspace != u_colorspace_srgb) {
img->metadata.colorspace != u_colorspace_srgb)
{
/* Convert to scene linear. */
ColorSpaceManager::to_scene_linear(
img->metadata.colorspace, pixels, num_pixels, is_rgba, img->metadata.compress_as_srgb);
@ -635,7 +636,8 @@ bool ImageManager::file_load_image(Image *img, int texture_limit)
for (size_t i = 0; i < num_pixels; i += 4) {
StorageType *pixel = &pixels[i * 4];
if (!isfinite(pixel[0]) || !isfinite(pixel[1]) || !isfinite(pixel[2]) ||
!isfinite(pixel[3])) {
!isfinite(pixel[3]))
{
pixel[0] = 0;
pixel[1] = 0;
pixel[2] = 0;

@ -386,7 +386,8 @@ const Pass *Pass::find(const vector<Pass *> &passes,
{
for (const Pass *pass : passes) {
if (pass->get_type() != type || pass->get_mode() != mode ||
pass->get_lightgroup() != lightgroup) {
pass->get_lightgroup() != lightgroup)
{
continue;
}
return pass;

@ -361,7 +361,8 @@ bool DenoiseImage::parse_channels(const ImageSpec &in_spec, string &error)
/* Loop over all detected RenderLayers, check whether they contain a full set of input channels.
* Any channels that won't be processed internally are also passed through. */
for (map<string, DenoiseImageLayer>::iterator i = file_layers.begin(); i != file_layers.end();
++i) {
++i)
{
const string &name = i->first;
DenoiseImageLayer &layer = i->second;

@ -157,7 +157,8 @@ static bool parse_channels(const ImageSpec &in_spec,
string layername, channelname;
if (parse_channel_name(
pass.channel_name, layername, pass.name, channelname, multiview_channels)) {
pass.channel_name, layername, pass.name, channelname, multiview_channels))
{
/* Channel part of a render layer. */
pass.op = parse_channel_operation(pass.name);
}
@ -459,7 +460,8 @@ static bool merge_pixels(const vector<MergeImage> &images,
case MERGE_CHANNEL_SAMPLES: {
const auto &samples = layer_samples.at(layer.name);
for (size_t i = 0; offset < num_pixels;
offset += stride, out_offset += out_stride, i++) {
offset += stride, out_offset += out_stride, i++)
{
out_pixels[out_offset] = 1.0f * samples.per_pixel[i] / samples.total;
}
break;

@ -49,7 +49,7 @@ typedef struct _stati64 path_stat_t;
typedef struct _stat path_stat_t;
# endif
# ifndef S_ISDIR
# define S_ISDIR(x) (((x)&_S_IFDIR) == _S_IFDIR)
# define S_ISDIR(x) (((x) & _S_IFDIR) == _S_IFDIR)
# endif
#else
typedef struct stat path_stat_t;

@ -36,12 +36,12 @@ typedef unsigned char uchar;
typedef struct name##__ { \
int unused; \
MEM_CXX_CLASS_ALLOC_FUNCS(#name) \
} * name
} *name
#else
# define GHOST_DECLARE_HANDLE(name) \
typedef struct name##__ { \
int unused; \
} * name
} *name
#endif
/**

@ -201,7 +201,7 @@ bool win32_chk(bool result, const char *file = nullptr, int line = 0, const char
bool win32_silent_chk(bool result);
# ifndef NDEBUG
# define WIN32_CHK(x) win32_chk((x), __FILE__, __LINE__, # x)
# define WIN32_CHK(x) win32_chk((x), __FILE__, __LINE__, #x)
# else
# define WIN32_CHK(x) win32_chk(x)
# endif

@ -147,7 +147,7 @@ static bool egl_chk(bool result,
}
#ifndef NDEBUG
# define EGL_CHK(x) egl_chk((x), __FILE__, __LINE__, # x)
# define EGL_CHK(x) egl_chk((x), __FILE__, __LINE__, #x)
#else
# define EGL_CHK(x) egl_chk(x)
#endif
@ -475,7 +475,8 @@ GHOST_TSuccess GHOST_ContextEGL::initializeDrawingContext()
attrib_list.clear();
if (epoxy_egl_version(m_display) >= 15 ||
epoxy_has_egl_extension(m_display, "KHR_create_context")) {
epoxy_has_egl_extension(m_display, "KHR_create_context"))
{
if (m_api == EGL_OPENGL_API || m_api == EGL_OPENGL_ES_API) {
if (m_contextMajorVersion != 0) {
attrib_list.push_back(EGL_CONTEXT_MAJOR_VERSION_KHR);

@ -8,7 +8,7 @@
#pragma once
//#define WIN32_COMPOSITING
// #define WIN32_COMPOSITING
#include "GHOST_Context.hh"

@ -13,7 +13,7 @@
# error Apple OSX only!
#endif // __APPLE__
//#define __CARBONSOUND__
// #define __CARBONSOUND__
#include "GHOST_System.hh"

@ -2047,7 +2047,8 @@ GHOST_TSuccess GHOST_SystemCocoa::handleKeyEvent(void *eventPtr)
false));
}
if ((modifiers & NSEventModifierFlagControl) !=
(m_modifierMask & NSEventModifierFlagControl)) {
(m_modifierMask & NSEventModifierFlagControl))
{
pushEvent(new GHOST_EventKey(
[event timestamp] * 1000,
(modifiers & NSEventModifierFlagControl) ? GHOST_kEventKeyDown : GHOST_kEventKeyUp,
@ -2065,7 +2066,8 @@ GHOST_TSuccess GHOST_SystemCocoa::handleKeyEvent(void *eventPtr)
false));
}
if ((modifiers & NSEventModifierFlagCommand) !=
(m_modifierMask & NSEventModifierFlagCommand)) {
(m_modifierMask & NSEventModifierFlagCommand))
{
pushEvent(new GHOST_EventKey(
[event timestamp] * 1000,
(modifiers & NSEventModifierFlagCommand) ? GHOST_kEventKeyDown : GHOST_kEventKeyUp,

@ -4644,7 +4644,8 @@ static void keyboard_handle_keymap(void *data,
if (seat->xkb.state_empty_with_shift) {
seat->xkb_use_non_latin_workaround = true;
for (xkb_keycode_t key_code = KEY_1 + EVDEV_OFFSET; key_code <= KEY_0 + EVDEV_OFFSET;
key_code++) {
key_code++)
{
const xkb_keysym_t sym_test = xkb_state_key_get_one_sym(seat->xkb.state_empty_with_shift,
key_code);
if (!(sym_test >= XKB_KEY_0 && sym_test <= XKB_KEY_9)) {
@ -8534,7 +8535,8 @@ bool GHOST_SystemWayland::window_cursor_grab_set(const GHOST_TGrabCursorMode mod
}
else if (mode_current == GHOST_kGrabHide) {
if ((init_grab_xy[0] != seat->grab_lock_xy[0]) ||
(init_grab_xy[1] != seat->grab_lock_xy[1])) {
(init_grab_xy[1] != seat->grab_lock_xy[1]))
{
const wl_fixed_t xy_next[2] = {
gwl_window_scale_wl_fixed_from(scale_params, wl_fixed_from_int(init_grab_xy[0])),
gwl_window_scale_wl_fixed_from(scale_params, wl_fixed_from_int(init_grab_xy[1])),

@ -964,7 +964,8 @@ void GHOST_SystemWin32::processWintabEvent(GHOST_WindowWin32 *window)
* event queue. */
MSG msg;
if (PeekMessage(&msg, window->getHWND(), message, message, PM_NOYIELD) &&
msg.message != WM_QUIT) {
msg.message != WM_QUIT)
{
/* Test for Win32/Wintab button down match. */
useWintabPos = wt->testCoordinates(msg.pt.x, msg.pt.y, info.x, info.y);
@ -1283,7 +1284,8 @@ GHOST_EventKey *GHOST_SystemWin32::processKeyEvent(GHOST_WindowWin32 *window, RA
/* TODO: #ToUnicodeEx can respond with up to 4 utf16 chars (only 2 here).
* Could be up to 24 utf8 bytes. */
if ((r = ToUnicodeEx(
vk, raw.data.keyboard.MakeCode, state, utf16, 2, 0, system->m_keylayout))) {
vk, raw.data.keyboard.MakeCode, state, utf16, 2, 0, system->m_keylayout)))
{
if ((r > 0 && r < 3)) {
utf16[r] = 0;
conv_utf_16_to_8(utf16, utf8_char, 6);

@ -638,7 +638,8 @@ bool GHOST_SystemX11::processEvents(bool waitForEvent)
}
else if (xevent.type == KeyPress) {
if ((xevent.xkey.keycode == m_last_release_keycode) &&
(xevent.xkey.time <= m_last_release_time)) {
(xevent.xkey.time <= m_last_release_time))
{
continue;
}
}
@ -1149,7 +1150,8 @@ void GHOST_SystemX11::processEvent(XEvent *xe)
/* Use utf8 because its not locale repentant, from XORG docs. */
if (!(len = Xutf8LookupString(
xic, xke, utf8_buf, sizeof(utf8_array) - 5, &key_sym, &status))) {
xic, xke, utf8_buf, sizeof(utf8_array) - 5, &key_sym, &status)))
{
utf8_buf[0] = '\0';
}

@ -1276,7 +1276,8 @@ static void libdecor_frame_handle_configure(libdecor_frame *frame,
win->frame.buffer_scale;
const int scale_as_fractional = scale * FRACTIONAL_DENOMINATOR;
if (libdecor_configuration_get_content_size(
configuration, frame, &size_next[0], &size_next[1])) {
configuration, frame, &size_next[0], &size_next[1]))
{
if (fractional_scale) {
frame_pending.size[0] = gwl_window_fractional_to_viewport_round(win->frame, size_next[0]);
frame_pending.size[1] = gwl_window_fractional_to_viewport_round(win->frame, size_next[1]);

@ -541,7 +541,8 @@ GHOST_TSuccess GHOST_WindowWin32::setState(GHOST_TWindowState state)
case GHOST_kWindowStateNormal:
default:
if (curstate == GHOST_kWindowStateFullScreen &&
m_normal_state == GHOST_kWindowStateMaximized) {
m_normal_state == GHOST_kWindowStateMaximized)
{
wp.showCmd = SW_SHOWMAXIMIZED;
m_normal_state = GHOST_kWindowStateNormal;
}

@ -638,7 +638,7 @@ int GHOST_WindowX11::icccmGetState() const
struct {
CARD32 state;
XID icon;
} * prop_ret;
} *prop_ret;
ulong bytes_after, num_ret;
Atom type_ret;
int ret, format_ret;

@ -554,7 +554,8 @@ void GHOST_XrContext::drawSessionViews(void *draw_customdata)
void GHOST_XrContext::handleSessionStateChange(const XrEventDataSessionStateChanged &lifecycle)
{
if (m_session &&
m_session->handleStateChangeEvent(lifecycle) == GHOST_XrSession::SESSION_DESTROY) {
m_session->handleStateChangeEvent(lifecycle) == GHOST_XrSession::SESSION_DESTROY)
{
m_session = nullptr;
}
}

@ -37,13 +37,13 @@
* but this introduces some overhead to memory header and makes
* things slower a bit, so better to keep disabled by default
*/
//#define DEBUG_MEMDUPLINAME
// #define DEBUG_MEMDUPLINAME
/* Only for debugging:
* lets you count the allocations so as to find the allocator of unfreed memory
* in situations where the leak is predictable */
//#define DEBUG_MEMCOUNTER
// #define DEBUG_MEMCOUNTER
/* Only for debugging:
* Defining DEBUG_BACKTRACE will display a back-trace from where memory block was allocated and

@ -10,7 +10,7 @@
#include "IK_QJacobianSolver.h"
//#include "analyze.h"
// #include "analyze.h"
IK_QJacobianSolver::IK_QJacobianSolver()
{
m_poleconstraint = false;

@ -303,8 +303,8 @@ void libmv_cameraIntrinsicsInvert(
*y1 = 0.0;
}
void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*/*x1*/)[2],
/* const */ double (*/*x2*/)[2],
void libmv_homography2DFromCorrespondencesEuc(/* const */ double (* /*x1*/)[2],
/* const */ double (* /*x2*/)[2],
int /*num_points*/,
double H[3][3]) {
memset(H, 0, sizeof(double[3][3]));

@ -257,7 +257,7 @@ void DetectMORAVEC(const FloatImage& grayscale_image,
histogram[s]--;
}
c[0] = score, histogram[score]++;
nonmax : {} // Do nothing.
nonmax: {} // Do nothing.
}
}
int min = 255, total = 0;

@ -40,7 +40,7 @@ layout(binding = 1) buffer dst_buffer
float dstVertexBuffer[];
};
// derivative buffers (if needed)
// derivative buffers (if needed)
#if defined(OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES)
uniform ivec3 duDesc;
@ -73,7 +73,7 @@ layout(binding = 12) buffer dvv_buffer
};
#endif
// stencil buffers
// stencil buffers
#if defined(OPENSUBDIV_GLSL_COMPUTE_KERNEL_EVAL_STENCILS)

@ -178,7 +178,8 @@ bool isEqualEdgeTags(const MeshTopology &mesh_topology, const OpenSubdiv_Convert
int requested_edge_vertices[2];
converter->getEdgeVertices(converter, edge_index, requested_edge_vertices);
if (!mesh_topology.isEdgeEqual(
edge_index, requested_edge_vertices[0], requested_edge_vertices[1])) {
edge_index, requested_edge_vertices[0], requested_edge_vertices[1]))
{
return false;
}
}

@ -64,7 +64,6 @@ class TopologyRefinerImpl {
} // namespace opensubdiv
} // namespace blender
struct OpenSubdiv_TopologyRefinerImpl : public blender::opensubdiv::TopologyRefinerImpl {
};
struct OpenSubdiv_TopologyRefinerImpl : public blender::opensubdiv::TopologyRefinerImpl {};
#endif // OPENSUBDIV_TOPOLOGY_REFINER_IMPL_H_

@ -15,7 +15,7 @@ extern "C" {
typedef struct plConvexHull__ {
int unused;
} * plConvexHull;
} *plConvexHull;
plConvexHull plConvexHullCompute(float (*coords)[3], int count);
void plConvexHullDelete(plConvexHull hull);

@ -52,8 +52,7 @@ class AssetCatalogService {
public:
static const CatalogFilePath DEFAULT_CATALOG_FILENAME;
struct read_only_tag {
};
struct read_only_tag {};
public:
AssetCatalogService();

@ -501,7 +501,8 @@ bool AssetCatalogService::write_to_disk_ex(const CatalogFilePath &blend_file_pat
}
if (catalog_collection_->catalogs_.is_empty() &&
catalog_collection_->deleted_catalogs_.is_empty()) {
catalog_collection_->deleted_catalogs_.is_empty())
{
/* Avoid saving anything, when there is nothing to save. */
return true; /* Writing nothing when there is nothing to write is still a success. */
}

@ -32,7 +32,7 @@
* Should be addressed with a proper ownership model for the asset system:
* https://wiki.blender.org/wiki/Source/Architecture/Asset_System/Back_End#Ownership_Model
*/
//#define WITH_DESTROY_VIA_LOAD_HANDLER
// #define WITH_DESTROY_VIA_LOAD_HANDLER
static CLG_LogRef LOG = {"asset_system.asset_library_service"};
@ -309,7 +309,8 @@ int64_t AssetLibraryService::rfind_blendfile_extension(StringRef path)
}
if ((blendfile_extension_pos == StringRef::not_found) ||
(blendfile_extension_pos < iter_ext_pos)) {
(blendfile_extension_pos < iter_ext_pos))
{
blendfile_extension_pos = iter_ext_pos;
}
}

@ -70,8 +70,7 @@ void AssetCatalogTreeTestFunctions::expect_tree_item_child_items(
});
}
class AssetCatalogTreeTest : public AssetLibraryTestBase, public AssetCatalogTreeTestFunctions {
};
class AssetCatalogTreeTest : public AssetLibraryTestBase, public AssetCatalogTreeTestFunctions {};
TEST_F(AssetCatalogTreeTest, insert_item_into_tree)
{

@ -296,7 +296,8 @@ static GlyphBLF *blf_glyph_cache_add_glyph(FontBLF *font,
if (ELEM(glyph->bitmap.pixel_mode,
FT_PIXEL_MODE_GRAY,
FT_PIXEL_MODE_GRAY2,
FT_PIXEL_MODE_GRAY4)) {
FT_PIXEL_MODE_GRAY4))
{
/* Scale 1, 2, 4-bit gray to 8-bit. */
const char scale = char(255 / (glyph->bitmap.num_grays - 1));
for (int i = 0; i < buffer_size; i++) {
@ -1662,7 +1663,8 @@ static void blf_glyph_to_curves(FT_Outline ftoutline, ListBase *nurbsbase, const
{
const int l_next = (k < n - 1) ? (l + 1) : l_first;
if (ftoutline.tags[l] == FT_Curve_Tag_Conic &&
ftoutline.tags[l_next] == FT_Curve_Tag_Conic) {
ftoutline.tags[l_next] == FT_Curve_Tag_Conic)
{
onpoints[j]++;
}
}
@ -1697,7 +1699,8 @@ static void blf_glyph_to_curves(FT_Outline ftoutline, ListBase *nurbsbase, const
{
const int l_next = (k < n - 1) ? (l + 1) : l_first;
if (ftoutline.tags[l] == FT_Curve_Tag_Conic &&
ftoutline.tags[l_next] == FT_Curve_Tag_Conic) {
ftoutline.tags[l_next] == FT_Curve_Tag_Conic)
{
dx = float(ftoutline.points[l].x + ftoutline.points[l_next].x) * scale / 2.0f;
dy = float(ftoutline.points[l].y + ftoutline.points[l_next].y) * scale / 2.0f;

@ -140,42 +140,42 @@ struct ClothSpring {
/* Some macro enhancements for vector treatment. */
#define VECSUBADDSS(v1, v2, aS, v3, bS) \
{ \
*(v1) -= *(v2)*aS + *(v3)*bS; \
*(v1) -= *(v2) * aS + *(v3) * bS; \
*(v1 + 1) -= *(v2 + 1) * aS + *(v3 + 1) * bS; \
*(v1 + 2) -= *(v2 + 2) * aS + *(v3 + 2) * bS; \
} \
((void)0)
#define VECADDSS(v1, v2, aS, v3, bS) \
{ \
*(v1) = *(v2)*aS + *(v3)*bS; \
*(v1) = *(v2) * aS + *(v3) * bS; \
*(v1 + 1) = *(v2 + 1) * aS + *(v3 + 1) * bS; \
*(v1 + 2) = *(v2 + 2) * aS + *(v3 + 2) * bS; \
} \
((void)0)
#define VECADDS(v1, v2, v3, bS) \
{ \
*(v1) = *(v2) + *(v3)*bS; \
*(v1) = *(v2) + *(v3) * bS; \
*(v1 + 1) = *(v2 + 1) + *(v3 + 1) * bS; \
*(v1 + 2) = *(v2 + 2) + *(v3 + 2) * bS; \
} \
((void)0)
#define VECSUBMUL(v1, v2, aS) \
{ \
*(v1) -= *(v2)*aS; \
*(v1) -= *(v2) * aS; \
*(v1 + 1) -= *(v2 + 1) * aS; \
*(v1 + 2) -= *(v2 + 2) * aS; \
} \
((void)0)
#define VECSUBS(v1, v2, v3, bS) \
{ \
*(v1) = *(v2) - *(v3)*bS; \
*(v1) = *(v2) - *(v3) * bS; \
*(v1 + 1) = *(v2 + 1) - *(v3 + 1) * bS; \
*(v1 + 2) = *(v2 + 2) - *(v3 + 2) * bS; \
} \
((void)0)
#define VECADDMUL(v1, v2, aS) \
{ \
*(v1) += *(v2)*aS; \
*(v1) += *(v2) * aS; \
*(v1 + 1) += *(v2 + 1) * aS; \
*(v1 + 2) += *(v2 + 2) * aS; \
} \

@ -320,7 +320,8 @@ bool CTX_data_dir(const char *member);
CollectionPointerLink *ctx_link; \
CTX_data_##member(C, &ctx_data_list); \
for (ctx_link = (CollectionPointerLink *)ctx_data_list.first; ctx_link; \
ctx_link = ctx_link->next) { \
ctx_link = ctx_link->next) \
{ \
Type instance = (Type)ctx_link->ptr.data;
#define CTX_DATA_END \

@ -265,15 +265,25 @@ void IDP_Reset(struct IDProperty *prop, const struct IDProperty *reference);
/* C11 const correctness for casts */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
# define IDP_Float(prop) \
_Generic((prop), struct IDProperty * : (*(float *)&(prop)->data.val), const struct IDProperty * : (*(const float *)&(prop)->data.val))
_Generic((prop), \
struct IDProperty *: (*(float *)&(prop)->data.val), \
const struct IDProperty *: (*(const float *)&(prop)->data.val))
# define IDP_Double(prop) \
_Generic((prop), struct IDProperty * : (*(double *)&(prop)->data.val), const struct IDProperty * : (*(const double *)&(prop)->data.val))
_Generic((prop), \
struct IDProperty *: (*(double *)&(prop)->data.val), \
const struct IDProperty *: (*(const double *)&(prop)->data.val))
# define IDP_String(prop) \
_Generic((prop), struct IDProperty * : ((char *)(prop)->data.pointer), const struct IDProperty * : ((const char *)(prop)->data.pointer))
_Generic((prop), \
struct IDProperty *: ((char *)(prop)->data.pointer), \
const struct IDProperty *: ((const char *)(prop)->data.pointer))
# define IDP_IDPArray(prop) \
_Generic((prop), struct IDProperty * : ((struct IDProperty *)(prop)->data.pointer), const struct IDProperty * : ((const struct IDProperty *)(prop)->data.pointer))
_Generic((prop), \
struct IDProperty *: ((struct IDProperty *)(prop)->data.pointer), \
const struct IDProperty *: ((const struct IDProperty *)(prop)->data.pointer))
# define IDP_Id(prop) \
_Generic((prop), struct IDProperty * : ((ID *)(prop)->data.pointer), const struct IDProperty * : ((const ID *)(prop)->data.pointer))
_Generic((prop), \
struct IDProperty *: ((ID *)(prop)->data.pointer), \
const struct IDProperty *: ((const ID *)(prop)->data.pointer))
#else
# define IDP_Float(prop) (*(float *)&(prop)->data.val)
# define IDP_Double(prop) (*(double *)&(prop)->data.val)

@ -472,7 +472,7 @@ const char *BKE_main_blendfile_path_from_global(void);
*/
ListBase *which_libbase(Main *bmain, short type);
//#define INDEX_ID_MAX 41
// #define INDEX_ID_MAX 41
/**
* Put the pointers to all the #ListBase structs in given `bmain` into the `*lb[INDEX_ID_MAX]`
* array, and return the number of those for convenience.

@ -84,13 +84,11 @@ struct LooseGeomCache {
/**
* Cache of a mesh's loose edges, accessed with #Mesh::loose_edges(). *
*/
struct LooseEdgeCache : public LooseGeomCache {
};
struct LooseEdgeCache : public LooseGeomCache {};
/**
* Cache of a mesh's loose vertices or vertices not used by faces.
*/
struct LooseVertCache : public LooseGeomCache {
};
struct LooseVertCache : public LooseGeomCache {};
struct MeshRuntime {
/* Evaluated mesh for objects which do not have effective modifiers.

@ -81,18 +81,18 @@ struct CCGDerivedMesh {
struct {
int startVert;
CCGVert *vert;
} * vertMap;
} *vertMap;
struct {
int startVert;
int startEdge;
CCGEdge *edge;
} * edgeMap;
} *edgeMap;
struct {
int startVert;
int startEdge;
int startFace;
CCGFace *face;
} * faceMap;
} *faceMap;
int *reverseFaceMap;

@ -57,8 +57,7 @@ class VolumeGridData : public ImplicitSharingMixin {
/**
* Empty struct that exists so that it can be used as token in #VolumeTreeAccessToken.
*/
struct AccessToken {
};
struct AccessToken {};
/**
* A mutex that needs to be locked whenever working with the data members below.

@ -79,7 +79,7 @@ using blender::bke::GeometrySet;
using blender::bke::MeshComponent;
/* very slow! enable for testing only! */
//#define USE_MODIFIER_VALIDATE
// #define USE_MODIFIER_VALIDATE
#ifdef USE_MODIFIER_VALIDATE
# define ASSERT_IS_VALID_MESH(mesh) \
@ -674,7 +674,8 @@ static void mesh_calc_modifiers(Depsgraph *depsgraph,
}
if ((mti->flags & eModifierTypeFlag_RequiresOriginalData) &&
have_non_onlydeform_modifiers_applied) {
have_non_onlydeform_modifiers_applied)
{
BKE_modifier_set_error(ob, md, "Modifier requires original data, bad stack position");
continue;
}
@ -761,7 +762,8 @@ static void mesh_calc_modifiers(Depsgraph *depsgraph,
* These are created when either requested by evaluation, or if
* following modifiers requested them. */
if (need_mapping ||
((nextmask.vmask | nextmask.emask | nextmask.pmask) & CD_MASK_ORIGINDEX)) {
((nextmask.vmask | nextmask.emask | nextmask.pmask) & CD_MASK_ORIGINDEX))
{
/* calc */
CustomData_add_layer(
&mesh_final->vert_data, CD_ORIGINDEX, CD_CONSTRUCT, mesh_final->verts_num);

@ -1206,7 +1206,8 @@ void BKE_pose_update_constraint_flags(bPose *pose)
{
bPoseChannel *chain_bone = chain_tip;
for (short index = 0; chain_bone && (data->rootbone == 0 || index < data->rootbone);
index++) {
index++)
{
chain_bone->constflag |= PCHAN_INFLUENCED_BY_IK;
chain_bone = chain_bone->parent;
}

@ -604,7 +604,8 @@ static int animsys_quaternion_evaluate_fcurves(PathResolvedRNA quat_rna,
int fcurve_offset = 0;
for (; fcurve_offset < 4 && quat_curve_fcu;
++fcurve_offset, quat_curve_fcu = quat_curve_fcu->next) {
++fcurve_offset, quat_curve_fcu = quat_curve_fcu->next)
{
if (!STREQ(quat_curve_fcu->rna_path, first_fcurve->rna_path)) {
/* This should never happen when the quaternion is fully keyed. Some
* people do use half-keyed quaternions, though, so better to check. */

@ -703,28 +703,32 @@ const char *BKE_appdir_folder_id_user_notest(const int folder_id, const char *su
switch (folder_id) {
case BLENDER_USER_DATAFILES:
if (get_path_environment_ex(
path, sizeof(path), subfolder, "BLENDER_USER_DATAFILES", check_is_dir)) {
path, sizeof(path), subfolder, "BLENDER_USER_DATAFILES", check_is_dir))
{
break;
}
get_path_user_ex(path, sizeof(path), "datafiles", subfolder, version, check_is_dir);
break;
case BLENDER_USER_CONFIG:
if (get_path_environment_ex(
path, sizeof(path), subfolder, "BLENDER_USER_CONFIG", check_is_dir)) {
path, sizeof(path), subfolder, "BLENDER_USER_CONFIG", check_is_dir))
{
break;
}
get_path_user_ex(path, sizeof(path), "config", subfolder, version, check_is_dir);
break;
case BLENDER_USER_AUTOSAVE:
if (get_path_environment_ex(
path, sizeof(path), subfolder, "BLENDER_USER_AUTOSAVE", check_is_dir)) {
path, sizeof(path), subfolder, "BLENDER_USER_AUTOSAVE", check_is_dir))
{
break;
}
get_path_user_ex(path, sizeof(path), "autosave", subfolder, version, check_is_dir);
break;
case BLENDER_USER_SCRIPTS:
if (get_path_environment_ex(
path, sizeof(path), subfolder, "BLENDER_USER_SCRIPTS", check_is_dir)) {
path, sizeof(path), subfolder, "BLENDER_USER_SCRIPTS", check_is_dir))
{
break;
}
get_path_user_ex(path, sizeof(path), "scripts", subfolder, version, check_is_dir);
@ -1053,7 +1057,8 @@ bool BKE_appdir_app_template_has_userpref(const char *app_template)
char app_template_path[FILE_MAX];
if (!BKE_appdir_app_template_id_search(
app_template, app_template_path, sizeof(app_template_path))) {
app_template, app_template_path, sizeof(app_template_path)))
{
return false;
}

@ -93,7 +93,8 @@ static void splineik_init_tree_from_pchan(Scene * /*scene*/,
/* Find the root bone and the chain of bones from the root to the tip.
* NOTE: this assumes that the bones are connected, but that may not be true... */
for (pchan = pchan_tip; pchan && (segcount < ik_data->chainlen);
pchan = pchan->parent, segcount++) {
pchan = pchan->parent, segcount++)
{
/* Store this segment in the chain. */
pchan_chain[segcount] = pchan;

@ -819,7 +819,8 @@ int BKE_id_attribute_to_index(const ID *id,
for (int i = 0; i < customdata->totlayer; i++) {
const CustomDataLayer *layer_iter = customdata->layers + i;
if (!(layer_mask & CD_TYPE_AS_MASK(layer_iter->type)) ||
(layer_iter->flag & CD_FLAG_TEMPORARY)) {
(layer_iter->flag & CD_FLAG_TEMPORARY))
{
continue;
}

@ -1032,7 +1032,8 @@ static std::unique_ptr<BakeItem> deserialize_bake_item(const DictionaryValue &io
return std::make_unique<StringBakeItem>(io_string.value());
}
else if (const io::serialize::DictionaryValue *io_string =
io_data->get()->as_dictionary_value()) {
io_data->get()->as_dictionary_value())
{
const std::optional<int64_t> size = io_string->lookup_int("size");
if (!size) {
return {};

@ -193,7 +193,8 @@ static void rename_attributes(const Span<GeometrySet *> geometries,
GeometryComponent &component = geometry->get_component_for_write(type);
MutableAttributeAccessor attributes = *component.attributes_for_write();
for (const MapItem<std::string, AnonymousAttributeIDPtr> &attribute_item :
attribute_map.items()) {
attribute_map.items())
{
attributes.rename(attribute_item.key, *attribute_item.value);
}
}

@ -93,7 +93,8 @@ void BKE_blender_user_menu_item_free(bUserMenuItem *umi)
void BKE_blender_user_menu_item_free_list(ListBase *lb)
{
for (bUserMenuItem *umi = static_cast<bUserMenuItem *>(lb->first), *umi_next; umi;
umi = umi_next) {
umi = umi_next)
{
umi_next = umi->next;
BKE_blender_user_menu_item_free(umi);
}

@ -224,7 +224,8 @@ void BKE_blendfile_link_append_context_free(BlendfileLinkAppendContext *lapp_con
}
for (LinkNode *liblink = lapp_context->libraries.list; liblink != nullptr;
liblink = liblink->next) {
liblink = liblink->next)
{
BlendfileLinkAppendContextLibrary *lib_context =
static_cast<BlendfileLinkAppendContextLibrary *>(liblink->link);
link_append_context_library_blohandle_release(lapp_context, lib_context);
@ -465,7 +466,8 @@ static bool object_in_any_collection(Main *bmain, Object *ob)
LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
if (scene->master_collection != nullptr &&
BKE_collection_has_object(scene->master_collection, ob)) {
BKE_collection_has_object(scene->master_collection, ob))
{
return true;
}
}
@ -1900,7 +1902,8 @@ void BKE_blendfile_library_relocate(BlendfileLinkAppendContext *lapp_context,
ID *id;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
if (ID_IS_LINKED(id) || !ID_IS_OVERRIDE_LIBRARY_REAL(id) ||
(id->tag & LIB_TAG_PRE_EXISTING) == 0) {
(id->tag & LIB_TAG_PRE_EXISTING) == 0)
{
continue;
}
if ((id->override_library->reference->tag & LIB_TAG_MISSING) == 0) {

@ -96,7 +96,8 @@ void BKE_bpath_foreach_path_id(BPathForeachPathData *bpath_data, ID *id)
}
if (id->library_weak_reference != nullptr &&
(flag & BKE_BPATH_TRAVERSE_SKIP_WEAK_REFERENCES) == 0) {
(flag & BKE_BPATH_TRAVERSE_SKIP_WEAK_REFERENCES) == 0)
{
BKE_bpath_foreach_path_fixed_process(bpath_data,
id->library_weak_reference->library_filepath,
sizeof(id->library_weak_reference->library_filepath));
@ -182,7 +183,8 @@ bool BKE_bpath_foreach_path_dirfile_fixed_process(BPathForeachPathData *bpath_da
}
if (bpath_data->callback_function(
bpath_data, path_dst, sizeof(path_dst), (const char *)path_src)) {
bpath_data, path_dst, sizeof(path_dst), (const char *)path_src))
{
BLI_path_split_dir_file(path_dst, path_dir, path_dir_maxncpy, path_file, path_file_maxncpy);
bpath_data->is_path_modified = true;
return true;

@ -1655,7 +1655,8 @@ static bool collection_instance_fix_recursive(Collection *parent_collection,
LISTBASE_FOREACH (CollectionObject *, collection_object, &parent_collection->gobject) {
if (collection_object->ob != nullptr &&
collection_object->ob->instance_collection == collection) {
collection_object->ob->instance_collection == collection)
{
id_us_min(&collection->id);
collection_object->ob->instance_collection = nullptr;
cycles_found = true;

@ -288,7 +288,8 @@ void BKE_constraint_mat_convertspace(Object *ob,
if (ELEM(to,
CONSTRAINT_SPACE_LOCAL,
CONSTRAINT_SPACE_PARLOCAL,
CONSTRAINT_SPACE_OWNLOCAL)) {
CONSTRAINT_SPACE_OWNLOCAL))
{
/* Call self with slightly different values. */
BKE_constraint_mat_convertspace(
ob, pchan, cob, mat, CONSTRAINT_SPACE_POSE, to, keep_scale);

@ -4256,7 +4256,8 @@ void BKE_nurbList_handles_set(ListBase *editnurb,
while (a--) {
const short flag = BKE_nurb_bezt_handle_test_calc_flag(bezt, SELECT, handle_mode);
if (((flag & (1 << 0)) && bezt->h1 != HD_FREE) ||
((flag & (1 << 2)) && bezt->h2 != HD_FREE)) {
((flag & (1 << 2)) && bezt->h2 != HD_FREE))
{
h_new = HD_AUTO;
break;
}
@ -4879,7 +4880,8 @@ bool BKE_nurb_type_convert(Nurb *nu,
bp = nu->bp;
while (a--) {
if ((type == CU_POLY && bezt->h1 == HD_VECT && bezt->h2 == HD_VECT) ||
(use_handles == false)) {
(use_handles == false))
{
/* vector handle becomes one poly vertex */
copy_v3_v3(bp->vec, bezt->vec[1]);
bp->vec[3] = 1.0;

@ -773,7 +773,8 @@ Span<float3> CurvesGeometry::evaluated_tangents() const
positions[points.first()]);
}
if (!math::almost_equal_relative(
handles_left[points.last()], positions[points.last()], epsilon)) {
handles_left[points.last()], positions[points.last()], epsilon))
{
tangents[evaluated_points.last()] = math::normalize(positions[points.last()] -
handles_left[points.last()]);
}

@ -367,7 +367,8 @@ static void layerCopyValue_normal(const void *source,
if (ELEM(mixmode,
CDT_MIX_NOMIX,
CDT_MIX_REPLACE_ABOVE_THRESHOLD,
CDT_MIX_REPLACE_BELOW_THRESHOLD)) {
CDT_MIX_REPLACE_BELOW_THRESHOLD))
{
/* Above/below threshold modes are not supported here, fallback to nomix (just in case). */
copy_v3_v3(no_dst, no_src);
}
@ -813,7 +814,8 @@ static void layerCopyValue_mloopcol(const void *source,
if (ELEM(mixmode,
CDT_MIX_NOMIX,
CDT_MIX_REPLACE_ABOVE_THRESHOLD,
CDT_MIX_REPLACE_BELOW_THRESHOLD)) {
CDT_MIX_REPLACE_BELOW_THRESHOLD))
{
/* Modes that do a full copy or nothing. */
if (ELEM(mixmode, CDT_MIX_REPLACE_ABOVE_THRESHOLD, CDT_MIX_REPLACE_BELOW_THRESHOLD)) {
/* TODO: Check for a real valid way to get 'factor' value of our dest color? */
@ -1244,7 +1246,8 @@ static void layerCopyValue_propcol(const void *source,
if (ELEM(mixmode,
CDT_MIX_NOMIX,
CDT_MIX_REPLACE_ABOVE_THRESHOLD,
CDT_MIX_REPLACE_BELOW_THRESHOLD)) {
CDT_MIX_REPLACE_BELOW_THRESHOLD))
{
/* Modes that do a full copy or nothing. */
if (ELEM(mixmode, CDT_MIX_REPLACE_ABOVE_THRESHOLD, CDT_MIX_REPLACE_BELOW_THRESHOLD)) {
/* TODO: Check for a real valid way to get 'factor' value of our dest color? */

@ -79,7 +79,7 @@ using blender::int3;
/* could enable at some point but for now there are far too many conversions */
#ifdef __GNUC__
//# pragma GCC diagnostic ignored "-Wdouble-promotion"
// # pragma GCC diagnostic ignored "-Wdouble-promotion"
#endif
static CLG_LogRef LOG = {"bke.dynamicpaint"};
@ -2121,7 +2121,8 @@ static void dynamicPaint_frameUpdate(
/* image sequences are handled by bake operator */
if ((surface->format == MOD_DPAINT_SURFACE_F_IMAGESEQ) ||
!(surface->flags & MOD_DPAINT_ACTIVE)) {
!(surface->flags & MOD_DPAINT_ACTIVE))
{
continue;
}
@ -3024,7 +3025,8 @@ int dynamicPaint_createUVSurface(Scene *scene,
if (n_target >= 0 && n_target != index) {
if (!dynamicPaint_pointHasNeighbor(
ed, final_index[index], final_index[n_target])) {
ed, final_index[index], final_index[n_target]))
{
ed->n_target[n_pos] = final_index[n_target];
ed->n_num[final_index[index]]++;
n_pos++;
@ -4770,7 +4772,8 @@ static void dynamic_paint_paint_single_point_cb_ex(void *__restrict userdata,
if (surface->type == MOD_DPAINT_SURFACE_T_PAINT) {
if (brush->proximity_falloff == MOD_DPAINT_PRFALL_RAMP &&
!(brush->flags & MOD_DPAINT_RAMP_ALPHA)) {
!(brush->flags & MOD_DPAINT_RAMP_ALPHA))
{
paintColor[0] = colorband[0];
paintColor[1] = colorband[1];
paintColor[2] = colorband[2];

@ -695,7 +695,8 @@ bool get_effector_data(EffectorCache *eff,
/* In case surface object is in Edit mode when loading the .blend,
* surface modifier is never executed and bvhtree never built, see #48415. */
if (eff->pd && eff->pd->shape == PFIELD_SHAPE_SURFACE && eff->surmd &&
eff->surmd->runtime.bvhtree) {
eff->surmd->runtime.bvhtree)
{
/* closest point in the object surface is an effector */
float vec[3];
@ -848,7 +849,8 @@ static void get_effector_tot(
efd->charge = eff->pd->f_strength;
}
else if (eff->pd->forcefield == PFIELD_HARMONIC &&
(eff->pd->flag & PFIELD_MULTIPLE_SPRINGS) == 0) {
(eff->pd->flag & PFIELD_MULTIPLE_SPRINGS) == 0)
{
/* every particle is mapped to only one harmonic effector particle */
*p = point->index % eff->psys->totpart;
*tot = *p + 1;

@ -1239,7 +1239,8 @@ static void compute_obstaclesemission(Scene *scene,
/* Set scene time */
/* Handle emission subframe */
if ((subframe < subframes || time_per_frame + dt + FLT_EPSILON < frame_length) &&
!is_first_frame) {
!is_first_frame)
{
scene->r.subframe = (time_per_frame + (subframe + 1.0f) * subframe_dt) / frame_length;
scene->r.cfra = frame - 1;
}
@ -1422,7 +1423,8 @@ static void update_obstacles(Depsgraph *depsgraph,
d_index = manta_get_index(dx, fds->res[0], dy, fds->res[1], dz);
/* Make sure emission cell is inside the new domain boundary. */
if (dx < 0 || dy < 0 || dz < 0 || dx >= fds->res[0] || dy >= fds->res[1] ||
dz >= fds->res[2]) {
dz >= fds->res[2])
{
continue;
}
@ -2257,7 +2259,8 @@ static void adaptive_domain_adjust(
/* skip if cell already belongs to new area */
if (xn >= min[0] && xn <= max[0] && yn >= min[1] && yn <= max[1] && zn >= min[2] &&
zn <= max[2]) {
zn <= max[2])
{
continue;
}
@ -2753,7 +2756,8 @@ static void compute_flowsemission(Scene *scene,
/* Set scene time */
if ((subframe < subframes || time_per_frame + dt + FLT_EPSILON < frame_length) &&
!is_first_frame) {
!is_first_frame)
{
scene->r.subframe = (time_per_frame + (subframe + 1.0f) * subframe_dt) / frame_length;
scene->r.cfra = frame - 1;
}
@ -2998,7 +3002,8 @@ static void update_flowsfluids(Depsgraph *depsgraph,
d_index = manta_get_index(dx, fds->res[0], dy, fds->res[1], dz);
/* Make sure emission cell is inside the new domain boundary. */
if (dx < 0 || dy < 0 || dz < 0 || dx >= fds->res[0] || dy >= fds->res[1] ||
dz >= fds->res[2]) {
dz >= fds->res[2])
{
continue;
}
@ -4111,7 +4116,8 @@ Mesh *BKE_fluid_modifier_do(
/* Always update viewport in cache replay mode. */
if (fds->cache_type == FLUID_DOMAIN_CACHE_REPLAY ||
fds->flags & FLUID_DOMAIN_USE_ADAPTIVE_DOMAIN) {
fds->flags & FLUID_DOMAIN_USE_ADAPTIVE_DOMAIN)
{
needs_viewport_update = true;
}
/* In other cache modes, only update the viewport when no bake is going on. */

Some files were not shown because too many files have changed in this diff Show More