Cleanup: format (with BraceWrapping::AfterControlStatement "MultiLine")

Campbell Barton 2023-05-02 08:42:26 +10:00
parent 391f86bc38
commit 6859bb6e67
1314 changed files with 9714 additions and 5571 deletions
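The formatting rule behind this change: with clang-format's BraceWrapping option AfterControlStatement set to "MultiLine", the opening brace of an if/for/while is moved onto its own line only when the control statement's header wraps across multiple lines; headers that fit on one line keep the brace attached. A minimal sketch of the effect (hypothetical identifiers, not taken from this diff):

/* Sketch of the clang-format behavior applied in this commit; the function and
 * variable names are made up for illustration, only the brace placement matters. */
static int demo(bool use_feature, bool condition_a, bool condition_b, bool condition_with_a_long_name)
{
  int count = 0;

  /* Control-statement header fits on one line: the brace stays attached. */
  if (use_feature) {
    count += 1;
  }

  /* Header wraps across lines: the opening brace moves to its own line,
   * which is the change repeated in every hunk below. */
  if (condition_a && condition_b &&
      condition_with_a_long_name)
  {
    count += 2;
  }

  return count;
}

The corresponding .clang-format entry presumably pairs BreakBeforeBraces: Custom with AfterControlStatement: MultiLine under BraceWrapping; the config-file change itself is not shown in this excerpt.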

@@ -156,7 +156,8 @@ int CLG_color_support_get(CLG_LogRef *clg_ref);
 { \
 CLG_LogType *_lg_ty = CLOG_ENSURE(clg_ref); \
 if (((_lg_ty->flag & CLG_FLAG_USE) && (_lg_ty->level >= verbose_level)) || \
-(severity >= CLG_SEVERITY_WARN)) { \
+(severity >= CLG_SEVERITY_WARN)) \
+{ \
 CLG_logf(_lg_ty, severity, __FILE__ ":" STRINGIFY(__LINE__), __func__, __VA_ARGS__); \
 } \
 } \
@@ -166,7 +167,8 @@ int CLG_color_support_get(CLG_LogRef *clg_ref);
 { \
 CLG_LogType *_lg_ty = CLOG_ENSURE(clg_ref); \
 if (((_lg_ty->flag & CLG_FLAG_USE) && (_lg_ty->level >= verbose_level)) || \
-(severity >= CLG_SEVERITY_WARN)) { \
+(severity >= CLG_SEVERITY_WARN)) \
+{ \
 CLG_log_str(_lg_ty, severity, __FILE__ ":" STRINGIFY(__LINE__), __func__, str); \
 } \
 } \
@@ -176,7 +178,8 @@ int CLG_color_support_get(CLG_LogRef *clg_ref);
 { \
 CLG_LogType *_lg_ty = CLOG_ENSURE(clg_ref); \
 if (((_lg_ty->flag & CLG_FLAG_USE) && (_lg_ty->level >= verbose_level)) || \
-(severity >= CLG_SEVERITY_WARN)) { \
+(severity >= CLG_SEVERITY_WARN)) \
+{ \
 const char *_str = str; \
 CLG_log_str(_lg_ty, severity, __FILE__ ":" STRINGIFY(__LINE__), __func__, _str); \
 MEM_freeN((void *)_str); \

@@ -316,7 +316,8 @@ static bool clg_ctx_filter_check(CLogContext *ctx, const char *identifier)
 }
 else if ((len >= 2) && (STREQLEN(".*", &flt->match[len - 2], 2))) {
 if (((identifier_len == len - 2) && STREQLEN(identifier, flt->match, len - 2)) ||
-((identifier_len >= len - 1) && STREQLEN(identifier, flt->match, len - 1))) {
+((identifier_len >= len - 1) && STREQLEN(identifier, flt->match, len - 1)))
+{
 return (bool)i;
 }
 }

@@ -511,7 +511,8 @@ static void options_parse(int argc, const char **argv)
 exit(EXIT_FAILURE);
 }
 else if (options.scene_params.shadingsystem == SHADINGSYSTEM_OSL &&
-options.session_params.device.type != DEVICE_CPU) {
+options.session_params.device.type != DEVICE_CPU)
+{
 fprintf(stderr, "OSL shading system only works with CPU device\n");
 exit(EXIT_FAILURE);
 }

@@ -59,7 +59,8 @@ void OIIOOutputDriver::write_render_tile(const Tile &tile)
 /* Apply gamma correction for (some) non-linear file formats.
 * TODO: use OpenColorIO view transform if available. */
 if (ColorSpaceManager::detect_known_colorspace(
-u_colorspace_auto, "", image_output->format_name(), true) == u_colorspace_srgb) {
+u_colorspace_auto, "", image_output->format_name(), true) == u_colorspace_srgb)
+{
 const float g = 1.0f / 2.2f;
 ImageBufAlgo::pow(image_buffer, image_buffer, {g, g, g, 1.0f});
 }

@@ -314,7 +314,8 @@ void window_main_loop(const char *title,
 }
 else if (event.type == SDL_WINDOWEVENT) {
 if (event.window.event == SDL_WINDOWEVENT_RESIZED ||
-event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED) {
+event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED)
+{
 window_reshape(event.window.data1, event.window.data2);
 }
 }

@@ -432,7 +432,8 @@ static void blender_camera_sync(Camera *cam,
 /* panorama sensor */
 if (bcam->type == CAMERA_PANORAMA && (bcam->panorama_type == PANORAMA_FISHEYE_EQUISOLID ||
-bcam->panorama_type == PANORAMA_FISHEYE_LENS_POLYNOMIAL)) {
+bcam->panorama_type == PANORAMA_FISHEYE_LENS_POLYNOMIAL))
+{
 float fit_xratio = (float)bcam->render_width * bcam->pixelaspect.x;
 float fit_yratio = (float)bcam->render_height * bcam->pixelaspect.y;
 bool horizontal_fit;

@@ -55,13 +55,15 @@ static bool ObtainCacheParticleData(
 for (BL::Modifier &b_mod : b_ob->modifiers) {
 if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
-(background ? b_mod.show_render() : b_mod.show_viewport())) {
+(background ? b_mod.show_render() : b_mod.show_viewport()))
+{
 BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
 BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
 BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);
 if ((b_part.render_type() == BL::ParticleSettings::render_type_PATH) &&
-(b_part.type() == BL::ParticleSettings::type_HAIR)) {
+(b_part.type() == BL::ParticleSettings::type_HAIR))
+{
 int shader = clamp(b_part.material() - 1, 0, hair->get_used_shaders().size() - 1);
 int display_step = background ? b_part.render_step() : b_part.display_step();
 int totparts = b_psys.particles.length();
@@ -150,13 +152,15 @@ static bool ObtainCacheParticleUV(Hair *hair,
 for (BL::Modifier &b_mod : b_ob->modifiers) {
 if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
-(background ? b_mod.show_render() : b_mod.show_viewport())) {
+(background ? b_mod.show_render() : b_mod.show_viewport()))
+{
 BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
 BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
 BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);
 if ((b_part.render_type() == BL::ParticleSettings::render_type_PATH) &&
-(b_part.type() == BL::ParticleSettings::type_HAIR)) {
+(b_part.type() == BL::ParticleSettings::type_HAIR))
+{
 int totparts = b_psys.particles.length();
 int totchild = background ? b_psys.child_particles.length() :
 (int)((float)b_psys.child_particles.length() *
@@ -212,13 +216,15 @@ static bool ObtainCacheParticleVcol(Hair *hair,
 for (BL::Modifier &b_mod : b_ob->modifiers) {
 if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
-(background ? b_mod.show_render() : b_mod.show_viewport())) {
+(background ? b_mod.show_render() : b_mod.show_viewport()))
+{
 BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
 BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
 BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);
 if ((b_part.render_type() == BL::ParticleSettings::render_type_PATH) &&
-(b_part.type() == BL::ParticleSettings::type_HAIR)) {
+(b_part.type() == BL::ParticleSettings::type_HAIR))
+{
 int totparts = b_psys.particles.length();
 int totchild = background ? b_psys.child_particles.length() :
 (int)((float)b_psys.child_particles.length() *
@@ -283,7 +289,8 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
 for (int sys = 0; sys < CData->psys_firstcurve.size(); sys++) {
 for (int curve = CData->psys_firstcurve[sys];
 curve < CData->psys_firstcurve[sys] + CData->psys_curvenum[sys];
-curve++) {
+curve++)
+{
 num_keys += CData->curve_keynum[curve];
 num_curves++;
 }
@@ -298,12 +305,14 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
 for (int sys = 0; sys < CData->psys_firstcurve.size(); sys++) {
 for (int curve = CData->psys_firstcurve[sys];
 curve < CData->psys_firstcurve[sys] + CData->psys_curvenum[sys];
-curve++) {
+curve++)
+{
 size_t num_curve_keys = 0;
 for (int curvekey = CData->curve_firstkey[curve];
 curvekey < CData->curve_firstkey[curve] + CData->curve_keynum[curve];
-curvekey++) {
+curvekey++)
+{
 const float3 ickey_loc = CData->curvekey_co[curvekey];
 const float curve_time = CData->curvekey_time[curvekey];
 const float curve_length = CData->curve_length[curve];
@@ -311,7 +320,8 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
 float radius = shaperadius(
 CData->psys_shape[sys], CData->psys_rootradius[sys], CData->psys_tipradius[sys], time);
 if (CData->psys_closetip[sys] &&
-(curvekey == CData->curve_firstkey[curve] + CData->curve_keynum[curve] - 1)) {
+(curvekey == CData->curve_firstkey[curve] + CData->curve_keynum[curve] - 1))
+{
 radius = 0.0f;
 }
 hair->add_curve_key(ickey_loc, radius);
@@ -433,7 +443,8 @@ static void ExportCurveSegmentsMotion(Hair *hair, ParticleCurveData *CData, int
 for (int sys = 0; sys < CData->psys_firstcurve.size(); sys++) {
 for (int curve = CData->psys_firstcurve[sys];
 curve < CData->psys_firstcurve[sys] + CData->psys_curvenum[sys];
-curve++) {
+curve++)
+{
 /* Curve lengths may not match! Curves can be clipped. */
 int curve_key_end = (num_curves + 1 < (int)hair->get_curve_first_key().size() ?
 hair->get_curve_first_key()[num_curves + 1] :
@@ -444,7 +455,8 @@ static void ExportCurveSegmentsMotion(Hair *hair, ParticleCurveData *CData, int
 if (!is_num_keys_different) {
 for (int curvekey = CData->curve_firstkey[curve];
 curvekey < CData->curve_firstkey[curve] + CData->curve_keynum[curve];
-curvekey++) {
+curvekey++)
+{
 if (i < hair->get_curve_keys().size()) {
 mP[i] = CurveSegmentMotionCV(CData, sys, curve, curvekey);
 if (!have_motion) {
@@ -489,13 +501,15 @@ bool BlenderSync::object_has_particle_hair(BL::Object b_ob)
 /* Test if the object has a particle modifier with hair. */
 for (BL::Modifier &b_mod : b_ob.modifiers) {
 if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
-(preview ? b_mod.show_viewport() : b_mod.show_render())) {
+(preview ? b_mod.show_viewport() : b_mod.show_render()))
+{
 BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
 BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
 BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);
 if ((b_part.render_type() == BL::ParticleSettings::render_type_PATH) &&
-(b_part.type() == BL::ParticleSettings::type_HAIR)) {
+(b_part.type() == BL::ParticleSettings::type_HAIR))
+{
 return true;
 }
 }
@@ -677,7 +691,8 @@ static void fill_generic_attribute(const int num_curves,
 static void attr_create_motion(Hair *hair, BL::Attribute &b_attribute, const float motion_scale)
 {
 if (!(b_attribute.domain() == BL::Attribute::domain_POINT) &&
-(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR)) {
+(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR))
+{
 return;
 }
@@ -748,7 +763,8 @@ static void attr_create_generic(Scene *scene,
 /* Weak, use first float2 attribute as standard UV. */
 if (need_uv && !have_uv && b_data_type == BL::Attribute::data_type_FLOAT2 &&
-b_domain == BL::Attribute::domain_CURVE) {
+b_domain == BL::Attribute::domain_CURVE)
+{
 attr_create_uv(attributes, num_curves, num_keys, b_attribute, name);
 have_uv = true;
 continue;
@@ -1100,7 +1116,8 @@ void BlenderSync::sync_hair(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, H
 for (const SocketType &socket : new_hair.type->inputs) {
 /* Those sockets are updated in sync_object, so do not modify them. */
 if (socket.name == "use_motion_blur" || socket.name == "motion_steps" ||
-socket.name == "used_shaders") {
+socket.name == "used_shaders")
+{
 continue;
 }
 hair->set_value(socket, new_hair, socket);

@@ -115,7 +115,8 @@ DeviceInfo blender_device_info(BL::Preferences &b_preferences,
 bool accumulated_use_hardware_raytracing = false;
 foreach (
 DeviceInfo &info,
-(device.multi_devices.size() != 0 ? device.multi_devices : vector<DeviceInfo>({device}))) {
+(device.multi_devices.size() != 0 ? device.multi_devices : vector<DeviceInfo>({device})))
+{
 if (info.type == DEVICE_METAL && !get_boolean(cpreferences, "use_metalrt")) {
 info.use_hardware_raytracing = false;
 }

@@ -312,7 +312,8 @@ class DisplayGPUPixelBuffer {
 /* Try to re-use the existing PBO if it has usable size. */
 if (gpu_pixel_buffer) {
 if (new_width != width || new_height != height ||
-GPU_pixel_buffer_size(gpu_pixel_buffer) < required_size) {
+GPU_pixel_buffer_size(gpu_pixel_buffer) < required_size)
+{
 gpu_resources_destroy();
 }
 }
@@ -513,7 +514,8 @@ bool BlenderDisplayDriver::update_begin(const Params &params,
 const int buffer_height = params.size.y;
 if (!current_tile_buffer_object.gpu_resources_ensure(buffer_width, buffer_height) ||
-!current_tile.texture.gpu_resources_ensure(texture_width, texture_height)) {
+!current_tile.texture.gpu_resources_ensure(texture_width, texture_height))
+{
 tiles_->current_tile.gpu_resources_destroy();
 gpu_context_disable();
 return false;
@@ -563,7 +565,8 @@ void BlenderDisplayDriver::update_end()
 * renders while Blender is drawing. As a workaround update texture during draw, under assumption
 * that there is no graphics interop on macOS and viewport render has a single tile. */
 if (!background_ &&
-GPU_type_matches_ex(GPU_DEVICE_NVIDIA, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_ANY)) {
+GPU_type_matches_ex(GPU_DEVICE_NVIDIA, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_ANY))
+{
 tiles_->current_tile.need_update_texture_pixels = true;
 }
 else {
@@ -708,7 +711,8 @@ static void draw_tile(const float2 &zoom,
 GPU_texture_bind_ex(texture.gpu_texture, GPUSamplerState::default_sampler(), 0);
 }
 else if (zoomed_width - draw_tile.params.size.x > 0.5f ||
-zoomed_height - draw_tile.params.size.y > 0.5f) {
+zoomed_height - draw_tile.params.size.y > 0.5f)
+{
 GPU_texture_bind_ex(texture.gpu_texture, GPUSamplerState::default_sampler(), 0);
 }
 else {

@@ -28,7 +28,8 @@ static Geometry::Type determine_geom_type(BObjectInfo &b_ob_info, bool use_parti
 if (b_ob_info.object_data.is_a(&RNA_Volume) ||
 (b_ob_info.object_data == b_ob_info.real_object.data() &&
-object_fluid_gas_domain_find(b_ob_info.real_object))) {
+object_fluid_gas_domain_find(b_ob_info.real_object)))
+{
 return Geometry::VOLUME;
 }
@@ -192,7 +193,8 @@ void BlenderSync::sync_geometry_motion(BL::Depsgraph &b_depsgraph,
 Geometry *geom = object->get_geometry();
 if (geometry_motion_synced.find(geom) != geometry_motion_synced.end() ||
-geometry_motion_attribute_synced.find(geom) != geometry_motion_attribute_synced.end()) {
+geometry_motion_attribute_synced.find(geom) != geometry_motion_attribute_synced.end())
+{
 return;
 }
@@ -218,7 +220,8 @@ void BlenderSync::sync_geometry_motion(BL::Depsgraph &b_depsgraph,
 sync_hair_motion(b_depsgraph, b_ob_info, hair, motion_step);
 }
 else if (b_ob_info.object_data.is_a(&RNA_Volume) ||
-object_fluid_gas_domain_find(b_ob_info.real_object)) {
+object_fluid_gas_domain_find(b_ob_info.real_object))
+{
 /* No volume motion blur support yet. */
 }
 else if (b_ob_info.object_data.is_a(&RNA_PointCloud)) {

@@ -169,7 +169,8 @@ void BlenderSync::sync_background_light(BL::SpaceView3D &b_v3d, bool use_portal)
 ObjectKey key(b_world, 0, b_world, false);
 if (light_map.add_or_update(&light, b_world, b_world, key) || world_recalc ||
-b_world.ptr.data != world_map) {
+b_world.ptr.data != world_map)
+{
 light->set_light_type(LIGHT_BACKGROUND);
 if (sampling_method == SAMPLING_MANUAL) {
 light->set_map_resolution(get_int(cworld, "sample_map_resolution"));

@@ -337,7 +337,8 @@ static void fill_generic_attribute(BL::Mesh &b_mesh,
 static void attr_create_motion(Mesh *mesh, BL::Attribute &b_attribute, const float motion_scale)
 {
 if (!(b_attribute.domain() == BL::Attribute::domain_POINT) &&
-(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR)) {
+(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR))
+{
 return;
 }
@@ -384,7 +385,8 @@ static void attr_create_generic(Scene *scene,
 }
 if (!(mesh->need_attribute(scene, name) ||
-(is_render_color && mesh->need_attribute(scene, ATTR_STD_VERTEX_COLOR)))) {
+(is_render_color && mesh->need_attribute(scene, ATTR_STD_VERTEX_COLOR))))
+{
 continue;
 }
 if (attributes.find(name)) {
@@ -741,13 +743,15 @@ static void attr_create_pointiness(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh, b
 const float3 &vert_co = mesh->get_verts()[vert_index];
 bool found = false;
 for (int other_sorted_vert_index = sorted_vert_index + 1; other_sorted_vert_index < num_verts;
-++other_sorted_vert_index) {
+++other_sorted_vert_index)
+{
 const int other_vert_index = sorted_vert_indeices[other_sorted_vert_index];
 const float3 &other_vert_co = mesh->get_verts()[other_vert_index];
 /* We are too far away now, we wouldn't have duplicate. */
 if ((other_vert_co.x + other_vert_co.y + other_vert_co.z) -
 (vert_co.x + vert_co.y + vert_co.z) >
-3 * FLT_EPSILON) {
+3 * FLT_EPSILON)
+{
 break;
 }
 /* Found duplicate. */
@@ -1325,7 +1329,8 @@ void BlenderSync::sync_mesh(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, M
 for (const SocketType &socket : new_mesh.type->inputs) {
 /* Those sockets are updated in sync_object, so do not modify them. */
 if (socket.name == "use_motion_blur" || socket.name == "motion_steps" ||
-socket.name == "used_shaders") {
+socket.name == "used_shaders")
+{
 continue;
 }
 mesh->set_value(socket, new_mesh, socket);

@@ -63,7 +63,8 @@ bool BlenderSync::object_is_geometry(BObjectInfo &b_ob_info)
 BL::Object::type_enum type = b_ob_info.iter_object.type();
 if (type == BL::Object::type_VOLUME || type == BL::Object::type_CURVES ||
-type == BL::Object::type_POINTCLOUD) {
+type == BL::Object::type_POINTCLOUD)
+{
 /* Will be exported attached to mesh. */
 return true;
 }
@@ -325,7 +326,8 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
 * transform comparison should not be needed, but duplis don't work perfect
 * in the depsgraph and may not signal changes, so this is a workaround */
 if (object->is_modified() || object_updated ||
-(object->get_geometry() && object->get_geometry()->is_modified())) {
+(object->get_geometry() && object->get_geometry()->is_modified()))
+{
 object->name = b_ob.name().c_str();
 object->set_pass_id(b_ob.pass_index());
 const BL::Array<float, 4> object_color = b_ob.color();
@@ -408,7 +410,8 @@ bool BlenderSync::sync_object_attributes(BL::DepsgraphObjectInstance &b_instance
 BlenderAttributeType type = blender_attribute_name_split_type(name, &real_name);
 if (type == BL::ShaderNodeAttribute::attribute_type_OBJECT ||
-type == BL::ShaderNodeAttribute::attribute_type_INSTANCER) {
+type == BL::ShaderNodeAttribute::attribute_type_INSTANCER)
+{
 bool use_instancer = (type == BL::ShaderNodeAttribute::attribute_type_INSTANCER);
 float4 value = lookup_instance_property(b_instance, real_name, use_instancer);
@@ -556,7 +559,8 @@ void BlenderSync::sync_objects(BL::Depsgraph &b_depsgraph,
 for (b_depsgraph.object_instances.begin(b_instance_iter);
 b_instance_iter != b_depsgraph.object_instances.end() && !cancel;
-++b_instance_iter) {
+++b_instance_iter)
+{
 BL::DepsgraphObjectInstance b_instance = *b_instance_iter;
 BL::Object b_ob = b_instance.object();
@@ -667,7 +671,8 @@ void BlenderSync::sync_motion(BL::RenderSettings &b_render,
 float frame_center_delta = 0.0f;
 if (scene->need_motion() != Scene::MOTION_PASS &&
-scene->camera->get_motion_position() != MOTION_POSITION_CENTER) {
+scene->camera->get_motion_position() != MOTION_POSITION_CENTER)
+{
 float shuttertime = scene->camera->get_shuttertime();
 if (scene->camera->get_motion_position() == MOTION_POSITION_END) {
 frame_center_delta = -shuttertime * 0.5f;

@@ -21,7 +21,8 @@ static void attr_create_motion(PointCloud *pointcloud,
 const float motion_scale)
 {
 if (!(b_attribute.domain() == BL::Attribute::domain_POINT) &&
-(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR)) {
+(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR))
+{
 return;
 }
@@ -313,7 +314,8 @@ void BlenderSync::sync_pointcloud(PointCloud *pointcloud, BObjectInfo &b_ob_info
 for (const SocketType &socket : new_pointcloud.type->inputs) {
 /* Those sockets are updated in sync_object, so do not modify them. */
 if (socket.name == "use_motion_blur" || socket.name == "motion_steps" ||
-socket.name == "used_shaders") {
+socket.name == "used_shaders")
+{
 continue;
 }
 pointcloud->set_value(socket, new_pointcloud, socket);

@@ -163,7 +163,8 @@ static PyObject *create_func(PyObject * /*self*/, PyObject *args)
 &pyregion,
 &pyv3d,
 &pyrv3d,
-&preview_osl)) {
+&preview_osl))
+{
 return NULL;
 }
@@ -522,7 +523,8 @@ static PyObject *osl_update_node_func(PyObject * /*self*/, PyObject *args)
 }
 else if (param->type.vecsemantics == TypeDesc::POINT ||
 param->type.vecsemantics == TypeDesc::VECTOR ||
-param->type.vecsemantics == TypeDesc::NORMAL) {
+param->type.vecsemantics == TypeDesc::NORMAL)
+{
 socket_type = "NodeSocketVector";
 data_type = BL::NodeSocket::type_VECTOR;
@@ -738,7 +740,8 @@ static PyObject *denoise_func(PyObject * /*self*/, PyObject *args, PyObject *key
 &pyscene,
 &pyviewlayer,
 &pyinput,
-&pyoutput)) {
+&pyoutput))
+{
 return NULL;
 }

@@ -202,7 +202,8 @@ void BlenderSession::reset_session(BL::BlendData &b_data, BL::Depsgraph &b_depsg
 b_scene, background, use_developer_ui);
 if (scene->params.modified(scene_params) || session->params.modified(session_params) ||
-!this->b_render.use_persistent_data()) {
+!this->b_render.use_persistent_data())
+{
 /* if scene or session parameters changed, it's easier to simply re-create
 * them rather than trying to distinguish which settings need to be updated
 */
@@ -376,8 +377,8 @@ void BlenderSession::render(BL::Depsgraph &b_depsgraph_)
 }
 int view_index = 0;
-for (b_rr.views.begin(b_view_iter); b_view_iter != b_rr.views.end();
-++b_view_iter, ++view_index) {
+for (b_rr.views.begin(b_view_iter); b_view_iter != b_rr.views.end(); ++b_view_iter, ++view_index)
+{
 b_rview_name = b_view_iter->name();
 buffer_params.layer = b_view_layer.name();
@@ -562,7 +563,8 @@ static bool bake_setup_pass(Scene *scene, const string &bake_type_str, const int
 /* Light component passes. */
 else if (strcmp(bake_type, "DIFFUSE") == 0) {
 if ((bake_filter & BL::BakeSettings::pass_filter_DIRECT) &&
-bake_filter & BL::BakeSettings::pass_filter_INDIRECT) {
+bake_filter & BL::BakeSettings::pass_filter_INDIRECT)
+{
 type = PASS_DIFFUSE;
 use_direct_light = true;
 use_indirect_light = true;
@@ -583,7 +585,8 @@ static bool bake_setup_pass(Scene *scene, const string &bake_type_str, const int
 }
 else if (strcmp(bake_type, "GLOSSY") == 0) {
 if ((bake_filter & BL::BakeSettings::pass_filter_DIRECT) &&
-bake_filter & BL::BakeSettings::pass_filter_INDIRECT) {
+bake_filter & BL::BakeSettings::pass_filter_INDIRECT)
+{
 type = PASS_GLOSSY;
 use_direct_light = true;
 use_indirect_light = true;
@@ -604,7 +607,8 @@ static bool bake_setup_pass(Scene *scene, const string &bake_type_str, const int
 }
 else if (strcmp(bake_type, "TRANSMISSION") == 0) {
 if ((bake_filter & BL::BakeSettings::pass_filter_DIRECT) &&
-bake_filter & BL::BakeSettings::pass_filter_INDIRECT) {
+bake_filter & BL::BakeSettings::pass_filter_INDIRECT)
+{
 type = PASS_TRANSMISSION;
 use_direct_light = true;
 use_indirect_light = true;

@@ -1246,7 +1246,8 @@ static void add_nodes(Scene *scene,
 }
 }
 else if (b_node.is_a(&RNA_ShaderNodeGroup) || b_node.is_a(&RNA_NodeCustomGroup) ||
-b_node.is_a(&RNA_ShaderNodeCustomGroup)) {
+b_node.is_a(&RNA_ShaderNodeCustomGroup))
+{
 BL::ShaderNodeTree b_group_ntree(PointerRNA_NULL);
 if (b_node.is_a(&RNA_ShaderNodeGroup))
@@ -1382,7 +1383,8 @@ static void add_nodes(Scene *scene,
 /* Ignore invalid links to avoid unwanted cycles created in graph.
 * Also ignore links with unavailable sockets. */
 if (!(b_link.is_valid() && b_link.from_socket().enabled() && b_link.to_socket().enabled()) ||
-b_link.is_muted()) {
+b_link.is_muted())
+{
 continue;
 }
 /* get blender link data */
@@ -1531,7 +1533,8 @@ void BlenderSync::sync_materials(BL::Depsgraph &b_depsgraph, bool update_all)
 /* test if we need to sync */
 if (shader_map.add_or_update(&shader, b_mat) || update_all ||
-scene_attr_needs_recalc(shader, b_depsgraph)) {
+scene_attr_needs_recalc(shader, b_depsgraph))
+{
 ShaderGraph *graph = new ShaderGraph();
 shader->name = b_mat.name().c_str();
@@ -1614,12 +1617,14 @@ void BlenderSync::sync_world(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d,
 if (world_recalc || update_all || b_world.ptr.data != world_map ||
 viewport_parameters.shader_modified(new_viewport_parameters) ||
-scene_attr_needs_recalc(shader, b_depsgraph)) {
+scene_attr_needs_recalc(shader, b_depsgraph))
+{
 ShaderGraph *graph = new ShaderGraph();
 /* create nodes */
 if (new_viewport_parameters.use_scene_world && b_world && b_world.use_nodes() &&
-b_world.node_tree()) {
+b_world.node_tree())
+{
 BL::ShaderNodeTree b_ntree(b_world.node_tree());
 add_nodes(scene, b_engine, b_data, b_depsgraph, b_scene, graph, b_ntree);
@@ -1781,7 +1786,8 @@ void BlenderSync::sync_lights(BL::Depsgraph &b_depsgraph, bool update_all)
 /* test if we need to sync */
 if (shader_map.add_or_update(&shader, b_light) || update_all ||
-scene_attr_needs_recalc(shader, b_depsgraph)) {
+scene_attr_needs_recalc(shader, b_depsgraph))
+{
 ShaderGraph *graph = new ShaderGraph();
 /* create nodes */

@@ -169,7 +169,8 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
 }
 if (updated_geometry ||
-(object_subdivision_type(b_ob, preview, experimental) != Mesh::SUBDIVISION_NONE)) {
+(object_subdivision_type(b_ob, preview, experimental) != Mesh::SUBDIVISION_NONE))
+{
 BL::ID key = BKE_object_is_modified(b_ob) ? b_ob : b_ob.data();
 geometry_map.set_recalc(key);
@@ -277,7 +278,8 @@ void BlenderSync::sync_data(BL::RenderSettings &b_render,
 geometry_synced.clear(); /* use for objects and motion sync */
 if (scene->need_motion() == Scene::MOTION_PASS || scene->need_motion() == Scene::MOTION_NONE ||
-scene->camera->get_motion_position() == MOTION_POSITION_CENTER) {
+scene->camera->get_motion_position() == MOTION_POSITION_CENTER)
+{
 sync_objects(b_depsgraph, b_v3d);
 }
 sync_motion(b_render, b_depsgraph, b_v3d, b_override, width, height, python_thread_state);
@@ -445,7 +447,8 @@ void BlenderSync::sync_integrator(BL::ViewLayer &b_view_layer, bool background)
 /* No denoising support for vertex color baking, vertices packed into image
 * buffer have no relation to neighbors. */
 if (scene->bake_manager->get_baking() &&
-b_scene.render().bake().target() != BL::BakeSettings::target_IMAGE_TEXTURES) {
+b_scene.render().bake().target() != BL::BakeSettings::target_IMAGE_TEXTURES)
+{
 denoise_params.use = false;
 }
@@ -709,7 +712,8 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
 BL::ViewLayer::lightgroups_iterator b_lightgroup_iter;
 for (b_view_layer.lightgroups.begin(b_lightgroup_iter);
 b_lightgroup_iter != b_view_layer.lightgroups.end();
-++b_lightgroup_iter) {
+++b_lightgroup_iter)
+{
 BL::Lightgroup b_lightgroup(*b_lightgroup_iter);
 string name = string_printf("Combined_%s", b_lightgroup.name().c_str());
@@ -732,7 +736,8 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
 }
 if (pass_type == PASS_MOTION &&
-(b_view_layer.use_motion_blur() && b_scene.render().use_motion_blur())) {
+(b_view_layer.use_motion_blur() && b_scene.render().use_motion_blur()))
+{
 continue;
 }

@@ -588,7 +588,8 @@ static inline BL::FluidDomainSettings object_fluid_gas_domain_find(BL::Object &b
 BL::FluidModifier b_mmd(b_mod);
 if (b_mmd.fluid_type() == BL::FluidModifier::fluid_type_DOMAIN &&
-b_mmd.domain_settings().domain_type() == BL::FluidDomainSettings::domain_type_GAS) {
+b_mmd.domain_settings().domain_type() == BL::FluidDomainSettings::domain_type_GAS)
+{
 return b_mmd.domain_settings();
 }
 }
@@ -637,7 +638,8 @@ static inline Mesh::SubdivisionType object_subdivision_type(BL::Object &b_ob,
 bool enabled = preview ? mod.show_viewport() : mod.show_render();
 if (enabled && mod.type() == BL::Modifier::type_SUBSURF &&
-RNA_boolean_get(&cobj, "use_adaptive_subdivision")) {
+RNA_boolean_get(&cobj, "use_adaptive_subdivision"))
+{
 BL::SubsurfModifier subsurf(mod);
 if (subsurf.subdivision_type() == BL::SubsurfModifier::subdivision_type_CATMULL_CLARK) {

@@ -35,7 +35,8 @@ class BlenderSmokeLoader : public ImageLoader {
 }
 if (attribute == ATTR_STD_VOLUME_DENSITY || attribute == ATTR_STD_VOLUME_FLAME ||
-attribute == ATTR_STD_VOLUME_HEAT || attribute == ATTR_STD_VOLUME_TEMPERATURE) {
+attribute == ATTR_STD_VOLUME_HEAT || attribute == ATTR_STD_VOLUME_TEMPERATURE)
+{
 metadata.type = IMAGE_DATA_TYPE_FLOAT;
 metadata.channels = 1;
 }
@@ -315,24 +316,29 @@ static void sync_volume_object(BL::BlendData &b_data,
 std = ATTR_STD_VOLUME_TEMPERATURE;
 }
 else if (name == Attribute::standard_name(ATTR_STD_VOLUME_VELOCITY) ||
-name == b_volume.velocity_grid()) {
+name == b_volume.velocity_grid())
+{
 std = ATTR_STD_VOLUME_VELOCITY;
 }
 else if (name == Attribute::standard_name(ATTR_STD_VOLUME_VELOCITY_X) ||
-name == b_volume.velocity_x_grid()) {
+name == b_volume.velocity_x_grid())
+{
 std = ATTR_STD_VOLUME_VELOCITY_X;
 }
 else if (name == Attribute::standard_name(ATTR_STD_VOLUME_VELOCITY_Y) ||
-name == b_volume.velocity_y_grid()) {
+name == b_volume.velocity_y_grid())
+{
 std = ATTR_STD_VOLUME_VELOCITY_Y;
 }
 else if (name == Attribute::standard_name(ATTR_STD_VOLUME_VELOCITY_Z) ||
-name == b_volume.velocity_z_grid()) {
+name == b_volume.velocity_z_grid())
+{
 std = ATTR_STD_VOLUME_VELOCITY_Z;
 }
 if ((std != ATTR_STD_NONE && volume->need_attribute(scene, std)) ||
-volume->need_attribute(scene, name)) {
+volume->need_attribute(scene, name))
+{
 Attribute *attr = (std != ATTR_STD_NONE) ?
 volume->attributes.add(std) :
 volume->attributes.add(name, TypeDesc::TypeFloat, ATTR_ELEMENT_VOXEL);

@@ -689,7 +689,8 @@ BVHNode *BVHBuild::build_node(const BVHObjectBinning &range, int level)
 if (!(range.size() > 0 && params.top_level && level == 0)) {
 /* Make leaf node when threshold reached or SAH tells us. */
 if ((params.small_enough_for_leaf(size, level)) ||
-(range_within_max_leaf_size(range, references) && leafSAH < splitSAH)) {
+(range_within_max_leaf_size(range, references) && leafSAH < splitSAH))
+{
 return create_leaf_node(range, references);
 }
 }
@@ -708,7 +709,8 @@ BVHNode *BVHBuild::build_node(const BVHObjectBinning &range, int level)
 unalignedLeafSAH = params.sah_primitive_cost * unaligned_range.leafSAH;
 if (!(range.size() > 0 && params.top_level && level == 0)) {
 if (unalignedLeafSAH < unalignedSplitSAH && unalignedSplitSAH < splitSAH &&
-range_within_max_leaf_size(range, references)) {
+range_within_max_leaf_size(range, references))
+{
 return create_leaf_node(range, references);
 }
 }

@@ -516,7 +516,8 @@ void BVH2::pack_instances(size_t nodes_size, size_t leaf_nodes_size)
 pack.object_node.resize(objects.size());
 if (params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0 ||
-params.num_motion_point_steps > 0) {
+params.num_motion_point_steps > 0)
+{
 pack.prim_time.resize(prim_index_size);
 }

@@ -265,7 +265,8 @@ void CPUDevice::build_bvh(BVH *bvh, Progress &progress, bool refit)
 #ifdef WITH_EMBREE
 if (bvh->params.bvh_layout == BVH_LAYOUT_EMBREE ||
 bvh->params.bvh_layout == BVH_LAYOUT_MULTI_OPTIX_EMBREE ||
-bvh->params.bvh_layout == BVH_LAYOUT_MULTI_METAL_EMBREE) {
+bvh->params.bvh_layout == BVH_LAYOUT_MULTI_METAL_EMBREE)
+{
 BVHEmbree *const bvh_embree = static_cast<BVHEmbree *>(bvh);
 if (refit) {
 bvh_embree->refit(progress);

@@ -75,10 +75,12 @@ Device *device_cuda_create(const DeviceInfo &info, Stats &stats, Profiler &profi
 static CUresult device_cuda_safe_init()
 {
 # ifdef _WIN32
-__try {
+__try
+{
 return cuInit(0);
 }
-__except (EXCEPTION_EXECUTE_HANDLER) {
+__except (EXCEPTION_EXECUTE_HANDLER)
+{
 /* Ignore crashes inside the CUDA driver and hope we can
 * survive even with corrupted CUDA installs. */
 fprintf(stderr, "Cycles CUDA: driver crashed, continuing without CUDA.\n");

@@ -333,7 +333,8 @@ string CUDADevice::compile_kernel(const string &common_cflags,
 return string();
 }
 else if (!(nvcc_cuda_version == 101 || nvcc_cuda_version == 102 || nvcc_cuda_version == 111 ||
-nvcc_cuda_version == 112 || nvcc_cuda_version == 113 || nvcc_cuda_version == 114)) {
+nvcc_cuda_version == 112 || nvcc_cuda_version == 113 || nvcc_cuda_version == 114))
+{
 printf(
 "CUDA version %d.%d detected, build may succeed but only "
 "CUDA 10.1 to 11.4 are officially supported.\n",
@@ -847,7 +848,8 @@ void CUDADevice::tex_alloc(device_texture &mem)
 if (mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT &&
 mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT3 &&
 mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FPN &&
-mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16) {
+mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16)
+{
 CUDA_RESOURCE_DESC resDesc;
 memset(&resDesc, 0, sizeof(resDesc));

@@ -694,7 +694,8 @@ GPUDevice::Mem *GPUDevice::generic_alloc(device_memory &mem, size_t pitch_paddin
 * since other devices might be using the memory. */
 if (!move_texture_to_host && pitch_padding == 0 && mem.host_pointer &&
-mem.host_pointer != shared_pointer) {
+mem.host_pointer != shared_pointer)
+{
 memcpy(shared_pointer, mem.host_pointer, size);
 /* A Call to device_memory::host_free() should be preceded by

@@ -91,10 +91,12 @@ Device *device_hip_create(const DeviceInfo &info, Stats &stats, Profiler &profil
 static hipError_t device_hip_safe_init()
 {
 # ifdef _WIN32
-__try {
+__try
+{
 return hipInit(0);
 }
-__except (EXCEPTION_EXECUTE_HANDLER) {
+__except (EXCEPTION_EXECUTE_HANDLER)
+{
 /* Ignore crashes inside the HIP driver and hope we can
 * survive even with corrupted HIP installs. */
 fprintf(stderr, "Cycles HIP: driver crashed, continuing without HIP.\n");

@@ -812,7 +812,8 @@ void HIPDevice::tex_alloc(device_texture &mem)
 if (mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT &&
 mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT3 &&
 mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FPN &&
-mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16) {
+mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16)
+{
 /* Bindless textures. */
 hipResourceDesc resDesc;
 memset(&resDesc, 0, sizeof(resDesc));

@@ -387,7 +387,8 @@ hiprtGeometryBuildInput HIPRTDevice::prepare_triangle_blas(BVHHIPRT *bvh, Mesh *
 geom_input.geomType = Triangle;
 if (mesh->has_motion_blur() &&
-!(bvh->params.num_motion_triangle_steps == 0 || bvh->params.use_spatial_split)) {
+!(bvh->params.num_motion_triangle_steps == 0 || bvh->params.use_spatial_split))
+{
 const Attribute *attr_mP = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
 const size_t num_triangles = mesh->num_triangles();

@@ -1032,8 +1032,7 @@ void MetalDevice::const_copy_to(const char *name, void *host, size_t size)
 offsetof(KernelParamsMetal, integrator_state), host, size, pointer_block_size);
 }
 # define KERNEL_DATA_ARRAY(data_type, tex_name) \
-else if (strcmp(name, #tex_name) == 0) \
-{ \
+else if (strcmp(name, #tex_name) == 0) { \
 update_launch_pointers(offsetof(KernelParamsMetal, tex_name), host, size, size); \
 }
 # include "kernel/data_arrays.h"
@@ -1096,9 +1095,8 @@ void MetalDevice::tex_alloc(device_texture &mem)
 }
 MTLStorageMode storage_mode = MTLStorageModeManaged;
 if (@available(macos 10.15, *)) {
-if ([mtlDevice hasUnifiedMemory] &&
-device_vendor !=
-METAL_GPU_INTEL) { /* Intel GPUs don't support MTLStorageModeShared for MTLTextures */
+if ([mtlDevice hasUnifiedMemory] && device_vendor != METAL_GPU_INTEL)
+{ /* Intel GPUs don't support MTLStorageModeShared for MTLTextures */
 storage_mode = MTLStorageModeShared;
 }
 }

@@ -263,7 +263,8 @@ bool ShaderCache::should_load_kernel(DeviceKernel device_kernel,
 if (pso_type != PSO_GENERIC) {
 /* Only specialize kernels where it can make an impact. */
 if (device_kernel < DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST ||
-device_kernel > DEVICE_KERNEL_INTEGRATOR_MEGAKERNEL) {
+device_kernel > DEVICE_KERNEL_INTEGRATOR_MEGAKERNEL)
+{
 return false;
 }
@@ -400,7 +401,8 @@ bool MetalKernelPipeline::should_use_binary_archive() const
 if ((device_kernel >= DEVICE_KERNEL_INTEGRATOR_SHADE_BACKGROUND &&
 device_kernel <= DEVICE_KERNEL_INTEGRATOR_SHADE_SHADOW) ||
 (device_kernel >= DEVICE_KERNEL_SHADER_EVAL_DISPLACE &&
-device_kernel <= DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY)) {
+device_kernel <= DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY))
+{
 /* Archive all shade kernels - they take a long time to compile. */
 return true;
 }
@@ -704,7 +706,8 @@ void MetalKernelPipeline::compile()
 if (creating_new_archive && ShaderCache::running) {
 NSError *error;
 if (![archive addComputePipelineFunctionsWithDescriptor:computePipelineStateDescriptor
-error:&error]) {
+error:&error])
+{
 NSString *errStr = [error localizedDescription];
 metal_printf("Failed to add PSO to archive:\n%s\n",
 errStr ? [errStr UTF8String] : "nil");

@@ -892,7 +892,8 @@ id<MTLComputeCommandEncoder> MetalDeviceQueue::get_compute_encoder(DeviceKernel
 if (mtlComputeEncoder_) {
 if (mtlComputeEncoder_.dispatchType == concurrent ? MTLDispatchTypeConcurrent :
-MTLDispatchTypeSerial) {
+MTLDispatchTypeSerial)
+{
 /* declare usage of MTLBuffers etc */
 prepare_resources(kernel);

@@ -35,7 +35,8 @@ int MetalInfo::get_apple_gpu_core_count(id<MTLDevice> device)
 io_service_t gpu_service = IOServiceGetMatchingService(
 kIOMainPortDefault, IORegistryEntryIDMatching(device.registryID));
 if (CFNumberRef numberRef = (CFNumberRef)IORegistryEntryCreateCFProperty(
-gpu_service, CFSTR("gpu-core-count"), 0, 0)) {
+gpu_service, CFSTR("gpu-core-count"), 0, 0))
+{
 if (CFGetTypeID(numberRef) == CFNumberGetTypeID()) {
 CFNumberGetValue(numberRef, kCFNumberSInt32Type, &core_count);
 }
@@ -170,7 +171,8 @@ id<MTLBuffer> MetalBufferPool::get_buffer(id<MTLDevice> device,
 /* Check if buffer matches size and storage mode and is old enough to reuse */
 if (bufferEntry.buffer.length == length && storageMode == bufferEntry.buffer.storageMode &&
-cpuCacheMode == bufferEntry.buffer.cpuCacheMode) {
+cpuCacheMode == bufferEntry.buffer.cpuCacheMode)
+{
 buffer = bufferEntry.buffer;
 buffer_free_list.erase(entry);
 bufferEntry.command_buffer = command_buffer;

@@ -72,7 +72,8 @@ class MultiDevice : public Device {
 foreach (SubDevice &peer_sub, devices) {
 if (peer_sub.peer_island_index < 0 &&
 peer_sub.device->info.type == sub.device->info.type &&
-peer_sub.device->check_peer_access(sub.device)) {
+peer_sub.device->check_peer_access(sub.device))
+{
 peer_sub.peer_island_index = sub.peer_island_index;
 peer_islands[sub.peer_island_index].push_back(&peer_sub);
 }
@@ -205,7 +206,8 @@ class MultiDevice : public Device {
 /* Skip building a bottom level acceleration structure for non-instanced geometry on Embree
 * (since they are put into the top level directly, see bvh_embree.cpp) */
 if (!params.top_level && params.bvh_layout == BVH_LAYOUT_EMBREE &&
-!bvh->geometry[0]->is_instanced()) {
+!bvh->geometry[0]->is_instanced())
+{
 i++;
 continue;
 }
@@ -251,8 +253,8 @@ class MultiDevice : public Device {
 SubDevice *owner_sub = &sub;
 if (owner_sub->ptr_map.find(key) == owner_sub->ptr_map.end()) {
 foreach (SubDevice *island_sub, peer_islands[sub.peer_island_index]) {
-if (island_sub != owner_sub &&
-island_sub->ptr_map.find(key) != island_sub->ptr_map.end()) {
+if (island_sub != owner_sub && island_sub->ptr_map.find(key) != island_sub->ptr_map.end())
+{
 owner_sub = island_sub;
 }
 }
@@ -268,7 +270,8 @@ class MultiDevice : public Device {
 SubDevice *owner_sub = island.front();
 foreach (SubDevice *island_sub, island) {
 if (key ? (island_sub->ptr_map.find(key) != island_sub->ptr_map.end()) :
-(island_sub->device->stats.mem_used < owner_sub->device->stats.mem_used)) {
+(island_sub->device->stats.mem_used < owner_sub->device->stats.mem_used))
+{
 owner_sub = island_sub;
 }
 }

@ -696,8 +696,7 @@ void OneapiDevice::set_global_memory(SyclQueue *queue_,
/* This macro will change global ptr of KernelGlobals via name matching. */ /* This macro will change global ptr of KernelGlobals via name matching. */
# define KERNEL_DATA_ARRAY(type, name) \ # define KERNEL_DATA_ARRAY(type, name) \
else if (#name == matched_name) \ else if (#name == matched_name) { \
{ \
globals->__##name = (type *)memory_device_pointer; \ globals->__##name = (type *)memory_device_pointer; \
return; \ return; \
} }
@ -709,8 +708,7 @@ void OneapiDevice::set_global_memory(SyclQueue *queue_,
} }
KERNEL_DATA_ARRAY(KernelData, data) KERNEL_DATA_ARRAY(KernelData, data)
# include "kernel/data_arrays.h" # include "kernel/data_arrays.h"
else else {
{
std::cerr << "Can't found global/constant memory with name \"" << matched_name << "\"!" std::cerr << "Can't found global/constant memory with name \"" << matched_name << "\"!"
<< std::endl; << std::endl;
assert(false); assert(false);
@ -823,7 +821,8 @@ std::vector<sycl::device> OneapiDevice::available_devices()
int driver_build_version = parse_driver_build_version(device); int driver_build_version = parse_driver_build_version(device);
if ((driver_build_version > 100000 && if ((driver_build_version > 100000 &&
driver_build_version < lowest_supported_driver_version_win) || driver_build_version < lowest_supported_driver_version_win) ||
driver_build_version < lowest_supported_driver_version_neo) { driver_build_version < lowest_supported_driver_version_neo)
{
filter_out = true; filter_out = true;
} }
} }
@ -965,7 +964,8 @@ int OneapiDevice::get_max_num_threads_per_multiprocessor()
{ {
const sycl::device &device = reinterpret_cast<sycl::queue *>(device_queue_)->get_device(); const sycl::device &device = reinterpret_cast<sycl::queue *>(device_queue_)->get_device();
if (device.has(sycl::aspect::ext_intel_gpu_eu_simd_width) && if (device.has(sycl::aspect::ext_intel_gpu_eu_simd_width) &&
device.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) { device.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu))
{
return device.get_info<sycl::ext::intel::info::device::gpu_eu_simd_width>() * return device.get_info<sycl::ext::intel::info::device::gpu_eu_simd_width>() *
device.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>(); device.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
} }

@ -715,7 +715,8 @@ bool OptiXDevice::load_osl_kernels()
vector<OSLKernel> osl_kernels; vector<OSLKernel> osl_kernels;
for (ShaderType type = SHADER_TYPE_SURFACE; type <= SHADER_TYPE_BUMP; for (ShaderType type = SHADER_TYPE_SURFACE; type <= SHADER_TYPE_BUMP;
type = static_cast<ShaderType>(type + 1)) { type = static_cast<ShaderType>(type + 1))
{
const vector<OSL::ShaderGroupRef> &groups = (type == SHADER_TYPE_SURFACE ? const vector<OSL::ShaderGroupRef> &groups = (type == SHADER_TYPE_SURFACE ?
osl_globals.surface_state : osl_globals.surface_state :
type == SHADER_TYPE_VOLUME ? type == SHADER_TYPE_VOLUME ?
@ -1015,7 +1016,8 @@ bool OptiXDevice::build_optix_bvh(BVHOptiX *bvh,
if (use_fast_trace_bvh || if (use_fast_trace_bvh ||
/* The build flags have to match the ones used to query the built-in curve intersection /* The build flags have to match the ones used to query the built-in curve intersection
* program (see optixBuiltinISModuleGet above) */ * program (see optixBuiltinISModuleGet above) */
build_input.type == OPTIX_BUILD_INPUT_TYPE_CURVES) { build_input.type == OPTIX_BUILD_INPUT_TYPE_CURVES)
{
VLOG_INFO << "Using fast to trace OptiX BVH"; VLOG_INFO << "Using fast to trace OptiX BVH";
options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE | OPTIX_BUILD_FLAG_ALLOW_COMPACTION; options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE | OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
} }
@ -1522,7 +1524,8 @@ void OptiXDevice::build_bvh(BVH *bvh, Progress &progress, bool refit)
} }
if (ob->get_geometry()->geometry_type == Geometry::HAIR && if (ob->get_geometry()->geometry_type == Geometry::HAIR &&
static_cast<const Hair *>(ob->get_geometry())->curve_shape == CURVE_THICK) { static_cast<const Hair *>(ob->get_geometry())->curve_shape == CURVE_THICK)
{
if (pipeline_options.usesMotionBlur && ob->get_geometry()->has_motion_blur()) { if (pipeline_options.usesMotionBlur && ob->get_geometry()->has_motion_blur()) {
/* Select between motion blur and non-motion blur built-in intersection module. */ /* Select between motion blur and non-motion blur built-in intersection module. */
instance.sbtOffset = PG_HITD_MOTION - PG_HITD; instance.sbtOffset = PG_HITD_MOTION - PG_HITD;

@ -80,7 +80,8 @@ bool OptiXDeviceQueue::enqueue(DeviceKernel kernel,
} }
if (kernel == DEVICE_KERNEL_SHADER_EVAL_DISPLACE || if (kernel == DEVICE_KERNEL_SHADER_EVAL_DISPLACE ||
kernel == DEVICE_KERNEL_SHADER_EVAL_BACKGROUND || kernel == DEVICE_KERNEL_SHADER_EVAL_BACKGROUND ||
kernel == DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY) { kernel == DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY)
{
cuda_device_assert(cuda_device_, cuda_device_assert(cuda_device_,
cuMemcpyHtoDAsync(launch_params_ptr + offsetof(KernelParamsOptiX, offset), cuMemcpyHtoDAsync(launch_params_ptr + offsetof(KernelParamsOptiX, offset),
args.values[2], // &d_offset args.values[2], // &d_offset

@ -72,7 +72,8 @@ void HdCyclesCurves::PopulatePoints(HdSceneDelegate *sceneDelegate)
VtValue value; VtValue value;
for (const HdExtComputationPrimvarDescriptor &desc : for (const HdExtComputationPrimvarDescriptor &desc :
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex)) { sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex))
{
if (desc.name == HdTokens->points) { if (desc.name == HdTokens->points) {
auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate); auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate);
const auto valueStoreIt = valueStore.find(desc.name); const auto valueStoreIt = valueStore.find(desc.name);
@ -179,7 +180,8 @@ void HdCyclesCurves::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
// Skip attributes that are not needed // Skip attributes that are not needed
if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) || if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) ||
_geom->need_attribute(scene, name)) { _geom->need_attribute(scene, name))
{
ApplyPrimvars(_geom->attributes, name, value, interpolation.second, std); ApplyPrimvars(_geom->attributes, name, value, interpolation.second, std);
} }
} }

@ -226,7 +226,8 @@ void HdCyclesDisplayDriver::draw(const Params &params)
const auto renderBuffer = static_cast<HdCyclesRenderBuffer *>( const auto renderBuffer = static_cast<HdCyclesRenderBuffer *>(
_renderParam->GetDisplayAovBinding().renderBuffer); _renderParam->GetDisplayAovBinding().renderBuffer);
if (!renderBuffer || // Ensure this render buffer matches the texture dimensions if (!renderBuffer || // Ensure this render buffer matches the texture dimensions
(renderBuffer->GetWidth() != params.size.x || renderBuffer->GetHeight() != params.size.y)) { (renderBuffer->GetWidth() != params.size.x || renderBuffer->GetHeight() != params.size.y))
{
return; return;
} }

@ -48,7 +48,8 @@ void HdCyclesInstancer::SyncPrimvars()
sceneDelegate->GetRenderIndex().GetChangeTracker().GetInstancerDirtyBits(GetId()); sceneDelegate->GetRenderIndex().GetChangeTracker().GetInstancerDirtyBits(GetId());
for (const HdPrimvarDescriptor &desc : for (const HdPrimvarDescriptor &desc :
sceneDelegate->GetPrimvarDescriptors(GetId(), HdInterpolationInstance)) { sceneDelegate->GetPrimvarDescriptors(GetId(), HdInterpolationInstance))
{
if (!HdChangeTracker::IsPrimvarDirty(dirtyBits, GetId(), desc.name)) { if (!HdChangeTracker::IsPrimvarDirty(dirtyBits, GetId(), desc.name)) {
continue; continue;
} }
@ -119,7 +120,8 @@ VtMatrix4dArray HdCyclesInstancer::ComputeInstanceTransforms(const SdfPath &prot
VtMatrix4dArray resultTransforms; VtMatrix4dArray resultTransforms;
if (const auto instancer = static_cast<HdCyclesInstancer *>( if (const auto instancer = static_cast<HdCyclesInstancer *>(
GetDelegate()->GetRenderIndex().GetInstancer(GetParentId()))) { GetDelegate()->GetRenderIndex().GetInstancer(GetParentId())))
{
for (const GfMatrix4d &parentTransform : instancer->ComputeInstanceTransforms(GetId())) { for (const GfMatrix4d &parentTransform : instancer->ComputeInstanceTransforms(GetId())) {
for (const GfMatrix4d &localTransform : transforms) { for (const GfMatrix4d &localTransform : transforms) {
resultTransforms.push_back(parentTransform * localTransform); resultTransforms.push_back(parentTransform * localTransform);

@ -168,7 +168,8 @@ void HdCyclesLight::Sync(HdSceneDelegate *sceneDelegate,
} }
// Need to update shader graph when transform changes in case transform was baked into it // Need to update shader graph when transform changes in case transform was baked into it
else if (_light->tfm_is_modified() && (_lightType == HdPrimTypeTokens->domeLight || else if (_light->tfm_is_modified() && (_lightType == HdPrimTypeTokens->domeLight ||
_light->get_shader()->has_surface_spatial_varying)) { _light->get_shader()->has_surface_spatial_varying))
{
PopulateShaderGraph(sceneDelegate); PopulateShaderGraph(sceneDelegate);
} }

@ -71,7 +71,8 @@ class UsdToCyclesMapping {
} }
// TODO: Is there a better mapping than 'color'? // TODO: Is there a better mapping than 'color'?
if (name == CyclesMaterialTokens->r || name == CyclesMaterialTokens->g || if (name == CyclesMaterialTokens->r || name == CyclesMaterialTokens->g ||
name == CyclesMaterialTokens->b) { name == CyclesMaterialTokens->b)
{
return "color"; return "color";
} }
@ -168,7 +169,8 @@ class UsdToCycles {
usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_float2 || usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_float2 ||
usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_float3 || usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_float3 ||
usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_float4 || usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_float4 ||
usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_int) { usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_int)
{
return &UsdPrimvarReader; return &UsdPrimvarReader;
} }
@ -492,7 +494,8 @@ void HdCyclesMaterial::PopulateShaderGraph(const HdMaterialNetwork2 &networkMap)
const char *inputName = nullptr; const char *inputName = nullptr;
const char *outputName = nullptr; const char *outputName = nullptr;
if (terminalName == HdMaterialTerminalTokens->surface || if (terminalName == HdMaterialTerminalTokens->surface ||
terminalName == CyclesMaterialTokens->cyclesSurface) { terminalName == CyclesMaterialTokens->cyclesSurface)
{
inputName = "Surface"; inputName = "Surface";
// Find default output name based on the node if none is provided // Find default output name based on the node if none is provided
if (node->type->name == "add_closure" || node->type->name == "mix_closure") { if (node->type->name == "add_closure" || node->type->name == "mix_closure") {
@ -506,11 +509,13 @@ void HdCyclesMaterial::PopulateShaderGraph(const HdMaterialNetwork2 &networkMap)
} }
} }
else if (terminalName == HdMaterialTerminalTokens->displacement || else if (terminalName == HdMaterialTerminalTokens->displacement ||
terminalName == CyclesMaterialTokens->cyclesDisplacement) { terminalName == CyclesMaterialTokens->cyclesDisplacement)
{
inputName = outputName = "Displacement"; inputName = outputName = "Displacement";
} }
else if (terminalName == HdMaterialTerminalTokens->volume || else if (terminalName == HdMaterialTerminalTokens->volume ||
terminalName == CyclesMaterialTokens->cyclesVolume) { terminalName == CyclesMaterialTokens->cyclesVolume)
{
inputName = outputName = "Volume"; inputName = outputName = "Volume";
} }

@ -53,7 +53,8 @@ VtValue ComputeTriangulatedFaceVaryingPrimvar(VtValue value,
HdMeshUtil &meshUtil) HdMeshUtil &meshUtil)
{ {
if (meshUtil.ComputeTriangulatedFaceVaryingPrimvar( if (meshUtil.ComputeTriangulatedFaceVaryingPrimvar(
HdGetValueData(value), value.GetArraySize(), valueType, &value)) { HdGetValueData(value), value.GetArraySize(), valueType, &value))
{
return value; return value;
} }
@ -113,7 +114,8 @@ HdDirtyBits HdCyclesMesh::_PropagateDirtyBits(HdDirtyBits bits) const
} }
if (bits & (HdChangeTracker::DirtyTopology | HdChangeTracker::DirtyDisplayStyle | if (bits & (HdChangeTracker::DirtyTopology | HdChangeTracker::DirtyDisplayStyle |
HdChangeTracker::DirtySubdivTags)) { HdChangeTracker::DirtySubdivTags))
{
// Do full topology update when display style or subdivision changes // Do full topology update when display style or subdivision changes
bits |= HdChangeTracker::DirtyTopology | HdChangeTracker::DirtyDisplayStyle | bits |= HdChangeTracker::DirtyTopology | HdChangeTracker::DirtyDisplayStyle |
HdChangeTracker::DirtySubdivTags; HdChangeTracker::DirtySubdivTags;
@ -159,7 +161,8 @@ void HdCyclesMesh::PopulatePoints(HdSceneDelegate *sceneDelegate)
VtValue value; VtValue value;
for (const HdExtComputationPrimvarDescriptor &desc : for (const HdExtComputationPrimvarDescriptor &desc :
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex)) { sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex))
{
if (desc.name == HdTokens->points) { if (desc.name == HdTokens->points) {
auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate); auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate);
const auto valueStoreIt = valueStore.find(desc.name); const auto valueStoreIt = valueStore.find(desc.name);
@ -208,7 +211,8 @@ void HdCyclesMesh::PopulateNormals(HdSceneDelegate *sceneDelegate)
for (int i = 0; i < HdInterpolationCount && interpolation == HdInterpolationCount; ++i) { for (int i = 0; i < HdInterpolationCount && interpolation == HdInterpolationCount; ++i) {
for (const HdExtComputationPrimvarDescriptor &desc : for (const HdExtComputationPrimvarDescriptor &desc :
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(),
static_cast<HdInterpolation>(i))) { static_cast<HdInterpolation>(i)))
{
if (desc.name == HdTokens->normals) { if (desc.name == HdTokens->normals) {
auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate); auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate);
const auto valueStoreIt = valueStore.find(desc.name); const auto valueStoreIt = valueStore.find(desc.name);
@ -270,7 +274,8 @@ void HdCyclesMesh::PopulateNormals(HdSceneDelegate *sceneDelegate)
TF_VERIFY(normals.size() == static_cast<size_t>(_topology.GetNumFaceVaryings())); TF_VERIFY(normals.size() == static_cast<size_t>(_topology.GetNumFaceVaryings()));
if (!_util.ComputeTriangulatedFaceVaryingPrimvar( if (!_util.ComputeTriangulatedFaceVaryingPrimvar(
normals.data(), normals.size(), HdTypeFloatVec3, &value)) { normals.data(), normals.size(), HdTypeFloatVec3, &value))
{
return; return;
} }
@ -340,7 +345,8 @@ void HdCyclesMesh::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
// Skip attributes that are not needed // Skip attributes that are not needed
if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) || if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) ||
_geom->need_attribute(scene, name)) { _geom->need_attribute(scene, name))
{
const HdType valueType = HdGetValueTupleType(value).type; const HdType valueType = HdGetValueTupleType(value).type;
if (!subdivision) { if (!subdivision) {

@ -44,7 +44,8 @@ bool HdCyclesOutputDriver::update_render_tile(const Tile &tile)
// Avoid extra copy by mapping render buffer directly when dimensions/format match the tile // Avoid extra copy by mapping render buffer directly when dimensions/format match the tile
if (tile.offset.x == 0 && tile.offset.y == 0 && tile.size.x == renderBuffer->GetWidth() && if (tile.offset.x == 0 && tile.offset.y == 0 && tile.size.x == renderBuffer->GetWidth() &&
tile.size.y == renderBuffer->GetHeight() && tile.size.y == renderBuffer->GetHeight() &&
(format >= HdFormatFloat32 && format <= HdFormatFloat32Vec4)) { (format >= HdFormatFloat32 && format <= HdFormatFloat32Vec4))
{
float *const data = static_cast<float *>(renderBuffer->Map()); float *const data = static_cast<float *>(renderBuffer->Map());
TF_VERIFY(tile.get_pass_pixels(aovBinding.aovName.GetString(), channels, data)); TF_VERIFY(tile.get_pass_pixels(aovBinding.aovName.GetString(), channels, data));
renderBuffer->Unmap(); renderBuffer->Unmap();

@ -74,7 +74,8 @@ void HdCyclesPoints::PopulatePoints(HdSceneDelegate *sceneDelegate)
VtValue value; VtValue value;
for (const HdExtComputationPrimvarDescriptor &desc : for (const HdExtComputationPrimvarDescriptor &desc :
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex)) { sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex))
{
if (desc.name == HdTokens->points) { if (desc.name == HdTokens->points) {
auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate); auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate);
const auto valueStoreIt = valueStore.find(desc.name); const auto valueStoreIt = valueStore.find(desc.name);
@ -187,7 +188,8 @@ void HdCyclesPoints::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
// Skip attributes that are not needed // Skip attributes that are not needed
if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) || if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) ||
_geom->need_attribute(scene, name)) { _geom->need_attribute(scene, name))
{
ApplyPrimvars(_geom->attributes, name, value, interpolation.second, std); ApplyPrimvars(_geom->attributes, name, value, interpolation.second, std);
} }
} }

@ -123,7 +123,8 @@ HdCyclesDelegate::HdCyclesDelegate(const HdRenderSettingsMap &settingsMap,
for (const auto &setting : settingsMap) { for (const auto &setting : settingsMap) {
// Skip over the settings known to be used for initialization only // Skip over the settings known to be used for initialization only
if (setting.first == HdCyclesRenderSettingsTokens->device || if (setting.first == HdCyclesRenderSettingsTokens->device ||
setting.first == HdCyclesRenderSettingsTokens->threads) { setting.first == HdCyclesRenderSettingsTokens->threads)
{
continue; continue;
} }
@ -284,7 +285,8 @@ HdSprim *HdCyclesDelegate::CreateSprim(const TfToken &typeId, const SdfPath &spr
} }
if (typeId == HdPrimTypeTokens->diskLight || typeId == HdPrimTypeTokens->distantLight || if (typeId == HdPrimTypeTokens->diskLight || typeId == HdPrimTypeTokens->distantLight ||
typeId == HdPrimTypeTokens->domeLight || typeId == HdPrimTypeTokens->rectLight || typeId == HdPrimTypeTokens->domeLight || typeId == HdPrimTypeTokens->rectLight ||
typeId == HdPrimTypeTokens->sphereLight) { typeId == HdPrimTypeTokens->sphereLight)
{
return new HdCyclesLight(sprimId, typeId); return new HdCyclesLight(sprimId, typeId);
} }
if (typeId == HdPrimTypeTokens->extComputation) { if (typeId == HdPrimTypeTokens->extComputation) {
@ -400,7 +402,8 @@ HdAovDescriptor HdCyclesDelegate::GetDefaultAovDescriptor(const TfToken &name) c
return HdAovDescriptor(HdFormatFloat32Vec3, false, VtValue(GfVec3f(0.0f))); return HdAovDescriptor(HdFormatFloat32Vec3, false, VtValue(GfVec3f(0.0f)));
} }
if (name == HdAovTokens->primId || name == HdAovTokens->instanceId || if (name == HdAovTokens->primId || name == HdAovTokens->instanceId ||
name == HdAovTokens->elementId) { name == HdAovTokens->elementId)
{
return HdAovDescriptor(HdFormatInt32, false, VtValue(-1)); return HdAovDescriptor(HdFormatInt32, false, VtValue(-1));
} }

@ -90,8 +90,8 @@ void HdCyclesRenderPass::_Execute(const HdRenderPassStateSharedPtr &renderPassSt
const HdRenderPassAovBindingVector &aovBindings = renderPassState->GetAovBindings(); const HdRenderPassAovBindingVector &aovBindings = renderPassState->GetAovBindings();
if (_renderParam->GetAovBindings() != aovBindings || if (_renderParam->GetAovBindings() != aovBindings ||
// Need to resync passes when denoising is enabled or disabled to update the pass mode // Need to resync passes when denoising is enabled or disabled to update the pass mode
(settingsVersion != _lastSettingsVersion && (settingsVersion != _lastSettingsVersion && scene->integrator->use_denoise_is_modified()))
scene->integrator->use_denoise_is_modified())) { {
_renderParam->SyncAovBindings(aovBindings); _renderParam->SyncAovBindings(aovBindings);
if (renderDelegate->IsDisplaySupported()) { if (renderDelegate->IsDisplaySupported()) {

@ -44,10 +44,11 @@ void HdCyclesVolume::Populate(HdSceneDelegate *sceneDelegate, HdDirtyBits dirtyB
Scene *const scene = (Scene *)_geom->get_owner(); Scene *const scene = (Scene *)_geom->get_owner();
if (dirtyBits & HdChangeTracker::DirtyVolumeField) { if (dirtyBits & HdChangeTracker::DirtyVolumeField) {
for (const HdVolumeFieldDescriptor &field : for (const HdVolumeFieldDescriptor &field : sceneDelegate->GetVolumeFieldDescriptors(GetId()))
sceneDelegate->GetVolumeFieldDescriptors(GetId())) { {
if (const auto openvdbAsset = static_cast<HdCyclesField *>( if (const auto openvdbAsset = static_cast<HdCyclesField *>(
sceneDelegate->GetRenderIndex().GetBprim(_tokens->openvdbAsset, field.fieldId))) { sceneDelegate->GetRenderIndex().GetBprim(_tokens->openvdbAsset, field.fieldId)))
{
const ustring name(field.fieldName.GetString()); const ustring name(field.fieldName.GetString());
AttributeStandard std = ATTR_STD_NONE; AttributeStandard std = ATTR_STD_NONE;
@ -72,7 +73,8 @@ void HdCyclesVolume::Populate(HdSceneDelegate *sceneDelegate, HdDirtyBits dirtyB
// Skip attributes that are not needed // Skip attributes that are not needed
if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) || if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) ||
_geom->need_attribute(scene, name)) { _geom->need_attribute(scene, name))
{
Attribute *const attr = (std != ATTR_STD_NONE) ? Attribute *const attr = (std != ATTR_STD_NONE) ?
_geom->attributes.add(std) : _geom->attributes.add(std) :
_geom->attributes.add( _geom->attributes.add(

@ -109,8 +109,8 @@ static Device *find_best_device(Device *device, DenoiserType type)
} }
else { else {
/* Prefer a device that can use graphics interop for faster display update. */ /* Prefer a device that can use graphics interop for faster display update. */
if (sub_device->should_use_graphics_interop() && if (sub_device->should_use_graphics_interop() && !best_device->should_use_graphics_interop())
!best_device->should_use_graphics_interop()) { {
best_device = sub_device; best_device = sub_device;
} }

@ -164,7 +164,8 @@ class OIDNDenoiseContext {
oidn_filter.set("hdr", true); oidn_filter.set("hdr", true);
oidn_filter.set("srgb", false); oidn_filter.set("srgb", false);
if (denoise_params_.prefilter == DENOISER_PREFILTER_NONE || if (denoise_params_.prefilter == DENOISER_PREFILTER_NONE ||
denoise_params_.prefilter == DENOISER_PREFILTER_ACCURATE) { denoise_params_.prefilter == DENOISER_PREFILTER_ACCURATE)
{
oidn_filter.set("cleanAux", true); oidn_filter.set("cleanAux", true);
} }
oidn_filter.commit(); oidn_filter.commit();
@ -189,7 +190,8 @@ class OIDNDenoiseContext {
void filter_guiding_pass_if_needed(oidn::DeviceRef &oidn_device, OIDNPass &oidn_pass) void filter_guiding_pass_if_needed(oidn::DeviceRef &oidn_device, OIDNPass &oidn_pass)
{ {
if (denoise_params_.prefilter != DENOISER_PREFILTER_ACCURATE || !oidn_pass || if (denoise_params_.prefilter != DENOISER_PREFILTER_ACCURATE || !oidn_pass ||
oidn_pass.is_filtered) { oidn_pass.is_filtered)
{
return; return;
} }

@ -180,7 +180,8 @@ bool PassAccessor::get_render_tile_pixels(const RenderBuffers *render_buffers,
} }
else if ((pass_info.divide_type != PASS_NONE || pass_info.direct_type != PASS_NONE || else if ((pass_info.divide_type != PASS_NONE || pass_info.direct_type != PASS_NONE ||
pass_info.indirect_type != PASS_NONE) && pass_info.indirect_type != PASS_NONE) &&
mode != PassMode::DENOISED) { mode != PassMode::DENOISED)
{
/* RGB lighting passes that need to divide out color and/or sum direct and indirect. /* RGB lighting passes that need to divide out color and/or sum direct and indirect.
* These can also optionally write alpha like the combined pass. */ * These can also optionally write alpha like the combined pass. */
get_pass_light_path(render_buffers, buffer_params, destination); get_pass_light_path(render_buffers, buffer_params, destination);

@ -348,7 +348,8 @@ void PathTrace::update_work_buffer_params_if_needed(const RenderWork &render_wor
} }
if (render_state_.need_reset_params || if (render_state_.need_reset_params ||
render_state_.resolution_divider != render_work.resolution_divider) { render_state_.resolution_divider != render_work.resolution_divider)
{
update_effective_work_buffer_params(render_work); update_effective_work_buffer_params(render_work);
} }
@ -565,7 +566,8 @@ void PathTrace::denoise(const RenderWork &render_work)
if (denoiser_->denoise_buffer(render_state_.effective_big_tile_params, if (denoiser_->denoise_buffer(render_state_.effective_big_tile_params,
buffer_to_denoise, buffer_to_denoise,
get_num_samples_in_buffer(), get_num_samples_in_buffer(),
allow_inplace_modification)) { allow_inplace_modification))
{
render_state_.has_denoised_result = true; render_state_.has_denoised_result = true;
} }

@ -117,7 +117,8 @@ void PathTraceDisplay::copy_pixels_to_texture(
const int texture_height = texture_state_.size.y; const int texture_height = texture_state_.size.y;
if (texture_x == 0 && texture_y == 0 && pixels_width == texture_width && if (texture_x == 0 && texture_y == 0 && pixels_width == texture_width &&
pixels_height == texture_height) { pixels_height == texture_height)
{
const size_t size_in_bytes = sizeof(half4) * texture_width * texture_height; const size_t size_in_bytes = sizeof(half4) * texture_width * texture_height;
memcpy(mapped_rgba_pixels, rgba_pixels, size_in_bytes); memcpy(mapped_rgba_pixels, rgba_pixels, size_in_bytes);
} }

@ -232,7 +232,8 @@ int PathTraceWorkCPU::adaptive_sampling_converge_filter_count_active(float thres
uint num_row_pixels_active = 0; uint num_row_pixels_active = 0;
for (int x = 0; x < width; ++x) { for (int x = 0; x < width; ++x) {
if (!kernels_.adaptive_sampling_convergence_check( if (!kernels_.adaptive_sampling_convergence_check(
kernel_globals, render_buffer, full_x + x, y, threshold, reset, offset, stride)) { kernel_globals, render_buffer, full_x + x, y, threshold, reset, offset, stride))
{
++num_row_pixels_active; ++num_row_pixels_active;
row_converged = false; row_converged = false;
} }

@ -22,7 +22,8 @@ static size_t estimate_single_state_size(const uint kernel_features)
{ {
size_t state_size = 0; size_t state_size = 0;
#define KERNEL_STRUCT_BEGIN(name) for (int array_index = 0;; array_index++) { #define KERNEL_STRUCT_BEGIN(name) \
for (int array_index = 0;; array_index++) {
#define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \ #define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \
state_size += (kernel_features & (feature)) ? sizeof(type) : 0; state_size += (kernel_features & (feature)) ? sizeof(type) : 0;
#define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \ #define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \
@ -96,7 +97,8 @@ void PathTraceWorkGPU::alloc_integrator_soa()
const int requested_volume_stack_size = device_scene_->data.volume_stack_size; const int requested_volume_stack_size = device_scene_->data.volume_stack_size;
const uint kernel_features = device_scene_->data.kernel_features; const uint kernel_features = device_scene_->data.kernel_features;
if ((integrator_state_soa_kernel_features_ & kernel_features) == kernel_features && if ((integrator_state_soa_kernel_features_ & kernel_features) == kernel_features &&
integrator_state_soa_volume_stack_size_ >= requested_volume_stack_size) { integrator_state_soa_volume_stack_size_ >= requested_volume_stack_size)
{
return; return;
} }
integrator_state_soa_kernel_features_ = kernel_features; integrator_state_soa_kernel_features_ = kernel_features;
@ -121,7 +123,8 @@ void PathTraceWorkGPU::alloc_integrator_soa()
* write the pointers into a struct that resides in constant memory. * write the pointers into a struct that resides in constant memory.
* *
* TODO: store float3 in separate XYZ arrays. */ * TODO: store float3 in separate XYZ arrays. */
#define KERNEL_STRUCT_BEGIN(name) for (int array_index = 0;; array_index++) { #define KERNEL_STRUCT_BEGIN(name) \
for (int array_index = 0;; array_index++) {
#define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \ #define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \
if ((kernel_features & (feature)) && (integrator_state_gpu_.parent_struct.name == nullptr)) { \ if ((kernel_features & (feature)) && (integrator_state_gpu_.parent_struct.name == nullptr)) { \
device_only_memory<type> *array = new device_only_memory<type>(device_, \ device_only_memory<type> *array = new device_only_memory<type>(device_, \
@ -132,7 +135,8 @@ void PathTraceWorkGPU::alloc_integrator_soa()
} }
#define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \ #define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \
if ((kernel_features & (feature)) && \ if ((kernel_features & (feature)) && \
(integrator_state_gpu_.parent_struct[array_index].name == nullptr)) { \ (integrator_state_gpu_.parent_struct[array_index].name == nullptr)) \
{ \
device_only_memory<type> *array = new device_only_memory<type>(device_, \ device_only_memory<type> *array = new device_only_memory<type>(device_, \
"integrator_state_" #name); \ "integrator_state_" #name); \
array->alloc_to_device(max_num_paths_); \ array->alloc_to_device(max_num_paths_); \
@ -611,7 +615,8 @@ void PathTraceWorkGPU::compact_main_paths(const int num_active_paths)
const int min_compact_paths = 32; const int min_compact_paths = 32;
if (max_active_main_path_index_ == num_active_paths || if (max_active_main_path_index_ == num_active_paths ||
max_active_main_path_index_ < min_compact_paths) { max_active_main_path_index_ < min_compact_paths)
{
return; return;
} }
@ -647,7 +652,8 @@ void PathTraceWorkGPU::compact_shadow_paths()
const float shadow_compact_ratio = 0.5f; const float shadow_compact_ratio = 0.5f;
const int min_compact_paths = 32; const int min_compact_paths = 32;
if (integrator_next_shadow_path_index_.data()[0] < num_active_paths * shadow_compact_ratio || if (integrator_next_shadow_path_index_.data()[0] < num_active_paths * shadow_compact_ratio ||
integrator_next_shadow_path_index_.data()[0] < min_compact_paths) { integrator_next_shadow_path_index_.data()[0] < min_compact_paths)
{
return; return;
} }

@ -465,7 +465,8 @@ void RenderScheduler::report_work_begin(const RenderWork &render_work)
* because it might be wrongly 0. Check for whether path tracing is actually happening as it is * because it might be wrongly 0. Check for whether path tracing is actually happening as it is
* expected to happen in the first work. */ * expected to happen in the first work. */
if (render_work.resolution_divider == pixel_size_ && render_work.path_trace.num_samples != 0 && if (render_work.resolution_divider == pixel_size_ && render_work.path_trace.num_samples != 0 &&
render_work.path_trace.start_sample == get_start_sample()) { render_work.path_trace.start_sample == get_start_sample())
{
state_.start_render_time = time_dt(); state_.start_render_time = time_dt();
} }
} }

@ -159,7 +159,8 @@ ccl_device_inline
tmin, tmin,
isect_t, isect_t,
lcg_state, lcg_state,
max_hits)) { max_hits))
{
return true; return true;
} }
} }
@ -196,7 +197,8 @@ ccl_device_inline
tmin, tmin,
isect_t, isect_t,
lcg_state, lcg_state,
max_hits)) { max_hits))
{
return true; return true;
} }
} }

@ -140,7 +140,8 @@ ccl_device_noinline bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
visibility, visibility,
prim_object, prim_object,
prim, prim,
prim_addr)) { prim_addr))
{
/* shadow ray early termination */ /* shadow ray early termination */
if (visibility & PATH_RAY_SHADOW_OPAQUE) if (visibility & PATH_RAY_SHADOW_OPAQUE)
return true; return true;
@ -159,7 +160,8 @@ ccl_device_noinline bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
visibility, visibility,
prim_object, prim_object,
prim, prim,
prim_addr)) { prim_addr))
{
/* shadow ray early termination */ /* shadow ray early termination */
if (visibility & PATH_RAY_SHADOW_OPAQUE) if (visibility & PATH_RAY_SHADOW_OPAQUE)
return true; return true;

@ -55,7 +55,8 @@ ccl_device_forceinline Spectrum bsdf_ashikhmin_shirley_eval(ccl_private const Sh
float out = 0.0f; float out = 0.0f;
if ((cosNgO < 0.0f) || fmaxf(bsdf->alpha_x, bsdf->alpha_y) <= 1e-4f || if ((cosNgO < 0.0f) || fmaxf(bsdf->alpha_x, bsdf->alpha_y) <= 1e-4f ||
!(NdotI > 0.0f && NdotO > 0.0f)) { !(NdotI > 0.0f && NdotO > 0.0f))
{
*pdf = 0.0f; *pdf = 0.0f;
return zero_spectrum(); return zero_spectrum();
} }

@ -399,7 +399,8 @@ ccl_device Spectrum bsdf_microfacet_eval(ccl_private const ShaderClosure *sc,
* - Purely refractive closures can't have reflection. * - Purely refractive closures can't have reflection.
*/ */
if ((cos_NI <= 0) || (alpha_x * alpha_y <= 1e-7f) || ((cos_NgO < 0.0f) != is_refraction) || if ((cos_NI <= 0) || (alpha_x * alpha_y <= 1e-7f) || ((cos_NgO < 0.0f) != is_refraction) ||
(is_refraction && !m_refractive) || (!is_refraction && m_refractive && !m_glass)) { (is_refraction && !m_refractive) || (!is_refraction && m_refractive && !m_glass))
{
*pdf = 0.0f; *pdf = 0.0f;
return zero_spectrum(); return zero_spectrum();
} }

@ -38,7 +38,8 @@ ccl_device_forceinline Spectrum MF_FUNCTION_FULL_NAME(mf_eval)(float3 wi,
} }
else else
#endif #endif
if (wo.z < wi.z) { if (wo.z < wi.z)
{
swapped = true; swapped = true;
float3 tmp = wo; float3 tmp = wo;
wo = wi; wo = wi;

@ -284,7 +284,8 @@ ccl_device_forceinline void kernel_embree_filter_intersection_func_impl(
const Ray *cray = ctx->ray; const Ray *cray = ctx->ray;
if (kernel_embree_is_self_intersection( if (kernel_embree_is_self_intersection(
kg, hit, cray, reinterpret_cast<intptr_t>(args->geometryUserPtr))) { kg, hit, cray, reinterpret_cast<intptr_t>(args->geometryUserPtr)))
{
*args->valid = 0; *args->valid = 0;
} }
} }
@ -577,7 +578,8 @@ ccl_device void kernel_embree_filter_func_backface_cull(const RTCFilterFunctionN
/* Always ignore back-facing intersections. */ /* Always ignore back-facing intersections. */
if (dot(make_float3(ray->dir_x, ray->dir_y, ray->dir_z), if (dot(make_float3(ray->dir_x, ray->dir_y, ray->dir_z),
make_float3(hit->Ng_x, hit->Ng_y, hit->Ng_z)) > 0.0f) { make_float3(hit->Ng_x, hit->Ng_y, hit->Ng_z)) > 0.0f)
{
*args->valid = 0; *args->valid = 0;
return; return;
} }
@ -587,7 +589,8 @@ ccl_device void kernel_embree_filter_func_backface_cull(const RTCFilterFunctionN
const Ray *cray = ctx->ray; const Ray *cray = ctx->ray;
if (kernel_embree_is_self_intersection( if (kernel_embree_is_self_intersection(
kg, hit, cray, reinterpret_cast<intptr_t>(args->geometryUserPtr))) { kg, hit, cray, reinterpret_cast<intptr_t>(args->geometryUserPtr)))
{
*args->valid = 0; *args->valid = 0;
} }
} }
@ -600,7 +603,8 @@ ccl_device void kernel_embree_filter_occluded_func_backface_cull(
/* Always ignore back-facing intersections. */ /* Always ignore back-facing intersections. */
if (dot(make_float3(ray->dir_x, ray->dir_y, ray->dir_z), if (dot(make_float3(ray->dir_x, ray->dir_y, ray->dir_z),
make_float3(hit->Ng_x, hit->Ng_y, hit->Ng_z)) > 0.0f) { make_float3(hit->Ng_x, hit->Ng_y, hit->Ng_z)) > 0.0f)
{
*args->valid = 0; *args->valid = 0;
return; return;
} }

@ -72,8 +72,7 @@ void kernel_global_memory_copy(KernelGlobalsCPU *kg, const char *name, void *mem
} }
#define KERNEL_DATA_ARRAY(type, tname) \ #define KERNEL_DATA_ARRAY(type, tname) \
else if (strcmp(name, #tname) == 0) \ else if (strcmp(name, #tname) == 0) { \
{ \
kg->tname.data = (type *)mem; \ kg->tname.data = (type *)mem; \
kg->tname.width = size; \ kg->tname.width = size; \
} }

@ -193,7 +193,8 @@ ccl_device float4 kernel_tex_image_interp(KernelGlobals kg, int id, float x, flo
/* float4, byte4, ushort4 and half4 */ /* float4, byte4, ushort4 and half4 */
const int texture_type = info.data_type; const int texture_type = info.data_type;
if (texture_type == IMAGE_DATA_TYPE_FLOAT4 || texture_type == IMAGE_DATA_TYPE_BYTE4 || if (texture_type == IMAGE_DATA_TYPE_FLOAT4 || texture_type == IMAGE_DATA_TYPE_BYTE4 ||
texture_type == IMAGE_DATA_TYPE_HALF4 || texture_type == IMAGE_DATA_TYPE_USHORT4) { texture_type == IMAGE_DATA_TYPE_HALF4 || texture_type == IMAGE_DATA_TYPE_USHORT4)
{
if (info.interpolation == INTERPOLATION_CUBIC || info.interpolation == INTERPOLATION_SMART) { if (info.interpolation == INTERPOLATION_CUBIC || info.interpolation == INTERPOLATION_SMART) {
return kernel_tex_image_interp_bicubic<float4>(info, x, y); return kernel_tex_image_interp_bicubic<float4>(info, x, y);
} }
@ -256,7 +257,8 @@ ccl_device float4 kernel_tex_image_interp_3d(KernelGlobals kg,
} }
#endif #endif
if (texture_type == IMAGE_DATA_TYPE_FLOAT4 || texture_type == IMAGE_DATA_TYPE_BYTE4 || if (texture_type == IMAGE_DATA_TYPE_FLOAT4 || texture_type == IMAGE_DATA_TYPE_BYTE4 ||
texture_type == IMAGE_DATA_TYPE_HALF4 || texture_type == IMAGE_DATA_TYPE_USHORT4) { texture_type == IMAGE_DATA_TYPE_HALF4 || texture_type == IMAGE_DATA_TYPE_USHORT4)
{
if (interpolation == INTERPOLATION_CUBIC || interpolation == INTERPOLATION_SMART) { if (interpolation == INTERPOLATION_CUBIC || interpolation == INTERPOLATION_SMART) {
return kernel_tex_image_interp_tricubic<float4>(info, x, y, z); return kernel_tex_image_interp_tricubic<float4>(info, x, y, z);
} }

@ -53,7 +53,8 @@ ccl_device_inline void gpu_parallel_sort_bucket_pass(const uint num_states,
const uint partition_end = min(num_states, partition_start + partition_size); const uint partition_end = min(num_states, partition_start + partition_size);
for (int state_index = partition_start + uint(local_id); state_index < partition_end; for (int state_index = partition_start + uint(local_id); state_index < partition_end;
state_index += uint(local_size)) { state_index += uint(local_size))
{
ushort kernel_index = d_queued_kernel[state_index]; ushort kernel_index = d_queued_kernel[state_index];
if (kernel_index == queued_kernel) { if (kernel_index == queued_kernel) {
uint key = d_shader_sort_key[state_index] % max_shaders; uint key = d_shader_sort_key[state_index] % max_shaders;
@ -115,7 +116,8 @@ ccl_device_inline void gpu_parallel_sort_write_pass(const uint num_states,
ccl_global int *key_offsets = partition_key_offsets + (uint(grid_id) * max_shaders); ccl_global int *key_offsets = partition_key_offsets + (uint(grid_id) * max_shaders);
for (int state_index = partition_start + uint(local_id); state_index < partition_end; for (int state_index = partition_start + uint(local_id); state_index < partition_end;
state_index += uint(local_size)) { state_index += uint(local_size))
{
ushort kernel_index = d_queued_kernel[state_index]; ushort kernel_index = d_queued_kernel[state_index];
if (kernel_index == queued_kernel) { if (kernel_index == queued_kernel) {
uint key = d_shader_sort_key[state_index] % max_shaders; uint key = d_shader_sort_key[state_index] % max_shaders;

@ -442,7 +442,8 @@ ccl_device_inline bool shadow_intersection_filter(const hiprtRay &ray,
# else # else
if (num_hits >= max_hits || if (num_hits >= max_hits ||
!(intersection_get_shader_flags(NULL, prim, type) & SD_HAS_TRANSPARENT_SHADOW)) { !(intersection_get_shader_flags(NULL, prim, type) & SD_HAS_TRANSPARENT_SHADOW))
{
return false; return false;
} }

@ -330,7 +330,8 @@ ccl_device float4 kernel_tex_image_interp_3d(KernelGlobals, int id, float3 P, in
if (info.data_type == IMAGE_DATA_TYPE_NANOVDB_FLOAT || if (info.data_type == IMAGE_DATA_TYPE_NANOVDB_FLOAT ||
info.data_type == IMAGE_DATA_TYPE_NANOVDB_FLOAT3 || info.data_type == IMAGE_DATA_TYPE_NANOVDB_FLOAT3 ||
info.data_type == IMAGE_DATA_TYPE_NANOVDB_FPN || info.data_type == IMAGE_DATA_TYPE_NANOVDB_FPN ||
info.data_type == IMAGE_DATA_TYPE_NANOVDB_FP16) { info.data_type == IMAGE_DATA_TYPE_NANOVDB_FP16)
{
return make_float4( return make_float4(
TEX_IMAGE_MISSING_R, TEX_IMAGE_MISSING_G, TEX_IMAGE_MISSING_B, TEX_IMAGE_MISSING_A); TEX_IMAGE_MISSING_R, TEX_IMAGE_MISSING_G, TEX_IMAGE_MISSING_B, TEX_IMAGE_MISSING_A);
} }

@ -217,7 +217,8 @@ bool oneapi_load_kernels(SyclQueue *queue_,
const std::string &kernel_name = kernel_id.get_name(); const std::string &kernel_name = kernel_id.get_name();
if (!oneapi_kernel_is_required_for_features(kernel_name, kernel_features) || if (!oneapi_kernel_is_required_for_features(kernel_name, kernel_features) ||
!oneapi_kernel_is_using_embree(kernel_name)) { !oneapi_kernel_is_using_embree(kernel_name))
{
continue; continue;
} }
@ -259,7 +260,8 @@ bool oneapi_load_kernels(SyclQueue *queue_,
/* In case HWRT is on, compilation of kernels using Embree is already handled in previous /* In case HWRT is on, compilation of kernels using Embree is already handled in previous
* block. */ * block. */
if (!oneapi_kernel_is_required_for_features(kernel_name, kernel_features) || if (!oneapi_kernel_is_required_for_features(kernel_name, kernel_features) ||
(use_hardware_raytracing && oneapi_kernel_is_using_embree(kernel_name))) { (use_hardware_raytracing && oneapi_kernel_is_using_embree(kernel_name)))
{
continue; continue;
} }
@ -323,7 +325,8 @@ bool oneapi_enqueue_kernel(KernelContext *kernel_context,
device_kernel == DEVICE_KERNEL_INTEGRATOR_TERMINATED_PATHS_ARRAY || device_kernel == DEVICE_KERNEL_INTEGRATOR_TERMINATED_PATHS_ARRAY ||
device_kernel == DEVICE_KERNEL_INTEGRATOR_TERMINATED_SHADOW_PATHS_ARRAY || device_kernel == DEVICE_KERNEL_INTEGRATOR_TERMINATED_SHADOW_PATHS_ARRAY ||
device_kernel == DEVICE_KERNEL_INTEGRATOR_COMPACT_PATHS_ARRAY || device_kernel == DEVICE_KERNEL_INTEGRATOR_COMPACT_PATHS_ARRAY ||
device_kernel == DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_PATHS_ARRAY) { device_kernel == DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_PATHS_ARRAY)
{
int num_states = *((int *)(args[0])); int num_states = *((int *)(args[0]));
/* Round up to the next work-group. */ /* Round up to the next work-group. */
size_t groups_count = (num_states + local_size - 1) / local_size; size_t groups_count = (num_states + local_size - 1) / local_size;

@ -195,7 +195,8 @@ extern "C" __global__ void __anyhit__kernel_optix_shadow_all_hit()
/* If no transparent shadows, all light is blocked and we can stop immediately. */ /* If no transparent shadows, all light is blocked and we can stop immediately. */
if (num_hits >= max_hits || if (num_hits >= max_hits ||
!(intersection_get_shader_flags(NULL, prim, type) & SD_HAS_TRANSPARENT_SHADOW)) { !(intersection_get_shader_flags(NULL, prim, type) & SD_HAS_TRANSPARENT_SHADOW))
{
optixSetPayload_5(true); optixSetPayload_5(true);
return optixTerminateRay(); return optixTerminateRay();
} }

@ -68,7 +68,8 @@ ccl_device_inline void film_write_data_passes(KernelGlobals kg,
} }
if (!(sd->flag & SD_TRANSPARENT) || kernel_data.film.pass_alpha_threshold == 0.0f || if (!(sd->flag & SD_TRANSPARENT) || kernel_data.film.pass_alpha_threshold == 0.0f ||
average(surface_shader_alpha(kg, sd)) >= kernel_data.film.pass_alpha_threshold) { average(surface_shader_alpha(kg, sd)) >= kernel_data.film.pass_alpha_threshold)
{
if (flag & PASSMASK(NORMAL)) { if (flag & PASSMASK(NORMAL)) {
const float3 normal = surface_shader_average_normal(kg, sd); const float3 normal = surface_shader_average_normal(kg, sd);
film_write_pass_float3(buffer + kernel_data.film.pass_normal, normal); film_write_pass_float3(buffer + kernel_data.film.pass_normal, normal);

@ -366,7 +366,8 @@ ccl_device_inline void film_write_emission_or_background_pass(
const bool is_shadowcatcher = (path_flag & PATH_RAY_SHADOW_CATCHER_HIT) != 0; const bool is_shadowcatcher = (path_flag & PATH_RAY_SHADOW_CATCHER_HIT) != 0;
if (!is_shadowcatcher && lightgroup != LIGHTGROUP_NONE && if (!is_shadowcatcher && lightgroup != LIGHTGROUP_NONE &&
kernel_data.film.pass_lightgroup != PASS_UNUSED) { kernel_data.film.pass_lightgroup != PASS_UNUSED)
{
film_write_pass_spectrum(buffer + kernel_data.film.pass_lightgroup + 3 * lightgroup, film_write_pass_spectrum(buffer + kernel_data.film.pass_lightgroup + 3 * lightgroup,
contribution); contribution);
} }

@ -538,8 +538,8 @@ ccl_device_inline void film_apply_pass_pixel_overlays_rgba(
ccl_global const float *ccl_restrict buffer, ccl_global const float *ccl_restrict buffer,
ccl_private float *ccl_restrict pixel) ccl_private float *ccl_restrict pixel)
{ {
if (kfilm_convert->show_active_pixels && if (kfilm_convert->show_active_pixels && kfilm_convert->pass_adaptive_aux_buffer != PASS_UNUSED)
kfilm_convert->pass_adaptive_aux_buffer != PASS_UNUSED) { {
if (buffer[kfilm_convert->pass_adaptive_aux_buffer + 3] == 0.0f) { if (buffer[kfilm_convert->pass_adaptive_aux_buffer + 3] == 0.0f) {
const float3 active_rgb = make_float3(1.0f, 0.0f, 0.0f); const float3 active_rgb = make_float3(1.0f, 0.0f, 0.0f);
const float3 mix_rgb = interp(make_float3(pixel[0], pixel[1], pixel[2]), active_rgb, 0.5f); const float3 mix_rgb = interp(make_float3(pixel[0], pixel[1], pixel[2]), active_rgb, 0.5f);

@ -77,7 +77,8 @@ find_attribute(KernelGlobals kg, int object, int prim, int type, uint64_t id)
desc.element = (AttributeElement)attr_map.element; desc.element = (AttributeElement)attr_map.element;
if (prim == PRIM_NONE && desc.element != ATTR_ELEMENT_MESH && if (prim == PRIM_NONE && desc.element != ATTR_ELEMENT_MESH &&
desc.element != ATTR_ELEMENT_VOXEL && desc.element != ATTR_ELEMENT_OBJECT) { desc.element != ATTR_ELEMENT_VOXEL && desc.element != ATTR_ELEMENT_OBJECT)
{
return attribute_not_found(); return attribute_not_found();
} }

@ -318,7 +318,8 @@ ccl_device_forceinline float4 primitive_motion_vector(KernelGlobals kg,
} }
else else
#endif #endif
if (sd->type & PRIMITIVE_TRIANGLE) { if (sd->type & PRIMITIVE_TRIANGLE)
{
/* Triangle */ /* Triangle */
if (subd_triangle_patch(kg, sd->prim) == ~0) { if (subd_triangle_patch(kg, sd->prim) == ~0) {
motion_pre = triangle_attribute_float3(kg, sd, desc, NULL, NULL); motion_pre = triangle_attribute_float3(kg, sd, desc, NULL, NULL);

@ -65,7 +65,8 @@ ccl_device_inline void shader_setup_from_ray(KernelGlobals kg,
else else
#endif #endif
#ifdef __POINTCLOUD__ #ifdef __POINTCLOUD__
if (sd->type & PRIMITIVE_POINT) { if (sd->type & PRIMITIVE_POINT)
{
/* point */ /* point */
point_shader_setup(kg, sd, isect, ray); point_shader_setup(kg, sd, isect, ray);
} }

@ -135,7 +135,8 @@ ccl_device_noinline float subd_triangle_attribute_float(KernelGlobals kg,
} }
else else
#endif /* __PATCH_EVAL__ */ #endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE) { if (desc.element == ATTR_ELEMENT_FACE)
{
if (dx) if (dx)
*dx = 0.0f; *dx = 0.0f;
if (dy) if (dy)
@ -275,7 +276,8 @@ ccl_device_noinline float2 subd_triangle_attribute_float2(KernelGlobals kg,
} }
else else
#endif /* __PATCH_EVAL__ */ #endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE) { if (desc.element == ATTR_ELEMENT_FACE)
{
if (dx) if (dx)
*dx = make_float2(0.0f, 0.0f); *dx = make_float2(0.0f, 0.0f);
if (dy) if (dy)
@ -416,7 +418,8 @@ ccl_device_noinline float3 subd_triangle_attribute_float3(KernelGlobals kg,
} }
else else
#endif /* __PATCH_EVAL__ */ #endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE) { if (desc.element == ATTR_ELEMENT_FACE)
{
if (dx) if (dx)
*dx = make_float3(0.0f, 0.0f, 0.0f); *dx = make_float3(0.0f, 0.0f, 0.0f);
if (dy) if (dy)
@ -562,7 +565,8 @@ ccl_device_noinline float4 subd_triangle_attribute_float4(KernelGlobals kg,
} }
else else
#endif /* __PATCH_EVAL__ */ #endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE) { if (desc.element == ATTR_ELEMENT_FACE)
{
if (dx) if (dx)
*dx = zero_float4(); *dx = zero_float4();
if (dy) if (dy)

@ -307,7 +307,8 @@ ccl_device float4 triangle_attribute_float4(KernelGlobals kg,
ccl_private float4 *dy) ccl_private float4 *dy)
{ {
if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER | if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER |
ATTR_ELEMENT_CORNER_BYTE)) { ATTR_ELEMENT_CORNER_BYTE))
{
float4 f0, f1, f2; float4 f0, f1, f2;
if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) { if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {

@ -264,7 +264,8 @@ ccl_device_forceinline void guiding_record_volume_transmission(KernelGlobals kg,
(transmittance_weight[1] < 0.f || !std::isfinite(transmittance_weight[1]) || (transmittance_weight[1] < 0.f || !std::isfinite(transmittance_weight[1]) ||
std::isnan(transmittance_weight[1])) || std::isnan(transmittance_weight[1])) ||
(transmittance_weight[2] < 0.f || !std::isfinite(transmittance_weight[2]) || (transmittance_weight[2] < 0.f || !std::isfinite(transmittance_weight[2]) ||
std::isnan(transmittance_weight[2]))) { std::isnan(transmittance_weight[2])))
{
} }
else { else {
openpgl::cpp::SetTransmittanceWeight(state->guiding.path_segment, openpgl::cpp::SetTransmittanceWeight(state->guiding.path_segment,
@ -459,7 +460,8 @@ ccl_device_forceinline bool guiding_bsdf_init(KernelGlobals kg,
kg->opgl_guiding_field, guiding_point3f(P), rand)) { kg->opgl_guiding_field, guiding_point3f(P), rand)) {
# else # else
if (kg->opgl_surface_sampling_distribution->Init( if (kg->opgl_surface_sampling_distribution->Init(
kg->opgl_guiding_field, guiding_point3f(P), rand, true)) { kg->opgl_guiding_field, guiding_point3f(P), rand, true))
{
# endif # endif
kg->opgl_surface_sampling_distribution->ApplyCosineProduct(guiding_point3f(N)); kg->opgl_surface_sampling_distribution->ApplyCosineProduct(guiding_point3f(N));
return true; return true;
@ -516,7 +518,8 @@ ccl_device_forceinline bool guiding_phase_init(KernelGlobals kg,
kg->opgl_guiding_field, guiding_point3f(P), rand)) { kg->opgl_guiding_field, guiding_point3f(P), rand)) {
# else # else
if (kg->opgl_volume_sampling_distribution->Init( if (kg->opgl_volume_sampling_distribution->Init(
kg->opgl_guiding_field, guiding_point3f(P), rand, true)) { kg->opgl_guiding_field, guiding_point3f(P), rand, true))
{
# endif # endif
kg->opgl_volume_sampling_distribution->ApplySingleLobeHenyeyGreensteinProduct(guiding_vec3f(D), kg->opgl_volume_sampling_distribution->ApplySingleLobeHenyeyGreensteinProduct(guiding_vec3f(D),
g); g);

@ -77,7 +77,8 @@ ccl_device_inline void sort_shadow_intersections(IntegratorShadowState state, ui
swapped = false; swapped = false;
for (int j = 0; j < num_hits - 1; ++j) { for (int j = 0; j < num_hits - 1; ++j) {
if (INTEGRATOR_STATE_ARRAY(state, shadow_isect, j, t) > if (INTEGRATOR_STATE_ARRAY(state, shadow_isect, j, t) >
INTEGRATOR_STATE_ARRAY(state, shadow_isect, j + 1, t)) { INTEGRATOR_STATE_ARRAY(state, shadow_isect, j + 1, t))
{
struct Intersection tmp_j ccl_optional_struct_init; struct Intersection tmp_j ccl_optional_struct_init;
struct Intersection tmp_j_1 ccl_optional_struct_init; struct Intersection tmp_j_1 ccl_optional_struct_init;
integrator_state_read_shadow_isect(state, &tmp_j, j); integrator_state_read_shadow_isect(state, &tmp_j, j);

@ -153,7 +153,8 @@ ccl_device void integrator_volume_stack_init(KernelGlobals kg, IntegratorState s
int step = 0; int step = 0;
while (stack_index < volume_stack_size - 1 && enclosed_index < MAX_VOLUME_STACK_SIZE - 1 && while (stack_index < volume_stack_size - 1 && enclosed_index < MAX_VOLUME_STACK_SIZE - 1 &&
step < 2 * volume_stack_size) { step < 2 * volume_stack_size)
{
Intersection isect; Intersection isect;
if (!scene_intersect_volume(kg, &volume_ray, &isect, visibility)) { if (!scene_intersect_volume(kg, &volume_ray, &isect, visibility)) {
break; break;

@ -87,7 +87,8 @@ ccl_device_forceinline bool integrate_surface_holdout(KernelGlobals kg,
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag); const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
if (((sd->flag & SD_HOLDOUT) || (sd->object_flag & SD_OBJECT_HOLDOUT_MASK)) && if (((sd->flag & SD_HOLDOUT) || (sd->object_flag & SD_OBJECT_HOLDOUT_MASK)) &&
(path_flag & PATH_RAY_TRANSPARENT_BACKGROUND)) { (path_flag & PATH_RAY_TRANSPARENT_BACKGROUND))
{
const Spectrum holdout_weight = surface_shader_apply_holdout(kg, sd); const Spectrum holdout_weight = surface_shader_apply_holdout(kg, sd);
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput); const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
const float transparent = average(holdout_weight * throughput); const float transparent = average(holdout_weight * throughput);
@ -160,7 +161,8 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
sd->flag, sd->flag,
bounce, bounce,
path_flag, path_flag,
&ls)) { &ls))
{
return; return;
} }
} }

@ -441,7 +441,8 @@ ccl_device_forceinline void volume_integrate_step_scattering(
/* Equiangular sampling for direct lighting. */ /* Equiangular sampling for direct lighting. */
if (vstate.direct_sample_method == VOLUME_SAMPLE_EQUIANGULAR && !result.direct_scatter) { if (vstate.direct_sample_method == VOLUME_SAMPLE_EQUIANGULAR && !result.direct_scatter) {
if (result.direct_t >= vstate.tmin && result.direct_t <= vstate.tmax && if (result.direct_t >= vstate.tmin && result.direct_t <= vstate.tmax &&
vstate.equiangular_pdf > VOLUME_SAMPLE_PDF_CUTOFF) { vstate.equiangular_pdf > VOLUME_SAMPLE_PDF_CUTOFF)
{
const float new_dt = result.direct_t - vstate.tmin; const float new_dt = result.direct_t - vstate.tmin;
const Spectrum new_transmittance = volume_color_transmittance(coeff.sigma_t, new_dt); const Spectrum new_transmittance = volume_color_transmittance(coeff.sigma_t, new_dt);
@ -720,7 +721,8 @@ ccl_device_forceinline bool integrate_volume_equiangular_sample_light(
ray->tmax - ray->tmin, ray->tmax - ray->tmin,
bounce, bounce,
path_flag, path_flag,
&ls)) { &ls))
{
return false; return false;
} }
@ -784,7 +786,8 @@ ccl_device_forceinline void integrate_volume_direct_light(
SD_BSDF_HAS_TRANSMISSION, SD_BSDF_HAS_TRANSMISSION,
bounce, bounce,
path_flag, path_flag,
&ls)) { &ls))
{
return; return;
} }
} }

@@ -66,7 +66,8 @@ ccl_device_inline void surface_shader_prepare_guiding(KernelGlobals kg,
/* Init guiding (diffuse BSDFs only for now). */
if (!(diffuse_sampling_fraction > 0.0f &&
- guiding_bsdf_init(kg, state, sd->P, sd->N, rand_bsdf_guiding))) {
+ guiding_bsdf_init(kg, state, sd->P, sd->N, rand_bsdf_guiding)))
+ {
state->guiding.use_surface_guiding = false;
return;
}
@@ -106,12 +107,14 @@ ccl_device_inline void surface_shader_prepare_closures(KernelGlobals kg,
(CLOSURE_IS_BSDF_GLOSSY(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_GLOSSY)) ||
(CLOSURE_IS_BSDF_TRANSMISSION(sc->type) &&
- (kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSMISSION))) {
+ (kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSMISSION)))
+ {
sc->type = CLOSURE_NONE_ID;
sc->sample_weight = 0.0f;
}
else if ((CLOSURE_IS_BSDF_TRANSPARENT(sc->type) &&
- (kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSPARENT))) {
+ (kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSPARENT)))
+ {
sc->type = CLOSURE_HOLDOUT_ID;
sc->sample_weight = 0.0f;
sd->flag |= SD_HOLDOUT;
@@ -127,7 +130,8 @@ ccl_device_inline void surface_shader_prepare_closures(KernelGlobals kg,
* a good heuristic. */
if (INTEGRATOR_STATE(state, path, bounce) + INTEGRATOR_STATE(state, path, transparent_bounce) ==
0 &&
- sd->num_closure > 1) {
+ sd->num_closure > 1)
+ {
float sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
@@ -153,7 +157,8 @@ ccl_device_inline void surface_shader_prepare_closures(KernelGlobals kg,
#ifdef __MNEE__
&& !(INTEGRATOR_STATE(state, path, mnee) & PATH_MNEE_VALID)
#endif
- ) {
+ )
+ {
float blur_pdf = kernel_data.integrator.filter_glossy *
INTEGRATOR_STATE(state, path, min_ray_pdf);

@@ -277,7 +277,8 @@ ccl_device_inline bool area_light_sample(const ccl_global KernelLight *klight,
&sample_axis_v,
&sample_len_v,
klight->area.tan_half_spread,
- &sample_rectangle)) {
+ &sample_rectangle))
+ {
return false;
}
}
@@ -424,7 +425,8 @@ ccl_device_inline bool area_light_sample_from_intersection(
&sample_axis_v,
&sample_len_v,
klight->area.tan_half_spread,
- &sample_rectangle)) {
+ &sample_rectangle))
+ {
return false;
}
}

@@ -140,7 +140,8 @@ ccl_device_noinline bool light_sample(KernelGlobals kg,
/* Exclude synthetic meshes from shadow catcher pass. */
if ((path_flag & PATH_RAY_SHADOW_CATCHER_PASS) &&
- !(kernel_data_fetch(object_flag, object) & SD_OBJECT_SHADOW_CATCHER)) {
+ !(kernel_data_fetch(object_flag, object) & SD_OBJECT_SHADOW_CATCHER))
+ {
return false;
}
@@ -192,7 +193,8 @@ ccl_device bool lights_intersect(KernelGlobals kg,
/* This path should have been resolved with mnee, it will
* generate a firefly for small lights since it is improbable. */
if ((INTEGRATOR_STATE(state, path, mnee) & PATH_MNEE_CULL_LIGHT_CONNECTION) &&
- klight->use_caustics) {
+ klight->use_caustics)
+ {
continue;
}
#endif
@@ -227,7 +229,8 @@ ccl_device bool lights_intersect(KernelGlobals kg,
}
if (t < isect->t &&
- !(last_prim == lamp && last_object == OBJECT_NONE && last_type == PRIMITIVE_LAMP)) {
+ !(last_prim == lamp && last_object == OBJECT_NONE && last_type == PRIMITIVE_LAMP))
+ {
isect->t = t;
isect->u = u;
isect->v = v;

@@ -29,7 +29,8 @@ ccl_device float light_tree_cos_bounding_box_angle(const BoundingBox bbox,
const float3 point_to_centroid)
{
if (P.x > bbox.min.x && P.y > bbox.min.y && P.z > bbox.min.z && P.x < bbox.max.x &&
- P.y < bbox.max.y && P.z < bbox.max.z) {
+ P.y < bbox.max.y && P.z < bbox.max.z)
+ {
/* If P is inside the bbox, `theta_u` covers the whole sphere. */
return -1.0f;
}
@@ -189,7 +190,8 @@ ccl_device void light_tree_importance(const float3 N_or_D,
cos_min_outgoing_angle = 1.0f;
}
else if ((bcone.theta_o + bcone.theta_e > M_PI_F) ||
- (cos_theta_minus_theta_u > cos(bcone.theta_o + bcone.theta_e))) {
+ (cos_theta_minus_theta_u > cos(bcone.theta_o + bcone.theta_e)))
+ {
/* theta' = theta - theta_o - theta_u < theta_e */
kernel_assert(
(fast_acosf(cos_theta) - bcone.theta_o - fast_acosf(cos_theta_u) - bcone.theta_e) < 5e-4f);
@@ -218,7 +220,8 @@ ccl_device void light_tree_importance(const float3 N_or_D,
float cos_max_outgoing_angle;
const float cos_theta_plus_theta_u = cos_theta * cos_theta_u - sin_theta * sin_theta_u;
if (bcone.theta_e - bcone.theta_o < 0 || cos_theta < 0 || cos_theta_u < 0 ||
- cos_theta_plus_theta_u < cos(bcone.theta_e - bcone.theta_o)) {
+ cos_theta_plus_theta_u < cos(bcone.theta_e - bcone.theta_o))
+ {
min_importance = 0.0f;
}
else {
@@ -279,8 +282,8 @@ ccl_device bool compute_emitter_centroid_and_dir(KernelGlobals kg,
dir = -dir;
}
const int object_flag = kernel_data_fetch(object_flag, object);
- if ((object_flag & SD_OBJECT_TRANSFORM_APPLIED) &&
- (object_flag & SD_OBJECT_NEGATIVE_SCALE)) {
+ if ((object_flag & SD_OBJECT_TRANSFORM_APPLIED) && (object_flag & SD_OBJECT_NEGATIVE_SCALE))
+ {
dir = -dir;
}
}
@@ -393,8 +396,8 @@ ccl_device void light_tree_emitter_importance(KernelGlobals kg,
float2 distance; /* distance.x = max_distance, distance.y = mix_distance */
float3 centroid, point_to_centroid, P_c;
- if (!compute_emitter_centroid_and_dir<in_volume_segment>(
- kg, kemitter, P, centroid, bcone.axis)) {
+ if (!compute_emitter_centroid_and_dir<in_volume_segment>(kg, kemitter, P, centroid, bcone.axis))
+ {
return;
}
@@ -706,7 +709,8 @@ ccl_device_noinline bool light_tree_sample(KernelGlobals kg,
float left_prob;
if (!get_left_probability<in_volume_segment>(
- kg, local_P, N_or_D, t, has_transmission, left_index, right_index, left_prob)) {
+ kg, local_P, N_or_D, t, has_transmission, left_index, right_index, left_prob))
+ {
return false; /* Both child nodes have zero importance. */
}
@@ -825,7 +829,8 @@ ccl_device float light_tree_pdf(
float left_prob;
if (!get_left_probability<false>(
- kg, P, N, 0, has_transmission, left_index, right_index, left_prob)) {
+ kg, P, N, 0, has_transmission, left_index, right_index, left_prob))
+ {
return 0.0f;
}

@@ -221,8 +221,8 @@ ccl_device_forceinline bool triangle_light_sample(KernelGlobals kg,
ls->D = z * B + sin_from_cos(z) * safe_normalize(C_ - dot(C_, B) * B);
/* calculate intersection with the planar triangle */
- if (!ray_triangle_intersect(
- P, ls->D, 0.0f, FLT_MAX, V[0], V[1], V[2], &ls->u, &ls->v, &ls->t)) {
+ if (!ray_triangle_intersect(P, ls->D, 0.0f, FLT_MAX, V[0], V[1], V[2], &ls->u, &ls->v, &ls->t))
+ {
ls->pdf = 0.0f;
return false;
}

@@ -52,7 +52,8 @@ ccl_device_forceinline bool osl_closure_skip(KernelGlobals kg,
/* caustic options */
if ((scattering & LABEL_GLOSSY) && (path_flag & PATH_RAY_DIFFUSE)) {
if ((!kernel_data.integrator.caustics_reflective && (scattering & LABEL_REFLECT)) ||
- (!kernel_data.integrator.caustics_refractive && (scattering & LABEL_TRANSMIT))) {
+ (!kernel_data.integrator.caustics_refractive && (scattering & LABEL_TRANSMIT)))
+ {
return true;
}
}
@@ -215,7 +216,8 @@ ccl_device void osl_closure_dielectric_bsdf_setup(KernelGlobals kg,
/* GGX */
if (closure->distribution == make_string("ggx", 11253504724482777663ull) ||
- closure->distribution == make_string("default", 4430693559278735917ull)) {
+ closure->distribution == make_string("default", 4430693559278735917ull))
+ {
if (has_reflection && has_transmission) {
sd->flag |= bsdf_microfacet_ggx_glass_setup(bsdf);
}
@@ -274,7 +276,8 @@ ccl_device void osl_closure_conductor_bsdf_setup(KernelGlobals kg,
/* GGX */
if (closure->distribution == make_string("ggx", 11253504724482777663ull) ||
- closure->distribution == make_string("default", 4430693559278735917ull)) {
+ closure->distribution == make_string("default", 4430693559278735917ull))
+ {
sd->flag |= bsdf_microfacet_ggx_setup(bsdf);
}
/* Beckmann */
@@ -324,7 +327,8 @@ ccl_device void osl_closure_generalized_schlick_bsdf_setup(
/* GGX */
if (closure->distribution == make_string("ggx", 11253504724482777663ull) ||
- closure->distribution == make_string("default", 4430693559278735917ull)) {
+ closure->distribution == make_string("default", 4430693559278735917ull))
+ {
if (has_reflection && has_transmission) {
sd->flag |= bsdf_microfacet_ggx_glass_setup(bsdf);
}

@@ -427,7 +427,8 @@ static bool set_attribute_float2(float2 f[3], TypeDesc type, bool derivatives, v
return true;
}
else if (type == TypeDesc::TypePoint || type == TypeDesc::TypeVector ||
- type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor) {
+ type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor)
+ {
float *fval = (float *)val;
fval[0] = f[0].x;
@@ -497,7 +498,8 @@ static bool set_attribute_float3(float3 f[3], TypeDesc type, bool derivatives, v
return true;
}
else if (type == TypeDesc::TypePoint || type == TypeDesc::TypeVector ||
- type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor) {
+ type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor)
+ {
float *fval = (float *)val;
fval[0] = f[0].x;
@@ -573,7 +575,8 @@ static bool set_attribute_float4(float4 f[3], TypeDesc type, bool derivatives, v
return true;
}
else if (type == TypeDesc::TypePoint || type == TypeDesc::TypeVector ||
- type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor) {
+ type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor)
+ {
fval[0] = f[0].x;
fval[1] = f[0].y;
fval[2] = f[0].z;
@@ -637,7 +640,8 @@ static bool set_attribute_float(float f[3], TypeDesc type, bool derivatives, voi
return true;
}
else if (type == TypeDesc::TypePoint || type == TypeDesc::TypeVector ||
- type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor) {
+ type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor)
+ {
float *fval = (float *)val;
fval[0] = f[0];
fval[1] = f[0];
@@ -928,7 +932,8 @@ bool OSLRenderServices::get_object_standard_attribute(const KernelGlobalsCPU *kg
return set_attribute_int(3, type, derivatives, val);
}
else if ((name == u_geom_trianglevertices || name == u_geom_polyvertices) &&
- sd->type & PRIMITIVE_TRIANGLE) {
+ sd->type & PRIMITIVE_TRIANGLE)
+ {
float3 P[3];
if (sd->type & PRIMITIVE_MOTION) {
@@ -1068,7 +1073,8 @@ bool OSLRenderServices::get_background_attribute(const KernelGlobalsCPU *kg,
float3 ndc[3];
if ((globals->raytype & PATH_RAY_CAMERA) && sd->object == OBJECT_NONE &&
- kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
+ kernel_data.cam.type == CAMERA_ORTHOGRAPHIC)
+ {
ndc[0] = camera_world_to_ndc(kg, sd, sd->ray_P);
if (derivatives) {

@@ -810,16 +810,16 @@ ccl_device_inline bool set_attribute_float(ccl_private float fval[3],
const int type_arraylen = type >> 32;
if (type_basetype == 11 /* TypeDesc::FLOAT */) {
- if ((type_aggregate == 2 /* TypeDesc::VEC2 */) ||
- (type_aggregate == 1 && type_arraylen == 2)) {
+ if ((type_aggregate == 2 /* TypeDesc::VEC2 */) || (type_aggregate == 1 && type_arraylen == 2))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 2 + 0] = fval[i];
static_cast<ccl_private float *>(val)[i * 2 + 1] = fval[i];
}
return true;
}
- if ((type_aggregate == 3 /* TypeDesc::VEC3 */) ||
- (type_aggregate == 1 && type_arraylen == 3)) {
+ if ((type_aggregate == 3 /* TypeDesc::VEC3 */) || (type_aggregate == 1 && type_arraylen == 3))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 3 + 0] = fval[i];
static_cast<ccl_private float *>(val)[i * 3 + 1] = fval[i];
@@ -827,8 +827,8 @@ ccl_device_inline bool set_attribute_float(ccl_private float fval[3],
}
return true;
}
- if ((type_aggregate == 4 /* TypeDesc::VEC4 */) ||
- (type_aggregate == 1 && type_arraylen == 4)) {
+ if ((type_aggregate == 4 /* TypeDesc::VEC4 */) || (type_aggregate == 1 && type_arraylen == 4))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 4 + 0] = fval[i];
static_cast<ccl_private float *>(val)[i * 4 + 1] = fval[i];
@@ -870,16 +870,16 @@ ccl_device_inline bool set_attribute_float2(ccl_private float2 fval[3],
const int type_arraylen = type >> 32;
if (type_basetype == 11 /* TypeDesc::FLOAT */) {
- if ((type_aggregate == 2 /* TypeDesc::VEC2 */) ||
- (type_aggregate == 1 && type_arraylen == 2)) {
+ if ((type_aggregate == 2 /* TypeDesc::VEC2 */) || (type_aggregate == 1 && type_arraylen == 2))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 2 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 2 + 1] = fval[i].y;
}
return true;
}
- if ((type_aggregate == 3 /* TypeDesc::VEC3 */) ||
- (type_aggregate == 1 && type_arraylen == 3)) {
+ if ((type_aggregate == 3 /* TypeDesc::VEC3 */) || (type_aggregate == 1 && type_arraylen == 3))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 3 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 3 + 1] = fval[i].y;
@@ -887,8 +887,8 @@ ccl_device_inline bool set_attribute_float2(ccl_private float2 fval[3],
}
return true;
}
- if ((type_aggregate == 4 /* TypeDesc::VEC4 */) ||
- (type_aggregate == 1 && type_arraylen == 4)) {
+ if ((type_aggregate == 4 /* TypeDesc::VEC4 */) || (type_aggregate == 1 && type_arraylen == 4))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 4 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 4 + 1] = fval[i].y;
@@ -917,8 +917,8 @@ ccl_device_inline bool set_attribute_float3(ccl_private float3 fval[3],
const int type_arraylen = type >> 32;
if (type_basetype == 11 /* TypeDesc::FLOAT */) {
- if ((type_aggregate == 3 /* TypeDesc::VEC3 */) ||
- (type_aggregate == 1 && type_arraylen == 3)) {
+ if ((type_aggregate == 3 /* TypeDesc::VEC3 */) || (type_aggregate == 1 && type_arraylen == 3))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 3 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 3 + 1] = fval[i].y;
@@ -926,8 +926,8 @@ ccl_device_inline bool set_attribute_float3(ccl_private float3 fval[3],
}
return true;
}
- if ((type_aggregate == 4 /* TypeDesc::VEC4 */) ||
- (type_aggregate == 1 && type_arraylen == 4)) {
+ if ((type_aggregate == 4 /* TypeDesc::VEC4 */) || (type_aggregate == 1 && type_arraylen == 4))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 4 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 4 + 1] = fval[i].y;
@@ -969,8 +969,8 @@ ccl_device_inline bool set_attribute_float4(ccl_private float4 fval[3],
const int type_arraylen = type >> 32;
if (type_basetype == 11 /* TypeDesc::FLOAT */) {
- if ((type_aggregate == 3 /* TypeDesc::VEC3 */) ||
- (type_aggregate == 1 && type_arraylen == 3)) {
+ if ((type_aggregate == 3 /* TypeDesc::VEC3 */) || (type_aggregate == 1 && type_arraylen == 3))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 3 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 3 + 1] = fval[i].y;
@@ -978,8 +978,8 @@ ccl_device_inline bool set_attribute_float4(ccl_private float4 fval[3],
}
return true;
}
- if ((type_aggregate == 4 /* TypeDesc::VEC4 */) ||
- (type_aggregate == 1 && type_arraylen == 4)) {
+ if ((type_aggregate == 4 /* TypeDesc::VEC4 */) || (type_aggregate == 1 && type_arraylen == 4))
+ {
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 4 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 4 + 1] = fval[i].y;

@@ -31,7 +31,8 @@ shader node_normal_map(float Strength = 1.0,
// get _unnormalized_ interpolated normal and tangent
if (getattribute(attr_name, tangent) && getattribute(attr_sign_name, tangent_sign) &&
- (!is_smooth || getattribute("geom:normal_map_normal", ninterp))) {
+ (!is_smooth || getattribute("geom:normal_map_normal", ninterp)))
+ {
// apply normal map
vector B = tangent_sign * cross(ninterp, tangent);
Normal = normalize(mcolor[0] * tangent + mcolor[1] * B + mcolor[2] * ninterp);

@@ -52,8 +52,7 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
return svm_node_closure_bsdf_skip(kg, offset, type);
}
}
- else
- {
+ else {
return svm_node_closure_bsdf_skip(kg, offset, type);
}
@@ -256,7 +255,8 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
if (kernel_data.integrator.caustics_reflective || (path_flag & PATH_RAY_DIFFUSE) == 0) {
#endif
if (specular_weight > CLOSURE_WEIGHT_CUTOFF &&
- (specular > CLOSURE_WEIGHT_CUTOFF || metallic > CLOSURE_WEIGHT_CUTOFF)) {
+ (specular > CLOSURE_WEIGHT_CUTOFF || metallic > CLOSURE_WEIGHT_CUTOFF))
+ {
Spectrum spec_weight = weight * specular_weight;
ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
@@ -288,8 +288,8 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
fresnel->color = rgb_to_spectrum(base_color);
/* setup bsdf */
- if (distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID ||
- roughness <= 0.075f) { /* use single-scatter GGX */
+ if (distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID || roughness <= 0.075f)
+ { /* use single-scatter GGX */
sd->flag |= bsdf_microfacet_ggx_setup(bsdf);
bsdf_microfacet_setup_fresnel_principledv1(bsdf, sd, fresnel);
}
@@ -306,14 +306,15 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
/* BSDF */
#ifdef __CAUSTICS_TRICKS__
if (kernel_data.integrator.caustics_reflective ||
- kernel_data.integrator.caustics_refractive || (path_flag & PATH_RAY_DIFFUSE) == 0) {
+ kernel_data.integrator.caustics_refractive || (path_flag & PATH_RAY_DIFFUSE) == 0)
+ {
#endif
if (final_transmission > CLOSURE_WEIGHT_CUTOFF) {
Spectrum glass_weight = weight * final_transmission;
float3 cspec0 = base_color * specular_tint + make_float3(1.0f - specular_tint);
- if (roughness <= 5e-2f ||
- distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID) { /* use single-scatter GGX */
+ if (roughness <= 5e-2f || distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID)
+ { /* use single-scatter GGX */
float refl_roughness = roughness;
/* reflection */

@@ -73,8 +73,7 @@ ccl_device_noinline void svm_node_set_bump(KernelGlobals kg,
stack_store_float3(stack, node.w, normal_out);
}
- else
- {
+ else {
stack_store_float3(stack, node.w, zero_float3());
}
#endif
@@ -128,8 +127,7 @@ ccl_device_noinline void svm_node_displacement(KernelGlobals kg,
stack_store_float3(stack, node.z, dP);
}
- else
- {
+ else {
stack_store_float3(stack, node.z, zero_float3());
}
}
@@ -183,8 +181,7 @@ ccl_device_noinline int svm_node_vector_displacement(
stack_store_float3(stack, displacement_offset, dP);
}
- else
- {
+ else {
stack_store_float3(stack, displacement_offset, zero_float3());
(void)data_node;
}

@@ -47,7 +47,8 @@ ccl_device_noinline int svm_node_vector_math(KernelGlobals kg,
/* 3 Vector Operators */
if (type == NODE_VECTOR_MATH_WRAP || type == NODE_VECTOR_MATH_FACEFORWARD ||
- type == NODE_VECTOR_MATH_MULTIPLY_ADD) {
+ type == NODE_VECTOR_MATH_MULTIPLY_ADD)
+ {
uint4 extra_node = read_node(kg, &offset);
c = stack_load_float3(stack, extra_node.x);
}

@@ -144,7 +144,8 @@ ccl_device float3 sky_radiance_nishita(KernelGlobals kg,
/* If the ray is inside the sun disc, render it, otherwise render the sky.
* Alternatively, ignore the sun if we're evaluating the background texture. */
if (sun_disc && sun_dir_angle < half_angular &&
- !((path_flag & PATH_RAY_IMPORTANCE_BAKE) && kernel_data.background.use_sun_guiding)) {
+ !((path_flag & PATH_RAY_IMPORTANCE_BAKE) && kernel_data.background.use_sun_guiding))
+ {
/* get 2 pixels data */
float y;

@@ -49,7 +49,8 @@ ccl_device_noinline void svm_node_vector_transform(KernelGlobals kg,
/* From camera */
else if (from == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_CAMERA) {
if (to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_WORLD ||
- to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_OBJECT) {
+ to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_OBJECT)
+ {
tfm = kernel_data.cam.cameratoworld;
if (is_direction)
in = transform_direction(&tfm, in);
@@ -68,7 +69,8 @@ ccl_device_noinline void svm_node_vector_transform(KernelGlobals kg,
else if (from == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_OBJECT) {
if ((to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_WORLD ||
to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_CAMERA) &&
- is_object) {
+ is_object)
+ {
if (is_direction)
object_dir_transform(kg, sd, &in);
else

@@ -254,7 +254,8 @@ static M44d convert_yup_zup(const M44d &mtx, float scale_mult)
rotation,
translation,
true,
- IMATH_INTERNAL_NAMESPACE::Euler<double>::XZY)) {
+ IMATH_INTERNAL_NAMESPACE::Euler<double>::XZY))
+ {
return mtx;
}
@@ -806,7 +807,8 @@ void AlembicProcedural::generate(Scene *scene, Progress &progress)
/* Check if the shaders were modified. */
if (object->used_shaders_is_modified() && object->get_object() &&
- object->get_object()->get_geometry()) {
+ object->get_object()->get_geometry())
+ {
Geometry *geometry = object->get_object()->get_geometry();
array<Node *> used_shaders = object->get_used_shaders();
geometry->set_used_shaders(used_shaders);
@@ -908,7 +910,8 @@ void AlembicProcedural::generate(Scene *scene, Progress &progress)
/* skip constant objects */
if (object->is_constant() && !object->is_modified() && !object->need_shader_update &&
- !scale_is_modified()) {
+ !scale_is_modified())
+ {
continue;
}
@@ -994,7 +997,8 @@ void AlembicProcedural::load_objects(Progress &progress)
geometry = scene_->create_node<PointCloud>();
}
else if (abc_object->schema_type == AlembicObject::POLY_MESH ||
- abc_object->schema_type == AlembicObject::SUBD) {
+ abc_object->schema_type == AlembicObject::SUBD)
+ {
geometry = scene_->create_node<Mesh>();
}
else {
@@ -1469,7 +1473,8 @@ void AlembicProcedural::build_caches(Progress &progress)
}
else if (object->schema_type == AlembicObject::CURVES) {
if (!object->has_data_loaded() || default_radius_is_modified() ||
- object->radius_scale_is_modified()) {
+ object->radius_scale_is_modified())
+ {
ICurves curves(object->iobject, Alembic::Abc::kWrapExisting);
ICurvesSchema schema = curves.getSchema();
object->load_data_in_cache(object->get_cached_data(), this, schema, progress);
@@ -1477,7 +1482,8 @@ void AlembicProcedural::build_caches(Progress &progress)
}
else if (object->schema_type == AlembicObject::POINTS) {
if (!object->has_data_loaded() || default_radius_is_modified() ||
- object->radius_scale_is_modified()) {
+ object->radius_scale_is_modified())
+ {
IPoints points(object->iobject, Alembic::Abc::kWrapExisting);
IPointsSchema schema = points.getSchema();
object->load_data_in_cache(object->get_cached_data(), this, schema, progress);

@@ -470,7 +470,8 @@ static void add_subd_edge_creases(CachedData &cached_data,
chrono_t time)
{
if (!(data.crease_indices.valid() && data.crease_lengths.valid() &&
- data.crease_sharpnesses.valid())) {
+ data.crease_sharpnesses.valid()))
+ {
return;
}
@@ -519,7 +520,8 @@ static void add_subd_vertex_creases(CachedData &cached_data,
const FloatArraySamplePtr creases_sharpnesses = data.crease_sharpnesses.getValue(iss);
if (!(creases_indices && creases_sharpnesses) ||
- creases_indices->size() != creases_sharpnesses->size()) {
+ creases_indices->size() != creases_sharpnesses->size())
+ {
return;
}

@@ -278,9 +278,11 @@ bool Attribute::same_storage(TypeDesc a, TypeDesc b)
return true;
if (a == TypeDesc::TypeColor || a == TypeDesc::TypePoint || a == TypeDesc::TypeVector ||
- a == TypeDesc::TypeNormal) {
+ a == TypeDesc::TypeNormal)
+ {
if (b == TypeDesc::TypeColor || b == TypeDesc::TypePoint || b == TypeDesc::TypeVector ||
- b == TypeDesc::TypeNormal) {
+ b == TypeDesc::TypeNormal)
+ {
return true;
}
}

@@ -72,7 +72,8 @@ ColorSpaceProcessor *ColorSpaceManager::get_processor(ustring colorspace)
bool ColorSpaceManager::colorspace_is_data(ustring colorspace)
{
if (colorspace == u_colorspace_auto || colorspace == u_colorspace_raw ||
- colorspace == u_colorspace_srgb) {
+ colorspace == u_colorspace_srgb)
+ {
return false;
}
@@ -200,14 +201,16 @@ void ColorSpaceManager::is_builtin_colorspace(ustring colorspace,
/* Make sure that there is no channel crosstalk. */
if (fabsf(cR[1]) > 1e-5f || fabsf(cR[2]) > 1e-5f || fabsf(cG[0]) > 1e-5f ||
- fabsf(cG[2]) > 1e-5f || fabsf(cB[0]) > 1e-5f || fabsf(cB[1]) > 1e-5f) {
+ fabsf(cG[2]) > 1e-5f || fabsf(cB[0]) > 1e-5f || fabsf(cB[1]) > 1e-5f)
+ {
is_scene_linear = false;
is_srgb = false;
break;
}
/* Make sure that the three primaries combine linearly. */
if (!compare_floats(cR[0], cW[0], 1e-6f, 64) || !compare_floats(cG[1], cW[1], 1e-6f, 64) ||
- !compare_floats(cB[2], cW[2], 1e-6f, 64)) {
+ !compare_floats(cB[2], cW[2], 1e-6f, 64))
+ {
is_scene_linear = false;
is_srgb = false;
break;

Some files were not shown because too many files have changed in this diff.