blender/intern/cycles/kernel/split/kernel_next_iteration_setup.h
Mai Lavelle 915766f42d Cycles: Branched path tracing for the split kernel
This implements branched path tracing for the split kernel.

The general approach is to store the ray state at a branch point, trace the
branched ray as normal, then restore the state as necessary before iterating
to the next part of the path. A state machine is used to advance the indirect
loop state, which avoids the need to add any new kernels. Each iteration, the
state machine recreates as much state as possible from the stored ray to keep
overall storage down.
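
The following is a minimal standalone C sketch of that state-machine pattern.
It is for illustration only, not the kernel code itself; the names
(BranchedPhase, BranchedState, indirect_iter) are invented for the example:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum {
      PHASE_LIGHT_INDIRECT,
      PHASE_VOLUME_INDIRECT,
      PHASE_SUBSURFACE_INDIRECT,
      PHASE_DONE
    } BranchedPhase;

    typedef struct {
      BranchedPhase phase;  /* which indirect loop the ray is currently in */
      int closure;          /* next closure to sample within this phase */
      int num_closures;     /* closures recorded at the branch point */
    } BranchedState;

    /* Trace one indirect sample; return false once this phase has no work left. */
    static bool indirect_iter(BranchedState *bs)
    {
      if(bs->closure >= bs->num_closures) {
        return false;
      }
      printf("phase %d: sampling closure %d\n", (int)bs->phase, bs->closure);
      bs->closure++;
      return true;
    }

    int main(void)
    {
      BranchedState bs = {PHASE_LIGHT_INDIRECT, 0, 2};

      /* The same loop body serves every phase: when one phase runs out of work,
       * per-phase state is re-created from the stored ray and the state machine
       * advances, so no extra kernels are needed. */
      while(bs.phase != PHASE_DONE) {
        if(!indirect_iter(&bs)) {
          bs.phase++;
          bs.closure = 0;
        }
      }
      return 0;
    }

In the actual split kernel the stored state lives in
kernel_split_state.branched_state, and the per-phase work is done by functions
such as kernel_split_branched_path_surface_indirect_light_iter().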

It's kind of hard to keep all the different integration loops in sync, so this
needs lots of testing to make sure everything is working correctly. We should
probably start trying to deduplicate the integration loops more now.

Non-branched BMW is ~2% slower, while classroom is ~2% faster; other scenes
could still use more testing.

Reviewers: sergey, nirved

Reviewed By: nirved

Subscribers: Blendify, bliblubli

Differential Revision: https://developer.blender.org/D2611
2017-05-02 14:26:46 -04:00


/*
 * Copyright 2011-2015 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CCL_NAMESPACE_BEGIN

/* This kernel takes care of setting up the ray for the next iteration of
 * path-iteration and accumulating radiance corresponding to AO and
 * direct-lighting.
 *
 * The ray state of rays that are terminated in this kernel is changed
 * to RAY_UPDATE_BUFFER.
 *
 * Note on queues:
 * This kernel fetches rays from the queue QUEUE_ACTIVE_AND_REGENERATED_RAYS
 * and processes only the rays of state RAY_ACTIVE.
 * There are different points in this kernel where a ray may terminate and
 * reach the RAY_UPDATE_BUFFER state. These rays are enqueued into the
 * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue. They will still be present
 * in the QUEUE_ACTIVE_AND_REGENERATED_RAYS queue, but since their ray state
 * has been changed to RAY_UPDATE_BUFFER, this is not a problem.
 *
 * State of queues when this kernel is called:
 * At entry,
 *   - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE,
 *     RAY_REGENERATED and RAY_UPDATE_BUFFER rays.
 *   - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
 *     RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays.
 * At exit,
 *   - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE,
 *     RAY_REGENERATED and more RAY_UPDATE_BUFFER rays.
 *   - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
 *     RAY_TO_REGENERATE and more RAY_UPDATE_BUFFER rays.
 */

#ifdef __BRANCHED_PATH__
ccl_device_inline void kernel_split_branched_indirect_light_init(KernelGlobals *kg, int ray_index)
{
    kernel_split_branched_path_indirect_loop_init(kg, ray_index);

    ADD_RAY_FLAG(kernel_split_state.ray_state, ray_index, RAY_BRANCHED_LIGHT_INDIRECT);
}

ccl_device void kernel_split_branched_indirect_light_end(KernelGlobals *kg, int ray_index)
{
    kernel_split_branched_path_indirect_loop_end(kg, ray_index);

    ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
    ShaderData *sd = &kernel_split_state.sd[ray_index];
    ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
    ccl_global Ray *ray = &kernel_split_state.ray[ray_index];

    /* continue in case of transparency */
    *throughput *= shader_bsdf_transparency(kg, sd);

    if(is_zero(*throughput)) {
        kernel_split_path_end(kg, ray_index);
    }
    else {
        /* Update Path State */
        state->flag |= PATH_RAY_TRANSPARENT;
        state->transparent_bounce++;

        ray->P = ray_offset(sd->P, -sd->Ng);
        ray->t -= sd->ray_length; /* clipping works through transparent */

# ifdef __RAY_DIFFERENTIALS__
        ray->dP = sd->dP;
        ray->dD.dx = -sd->dI.dx;
        ray->dD.dy = -sd->dI.dy;
# endif /* __RAY_DIFFERENTIALS__ */

# ifdef __VOLUME__
        /* enter/exit volume */
        kernel_volume_stack_enter_exit(kg, sd, state->volume_stack);
# endif /* __VOLUME__ */
    }
}
#endif /* __BRANCHED_PATH__ */
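
/* kernel_next_iteration_setup() below follows the queue protocol described in
 * the comment at the top of this file: it advances RAY_ACTIVE rays (regular
 * bounce or branched indirect iteration) and then enqueues rays whose state
 * changed so that later kernels can pick them up. */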
ccl_device void kernel_next_iteration_setup(KernelGlobals *kg,
                                            ccl_local_param unsigned int *local_queue_atomics)
{
    if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
        *local_queue_atomics = 0;
    }
    ccl_barrier(CCL_LOCAL_MEM_FENCE);

    if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
        /* If we are here, then it means that the scene-intersect kernel
         * has already been executed at least once. From now on, the
         * scene-intersect kernel may operate on queues to fetch the ray index.
         */
        *kernel_split_params.use_queues_flag = 1;

        /* Mark the queue indices of the QUEUE_SHADOW_RAY_CAST_AO_RAYS and
         * QUEUE_SHADOW_RAY_CAST_DL_RAYS queues that were made empty during the
         * previous kernel.
         */
        kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS] = 0;
        kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS] = 0;
    }

    int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
    ray_index = get_ray_index(kg, ray_index,
                              QUEUE_ACTIVE_AND_REGENERATED_RAYS,
                              kernel_split_state.queue_data,
                              kernel_split_params.queue_size,
                              0);

    ccl_global char *ray_state = kernel_split_state.ray_state;

    bool active = IS_STATE(ray_state, ray_index, RAY_ACTIVE);
    if(active) {
        ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
        ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
        RNG rng = kernel_split_state.rng[ray_index];
        ShaderData *sd = &kernel_split_state.sd[ray_index];
        ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
        PathRadiance *L = &kernel_split_state.path_radiance[ray_index];

#ifdef __BRANCHED_PATH__
        if(!kernel_data.integrator.branched || IS_FLAG(ray_state, ray_index, RAY_BRANCHED_INDIRECT)) {
#endif
            /* Compute direct lighting and next bounce. */
            if(!kernel_path_surface_bounce(kg, &rng, sd, throughput, state, L, ray)) {
                kernel_split_path_end(kg, ray_index);
            }
#ifdef __BRANCHED_PATH__
        }
        else {
            kernel_split_branched_indirect_light_init(kg, ray_index);

            if(kernel_split_branched_path_surface_indirect_light_iter(kg,
                                                                      ray_index,
                                                                      1.0f,
                                                                      &kernel_split_state.branched_state[ray_index].sd,
                                                                      true))
            {
                ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
            }
            else {
                kernel_split_branched_indirect_light_end(kg, ray_index);
            }
        }
#endif /* __BRANCHED_PATH__ */
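
        /* Store the RNG state back for the next iteration of the path. */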
        kernel_split_state.rng[ray_index] = rng;
    }

    /* Enqueue RAY_UPDATE_BUFFER rays. */
    enqueue_ray_index_local(ray_index,
                            QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
                            IS_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER) && active,
                            kernel_split_params.queue_size,
                            local_queue_atomics,
                            kernel_split_state.queue_data,
                            kernel_split_params.queue_index);

#ifdef __BRANCHED_PATH__
    /* Iteration loop for branched light-indirect rays. */
    if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
        kernel_split_params.queue_index[QUEUE_LIGHT_INDIRECT_ITER] = 0;
    }

    ray_index = get_ray_index(kg, ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0),
                              QUEUE_LIGHT_INDIRECT_ITER,
                              kernel_split_state.queue_data,
                              kernel_split_params.queue_size,
                              1);

    if(IS_STATE(ray_state, ray_index, RAY_LIGHT_INDIRECT_NEXT_ITER)) {
        /* For render passes, sum and reset the indirect light pass variables
         * for the next samples. */
        PathRadiance *L = &kernel_split_state.path_radiance[ray_index];

        path_radiance_sum_indirect(L);
        path_radiance_reset_indirect(L);

        if(kernel_split_branched_path_surface_indirect_light_iter(kg,
                                                                  ray_index,
                                                                  1.0f,
                                                                  &kernel_split_state.branched_state[ray_index].sd,
                                                                  true))
        {
            ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
        }
        else {
            kernel_split_branched_indirect_light_end(kg, ray_index);
        }
    }

# ifdef __VOLUME__
    /* Enqueue RAY_VOLUME_INDIRECT_NEXT_ITER rays */
    ccl_barrier(CCL_LOCAL_MEM_FENCE);
    if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
        *local_queue_atomics = 0;
    }
    ccl_barrier(CCL_LOCAL_MEM_FENCE);

    ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
    enqueue_ray_index_local(ray_index,
                            QUEUE_VOLUME_INDIRECT_ITER,
                            IS_STATE(kernel_split_state.ray_state, ray_index, RAY_VOLUME_INDIRECT_NEXT_ITER),
                            kernel_split_params.queue_size,
                            local_queue_atomics,
                            kernel_split_state.queue_data,
                            kernel_split_params.queue_index);
# endif /* __VOLUME__ */

# ifdef __SUBSURFACE__
    /* Enqueue RAY_SUBSURFACE_INDIRECT_NEXT_ITER rays */
    ccl_barrier(CCL_LOCAL_MEM_FENCE);
    if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
        *local_queue_atomics = 0;
    }
    ccl_barrier(CCL_LOCAL_MEM_FENCE);

    ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
    enqueue_ray_index_local(ray_index,
                            QUEUE_SUBSURFACE_INDIRECT_ITER,
                            IS_STATE(kernel_split_state.ray_state, ray_index, RAY_SUBSURFACE_INDIRECT_NEXT_ITER),
                            kernel_split_params.queue_size,
                            local_queue_atomics,
                            kernel_split_state.queue_data,
                            kernel_split_params.queue_index);
# endif /* __SUBSURFACE__ */
#endif /* __BRANCHED_PATH__ */
}
CCL_NAMESPACE_END