blender/intern/cycles/util/util_atomic.h
Stefan Werner 51e898324d Adaptive Sampling for Cycles.
This feature takes some inspiration from
"RenderMan: An Advanced Path Tracing Architecture for Movie Rendering" and
"A Hierarchical Automatic Stopping Condition for Monte Carlo Global Illumination"

The basic principle is as follows:
While samples are being added to a pixel, the adaptive sampler writes half
of the samples to a separate buffer. This gives it two separate estimates
of the same pixel, and the difference between them serves as an estimate of
the remaining error. Once that error drops below a given threshold, the
pixel is considered converged and done.
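
As a rough sketch (not the actual Cycles implementation; the buffer layout,
names and error metric below are simplified assumptions), the per-pixel
convergence test works along these lines:

#include <math.h>

/* Hypothetical per-pixel state: a mean over all samples taken so far, and a
 * mean over every second sample that was also written to a separate buffer. */
typedef struct PixelEstimate {
  float full[3];
  float half[3];
} PixelEstimate;

/* Compare the two estimates; if their relative difference is small enough,
 * the pixel is treated as converged. The exact metric is illustrative only. */
static int pixel_has_converged(const PixelEstimate *p, float threshold)
{
  float diff = 0.0f, lum = 0.0f;
  for (int c = 0; c < 3; c++) {
    diff += fabsf(p->full[c] - p->half[c]);
    lum += p->full[c];
  }
  return (diff / fmaxf(lum, 1e-4f)) < threshold;
}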

When a pixel has not converged yet and needs more samples than the minimum,
its immediate neighbors are also set to take more samples. This is done in order
to more reliably detect sharp features such as caustics. A 3x3 box filter that
is run periodically over the tile buffer is used for that purpose.
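
A minimal sketch of that neighbor propagation, assuming a per-pixel
"needs more samples" flag buffer (the flag layout is an assumption, not the
actual tile buffer format):

/* Mark a pixel as needing more samples if any pixel in its 3x3 neighborhood
 * still does, so isolated sharp features keep pulling in samples around them. */
static void propagate_unconverged(const unsigned char *in,
                                  unsigned char *out,
                                  int width,
                                  int height)
{
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      unsigned char needs_samples = 0;
      for (int dy = -1; dy <= 1; dy++) {
        for (int dx = -1; dx <= 1; dx++) {
          int nx = x + dx, ny = y + dy;
          if (nx >= 0 && nx < width && ny >= 0 && ny < height) {
            needs_samples |= in[ny * width + nx];
          }
        }
      }
      out[y * width + x] = needs_samples;
    }
  }
}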

After a tile has finished rendering, the values of all passes are scaled as if
they were rendered with the full number of samples. This way, any code operating
on these buffers, for example the denoiser, does not need to be changed for
per-pixel sample counts.
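
Sketched out, that scaling step could look like the following (names and
buffer layout are illustrative, not the actual pass storage):

/* Scale one pixel's accumulated pass values so they match a pixel rendered
 * with the full sample count; downstream code can then keep assuming a
 * uniform number of samples per pixel. */
static void scale_to_full_samples(float *pass_values,
                                  int num_values,
                                  int samples_taken,
                                  int full_samples)
{
  const float scale = (float)full_samples / (float)samples_taken;
  for (int i = 0; i < num_values; i++) {
    pass_values[i] *= scale;
  }
}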

Reviewed By: brecht, #cycles

Differential Revision: https://developer.blender.org/D4686
2020-03-05 12:21:38 +01:00

/*
 * Copyright 2014 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __UTIL_ATOMIC_H__
#define __UTIL_ATOMIC_H__

#ifndef __KERNEL_GPU__

/* Using atomic ops header from Blender. */
# include "atomic_ops.h"

# define atomic_add_and_fetch_float(p, x) atomic_add_and_fetch_fl((p), (x))
# define atomic_compare_and_swap_float(p, old_val, new_val) \
    atomic_cas_float((p), (old_val), (new_val))

# define atomic_fetch_and_inc_uint32(p) atomic_fetch_and_add_uint32((p), 1)
# define atomic_fetch_and_dec_uint32(p) atomic_fetch_and_add_uint32((p), -1)

# define CCL_LOCAL_MEM_FENCE 0
# define ccl_barrier(flags) ((void)0)

#else /* __KERNEL_GPU__ */

# ifdef __KERNEL_OPENCL__

/* Float atomics implementation credits:
 * http://suhorukov.blogspot.in/2011/12/opencl-11-atomic-operations-on-floating.html
 */
ccl_device_inline float atomic_add_and_fetch_float(volatile ccl_global float *source,
                                                   const float operand)
{
  union {
    unsigned int int_value;
    float float_value;
  } new_value;
  union {
    unsigned int int_value;
    float float_value;
  } prev_value;
  do {
    prev_value.float_value = *source;
    new_value.float_value = prev_value.float_value + operand;
  } while (atomic_cmpxchg((volatile ccl_global unsigned int *)source,
                          prev_value.int_value,
                          new_value.int_value) != prev_value.int_value);
  return new_value.float_value;
}

ccl_device_inline float atomic_compare_and_swap_float(volatile ccl_global float *dest,
                                                      const float old_val,
                                                      const float new_val)
{
  union {
    unsigned int int_value;
    float float_value;
  } new_value, prev_value, result;
  prev_value.float_value = old_val;
  new_value.float_value = new_val;
  result.int_value = atomic_cmpxchg(
      (volatile ccl_global unsigned int *)dest, prev_value.int_value, new_value.int_value);
  return result.float_value;
}

# define atomic_fetch_and_add_uint32(p, x) atomic_add((p), (x))
# define atomic_fetch_and_inc_uint32(p) atomic_inc((p))
# define atomic_fetch_and_dec_uint32(p) atomic_dec((p))
# define atomic_fetch_and_or_uint32(p, x) atomic_or((p), (x))

# define CCL_LOCAL_MEM_FENCE CLK_LOCAL_MEM_FENCE
# define ccl_barrier(flags) barrier(flags)

# endif /* __KERNEL_OPENCL__ */

# ifdef __KERNEL_CUDA__

/* CUDA's atomicAdd() returns the value before the addition, so the operand is
 * added again to produce the post-add value expected from add-and-fetch. */
# define atomic_add_and_fetch_float(p, x) (atomicAdd((float *)(p), (float)(x)) + (float)(x))

# define atomic_fetch_and_add_uint32(p, x) atomicAdd((unsigned int *)(p), (unsigned int)(x))
# define atomic_fetch_and_sub_uint32(p, x) atomicSub((unsigned int *)(p), (unsigned int)(x))
# define atomic_fetch_and_inc_uint32(p) atomic_fetch_and_add_uint32((p), 1)
# define atomic_fetch_and_dec_uint32(p) atomic_fetch_and_sub_uint32((p), 1)
# define atomic_fetch_and_or_uint32(p, x) atomicOr((unsigned int *)(p), (unsigned int)(x))

ccl_device_inline float atomic_compare_and_swap_float(volatile float *dest,
                                                      const float old_val,
                                                      const float new_val)
{
  union {
    unsigned int int_value;
    float float_value;
  } new_value, prev_value, result;
  prev_value.float_value = old_val;
  new_value.float_value = new_val;
  result.int_value = atomicCAS((unsigned int *)dest, prev_value.int_value, new_value.int_value);
  return result.float_value;
}

# define CCL_LOCAL_MEM_FENCE
# define ccl_barrier(flags) __syncthreads()

# endif /* __KERNEL_CUDA__ */
#endif /* __KERNEL_GPU__ */
#endif /* __UTIL_ATOMIC_H__ */