blender/intern/cycles/render/film.h
Stefan Werner 51e898324d Adaptive Sampling for Cycles.
This feature takes some inspiration from
"RenderMan: An Advanced Path Tracing Architecture for Movie Rendering" and
"A Hierarchical Automatic Stopping Condition for Monte Carlo Global Illumination"

The basic principle is as follows:
While samples are being added to a pixel, the adaptive sampler writes half
of the samples to a separate buffer. This gives it two separate estimates
of the same pixel, and by comparing their difference it estimates convergence.
Once convergence drops below a given threshold, the pixel is considered done.

When a pixel has not converged yet and needs more samples than the minimum,
its immediate neighbors are also set to take more samples. This is done in order
to more reliably detect sharp features such as caustics. A 3x3 box filter that
is run periodically over the tile buffer is used for that purpose.

After a tile has finished rendering, the values of all passes are scaled as if
they were rendered with the full number of samples. This way, any code operating
on these buffers, for example the denoiser, does not need to be changed for
per-pixel sample counts.

Reviewed By: brecht, #cycles

Differential Revision: https://developer.blender.org/D4686
2020-03-05 12:21:38 +01:00

104 lines
2.3 KiB
C++

/*
* Copyright 2011-2013 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __FILM_H__
#define __FILM_H__
#include "util/util_string.h"
#include "util/util_vector.h"
#include "kernel/kernel_types.h"
#include "graph/node.h"
CCL_NAMESPACE_BEGIN
class Device;
class DeviceScene;
class Scene;
/* Pixel reconstruction filter used when accumulating samples into the
 * image (box, Gaussian, or Blackman-Harris window). */
typedef enum FilterType {
FILTER_BOX,
FILTER_GAUSSIAN,
FILTER_BLACKMAN_HARRIS,
FILTER_NUM_TYPES, /* Sentinel: number of filter types, keep last. */
} FilterType;
/* Description of a single render pass (combined, depth, normal, ...)
 * stored in the film's output buffer. */
class Pass {
public:
PassType type; /* Kernel pass type identifier. */
int components; /* Number of float components the pass occupies in the buffer. */
bool filter; /* NOTE(review): presumably whether pixel filtering applies to this pass — confirm in kernel. */
bool exposure; /* NOTE(review): presumably whether exposure scaling applies — confirm in kernel. */
PassType divide_type; /* Pass to divide by when reading back, if any. */
string name; /* User-visible pass name (also used for AOV lookup). */
/* Append a pass of the given type (optionally named) to `passes`. */
static void add(PassType type, vector<Pass> &passes, const char *name = NULL);
/* True if both pass lists describe the same layout. */
static bool equals(const vector<Pass> &A, const vector<Pass> &B);
/* True if `passes` contains a pass of the given type. */
static bool contains(const vector<Pass> &passes, PassType);
};
/* Film settings: render passes, exposure, pixel filter, denoising data
 * layout, mist parameters and adaptive-sampling configuration for the
 * render output buffer. */
class Film : public Node {
public:
NODE_DECLARE
float exposure; /* Scene exposure multiplier applied to pass values. */
vector<Pass> passes; /* Enabled render passes, in buffer order. */
bool denoising_data_pass; /* Write auxiliary denoising data passes. */
bool denoising_clean_pass; /* Write the noise-free "clean" pass. */
bool denoising_prefiltered_pass; /* Write prefiltered denoising passes. */
int denoising_flags; /* Bitfield of denoising options — see kernel flags. */
float pass_alpha_threshold; /* Alpha threshold for pass write-out; semantics in kernel. */
PassType display_pass; /* Pass shown in the interactive display. */
int pass_stride; /* Per-pixel float stride of all passes combined. */
int denoising_data_offset; /* Buffer offset of denoising data passes. */
int denoising_clean_offset; /* Buffer offset of the clean pass. */
FilterType filter_type; /* Pixel reconstruction filter. */
float filter_width; /* Filter width in pixels. */
size_t filter_table_offset; /* Offset of the precomputed filter table on the device. */
float mist_start; /* Mist pass: distance where mist begins. */
float mist_depth; /* Mist pass: depth range of the falloff. */
float mist_falloff; /* Mist pass: falloff curve exponent. */
bool use_light_visibility; /* NOTE(review): presumably enables per-light ray visibility — confirm against callers. */
CryptomatteType cryptomatte_passes; /* Which cryptomatte pass types are enabled. */
int cryptomatte_depth; /* Number of cryptomatte levels per pass type. */
bool use_adaptive_sampling; /* Enable per-pixel adaptive sampling (see D4686). */
bool need_update; /* Dirty flag: device data must be re-uploaded. */
Film();
~Film();
/* Upload film data (pass layout, filter table, ...) to the device. */
void device_update(Device *device, DeviceScene *dscene, Scene *scene);
/* Free device-side film data. */
void device_free(Device *device, DeviceScene *dscene, Scene *scene);
/* True if settings differ from `film` (change detection). */
bool modified(const Film &film);
/* Tag passes for update when the pass list changes; optionally replace it. */
void tag_passes_update(Scene *scene, const vector<Pass> &passes_, bool update_passes = true);
/* Mark film as needing a device update. */
void tag_update(Scene *scene);
/* Buffer offset of the named AOV; sets `is_color` to its type. */
int get_aov_offset(string name, bool &is_color);
};
CCL_NAMESPACE_END
#endif /* __FILM_H__ */