diff --git a/intern/cycles/blender/addon/properties.py b/intern/cycles/blender/addon/properties.py
index 64613216be0..986d5d972c1 100644
--- a/intern/cycles/blender/addon/properties.py
+++ b/intern/cycles/blender/addon/properties.py
@@ -325,6 +325,13 @@ class CyclesRenderSettings(bpy.types.PropertyGroup):
         default=1024,
     )
 
+    sample_offset: IntProperty(
+        name="Sample Offset",
+        description="Number of samples to skip when starting render",
+        min=0, max=(1 << 24),
+        default=0,
+    )
+
     time_limit: FloatProperty(
         name="Time Limit",
         description="Limit the render time (excluding synchronization time)."
diff --git a/intern/cycles/blender/addon/ui.py b/intern/cycles/blender/addon/ui.py
index 0c9179b4ccf..7bac7e46117 100644
--- a/intern/cycles/blender/addon/ui.py
+++ b/intern/cycles/blender/addon/ui.py
@@ -290,6 +290,9 @@ class CYCLES_RENDER_PT_sampling_advanced(CyclesButtonsPanel, Panel):
         col.active = not (cscene.use_adaptive_sampling and cscene.use_preview_adaptive_sampling)
         col.prop(cscene, "sampling_pattern", text="Pattern")
 
+        col = layout.column(align=True)
+        col.prop(cscene, "sample_offset")
+
         layout.separator()
 
         col = layout.column(align=True)
diff --git a/intern/cycles/blender/sync.cpp b/intern/cycles/blender/sync.cpp
index 92662e37bc2..59d684dcf05 100644
--- a/intern/cycles/blender/sync.cpp
+++ b/intern/cycles/blender/sync.cpp
@@ -835,18 +835,25 @@ SessionParams BlenderSync::get_session_params(BL::RenderEngine &b_engine,
   /* samples */
   int samples = get_int(cscene, "samples");
   int preview_samples = get_int(cscene, "preview_samples");
+  int sample_offset = get_int(cscene, "sample_offset");
 
   if (background) {
     params.samples = samples;
+    params.sample_offset = sample_offset;
   }
   else {
     params.samples = preview_samples;
-    if (params.samples == 0)
+    if (params.samples == 0) {
       params.samples = INT_MAX;
+    }
+    params.sample_offset = 0;
   }
 
+  /* Clamp sample offset. */
+  params.sample_offset = clamp(params.sample_offset, 0, Integrator::MAX_SAMPLES);
+
   /* Clamp samples. */
-  params.samples = min(params.samples, Integrator::MAX_SAMPLES);
+  params.samples = clamp(params.samples, 0, Integrator::MAX_SAMPLES - params.sample_offset);
 
   /* Viewport Performance */
   params.pixel_size = b_engine.get_preview_pixel_size(b_scene);
diff --git a/intern/cycles/integrator/path_trace.cpp b/intern/cycles/integrator/path_trace.cpp
index daf270d6686..dcbdf07ee67 100644
--- a/intern/cycles/integrator/path_trace.cpp
+++ b/intern/cycles/integrator/path_trace.cpp
@@ -380,7 +380,10 @@ void PathTrace::path_trace(RenderWork &render_work)
     PathTraceWork *path_trace_work = path_trace_works_[i].get();
 
     PathTraceWork::RenderStatistics statistics;
-    path_trace_work->render_samples(statistics, render_work.path_trace.start_sample, num_samples);
+    path_trace_work->render_samples(statistics,
+                                    render_work.path_trace.start_sample,
+                                    num_samples,
+                                    render_work.path_trace.sample_offset);
 
     const double work_time = time_dt() - work_start_time;
     work_balance_infos_[i].time_spent += work_time;
@@ -849,7 +852,8 @@ void PathTrace::progress_update_if_needed(const RenderWork &render_work)
     const int2 tile_size = get_render_tile_size();
     const int num_samples_added = tile_size.x * tile_size.y * render_work.path_trace.num_samples;
     const int current_sample = render_work.path_trace.start_sample +
-                               render_work.path_trace.num_samples;
+                               render_work.path_trace.num_samples -
+                               render_work.path_trace.sample_offset;
     progress_->add_samples(num_samples_added, current_sample);
   }
 
diff --git a/intern/cycles/integrator/path_trace_work.h b/intern/cycles/integrator/path_trace_work.h
index 0dc7cd2f896..2ebfc913580 100644
--- a/intern/cycles/integrator/path_trace_work.h
+++ b/intern/cycles/integrator/path_trace_work.h
@@ -75,7 +75,10 @@ class PathTraceWork {
 
   /* Render given number of samples as a synchronous blocking call.
    * The samples are added to the render buffer associated with this work. */
-  virtual void render_samples(RenderStatistics &statistics, int start_sample, int samples_num) = 0;
+  virtual void render_samples(RenderStatistics &statistics,
+                              int start_sample,
+                              int samples_num,
+                              int sample_offset) = 0;
 
   /* Copy render result from this work to the corresponding place of the GPU display.
    *
diff --git a/intern/cycles/integrator/path_trace_work_cpu.cpp b/intern/cycles/integrator/path_trace_work_cpu.cpp
index 36ce2be9f6d..530e60d6750 100644
--- a/intern/cycles/integrator/path_trace_work_cpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_cpu.cpp
@@ -71,7 +71,8 @@ void PathTraceWorkCPU::init_execution()
 
 void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
                                       int start_sample,
-                                      int samples_num)
+                                      int samples_num,
+                                      int sample_offset)
 {
   const int64_t image_width = effective_buffer_params_.width;
   const int64_t image_height = effective_buffer_params_.height;
@@ -97,6 +98,7 @@ void PathTraceWorkCPU::render_samples(RenderStatistics &statistics,
       work_tile.w = 1;
       work_tile.h = 1;
       work_tile.start_sample = start_sample;
+      work_tile.sample_offset = sample_offset;
       work_tile.num_samples = 1;
       work_tile.offset = effective_buffer_params_.offset;
       work_tile.stride = effective_buffer_params_.stride;
diff --git a/intern/cycles/integrator/path_trace_work_cpu.h b/intern/cycles/integrator/path_trace_work_cpu.h
index 6e734690811..63ab686588c 100644
--- a/intern/cycles/integrator/path_trace_work_cpu.h
+++ b/intern/cycles/integrator/path_trace_work_cpu.h
@@ -48,7 +48,8 @@ class PathTraceWorkCPU : public PathTraceWork {
 
   virtual void render_samples(RenderStatistics &statistics,
                               int start_sample,
-                              int samples_num) override;
+                              int samples_num,
+                              int sample_offset) override;
 
   virtual void copy_to_display(PathTraceDisplay *display,
                                PassMode pass_mode,
diff --git a/intern/cycles/integrator/path_trace_work_gpu.cpp b/intern/cycles/integrator/path_trace_work_gpu.cpp
index dfc1362ab09..2263c9892f4 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_gpu.cpp
@@ -250,7 +250,8 @@ void PathTraceWorkGPU::init_execution()
 
 void PathTraceWorkGPU::render_samples(RenderStatistics &statistics,
                                       int start_sample,
-                                      int samples_num)
+                                      int samples_num,
+                                      int sample_offset)
 {
   /* Limit number of states for the tile and rely on a greedy scheduling of tiles. This allows to
    * add more work (because tiles are smaller, so there is higher chance that more paths will
@@ -261,6 +262,7 @@ void PathTraceWorkGPU::render_samples(RenderStatistics &statistics,
   work_tile_scheduler_.reset(effective_buffer_params_,
                              start_sample,
                              samples_num,
+                             sample_offset,
                              device_scene_->data.integrator.scrambling_distance);
 
   enqueue_reset();
diff --git a/intern/cycles/integrator/path_trace_work_gpu.h b/intern/cycles/integrator/path_trace_work_gpu.h
index c5e291e72db..5aa497c26e7 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.h
+++ b/intern/cycles/integrator/path_trace_work_gpu.h
@@ -46,7 +46,8 @@ class PathTraceWorkGPU : public PathTraceWork {
 
   virtual void render_samples(RenderStatistics &statistics,
                               int start_sample,
-                              int samples_num) override;
+                              int samples_num,
+                              int sample_offset) override;
 
   virtual void copy_to_display(PathTraceDisplay *display,
                                PassMode pass_mode,
diff --git a/intern/cycles/integrator/render_scheduler.cpp b/intern/cycles/integrator/render_scheduler.cpp
index f776d01ef67..538d751e8b1 100644
--- a/intern/cycles/integrator/render_scheduler.cpp
+++ b/intern/cycles/integrator/render_scheduler.cpp
@@ -88,6 +88,16 @@ int RenderScheduler::get_num_samples() const
   return num_samples_;
 }
 
+void RenderScheduler::set_sample_offset(int sample_offset)
+{
+  sample_offset_ = sample_offset;
+}
+
+int RenderScheduler::get_sample_offset() const
+{
+  return sample_offset_;
+}
+
 void RenderScheduler::set_time_limit(double time_limit)
 {
   time_limit_ = time_limit;
@@ -110,13 +120,15 @@ int RenderScheduler::get_num_rendered_samples() const
   return state_.num_rendered_samples;
 }
 
-void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples)
+void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples, int sample_offset)
 {
   buffer_params_ = buffer_params;
 
   update_start_resolution_divider();
 
   set_num_samples(num_samples);
+  set_start_sample(sample_offset);
+  set_sample_offset(sample_offset);
 
   /* In background mode never do lower resolution render preview, as it is not really supported
    * by the software. */
@@ -171,7 +183,7 @@ void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples)
 
 void RenderScheduler::reset_for_next_tile()
 {
-  reset(buffer_params_, num_samples_);
+  reset(buffer_params_, num_samples_, sample_offset_);
 }
 
 bool RenderScheduler::render_work_reschedule_on_converge(RenderWork &render_work)
@@ -317,6 +329,7 @@ RenderWork RenderScheduler::get_render_work()
 
   render_work.path_trace.start_sample = get_start_sample_to_path_trace();
   render_work.path_trace.num_samples = get_num_samples_to_path_trace();
+  render_work.path_trace.sample_offset = get_sample_offset();
 
   render_work.init_render_buffers = (render_work.path_trace.start_sample == get_start_sample());
 
diff --git a/intern/cycles/integrator/render_scheduler.h b/intern/cycles/integrator/render_scheduler.h
index d7b7413ae31..28f563c46e3 100644
--- a/intern/cycles/integrator/render_scheduler.h
+++ b/intern/cycles/integrator/render_scheduler.h
@@ -39,6 +39,7 @@ class RenderWork {
   struct {
     int start_sample = 0;
    int num_samples = 0;
+    int sample_offset = 0;
   } path_trace;
 
   struct {
@@ -125,6 +126,9 @@ class RenderScheduler {
   void set_num_samples(int num_samples);
   int get_num_samples() const;
 
+  void set_sample_offset(int sample_offset);
+  int get_sample_offset() const;
+
   /* Time limit for the path tracing tasks, in minutes.
    * Zero disables the limit. */
   void set_time_limit(double time_limit);
@@ -150,7 +154,7 @@ class RenderScheduler {
 
   /* Reset scheduler, indicating that rendering will happen from scratch.
    * Resets current rendered state, as well as scheduling information. */
-  void reset(const BufferParams &buffer_params, int num_samples);
+  void reset(const BufferParams &buffer_params, int num_samples, int sample_offset);
 
   /* Reset scheduler upon switching to a next tile.
    * Will keep the same number of samples and full-frame render parameters, but will reset progress
@@ -419,6 +423,8 @@ class RenderScheduler {
   int start_sample_ = 0;
   int num_samples_ = 0;
 
+  int sample_offset_ = 0;
+
   /* Limit in seconds for how long path tracing is allowed to happen.
    * Zero means no limit is applied. */
   double time_limit_ = 0.0;
diff --git a/intern/cycles/integrator/work_tile_scheduler.cpp b/intern/cycles/integrator/work_tile_scheduler.cpp
index 2d1ac07db7f..d60f7149bf4 100644
--- a/intern/cycles/integrator/work_tile_scheduler.cpp
+++ b/intern/cycles/integrator/work_tile_scheduler.cpp
@@ -36,6 +36,7 @@ void WorkTileScheduler::set_max_num_path_states(int max_num_path_states)
 void WorkTileScheduler::reset(const BufferParams &buffer_params,
                               int sample_start,
                               int samples_num,
+                              int sample_offset,
                               float scrambling_distance)
 {
   /* Image buffer parameters. */
@@ -51,6 +52,7 @@ void WorkTileScheduler::reset(const BufferParams &buffer_params,
   /* Samples parameters. */
   sample_start_ = sample_start;
   samples_num_ = samples_num;
+  sample_offset_ = sample_offset;
 
   /* Initialize new scheduling. */
   reset_scheduler_state();
@@ -111,6 +113,7 @@ bool WorkTileScheduler::get_work(KernelWorkTile *work_tile_, const int max_work_
   work_tile.h = tile_size_.height;
   work_tile.start_sample = sample_start_ + start_sample;
   work_tile.num_samples = min(tile_size_.num_samples, samples_num_ - start_sample);
+  work_tile.sample_offset = sample_offset_;
   work_tile.offset = offset_;
   work_tile.stride = stride_;
 
diff --git a/intern/cycles/integrator/work_tile_scheduler.h b/intern/cycles/integrator/work_tile_scheduler.h
index d9fa7e84431..2d6395799f7 100644
--- a/intern/cycles/integrator/work_tile_scheduler.h
+++ b/intern/cycles/integrator/work_tile_scheduler.h
@@ -41,6 +41,7 @@ class WorkTileScheduler {
   void reset(const BufferParams &buffer_params,
              int sample_start,
              int samples_num,
+             int sample_offset,
             float scrambling_distance);
 
   /* Get work for a device.
@@ -79,6 +80,7 @@ class WorkTileScheduler {
    * (splitting into a smaller work tiles). */
   int sample_start_ = 0;
   int samples_num_ = 0;
+  int sample_offset_ = 0;
 
   /* Tile size which be scheduled for rendering. */
   TileSize tile_size_;
diff --git a/intern/cycles/kernel/film/accumulate.h b/intern/cycles/kernel/film/accumulate.h
index 33f913a6746..8b5c0f2a681 100644
--- a/intern/cycles/kernel/film/accumulate.h
+++ b/intern/cycles/kernel/film/accumulate.h
@@ -141,7 +141,8 @@ ccl_device_forceinline ccl_global float *kernel_accum_pixel_render_buffer(
 ccl_device_inline int kernel_accum_sample(KernelGlobals kg,
                                           ConstIntegratorState state,
                                           ccl_global float *ccl_restrict render_buffer,
-                                          int sample)
+                                          int sample,
+                                          int sample_offset)
 {
   if (kernel_data.film.pass_sample_count == PASS_UNUSED) {
     return sample;
@@ -149,7 +150,8 @@ ccl_device_inline int kernel_accum_sample(KernelGlobals kg,
 
   ccl_global float *buffer = kernel_accum_pixel_render_buffer(kg, state, render_buffer);
 
-  return atomic_fetch_and_add_uint32((uint *)(buffer) + kernel_data.film.pass_sample_count, 1);
+  return atomic_fetch_and_add_uint32((uint *)(buffer) + kernel_data.film.pass_sample_count, 1) +
+         sample_offset;
 }
 
 ccl_device void kernel_accum_adaptive_buffer(KernelGlobals kg,
diff --git a/intern/cycles/kernel/integrator/init_from_bake.h b/intern/cycles/kernel/integrator/init_from_bake.h
index 4e30563e21b..df1c7fd07e7 100644
--- a/intern/cycles/kernel/integrator/init_from_bake.h
+++ b/intern/cycles/kernel/integrator/init_from_bake.h
@@ -65,7 +65,8 @@ ccl_device bool integrator_init_from_bake(KernelGlobals kg,
   }
 
   /* Always count the sample, even if the camera sample will reject the ray. */
-  const int sample = kernel_accum_sample(kg, state, render_buffer, scheduled_sample);
+  const int sample = kernel_accum_sample(
+      kg, state, render_buffer, scheduled_sample, tile->sample_offset);
 
   /* Setup render buffers. */
   const int index = INTEGRATOR_STATE(state, path, render_pixel_index);
diff --git a/intern/cycles/kernel/integrator/init_from_camera.h b/intern/cycles/kernel/integrator/init_from_camera.h
index f0ba77bd9a6..59dd1a9fa75 100644
--- a/intern/cycles/kernel/integrator/init_from_camera.h
+++ b/intern/cycles/kernel/integrator/init_from_camera.h
@@ -89,7 +89,8 @@ ccl_device bool integrator_init_from_camera(KernelGlobals kg,
    * This logic allows to both count actual number of samples per pixel, and to add samples to this
    * pixel after it was converged and samples were added somewhere else (in which case the
    * `scheduled_sample` will be different from actual number of samples in this pixel). */
-  const int sample = kernel_accum_sample(kg, state, render_buffer, scheduled_sample);
+  const int sample = kernel_accum_sample(
+      kg, state, render_buffer, scheduled_sample, tile->sample_offset);
 
   /* Initialize random number seed for path. */
   const uint rng_hash = path_rng_hash_init(kg, sample, x, y);
diff --git a/intern/cycles/kernel/types.h b/intern/cycles/kernel/types.h
index 2827139d511..4e93e82e971 100644
--- a/intern/cycles/kernel/types.h
+++ b/intern/cycles/kernel/types.h
@@ -1410,6 +1410,7 @@ typedef struct KernelWorkTile {
 
   uint start_sample;
   uint num_samples;
+  uint sample_offset;
 
   int offset;
   uint stride;
diff --git a/intern/cycles/session/session.cpp b/intern/cycles/session/session.cpp
index b228939689c..299d731d9f9 100644
--- a/intern/cycles/session/session.cpp
+++ b/intern/cycles/session/session.cpp
@@ -262,6 +262,7 @@ RenderWork Session::run_update_for_next_iteration()
   }
 
   render_scheduler_.set_num_samples(params.samples);
+  render_scheduler_.set_start_sample(params.sample_offset);
   render_scheduler_.set_time_limit(params.time_limit);
 
   while (have_tiles) {
@@ -397,7 +398,7 @@ void Session::do_delayed_reset()
 
   /* Tile and work scheduling. */
   tile_manager_.reset_scheduling(buffer_params_, get_effective_tile_size());
-  render_scheduler_.reset(buffer_params_, params.samples);
+  render_scheduler_.reset(buffer_params_, params.samples, params.sample_offset);
 
   /* Passes. */
   /* When multiple tiles are used SAMPLE_COUNT pass is used to keep track of possible partial
diff --git a/intern/cycles/session/session.h b/intern/cycles/session/session.h
index 1ec0c6e9bb1..3f73593f008 100644
--- a/intern/cycles/session/session.h
+++ b/intern/cycles/session/session.h
@@ -54,6 +54,7 @@ class SessionParams {
   bool experimental;
 
   int samples;
+  int sample_offset;
   int pixel_size;
   int threads;
 
@@ -75,6 +76,7 @@ class SessionParams {
     experimental = false;
 
     samples = 1024;
+    sample_offset = 0;
    pixel_size = 1;
     threads = 0;
     time_limit = 0.0;
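
For reference, a minimal usage sketch of the new option from the Blender Python API, assuming this patch is applied. Only scene.cycles.samples and scene.cycles.sample_offset come from Cycles itself; the helper name, output path and the 4-way split are illustrative. Note that, as the patch wires the offset into RenderScheduler::set_start_sample(), `samples` here is the per-machine sample count, not the grand total, and merging the resulting slices afterwards (e.g. a weighted average by sample count) is outside the scope of this patch.

import bpy

def render_sample_slice(offset, count, filepath):
    """Render `count` samples of the current scene, starting at global
    sample index `offset` (the Sample Offset property added above)."""
    scene = bpy.context.scene
    scene.cycles.sample_offset = offset  # samples to skip before starting
    scene.cycles.samples = count         # samples this machine actually renders
    scene.render.filepath = filepath     # illustrative output path
    bpy.ops.render.render(write_still=True)

# Example: machine 2 of 4 renders samples 256..511 of a 1024-sample frame.
render_sample_slice(offset=256, count=256, filepath="//slice_02")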