Realtime Compositor: Implement Inpaint node

This patch implements the Inpaint node for the Realtime Compositor. The
inpainting region is filled by sampling the color of the nearest boundary pixel,
provided it is not further away than the user supplied distance. Additionally, a
lateral blur is applied along the direction tangent to the inpainting boundary
to smooth out the inpainted region.
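
For illustration only, the following is a minimal CPU-side sketch of that fill
rule, assuming a hypothetical RGBA float image stored row by row. The brute
force nearest-boundary search stands in for the jump-flooding-based lookup used
by the actual GPU implementation, the lateral blur is omitted, and all names
are invented for illustration.

#include <cmath>
#include <vector>

struct Pixel {
  float r, g, b, a;
};

/* Fill every non-opaque pixel with the color of the nearest opaque pixel, but
 * only if that pixel is within max_distance; otherwise write a transparent
 * color. Opaque pixels are left untouched. */
static void inpaint_reference(std::vector<Pixel> &image, int width, int height, float max_distance)
{
  std::vector<Pixel> output = image;
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      if (image[y * width + x].a == 1.0f) {
        continue;
      }
      float best_distance = max_distance;
      Pixel best_color = {0.0f, 0.0f, 0.0f, 0.0f};
      for (int sy = 0; sy < height; sy++) {
        for (int sx = 0; sx < width; sx++) {
          if (image[sy * width + sx].a != 1.0f) {
            continue;
          }
          const float distance_to_pixel = std::hypot(float(sx - x), float(sy - y));
          if (distance_to_pixel <= best_distance) {
            best_distance = distance_to_pixel;
            best_color = image[sy * width + sx];
          }
        }
      }
      output[y * width + x] = best_color;
    }
  }
  image = output;
}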

The implementation is not identical to the existing CPU implementation due to
technical infeasibility. In particular, the CPU implementation uses a Manhattan
distance transform, while the GPU implementation uses a Euclidean one, which is
a consequence of using the Jump Flooding algorithm. Furthermore, the CPU
implementation uses a serial convolution starting from the boundary outwards,
while the GPU implementation uses a lateral Gaussian blur in the direction
tangent to the boundary.
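
As a rough illustration of why the metrics differ: the offset (3, 4) has a
Manhattan distance of 7 but a Euclidean distance of 5, and Jump Flooding
propagates the texel of the closest seed in a small number of passes rather
than growing a distance field outwards one pixel at a time, so the Euclidean
metric falls out naturally. Below is a minimal, hypothetical CPU sketch of a
single Jump Flooding pass over a closest-seed table; the real GPU version is
the existing compositor_jump_flooding.glsl shader and differs in its details.

#include <cmath>
#include <vector>

/* A table entry stores the texel of the closest seed found so far, or (-1, -1)
 * if no seed is known yet. Seed pixels are initialized to their own texel. */
struct ClosestSeed {
  int x = -1, y = -1;
};

/* One Jump Flooding pass with the given step size. The full algorithm runs
 * multiple passes with decreasing step sizes down to 1, each pass refining the
 * table by consulting neighbors that are `step` pixels away. */
static void jump_flooding_pass(std::vector<ClosestSeed> &table, int width, int height, int step)
{
  std::vector<ClosestSeed> output = table;
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      ClosestSeed best = table[y * width + x];
      float best_distance = best.x < 0 ? INFINITY :
                                         std::hypot(float(best.x - x), float(best.y - y));
      for (int j = -1; j <= 1; j++) {
        for (int i = -1; i <= 1; i++) {
          const int nx = x + i * step;
          const int ny = y + j * step;
          if (nx < 0 || ny < 0 || nx >= width || ny >= height) {
            continue;
          }
          const ClosestSeed candidate = table[ny * width + nx];
          if (candidate.x < 0) {
            continue; /* That neighbor knows of no seed yet. */
          }
          const float candidate_distance = std::hypot(float(candidate.x - x),
                                                      float(candidate.y - y));
          if (candidate_distance < best_distance) {
            best_distance = candidate_distance;
            best = candidate;
          }
        }
      }
      output[y * width + x] = best;
    }
  }
  table = output;
}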

Pull Request: https://projects.blender.org/blender/blender/pulls/111792
Omar Emara 2023-09-25 08:45:46 +02:00 committed by Omar Emara
parent 5008938a1c
commit acb88528a5
5 changed files with 228 additions and 5 deletions

@@ -142,6 +142,8 @@ set(GLSL_SRC
shaders/compositor_glare_streaks_filter.glsl
shaders/compositor_id_mask.glsl
shaders/compositor_image_crop.glsl
shaders/compositor_inpaint_compute_boundary.glsl
shaders/compositor_inpaint_compute_region.glsl
shaders/compositor_jump_flooding.glsl
shaders/compositor_keying_compute_image.glsl
shaders/compositor_keying_compute_matte.glsl
@@ -262,6 +264,7 @@ set(SRC_SHADER_CREATE_INFOS
shaders/infos/compositor_glare_info.hh
shaders/infos/compositor_id_mask_info.hh
shaders/infos/compositor_image_crop_info.hh
shaders/infos/compositor_inpaint_info.hh
shaders/infos/compositor_jump_flooding_info.hh
shaders/infos/compositor_keying_info.hh
shaders/infos/compositor_kuwahara_info.hh

@@ -0,0 +1,45 @@
/* SPDX-FileCopyrightText: 2022-2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/* The inpaint operation uses a jump flood algorithm to flood the region to be inpainted with the
* pixels at its boundary. The algorithm expects an input image whose values are those returned by
* the initialize_jump_flooding_value function, given the texel location and a boolean specifying
* if the pixel is a boundary one.
*
* Technically, we needn't restrict the output to just the boundary pixels, since the algorithm can
* still operate if the interior of the region were also included. However, the algorithm operates
* more accurately when the number of pixels to be flooded is kept to a minimum. */
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_compositor_jump_flooding_lib.glsl)
void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
/* Identify if any of the 8 neighbours around the center pixel are transparent. */
bool has_transparent_neighbours = false;
for (int j = -1; j <= 1; j++) {
for (int i = -1; i <= 1; i++) {
ivec2 offset = ivec2(i, j);
/* Exempt the center pixel. */
if (any(notEqual(offset, ivec2(0)))) {
if (texture_load(input_tx, texel + offset).a < 1.0) {
has_transparent_neighbours = true;
break;
}
}
}
}
/* The pixels at the boundary are those that are opaque and have transparent neighbours. */
bool is_opaque = texture_load(input_tx, texel).a == 1.0;
bool is_boundary_pixel = is_opaque && has_transparent_neighbours;
/* Encode the boundary information in the format expected by the jump flooding algorithm. */
vec4 jump_flooding_value = initialize_jump_flooding_value(texel, is_boundary_pixel);
imageStore(boundary_img, texel, jump_flooding_value);
}
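
For reference, a minimal CPU-side sketch of the same boundary test, reusing the
hypothetical Pixel image layout from the earlier sketch: a pixel seeds the
flood only if it is opaque and at least one of its 8 neighbors is not fully
opaque. Out-of-bounds neighbors are simply skipped here, whereas the shader's
texture_load presumably has its own boundary handling.

#include <vector>

/* Hypothetical reference of the boundary test: opaque pixels that touch a
 * non-opaque pixel are the seeds for the jump flooding algorithm. */
static bool is_boundary_pixel(const std::vector<Pixel> &image, int width, int height, int x, int y)
{
  if (image[y * width + x].a != 1.0f) {
    return false; /* Not opaque, so not a boundary pixel. */
  }
  for (int j = -1; j <= 1; j++) {
    for (int i = -1; i <= 1; i++) {
      if (i == 0 && j == 0) {
        continue; /* Exempt the center pixel. */
      }
      const int nx = x + i;
      const int ny = y + j;
      if (nx < 0 || ny < 0 || nx >= width || ny >= height) {
        continue;
      }
      if (image[ny * width + nx].a < 1.0f) {
        return true;
      }
    }
  }
  return false;
}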

@@ -0,0 +1,71 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/* Fill the inpainting region by sampling the color of the nearest boundary pixel if it is not
* further than the user supplied distance. Additionally, apply a lateral blur along the direction
* tangent to the inpainting boundary to smooth out the inpainted region. */
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_compositor_jump_flooding_lib.glsl)
void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
vec4 color = texture_load(input_tx, texel);
/* An opaque pixel, no inpainting needed. */
if (color.a == 1.0) {
imageStore(output_img, texel, color);
return;
}
vec4 flooding_value = texture_load(flooded_boundary_tx, texel);
float distance_to_boundary = extract_jump_flooding_distance_to_closest_seed(flooding_value);
/* The nearest boundary pixel is further than the user supplied distance, so write a transparent
* color. */
if (distance_to_boundary > distance) {
imageStore(output_img, texel, vec4(0.0));
return;
}
/* We set the blur radius to be proportional to the distance to the boundary. */
int blur_radius = int(ceil(distance_to_boundary));
/* Laterally blur by accumulating the boundary pixels nearest to the pixels along the tangential
* path in both directions starting from the current pixel. Note that the weights texture only
* stores the weights for the left half, but since the Gaussian is symmetric, the same weight is
* used for the right half and we add both of their contributions. */
vec2 left_texel = vec2(texel);
vec2 right_texel = vec2(texel);
float accumulated_weight = 0.0;
vec4 accumulated_color = vec4(0.0);
for (int i = 0; i < blur_radius; i++) {
/* Guard against a zero denominator when the blur radius is one. */
float weight = texture(gaussian_weights_tx, float(i) / float(max(blur_radius - 1, 1))).x;
{
vec4 flooding_value = texture_load(flooded_boundary_tx, ivec2(left_texel));
ivec2 boundary_texel = extract_jump_flooding_closest_seed_texel(flooding_value);
accumulated_color += texture_load(input_tx, boundary_texel) * weight;
accumulated_weight += weight;
/* Move the left texel one pixel in the clockwise tangent to the boundary. */
left_texel += normalize((left_texel - boundary_texel).yx * vec2(-1.0, 1.0));
}
/* When i is zero, we are accumulating the center pixel, which was already accumulated as the
* left texel above, so no need to accumulate it again. */
if (i != 0) {
vec4 flooding_value = texture_load(flooded_boundary_tx, ivec2(right_texel));
ivec2 boundary_texel = extract_jump_flooding_closest_seed_texel(flooding_value);
accumulated_color += texture_load(input_tx, boundary_texel) * weight;
accumulated_weight += weight;
/* Move the right texel one pixel in the anti-clockwise tangent to the boundary. */
right_texel += normalize((right_texel - boundary_texel).yx * vec2(1.0, -1.0));
}
}
imageStore(output_img, texel, accumulated_color / accumulated_weight);
}
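
The tangent step above works by rotating the vector that points from the
closest boundary texel to the current texel by 90 degrees, one way for the left
walk and the other way for the right walk, and advancing one pixel along the
normalized result. A tiny, hypothetical C++ sketch of that step, with all names
invented for illustration:

#include <cmath>

struct Vec2 {
  float x, y;
};

/* Rotate a vector by 90 degrees counter-clockwise: (x, y) becomes (-y, x).
 * Negating both components instead gives the clockwise rotation (y, -x). */
static Vec2 rotate_90_ccw(Vec2 v)
{
  return {-v.y, v.x};
}

/* Advance a position one pixel along the direction tangent to the boundary,
 * given the vector from the closest boundary texel to the position. */
static Vec2 step_along_tangent(Vec2 position, Vec2 direction_from_boundary)
{
  const Vec2 tangent = rotate_90_ccw(direction_from_boundary);
  const float length = std::hypot(tangent.x, tangent.y);
  return {position.x + tangent.x / length, position.y + tangent.y / length};
}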

@@ -0,0 +1,22 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
GPU_SHADER_CREATE_INFO(compositor_inpaint_compute_boundary)
.local_group_size(16, 16)
.sampler(0, ImageType::FLOAT_2D, "input_tx")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "boundary_img")
.compute_source("compositor_inpaint_compute_boundary.glsl")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(compositor_inpaint_compute_region)
.local_group_size(16, 16)
.push_constant(Type::INT, "distance")
.sampler(0, ImageType::FLOAT_2D, "input_tx")
.sampler(1, ImageType::FLOAT_2D, "flooded_boundary_tx")
.sampler(2, ImageType::FLOAT_1D, "gaussian_weights_tx")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.compute_source("compositor_inpaint_compute_region.glsl")
.do_static_compilation(true);

@@ -9,7 +9,12 @@
#include "UI_interface.hh"
#include "UI_resources.hh"
#include "DNA_scene_types.h"
#include "COM_algorithm_jump_flooding.hh"
#include "COM_node_operation.hh"
#include "COM_symmetric_separable_blur_weights.hh"
#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -19,7 +24,9 @@ namespace blender::nodes::node_composite_inpaint_cc {
static void cmp_node_inpaint_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>("Image").default_value({1.0f, 1.0f, 1.0f, 1.0f});
b.add_input<decl::Color>("Image")
.default_value({1.0f, 1.0f, 1.0f, 1.0f})
.compositor_domain_priority(0);
b.add_output<decl::Color>("Image");
}
@@ -36,8 +43,85 @@ class InpaintOperation : public NodeOperation {
void execute() override
{
get_input("Image").pass_through(get_result("Image"));
context().set_info_message("Viewport compositor setup not fully supported");
Result &input = get_input("Image");
Result &output = get_result("Image");
if (input.is_single_value() || get_distance() == 0) {
input.pass_through(output);
return;
}
/* Compute an image that marks the boundary pixels of the inpainting region as seed pixels in
* the format expected by the jump flooding algorithm. The inpainting region is the region
* composed of pixels that are not opaque. */
Result inpainting_boundary = compute_inpainting_boundary();
/* Compute a jump flooding table to get the closest boundary pixel to each pixel. */
Result flooded_boundary = Result::Temporary(ResultType::Color, texture_pool());
jump_flooding(context(), inpainting_boundary, flooded_boundary);
inpainting_boundary.release();
/* Fill the inpainting region based on the jump flooding table. */
compute_inpainting_region(flooded_boundary);
flooded_boundary.release();
}
Result compute_inpainting_boundary()
{
GPUShader *shader = shader_manager().get("compositor_inpaint_compute_boundary");
GPU_shader_bind(shader);
const Result &input = get_input("Image");
input.bind_as_texture(shader, "input_tx");
Result inpainting_boundary = Result::Temporary(ResultType::Color, texture_pool());
const Domain domain = compute_domain();
inpainting_boundary.allocate_texture(domain);
inpainting_boundary.bind_as_image(shader, "boundary_img");
compute_dispatch_threads_at_least(shader, domain.size);
input.unbind_as_texture();
inpainting_boundary.unbind_as_image();
GPU_shader_unbind();
return inpainting_boundary;
}
void compute_inpainting_region(Result &flooded_boundary)
{
GPUShader *shader = shader_manager().get("compositor_inpaint_compute_region");
GPU_shader_bind(shader);
GPU_shader_uniform_1i(shader, "distance", get_distance());
const Result &input = get_input("Image");
input.bind_as_texture(shader, "input_tx");
flooded_boundary.bind_as_texture(shader, "flooded_boundary_tx");
/* The lateral blur radius in the shader is proportional to the distance between each pixel and
* the inpainting boundary, so the maximum possible blur radius is the user supplied distance. */
const float max_radius = float(get_distance());
const SymmetricSeparableBlurWeights &gaussian_weights =
context().cache_manager().symmetric_separable_blur_weights.get(R_FILTER_GAUSS, max_radius);
gaussian_weights.bind_as_texture(shader, "gaussian_weights_tx");
const Domain domain = compute_domain();
Result &output = get_result("Image");
output.allocate_texture(domain);
output.bind_as_image(shader, "output_img");
compute_dispatch_threads_at_least(shader, domain.size);
input.unbind_as_texture();
gaussian_weights.unbind_as_texture();
output.unbind_as_image();
GPU_shader_unbind();
}
int get_distance()
{
return bnode().custom2;
}
};
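
As a side note on the dispatch above: the create info declares a 16x16 local
group size, so compute_dispatch_threads_at_least presumably launches a ceiling
division of the domain size by the group size so that every pixel is covered.
A hypothetical illustration of that arithmetic:

#include <cstdio>

/* Ceiling division: the number of work groups needed to cover `size` threads
 * with groups of `local_size` threads each. */
static int group_count(int size, int local_size)
{
  return (size + local_size - 1) / local_size;
}

int main()
{
  /* For example, a 1920x1080 domain with 16x16 local groups needs 120x68 groups. */
  std::printf("%d x %d\n", group_count(1920, 16), group_count(1080, 16));
  return 0;
}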
@@ -58,8 +142,6 @@ void register_node_type_cmp_inpaint()
ntype.declare = file_ns::cmp_node_inpaint_declare;
ntype.draw_buttons = file_ns::node_composit_buts_inpaint;
ntype.get_compositor_operation = file_ns::get_compositor_operation;
-  ntype.realtime_compositor_unsupported_message = N_(
-      "Node not supported in the Viewport compositor");
nodeRegisterType(&ntype);
}