Compositor: improve edge detection of Kuwahara filter

Compute the edges of the image once, based on luminance, instead of on all 3 channels.

This also gives a modest performance improvement of 8%, measured on an Intel i9 CPU using a 1920 x 3199 image.

Pull Request: https://projects.blender.org/blender/blender/pulls/108858
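
For reference, a minimal standalone sketch of the idea (independent of Blender's compositor API; the image layout, helper names, and Rec. 709 luminance weights are illustrative assumptions): collapse RGB to a single luminance channel once, then run the Sobel filters on that one channel instead of on all three.

#include <algorithm>
#include <cstddef>
#include <vector>

/* Single-channel float image, row-major. Purely illustrative type. */
struct Image {
  int width = 0, height = 0;
  std::vector<float> pixels; /* width * height floats. */
  float at(int x, int y) const
  {
    /* Clamp to the border so the 3x3 filters stay in bounds. */
    x = std::max(0, std::min(x, width - 1));
    y = std::max(0, std::min(y, height - 1));
    return pixels[size_t(y) * width + x];
  }
};

/* Collapse interleaved RGB to luminance once (Rec. 709 weights assumed),
 * so edge detection runs on one channel instead of three. */
static Image rgb_to_luminance(const std::vector<float> &rgb, int width, int height)
{
  Image lum;
  lum.width = width;
  lum.height = height;
  lum.pixels.resize(size_t(width) * height);
  for (size_t i = 0; i < lum.pixels.size(); i++) {
    const float *p = &rgb[i * 3];
    lum.pixels[i] = 0.2126f * p[0] + 0.7152f * p[1] + 0.0722f * p[2];
  }
  return lum;
}

/* 3x3 Sobel gradients on the luminance channel, using the same kernels
 * as the ConvolutionFilterOperation setup in the diff below. */
static void sobel(const Image &lum, int x, int y, float &gx, float &gy)
{
  gx = lum.at(x - 1, y - 1) - lum.at(x + 1, y - 1) +
       2.0f * (lum.at(x - 1, y) - lum.at(x + 1, y)) +
       lum.at(x - 1, y + 1) - lum.at(x + 1, y + 1);
  gy = lum.at(x - 1, y - 1) + 2.0f * lum.at(x, y - 1) + lum.at(x + 1, y - 1) -
       lum.at(x - 1, y + 1) - 2.0f * lum.at(x, y + 1) - lum.at(x + 1, y + 1);
}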
Habib Gahbiche 2023-06-24 21:39:07 +02:00 committed by Habib Gahbiche
parent c1292b4a80
commit c8f434e685
2 changed files with 13 additions and 9 deletions

@@ -4,6 +4,7 @@
 #include "COM_KuwaharaNode.h"
+#include "COM_ConvertOperation.h"
 #include "COM_ConvolutionFilterOperation.h"
 #include "COM_FastGaussianBlurOperation.h"
 #include "COM_KuwaharaAnisotropicOperation.h"
@@ -31,7 +32,11 @@ void KuwaharaNode::convert_to_operations(NodeConverter &converter,
     }
     case CMP_NODE_KUWAHARA_ANISOTROPIC: {
-      /* Edge detection */
+      /* Edge detection on luminance. */
+      auto rgb_to_lum = new ConvertColorToBWOperation();
+      converter.add_operation(rgb_to_lum);
+      converter.map_input_socket(get_input_socket(0), rgb_to_lum->get_input_socket(0));
+
       auto const_fact = new SetValueOperation();
       const_fact->set_value(1.0f);
       converter.add_operation(const_fact);
@@ -39,16 +44,16 @@ void KuwaharaNode::convert_to_operations(NodeConverter &converter,
       auto sobel_x = new ConvolutionFilterOperation();
       sobel_x->set3x3Filter(1, 0, -1, 2, 0, -2, 1, 0, -1);
       converter.add_operation(sobel_x);
-      converter.map_input_socket(get_input_socket(0), sobel_x->get_input_socket(0));
+      converter.add_link(rgb_to_lum->get_output_socket(0), sobel_x->get_input_socket(0));
       converter.add_link(const_fact->get_output_socket(0), sobel_x->get_input_socket(1));

       auto sobel_y = new ConvolutionFilterOperation();
       sobel_y->set3x3Filter(1, 2, 1, 0, 0, 0, -1, -2, -1);
       converter.add_operation(sobel_y);
-      converter.map_input_socket(get_input_socket(0), sobel_y->get_input_socket(0));
+      converter.add_link(rgb_to_lum->get_output_socket(0), sobel_y->get_input_socket(0));
       converter.add_link(const_fact->get_output_socket(0), sobel_y->get_input_socket(1));

-      /* Compute intensity of edges */
+      /* Compute intensity of edges. */
       auto sobel_xx = new MathMultiplyOperation();
       auto sobel_yy = new MathMultiplyOperation();
       auto sobel_xy = new MathMultiplyOperation();
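
In the hunk above, the three multiplications square and cross-multiply the Sobel gradients; per pixel they form the entries of the 2x2 structure tensor that the second file reads back as a, b and c. A rough standalone sketch (illustrative names, not the actual compositor operations; the tensor field is typically smoothed, e.g. with a Gaussian blur, before use):

/* Per-pixel structure tensor entries built from the Sobel gradients gx, gy.
 * xx, xy and yy correspond to the sobel_xx, sobel_xy and sobel_yy outputs. */
struct StructureTensor {
  float xx, xy, yy;
};

static StructureTensor structure_tensor(float gx, float gy)
{
  return {gx * gx, gx * gy, gy * gy};
}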

@@ -202,11 +202,10 @@ void KuwaharaAnisotropicOperation::update_memory_buffer_partial(MemoryBuffer *ou
     const int x = it.x;
     const int y = it.y;

-    /* For now use green channel to compute orientation. */
-    /* TODO: convert to HSV and compute orientation and strength on luminance channel. */
-    const float a = s_xx->get_value(x, y, 1);
-    const float b = s_xy->get_value(x, y, 1);
-    const float c = s_yy->get_value(x, y, 1);
+    /* All channels are identical. Take first channel for simplicity. */
+    const float a = s_xx->get_value(x, y, 0);
+    const float b = s_xy->get_value(x, y, 0);
+    const float c = s_yy->get_value(x, y, 0);

     /* Compute egenvalues of structure tensor */
     const double tr = a + c;
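
The eigenvalue computation that starts at the trace above follows the standard closed form for a symmetric 2x2 matrix [[a, b], [b, c]]. The sketch below shows that math as a hedged illustration of the usual anisotropic Kuwahara analysis, not necessarily the exact continuation of the Blender code:

#include <algorithm>
#include <cmath>

/* Eigen-analysis of the symmetric 2x2 structure tensor [[a, b], [b, c]]. */
struct TensorFeatures {
  double lambda1, lambda2; /* Eigenvalues, lambda1 >= lambda2. */
  double orientation;      /* Angle of the dominant eigenvector in radians. */
  double anisotropy;       /* 0 = isotropic, 1 = strongly oriented. */
};

static TensorFeatures analyze_structure_tensor(double a, double b, double c)
{
  TensorFeatures f;
  const double tr = a + c;
  const double det = a * c - b * b;
  /* tr^2 - 4 * det = (a - c)^2 + 4 * b^2 >= 0; the clamp guards against rounding. */
  const double disc = std::sqrt(std::max(0.0, tr * tr - 4.0 * det));
  f.lambda1 = 0.5 * (tr + disc);
  f.lambda2 = 0.5 * (tr - disc);
  f.orientation = 0.5 * std::atan2(2.0 * b, a - c);
  const double sum = f.lambda1 + f.lambda2;
  f.anisotropy = (sum > 1e-10) ? (f.lambda1 - f.lambda2) / sum : 0.0;
  return f;
}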