Cleanup: spelling in comments

This commit is contained in:
Campbell Barton 2023-08-25 08:56:58 +10:00
parent ed5110c0ef
commit 3de8900ed6
100 changed files with 191 additions and 190 deletions

@ -24,7 +24,7 @@
# gflags to be something else (i.e. google for legacy
# compatibility).
#
# The following variables control the behaviour of this module when an exported
# The following variables control the behavior of this module when an exported
# gflags CMake configuration is not found.
#
# GFLAGS_PREFER_EXPORTED_GFLAGS_CMAKE_CONFIGURATION: TRUE/FALSE, iff TRUE then
@ -47,7 +47,7 @@
# The following variables are also defined by this module, but in line with
# CMake recommended FindPackage() module style should NOT be referenced directly
# by callers (use the plural variables detailed above instead). These variables
# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which
# do however affect the behavior of the module via FIND_[PATH/LIBRARY]() which
# are NOT re-called (i.e. search for library is not repeated) if these variables
# are set with valid values _in the CMake cache_. This means that if these
# variables are set directly in the cache, either by the user in the CMake GUI,

@ -449,7 +449,7 @@ void uiButSetFunc(uiBut *but, void (*func)(void *arg1, void *arg2), void *arg1,
When the button is pressed and released, it calls this function, with the 2 arguments.
void uiButSetFlag(uiBut *but, int flag)
set a flag for further control of button behaviour:
set a flag for further control of button behavior:
flag:
UI_TEXT_LEFT

@ -583,7 +583,7 @@ TEST_F(RenderGraph, constant_fold_part_mix_blend)
/*
* Tests:
* - NOT folding of MixRGB Sub with the same inputs and fac NOT 1.
* - NOT folding of MixRGB Subtract with the same inputs and fac NOT 1.
*/
TEST_F(RenderGraph, constant_fold_part_mix_sub_same_fac_bad)
{
@ -604,7 +604,7 @@ TEST_F(RenderGraph, constant_fold_part_mix_sub_same_fac_bad)
/*
* Tests:
* - Folding of MixRGB Sub with the same inputs and fac 1.
* - Folding of MixRGB Subtract with the same inputs and fac 1.
*/
TEST_F(RenderGraph, constant_fold_part_mix_sub_same_fac_1)
{
@ -700,7 +700,7 @@ TEST_F(RenderGraph, constant_fold_part_mix_add_0)
}
/*
* Tests: partial folding for RGB Sub with known 0.
* Tests: partial folding for RGB Subtract with known 0.
*/
TEST_F(RenderGraph, constant_fold_part_mix_sub_0)
{
@ -717,7 +717,7 @@ TEST_F(RenderGraph, constant_fold_part_mix_sub_0)
}
/*
* Tests: partial folding for RGB Mul with known 1.
* Tests: partial folding for RGB Multiply with known 1.
*/
TEST_F(RenderGraph, constant_fold_part_mix_mul_1)
{
@ -735,7 +735,7 @@ TEST_F(RenderGraph, constant_fold_part_mix_mul_1)
}
/*
* Tests: partial folding for RGB Div with known 1.
* Tests: partial folding for RGB Divide with known 1.
*/
TEST_F(RenderGraph, constant_fold_part_mix_div_1)
{
@ -752,7 +752,7 @@ TEST_F(RenderGraph, constant_fold_part_mix_div_1)
}
/*
* Tests: partial folding for RGB Mul with known 0.
* Tests: partial folding for RGB Multiply with known 0.
*/
TEST_F(RenderGraph, constant_fold_part_mix_mul_0)
{
@ -772,7 +772,7 @@ TEST_F(RenderGraph, constant_fold_part_mix_mul_0)
}
/*
* Tests: partial folding for RGB Div with known 0.
* Tests: partial folding for RGB Divide with known 0.
*/
TEST_F(RenderGraph, constant_fold_part_mix_div_0)
{
@ -1061,7 +1061,7 @@ TEST_F(RenderGraph, constant_fold_part_math_add_0)
}
/*
* Tests: partial folding for Math Sub with known 0.
* Tests: partial folding for Math Subtract with known 0.
*/
TEST_F(RenderGraph, constant_fold_part_math_sub_0)
{
@ -1076,7 +1076,7 @@ TEST_F(RenderGraph, constant_fold_part_math_sub_0)
}
/*
* Tests: partial folding for Math Mul with known 1.
* Tests: partial folding for Math Multiply with known 1.
*/
TEST_F(RenderGraph, constant_fold_part_math_mul_1)
{
@ -1091,7 +1091,7 @@ TEST_F(RenderGraph, constant_fold_part_math_mul_1)
}
/*
* Tests: partial folding for Math Div with known 1.
* Tests: partial folding for Math Divide with known 1.
*/
TEST_F(RenderGraph, constant_fold_part_math_div_1)
{
@ -1106,7 +1106,7 @@ TEST_F(RenderGraph, constant_fold_part_math_div_1)
}
/*
* Tests: partial folding for Math Mul with known 0.
* Tests: partial folding for Math Multiply with known 0.
*/
TEST_F(RenderGraph, constant_fold_part_math_mul_0)
{
@ -1122,7 +1122,7 @@ TEST_F(RenderGraph, constant_fold_part_math_mul_0)
}
/*
* Tests: partial folding for Math Div with known 0.
* Tests: partial folding for Math Divide with known 0.
*/
TEST_F(RenderGraph, constant_fold_part_math_div_0)
{
@ -1228,7 +1228,7 @@ TEST_F(RenderGraph, constant_fold_part_vecmath_add_0)
}
/*
* Tests: partial folding for Vector Math Sub with known 0.
* Tests: partial folding for Vector Math Subtract with known 0.
*/
TEST_F(RenderGraph, constant_fold_part_vecmath_sub_0)
{

@ -175,7 +175,7 @@ vec4 OCIO_ProcessColor(vec4 col, vec4 col_overlay)
col.rgb = pow(col.rgb, vec3(parameters.exponent * 2.2));
if (!parameters.use_hdr) {
/* If we're not using an extended colour space, clamp the color 0..1. */
/* If we're not using an extended color space, clamp the color 0..1. */
col = clamp(col, 0.0, 1.0);
}
else {

@ -208,7 +208,7 @@ typedef struct IDTypeInfo {
* Used to do some validation and/or complex processing on the ID after it has been fully read
* and its ID pointers have been updated to valid values (lib linking process).
*
* Note that this is still called _before_ the `do_versions_after_linking` versionning code.
* Note that this is still called _before_ the `do_versions_after_linking` versioning code.
*/
IDTypeBlendReadAfterLiblinkFunction blend_read_after_liblink;

@ -5694,7 +5694,7 @@ static void dynamic_paint_wave_step_cb(void *__restrict userdata,
wPoint->height = (dt * wave_speed * avg_n_height + wPoint->height * avg_dist) /
(avg_dist + dt * wave_speed);
}
/* else do wave eq */
/* Else do wave equation. */
else {
/* add force towards zero height based on average dist */
if (avg_dist) {

@ -892,7 +892,7 @@ static void object_blend_read_after_liblink(BlendLibReader *reader, ID *id)
BlendFileReadReport *reports = BLO_read_lib_reports(reader);
if (ob->data == nullptr && ob->type != OB_EMPTY) {
/* NOTE: This case is not expected to happen anymore, since in when a linked ID disapears, an
/* NOTE: This case is not expected to happen anymore, since when a linked ID disappears, an
* empty placeholder is created for it by readfile code. Only some serious corruption of data
* should be able to trigger this code nowadays. */
@ -900,7 +900,7 @@ static void object_blend_read_after_liblink(BlendLibReader *reader, ID *id)
if (ob->pose) {
/* This code is now executed after _all_ ID pointers have been lib-linked,so it's safe to do
* a proper cleanup. Further more, since user count of IDs is not done in readcode anymore,
* a proper cleanup. Further more, since user count of IDs is not done in read-code anymore,
* `BKE_pose_free_ex(ob->pose, false)` can be called (instead of
* `BKE_pose_free_ex(ob->pose)`), avoiding any access to other IDs altogether. */
BKE_pose_free_ex(ob->pose, false);

@ -3270,9 +3270,9 @@ static void do_versions_after_linking(FileData *fd, Main *main)
static int lib_link_cb(struct LibraryIDLinkCallbackData *cb_data)
{
/* Embedded IDs are not known by lib_link code, so they would be remapped to `nullptr`. But there
* is no need to process them anyway, asthey are already handled during the 'read_data' phase.
* is no need to process them anyway, as they are already handled during the 'read_data' phase.
*
* NOTE: Some external non-owning pointers to embedded IDs (like the nodetree pointers of the
* NOTE: Some external non-owning pointers to embedded IDs (like the node-tree pointers of the
* Node editor) will not be detected as embedded ones though at 'lib_link' stage (because their
* source data cannot be accessed). This is handled on a case-by-case basis in 'after_lib_link'
* validation code. */
@ -3280,7 +3280,7 @@ static int lib_link_cb(struct LibraryIDLinkCallbackData *cb_data)
return IDWALK_RET_NOP;
}
/* Explicitely requested to be ignored during readfile processing. Means the read_data code
/* Explicitly requested to be ignored during readfile processing. Means the read_data code
* already handled this pointer. Typically, the 'owner_id' pointer of an embedded ID. */
if (cb_data->cb_flag & IDWALK_CB_READFILE_IGNORE) {
return IDWALK_RET_NOP;

@ -7,7 +7,7 @@
#if defined(KARIS_AVERAGE)
/* Computes the weighted average of the given four colors, which are assumed to the colors of
* spatially neighbouring pixels. The weights are computed so as to reduce the contributions of
* spatially neighboring pixels. The weights are computed so as to reduce the contributions of
* fireflies on the result by applying a form of local tone mapping as described by Brian Karis in
* the article "Graphic Rants: Tone Mapping".
*
@ -66,7 +66,7 @@ void main()
* groups of pixels. The center is sampled 4 times, the far non corner pixels are sampled 2
* times, the near corner pixels are sampled only once; but their weight is quadruple the weights
* of other groups; so they count as sampled 4 times, finally the far corner pixels are sampled
* only once, essentially totalling 32 samples. So the weights are as used in the following code
* only once, essentially totaling 32 samples. So the weights are as used in the following code
* section. */
vec4 result = (4.0 / 32.0) * center +
(4.0 / 32.0) *

@ -27,7 +27,7 @@ void main()
/* The value of the ghost is attenuated by a scalar multiple of the inverse distance to the
* center, such that it is maximum at the center and become zero further from the center,
 * making sure to take the scale into account. The scalar multiple of 1 / 4 is chosen using
* visual judgement. */
* visual judgment. */
float distance_to_center = distance(coordinates, vec2(0.5)) * 2.0;
float attenuator = max(0.0, 1.0 - distance_to_center * abs(scale)) / 4.0;

@ -17,7 +17,7 @@ void main()
* while the big ghost is flipped and scaled up with the origin as the center of the image by a
* factor of 0.97. Note that 1) The negative scale implements the flipping. 2) Factors larger
* than 1 actually scales down the image since the factor multiplies the coordinates and not the
* images itself. 3) The values are arbitrarily chosen using visual judgement. */
* images itself. 3) The values are arbitrarily chosen using visual judgment. */
float small_ghost_scale = 2.13;
float big_ghost_scale = -0.97;

@ -15,28 +15,28 @@ void main()
vec2 coordinates = (vec2(texel) + vec2(0.5)) / vec2(input_size);
vec2 vector = streak_vector / vec2(input_size);
/* Load three equally spaced neighbours to the current pixel in the direction of the streak
/* Load three equally spaced neighbors to the current pixel in the direction of the streak
* vector. */
vec4 neighbours[3];
neighbours[0] = texture(input_streak_tx, coordinates + vector);
neighbours[1] = texture(input_streak_tx, coordinates + vector * 2.0);
neighbours[2] = texture(input_streak_tx, coordinates + vector * 3.0);
/* Attenuate the value of two of the channels for each of the neighbours by multiplying by the
* color modulator. The particular channels for each neighbour were chosen to be visually similar
/* Attenuate the value of two of the channels for each of the neighbors by multiplying by the
* color modulator. The particular channels for each neighbor were chosen to be visually similar
* to the modulation pattern of chromatic aberration. */
neighbours[0].gb *= color_modulator;
neighbours[1].rg *= color_modulator;
neighbours[2].rb *= color_modulator;
/* Compute the weighted sum of all neighbours using the given fade factors as weights. The
* weights are expected to be lower for neighbours that are further away. */
/* Compute the weighted sum of all neighbors using the given fade factors as weights. The
* weights are expected to be lower for neighbors that are further away. */
vec4 weighted_neighbours_sum = vec4(0.0);
for (int i = 0; i < 3; i++) {
weighted_neighbours_sum += fade_factors[i] * neighbours[i];
}
/* The output is the average between the center color and the weighted sum of the neighbours.
/* The output is the average between the center color and the weighted sum of the neighbors.
 * Which intuitively means that highlights will spread in the direction of the streak, which is
* the desired result. */
vec4 center_color = texture(input_streak_tx, coordinates);

@ -10,12 +10,12 @@ void main()
float matte = texture_load(input_matte_tx, texel).x;
/* Search the neighbourhood around the current matte value and identify if it lies along the
/* Search the neighborhood around the current matte value and identify if it lies along the
 * edges of the matte. This needs to be computed only when we need to compute the edges output
* or tweak the levels of the matte. */
bool is_edge = false;
if (compute_edges || black_level != 0.0 || white_level != 1.0) {
/* Count the number of neighbours whose matte is sufficiently similar to the current matte,
/* Count the number of neighbors whose matte is sufficiently similar to the current matte,
* as controlled by the edge_tolerance factor. */
int count = 0;
for (int j = -edge_search_radius; j <= edge_search_radius; j++) {
@ -25,8 +25,8 @@ void main()
}
}
/* If the number of neighbours that are sufficiently similar to the center matte is less that
* 90% of the total number of neighbours, then that means the variance is high in that areas
/* If the number of neighbors that are sufficiently similar to the center matte is less than
* 90% of the total number of neighbors, then that means the variance is high in that areas
* and it is considered an edge. */
is_edge = count < ((edge_search_radius * 2 + 1) * (edge_search_radius * 2 + 1)) * 0.9;
}

@ -175,7 +175,7 @@ void main()
vec2 rotated_disk_point = M_SQRT1_2 *
vec2(disk_point.x - disk_point.y, disk_point.x + disk_point.y);
/* Finally, we compute every other odd-index 4 weights starting from the 45 degreed rotated
/* Finally, we compute every other odd-index 4 weights starting from the 45 degrees rotated
* disk point. */
vec2 rotated_polynomial = sector_center_overlap_parameter -
cross_sector_overlap_parameter * square(rotated_disk_point);

@ -5,7 +5,7 @@
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
/* A shared memory to sum the prologues using parallel reduction. See the parallel reduction shader
* "compositor_parallel_reduction.glsl" for more information. */
* `compositor_parallel_reduction.glsl` for more information. */
shared vec4 complete_prologue[gl_WorkGroupSize.x];
/* See the compute_complete_x_prologues function for a description of this shader. */
@ -24,13 +24,13 @@ void main()
/* Note that the first row of sums is the result of summing the prologues of a virtual block
* that is before the first row of blocks and we assume that those prologues are all zeros,
* so we set the sum to zero in that case. This is implemented by setting the sums of the
* first vertical workgroup to zero, white latter workgroups are summed as as usual and
 * first vertical work-group to zero, while latter work-groups are summed as usual and
* stored starting from the second row. */
imageStore(complete_x_prologues_sum_img, ivec2(y, 0), vec4(0.0));
}
/* A parallel reduction loop to sum the prologues. This is exactly the same as the parallel
* reduction loop in the shader "compositor_parallel_reduction.glsl", see that shader for
* reduction loop in the shader `compositor_parallel_reduction.glsl`, see that shader for
* more information. */
complete_prologue[gl_LocalInvocationIndex] = accumulated_color;
for (uint stride = gl_WorkGroupSize.x / 2; stride > 0; stride /= 2) {

@ -17,7 +17,7 @@ void main()
gl_Position = point_world_to_ndc(world_pos);
#ifdef CONSERVATIVE_RASTER
/* Avoid expense of geometry shader by ensuring rastered pointcloud primitive
/* Avoid expense of geometry shader by ensuring rastered point-cloud primitive
* covers at least a whole pixel. */
int i = gl_VertexID % 3;
vec2 ofs = (i == 0) ? vec2(-1.0) : ((i == 1) ? vec2(2.0, -1.0) : vec2(-1.0, 2.0));

@ -324,8 +324,8 @@ void occlusion_eval(OcclusionData data,
}
}
/* Multibounce approximation base on surface albedo.
* Page 78 in the .pdf version. */
/* Multi-bounce approximation based on surface albedo.
* Page 78 in the PDF version. */
float gtao_multibounce(float visibility, vec3 albedo)
{
if (aoBounceFac == 0.0) {
@ -460,7 +460,7 @@ float ambient_occlusion_eval(vec3 normal,
const float inverted,
const float sample_count)
{
/* Avoid multiline define causing compiler issues. */
/* Avoid multi-line define causing compiler issues. */
/* clang-format off */
#if defined(GPU_FRAGMENT_SHADER) && (defined(MESH_SHADER) || defined(HAIR_SHADER)) && !defined(DEPTH_SHADER) && !defined(VOLUMETRICS)
/* clang-format on */

@ -78,7 +78,7 @@ vec3 F_color_blend(float eta, float fresnel, vec3 f0_color)
vec3 F_brdf_single_scatter(vec3 f0, vec3 f90, vec2 lut)
{
/* Unreal specular matching : if specular color is below 2% intensity,
* treat as shadowning */
* treat as shadowing. */
return lut.y * f90 + lut.x * f0;
}

@ -10,7 +10,7 @@
struct ClosureInputDiffuse {
vec3 N; /** Shading normal. */
vec3 albedo; /** Used for multibounce GTAO approximation. Not applied to final radiance. */
vec3 albedo; /** Used for multi-bounce GTAO approximation. Not applied to final radiance. */
};
#ifdef GPU_METAL

@ -89,7 +89,7 @@ vec2 btdf_lut(float cos_theta, float roughness, float ior)
/* Avoid harsh transition coming from ior == 1. */
float f90 = fast_sqrt(saturate(f0 / (f0_from_ior(eta_brdf) * 0.25)));
float fresnel = F_brdf_single_scatter(vec3(f0), vec3(f90), split_sum).r;
/* Setting the BTDF to one is not really important since it is only used for multiscatter
/* Setting the BTDF to one is not really important since it is only used for multi-scatter
* and it's already quite close to ground truth. */
float btdf = 1.0;
return vec2(btdf, fresnel);

@ -5,8 +5,8 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#define cocMul cocParams[0] /* distance * aperturesize * invsensorsize */
#define cocBias cocParams[1] /* aperturesize * invsensorsize */
#define cocMul cocParams[0] /* `distance * aperturesize * invsensorsize`. */
#define cocBias cocParams[1] /* `aperturesize * invsensorsize`. */
#define cocNear cocParams[2] /* Near view depths value. */
#define cocFar cocParams[3] /* Far view depths value. */
@ -45,7 +45,7 @@
/* -------------- Utils ------------- */
/* For performance on macOS, constants declared within function scope utilize constant uniform
register space rather than per-thread, reducing spill and incrasing
register space rather than per-thread, reducing spill and increasing
thread execution width - and thus performance */
#define DEFINE_DOF_QUAD_OFFSETS \
const vec2 quad_offsets[4] = vec2[4]( \

@ -7,7 +7,7 @@
* convolution and in-focus fields.
*
* The halfres gather methods are fast but lack precision for small CoC areas. To fix this we
* do a bruteforce gather to have a smooth transition between in-focus and defocus regions.
* do a brute-force gather to have a smooth transition between in-focus and defocus regions.
*/
#pragma BLENDER_REQUIRE(common_utiltex_lib.glsl)
@ -51,7 +51,7 @@ void dof_slight_focus_gather(float radius, out vec4 out_color, out float out_wei
pair_data[i].coc = dof_coc_from_zdepth(depth);
pair_data[i].dist = ring_dist;
#ifdef DOF_BOKEH_TEXTURE
/* Contains subpixel distance to bokeh shape. */
/* Contains sub-pixel distance to bokeh shape. */
pair_data[i].dist = texelFetch(bokehLut, sample_offset + DOF_MAX_SLIGHT_FOCUS_RADIUS, 0).r;
#endif
pair_data[i].coc = clamp(pair_data[i].coc, -bokehMaxSize, bokehMaxSize);

@ -58,9 +58,9 @@ vec2 sample_weights(float center_depth,
vec4 decode_velocity(vec4 velocity)
{
velocity = velocity * 2.0 - 1.0;
/* Needed to match cycles. Can't find why... (fclem) */
/* NOTE(@fclem): Needed to match cycles. Can't find why. */
velocity *= 0.5;
/* Transpose to pixelspace. */
/* Transpose to pixel-space. */
velocity *= viewportSize.xyxy;
return velocity;
}

@ -80,7 +80,7 @@ void resolve_reflection_sample(int planar_index,
float weight = bsdf * data.ray_pdf_inv;
/* Do not reuse hitpoint from planar reflections for normal reflections and vice versa. */
/* Do not reuse hit-point from planar reflections for normal reflections and vice versa. */
if ((planar_index == -1 && data.is_planar) || (planar_index != -1 && !data.is_planar)) {
return;
}
@ -253,17 +253,17 @@ void main()
if (depth == 1.0) {
#if defined(GPU_INTEL) && defined(GPU_METAL)
/* Divergent code execution (and sampling) causes corruption due to undefined
* derivative/sampling behaviour, on Intel GPUs. Using a mask factor to ensure shaders do not
* derivative/sampling behavior, on Intel GPUs. Using a mask factor to ensure shaders do not
* diverge and only the final result is masked. */
factor = 0.0f;
#else
/* Note: In the Metal API, prior to Metal 2.3, Discard is not an explicit return and can
* produce undefined behaviour. This is especially prominent with derivatives if control-flow
* produce undefined behavior. This is especially prominent with derivatives if control-flow
* divergence is present.
*
* Adding a return call eliminates undefined behaviour and a later out-of-bounds read causing
* Adding a return call eliminates undefined behavior and a later out-of-bounds read causing
* a crash on AMD platforms.
* This behaviour can also affect OpenGL on certain devices. */
* This behavior can also affect OpenGL on certain devices. */
discard;
return;
#endif

@ -7,7 +7,7 @@
void main()
{
/* Constant array moved inside function scope.
* Minimises local register allocation in MSL. */
* Minimizes local register allocation in MSL. */
const vec2 pos[6] = vec2[6](vec2(-1.0, -1.0),
vec2(1.0, -1.0),
vec2(-1.0, 1.0),
@ -27,5 +27,5 @@ void main()
ws_location += screen_pos * sphere_size;
gl_Position = ProjectionMatrix * (ViewMatrix * vec4(ws_location, 1.0));
gl_Position.z += 0.0001; /* Small bias to let the icon draw without zfighting */
gl_Position.z += 0.0001; /* Small bias to let the icon draw without Z-fighting. */
}

@ -135,7 +135,7 @@ void main()
make_orthonormal_basis(N, T, B); /* Generate tangent space */
/* Integrating Envmap */
/* Integrating environment-map. */
float weight = 0.0;
vec3 out_radiance = vec3(0.0);
for (float i = 0; i < sampleCount; i++) {

@ -31,7 +31,7 @@ void main()
make_orthonormal_basis(N, T, B); /* Generate tangent space */
/* Integrating Envmap */
/* Integrating environment-map. */
float weight = 0.0;
vec3 out_radiance = vec3(0.0);
for (float i = 0; i < sampleCount; i++) {

@ -7,7 +7,7 @@
void main()
{
/* Constant array moved inside function scope.
* Minimises local register allocation in MSL. */
* Minimizes local register allocation in MSL. */
const vec2 pos[6] = vec2[6](vec2(-1.0, -1.0),
vec2(1.0, -1.0),
vec2(-1.0, 1.0),
@ -37,5 +37,5 @@ void main()
ws_cell_location += screen_pos * sphere_size;
gl_Position = ProjectionMatrix * (ViewMatrix * vec4(ws_cell_location, 1.0));
gl_Position.z += 0.0001; /* Small bias to let the icon draw without zfighting */
gl_Position.z += 0.0001; /* Small bias to let the icon draw without Z-fighting. */
}

@ -182,7 +182,7 @@ vec3 probe_evaluate_planar(int id, PlanarData pd, vec3 P, vec3 N, vec3 V, float
/* How far the pixel is from the plane. */
float ref_depth = 1.0; /* TODO: parameter. */
/* Compute distorded reflection vector based on the distance to the reflected object.
/* Compute distorted reflection vector based on the distance to the reflected object.
* In other words find intersection between reflection vector and the sphere center
* around point_on_plane. */
vec3 proj_ref = reflect(reflect(-V, N) * ref_depth, pd.pl_normal);

@ -49,7 +49,7 @@ float hashed_alpha_threshold(vec3 co)
/* Interpolate alpha threshold from noise at two scales. */
float x = mix(alpha.x, alpha.y, fac);
/* Pass into CDF to compute uniformly distrib threshold. */
/* Pass into CDF to compute uniformly distributed threshold. */
float a = min(fac, 1.0 - fac);
float one_a = 1.0 - a;
float denom = 1.0 / (2 * a * one_a);

@ -2,8 +2,8 @@
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/* This shader is used to add default values to the volume accum textures.
* so it looks similar (transmittance = 1, scattering = 0) */
/* This shader is used to add default values to the volume accumulate textures.
* so it looks similar (transmittance = 1, scattering = 0). */
void main()
{
FragColor0 = vec4(0.0);

@ -35,7 +35,7 @@ class HiZBuffer {
* The last one will process the last few mip level.
*/
draw::StorageBuffer<uint4, true> atomic_tile_counter_ = {"atomic_tile_counter"};
/** Single pass recursive downsample. */
/** Single pass recursive down-sample. */
PassSimple hiz_update_ps_ = {"HizUpdate"};
/** Debug pass. */
PassSimple debug_draw_ps_ = {"HizUpdate.Debug"};

@ -47,7 +47,7 @@ enum eDebugMode : uint32_t {
*/
DEBUG_LIGHT_CULLING = 1u,
/**
* Show incorrectly downsample tiles in red.
* Show incorrectly down-sample tiles in red.
*/
DEBUG_HIZ_VALIDATION = 2u,
/**

@ -293,8 +293,8 @@ void ambient_occlusion_eval(OcclusionData data,
}
}
/* Multibounce approximation base on surface albedo.
* Page 78 in the .pdf version. */
/* Multi-bounce approximation based on surface albedo.
* Page 78 in the PDF version. */
float ambient_occlusion_multibounce(float visibility, vec3 albedo)
{
if (!AO_MULTI_BOUNCE) {

@ -33,10 +33,10 @@ float bxdf_ggx_D_opti(float NH, float a2)
float bxdf_ggx_smith_G1_opti(float NX, float a2)
{
/* Using Brian Karis approach and refactoring by NX/NX
* this way the (2*NL)*(2*NV) in G = G1(V) * G1(L) gets canceled by the brdf denominator 4*NL*NV
* Rcp is done on the whole G later.
* Note that this is not convenient for the transmission formula. */
/* return 2 / (1 + sqrt(1 + a2 * (1 - NX*NX) / (NX*NX) ) ); /* Reference function. */
* this way the `(2*NL)*(2*NV)` in `G = G1(V) * G1(L)` gets canceled by the BRDF denominator
* `4*NL*NV` Rcp is done on the whole G later. Note that this is not convenient for the
* transmission formula. */
// return 2 / (1 + sqrt(1 + a2 * (1 - NX*NX) / (NX*NX) ) ); /* Reference function. */
return NX + sqrt(NX * (NX - NX * a2) + a2);
}
@ -54,7 +54,7 @@ float bsdf_ggx(vec3 N, vec3 L, vec3 V, float roughness)
float D = bxdf_ggx_D_opti(NH, a2);
/* Denominator is canceled by G1_Smith */
/* bsdf = D * G / (4.0 * NL * NV); /* Reference function. */
// bsdf = D * G / (4.0 * NL * NV); /* Reference function. */
/* NL term to fit Cycles. NOTE(fclem): Not sure what it */
return NL * a2 / (D * G);
}
@ -75,7 +75,7 @@ float btdf_ggx(vec3 N, vec3 L, vec3 V, float roughness, float eta)
float G = bxdf_ggx_smith_G1_opti(NV, a2) * bxdf_ggx_smith_G1_opti(NL, a2);
float D = bxdf_ggx_D_opti(NH, a2);
/* btdf = abs(VH*LH) * ior^2 * D * G(V) * G(L) / (Ht2 * NV) */
/* `btdf = abs(VH*LH) * ior^2 * D * G(V) * G(L) / (Ht2 * NV)`. */
return abs(VH * LH) * sqr(eta) * 4.0 * a2 / (D * G * (Ht2 * NV));
}

@ -3,7 +3,7 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
/**
* Holefill pass: Gather background parts where foreground is present.
* Hole-fill pass: Gather background parts where foreground is present.
*
* Using the min&max CoC tile buffer, we select the best appropriate method to blur the scene
*color. A fast gather path is taken if there is not many CoC variation inside the tile.

@ -150,7 +150,7 @@ void main()
rect_fg.offset = offset;
/* Negate extent to flip the sprite. Mimics optical phenomenon. */
rect_fg.half_extent = -half_extent;
/* NOTE: Since we fliped the quad along (1,-1) line, we need to also swap the (1,1) and
/* NOTE: Since we flipped the quad along (1,-1) line, we need to also swap the (1,1) and
* (0,0) values so that quad_offsets is in the right order in the vertex shader. */
/* Circle of Confusion absolute radius in halfres pixels. */

@ -7,7 +7,7 @@
* slight defocus convolution and in-focus fields.
*
* The halfres gather methods are fast but lack precision for small CoC areas.
* To fix this we do a bruteforce gather to have a smooth transition between
* To fix this we do a brute-force gather to have a smooth transition between
* in-focus and defocus regions.
*/

@ -116,7 +116,7 @@ float dof_luma_weight(float luma)
float dof_bilateral_weight(float reference_coc, float sample_coc)
{
/* NOTE: The difference between the cocs should be inside a abs() function,
/* NOTE: The difference between the COCS should be inside a abs() function,
* but we follow UE4 implementation to improve how dithered transparency looks (see slide 19).
* Effectively bleed background into foreground.
* Compared to dof_bilateral_coc_weights() this saturates as 2x the reference CoC. */

@ -8,7 +8,7 @@
void main()
{
/* Constant array moved inside function scope.
* Minimises local register allocation in MSL. */
* Minimizes local register allocation in MSL. */
const vec2 pos[6] = vec2[6](vec2(-1.0, -1.0),
vec2(1.0, -1.0),
vec2(-1.0, 1.0),
@ -38,6 +38,6 @@ void main()
vec3 vP = (ViewMatrix * vec4(ws_cell_pos, 1.0)).xyz + vs_offset;
gl_Position = ProjectionMatrix * vec4(vP, 1.0);
/* Small bias to let the icon draw without zfighting. */
/* Small bias to let the icon draw without Z-fighting. */
gl_Position.z += 0.0001;
}

@ -451,7 +451,7 @@ void film_store_combined(
vec4 color_src, color_dst;
float weight_src, weight_dst;
/* Undo the weighting to get final spatialy-filtered color. */
/* Undo the weighting to get final spatially-filtered color. */
color_src = color / color_weight;
if (film_buf.use_reprojection) {

@ -3,7 +3,7 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
/**
* G-buffer: Packing and upacking of G-buffer data.
* G-buffer: Packing and unpacking of G-buffer data.
*
* See #GBuffer for a breakdown of the G-buffer layout.
*/
@ -72,7 +72,7 @@ vec4 gbuffer_color_pack(vec3 color)
{
float max_comp = max(color.x, max(color.y, color.z));
/* Store 2bit exponent inside Alpha. Allows values up to 8 with some color degradation.
* Above 8, the result will be clampped when writing the data to the output buffer. */
* Above 8, the result will be clamped when writing the data to the output buffer. */
float exponent = (max_comp > 1) ? ((max_comp > 2) ? ((max_comp > 4) ? 3.0 : 2.0) : 1.0) : 0.0;
/* TODO(fclem): Could try dithering to avoid banding artifacts on higher exponents. */
return vec4(color / exp2(exponent), exponent / 3.0);

@ -3,7 +3,7 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
/**
* Debug Shader outputting a gradient of orange - white - blue to mark culling hotspots.
* Debug Shader outputting a gradient of orange - white - blue to mark culling hot-spots.
* Green pixels are error pixels that are missing lights from the culling pass (i.e: when culling
* pass is not conservative enough).
*/

@ -169,7 +169,7 @@ void main()
intersect_tile = intersect_tile && intersect(tile, pyramid);
break;
}
/* Fallthrough to the hemispheric case. */
/* Fall-through to the hemispheric case. */
case LIGHT_RECT:
case LIGHT_ELLIPSE: {
vec3 v000 = vP - v_right * radius - v_up * radius;

@ -184,7 +184,7 @@ void lightprobe_eval(ClosureDiffuse diffuse,
inout vec3 out_specular)
{
/* NOTE: Use the diffuse normal for biasing the probe sampling location since it is smoother than
* geometric normal. Could also try to use interp.N. */
* geometric normal. Could also try to use `interp.N`. */
SphericalHarmonicL1 irradiance = lightprobe_irradiance_sample(
irradiance_atlas_tx, P, V, diffuse.N, true);

@ -3,8 +3,8 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
/**
* Load an input lightgrid cache texture into the atlas.
* Takes care of dilating valid lighting into invalid samples and composite lightprobes.
* Load an input light-grid cache texture into the atlas.
* Takes care of dilating valid lighting into invalid samples and composite light-probes.
*
* Each thread group will load a brick worth of data and add the needed padding texels.
*/
@ -60,7 +60,7 @@ void main()
float validity = texelFetch(validity_tx, input_coord, 0).r;
if (validity > dilation_threshold) {
/* Grid sample is valid. Simgle load. */
/* Grid sample is valid. Single load. */
sh_local = irradiance_load(input_coord);
}
else {

@ -3,7 +3,7 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
/**
* For every irradiance probe sample, check if close to a surounding surfel and try to offset the
* For every irradiance probe sample, check if close to a surrounding surfel and try to offset the
* irradiance sample position. This is similar to the surfel ray but we do not actually transport
* the light.
*

@ -3,7 +3,7 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
/**
* For every irradiance probe sample, compute the incomming radiance from both side.
* For every irradiance probe sample, compute the incoming radiance from both sides.
* This is the same as the surfel ray but we do not actually transport the light, we only capture
* the irradiance as spherical harmonic coefficients.
*

@ -33,7 +33,7 @@ void main()
vec2 atlas_size = vec2(SHADOW_TILEMAP_RES);
{
/* Simulate a "2D" plane crossing the frustum diagonaly. */
/* Simulate a "2D" plane crossing the frustum diagonally. */
vec3 lP0 = vec3(-1.0, 0.0, -1.0);
vec3 lP1 = vec3(0.5, 0.0, -0.5);
vec3 lTg = normalize(lP1 - lP0);
@ -47,7 +47,7 @@ void main()
shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 2), expect * 4.0, 1e-4);
}
{
/* Simulate a "2D" plane crossing the near plane at the center diagonaly. */
/* Simulate a "2D" plane crossing the near plane at the center diagonally. */
vec3 lP0 = vec3(-1.0, 0.0, -1.0);
vec3 lP1 = vec3(0.0, 0.0, -0.5);
vec3 lTg = normalize(lP1 - lP0);

@ -22,7 +22,7 @@
const int gather_sample_count = 8;
/* Converts uv velocity into pixel space. Assumes velocity_tx is the same resolution as the
* target post-fx framebuffer. */
* target post-FX frame-buffer. */
vec4 motion_blur_sample_velocity(sampler2D velocity_tx, vec2 uv)
{
/* We can load velocity without velocity_resolve() since we resolved during the flatten pass. */

@ -224,7 +224,7 @@ float ambient_occlusion_eval(vec3 normal,
const float inverted,
const float sample_count)
{
/* Avoid multiline preprocesor conditionals.
/* Avoid multi-line pre-processor conditionals.
* Some drivers don't handle them correctly. */
// clang-format off
#if defined(GPU_FRAGMENT_SHADER) && defined(MAT_AMBIENT_OCCLUSION) && !defined(MAT_DEPTH) && !defined(MAT_SHADOW)
@ -300,7 +300,7 @@ vec3 F_brdf_multi_scatter(vec3 f0, vec3 f90, vec2 lut)
float Ems = 1.0 - Ess;
vec3 Favg = f0 + (1.0 - f0) / 21.0;
vec3 Fms = FssEss * Favg / (1.0 - (1.0 - Ess) * Favg);
/* We don't do anything special for diffuse surfaces because the principle bsdf
/* We don't do anything special for diffuse surfaces because the Principled BSDF
* does not care about energy conservation of the specular layer for dielectrics. */
return FssEss + Fms * Ems;
}
@ -329,7 +329,7 @@ vec2 btdf_lut(float cos_theta, float roughness, float ior)
/* Avoid harsh transition coming from ior == 1. */
float f90 = fast_sqrt(saturate(f0 / (F0_from_ior(eta_brdf) * 0.25)));
float fresnel = F_brdf_single_scatter(vec3(f0), vec3(f90), split_sum).r;
/* Setting the BTDF to one is not really important since it is only used for multiscatter
/* Setting the BTDF to one is not really important since it is only used for multi-scatter
* and it's already quite close to ground truth. */
float btdf = 1.0;
return vec2(btdf, fresnel);

@ -7,8 +7,8 @@
*
* Dispatched at fullres using a tile list.
*
* Input: Temporaly Stabilized Radiance, Stabilized Variance
* Ouput: Denoised radiance
* Input: Temporally Stabilized Radiance, Stabilized Variance
* Output: Denoised radiance
*
* Following "Stochastic All The Things: Raytracing in Hybrid Real-Time Rendering"
* by Tomasz Stachowiak
@ -51,7 +51,7 @@ float bilateral_normal_weight(vec3 center_N, vec3 sample_N)
return weight;
}
/* In order to remove some more fireflies, "tonemap" the color samples during the accumulation. */
/* In order to remove some more fireflies, "tone-map" the color samples during the accumulation. */
vec3 to_accumulation_space(vec3 color)
{
return color / (1.0 + dot(color, vec3(1.0)));

@ -6,7 +6,7 @@
* Spatial ray reuse. Denoise raytrace result using ratio estimator.
*
* Input: Ray direction * hit time, Ray radiance, Ray hit depth
* Ouput: Ray radiance reconstructed, Mean Ray hit depth, Radiance Variance
* Output: Ray radiance reconstructed, Mean Ray hit depth, Radiance Variance
*
* Shader is specialized depending on the type of ray to denoise.
*
@ -164,7 +164,7 @@ void main()
float filter_size_factor = saturate(closure.roughness * 8.0);
sample_count = 1u + uint(15.0 * filter_size_factor + 0.5);
/* NOTE: filter_size should never be greater than twice RAYTRACE_GROUP_SIZE. Otherwise, the
* reconstruction can becomes ill defined since we don't know if further tiles are valids. */
* reconstruction can become ill-defined since we don't know if further tiles are valid. */
filter_size = 12.0 * sqrt(filter_size_factor);
if (raytrace_buf.resolution_scale > 1) {
/* Filter at least 1 trace pixel to fight the undersampling. */

@ -7,8 +7,8 @@
*
* Dispatched at fullres using a tile list.
*
* Input: Spatialy denoised radiance, Variance, Hit depth
* Ouput: Stabilized Radiance, Stabilized Variance
* Input: Spatially denoised radiance, Variance, Hit depth
* Output: Stabilized Radiance, Stabilized Variance
*
* Following "Stochastic All The Things: Raytracing in Hybrid Real-Time Rendering"
* by Tomasz Stachowiak
@ -197,7 +197,7 @@ void main()
history_radiance.rgb = colorspace_scene_linear_from_YCoCg(history_radiance.rgb);
/* Blend history with new radiance. */
float mix_fac = (history_radiance.w > 1e-3) ? 0.97 : 0.0;
/* Reduce blend factor to improve low rougness reflections. Use variance instead for speed. */
/* Reduce blend factor to improve low roughness reflections. Use variance instead for speed. */
mix_fac *= mix(0.75, 1.0, saturate(in_variance * 20.0));
vec3 out_radiance = mix(safe_color(in_radiance), safe_color(history_radiance.rgb), mix_fac);
/* This is feedback next frame as radiance_history_tx. */

@ -68,7 +68,7 @@ void main()
#if defined(RAYTRACE_REFRACT)
if (gbuffer_is_refraction(gbuffer_packed) && closure_active != CLOSURE_REFRACTION) {
/* Discard incorect rays. */
/* Discard incorrect rays. */
pdf = 0.0;
}
#endif

@ -78,7 +78,7 @@ void main()
ray_view);
if (hit) {
/* Evaluate radiance at hitpoint. */
/* Evaluate radiance at hit-point. */
// vec2 hit_uv = get_uvs_from_view(ray.origin + ray.direction);
// radiance = textureLod(radiance_tx, hit_uv, 0.0).rgb;
@ -93,7 +93,7 @@ void main()
hit_time = length(ray_view.direction);
}
else {
/* Fallback to nearest lightprobe. */
/* Fallback to nearest light-probe. */
int closest_probe_id = reflection_probes_find_closest(P);
ReflectionProbeData probe = reflection_probe_buf[closest_probe_id];
radiance = reflection_probes_sample(ray.direction, 0.0, probe).rgb;

@ -69,7 +69,7 @@ vec4 reflection_probe_eval(ClosureReflection reflection,
/* Clamped brightness. */
/* For artistic freedom this should be read from the scene/reflection probe.
* Note: Eevee-legacy read the firefly_factor from gi_glossy_clamp.
* Note: EEVEE-legacy read the firefly_factor from gi_glossy_clamp.
* Note: Firefly removal should be moved to a different shader and also take SSR into
* account.*/
float luma = max(1e-8, max_v3(l_col));

@ -96,8 +96,8 @@ void main()
vec3 ls_view_direction = normalize(point_world_to_object(interp.P) - ls_near_plane);
/* TODO (Miguel Pozo): We could try to ray-cast against the non-inflated bounds first,
* and fallback to the inflated ones if theres no hit.
* The inflated bounds can cause unnecesary extra steps. */
* and fall back to the inflated ones if there is no hit.
* The inflated bounds can cause unnecessary extra steps. */
float ls_near_box_t = ray_aabb(
ls_near_plane, ls_view_direction, interp_flat.ls_aabb_min, interp_flat.ls_aabb_max);
vec3 ls_near_box = ls_near_plane + ls_view_direction * ls_near_box_t;

@ -313,7 +313,7 @@ void main()
light._clipmap_origin_x = 0.0;
light._clipmap_origin_y = 0.0;
/* Position has no effect for directionnal. */
/* Position has no effect for directional. */
vec3 lP = vec3(0.0);
vec2 atlas_size = vec2(SHADOW_TILEMAP_RES);
{
@ -370,7 +370,7 @@ void main()
vec2 atlas_size = vec2(SHADOW_TILEMAP_RES);
{
/* Simulate a "2D" plane crossing the frustum diagonaly. */
/* Simulate a "2D" plane crossing the frustum diagonally. */
vec3 lP0 = vec3(-1.0, 0.0, -1.0);
vec3 lP1 = vec3(0.5, 0.0, -0.5);
vec3 lTg = normalize(lP1 - lP0);
@ -384,7 +384,7 @@ void main()
shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 2), expect * 4.0, 1e-4);
}
{
/* Simulate a "2D" plane crossing the near plane at the center diagonaly. */
/* Simulate a "2D" plane crossing the near plane at the center diagonally. */
vec3 lP0 = vec3(-1.0, 0.0, -1.0);
vec3 lP1 = vec3(0.0, 0.0, -0.5);
vec3 lTg = normalize(lP1 - lP0);

@ -64,7 +64,7 @@ void main()
/* Final result. Min/Max of the whole dispatch. */
atomicMin(light_buf[l_idx].clip_near, global_min);
atomicMax(light_buf[l_idx].clip_far, global_max);
/* TODO(fclem): This feel unecessary but we currently have no indexing from
/* TODO(fclem): This feels unnecessary but we currently have no indexing from
* tilemap to lights. This is because the lights are selected by culling phase. */
for (int i = light.tilemap_index; i <= light_tilemap_max_get(light); i++) {
int index = tilemaps_buf[i].clip_data_index;

@ -52,7 +52,7 @@ void main()
bool near_changed = clip_near_new != clip_data.clip_near_stored;
bool far_changed = clip_far_new != clip_data.clip_far_stored;
directional_range_changed = near_changed || far_changed;
/* NOTE(fclem): This assumes clip near/far are computed each time the init phase runs. */
/* NOTE(fclem): This assumes clip near/far are computed each time the initial phase runs. */
tilemaps_clip_buf[clip_index].clip_near_stored = clip_near_new;
tilemaps_clip_buf[clip_index].clip_far_stored = clip_far_new;
/* Reset for next update. */
@ -98,7 +98,7 @@ void main()
if (thread_active) {
int tile_store = shadow_tile_offset(tile_co, tilemap.tiles_index, lod);
if ((tile_load != tile_store) && flag_test(tile, SHADOW_IS_CACHED)) {
/* Inlining of shadow_page_cache_update_tile_ref to avoid buffer depedencies. */
/* Inlining of shadow_page_cache_update_tile_ref to avoid buffer dependencies. */
pages_cached_buf[shadow_tile_unpack(tile).cache_index].y = tile_store;
}
tiles_buf[tile_store] = tile;

@ -199,7 +199,7 @@ vec4 spherical_harmonics_L2_evaluate(vec3 direction, SphericalHarmonicBandL2 L2)
SphericalHarmonicBandL0 spherical_harmonics_L0_rotate(mat3x3 rotation, SphericalHarmonicBandL0 L0)
{
/* L0 band being a constant function (i.e: there is no directionallity) there is nothing to
/* L0 band being a constant function (i.e: there is no directionality) there is nothing to
* rotate. This is a no-op. */
return L0;
}
@ -344,11 +344,9 @@ void spherical_harmonics_pack(SphericalHarmonicL1 sh,
SphericalHarmonicL1 spherical_harmonics_triple_product(SphericalHarmonicL1 a,
SphericalHarmonicL1 b)
{
/**
* Addapted from :
/* Adapted from:
* "Code Generation and Factoring for Fast Evaluation of Low-order Spherical Harmonic Products
* and Squares" Function "SH_product_3"
*/
* and Squares" Function "SH_product_3". */
SphericalHarmonicL1 sh;
sh.L0.M0 = 0.282094792 * a.L0.M0 * b.L0.M0;

@ -40,7 +40,7 @@ vec3 burley_eval(vec3 d, float r)
/* NOTE:
* - Surface albedo is applied at the end.
* - This is normalized diffuse model, so the equation is multiplied
* by 2*pi, which also matches cdf().
* by 2*pi, which also matches `cdf()`.
*/
return (exp_r_d + exp_r_3_d) / (4.0 * d);
}

@ -6,7 +6,7 @@
* Virtual Shadow map output.
*
* Meshes are rasterized onto an empty framebuffer. Each generated fragment then checks which
* virtual page it is supposed to go and load the physical page adress.
* virtual page it is supposed to go and load the physical page address.
* If a physical page exists, we then use atomicMin to mimic a less-than depth test and write to
* the destination texel.
*/

@ -27,7 +27,7 @@ void main()
float ray_distance;
int list_index = surfel_list_index_get(
list_info_buf.ray_grid_size, surfel_buf[surfel_index].position, ray_distance);
/* Do separate assignement to avoid reference to buffer in arguments which is tricky to cross
/* Do separate assignment to avoid reference to buffer in arguments which is tricky to cross
* compile. */
surfel_buf[surfel_index].ray_distance = ray_distance;
/* NOTE: We only need to init the `list_start_buf` to -1 for the whole list to be valid since

@ -3,7 +3,7 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
/**
* For every surfel, compute the incomming radiance from both side.
* For every surfel, compute the incoming radiance from both sides.
* For that, walk the ray surfel linked-list and gather the light from the neighbor surfels.
* This shader is dispatched for a random ray in a uniform hemisphere as we evaluate the
* radiance in both directions.

@ -31,7 +31,7 @@ float transparency_hashed_alpha_threshold(float hash_scale, float hash_offset, v
float fac = fract(log2(pix_scale));
/* Interpolate alpha threshold from noise at two scales. */
float x = mix(alpha.x, alpha.y, fac);
/* Pass into CDF to compute uniformly distrib threshold. */
/* Pass into CDF to compute uniformly distributed threshold. */
float a = min(fac, 1.0 - fac);
float one_a = 1.0 - a;
float denom = 1.0 / (2 * a * one_a);

@ -34,12 +34,12 @@ void main()
/* Revealage, how much light passes through. */
/* Average for alpha channel. */
out_reveal.a = clamp(dot(out_reveal.rgb, vec3(0.333334)), 0.0, 1.0);
/* Color buf is already premultiplied. Just add it to the color. */
/* Color buffer is already premultiplied. Just add it to the color. */
/* Add the alpha. */
out_color.a = 1.0 - out_reveal.a;
if (onlyAlpha) {
/* Special case in wireframe xray mode. */
/* Special case in wire-frame X-ray mode. */
out_color = vec4(0.0);
out_reveal.rgb = out_reveal.aaa;
}

@ -51,7 +51,7 @@ void blend_mode_output(
break;
}
case MODE_HARDLIGHT_SECOND_PASS:
/* Reminder: Blending func is additive blend (dst.rgba + src.rgba). */
/* Reminder: Blending func is additive blend `(dst.rgba + src.rgba)`. */
color = mix(vec4(0.5), color, color.a * opacity);
frag_revealage = frag_color = (-1.0 + 2.0 * color) * step(-0.5, -color);
frag_revealage = max(vec4(0.0), frag_revealage);

@ -41,7 +41,7 @@ void main()
* to avoid glitches if one end is behind the camera origin (in persp). */
float clip_dist = (drw_view.winmat[3][3] == 0.0) ?
-1e-7 :
1e20; /* hardcoded, -1e-8 is giving gliches. */
1e20; /* hard-coded, -1e-8 is giving glitches. */
vec3 bvec = v1.xyz - v0.xyz;
vec3 clip_pt = v0.xyz + bvec * ((v0.z - clip_dist) / -bvec.z);
if (v0.z > clip_dist) {

@ -8,11 +8,11 @@
vec3 weight_to_rgb(float t)
{
if (t < 0.0) {
/* Minimum color, grey */
/* Minimum color, gray */
return vec3(0.25, 0.25, 0.25);
}
else if (t > 1.0) {
/* Error color */
/* Error color. */
return vec3(1.0, 0.0, 1.0);
}
else {

@ -85,7 +85,7 @@ void main()
#elif defined(FACEDOT)
finalColor = EDIT_MESH_facedot_color(norAndFlag.w);
/* Bias Facedot Z position in clipspace. */
/* Bias Face-dot Z position in clip-space. */
gl_Position.z -= (drw_view.winmat[3][3] == 0.0) ? 0.00035 : 1e-6;
gl_PointSize = sizeFaceDot;

@ -9,7 +9,7 @@
* The formula for the area uses inverse trig function and is quite complex. Instead,
* we approximate it by using the smooth-step function and a 1.05 factor to the disc radius.
*/
#define M_1_SQRTPI 0.5641895835477563 /* 1/sqrt(pi) */
#define M_1_SQRTPI 0.5641895835477563 /* `1/sqrt(pi)`. */
#define DISC_RADIUS (M_1_SQRTPI * 1.05)
#define GRID_LINE_SMOOTH_START (0.5 - DISC_RADIUS)
#define GRID_LINE_SMOOTH_END (0.5 + DISC_RADIUS)

@ -123,8 +123,8 @@ void main()
do_vertex_shader(out_pos0, base_vertex_id, ssPos[0], finalColor_geom[0]);
do_vertex_shader(out_pos1, base_vertex_id + 1, ssPos[1], finalColor_geom[1]);
/* Geometry shader alternative -- Output is trianglelist consisting of 6 vertices.
* Each vertex shader invocation is one vertex in the output primitive, so outptut
/* Geometry shader alternative -- Output is triangle-list consisting of 6 vertices.
* Each vertex shader invocation is one vertex in the output primitive, so output
* required ID. */
vec2 t;
vec2 edge_dir = compute_dir(ssPos[0], ssPos[1]) * sizeViewportInv;

@ -243,7 +243,7 @@ void main()
float ref_depth = textureLod(outlineDepth, depth_uv, 0.0).r;
float scene_depth = textureLod(sceneDepth, depth_uv, 0.0).r;
/* Avoid bad cases of zfighting for occlusion only. */
/* Avoid bad cases of Z-fighting for occlusion only. */
const float epsilon = 3.0 / 8388608.0;
bool occluded = (ref_depth > scene_depth + epsilon);

@ -31,7 +31,7 @@ float contours(float value, float steps, float width_px, float max_rel_width, fl
float base_alpha = 1.0 - max(offset * hi_bias, -offset) / min(max_rel_width, rel_width);
/* Line fadeout when too thin in screen space. */
/* Line fade-out when too thin in screen-space. */
float rel_fade_width = fade_width_px * rel_gradient;
float fade_alpha = (max_rel_width - rel_min_width) / (rel_fade_width - rel_min_width);

@ -40,7 +40,7 @@ float decode_signed_coc(vec2 cocs)
/**
* ----------------- STEP 0 ------------------
* Custom Coc aware downsampling. Half res pass.
* Custom COC aware down-sampling. Half res pass.
*/
#ifdef PREPARE
@ -83,7 +83,7 @@ void main()
/**
* ----------------- STEP 0.5 ------------------
* Custom Coc aware downsampling. Quarter res pass.
* Custom COC aware down-sampling. Quarter res pass.
*/
#ifdef DOWNSAMPLE

@ -272,8 +272,8 @@ vec4 gpencil_vertex(vec4 viewport_size,
float uv_rot = gpencil_decode_uvrot(uvrot1);
float rot_sin = sqrt(max(0.0, 1.0 - uv_rot * uv_rot)) * sign(uv_rot);
float rot_cos = abs(uv_rot);
/* TODO(@fclem): Optimize these 2 matrix mul into one by only having one rotation angle and
* using a cosine approximation. */
/* TODO(@fclem): Optimize these 2 matrix multiply into one by only having one rotation angle
* and using a cosine approximation. */
x_axis = mat2(rot_cos, -rot_sin, rot_sin, rot_cos) * x_axis;
x_axis = mat2(alignment_rot.x, -alignment_rot.y, alignment_rot.y, alignment_rot.x) * x_axis;
/* Rotate 90 degrees counter-clockwise. */

@ -279,8 +279,8 @@ vec4 gpencil_vertex(vec4 viewport_size,
float uv_rot = gpencil_decode_uvrot(uvrot1);
float rot_sin = sqrt(max(0.0, 1.0 - uv_rot * uv_rot)) * sign(uv_rot);
float rot_cos = abs(uv_rot);
/* TODO(@fclem): Optimize these 2 matrix mul into one by only having one rotation angle and
* using a cosine approximation. */
/* TODO(@fclem): Optimize these 2 matrix multiply into one by only having one rotation angle
* and using a cosine approximation. */
x_axis = mat2(rot_cos, -rot_sin, rot_sin, rot_cos) * x_axis;
x_axis = mat2(alignment_rot.x, -alignment_rot.y, alignment_rot.y, alignment_rot.x) * x_axis;
/* Rotate 90 degrees counter-clockwise. */

@ -504,7 +504,7 @@ bool intersect(Cone cone, Sphere sphere)
(cone.angle_cos * sphere_cos -
cone_aperture_sin * sphere_sin);
/* Comparing cosines instead of angles since we are interested
* only in the monotonic region [0 .. M_PI / 2]. This saves costly acos() calls. */
* only in the monotonic region [0 .. M_PI / 2]. This saves costly `acos()` calls. */
bool intersects = (cone_sphere_center_cos >= cone_sphere_angle_sum_cos);
return intersects;

@ -57,7 +57,7 @@ layout(std430, binding = 8) writeonly buffer outputFVarData
#elif defined(FDOTS_EVALUATION)
/* For face dots, we build the position, normals, and index buffers in one go. */
/* vec3 is padded to vec4, but the format used for fdots does not have any padding. */
/* vec3 is padded to vec4, but the format used for face-dots does not have any padding. */
struct FDotVert {
float x, y, z;
};

@ -195,7 +195,7 @@ layout(std140) uniform modelBlock
/* Intel GPU seems to suffer performance impact when the model matrix is in UBO storage.
* So for now we just force using the legacy path. */
/* Note that this is also a workaround of a problem on OSX (AMD or NVIDIA)
* and older amd driver on windows. */
* and older AMD driver on windows. */
uniform mat4 ModelMatrix;
uniform mat4 ModelMatrixInverse;
# endif /* USE_GPU_SHADER_CREATE_INFO */

@ -616,7 +616,7 @@ static Array<bool> get_points_to_dissolve(bke::CurvesGeometry &curves, const Dis
continue;
}
/* `between` is just `unselect` but with the first and last segments not geting
/* `between` is just `unselect` but with the first and last segments not getting
* dissolved. */
if (mode != DissolveMode::BETWEEN) {
continue;
@ -629,7 +629,7 @@ static Array<bool> get_points_to_dissolve(bke::CurvesGeometry &curves, const Dis
const IndexRange first_range = deselection_ranges.first().shift(points.first());
const IndexRange last_range = deselection_ranges.last().shift(points.first());
/* Ranges should only be fill if the first/last point matchs the start/end point
/* Ranges should only be fill if the first/last point matches the start/end point
* of the segment. */
if (first_range.first() == points.first()) {
points_to_keep.slice(first_range).fill(true);

@ -41,7 +41,7 @@
# define DIMS 3
#endif
/* Position dimensionality for threadgroup. */
/* Position dimensionality for thread-group. */
#if DIMS == 1
# define POSITION_TYPE uint
#elif DIMS == 2
@ -66,19 +66,19 @@ template<> uint denormalize<uint>(float val)
return uint(float(DEPTH_SCALE_FACTOR) * val);
}
/* Float to other type case. */
/* `float` to other type case. */
template<typename T> T convert_type(float type)
{
return T(type);
}
/* Uint to other types. */
/* `uint` to other types. */
template<typename T> T convert_type(uint type)
{
return T(type);
}
/* Int to other types. */
/* `int` to other types. */
template<typename T> T convert_type(int type)
{
return T(type);
@ -213,4 +213,4 @@ kernel void compute_texture_read(constant TextureReadParams &params [[buffer(0)]
output_data[index + i] = convert_type<OUTPUT_DATA_TYPE>(0);
}
#endif
}
}

@ -33,7 +33,7 @@ using namespace metal;
# define DIMS 3
#endif
/* Position dimensionality for threadgroup. */
/* Position dimensionality for thread-group. */
#if DIMS == 1
# define POSITION_TYPE uint
#elif DIMS == 2
@ -150,4 +150,4 @@ kernel void compute_texture_update(constant TextureUpdateParams &params [[buffer
output, uint2(params.offset[0], params.offset[1]) + uint2(xx, yy), params.offset[2] + layer);
#endif
}
}

@ -4,7 +4,7 @@
/* Float Math */
/* WORKAROUND: To be removed once we port all code to use gpu_shader_math_base_lib.glsl. */
/* WORKAROUND: To be removed once we port all code to use `gpu_shader_math_base_lib.glsl`. */
#ifndef GPU_SHADER_MATH_BASE_LIB_GLSL
float safe_divide(float a, float b)
@ -30,7 +30,7 @@ float compatible_pow(float x, float y)
return 1.0;
}
/* glsl pow doesn't accept negative x */
/* GLSL pow doesn't accept negative x. */
if (x < 0.0) {
if (mod(-y, 2.0) == 0.0) {
return pow(-x, y);

@ -92,7 +92,7 @@ vec2 do_tria()
vec2 point_pos[4] = vec2[4](vec2(-1.0, -1.0), vec2(-1.0, 1.0), vec2(1.0, -1.0), vec2(1.0, 1.0));
vec2 point_uvs[4] = vec2[4](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 0.0), vec2(1.0, 1.0));
/* We reuse the SDF roundbox rendering of widget to render the tria shapes.
/* We reuse the SDF round-box rendering of widget to render the tria shapes.
* This means we do clever tricks to position the rectangle the way we want using
* the 2 triangles uvs. */
if (triaType == 0.0) {

@ -19,7 +19,7 @@
#define INNER_FLAG uint(1 << 10) /* is inner vert */
/* radi and rad per corner */
/* Radii and rad per corner. */
#define recti parameters[0]
#define rect parameters[1]
#define radsi parameters[2].x

@ -14,7 +14,7 @@ vec2 toScreenSpace(vec4 vertex)
return vec2(vertex.xy / vertex.w) * gpencil_stroke_data.viewport;
}
/* get zdepth value */
/* Get Z-depth value. */
float getZdepth(vec4 point)
{
if (gpencil_stroke_data.xraymode == GP_XRAY_FRONT) {

@ -20,7 +20,7 @@ vec2 toScreenSpace(vec4 in_vertex)
return vec2(in_vertex.xy / in_vertex.w) * gpencil_stroke_data.viewport;
}
/* get zdepth value */
/* Get Z-depth value. */
float getZdepth(vec4 point)
{
if (gpencil_stroke_data.xraymode == GP_XRAY_FRONT) {

@ -5,14 +5,14 @@
/**
* Draw the icons, leaving a semi-transparent rectangle on top of the icon.
*
* The top-left corner of the rectangle is rounded and drawned with anti-alias.
* The top-left corner of the rectangle is rounded and drawn with anti-alias.
* The anti-alias is done by transitioning from the outer to the inner radius of
* the rounded corner, and the rectangle sides.
*/
void main()
{
/* Sample texture with LOD BIAS. Used instead of custom lod bias in GPU_SAMPLER_CUSTOM_ICON. */
/* Sample texture with LOD BIAS. Used instead of custom LOD bias in GPU_SAMPLER_CUSTOM_ICON. */
fragColor = texture(image, texCoord_interp, -0.5) * finalColor;
#ifdef DO_CORNER_MASKING

@ -40,11 +40,11 @@ void main()
if (overlay) {
if (!use_hdr) {
/* If we're not using an extended colour space, clamp the color 0..1. */
/* If we're not using an extended color space, clamp the color 0..1. */
fragColor = clamp(fragColor, 0.0, 1.0);
}
else {
/* When using extended colorspace, interpolate towards clamped color to improve display of
/* When using extended color-space, interpolate towards clamped color to improve display of
* alpha-blended overlays. */
fragColor = mix(max(fragColor, 0.0), clamp(fragColor, 0.0, 1.0), overlay_col.a);
}

@ -8,7 +8,7 @@ void main()
float dist_squared = dot(centered, centered);
const float rad_squared = 0.25;
/* Round point with jaggy edges. */
/* Round point with jagged edges. */
if (dist_squared > rad_squared) {
discard;
}

@ -82,7 +82,7 @@ void main()
vec2(-0.5, 0.5), vec2(0.5, 0.5), vec2(-0.5, -0.5), vec2(-0.5, -0.5));
/* 3x3 blur */
/* Manual unroll for perf. (stupid glsl compiler) */
/* Manual unroll for performance (stupid GLSL compiler). */
fragColor.a += sample_glyph_offset(texel, offsets4[0]);
fragColor.a += sample_glyph_offset(texel, offsets4[1]);
fragColor.a += sample_glyph_offset(texel, offsets4[2]);
@ -110,7 +110,7 @@ void main()
vec2(1.5, -1.5));
/* 5x5 blur */
/* Manual unroll for perf. (stupid glsl compiler) */
/* Manual unroll for performance (stupid GLSL compiler). */
fragColor.a += sample_glyph_offset(texel, offsets16[0]);
fragColor.a += sample_glyph_offset(texel, offsets16[1]);
fragColor.a += sample_glyph_offset(texel, offsets16[2]);

@ -28,7 +28,7 @@ void node_attribute_flame(vec4 attr, out float out_attr)
void node_attribute_uniform(vec4 attr, const float attr_hash, out vec4 out_attr)
{
/* Temporary solution to support both old UBO attribs and new SSBO loading.
/* Temporary solution to support both old UBO attributes and new SSBO loading.
* Old UBO load is already done through `attr` and will just be passed through. */
out_attr = attr_load_uniform(attr, floatBitsToUint(attr_hash));
}

@ -15,7 +15,7 @@ typedef enum {
GPU_FETCH_INT_TO_FLOAT,
} GPUVertFetchMode;
/* Consant to flag base binding index of uniform buffers. */
/* Constant to flag base binding index of uniform buffers. */
constant int MTL_uniform_buffer_base_index [[function_constant(0)]];
/* Default Point Size.
@ -40,14 +40,14 @@ constant int MTL_AttributeConvert13 [[function_constant(15)]];
constant int MTL_AttributeConvert14 [[function_constant(16)]];
constant int MTL_AttributeConvert15 [[function_constant(17)]];
/* Consant to flag binding index of transform feedback buffer.
/* Constant to flag binding index of transform feedback buffer.
* Unused if function constant not set. */
constant int MTL_transform_feedback_buffer_index [[function_constant(18)]];
/** Clip distance enablement. */
/* General toggle to control whether any clipping distanes are written at all.
/* General toggle to control whether any clipping distances are written at all.
* This is an optimization to avoid having the clipping distance shader output
* paramter if it is not needed. */
* parameter if it is not needed. */
constant int MTL_clip_distances_enabled [[function_constant(19)]];
/* If clipping planes are enabled at all, then we require an enablement
@ -73,7 +73,7 @@ constant int MTL_storage_buffer_base_index [[function_constant(26)]];
* https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc
*
* For unsupported conversions, the mtl_shader_generator will create an attribute reading function
* which performs this conversion manually upon read, depending on the requested fetchmode.
* which performs this conversion manually upon read, depending on the requested fetch-mode.
*
* These conversions use the function constants above, so any branching is optimized out during
* backend shader compilation (PSO creation).

@ -71,7 +71,7 @@ using uvec4 = uint4;
#define barrier() threadgroup_barrier(mem_flags::mem_threadgroup | mem_flags::mem_device | mem_flags::mem_texture)
#ifdef MTL_USE_WORKGROUP_SIZE
/* Compute workgroup size. */
/* Compute work-group size. */
struct constexp_uvec3 {
/* Type union to cover all syntax accessors:
* .x, .y, .z, .xy, .xyz
@ -132,7 +132,7 @@ constexpr constexp_uvec3 __internal_workgroupsize_get()
* NOTE: We cannot hoist the address space into the template declaration, so these must be declared
* for each relevant address space. */
/* Threadgroup memory. */
/* Thread-group memory. */
template<typename T> T atomicMax(threadgroup T &mem, T data)
{
return atomic_fetch_max_explicit((threadgroup _atomic<T> *)&mem, data, memory_order_relaxed);
@ -201,7 +201,7 @@ template<typename T> T atomicExchange(device T &mem, T data)
}
/* Unblock texture atomic compilation.
* TODO(Metal): This is not correct for global atomic behaviour, but will be safe within a single
* TODO(Metal): This is not correct for global atomic behavior, but will be safe within a single
* thread.
* We need to re-visit the solution for this use-case and use a 2D texture buffer instead. */
#define imageAtomicMin(tex, coord, data) \
@ -209,8 +209,8 @@ template<typename T> T atomicExchange(device T &mem, T data)
_texture_write_internal(tex, coord, uint4((val < data) ? val : data)); \
tex.texture->fence();
/* Used to replace 'out' in function parameters with threadlocal reference
* shortened to avoid expanding the glsl source string. */
/* Used to replace 'out' in function parameters with thread-local reference
* shortened to avoid expanding the GLSL source string. */
#define THD thread
#define OUT(type, name, array) thread type(&name)[array]

@ -25,7 +25,7 @@ struct USDExporterContext {
/**
* Wrap a function which returns the current time code
* for export. This is necessary since the context
* may be used for exporting an animation over a sequece
* may be used for exporting an animation over a sequence
* of frames.
*/
std::function<pxr::UsdTimeCode()> get_time_code;

@ -110,7 +110,7 @@ class SimpleImportTest(AbstractAlembicTest):
as_background_job=False)
# The active object is probably the first one that was imported, but this
# behaviour is not defined. At least it should be one of the cubes, and
# behavior is not defined. At least it should be one of the cubes, and
# not the sphere.
self.assertNotEqual(sphere, bpy.context.active_object)
self.assertTrue('Cube' in bpy.context.active_object.name)

@ -93,6 +93,8 @@ dict_custom = {
"deduplicating",
"deduplication",
"defocus",
"defragment",
"defragmenting",
"degeneracies",
"deletable",
"deleter",
@ -113,6 +115,7 @@ dict_custom = {
"dialogs",
"digitizers",
"dihedral",
"dimensionality",
"directionality",
"discoverability",
"discretization",