Cleanup: spelling in code

Campbell Barton 2023-11-27 10:55:39 +11:00
parent 27c660707d
commit 1eff48a838
19 changed files with 81 additions and 81 deletions

@@ -27,7 +27,7 @@ ccl_device_noinline void svm_node_wavelength(KernelGlobals kg,
}
else {
ii -= i;
ccl_constant float *c = cie_colour_match[i];
ccl_constant float *c = cie_color_match[i];
color = interp(make_float3(c[0], c[1], c[2]), make_float3(c[3], c[4], c[5]), ii);
}

@@ -34,7 +34,7 @@ ccl_inline_constant float blackbody_table_b[][4] = {
{6.72650283e-13f, -2.73078809e-08f, 4.24098264e-04f, -7.52335691e-01f}
};
ccl_inline_constant float cie_colour_match[][3] = {
ccl_inline_constant float cie_color_match[][3] = {
{0.0014f, 0.0000f, 0.0065f}, {0.0022f, 0.0001f, 0.0105f}, {0.0042f, 0.0001f, 0.0201f},
{0.0076f, 0.0002f, 0.0362f}, {0.0143f, 0.0004f, 0.0679f}, {0.0232f, 0.0006f, 0.1102f},
{0.0435f, 0.0012f, 0.2074f}, {0.0776f, 0.0022f, 0.3713f}, {0.1344f, 0.0040f, 0.6456f},

@@ -232,7 +232,7 @@ static bool or_into_each_other(BitGroupVector<> &vec, const int64_t a, const int
return or_into_each_other(vec[a], vec[b]);
}
static AnonymousAttributeInferencingResult analyse_anonymous_attribute_usages(
static AnonymousAttributeInferencingResult analyze_anonymous_attribute_usages(
const bNodeTree &tree)
{
BLI_assert(!tree.has_available_link_cycle());
@@ -643,7 +643,7 @@ bool update_anonymous_attribute_relations(bNodeTree &tree)
return changed;
}
AnonymousAttributeInferencingResult result = analyse_anonymous_attribute_usages(tree);
AnonymousAttributeInferencingResult result = analyze_anonymous_attribute_usages(tree);
const bool group_interface_changed =
!tree.runtime->anonymous_attribute_inferencing ||

@@ -42,14 +42,14 @@ void main()
* standard backward finite difference equation, because multiplication by -1 flips the order of
* subtraction. */
int x_step = (table_index.x % 2) * -2 + 1;
vec2 x_neighbour = displaced_coordinates_table[table_index.x + x_step][table_index.y];
vec2 x_gradient = (x_neighbour - displaced_coordinates) * x_step;
vec2 x_neighbor = displaced_coordinates_table[table_index.x + x_step][table_index.y];
vec2 x_gradient = (x_neighbor - displaced_coordinates) * x_step;
/* Compute the partial derivative of the displaced coordinates along the y direction using a
* finite difference approximation. See the previous code section for more information. */
int y_step = (table_index.y % 2) * -2 + 1;
vec2 y_neighbour = displaced_coordinates_table[table_index.x][table_index.y + y_step];
vec2 y_gradient = (y_neighbour - displaced_coordinates) * y_step;
vec2 y_neighbor = displaced_coordinates_table[table_index.x][table_index.y + y_step];
vec2 y_gradient = (y_neighbor - displaced_coordinates) * y_step;
/* Sample the input using the displaced coordinates passing in the computed gradients in order to
* utilize the anisotropic filtering capabilities of the sampler. */
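
The comments above rely on a small indexing trick. As a rough illustration (not part of this commit), here is a self-contained C++ sketch of the same idea; Vec2 and gradient_x are hypothetical names, not Blender API:

#include <cstdio>

struct Vec2 {
  float x, y;
};

/* Even columns use the neighbor to the right (forward difference, step = +1), odd columns
 * the neighbor to the left (backward difference, step = -1). Multiplying the difference by
 * the step restores the conventional order of subtraction in both cases. */
static Vec2 gradient_x(const Vec2 table[2][2], int ix, int iy)
{
  const int step = (ix % 2) * -2 + 1;
  const Vec2 current = table[ix][iy];
  const Vec2 neighbor = table[ix + step][iy];
  return {(neighbor.x - current.x) * step, (neighbor.y - current.y) * step};
}

int main()
{
  const Vec2 table[2][2] = {{{0.0f, 0.0f}, {0.0f, 1.0f}}, {{1.0f, 0.0f}, {1.0f, 1.0f}}};
  const Vec2 forward = gradient_x(table, 0, 0);  /* Uses the right neighbor. */
  const Vec2 backward = gradient_x(table, 1, 0); /* Uses the left neighbor, same sign. */
  std::printf("forward (%g, %g), backward (%g, %g)\n", forward.x, forward.y, backward.x, backward.y);
  return 0;
}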

@@ -18,9 +18,9 @@ void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
/* Identify if any of the 8 neighbours around the center pixel are not masked. */
bool has_inner_non_masked_neighbours = false;
bool has_outer_non_masked_neighbours = false;
/* Identify if any of the 8 neighbors around the center pixel are not masked. */
bool has_inner_non_masked_neighbors = false;
bool has_outer_non_masked_neighbors = false;
for (int j = -1; j <= 1; j++) {
for (int i = -1; i <= 1; i++) {
ivec2 offset = ivec2(i, j);
@@ -31,7 +31,7 @@ void main()
}
if (texture_load(inner_mask_tx, texel + offset).x == 0.0) {
has_inner_non_masked_neighbours = true;
has_inner_non_masked_neighbors = true;
}
/* If the user specified include_edges_of_image to be true, then we assume the outer mask is
@@ -39,11 +39,11 @@ void main()
* practically implemented by falling back to 0.0 or 1.0 for out of bound pixels. */
vec4 boundary_fallback = include_edges_of_image ? vec4(0.0) : vec4(1.0);
if (texture_load(outer_mask_tx, texel + offset, boundary_fallback).x == 0.0) {
has_outer_non_masked_neighbours = true;
has_outer_non_masked_neighbors = true;
}
/* Both are true, no need to continue. */
if (has_inner_non_masked_neighbours && has_outer_non_masked_neighbours) {
if (has_inner_non_masked_neighbors && has_outer_non_masked_neighbors) {
break;
}
}
@@ -52,12 +52,12 @@ void main()
bool is_inner_masked = texture_load(inner_mask_tx, texel).x > 0.0;
bool is_outer_masked = texture_load(outer_mask_tx, texel).x > 0.0;
/* The pixels at the boundary are those that are masked and have non masked neighbours. The inner
/* The pixels at the boundary are those that are masked and have non masked neighbors. The inner
* boundary has a specialization, if include_all_inner_edges is false, only inner boundaries that
* lie inside the outer mask will be considered a boundary. */
bool is_inner_boundary = is_inner_masked && has_inner_non_masked_neighbours &&
bool is_inner_boundary = is_inner_masked && has_inner_non_masked_neighbors &&
(is_outer_masked || include_all_inner_edges);
bool is_outer_boundary = is_outer_masked && has_outer_non_masked_neighbours;
bool is_outer_boundary = is_outer_masked && has_outer_non_masked_neighbors;
/* Encode the boundary information in the format expected by the jump flooding algorithm. */
ivec2 inner_jump_flooding_value = initialize_jump_flooding_value(texel, is_inner_boundary);
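
For reference, a reduced CPU-side C++ sketch (not part of this commit) of the 3x3 neighbor scan above; MaskImage, load_mask and the inner-mask fallback of 1.0 are assumptions made for this sketch, not the real compositor API:

struct MaskImage {
  const float *pixels; /* Row-major, one float per pixel. */
  int width, height;
};

/* Out-of-bounds reads return `fallback`, mirroring the include_edges_of_image behavior. */
static float load_mask(const MaskImage &mask, int x, int y, float fallback)
{
  if (x < 0 || y < 0 || x >= mask.width || y >= mask.height) {
    return fallback;
  }
  return mask.pixels[y * mask.width + x];
}

static void find_non_masked_neighbors(const MaskImage &inner,
                                      const MaskImage &outer,
                                      int x,
                                      int y,
                                      bool include_edges_of_image,
                                      bool &r_has_inner,
                                      bool &r_has_outer)
{
  const float outer_fallback = include_edges_of_image ? 0.0f : 1.0f;
  r_has_inner = false;
  r_has_outer = false;
  for (int j = -1; j <= 1; j++) {
    for (int i = -1; i <= 1; i++) {
      if (i == 0 && j == 0) {
        continue; /* Exempt the center pixel. */
      }
      if (load_mask(inner, x + i, y + j, 1.0f) == 0.0f) {
        r_has_inner = true;
      }
      if (load_mask(outer, x + i, y + j, outer_fallback) == 0.0f) {
        r_has_outer = true;
      }
      if (r_has_inner && r_has_outer) {
        return; /* Both are true, no need to continue. */
      }
    }
  }
}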

@@ -27,8 +27,8 @@ void main()
vec4 current_input = imageLoad(anti_diagonal_img, texel);
vec4 next_input = imageLoad(anti_diagonal_img, texel + i * direction);
vec4 neighbour_average = (previous_output + next_input) / 2.0;
vec4 causal_output = mix(current_input, neighbour_average, fade_factor);
vec4 neighbor_average = (previous_output + next_input) / 2.0;
vec4 causal_output = mix(current_input, neighbor_average, fade_factor);
imageStore(anti_diagonal_img, texel, causal_output);
}
@@ -42,8 +42,8 @@ void main()
vec4 current_input = imageLoad(anti_diagonal_img, texel);
vec4 next_input = imageLoad(anti_diagonal_img, texel - i * direction);
vec4 neighbour_average = (previous_output + next_input) / 2.0;
vec4 non_causal_output = mix(current_input, neighbour_average, fade_factor);
vec4 neighbor_average = (previous_output + next_input) / 2.0;
vec4 non_causal_output = mix(current_input, neighbor_average, fade_factor);
imageStore(anti_diagonal_img, texel, non_causal_output);
}
}

@@ -26,8 +26,8 @@ void main()
vec4 current_input = imageLoad(diagonal_img, texel);
vec4 next_input = imageLoad(diagonal_img, texel + i * direction);
vec4 neighbour_average = (previous_output + next_input) / 2.0;
vec4 causal_output = mix(current_input, neighbour_average, fade_factor);
vec4 neighbor_average = (previous_output + next_input) / 2.0;
vec4 causal_output = mix(current_input, neighbor_average, fade_factor);
imageStore(diagonal_img, texel, causal_output);
}
@@ -41,8 +41,8 @@ void main()
vec4 current_input = imageLoad(diagonal_img, texel);
vec4 next_input = imageLoad(diagonal_img, texel - i * direction);
vec4 neighbour_average = (previous_output + next_input) / 2.0;
vec4 non_causal_output = mix(current_input, neighbour_average, fade_factor);
vec4 neighbor_average = (previous_output + next_input) / 2.0;
vec4 non_causal_output = mix(current_input, neighbor_average, fade_factor);
imageStore(diagonal_img, texel, non_causal_output);
}
}

@@ -19,8 +19,8 @@ void main()
vec4 current_input = imageLoad(horizontal_img, texel);
vec4 next_input = imageLoad(horizontal_img, texel + ivec2(i, 0));
vec4 neighbour_average = (previous_output + next_input) / 2.0;
vec4 causal_output = mix(current_input, neighbour_average, fade_factor);
vec4 neighbor_average = (previous_output + next_input) / 2.0;
vec4 causal_output = mix(current_input, neighbor_average, fade_factor);
imageStore(horizontal_img, texel, causal_output);
}
@@ -34,8 +34,8 @@ void main()
vec4 current_input = imageLoad(horizontal_img, texel);
vec4 next_input = imageLoad(horizontal_img, texel - ivec2(i, 0));
vec4 neighbour_average = (previous_output + next_input) / 2.0;
vec4 non_causal_output = mix(current_input, neighbour_average, fade_factor);
vec4 neighbor_average = (previous_output + next_input) / 2.0;
vec4 non_causal_output = mix(current_input, neighbor_average, fade_factor);
imageStore(horizontal_img, texel, non_causal_output);
}
}

@@ -21,8 +21,8 @@ void main()
vec4 current_input = imageLoad(vertical_img, texel);
vec4 next_input = imageLoad(vertical_img, texel + ivec2(0, i));
vec4 neighbour_average = (previous_output + next_input) / 2.0;
vec4 causal_output = mix(current_input, neighbour_average, fade_factor);
vec4 neighbor_average = (previous_output + next_input) / 2.0;
vec4 causal_output = mix(current_input, neighbor_average, fade_factor);
imageStore(vertical_img, texel, causal_output);
}
@@ -36,8 +36,8 @@ void main()
vec4 current_input = imageLoad(vertical_img, texel);
vec4 next_input = imageLoad(vertical_img, texel - ivec2(0, i));
vec4 neighbour_average = (previous_output + next_input) / 2.0;
vec4 non_causal_output = mix(current_input, neighbour_average, fade_factor);
vec4 neighbor_average = (previous_output + next_input) / 2.0;
vec4 non_causal_output = mix(current_input, neighbor_average, fade_factor);
imageStore(vertical_img, texel, non_causal_output);
}
}

@@ -17,29 +17,29 @@ void main()
/* Load three equally spaced neighbors to the current pixel in the direction of the streak
* vector. */
vec4 neighbours[3];
neighbours[0] = texture(input_streak_tx, coordinates + vector);
neighbours[1] = texture(input_streak_tx, coordinates + vector * 2.0);
neighbours[2] = texture(input_streak_tx, coordinates + vector * 3.0);
vec4 neighbors[3];
neighbors[0] = texture(input_streak_tx, coordinates + vector);
neighbors[1] = texture(input_streak_tx, coordinates + vector * 2.0);
neighbors[2] = texture(input_streak_tx, coordinates + vector * 3.0);
/* Attenuate the value of two of the channels for each of the neighbors by multiplying by the
* color modulator. The particular channels for each neighbor were chosen to be visually similar
* to the modulation pattern of chromatic aberration. */
neighbours[0].gb *= color_modulator;
neighbours[1].rg *= color_modulator;
neighbours[2].rb *= color_modulator;
neighbors[0].gb *= color_modulator;
neighbors[1].rg *= color_modulator;
neighbors[2].rb *= color_modulator;
/* Compute the weighted sum of all neighbors using the given fade factors as weights. The
* weights are expected to be lower for neighbors that are further away. */
vec4 weighted_neighbours_sum = vec4(0.0);
vec4 weighted_neighbors_sum = vec4(0.0);
for (int i = 0; i < 3; i++) {
weighted_neighbours_sum += fade_factors[i] * neighbours[i];
weighted_neighbors_sum += fade_factors[i] * neighbors[i];
}
/* The output is the average between the center color and the weighted sum of the neighbors.
* Which intuitively mean that highlights will spread in the direction of the streak, which is
* the desired result. */
vec4 center_color = texture(input_streak_tx, coordinates);
vec4 output_color = (center_color + weighted_neighbours_sum) / 2.0;
vec4 output_color = (center_color + weighted_neighbors_sum) / 2.0;
imageStore(output_streak_img, texel, output_color);
}
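
As a rough CPU-side illustration (not part of this commit), the accumulation above boils down to the following C++ sketch; Color and streak_filter are hypothetical names:

struct Color {
  float r, g, b, a;
};

static Color streak_filter(const Color &center,
                           const Color neighbors_in[3],
                           const float fade_factors[3],
                           float color_modulator)
{
  Color neighbors[3] = {neighbors_in[0], neighbors_in[1], neighbors_in[2]};

  /* Attenuate two channels per neighbor to imitate a chromatic-aberration-like modulation. */
  neighbors[0].g *= color_modulator;
  neighbors[0].b *= color_modulator;
  neighbors[1].r *= color_modulator;
  neighbors[1].g *= color_modulator;
  neighbors[2].r *= color_modulator;
  neighbors[2].b *= color_modulator;

  /* Weighted sum of the neighbors, with lower weights for samples further along the streak. */
  Color sum = {0.0f, 0.0f, 0.0f, 0.0f};
  for (int i = 0; i < 3; i++) {
    sum.r += fade_factors[i] * neighbors[i].r;
    sum.g += fade_factors[i] * neighbors[i].g;
    sum.b += fade_factors[i] * neighbors[i].b;
    sum.a += fade_factors[i] * neighbors[i].a;
  }

  /* Averaging with the center sample spreads highlights along the streak direction. */
  return {(center.r + sum.r) / 2.0f,
          (center.g + sum.g) / 2.0f,
          (center.b + sum.b) / 2.0f,
          (center.a + sum.a) / 2.0f};
}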

@@ -18,8 +18,8 @@ void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
/* Identify if any of the 8 neighbours around the center pixel are transparent. */
bool has_transparent_neighbours = false;
/* Identify if any of the 8 neighbors around the center pixel are transparent. */
bool has_transparent_neighbors = false;
for (int j = -1; j <= 1; j++) {
for (int i = -1; i <= 1; i++) {
ivec2 offset = ivec2(i, j);
@@ -27,16 +27,16 @@ void main()
/* Exempt the center pixel. */
if (all(notEqual(offset, ivec2(0)))) {
if (texture_load(input_tx, texel + offset).a < 1.0) {
has_transparent_neighbours = true;
has_transparent_neighbors = true;
break;
}
}
}
}
/* The pixels at the boundary are those that are opaque and have transparent neighbours. */
/* The pixels at the boundary are those that are opaque and have transparent neighbors. */
bool is_opaque = texture_load(input_tx, texel).a == 1.0;
bool is_boundary_pixel = is_opaque && has_transparent_neighbours;
bool is_boundary_pixel = is_opaque && has_transparent_neighbors;
/* Encode the boundary information in the format expected by the jump flooding algorithm. */
ivec2 jump_flooding_value = initialize_jump_flooding_value(texel, is_boundary_pixel);
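
For context, a generic C++ sketch of jump flooding seed initialization (not part of this commit, and not necessarily the encoding initialize_jump_flooding_value() actually uses): boundary pixels become seeds that store their own coordinates, while all other pixels store a sentinel that later flooding passes overwrite with the nearest seed found so far.

struct SeedValue {
  int x, y; /* Coordinates of the closest known seed, or a sentinel. */
};

static SeedValue initialize_seed(int texel_x, int texel_y, bool is_boundary)
{
  if (is_boundary) {
    return {texel_x, texel_y}; /* Boundary pixels seed the flood with their own position. */
  }
  /* Sentinel meaning "no seed known yet"; a real implementation may encode this differently. */
  return {-1, -1};
}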

@@ -20,8 +20,8 @@ void main()
int count = 0;
for (int j = -edge_search_radius; j <= edge_search_radius; j++) {
for (int i = -edge_search_radius; i <= edge_search_radius; i++) {
float neighbour_matte = texture_load(input_matte_tx, texel + ivec2(i, j)).x;
count += int(distance(matte, neighbour_matte) < edge_tolerance);
float neighbor_matte = texture_load(input_matte_tx, texel + ivec2(i, j)).x;
count += int(distance(matte, neighbor_matte) < edge_tolerance);
}
}

@@ -32,14 +32,14 @@ void main()
* standard backward finite difference equation, because multiplication by -1 flips the order of
* subtraction. */
int x_step = (table_index.x % 2) * -2 + 1;
vec2 x_neighbour = uv_coordinates_table[table_index.x + x_step][table_index.y];
vec2 x_gradient = (x_neighbour - uv_coordinates) * x_step;
vec2 x_neighbor = uv_coordinates_table[table_index.x + x_step][table_index.y];
vec2 x_gradient = (x_neighbor - uv_coordinates) * x_step;
/* Compute the partial derivative of the UV coordinates along the y direction using a
* finite difference approximation. See the previous code section for more information. */
int y_step = (table_index.y % 2) * -2 + 1;
vec2 y_neighbour = uv_coordinates_table[table_index.x][table_index.y + y_step];
vec2 y_gradient = (y_neighbour - uv_coordinates) * y_step;
vec2 y_neighbor = uv_coordinates_table[table_index.x][table_index.y + y_step];
vec2 y_gradient = (y_neighbor - uv_coordinates) * y_step;
/* Sample the input using the UV coordinates passing in the computed gradients in order to
* utilize the anisotropic filtering capabilities of the sampler. */
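
A quick worked example of the trick described above: for an even column, x_step = (0 % 2) * -2 + 1 = +1 and the gradient is (table[x + 1][y] - table[x][y]) * 1, a forward difference; for an odd column, x_step = (1 % 2) * -2 + 1 = -1 and the gradient is (table[x - 1][y] - table[x][y]) * -1 = table[x][y] - table[x - 1][y], the standard backward difference. Both cases approximate the same derivative with a consistent sign, so the results can be passed directly as gradients for anisotropic sampling.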

@@ -28,10 +28,10 @@ vec2 compute_dir(vec2 v0, vec2 v1)
return dir;
}
void do_vertex_shader(vec4 pos, int vertex_id, out vec2 out_sspos, out vec4 out_finalcolour)
void do_vertex_shader(vec4 pos, int vertex_id, out vec2 out_sspos, out vec4 out_finalcolor)
{
out_sspos = proj(pos);
out_finalcolour = vec4(0.0);
out_finalcolor = vec4(0.0);
int frame = vertex_id + cacheStart;
float intensity; /* how faint */
@@ -43,7 +43,7 @@ void do_vertex_shader(vec4 pos, int vertex_id, out vec2 out_sspos, out vec4 out_
if (frame < frameCurrent) {
if (use_custom_color) {
/* Custom color: previous frames color is darker than current frame */
out_finalcolour.rgb = customColor * 0.25;
out_finalcolor.rgb = customColor * 0.25;
}
else {
/* black - before frameCurrent */
@@ -53,13 +53,13 @@ void do_vertex_shader(vec4 pos, int vertex_id, out vec2 out_sspos, out vec4 out_
else {
intensity = SET_INTENSITY(frameStart, frame, frameCurrent, 0.68, 0.92);
}
out_finalcolour.rgb = mix(colorWire.rgb, blend_base, intensity);
out_finalcolor.rgb = mix(colorWire.rgb, blend_base, intensity);
}
}
else if (frame > frameCurrent) {
if (use_custom_color) {
/* Custom color: next frames color is equal to user selected color */
out_finalcolour.rgb = customColor;
out_finalcolor.rgb = customColor;
}
else {
/* blue - after frameCurrent */
@@ -70,13 +70,13 @@ void do_vertex_shader(vec4 pos, int vertex_id, out vec2 out_sspos, out vec4 out_
intensity = SET_INTENSITY(frameCurrent, frame, frameEnd, 0.68, 0.92);
}
out_finalcolour.rgb = mix(colorBonePose.rgb, blend_base, intensity);
out_finalcolor.rgb = mix(colorBonePose.rgb, blend_base, intensity);
}
}
else {
if (use_custom_color) {
/* Custom color: current frame color is slightly darker than user selected color */
out_finalcolour.rgb = customColor * 0.5;
out_finalcolor.rgb = customColor * 0.5;
}
else {
/* green - on frameCurrent */
@@ -86,10 +86,10 @@ void do_vertex_shader(vec4 pos, int vertex_id, out vec2 out_sspos, out vec4 out_
else {
intensity = 0.75f;
}
out_finalcolour.rgb = mix(colorBackground.rgb, blend_base, intensity);
out_finalcolor.rgb = mix(colorBackground.rgb, blend_base, intensity);
}
}
out_finalcolour.a = 1.0;
out_finalcolor.a = 1.0;
}
void main()
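
Condensed for reference (not part of this commit), the custom-color policy described in the comments above amounts to the following C++ sketch; Color3 and motion_path_custom_color are illustrative names, while the factors are taken from the shader:

struct Color3 {
  float r, g, b;
};

static Color3 motion_path_custom_color(const Color3 &custom_color, int frame, int frame_current)
{
  float factor;
  if (frame < frame_current) {
    factor = 0.25f; /* Previous frames are darker than the current frame. */
  }
  else if (frame > frame_current) {
    factor = 1.0f; /* Next frames use the user-selected color as-is. */
  }
  else {
    factor = 0.5f; /* The current frame is slightly darker than the user-selected color. */
  }
  return {custom_color.r * factor, custom_color.g * factor, custom_color.b * factor};
}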

@@ -615,7 +615,7 @@ float mad(float a, float b, float c)
/**
* Gathers current pixel, and the top-left neighbors.
*/
float3 SMAAGatherNeighbours(float2 texcoord, float4 offset[3], SMAATexture2D(tex))
float3 SMAAGatherNeighbors(float2 texcoord, float4 offset[3], SMAATexture2D(tex))
{
#ifdef SMAAGather
return SMAAGather(tex, texcoord + SMAA_RT_METRICS.xy * float2(-0.5, -0.5)).grb;
@@ -634,8 +634,8 @@ float2 SMAACalculatePredicatedThreshold(float2 texcoord,
float4 offset[3],
SMAATexture2D(predicationTex))
{
float3 neighbours = SMAAGatherNeighbours(texcoord, offset, SMAATexturePass2D(predicationTex));
float2 delta = abs(neighbours.xx - neighbours.yz);
float3 neighbors = SMAAGatherNeighbors(texcoord, offset, SMAATexturePass2D(predicationTex));
float2 delta = abs(neighbors.xx - neighbors.yz);
float2 edges = step(SMAA_PREDICATION_THRESHOLD, delta);
return SMAA_PREDICATION_SCALE * SMAA_THRESHOLD * (1.0 - SMAA_PREDICATION_STRENGTH * edges);
}
@@ -876,8 +876,8 @@ float2 SMAAColorEdgeDetectionPS(float2 texcoord,
*/
float2 SMAADepthEdgeDetectionPS(float2 texcoord, float4 offset[3], SMAATexture2D(depthTex))
{
float3 neighbours = SMAAGatherNeighbours(texcoord, offset, SMAATexturePass2D(depthTex));
float2 delta = abs(neighbours.xx - float2(neighbours.y, neighbours.z));
float3 neighbors = SMAAGatherNeighbors(texcoord, offset, SMAATexturePass2D(depthTex));
float2 delta = abs(neighbors.xx - float2(neighbors.y, neighbors.z));
float2 edges = step(SMAA_DEPTH_THRESHOLD, delta);
# ifdef GPU_FRAGMENT_SHADER
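
As a scalar illustration (not part of this commit) of the depth edge test above: the gathered value holds the current depth plus two neighboring depths, the deltas against the current depth are compared to a threshold, and step() maps to a plain >= comparison. The left/top labels and names below are assumptions for readability, not the SMAA API:

#include <cmath>

struct Edges {
  bool left, top;
};

static Edges depth_edges(float depth_current, float depth_left, float depth_top, float threshold)
{
  Edges edges;
  /* step(threshold, delta) in the shader returns 1.0 when delta >= threshold. */
  edges.left = std::fabs(depth_current - depth_left) >= threshold;
  edges.top = std::fabs(depth_current - depth_top) >= threshold;
  return edges;
}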

@@ -4365,7 +4365,7 @@ void IMB_colormanagement_blackbody_temperature_to_rgb_table(float *r_table,
* \endcode
*/
static float cie_colour_match[81][3] = {
static float cie_color_match[81][3] = {
{0.0014f, 0.0000f, 0.0065f}, {0.0022f, 0.0001f, 0.0105f}, {0.0042f, 0.0001f, 0.0201f},
{0.0076f, 0.0002f, 0.0362f}, {0.0143f, 0.0004f, 0.0679f}, {0.0232f, 0.0006f, 0.1102f},
{0.0435f, 0.0012f, 0.2074f}, {0.0776f, 0.0022f, 0.3713f}, {0.1344f, 0.0040f, 0.6456f},
@@ -4406,7 +4406,7 @@ static void wavelength_to_xyz(float xyz[3], float lambda_nm)
}
else {
ii -= float(i);
const float *c = cie_colour_match[i];
const float *c = cie_color_match[i];
xyz[0] = c[0] + ii * (c[3] - c[0]);
xyz[1] = c[1] + ii * (c[4] - c[1]);
xyz[2] = c[2] + ii * (c[5] - c[2]);
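
The lookup above linearly interpolates between two adjacent rows of the CIE color matching table (81 rows at 5nm steps starting at 380nm); because the rows are stored contiguously, c[3..5] is simply the next row. A trimmed-down C++ sketch (not part of this commit, with a two-row stand-in table) of the same interpolation:

static void wavelength_to_xyz_sketch(float lambda_nm, float r_xyz[3])
{
  /* Two rows of the table, stored flat so that row i + 1 starts three floats after row i,
   * matching the c[0]/c[3] indexing in the code above. */
  static const float table[2 * 3] = {0.0014f, 0.0000f, 0.0065f, 0.0022f, 0.0001f, 0.0105f};
  const int last_index = 1;

  float ii = (lambda_nm - 380.0f) / 5.0f; /* Fractional index into the table. */
  if (ii < 0.0f || ii >= float(last_index)) {
    r_xyz[0] = r_xyz[1] = r_xyz[2] = 0.0f; /* Outside the range covered by this tiny table. */
    return;
  }
  const int i = int(ii);
  ii -= float(i);
  const float *c = table + i * 3;
  r_xyz[0] = c[0] + ii * (c[3] - c[0]);
  r_xyz[1] = c[1] + ii * (c[4] - c[1]);
  r_xyz[2] = c[2] + ii * (c[5] - c[2]);
}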

@@ -169,13 +169,13 @@ class TrackPositionOperation : public NodeOperation {
/* Compute the speed as the difference between the previous marker position and the current
* marker position. Notice that we compute the speed from the current to the previous position,
* not the other way around. */
const float2 previous_marker_position = compute_temporally_neighbouring_marker_position(
const float2 previous_marker_position = compute_temporally_neighboring_marker_position(
track, current_marker_position, -1);
const float2 speed_toward_previous = previous_marker_position - current_marker_position;
/* Compute the speed as the difference between the current marker position and the next marker
* position. */
const float2 next_marker_position = compute_temporally_neighbouring_marker_position(
const float2 next_marker_position = compute_temporally_neighboring_marker_position(
track, current_marker_position, 1);
const float2 speed_toward_next = current_marker_position - next_marker_position;
@@ -210,9 +210,9 @@ class TrackPositionOperation : public NodeOperation {
* marker exist for that particular frame or is disabled, the current marker position is
* returned. This is useful for computing the speed by providing small negative and positive
* delta times. */
float2 compute_temporally_neighbouring_marker_position(MovieTrackingTrack *track,
float2 current_marker_position,
int time_delta)
float2 compute_temporally_neighboring_marker_position(MovieTrackingTrack *track,
float2 current_marker_position,
int time_delta)
{
const int local_frame_number = BKE_movieclip_remap_scene_to_clip_frame(
get_movie_clip(), get_frame() + time_delta);
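
Simplified for reference (not part of this commit), the speed convention described in the comments above is just a consistent ordering of the subtractions; Float2 and the helper are hypothetical names:

struct Float2 {
  float x, y;
};

static Float2 sub(const Float2 &a, const Float2 &b)
{
  return {a.x - b.x, a.y - b.y};
}

static void compute_marker_speeds(const Float2 &previous_position,
                                  const Float2 &current_position,
                                  const Float2 &next_position,
                                  Float2 &r_speed_toward_previous,
                                  Float2 &r_speed_toward_next)
{
  /* A missing or disabled marker is expected to fall back to the current position,
   * which makes the corresponding speed zero. */
  r_speed_toward_previous = sub(previous_position, current_position);
  r_speed_toward_next = sub(current_position, next_position);
}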

@@ -59,11 +59,11 @@ static void shortest_paths(const Mesh &mesh,
continue;
}
const float edge_cost = std::max(0.0f, input_cost[edge_i]);
const float new_neighbour_cost = cost_i + edge_cost;
if (new_neighbour_cost < r_cost[neighbor_vert_i]) {
r_cost[neighbor_vert_i] = new_neighbour_cost;
const float new_neighbor_cost = cost_i + edge_cost;
if (new_neighbor_cost < r_cost[neighbor_vert_i]) {
r_cost[neighbor_vert_i] = new_neighbor_cost;
r_next_index[neighbor_vert_i] = vert_i;
queue.emplace(new_neighbour_cost, neighbor_vert_i);
queue.emplace(new_neighbor_cost, neighbor_vert_i);
}
}
}
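
The loop above is the relaxation step of Dijkstra's algorithm. A generic, self-contained C++ sketch of the same pattern (not part of this commit; a plain adjacency list stands in for Blender's mesh data):

#include <functional>
#include <queue>
#include <utility>
#include <vector>

struct Edge {
  int vert;   /* Neighboring vertex. */
  float cost; /* Non-negative edge cost. */
};

static void shortest_paths_sketch(const std::vector<std::vector<Edge>> &adjacency,
                                  int start_vert,
                                  std::vector<float> &r_cost,
                                  std::vector<int> &r_prev)
{
  const float inf = 1e30f;
  r_cost.assign(adjacency.size(), inf);
  r_prev.assign(adjacency.size(), -1);
  r_cost[start_vert] = 0.0f;

  using Item = std::pair<float, int>; /* (cost, vertex), smallest cost popped first. */
  std::priority_queue<Item, std::vector<Item>, std::greater<Item>> queue;
  queue.emplace(0.0f, start_vert);

  while (!queue.empty()) {
    const auto [cost, vert] = queue.top();
    queue.pop();
    if (cost > r_cost[vert]) {
      continue; /* Stale queue entry, a shorter path was already found. */
    }
    for (const Edge &edge : adjacency[vert]) {
      const float new_neighbor_cost = cost + edge.cost;
      if (new_neighbor_cost < r_cost[edge.vert]) {
        r_cost[edge.vert] = new_neighbor_cost;
        r_prev[edge.vert] = vert;
        queue.emplace(new_neighbor_cost, edge.vert);
      }
    }
  }
}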

@@ -251,7 +251,7 @@ class TextureMarginMap {
int other_poly;
bool found_pixel_in_polygon = false;
if (lookup_pixel_polygon_neighbourhood(x, y, &face, &destX, &destY, &other_poly)) {
if (lookup_pixel_polygon_neighborhood(x, y, &face, &destX, &destY, &other_poly)) {
for (int i = 0; i < maxPolygonSteps; i++) {
/* Force to pixel grid. */
@@ -326,7 +326,7 @@ class TextureMarginMap {
* face we need can be the one next to the one the Dijkstra map provides. To prevent missing
* pixels also check the neighboring polygons.
*/
bool lookup_pixel_polygon_neighbourhood(
bool lookup_pixel_polygon_neighborhood(
float x, float y, uint32_t *r_start_poly, float *r_destx, float *r_desty, int *r_other_poly)
{
float found_dist;
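
The comment above explains why a single map lookup is not enough near polygon seams. Below is a rough C++ sketch of one way to realize that fallback, here done by trying the polygon entries stored for the surrounding pixels; PolygonMap, the map layout and the contains_point predicate are hypothetical stand-ins and may differ from the real texture-margin implementation:

#include <cstdint>

struct PolygonMap {
  const uint32_t *polygon_index; /* Row-major, one polygon index per pixel. */
  int width, height;
};

static bool lookup_polygon_in_neighborhood(const PolygonMap &map,
                                           float x,
                                           float y,
                                           bool (*contains_point)(uint32_t polygon, float x, float y),
                                           uint32_t *r_polygon)
{
  /* Try the pixel's own entry and the entries of its 8 neighbors, and keep the first
   * polygon that actually contains the point. */
  for (int j = -1; j <= 1; j++) {
    for (int i = -1; i <= 1; i++) {
      const int px = int(x) + i;
      const int py = int(y) + j;
      if (px < 0 || py < 0 || px >= map.width || py >= map.height) {
        continue;
      }
      const uint32_t polygon = map.polygon_index[py * map.width + px];
      if (contains_point(polygon, x, y)) {
        *r_polygon = polygon;
        return true;
      }
    }
  }
  return false;
}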