Cleanup: fix various typos

Contributed by luzpaz.

Differential Revision: https://developer.blender.org/D15588
This commit is contained in:
Brecht Van Lommel 2022-08-03 19:27:23 +02:00
parent 81d75affb1
commit cc12f3e0ba
16 changed files with 24 additions and 24 deletions

@@ -366,7 +366,7 @@ if(WIN32 OR APPLE)
endif() endif()
option(WITH_INPUT_NDOF "Enable NDOF input devices (SpaceNavigator and friends)" ON) option(WITH_INPUT_NDOF "Enable NDOF input devices (SpaceNavigator and friends)" ON)
if(UNIX AND NOT APPLE) if(UNIX AND NOT APPLE)
option(WITH_INSTALL_PORTABLE "Install redistributeable runtime, otherwise install into CMAKE_INSTALL_PREFIX" ON) option(WITH_INSTALL_PORTABLE "Install redistributable runtime, otherwise install into CMAKE_INSTALL_PREFIX" ON)
option(WITH_STATIC_LIBS "Try to link with static libraries, as much as possible, to make blender more portable across distributions" OFF) option(WITH_STATIC_LIBS "Try to link with static libraries, as much as possible, to make blender more portable across distributions" OFF)
if(WITH_STATIC_LIBS) if(WITH_STATIC_LIBS)
option(WITH_BOOST_ICU "Boost uses ICU library (required for linking with static Boost built with libicu)." OFF) option(WITH_BOOST_ICU "Boost uses ICU library (required for linking with static Boost built with libicu)." OFF)

@@ -68,7 +68,7 @@ static ft_pix blf_font_width_max_ft_pix(struct FontBLF *font);
/** \name FreeType Utilities (Internal) /** \name FreeType Utilities (Internal)
* \{ */ * \{ */
/* Convert a FreeType 26.6 value representing an unscaled design size to factional pixels. */ /* Convert a FreeType 26.6 value representing an unscaled design size to fractional pixels. */
static ft_pix blf_unscaled_F26Dot6_to_pixels(FontBLF *font, FT_Pos value) static ft_pix blf_unscaled_F26Dot6_to_pixels(FontBLF *font, FT_Pos value)
{ {
/* Scale value by font size using integer-optimized multiplication. */ /* Scale value by font size using integer-optimized multiplication. */

@@ -360,7 +360,7 @@ class BezierSpline final : public Spline {
* Returns non-owning access to an array of values containing the information necessary to * Returns non-owning access to an array of values containing the information necessary to
* interpolate values from the original control points to evaluated points. The control point * interpolate values from the original control points to evaluated points. The control point
* index is the integer part of each value, and the factor used for interpolating to the next * index is the integer part of each value, and the factor used for interpolating to the next
* control point is the remaining factional part. * control point is the remaining fractional part.
*/ */
blender::Span<float> evaluated_mappings() const; blender::Span<float> evaluated_mappings() const;
blender::Span<blender::float3> evaluated_positions() const final; blender::Span<blender::float3> evaluated_positions() const final;

@@ -288,7 +288,7 @@ static int position_tail_on_spline(bSplineIKConstraint *ik_data,
int max_seg_idx = BKE_anim_path_get_array_size(cache) - 1; int max_seg_idx = BKE_anim_path_get_array_size(cache) - 1;
/* Make an initial guess of where our intersection point will be. /* Make an initial guess of where our intersection point will be.
* If the curve was a straight line, then the faction passed in r_new_curve_pos * If the curve was a straight line, then the fraction passed in r_new_curve_pos
* would be the correct location. * would be the correct location.
* So make it our first initial guess. * So make it our first initial guess.
*/ */

@@ -189,7 +189,7 @@ void mat3_to_quat_is_ok(float q[4], const float mat[3][3]);
* \endcode * \endcode
* *
* \param numerator: An integer factor in [0..denominator] (inclusive). * \param numerator: An integer factor in [0..denominator] (inclusive).
* \param denominator: The faction denominator (typically the number of segments of the circle). * \param denominator: The fraction denominator (typically the number of segments of the circle).
* \param r_sin: The resulting sine. * \param r_sin: The resulting sine.
* \param r_cos: The resulting cosine. * \param r_cos: The resulting cosine.
*/ */

@@ -483,7 +483,7 @@ void Film::update_sample_table()
data_.samples_weight_total = 1.0f; data_.samples_weight_total = 1.0f;
data_.samples_len = 1; data_.samples_len = 1;
} }
/* NOTE: Threshold determined by hand until we don't hit the assert bellow. */ /* NOTE: Threshold determined by hand until we don't hit the assert below. */
else if (data_.filter_radius < 2.20f) { else if (data_.filter_radius < 2.20f) {
/* Small filter Size. */ /* Small filter Size. */
int closest_index = 0; int closest_index = 0;

@@ -40,7 +40,7 @@ class Film {
private: private:
Instance &inst_; Instance &inst_;
/** Incomming combined buffer with post fx applied (motion blur + depth of field). */ /** Incoming combined buffer with post fx applied (motion blur + depth of field). */
GPUTexture *combined_final_tx_ = nullptr; GPUTexture *combined_final_tx_ = nullptr;
/** Main accumulation textures containing every render-pass except depth and combined. */ /** Main accumulation textures containing every render-pass except depth and combined. */

@@ -254,7 +254,7 @@ void film_get_catmull_rom_weights(vec2 t, out vec2 weights[4])
weights[3] = fct3 - fct2; weights[3] = fct3 - fct2;
} }
/* Load color using a special filter to avoid loosing detail. /* Load color using a special filter to avoid losing detail.
* \a texel is sample position with subpixel accuracy. */ * \a texel is sample position with subpixel accuracy. */
vec4 film_sample_catmull_rom(sampler2D color_tx, vec2 input_texel) vec4 film_sample_catmull_rom(sampler2D color_tx, vec2 input_texel)
{ {
@@ -390,7 +390,7 @@ vec4 film_amend_combined_history(
float t = line_aabb_clipping_dist(color_history.rgb, clip_dir.rgb, min_color.rgb, max_color.rgb); float t = line_aabb_clipping_dist(color_history.rgb, clip_dir.rgb, min_color.rgb, max_color.rgb);
color_history.rgb += clip_dir.rgb * saturate(t); color_history.rgb += clip_dir.rgb * saturate(t);
/* Clip alpha on its own to avoid interference with other chanels. */ /* Clip alpha on its own to avoid interference with other channels. */
float t_a = film_aabb_clipping_dist_alpha(color_history.a, clip_dir.a, min_color.a, max_color.a); float t_a = film_aabb_clipping_dist_alpha(color_history.a, clip_dir.a, min_color.a, max_color.a);
color_history.a += clip_dir.a * saturate(t_a); color_history.a += clip_dir.a * saturate(t_a);
@@ -406,16 +406,16 @@ float film_history_blend_factor(float velocity,
{ {
/* 5% of incoming color by default. */ /* 5% of incoming color by default. */
float blend = 0.05; float blend = 0.05;
/* Blend less history if the pixel has substential velocity. */ /* Blend less history if the pixel has substantial velocity. */
blend = mix(blend, 0.20, saturate(velocity * 0.02)); blend = mix(blend, 0.20, saturate(velocity * 0.02));
/** /**
* "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014 (Slide 43) * "High Quality Temporal Supersampling" by Brian Karis at Siggraph 2014 (Slide 43)
* Bias towards history if incomming pixel is near clamping. Reduces flicker. * Bias towards history if incoming pixel is near clamping. Reduces flicker.
*/ */
float distance_to_luma_clip = min_v2(vec2(luma_history - luma_min, luma_max - luma_history)); float distance_to_luma_clip = min_v2(vec2(luma_history - luma_min, luma_max - luma_history));
/* Divide by bbox size to get a factor. 2 factor to compensate the line above. */ /* Divide by bbox size to get a factor. 2 factor to compensate the line above. */
distance_to_luma_clip *= 2.0 * safe_rcp(luma_max - luma_min); distance_to_luma_clip *= 2.0 * safe_rcp(luma_max - luma_min);
/* Linearly blend when history gets bellow to 25% of the bbox size. */ /* Linearly blend when history gets below to 25% of the bbox size. */
blend *= saturate(distance_to_luma_clip * 4.0 + 0.1); blend *= saturate(distance_to_luma_clip * 4.0 + 0.1);
/* Discard out of view history. */ /* Discard out of view history. */
if (any(lessThan(texel, vec2(0))) || any(greaterThanEqual(texel, film_buf.extent))) { if (any(lessThan(texel, vec2(0))) || any(greaterThanEqual(texel, film_buf.extent))) {
@@ -451,13 +451,13 @@ void film_store_combined(
float velocity = length(motion); float velocity = length(motion);
/* Load weight if it is not uniform accross the whole buffer (i.e: upsampling, panoramic). */ /* Load weight if it is not uniform across the whole buffer (i.e: upsampling, panoramic). */
// dst.weight = film_weight_load(texel_combined); // dst.weight = film_weight_load(texel_combined);
color_dst = film_sample_catmull_rom(in_combined_tx, history_texel); color_dst = film_sample_catmull_rom(in_combined_tx, history_texel);
color_dst.rgb = film_YCoCg_from_scene_linear(color_dst.rgb); color_dst.rgb = film_YCoCg_from_scene_linear(color_dst.rgb);
/* Get local color bounding box of source neighboorhood. */ /* Get local color bounding box of source neighborhood. */
vec4 min_color, max_color; vec4 min_color, max_color;
film_combined_neighbor_boundbox(src_texel, min_color, max_color); film_combined_neighbor_boundbox(src_texel, min_color, max_color);
@@ -622,7 +622,7 @@ void film_process_data(ivec2 texel_film, out vec4 out_color, out float out_depth
src = film_sample_get(i, texel_film); src = film_sample_get(i, texel_film);
film_sample_accum_combined(src, combined_accum, weight_accum); film_sample_accum_combined(src, combined_accum, weight_accum);
} }
/* NOTE: src.texel is center texel in incomming data buffer. */ /* NOTE: src.texel is center texel in incoming data buffer. */
film_store_combined(dst, src.texel, combined_accum, weight_accum, out_color); film_store_combined(dst, src.texel, combined_accum, weight_accum, out_color);
} }

@@ -1,7 +1,7 @@
/** /**
* Dilate motion vector tiles until we have covered the maximum velocity. * Dilate motion vector tiles until we have covered the maximum velocity.
* Outputs the largest intersecting motion vector in the neighboorhod. * Outputs the largest intersecting motion vector in the neighborhood.
* *
*/ */
@@ -62,7 +62,7 @@ bool is_inside_motion_line(ivec2 tile, MotionLine motion_line)
/* NOTE: Everything is in tile units. */ /* NOTE: Everything is in tile units. */
float dist = point_line_projection_dist(vec2(tile), motion_line.origin, motion_line.normal); float dist = point_line_projection_dist(vec2(tile), motion_line.origin, motion_line.normal);
/* In order to be conservative and for simplicity, we use the tiles bounding circles. /* In order to be conservative and for simplicity, we use the tiles bounding circles.
* Consider that both the tile and the line have bouding radius of M_SQRT1_2. */ * Consider that both the tile and the line have bounding radius of M_SQRT1_2. */
return abs(dist) < M_SQRT2; return abs(dist) < M_SQRT2;
} }

@@ -22,7 +22,7 @@ const int gather_sample_count = 8;
* target post-fx framebuffer. */ * target post-fx framebuffer. */
vec4 motion_blur_sample_velocity(sampler2D velocity_tx, vec2 uv) vec4 motion_blur_sample_velocity(sampler2D velocity_tx, vec2 uv)
{ {
/* We can load velocity without velocity_resolve() since we resovled during the flatten pass. */ /* We can load velocity without velocity_resolve() since we resolved during the flatten pass. */
vec4 velocity = velocity_unpack(texture(velocity_tx, uv)); vec4 velocity = velocity_unpack(texture(velocity_tx, uv));
return velocity * vec2(textureSize(velocity_tx, 0)).xyxy * motion_blur_buf.motion_scale.xxyy; return velocity * vec2(textureSize(velocity_tx, 0)).xyxy * motion_blur_buf.motion_scale.xxyy;
} }

@@ -9,7 +9,7 @@
/* -------------------------------------------------------------------- */ /* -------------------------------------------------------------------- */
/** \name Sampling data. /** \name Sampling data.
* *
* Return a random values from Low Discrepency Sequence in [0..1) range. * Return a random values from Low Discrepancy Sequence in [0..1) range.
* This value is uniform (constant) for the whole scene sample. * This value is uniform (constant) for the whole scene sample.
* You might want to couple it with a noise function. * You might want to couple it with a noise function.
* \{ */ * \{ */

@@ -47,7 +47,7 @@ vec4 velocity_surface(vec3 P_prv, vec3 P, vec3 P_nxt)
*/ */
vec4 velocity_background(vec3 vV) vec4 velocity_background(vec3 vV)
{ {
/* Only transform direction to avoid loosing precision. */ /* Only transform direction to avoid losing precision. */
vec3 V = transform_direction(camera_curr.viewinv, vV); vec3 V = transform_direction(camera_curr.viewinv, vV);
/* NOTE: We don't use the drw_view.winmat to avoid adding the TAA jitter to the velocity. */ /* NOTE: We don't use the drw_view.winmat to avoid adding the TAA jitter to the velocity. */
vec2 prev_uv = project_point(camera_prev.winmat, V).xy; vec2 prev_uv = project_point(camera_prev.winmat, V).xy;

@@ -398,7 +398,7 @@ void TVertex::setBackEdgeB(ViewEdge *iBackEdgeB, bool incoming)
void TVertex::Replace(ViewEdge *iOld, ViewEdge *iNew) void TVertex::Replace(ViewEdge *iOld, ViewEdge *iNew)
{ {
// theoritically, we only replace edges for which this // theoretically, we only replace edges for which this
// view vertex is the B vertex // view vertex is the B vertex
if ((iOld == _FrontEdgeA.first) && (_FrontEdgeA.first->B() == this)) { if ((iOld == _FrontEdgeA.first) && (_FrontEdgeA.first->B() == this)) {
_FrontEdgeA.first = iNew; _FrontEdgeA.first = iNew;

@@ -481,7 +481,7 @@ static void lineart_shadow_create_shadow_edge_array(LineartData *ld,
* This process is repeated on each existing segments of the shadow edge (#e), which ensures they * This process is repeated on each existing segments of the shadow edge (#e), which ensures they
* all have been tested for closest segments after cutting. And in the diagram it's clear that the * all have been tested for closest segments after cutting. And in the diagram it's clear that the
* left/right side of cuts are likely to be discontinuous, each cut's left side designates the * left/right side of cuts are likely to be discontinuous, each cut's left side designates the
* right side of the last segment, and vise versa. */ * right side of the last segment, and vice-versa. */
static void lineart_shadow_edge_cut(LineartData *ld, static void lineart_shadow_edge_cut(LineartData *ld,
LineartShadowEdge *e, LineartShadowEdge *e,
double start, double start,

@@ -686,7 +686,7 @@ static struct PyMethodDef pygpu_framebuffer__tp_methods[] = {
PyDoc_STRVAR(pygpu_framebuffer__tp_doc, PyDoc_STRVAR(pygpu_framebuffer__tp_doc,
".. class:: GPUFrameBuffer(depth_slot=None, color_slots=None)\n" ".. class:: GPUFrameBuffer(depth_slot=None, color_slots=None)\n"
"\n" "\n"
" This object gives access to framebuffer functionallities.\n" " This object gives access to framebuffer functionalities.\n"
" When a 'layer' is specified in an argument, a single layer of a 3D or array " " When a 'layer' is specified in an argument, a single layer of a 3D or array "
"texture is attached to the frame-buffer.\n" "texture is attached to the frame-buffer.\n"
" For cube map textures, layer is translated into a cube map face.\n" " For cube map textures, layer is translated into a cube map face.\n"

@@ -74,7 +74,7 @@ class TestGraph:
revisions[revision] = len(revisions) revisions[revision] = len(revisions)
revision_dates[revision] = int(entry.date) revision_dates[revision] = int(entry.date)
# Google Charts JSON data layout is like a spreadsheat table, with # Google Charts JSON data layout is like a spreadsheet table, with
# columns, rows, and cells. We create one column for revision labels, # columns, rows, and cells. We create one column for revision labels,
# and one column for each test. # and one column for each test.
cols = [] cols = []