Cleanup: spelling in comments

This commit is contained in:
Campbell Barton 2023-02-27 20:54:27 +11:00
parent 40a11c7404
commit dc08ff3c2e
15 changed files with 36 additions and 35 deletions

@ -1248,8 +1248,8 @@ void GHOST_GetVulkanHandles(GHOST_ContextHandle context,
void GHOST_GetVulkanCommandBuffer(GHOST_ContextHandle context, void *r_command_buffer); void GHOST_GetVulkanCommandBuffer(GHOST_ContextHandle context, void *r_command_buffer);
/** /**
* Gets the Vulkan backbuffer related resource handles associated with the Vulkan context. * Gets the Vulkan back-buffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the backbuffer will change. * Needs to be called after each swap event as the back-buffer will change.
* *
* Should only be called when using a Vulkan context with an active swap chain. * Should only be called when using a Vulkan context with an active swap chain.
* Other contexts will not return any handles and leave the * Other contexts will not return any handles and leave the

@ -91,8 +91,8 @@ class GHOST_IContext {
virtual GHOST_TSuccess getVulkanCommandBuffer(void *r_command_buffer) = 0; virtual GHOST_TSuccess getVulkanCommandBuffer(void *r_command_buffer) = 0;
/** /**
* Gets the Vulkan backbuffer related resource handles associated with the Vulkan context. * Gets the Vulkan back-buffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the backbuffer will change. * Needs to be called after each swap event as the back-buffer will change.
* *
* \param r_image: After calling this function the VkImage * \param r_image: After calling this function the VkImage
* referenced by this parameter will contain the VKImage handle * referenced by this parameter will contain the VKImage handle

@ -641,7 +641,7 @@ static void cloth_apply_vgroup(ClothModifierData *clmd, Mesh *mesh)
if (dvert->dw[j].def_nr == (clmd->sim_parms->vgroup_mass - 1)) { if (dvert->dw[j].def_nr == (clmd->sim_parms->vgroup_mass - 1)) {
verts->goal = dvert->dw[j].weight; verts->goal = dvert->dw[j].weight;
/* goalfac= 1.0f; */ /* UNUSED */ // goalfac = 1.0f; /* UNUSED */
/* Kicking goal factor to simplify things...who uses that anyway? */ /* Kicking goal factor to simplify things...who uses that anyway? */
// ABS (clmd->sim_parms->maxgoal - clmd->sim_parms->mingoal); // ABS (clmd->sim_parms->maxgoal - clmd->sim_parms->mingoal);
@ -1779,7 +1779,7 @@ static bool cloth_build_springs(ClothModifierData *clmd, Mesh *mesh)
index2 = ((tspring->ij == tspring2->kl) ? (tspring->kl) : (tspring->ij)); index2 = ((tspring->ij == tspring2->kl) ? (tspring->kl) : (tspring->ij));
/* Check for existing spring. */ /* Check for existing spring. */
/* Check also if startpoint is equal to endpoint. */ /* Check also if start-point is equal to end-point. */
if ((index2 != tspring2->ij) && !BLI_edgeset_haskey(edgeset, tspring2->ij, index2)) { if ((index2 != tspring2->ij) && !BLI_edgeset_haskey(edgeset, tspring2->ij, index2)) {
spring = (ClothSpring *)MEM_callocN(sizeof(ClothSpring), "cloth spring"); spring = (ClothSpring *)MEM_callocN(sizeof(ClothSpring), "cloth spring");
@ -1889,7 +1889,8 @@ static bool cloth_build_springs(ClothModifierData *clmd, Mesh *mesh)
/* NOTE: the edges may already exist so run reinsert. */ /* NOTE: the edges may already exist so run reinsert. */
/* insert other near springs in edgeset AFTER bending springs are calculated (for selfcolls) */ /* Insert other near springs in `edgeset` AFTER bending springs are calculated
* (for self-collision). */
for (int i = 0; i < numedges; i++) { /* struct springs */ for (int i = 0; i < numedges; i++) { /* struct springs */
BLI_edgeset_add(edgeset, edges[i].v1, edges[i].v2); BLI_edgeset_add(edgeset, edges[i].v1, edges[i].v2);
} }

@ -251,7 +251,7 @@ int BKE_object_data_transfer_dttype_to_srcdst_index(const int dtdata_type)
/* ********** */ /* ********** */
/** /**
* When transfering color attributes, also transfer the active color attribute string. * When transferring color attributes, also transfer the active color attribute string.
* If a match can't be found, use the first color layer that can be found (to ensure a valid string * If a match can't be found, use the first color layer that can be found (to ensure a valid string
* is set). * is set).
*/ */
@ -295,7 +295,7 @@ static void data_transfer_mesh_attributes_transfer_active_color_string(
} }
/** /**
* When transfering color attributes, also transfer the default color attribute string. * When transferring color attributes, also transfer the default color attribute string.
* If a match can't be found, use the first color layer that can be found (to ensure a valid string * If a match can't be found, use the first color layer that can be found (to ensure a valid string
* is set). * is set).
*/ */
@ -1211,8 +1211,8 @@ void BKE_object_data_transfer_layout(struct Depsgraph *depsgraph,
fromlayers, fromlayers,
tolayers, tolayers,
nullptr); nullptr);
/* Make sure we have active/defaut color layers if none existed before. /* Make sure we have active/default color layers if none existed before.
* Use the active/defaut from src (if it was transferred), otherwise the first. */ * Use the active/default from src (if it was transferred), otherwise the first. */
if (ELEM(cddata_type, CD_PROP_COLOR, CD_PROP_BYTE_COLOR)) { if (ELEM(cddata_type, CD_PROP_COLOR, CD_PROP_BYTE_COLOR)) {
data_transfer_mesh_attributes_transfer_active_color_string( data_transfer_mesh_attributes_transfer_active_color_string(
me_dst, me_src, ATTR_DOMAIN_MASK_POINT, cddata_type); me_dst, me_src, ATTR_DOMAIN_MASK_POINT, cddata_type);
@ -1259,8 +1259,8 @@ void BKE_object_data_transfer_layout(struct Depsgraph *depsgraph,
fromlayers, fromlayers,
tolayers, tolayers,
nullptr); nullptr);
/* Make sure we have active/defaut color layers if none existed before. /* Make sure we have active/default color layers if none existed before.
* Use the active/defaut from src (if it was transferred), otherwise the first. */ * Use the active/default from src (if it was transferred), otherwise the first. */
if (ELEM(cddata_type, CD_PROP_COLOR, CD_PROP_BYTE_COLOR)) { if (ELEM(cddata_type, CD_PROP_COLOR, CD_PROP_BYTE_COLOR)) {
data_transfer_mesh_attributes_transfer_active_color_string( data_transfer_mesh_attributes_transfer_active_color_string(
me_dst, me_src, ATTR_DOMAIN_MASK_CORNER, cddata_type); me_dst, me_src, ATTR_DOMAIN_MASK_CORNER, cddata_type);

@ -1406,7 +1406,7 @@ void shrinkwrapModifier_deform(ShrinkwrapModifierData *smd,
calc.aux_target = DEG_get_evaluated_object(ctx->depsgraph, smd->auxTarget); calc.aux_target = DEG_get_evaluated_object(ctx->depsgraph, smd->auxTarget);
if (mesh != nullptr && smd->shrinkType == MOD_SHRINKWRAP_PROJECT) { if (mesh != nullptr && smd->shrinkType == MOD_SHRINKWRAP_PROJECT) {
/* Setup arrays to get vertexs positions, normals and deform weights */ /* Setup arrays to get vertex positions, normals and deform weights */
calc.vert_positions = BKE_mesh_vert_positions_for_write(mesh); calc.vert_positions = BKE_mesh_vert_positions_for_write(mesh);
calc.vert_normals = BKE_mesh_vertex_normals_ensure(mesh); calc.vert_normals = BKE_mesh_vertex_normals_ensure(mesh);

@ -189,7 +189,7 @@ static void subdiv_foreach_ctx_count(SubdivForeachTaskContext *ctx)
} }
} }
/* Add vertices used by outer edges on subdvided faces and loose edges. */ /* Add vertices used by outer edges on subdivided faces and loose edges. */
ctx->num_subdiv_vertices += num_subdiv_vertices_per_coarse_edge * coarse_mesh->totedge; ctx->num_subdiv_vertices += num_subdiv_vertices_per_coarse_edge * coarse_mesh->totedge;
ctx->num_subdiv_loops = ctx->num_subdiv_polygons * 4; ctx->num_subdiv_loops = ctx->num_subdiv_polygons * 4;

@ -11303,7 +11303,7 @@ static int ui_region_handler(bContext *C, const wmEvent *event, void * /*userdat
ui_blocks_set_tooltips(region, true); ui_blocks_set_tooltips(region, true);
} }
/* Always do this, to reliably update view and uilist item highlighting, even if /* Always do this, to reliably update view and UI-list item highlighting, even if
* the mouse hovers a button nested in the item (it's an overlapping layout). */ * the mouse hovers a button nested in the item (it's an overlapping layout). */
ui_handle_viewlist_items_hover(event, region); ui_handle_viewlist_items_hover(event, region);
if (retval == WM_UI_HANDLER_CONTINUE) { if (retval == WM_UI_HANDLER_CONTINUE) {

@ -77,7 +77,7 @@ static void region_draw_emboss(const ARegion *region, const rcti *scirct, int si
rect.ymin = scirct->ymin - region->winrct.ymin; rect.ymin = scirct->ymin - region->winrct.ymin;
rect.ymax = scirct->ymax - region->winrct.ymin; rect.ymax = scirct->ymax - region->winrct.ymin;
/* set transp line */ /* Set transparent line. */
GPU_blend(GPU_BLEND_ALPHA); GPU_blend(GPU_BLEND_ALPHA);
float color[4] = {0.0f, 0.0f, 0.0f, 0.25f}; float color[4] = {0.0f, 0.0f, 0.0f, 0.25f};
@ -1286,11 +1286,11 @@ static void region_rect_recursive(
alignment = RGN_ALIGN_NONE; alignment = RGN_ALIGN_NONE;
} }
/* If both the ARegion.sizex/y and the prefsize are 0, the region is tagged as too small, even /* If both the #ARegion.sizex/y and the #ARegionType.prefsizex/y are 0,
* before the layout for dynamic regions is created. #wm_draw_window_offscreen() allows the * the region is tagged as too small, even before the layout for dynamic regions is created.
* layout to be created despite the RGN_FLAG_TOO_SMALL flag being set. But there may still be * #wm_draw_window_offscreen() allows the layout to be created despite the #RGN_FLAG_TOO_SMALL
* regions that don't have a separate ARegionType.layout callback. For those, set a default * flag being set. But there may still be regions that don't have a separate #ARegionType.layout
* prefsize so they can become visible. */ * callback. For those, set a default #ARegionType.prefsizex/y so they can become visible. */
if ((region->flag & RGN_FLAG_DYNAMIC_SIZE) && !(region->type->layout)) { if ((region->flag & RGN_FLAG_DYNAMIC_SIZE) && !(region->type->layout)) {
if ((region->sizex == 0) && (region->type->prefsizex == 0)) { if ((region->sizex == 0) && (region->type->prefsizex == 0)) {
region->type->prefsizex = AREAMINX; region->type->prefsizex = AREAMINX;
@ -1300,7 +1300,7 @@ static void region_rect_recursive(
} }
} }
/* prefsize, taking into account DPI */ /* `prefsizex/y`, taking into account DPI. */
int prefsizex = UI_DPI_FAC * int prefsizex = UI_DPI_FAC *
((region->sizex > 1) ? region->sizex + 0.5f : region->type->prefsizex); ((region->sizex > 1) ? region->sizex + 0.5f : region->type->prefsizex);
int prefsizey; int prefsizey;
@ -1523,7 +1523,7 @@ static void region_rect_recursive(
region_overlap_fix(area, region); region_overlap_fix(area, region);
} }
/* set winrect for azones */ /* Set `region->winrct` for action-zones. */
if (region->flag & (RGN_FLAG_HIDDEN | RGN_FLAG_TOO_SMALL)) { if (region->flag & (RGN_FLAG_HIDDEN | RGN_FLAG_TOO_SMALL)) {
region->winrct = (region->overlap) ? *overlap_remainder : *remainder; region->winrct = (region->overlap) ? *overlap_remainder : *remainder;

@ -49,7 +49,7 @@ std::optional<Mesh *> mesh_merge_by_distance_connected(const Mesh &mesh,
* \warning \a vert_merge_map must **not** contain any chained mapping (v1 -> v2 -> v3 etc.), * \warning \a vert_merge_map must **not** contain any chained mapping (v1 -> v2 -> v3 etc.),
* this is not supported and will likely generate corrupted geometry. * this is not supported and will likely generate corrupted geometry.
* *
* \param vert_dest_map_len: The number of non '-1' values in vtargetmap. (not the size) * \param vert_dest_map_len: The number of non '-1' values in `vert_dest_map`. (not the size)
*/ */
Mesh *mesh_merge_verts(const Mesh &mesh, MutableSpan<int> vert_dest_map, int vert_dest_map_len); Mesh *mesh_merge_verts(const Mesh &mesh, MutableSpan<int> vert_dest_map, int vert_dest_map_len);

@ -47,7 +47,7 @@ typedef enum eGPUSamplerState {
GPU_SAMPLER_DEFAULT = 0, GPU_SAMPLER_DEFAULT = 0,
/** /**
* Enables hardware linear filtering. * Enables hardware linear filtering.
* Enables linear interpolation between mips if GPU_SAMPLER_MIPMAP is also set. * Enables linear interpolation between MIPS if GPU_SAMPLER_MIPMAP is also set.
*/ */
GPU_SAMPLER_FILTER = (1 << 0), GPU_SAMPLER_FILTER = (1 << 0),
/** /**
@ -85,7 +85,7 @@ typedef enum eGPUSamplerState {
/** Enable mirror repeat extension mode for directions using the `GPU_SAMPLER_REPEAT_*` flag. */ /** Enable mirror repeat extension mode for directions using the `GPU_SAMPLER_REPEAT_*` flag. */
GPU_SAMPLER_MIRROR_REPEAT = (1 << 8), GPU_SAMPLER_MIRROR_REPEAT = (1 << 8),
/** Special icon sampler with custom lod bias and interpolation mode. */ /** Special icon sampler with custom LOD bias and interpolation mode. */
GPU_SAMPLER_ICON = (1 << 9), GPU_SAMPLER_ICON = (1 << 9),
} eGPUSamplerState; } eGPUSamplerState;
@ -487,7 +487,7 @@ void GPU_texture_update_mipmap_chain(GPUTexture *texture);
/** /**
* Read the content of a \a mip_level from a \a tex and returns a copy of its data. * Read the content of a \a mip_level from a \a tex and returns a copy of its data.
* \warning the texture must have been created using GPU_TEXTURE_USAGE_HOST_READ. * \warning the texture must have been created using GPU_TEXTURE_USAGE_HOST_READ.
* \note synchronisation of shader writes via `imageStore()` needs to be explicitly done using * \note synchronization of shader writes via `imageStore()` needs to be explicitly done using
* `GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH)`. * `GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH)`.
*/ */
void *GPU_texture_read(GPUTexture *texture, eGPUDataFormat data_format, int mip_level); void *GPU_texture_read(GPUTexture *texture, eGPUDataFormat data_format, int mip_level);

@ -595,7 +595,7 @@ void GLTexture::samplers_init()
samplers_update(); samplers_update();
/* Custom sampler for icons. /* Custom sampler for icons.
* NOTE: The icon texture is sampled within the shader using a -0.5f lod bias. */ * NOTE: The icon texture is sampled within the shader using a -0.5f LOD bias. */
GLuint icon_sampler = samplers_[GPU_SAMPLER_ICON]; GLuint icon_sampler = samplers_[GPU_SAMPLER_ICON];
glSamplerParameteri(icon_sampler, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST); glSamplerParameteri(icon_sampler, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glSamplerParameteri(icon_sampler, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glSamplerParameteri(icon_sampler, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

@ -129,7 +129,7 @@ void VKCommandBuffer::encode_recorded_commands()
{ {
/* Intentionally not implemented. For the graphics pipeline we want to extract the /* Intentionally not implemented. For the graphics pipeline we want to extract the
* resources and its usages so we can encode multiple commands in the same command buffer with * resources and its usages so we can encode multiple commands in the same command buffer with
* the correct synchorinzations. */ * the correct synchronizations. */
} }
void VKCommandBuffer::submit_encoded_commands() void VKCommandBuffer::submit_encoded_commands()

@ -34,7 +34,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
const VkPipelineLayout vk_pipeline_layout, const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point); VkPipelineBindPoint bind_point);
void dispatch(int groups_x_len, int groups_y_len, int groups_z_len); void dispatch(int groups_x_len, int groups_y_len, int groups_z_len);
/* Copy the contents of a texture mip level to the dst buffer.*/ /** Copy the contents of a texture MIP level to the dst buffer. */
void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span<VkBufferImageCopy> regions); void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span<VkBufferImageCopy> regions);
void pipeline_barrier(VkPipelineStageFlags source_stages, void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages); VkPipelineStageFlags destination_stages);

@ -905,9 +905,9 @@ bool VKShader::finalize_descriptor_set_layouts(VkDevice vk_device,
VK_ALLOCATION_CALLBACKS VK_ALLOCATION_CALLBACKS
/* Currently we create a single descriptor set. The goal would be to create one descriptor set /* Currently we create a single descriptor set. The goal would be to create one descriptor set
* for Frequency::PASS/BATCH. This isn't possible as areas expect that the binding location is * for #Frequency::PASS/BATCH. This isn't possible as areas expect that the binding location is
* static and predictable (eevee-next) or the binding location can be mapped to a single number * static and predictable (EEVEE-NEXT) or the binding location can be mapped to a single number
* (python). */ * (Python). */
Vector<ShaderCreateInfo::Resource> all_resources; Vector<ShaderCreateInfo::Resource> all_resources;
all_resources.extend(info.pass_resources_); all_resources.extend(info.pass_resources_);
all_resources.extend(info.batch_resources_); all_resources.extend(info.batch_resources_);

@ -123,7 +123,7 @@ static void seq_update_sound_bounds_recursive_impl(const Scene *scene,
Sequence *seq; Sequence *seq;
/* For sound we go over full meta tree to update bounds of the sound strips, /* For sound we go over full meta tree to update bounds of the sound strips,
* since sound is played outside of evaluating the imbufs. */ * since sound is played outside of evaluating the image-buffers (#ImBuf). */
for (seq = metaseq->seqbase.first; seq; seq = seq->next) { for (seq = metaseq->seqbase.first; seq; seq = seq->next) {
if (seq->type == SEQ_TYPE_META) { if (seq->type == SEQ_TYPE_META) {
seq_update_sound_bounds_recursive_impl( seq_update_sound_bounds_recursive_impl(