Cleanup: spelling

Campbell Barton 2019-08-17 00:54:22 +10:00
parent 05edcf6329
commit 2790740813
72 changed files with 157 additions and 174 deletions

@ -43,8 +43,7 @@ BVHNode *bvh_node_merge_children_recursively(const BVHNode *node)
if (node->is_leaf()) {
return new LeafNode(*reinterpret_cast<const LeafNode *>(node));
}
/* Collect nodes of one layer deeper, allowing us to have more childrem in
* an inner layer. */
/* Collect nodes of one layer deeper, allowing us to have more children in an inner layer. */
assert(node->num_children() <= 2);
const BVHNode *children[4];
const BVHNode *child0 = node->get_child(0);

@ -70,9 +70,9 @@ ccl_device void kernel_filter_construct_transform(const float *ccl_restrict buff
filter_calculate_scale(feature_scale, use_time);
/* === Generate the feature transformation. ===
* This transformation maps the num_features-dimentional feature space to a reduced feature
* (r-feature) space which generally has fewer dimensions. This mainly helps to prevent
* overfitting. */
* This transformation maps the num_features-dimensional feature space to a reduced feature
* (r-feature) space which generally has fewer dimensions.
* This mainly helps to prevent over-fitting. */
float feature_matrix[DENOISE_FEATURES * DENOISE_FEATURES];
math_matrix_zero(feature_matrix, num_features);
FOR_PIXEL_WINDOW
@ -85,7 +85,7 @@ ccl_device void kernel_filter_construct_transform(const float *ccl_restrict buff
math_matrix_jacobi_eigendecomposition(feature_matrix, transform, num_features, 1);
*rank = 0;
/* Prevent overfitting when a small window is used. */
/* Prevent over-fitting when a small window is used. */
int max_rank = min(num_features, num_pixels / 3);
if (pca_threshold < 0.0f) {
float threshold_energy = 0.0f;
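The transform described above is a PCA-style reduction: keep only the strongest eigenvectors of the feature matrix so a small pixel window cannot over-fit. A minimal sketch of the rank-capping idea, with assumed names and not the actual kernel code:

    /* Keep the largest eigenvalues until their cumulative "energy" reaches the
     * requested fraction of the total, never exceeding max_rank (assumed names). */
    static int select_rank(const float *eigen_desc, int num_features, int max_rank, float energy_fraction)
    {
      float total = 0.0f, accum = 0.0f;
      for (int i = 0; i < num_features; i++) {
        total += eigen_desc[i];
      }
      int rank = 0;
      while (rank < max_rank && accum < energy_fraction * total) {
        accum += eigen_desc[rank++];
      }
      return rank;
    }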

@ -686,8 +686,7 @@ ccl_device_inline const ShaderClosure *shader_bsdf_pick(ShaderData *sd, float *r
if (r < next_sum) {
sampled = i;
/* Rescale to reuse for direction sample, to better
* preserve stratification. */
/* Rescale to reuse for direction sample, to better preserve stratification. */
*randu = (r - partial_sum) / sc->sample_weight;
break;
}
@ -743,8 +742,7 @@ ccl_device_inline const ShaderClosure *shader_bssrdf_pick(ShaderData *sd,
*throughput *= (sum_bsdf + sum_bssrdf) / sum_bssrdf;
sampled = i;
/* Rescale to reuse for direction sample, to better
* preserve stratifaction. */
/* Rescale to reuse for direction sample, to better preserve stratification. */
*randu = (r - partial_sum) / sc->sample_weight;
break;
}
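Both rescales above rely on the same fact: once a closure is picked by comparing r against cumulative sample weights, the leftover offset r - partial_sum is uniform over [0, sample_weight), so dividing by the weight yields a fresh uniform number in [0, 1) that still carries the stratification of the original sample. A standalone sketch with assumed names, not the Cycles code:

    /* Pick index i with probability weights[i] / sum(weights) from r in [0, sum(weights)),
     * then rescale the remainder so it can be reused as a new uniform number in [0, 1). */
    static int pick_weighted_and_rescale(const float *weights, int n, float *r)
    {
      float partial_sum = 0.0f;
      for (int i = 0; i < n; i++) {
        const float next_sum = partial_sum + weights[i];
        if (*r < next_sum) {
          *r = (*r - partial_sum) / weights[i];
          return i;
        }
        partial_sum = next_sum;
      }
      return n - 1; /* guard against floating point round-off */
    }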

@ -29,7 +29,7 @@ CCL_NAMESPACE_BEGIN
* semantic around the units of size, it just should be the same for all
* entries.
*
* This is a generic entry foi all size-related statistics, which helps
* This is a generic entry for all size-related statistics, which helps
* avoiding duplicating code for things like sorting.
*/
class NamedSizeEntry {

@ -27,7 +27,7 @@
CCL_NAMESPACE_BEGIN
/* Own subclass-ed vestion of std::vector. Subclass is needed because:
/* Own subclass-ed version of std::vector. Subclass is needed because:
*
* - Use own allocator which keeps track of used/peak memory.
* - Have method to ensure capacity is re-set to 0.

@ -834,7 +834,7 @@ class I18nMessages:
def parse_messages_from_po(self, src, key=None):
"""
Parse a po file.
Note: This function will silently "arrange" mis-formated entries, thus using afterward write_messages() should
Note: This function will silently "arrange" mis-formatted entries, thus using afterward write_messages() should
always produce a po-valid file, though not correct!
"""
reading_msgid = False

@ -241,9 +241,9 @@ def edge_loops_from_edges(mesh, edges=None):
def ngon_tessellate(from_data, indices, fix_loops=True, debug_print=True):
"""
Takes a polyline of indices (fgon) and returns a list of face
Takes a polyline of indices (ngon) and returns a list of face
index lists. Designed to be used for importers that need indices for an
fgon to create from existing verts.
ngon to create from existing verts.
:arg from_data: either a mesh, or a list/tuple of vectors.
:type from_data: list or :class:`bpy.types.Mesh`

@ -105,7 +105,7 @@ class ANIM_OT_keying_set_export(Operator):
# - idtype_list is used to get the list of id-datablocks from
# bpy.data.* since this info isn't available elsewhere
# - id.bl_rna.name gives a name suitable for UI,
# with a capitalised first letter, but we need
# with a capitalized first letter, but we need
# the plural form that's all lower case
# - special handling is needed for "nested" ID-blocks
# (e.g. nodetree in Material)

@ -889,7 +889,7 @@ void BKE_pose_eval_proxy_copy_bone(struct Depsgraph *depsgraph, Object *object,
bPoseChannel *pchan = pose_pchan_get_indexed(object, pchan_index);
DEG_debug_print_eval_subdata(
depsgraph, __func__, object->id.name, object, "pchan", pchan->name, pchan);
/* TODO(sergey): Use indexec lookup, once it's guaranteed to be kept
/* TODO(sergey): Use indexed lookup, once it's guaranteed to be kept
* around for the time while proxies are evaluating.
*/
#if 0

@ -294,7 +294,7 @@ static void layerInterp_mdeformvert(const void **sources,
}
}
/* delay writing to the destination incase dest is in sources */
/* Delay writing to the destination in case dest is in sources. */
/* now we know how many unique deform weights there are, so realloc */
if (dvert->dw && (dvert->totweight == totweight)) {
@ -441,7 +441,7 @@ static void layerInterp_tface(
}
}
/* delay writing to the destination incase dest is in sources */
/* Delay writing to the destination in case dest is in sources. */
*tf = *(MTFace *)(*sources);
memcpy(tf->uv, uv, sizeof(tf->uv));
}
@ -548,7 +548,7 @@ static void layerInterp_origspace_face(
}
}
/* delay writing to the destination in case dest is in sources */
/* Delay writing to the destination in case dest is in sources. */
memcpy(osf->uv, uv, sizeof(osf->uv));
}
@ -908,7 +908,7 @@ static void layerInterp_mloopcol(
/* Subdivide smooth or fractal can cause problems without clamping
* although weights should also not cause this situation */
/* also delay writing to the destination incase dest is in sources */
/* Also delay writing to the destination in case dest is in sources. */
mc->r = round_fl_to_uchar_clamp(col.r);
mc->g = round_fl_to_uchar_clamp(col.g);
mc->b = round_fl_to_uchar_clamp(col.b);
@ -1008,7 +1008,7 @@ static void layerInterp_mloopuv(
}
}
/* delay writing to the destination incase dest is in sources */
/* Delay writing to the destination in case dest is in sources. */
copy_v2_v2(((MLoopUV *)dest)->uv, uv);
((MLoopUV *)dest)->flag = flag;
}
@ -1104,7 +1104,7 @@ static void layerInterp_mloop_origspace(
}
}
/* delay writing to the destination incase dest is in sources */
/* Delay writing to the destination in case dest is in sources. */
copy_v2_v2(((OrigSpaceLoop *)dest)->uv, uv);
}
/* --- end copy */
@ -1152,7 +1152,7 @@ static void layerInterp_mcol(
}
}
/* delay writing to the destination incase dest is in sources */
/* Delay writing to the destination in case dest is in sources. */
for (j = 0; j < 4; ++j) {
/* Subdivide smooth or fractal can cause problems without clamping
@ -1220,7 +1220,7 @@ static void layerInterp_bweight(const void **sources,
}
}
/* delay writing to the destination incase dest is in sources */
/* Delay writing to the destination in case dest is in sources. */
*((float *)dest) = f;
}
@ -1251,7 +1251,7 @@ static void layerInterp_shapekey(const void **sources,
}
}
/* delay writing to the destination incase dest is in sources */
/* Delay writing to the destination in case dest is in sources. */
copy_v3_v3((float *)dest, co);
}
@ -1289,7 +1289,7 @@ static void layerInterp_mvert_skin(const void **sources,
madd_v3_v3fl(radius, vs_src->radius, w);
}
/* delay writing to the destination incase dest is in sources */
/* Delay writing to the destination in case dest is in sources. */
vs_dst = dest;
copy_v3_v3(vs_dst->radius, radius);
vs_dst->flag &= ~MVERT_SKIN_ROOT;
@ -2828,9 +2828,7 @@ void CustomData_interp(const CustomData *source,
const void *source_buf[SOURCE_BUF_SIZE];
const void **sources = source_buf;
/* slow fallback in case we're interpolating a ridiculous number of
* elements
*/
/* Slow fallback in case we're interpolating a ridiculous number of elements. */
if (count > SOURCE_BUF_SIZE) {
sources = MEM_malloc_arrayN(count, sizeof(*sources), __func__);
}
@ -3828,9 +3826,7 @@ void CustomData_bmesh_interp(CustomData *data,
void *source_buf[SOURCE_BUF_SIZE];
const void **sources = (const void **)source_buf;
/* slow fallback in case we're interpolating a ridiculous number of
* elements
*/
/* Slow fallback in case we're interpolating a ridiculous number of elements. */
if (count > SOURCE_BUF_SIZE) {
sources = MEM_malloc_arrayN(count, sizeof(*sources), __func__);
}
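All of the "delay writing to the destination" comments above guard against the same aliasing hazard: dest may point at one of the sources, so writing it before the loop finishes would corrupt later reads. The usual shape of the fix, as a generic sketch with hypothetical names:

    /* Accumulate into a local variable and write 'dest' once at the end,
     * so reading sources[i] stays valid even when dest == sources[i]. */
    static void interp_weighted(float *dest, const float **sources, const float *weights, int count)
    {
      float accum = 0.0f;
      for (int i = 0; i < count; i++) {
        accum += *sources[i] * weights[i];
      }
      *dest = accum; /* single write, after all reads */
    }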

@ -61,11 +61,10 @@ BMEditMesh *BKE_editmesh_copy(BMEditMesh *em)
/* The tessellation is NOT calculated on the copy here,
* because currently all the callers of this function use
* it to make a backup copy of the BMEditMesh to restore
* it in the case of errors in an operation. For perf
* reasons, in that case it makes more sense to do the
* tessellation only when/if that copy ends up getting
* used.*/
* it to make a backup copy of the #BMEditMesh to restore
* it in the case of errors in an operation. For performance reasons,
* in that case it makes more sense to do the
* tessellation only when/if that copy ends up getting used. */
em_copy->looptris = NULL;
return em_copy;
@ -97,8 +96,8 @@ static void editmesh_tessface_calc_intern(BMEditMesh *em)
BMesh *bm = em->bm;
/* this assumes all faces can be scan-filled, which isn't always true,
* worst case we over alloc a little which is acceptable */
/* This assumes all faces can be scan-filled, which isn't always true,
* worst case we over allocate a little which is acceptable. */
const int looptris_tot = poly_to_tri_count(bm->totface, bm->totloop);
const int looptris_tot_prev_alloc = em->looptris ?
(MEM_allocN_len(em->looptris) / sizeof(*em->looptris)) :
@ -109,7 +108,7 @@ static void editmesh_tessface_calc_intern(BMEditMesh *em)
/* this means no reallocs for quad dominant models, for */
if ((em->looptris != NULL) &&
/* (*em->tottri >= looptris_tot)) */
/* check against alloc'd size incase we over alloc'd a little */
/* Check against allocated size in case we over allocated a little. */
((looptris_tot_prev_alloc >= looptris_tot) &&
(looptris_tot_prev_alloc <= looptris_tot * 2))) {
looptris = em->looptris;
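Assuming every face is a simple n-gon, the count used here is exact: an n-gon tessellates into n - 2 triangles, so summing over all faces gives totloop - 2 * totface. For example, a mesh of 100 quads has 400 loops and produces 400 - 200 = 200 loop-triangles, which is the value the over-allocation check above compares against.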

@ -1727,8 +1727,8 @@ bool BKE_id_new_name_validate(ListBase *lb, ID *id, const char *tname)
/* This was in 2.43 and previous releases
* however all data in blender should be sorted, not just duplicate names
* sorting should not hurt, but noting just incase it alters the way other
* functions work, so sort every time */
* sorting should not hurt, but noting just in case it alters the way other
* functions work, so sort every time. */
#if 0
if (result) {
id_sort_by_name(lb, id);

@ -1628,7 +1628,9 @@ MaskLayerShape *BKE_mask_layer_shape_find_frame(MaskLayer *masklay, const int fr
return NULL;
}
/* when returning 2 - the frame isnt found but before/after frames are */
/**
* When returning 2 - the frame isn't found but before/after frames are.
*/
int BKE_mask_layer_shape_find_frame_range(MaskLayer *masklay,
const float frame,
MaskLayerShape **r_masklay_shape_a,

@ -364,12 +364,13 @@ bool BKE_mball_is_any_unselected(const MetaBall *mb)
return false;
}
/* \brief copy some properties from object to other metaball object with same base name
/**
* \brief copy some properties from object to other metaball object with same base name
*
* When some properties (wiresize, threshold, update flags) of metaball are changed, then this
* properties are copied to all metaballs in same "group" (metaballs with same base name: MBall,
* MBall.001, MBall.002, etc). The most important is to copy properties to the base metaball,
* because this metaball influence polygonisation of metaballs. */
* because this metaball influence polygonization of metaballs. */
void BKE_mball_properties_copy(Scene *scene, Object *active_object)
{
Scene *sce_iter = scene;

@ -1320,7 +1320,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
}
// print_v2("new clnors", clnors_avg);
}
/* Extra bonus: since smallstack is local to this func,
/* Extra bonus: since small-stack is local to this function,
* no more need to empty it at all cost! */
BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor_ref, lnor);
@ -1336,7 +1336,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
copy_v3_v3(nor, lnor);
}
}
/* Extra bonus: since smallstack is local to this func,
/* Extra bonus: since small-stack is local to this function,
* no more need to empty it at all cost! */
}
}

@ -432,7 +432,7 @@ void BKE_object_eval_eval_base_flags(Depsgraph *depsgraph,
BKE_base_eval_flags(base);
/* For render, compute base visibility again since BKE_base_eval_flags
* assumed viewport visibility. Selectability does not matter here. */
* assumed viewport visibility. Select-ability does not matter here. */
if (DEG_get_mode(depsgraph) == DAG_EVAL_RENDER) {
if (base->flag & BASE_ENABLED_RENDER) {
base->flag |= BASE_VISIBLE;

@ -519,7 +519,7 @@ static void ocean_compute_htilda(void *__restrict userdata,
int j;
/* note the <= _N/2 here, see the fftw doco
/* Note the <= _N/2 here, see the FFTW documentation
* about the mechanics of the complex->real fft storage. */
for (j = 0; j <= o->_N / 2; ++j) {
fftw_complex exp_param1;
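The j <= _N/2 bound comes from how FFTW stores real<->complex transforms: a real signal of length N has a Hermitian-symmetric spectrum, so only the N/2 + 1 non-redundant complex coefficients (indices 0 through N/2) are stored, and that is exactly the range this loop has to fill.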

@ -2705,7 +2705,7 @@ static void mesh_to_softbody(Scene *scene, Object *ob)
for (a = 0; a < me->totvert; a++, bp++) {
/* get scalar values needed *per vertex* from vertex group functions,
* so we can *paint* them nicly ..
* so we can *paint* them nicely ..
* they are normalized [0.0..1.0] so may be we need amplitude for scale
* which can be done by caller but still .. i'd like it to go this way
*/

@ -886,7 +886,7 @@ bool BLI_ghash_ensure_p_ex(GHash *gh, const void *key, void ***r_key, void ***r_
const bool haskey = (e != NULL);
if (!haskey) {
/* pass 'key' incase we resize */
/* Pass 'key' in case we resize. */
e = BLI_mempool_alloc(gh->entrypool);
ghash_insert_ex_keyonly_entry(gh, (void *)key, bucket_index, (Entry *)e);
e->e.key = NULL; /* caller must re-assign */
@ -1189,7 +1189,7 @@ bool BLI_gset_ensure_p_ex(GSet *gs, const void *key, void ***r_key)
const bool haskey = (e != NULL);
if (!haskey) {
/* pass 'key' incase we resize */
/* Pass 'key' in case we resize */
e = BLI_mempool_alloc(((GHash *)gs)->entrypool);
ghash_insert_ex_keyonly_entry((GHash *)gs, (void *)key, bucket_index, (Entry *)e);
e->key = NULL; /* caller must re-assign */

@ -273,12 +273,12 @@ static int vertex_sort(const void *p1, const void *p2, void *vs_ctx_p)
/** \} */
/**
* Main boxpacking function accessed from other functions
* Main box-packing function accessed from other functions
* This sets boxes x,y to positive values, sorting from 0,0 outwards.
* There is no limit to the space boxes may take, only that they will be packed
* tightly into the lower left hand corner (0,0)
*
* \param boxarray: a pre allocated array of boxes.
* \param boxarray: a pre-allocated array of boxes.
* only the 'box->x' and 'box->y' are set, 'box->w' and 'box->h' are used,
* 'box->index' is not used at all, the only reason its there
* is that the box array is sorted by area and programs need to be able

@ -3452,7 +3452,7 @@ bool clip_segment_v3_plane(
}
}
/* incase input/output values match (above also) */
/* In case input/output values match (above also). */
const float p1_copy[3] = {UNPACK3(p1)};
copy_v3_v3(r_p2, p2);
copy_v3_v3(r_p1, p1_copy);
@ -3511,7 +3511,7 @@ bool clip_segment_v3_plane_n(const float p1[3],
}
}
/* incase input/output values match */
/* In case input/output values match. */
const float p1_copy[3] = {UNPACK3(p1)};
madd_v3_v3v3fl(r_p1, p1_copy, dp, p1_fac);

@ -18,7 +18,7 @@
/** \file
* \ingroup blenloader
* \brief defines for blendfile codes
* \brief defines for blend-file codes.
*/
/* INTEGER CODES */

@ -222,7 +222,7 @@
* which keeps large arrays in memory from data-blocks we may not even use.
*
* \note This is disabled when using compression,
* while zlib supports seek ist's unusably slow, see: T61880.
* while zlib supports seek it's unusably slow, see: T61880.
*/
#define USE_BHEAD_READ_ON_DEMAND
@ -271,13 +271,13 @@ typedef struct BHeadN {
* because ID names are used in lookup tables. */
#define BHEAD_USE_READ_ON_DEMAND(bhead) ((bhead)->code == DATA)
/* this function ensures that reports are printed,
* in the case of libraray linking errors this is important!
/**
* This function ensures that reports are printed,
* in the case of library linking errors this is important!
*
* bit kludge but better then doubling up on prints,
* we could alternatively have a versions of a report function which forces printing - campbell
*/
void blo_reportf_wrap(ReportList *reports, ReportType type, const char *format, ...)
{
char fixed_buf[1024]; /* should be long enough */
@ -533,7 +533,7 @@ static void split_libdata(ListBase *lb_src, Main **lib_main_array, const uint li
if (id->lib) {
if (((uint)id->lib->temp_index < lib_main_array_len) &&
/* this check should never fail, just incase 'id->lib' is a dangling pointer. */
/* this check should never fail, just in case 'id->lib' is a dangling pointer. */
(lib_main_array[id->lib->temp_index]->curlib == id->lib)) {
Main *mainvar = lib_main_array[id->lib->temp_index];
ListBase *lb_dst = which_libbase(mainvar, GS(id->name));
@ -2498,16 +2498,13 @@ static void IDP_DirectLinkProperty(IDProperty *prop, int switch_endian, FileData
IDP_DirectLinkIDPArray(prop, switch_endian, fd);
break;
case IDP_DOUBLE:
/* erg, stupid doubles. since I'm storing them
* in the same field as int val; val2 in the
* IDPropertyData struct, they have to deal with
* endianness specifically
/* Workaround for doubles.
* They are stored in the same field as `int val, val2` in the IDPropertyData struct,
* they have to deal with endianness specifically.
*
* in theory, val and val2 would've already been swapped
* In theory, val and val2 would've already been swapped
* if switch_endian is true, so we have to first unswap
* them then reswap them as a single 64-bit entity.
*/
* them then re-swap them as a single 64-bit entity. */
if (switch_endian) {
BLI_endian_switch_int32(&prop->data.val);
BLI_endian_switch_int32(&prop->data.val2);
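The double case above has to undo two independent 32-bit swaps and then swap the full 64-bit value, because a double only round-trips correctly when byte-swapped as a single 8-byte unit. A self-contained sketch of that fix-up, using GCC/Clang byte-swap builtins rather than the BLI helpers called here:

    #include <stdint.h>
    #include <string.h>

    struct TwoHalves {
      uint32_t val, val2; /* a double stored as two 32-bit fields, as in IDPropertyData */
    };

    /* Each half was already byte-swapped by the generic reader; swap each half back,
     * then re-swap the pair as one 64-bit value so the double ends up correct. */
    static void fixup_double_endian(struct TwoHalves *p)
    {
      p->val = __builtin_bswap32(p->val);
      p->val2 = __builtin_bswap32(p->val2);
      uint64_t whole;
      memcpy(&whole, p, sizeof(whole));
      whole = __builtin_bswap64(whole);
      memcpy(p, &whole, sizeof(whole));
    }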
@ -7686,8 +7683,8 @@ static void direct_link_windowmanager(FileData *fd, wmWindowManager *wm)
win->addmousemove = true;
win->stereo3d_format = newdataadr(fd, win->stereo3d_format);
/* multiview always fallback to anaglyph at file opening
* otherwise quadbuffer saved files can break Blender */
/* Multi-view always fallback to anaglyph at file opening
* otherwise quad-buffer saved files can break Blender. */
if (win->stereo3d_format) {
win->stereo3d_format->display_mode = S3D_DISPLAY_ANAGLYPH;
}
@ -9192,7 +9189,7 @@ static BHead *read_libblock(FileData *fd, Main *main, BHead *bhead, const int ta
* flags dependency graph does not do animation update to avoid loss of unkeyed changes.,
* which conflicts with undo/redo of changes to animation data itself.
*
* But for regular file load we clear the flag, since the flags might have been changed sinde
* But for regular file load we clear the flag, since the flags might have been changed since
* the version the file has been saved with. */
if (!fd->memfile) {
id->recalc = 0;
@ -11265,7 +11262,7 @@ static void add_collections_to_scene(Main *mainvar,
}
/* We do not want to force instantiation of indirectly linked collections,
* not even when appending. Users can now easily instantiate collections (and their objects)
* as needed by themsleves. See T67032. */
* as needed by themselves. See T67032. */
else if ((collection->id.tag & LIB_TAG_INDIRECT) == 0) {
bool do_add_collection = (collection->id.tag & LIB_TAG_DOIT) != 0;
if (!do_add_collection) {

@ -125,8 +125,8 @@ static void do_version_workspaces_create_from_screens(Main *bmain)
}
if (screen_parent) {
/* fullscreen with "Back to Previous" option, don't create
* a new workspace, add layout workspace containing parent */
/* Full-screen with "Back to Previous" option, don't create
* a new workspace, add layout workspace containing parent. */
workspace = BLI_findstring(
&bmain->workspaces, screen_parent->id.name + 2, offsetof(ID, name) + 2);
}

@ -1142,9 +1142,12 @@ typedef struct RenderInfo {
char scene_name[MAX_ID_NAME - 2];
} RenderInfo;
/* was for historic render-deamon feature,
* now write because it can be easily extracted without
* reading the whole blend file */
/**
* This was originally added for the historic render-daemon feature,
* now write because it can be easily extracted without reading the whole blend file.
*
* See: `release/scripts/modules/blend_render_info.py`
*/
static void write_renderinfo(WriteData *wd, Main *mainvar)
{
bScreen *curscreen;

@ -422,7 +422,7 @@ void BM_mesh_bm_from_me(BMesh *bm, const Mesh *me, const struct BMeshFromMeshPar
/* don't use 'j' since we may have skipped some faces, hence some loops. */
BM_elem_index_set(l_iter, totloops++); /* set_ok */
/* Save index of correspsonding MLoop */
/* Save index of corresponding #MLoop. */
CustomData_to_bmesh_block(&me->ldata, &bm->ldata, j++, &l_iter->head.data, true);
} while ((l_iter = l_iter->next) != l_first);

@ -483,7 +483,7 @@ void BM_face_calc_tangent_edge_diagonal(const BMFace *f, float r_tangent[3])
l_iter = l_first = BM_FACE_FIRST_LOOP(f);
/* incase of degenerate faces */
/* In case of degenerate faces. */
zero_v3(r_tangent);
/* warning: O(n^2) loop here, take care! */
@ -520,7 +520,7 @@ void BM_face_calc_tangent_vert_diagonal(const BMFace *f, float r_tangent[3])
l_iter = l_first = BM_FACE_FIRST_LOOP(f);
/* incase of degenerate faces */
/* In case of degenerate faces. */
zero_v3(r_tangent);
/* warning: O(n^2) loop here, take care! */

@ -363,7 +363,7 @@ static void bm_decim_build_edge_cost(BMesh *bm,
struct KD_Symmetry_Data {
/* pre-flipped coords */
float e_v1_co[3], e_v2_co[3];
/* Use to compare the correct endpoints incase v1/v2 are swapped */
/* Use to compare the correct endpoints in case v1/v2 are swapped. */
float e_dir[3];
int e_found_index;
@ -1371,8 +1371,8 @@ void BM_mesh_decimate_collapse(BMesh *bm,
/* handy to detect corruptions elsewhere */
BLI_assert(BM_elem_index_get(e) < tot_edge_orig);
/* under normal conditions wont be accessed again,
* but NULL just incase so we don't use freed node */
/* Under normal conditions wont be accessed again,
* but NULL just in case so we don't use freed node. */
eheap_table[BM_elem_index_get(e)] = NULL;
bm_decim_edge_collapse(bm,

@ -231,7 +231,7 @@ void BM_mesh_decimate_unsubdivide_ex(BMesh *bm, const int iterations, const bool
if (BMO_vert_flag_test(bm, v, ELE_VERT_TAG))
#endif
{
/* check again incase the topology changed */
/* Check again in case the topology changed. */
if (bm_vert_dissolve_fan_test(v)) {
v_first = v;
}

@ -1099,7 +1099,7 @@ static BMEdge *bm_face_region_pivot_edge_find(BMFace **faces_region,
}
}
else {
/* use incase (depth == 0), no interior verts */
/* Use in case (depth == 0), no interior verts. */
e_pivot_fallback = e;
}
} while ((l_iter = l_iter->next) != l_first);

@ -73,7 +73,7 @@ class MemoryBuffer {
rcti m_rect;
/**
* brief refers to the chunknumber within the executiongroup where related to the MemoryProxy
* brief refers to the chunk-number within the execution-group where related to the MemoryProxy
* \see memoryProxy
*/
unsigned int m_chunkNumber;

@ -195,7 +195,7 @@ void ImageNode::convertToOperations(NodeConverter &converter,
}
}
/* incase we can't load the layer */
/* In case we can't load the layer. */
if (operation == NULL) {
converter.setInvalidOutput(getOutputSocket(index));
}

@ -22,7 +22,7 @@
inline float colorbalance_lgg(float in, float lift_lgg, float gamma_inv, float gain)
{
/* 1:1 match with the sequencer with linear/srgb conversions, the conversion isnt pretty
* but best keep it this way, sice testing for durian shows a similar calculation
* but best keep it this way, since testing for durian shows a similar calculation
* without lin/srgb conversions gives bad results (over-saturated shadows) with colors
* slightly below 1.0. some correction can be done but it ends up looking bad for shadows or
* lighter tones - campbell */

@ -148,7 +148,7 @@ void MaskOperation::executePixelSampled(float output[4],
}
}
else {
/* incase loop below fails */
/* In case loop below fails. */
output[0] = 0.0f;
for (unsigned int i = 0; i < this->m_rasterMaskHandleTot; i++) {

@ -638,7 +638,7 @@ void graph_id_tag_update(
* This way IDs in the undo steps will have this flag preserved, making it possible to restore
* all needed tags when new dependency graph is created on redo.
* This is the only way to ensure modifications to animation data (such as keyframes i.e.)
* properly triggers animation update for the newely constructed dependency graph on redo (while
* properly triggers animation update for the newly constructed dependency graph on redo (while
* usually newly created dependency graph skips animation update to avoid loss of unkeyed
* changes). */
if (update_source == DEG_UPDATE_SOURCE_USER_EDIT) {

@ -4001,7 +4001,7 @@ GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Batch Cache Impl. common
/** \name Batch Cache Implementation (common)
* \{ */
void drw_batch_cache_validate(Object *ob)

@ -919,7 +919,7 @@ static void *extract_lines_adjacency_init(const MeshRenderData *mr, void *UNUSED
{
/* Similar to poly_to_tri_count().
* There is always loop + tri - 1 edges inside a polygon.
* Cummulate for all polys and you get : */
* Accumulate for all polys and you get : */
uint tess_edge_len = mr->loop_len + mr->tri_len - mr->poly_len;
size_t vert_to_loop_size = sizeof(uint) * mr->vert_len;
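As a quick check of that formula: a single quad has 4 loops and 2 triangles, giving 4 + 2 - 1 = 5 edges (its 4 sides plus the internal diagonal shared by both triangles). Summing loop + tri - 1 over every polygon then collapses to loop_len + tri_len - poly_len, which is the expression used above.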
@ -1859,8 +1859,8 @@ static void *extract_orco_init(const MeshRenderData *mr, void *buf)
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
/* FIXME(fclem): We use the last component as a way to differentiate from generic vertex
* attribs. This is a substential waste of Vram and should be done another way.
* Unfortunately, at the time of writting, I did not found any other "non disruptive"
* attribs. This is a substantial waste of Vram and should be done another way.
* Unfortunately, at the time of writing, I did not found any other "non disruptive"
* alternative. */
GPU_vertformat_attr_add(&format, "orco", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}

@ -60,9 +60,8 @@ typedef struct MPathTarget {
Object *ob; /* source object */
bPoseChannel *pchan; /* source posechannel (if applicable) */
/* "Evaluated" Copies (these come from the background COW copie
* that provide all the coordinates we want to save off)
*/
/* "Evaluated" Copies (these come from the background COW copy
* that provide all the coordinates we want to save off). */
Object *ob_eval; /* evaluated object */
} MPathTarget;

@ -712,9 +712,9 @@ int ANIM_scene_get_keyingset_index(Scene *scene, KeyingSet *ks)
}
}
/* still here, so try builtins list too
* - builtins are from (<= -1)
* - none/invalid is (= 0)
/* Still here, so try built-ins list too:
* - Built-ins are from (<= -1).
* - None/Invalid is (= 0).
*/
index = BLI_findindex(&builtin_keyingsets, ks);
if (index != -1) {

@ -769,7 +769,7 @@ static int armature_symmetrize_exec(bContext *C, wmOperator *op)
EditBone *ebone = ebone_iter->temp.ebone;
/* copy flags incase bone is pre-existing data */
/* Copy flags in case bone is pre-existing data. */
ebone->flag = (ebone->flag & ~flag_copy) | (ebone_iter->flag & flag_copy);
if (ebone_iter->parent == NULL) {

@ -120,7 +120,7 @@ struct CurveDrawData {
struct {
float mouse[2];
/* used incase we can't calculate the depth */
/* Used in case we can't calculate the depth. */
float location_world[3];
float location_world_valid[3];
@ -1053,7 +1053,7 @@ static int curve_draw_invoke(bContext *C, wmOperator *op, const wmEvent *event)
const bool is_modal = RNA_boolean_get(op->ptr, "wait_for_input");
/* fallback (incase we can't find the depth on first test) */
/* Fallback (in case we can't find the depth on first test). */
{
const float mval_fl[2] = {UNPACK2(event->mval)};
float center[3];

@ -363,8 +363,8 @@ static void gizmo_arrow_exit(bContext *C, wmGizmo *gz, const bool cancel)
const bool is_prop_valid = WM_gizmo_target_property_is_valid(gz_prop);
if (!cancel) {
/* Assign incase applying the operation needs an updated offset
* editmesh bisect needs this. */
/* Assign in case applying the operation needs an updated offset
* edit-mesh bisect needs this. */
if (is_prop_valid) {
const int transform_flag = RNA_enum_get(arrow->gizmo.ptr, "transform");
const bool constrained = (transform_flag & ED_GIZMO_ARROW_XFORM_FLAG_CONSTRAINED) != 0;

@ -358,7 +358,7 @@ static void gizmo_cage3d_draw_intern(
bool show = false;
if (gz->highlight_part == ED_GIZMO_CAGE3D_PART_TRANSLATE) {
/* Only show if we're drawing the center handle
* otherwise the entire rectangle is the hotspot. */
* otherwise the entire rectangle is the hot-spot. */
if (draw_options & ED_GIZMO_CAGE2D_DRAW_FLAG_XFORM_CENTER_HANDLE) {
show = true;
}

@ -3331,7 +3331,7 @@ typedef enum eGP_ReprojectModes {
GP_REPROJECT_FRONT = 0,
GP_REPROJECT_SIDE,
GP_REPROJECT_TOP,
/* On same plane, parallel to viewplane */
/* On same plane, parallel to view-plane. */
GP_REPROJECT_VIEW,
/* Reprojected on to the scene geometry */
GP_REPROJECT_SURFACE,
@ -3374,11 +3374,10 @@ static int gp_strokes_reproject_exec(bContext *C, wmOperator *op)
for (i = 0, pt = gps->points; i < gps->totpoints; i++, pt++) {
float xy[2];
/* 3D to Screenspace */
/* Note: We can't use gp_point_to_xy() here because that uses ints for the screenspace
* coordinates, resulting in lost precision, which in turn causes stairstepping
* artifacts in the final points.
*/
/* 3D to Screen-space */
/* Note: We can't use gp_point_to_xy() here because that uses ints for the screen-space
* coordinates, resulting in lost precision, which in turn causes stair-stepping
* artifacts in the final points. */
bGPDspoint pt2;
gp_point_to_parent_space(pt, gpstroke_iter.diff_mat, &pt2);
gp_point_to_xy_fl(&gsc, gps, &pt2, &xy[0], &xy[1]);
@ -3427,16 +3426,15 @@ static int gp_strokes_reproject_exec(bContext *C, wmOperator *op)
/* apply parent again */
gp_apply_parent_point(depsgraph, ob, gpd, gpl, pt);
}
/* Project screenspace back to 3D space (from current perspective)
* so that all points have been treated the same way
*/
/* Project screen-space back to 3D space (from current perspective)
* so that all points have been treated the same way. */
else if (mode == GP_REPROJECT_VIEW) {
/* Planar - All on same plane parallel to the viewplane */
/* Planar - All on same plane parallel to the view-plane. */
gp_point_xy_to_3d(&gsc, scene, xy, &pt->x);
}
else {
/* Geometry - Snap to surfaces of visible geometry */
/* XXX: There will be precision loss (possible stairstep artifacts)
/* XXX: There will be precision loss (possible stair-step artifacts)
* from this conversion to satisfy the API's */
const int screen_co[2] = {(int)xy[0], (int)xy[1]};
@ -4440,7 +4438,7 @@ static int gpencil_cutter_lasso_select(bContext *C,
if ((pt->flag & GP_SPOINT_SELECT) || (pt->flag & GP_SPOINT_TAG)) {
continue;
}
/* convert point coords to screenspace */
/* convert point coords to screen-space */
const bool is_inside = is_inside_fn(gps, pt, &gsc, gpstroke_iter.diff_mat, user_data);
if (is_inside) {
tot_inside++;

@ -1844,7 +1844,7 @@ static void scroller_activate_init(bContext *C,
*/
scrollers = UI_view2d_scrollers_calc(v2d, NULL);
/* use a union of 'cur' & 'tot' incase the current view is far outside 'tot'. In this cases
/* Use a union of 'cur' & 'tot' in case the current view is far outside 'tot'. In this cases
* moving the scroll bars has far too little effect and the view can get stuck T31476. */
tot_cur_union = v2d->tot;
BLI_rctf_union(&tot_cur_union, &v2d->cur);

@ -2949,7 +2949,7 @@ static int knifetool_modal(bContext *C, wmOperator *op, const wmEvent *event)
case KNF_MODAL_ADD_CUT_CLOSED:
if (kcd->mode == MODE_DRAGGING) {
/* shouldn't be possible with default key-layout, just incase... */
/* Shouldn't be possible with default key-layout, just in case. */
if (kcd->is_drag_hold) {
kcd->is_drag_hold = false;
knifetool_update_mval(kcd, kcd->curr.mval);

@ -1116,7 +1116,7 @@ void ED_screen_full_prevspace(bContext *C, ScrArea *sa)
void ED_screen_restore_temp_type(bContext *C, ScrArea *sa)
{
/* incase nether functions below run */
/* In case neither of the functions below runs. */
ED_area_tag_redraw(sa);
if (sa->flag & AREA_FLAG_TEMP_TYPE) {

@ -3742,7 +3742,7 @@ static int region_quadview_exec(bContext *C, wmOperator *op)
rv3d->viewlock = 0;
rv3d->rflag &= ~RV3D_CLIPPING;
/* accumulate locks, incase they're mixed */
/* Accumulate locks, in case they're mixed. */
for (ar_iter = sa->regionbase.first; ar_iter; ar_iter = ar_iter->next) {
if (ar_iter->regiontype == RGN_TYPE_WINDOW) {
RegionView3D *rv3d_iter = ar_iter->regiondata;

@ -1719,9 +1719,9 @@ static float project_paint_uvpixel_mask(const ProjPaintState *ps,
normalize_v3(no);
}
else {
/* incase the */
#if 1
/* normalizing per pixel isn't optimal, we could cache or check ps->*/
/* In case the normalizing per pixel isn't optimal,
* we could cache or access from evaluated mesh. */
normal_tri_v3(no,
ps->mvert_eval[lt_vtri[0]].co,
ps->mvert_eval[lt_vtri[1]].co,

@ -404,9 +404,7 @@ static void viewops_data_create(bContext *C,
if (viewops_flag & VIEWOPS_FLAG_PERSP_ENSURE) {
if (ED_view3d_persp_ensure(depsgraph, vod->v3d, vod->ar)) {
/* If we're switching from camera view to the perspective one,
* need to tag viewport update, so camera vuew and borders
* are properly updated.
*/
* need to tag viewport update, so camera view and borders are properly updated. */
ED_region_tag_redraw(vod->ar);
}
}

@ -622,8 +622,8 @@ bool ED_view3d_camera_autokey(
/**
* Call after modifying a locked view.
*
* \note Not every view edit currently auto-keys (numpad for eg),
* this is complicated because of smoothview.
* \note Not every view edit currently auto-keys (num-pad for eg),
* this is complicated because of smooth-view.
*/
bool ED_view3d_camera_lock_autokey(View3D *v3d,
RegionView3D *rv3d,

@ -6601,7 +6601,7 @@ static void slide_origdata_interp_data_vert(SlideOrigData *sod,
* and we do not want to mess up other shape keys */
BM_loop_interp_from_face(bm, l, f_copy, false, false);
/* make sure face-attributes are correct (e.g. MTexPoly) */
/* make sure face-attributes are correct (e.g. #MLoopUV, #MLoopCol) */
BM_elem_attrs_copy_ex(sod->bm_origfaces, bm, f_copy, l->f, 0x0, CD_MASK_NORMAL);
/* weight the loop */

@ -299,7 +299,7 @@ static void calcSpringFactor(MouseInput *mi)
void initMouseInputMode(TransInfo *t, MouseInput *mi, MouseInputMode mode)
{
/* incase we allocate a new value */
/* In case we allocate a new value. */
void *mi_data_prev = mi->data;
mi->use_virtual_mval = true;

@ -19,7 +19,7 @@
/** \file
* \ingroup freestyle
* \brief A Set of indexed faces to represent a surfacic object
* \brief A Set of indexed faces to represent a surface object
*/
#include <memory.h>

@ -253,7 +253,7 @@ void GPU_vertformat_safe_attrib_name(const char *attrib_name,
*(uint *)&data[4] = BLI_ghashutil_strhash_p_murmur(attrib_name + 4);
}
else {
/* Copy the whole name. Collision is barelly possible
/* Copy the whole name. Collision is barely possible
* (hash would have to be equal to the last 4 bytes). */
for (int i = 0; i < 8 && attrib_name[i] != '\0'; i++) {
data[i] = attrib_name[i];

@ -46,8 +46,7 @@
*
* IMB needs:
* - \ref DNA module
* The listbase types are used for handling the memory
* management.
* The #ListBase types are used for handling the memory management.
* - \ref blenlib module
* blenlib handles guarded memory management in blender-style.
* BLI_winstuff.h makes a few windows specific behaviors

@ -1629,8 +1629,8 @@ bool IMB_scaleImBuf(struct ImBuf *ibuf, unsigned int newx, unsigned int newy)
return false;
}
/* scaleup / scaledown functions below change ibuf->x and ibuf->y
* so we first scale the Z-buffer (if any) */
/* Scale-up / scale-down functions below change ibuf->x and ibuf->y
* so we first scale the Z-buffer (if any). */
scalefast_Z_ImBuf(ibuf, newx, newy);
/* try to scale common cases in a fast way */

@ -188,10 +188,6 @@ typedef struct MVertTri {
unsigned int tri[3];
} MVertTri;
// typedef struct MTexPoly {
// void *_pad;
//} MTexPoly;
typedef struct MLoopUV {
float uv[2];
int flag;

@ -339,7 +339,7 @@ static bool init_structDNA(SDNA *sdna, bool do_endian_swap, const char **r_error
data = (int *)sdna->data;
/* clear pointers incase of error */
/* Clear pointers in case of error. */
sdna->names = NULL;
sdna->types = NULL;
sdna->structs = NULL;

@ -1432,8 +1432,8 @@ bool rna_Object_constraints_override_apply(Main *UNUSED(bmain),
Object *ob_src = (Object *)ptr_src->id.data;
/* Remember that insertion operations are defined and stored in correct order, which means that
* even if we insert several items in a row, we alays insert first one, then second one, etc.
* So we should always find 'anchor' constraint in both _src *and* _dst> */
* even if we insert several items in a row, we always insert first one, then second one, etc.
* So we should always find 'anchor' constraint in both _src *and* _dst. */
bConstraint *con_anchor = NULL;
if (opop->subitem_local_name && opop->subitem_local_name[0]) {
con_anchor = BLI_findstring(
@ -1520,8 +1520,8 @@ bool rna_Object_modifiers_override_apply(Main *UNUSED(bmain),
Object *ob_src = (Object *)ptr_src->id.data;
/* Remember that insertion operations are defined and stored in correct order, which means that
* even if we insert several items in a row, we alays insert first one, then second one, etc.
* So we should always find 'anchor' constraint in both _src *and* _dst> */
* even if we insert several items in a row, we always insert first one, then second one, etc.
* So we should always find 'anchor' constraint in both _src *and* _dst. */
ModifierData *mod_anchor = NULL;
if (opop->subitem_local_name && opop->subitem_local_name[0]) {
mod_anchor = BLI_findstring(

@ -888,7 +888,7 @@ static void rna_def_pointcache_common(StructRNA *srna)
prop = RNA_def_property(srna, "info", PROP_STRING, PROP_NONE);
RNA_def_property_string_sdna(prop, NULL, "info");
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
/* Note that we do not actually need a getter here, `rna_Cache_info_length` will upate the info
/* Note that we do not actually need a getter here, `rna_Cache_info_length` will update the info
* string just as well. */
RNA_def_property_string_funcs(prop, NULL, "rna_Cache_info_length", NULL);
RNA_def_property_string_maxlength(

@ -821,8 +821,8 @@ static void rna_Scene_fps_update(Main *bmain, Scene *scene, PointerRNA *UNUSED(p
{
DEG_id_tag_update(&scene->id, ID_RECALC_AUDIO_FPS | ID_RECALC_SEQUENCER_STRIPS);
/* NOTE: Tag via dependency graph will take care of all the updates ion the evaluated domain,
* however, changes in FPS actually modifies an original stip length, so this we take care about
* here. */
* however, changes in FPS actually modify an original strip length,
* so this we take care about here. */
BKE_sequencer_refresh_sound_length(bmain, scene);
}

@ -138,10 +138,10 @@ static Mesh *applyModifier(ModifierData *md, const ModifierEvalContext *ctx, Mes
return mesh;
}
/* Determine whether each vertexgroup is associated with a selected bone or not:
* - Each cell is a boolean saying whether bone corresponding to the ith group is selected.
/* Determine whether each vertex-group is associated with a selected bone or not:
* - Each cell is a boolean saying whether bone corresponding to the i'th group is selected.
* - Groups that don't match a bone are treated as not existing
* (along with the corresponding ungrouped verts).
* (along with the corresponding un-grouped verts).
*/
bone_select_array = MEM_malloc_arrayN((size_t)defbase_tot, sizeof(char), "mask array");

@ -20,8 +20,8 @@
/** \file
* \ingroup pybmesh
*
* This file defines customdata types which can't be accessed as primitive
* python types such as MDeformVert, MLoopUV, MTexPoly
* This file defines custom-data types which can't be accessed as primitive
* python types such as #MDeformVert, #MLoopUV.
*/
#include <Python.h>
@ -515,8 +515,8 @@ static PySequenceMethods bpy_bmdeformvert_as_sequence = {
NULL, /* sq_concat */
NULL, /* sq_repeat */
/* note: if this is set PySequence_Check() returns True,
* but in this case we dont want to be treated as a seq */
/* Note: if this is set #PySequence_Check() returns True,
* but in this case we don't want to be treated as a seq. */
NULL, /* sq_item */
NULL, /* sq_slice */

@ -890,7 +890,7 @@ PyObject *BPY_app_translations_struct(void)
void BPY_app_translations_end(void)
{
/* Incase the object remains in a module's namespace, see T44127. */
/* In case the object remains in a module's name-space, see T44127. */
#ifdef WITH_INTERNATIONAL
_clear_translations_cache();
#endif

@ -1015,7 +1015,7 @@ static PyObject *Quaternion_matmul(PyObject *q1, PyObject *q2)
return NULL;
}
/*------------------------obj @= obj------------------------------
* inplace quaternion multiplication */
* in-place quaternion multiplication */
static PyObject *Quaternion_imatmul(PyObject *q1, PyObject *q2)
{
float quat[QUAT_SIZE];

@ -786,10 +786,8 @@ static PyObject *Vector_to_track_quat(VectorObject *self, PyObject *args)
return NULL;
}
/*
* flip vector around, since vectoquat expect a vector from target to tracking object
* and the python function expects the inverse (a vector to the target).
*/
/* Flip vector around, since #vec_to_quat expect a vector from target to tracking object
* and the python function expects the inverse (a vector to the target). */
negate_v3_v3(vec, self->vec);
vec_to_quat(quat, vec, track, up);

@ -1213,7 +1213,7 @@ static PyObject *M_Geometry_tessellate_polygon(PyObject *UNUSED(self), PyObject
PyObject *polyLine, *polyVec;
int i, len_polylines, len_polypoints, ls_error = 0;
/* display listbase */
/* Display #ListBase. */
ListBase dispbase = {NULL, NULL};
DispList *dl;
float *fp; /*pointer to the array of malloced dl->verts to set the points from the vectors */
@ -1299,7 +1299,7 @@ static PyObject *M_Geometry_tessellate_polygon(PyObject *UNUSED(self), PyObject
BKE_displist_fill(&dispbase, &dispbase, NULL, false);
/* The faces are stored in a new DisplayList
* that's added to the head of the listbase */
* that's added to the head of the #ListBase. */
dl = dispbase.first;
tri_list = PyList_New(dl->parts);

@ -1992,8 +1992,8 @@ static int render_initialize_from_main(Render *re,
winx = (rd->size * rd->xsch) / 100;
winy = (rd->size * rd->ysch) / 100;
/* We always render smaller part, inserting it in larger image is compositor bizz,
* it uses disprect for it. */
/* We always render smaller part, inserting it in larger image is compositor business,
* it uses 'disprect' for it. */
if (scene->r.mode & R_BORDER) {
disprect.xmin = rd->border.xmin * winx;
disprect.xmax = rd->border.xmax * winx;

@ -1351,7 +1351,7 @@ static bool wm_file_write(bContext *C, const char *filepath, int fileflags, Repo
}
}
/* Call pre-save callbacks befores writing preview,
/* Call pre-save callbacks before writing preview,
* that way you can generate custom file thumbnail. */
BLI_callback_exec(bmain, NULL, BLI_CB_EVT_SAVE_PRE);

@ -482,8 +482,8 @@ void wm_window_title(wmWindowManager *wm, wmWindow *win)
}
/* Informs GHOST of unsaved changes, to set window modified visual indicator (macOS)
* and to give hint of unsaved changes for a user warning mechanism in case of OS
* application terminate request (e.g. OS Shortcut Alt+F4, Cmd+Q, (...), or session end). */
* and to give hint of unsaved changes for a user warning mechanism in case of OS application
* terminate request (e.g. OS Shortcut Alt+F4, Command+Q, (...), or session end). */
GHOST_SetWindowModifiedState(win->ghostwin, (GHOST_TUns8)!wm->file_saved);
}
}

@ -400,8 +400,8 @@ int main(int argc,
/* background render uses this font too */
BKE_vfont_builtin_register(datatoc_bfont_pfb, datatoc_bfont_pfb_size);
/* Initialize ffmpeg if built in, also needed for bg mode if videos are
* rendered via ffmpeg */
/* Initialize ffmpeg if built in, also needed for background-mode if videos are
* rendered via ffmpeg. */
BKE_sound_init_once();
init_def_material();