diff --git a/release/datafiles/blender_icons_geom_update.py b/release/datafiles/blender_icons_geom_update.py index 770b0bdc0cb..5f60b314b37 100755 --- a/release/datafiles/blender_icons_geom_update.py +++ b/release/datafiles/blender_icons_geom_update.py @@ -31,10 +31,10 @@ def edit_text_file(filename: str, marker_begin: str, marker_end: str, content: s while data[marker_end_index - 1] in {'\t', ' '}: marker_end_index -= 1 if marker_begin_index == -1: - print('Error: %r not found' % marker_begin) + print('Error: {!r} not found'.format(marker_begin)) return if marker_end_index == -1: - print('Error: %r not found' % marker_end) + print('Error: {!r} not found'.format(marker_end)) return marker_begin_index += len(marker_begin) + 1 data_update = data[:marker_begin_index] + content + data[marker_end_index:] diff --git a/release/datafiles/ctodata.py b/release/datafiles/ctodata.py index 43740aff894..8a456c4bb5c 100755 --- a/release/datafiles/ctodata.py +++ b/release/datafiles/ctodata.py @@ -21,7 +21,7 @@ filename = argv[1] try: fpin = open(filename, "r") except: - sys.stdout.write("Unable to open input %s\n" % argv[1]) + sys.stdout.write("Unable to open input {:s}\n".format(argv[1])) sys.exit(1) data_as_str = fpin.read().rsplit("{")[-1].split("}")[0] @@ -39,14 +39,14 @@ del data_as_list dname = filename + ".ctodata" -sys.stdout.write("Making DATA file <%s>\n" % dname) +sys.stdout.write("Making DATA file <{:s}>\n".format(dname)) try: fpout = open(dname, "wb") except: - sys.stdout.write("Unable to open output %s\n" % dname) + sys.stdout.write("Unable to open output {:s}\n".format(dname)) sys.exit(1) size = fpout.write(data) -sys.stdout.write("%d\n" % size) +sys.stdout.write("{:d}\n".format(size)) diff --git a/tools/check_source/check_spelling.py b/tools/check_source/check_spelling.py index 8286789d35a..db529a4107c 100755 --- a/tools/check_source/check_spelling.py +++ b/tools/check_source/check_spelling.py @@ -423,7 +423,7 @@ def extract_c_comments(filepath: str) -> 
Tuple[List[Comment], Set[str]]: star_offsets.add(l.find("*", l_ofs_first)) l_ofs_first = 0 if len(star_offsets) > 1: - print("%s:%d" % (filepath, line_index + text.count("\n", 0, i))) + print("{:s}:{:d}".format(filepath, line_index + text.count("\n", 0, i))) break if not PRINT_SPELLING: @@ -492,7 +492,7 @@ def spell_check_report(filepath: str, check_type: str, report: Report) -> None: if suggest is None: _suggest_map[w_lower] = suggest = " ".join(dictionary_suggest(w)) - print("%s:%d:%d: %s%s%s, suggest (%s)" % ( + print("{:s}:{:d}:{:d}: {:s}{:s}{:s}, suggest ({:s})".format( filepath, slineno + 1, scol + 1, @@ -502,7 +502,7 @@ def spell_check_report(filepath: str, check_type: str, report: Report) -> None: suggest, )) elif check_type == 'DUPLICATES': - print("%s:%d:%d: %s%s%s, duplicate" % ( + print("{:s}:{:d}:{:d}: {:s}{:s}{:s}, duplicate".format( filepath, slineno + 1, scol + 1, diff --git a/tools/triage/weekly_report.py b/tools/triage/weekly_report.py index 3dc92efaded..c96dfbfaaf8 100644 --- a/tools/triage/weekly_report.py +++ b/tools/triage/weekly_report.py @@ -230,14 +230,14 @@ def report_personal_weekly_get(username: str, start: datetime.datetime, verbose: issues_involved = issues_closed | issues_commented | issues_created - print("**Involved in %s reports:** " % len(issues_involved)) - print("* Confirmed: %s" % len(issues_confirmed)) - print("* Closed as Resolved: %s" % len(issues_fixed)) - print("* Closed as Archived: %s" % len(issues_archived)) - print("* Closed as Duplicate: %s" % len(issues_duplicated)) - print("* Needs Info from User: %s" % len(issues_needing_user_info)) - print("* Needs Info from Developers: %s" % len(issues_needing_developer_info)) - print("* Actions total: %s" % (len(issues_closed) + len(issues_commented) + len(issues_created))) + print("**Involved in {:d} reports:** ".format(len(issues_involved))) + print("* Confirmed: {:d}".format(len(issues_confirmed))) + print("* Closed as Resolved: {:d}".format(len(issues_fixed))) + print("* 
Closed as Archived: {:d}".format(len(issues_archived))) + print("* Closed as Duplicate: {:d}".format(len(issues_duplicated))) + print("* Needs Info from User: {:d}".format(len(issues_needing_user_info))) + print("* Needs Info from Developers: {:d}".format(len(issues_needing_developer_info))) + print("* Actions total: {:d}".format(len(issues_closed) + len(issues_commented) + len(issues_created))) print() # Print review stats @@ -248,12 +248,12 @@ def report_personal_weekly_get(username: str, start: datetime.datetime, verbose: owner, repo, _, number = pull.split('/') print(f"* {title} ({owner}/{repo}!{number})") - print("**Review: %s**" % len(pulls_reviewed)) + print("**Review: {:d}**".format(len(pulls_reviewed))) print_pulls(pulls_reviewed) print() # Print created diffs - print("**Created Pull Requests: %s**" % len(pulls_created)) + print("**Created Pull Requests: {:d}**".format(len(pulls_created))) print_pulls(pulls_created) print() diff --git a/tools/utils/blender_theme_as_c.py b/tools/utils/blender_theme_as_c.py index 06bba5da238..b3723d12ecc 100755 --- a/tools/utils/blender_theme_as_c.py +++ b/tools/utils/blender_theme_as_c.py @@ -59,7 +59,7 @@ def repr_f32(f): f_test = round(f, i) f_test_round = round_float_32(f_test) if f_test_round == f_round: - return "%.*f" % (i, f_test) + return "{:.{:d}f}".format(f_test, i) return f_str diff --git a/tools/utils/gdb_struct_repr_c99.py b/tools/utils/gdb_struct_repr_c99.py index f69cf45b9c7..41ab044350c 100644 --- a/tools/utils/gdb_struct_repr_c99.py +++ b/tools/utils/gdb_struct_repr_c99.py @@ -46,7 +46,7 @@ class PrintStructC99(gdb.Command): print(' ' * hs + '.' 
+ rr_s[0] + '= ' + rr_rval) -print('Running GDB from: %s\n' % (gdb.PYTHONDIR)) +print('Running GDB from: {:s}\n'.format(gdb.PYTHONDIR)) gdb.execute("set print pretty") gdb.execute('set pagination off') gdb.execute('set print repeats 0') diff --git a/tools/utils_doc/rna_manual_reference_updater.py b/tools/utils_doc/rna_manual_reference_updater.py index 9ac2fd8baaa..8ea6e792fc3 100644 --- a/tools/utils_doc/rna_manual_reference_updater.py +++ b/tools/utils_doc/rna_manual_reference_updater.py @@ -60,7 +60,7 @@ def sphobjinv_sanity_check(o): if m: fail_char = m.span(0)[1] msg = "WARNING: invalid char found for name:" - print(msg, name, "(at index %d)" % fail_char, "skipping!") + print(msg, name, "(at index {:d})".format(fail_char), "skipping!") print(" " * (len(msg) + fail_char), "^") return False @@ -76,19 +76,19 @@ def write_mappings(inv, output): fw = file.write year = datetime.date.today().year - fw("# SPDX-FileCopyrightText: 2019-%d Blender Authors\n" % year) + fw("# SPDX-FileCopyrightText: 2019-{:d} Blender Authors\n".format(year)) fw("#\n") fw("# SPDX-License-Identifier: GPL-2.0-or-later\n") fw("\n") - fw("# Do not edit this file.") - fw(" This file is auto generated from rna_manual_reference_updater.py\n\n") + fw("# Do not edit this file. This file is auto generated from:\n") + fw("# ./tools/utils_doc/rna_manual_reference_updater.py\n\n") # Prevent systems with autopep8 configured from re-formatting the file. fw("# autopep8: off\n") fw( "import bpy\n" "\n" - "url_manual_prefix = \"https://docs.blender.org/manual/%s/%d.%d/\" % (\n" + "url_manual_prefix = \"https://docs.blender.org/manual/{:s}/{:d}.{:d}/\".format(\n" " bpy.utils.manual_language_code(),\n" " *bpy.app.version[:2],\n" ")\n" @@ -117,7 +117,7 @@ def write_mappings(inv, output): def is_valid_file(parser, arg): if not os.path.isfile(arg): - parser.error("The file %s does not exist!" 
% arg) + parser.error("The file {:s} does not exist!".format(arg)) else: return arg diff --git a/tools/utils_ide/qtcreator/externaltools/qtc_cpp_to_c_comments.py b/tools/utils_ide/qtcreator/externaltools/qtc_cpp_to_c_comments.py index 8f7929d45c1..b734485e0ee 100755 --- a/tools/utils_ide/qtcreator/externaltools/qtc_cpp_to_c_comments.py +++ b/tools/utils_ide/qtcreator/externaltools/qtc_cpp_to_c_comments.py @@ -50,7 +50,7 @@ def block_data(data, i_start): data[i_begin] = data[i_begin].replace("//", "/*", 1) for i in range(i_begin + 1, i_end + 1): data[i] = data[i].replace("//", " *", 1) - data[i_end] = "%s */" % data[i_end].rstrip() + data[i_end] = "{:s} */".format(data[i_end].rstrip()) # done with block comment, still go onto do regular replace return max(i_end, i_start + 1) @@ -69,7 +69,7 @@ for i, l in enumerate(data): text, comment = l.split("//", 1) - l = "%s/* %s */" % (text, comment.strip()) + l = "{:s}/* {:s} */".format(text, comment.strip()) data[i] = l diff --git a/tools/utils_maintenance/autopep8_format_paths.py b/tools/utils_maintenance/autopep8_format_paths.py index 42e319ca209..05ea28e0461 100755 --- a/tools/utils_maintenance/autopep8_format_paths.py +++ b/tools/utils_maintenance/autopep8_format_paths.py @@ -111,7 +111,7 @@ def autopep8_ensure_version(autopep8_format_cmd_argument: str) -> Optional[Tuple if version_str is not None: # Ensure exactly 3 numbers. major, minor, patch = (tuple(int(n) for n in version_str.split("-")[0].split(".")) + (0, 0, 0))[0:3] - print("Using %s (%d.%d.%d)..." % (AUTOPEP8_FORMAT_CMD, major, minor, patch)) + print("Using {:s} ({:d}.{:d}.{:d})...".format(AUTOPEP8_FORMAT_CMD, major, minor, patch)) return major, minor, patch return None @@ -177,9 +177,10 @@ def main() -> None: if version is None: print("Unable to detect 'autopep8 --version'") print( - "You may want to install autopep8-%d.%d, " - "or use the precompiled libs repository." 
% - (VERSION_MAX_RECOMMENDED[0], VERSION_MAX_RECOMMENDED[1]), + "You may want to install autopep8-{:d}.{:d}, " + "or use the precompiled libs repository.".format( + VERSION_MAX_RECOMMENDED[0], VERSION_MAX_RECOMMENDED[1], + ), ) sys.exit(1) if version < VERSION_MIN: @@ -191,15 +192,16 @@ def main() -> None: version, ">", VERSION_MAX_RECOMMENDED, ) print( - "You may want to install autopep8-%d.%d, " - "or use the precompiled libs repository." % - (VERSION_MAX_RECOMMENDED[0], VERSION_MAX_RECOMMENDED[1]), + "You may want to install autopep8-{:d}.{:d}, " + "or use the precompiled libs repository.".format( + VERSION_MAX_RECOMMENDED[0], VERSION_MAX_RECOMMENDED[1], + ), ) use_default_paths = not (bool(args.paths) or bool(args.changed_only)) paths = compute_paths(args.paths, use_default_paths) - print("Operating on:" + (" (%d changed paths)" % len(paths) if args.changed_only else "")) + print("Operating on:" + (" ({:d} changed paths)".format(len(paths)) if args.changed_only else "")) for p in paths: print(" ", p) diff --git a/tools/utils_maintenance/blender_menu_search_coverage.py b/tools/utils_maintenance/blender_menu_search_coverage.py index f0b453b0eec..b4d247c65e8 100644 --- a/tools/utils_maintenance/blender_menu_search_coverage.py +++ b/tools/utils_maintenance/blender_menu_search_coverage.py @@ -197,7 +197,7 @@ def operator_list(): def is_op_ok(op): for op_match in OPERATOR_IGNORE: if fnmatchcase(op, op_match): - print(" skipping: %s (%s)" % (op, op_match)) + print(" skipping: {:s} ({:s})".format(op, op_match)) return False return True @@ -210,7 +210,7 @@ def operator_list(): if 'INTERNAL' in bl_options: continue - op_id = "%s.%s" % (mod_name, submod_name) + op_id = "{:s}.{:s}".format(mod_name, submod_name) if not is_op_ok(op_id): continue @@ -642,7 +642,7 @@ def perform_coverage_test(): # Report: print( - "Coverage %.2f%% (%d of %d)" % ( + "Coverage {:.2f}% ({:d} of {:d})".format( (len_op_menu / len_op) * 100.0, len_op_menu, len_op, diff --git 
a/tools/utils_maintenance/blender_update_themes.py b/tools/utils_maintenance/blender_update_themes.py index 1c08f58a734..c4d7a139258 100644 --- a/tools/utils_maintenance/blender_update_themes.py +++ b/tools/utils_maintenance/blender_update_themes.py @@ -14,7 +14,7 @@ def update(filepath): import rna_xml context = bpy.context - print("Updating theme: %r" % filepath) + print("Updating theme: {!r}".format(filepath)) preset_xml_map = ( ("preferences.themes[0]", "Theme"), ("preferences.ui_styles[0]", "Theme"), diff --git a/tools/utils_maintenance/clang_format_paths.py b/tools/utils_maintenance/clang_format_paths.py index ade503c637e..1bc5863cd38 100755 --- a/tools/utils_maintenance/clang_format_paths.py +++ b/tools/utils_maintenance/clang_format_paths.py @@ -133,7 +133,7 @@ def clang_format_ensure_version() -> Optional[Tuple[int, int, int]]: version_output = "" for i in range(2, -1, -1): clang_format_cmd = ( - "clang-format-" + (".".join(["%d"] * i) % VERSION_MIN[:i]) + "clang-format-" + (".".join(["{:d}"] * i).format(*VERSION_MIN[:i])) if i > 0 else "clang-format" ) @@ -150,7 +150,7 @@ def clang_format_ensure_version() -> Optional[Tuple[int, int, int]]: version = version.split("-")[0] # Ensure exactly 3 numbers. version_num: Tuple[int, int, int] = (tuple(int(n) for n in version.split(".")) + (0, 0, 0))[:3] # type: ignore - print("Using %s (%d.%d.%d)..." % (CLANG_FORMAT_CMD, version_num[0], version_num[1], version_num[2])) + print("Using {:s} ({:d}.{:d}.{:d})...".format(CLANG_FORMAT_CMD, version_num[0], version_num[1], version_num[2])) return version_num @@ -236,9 +236,10 @@ def main() -> None: version, ">", VERSION_MAX_RECOMMENDED, ) print( - "You may want to install clang-format-%d.%d, " - "or use the precompiled libs repository." 
% - (VERSION_MAX_RECOMMENDED[0], VERSION_MAX_RECOMMENDED[1]), + "You may want to install clang-format-{:d}.{:d}, " + "or use the precompiled libs repository.".format( + VERSION_MAX_RECOMMENDED[0], VERSION_MAX_RECOMMENDED[1], + ), ) args = argparse_create().parse_args() @@ -246,7 +247,7 @@ def main() -> None: use_default_paths = not (bool(args.paths) or bool(args.changed_only)) paths = compute_paths(args.paths, use_default_paths) - print("Operating on:" + (" (%d changed paths)" % len(paths) if args.changed_only else "")) + print("Operating on:" + (" ({:d} changed paths)".format(len(paths)) if args.changed_only else "")) for p in paths: print(" ", p) diff --git a/tools/utils_maintenance/code_clean.py b/tools/utils_maintenance/code_clean.py index 4ac2927c0e9..ce844991707 100755 --- a/tools/utils_maintenance/code_clean.py +++ b/tools/utils_maintenance/code_clean.py @@ -320,16 +320,16 @@ def process_commands(cmake_dir: str, data: Sequence[str]) -> Optional[ProcessedC compiler_c = cmake_cache_var(cmake_dir, "CMAKE_C_COMPILER") compiler_cxx = cmake_cache_var(cmake_dir, "CMAKE_CXX_COMPILER") if compiler_c is None: - sys.stderr.write("Can't find C compiler in %r\n" % cmake_dir) + sys.stderr.write("Can't find C compiler in {!r}\n".format(cmake_dir)) return None if compiler_cxx is None: - sys.stderr.write("Can't find C++ compiler in %r\n" % cmake_dir) + sys.stderr.write("Can't find C++ compiler in {!r}\n".format(cmake_dir)) return None # Check for unsupported configurations. for arg in ("WITH_UNITY_BUILD", "WITH_COMPILER_CCACHE", "WITH_COMPILER_PRECOMPILED_HEADERS"): if cmake_cache_var_is_true(cmake_cache_var(cmake_dir, arg)): - sys.stderr.write("The option '%s' must be disabled for proper functionality\n" % arg) + sys.stderr.write("The option '{:s}' must be disabled for proper functionality\n".format(arg)) return None file_args = [] @@ -442,12 +442,12 @@ class EditGenerator: def __init_subclass__(cls) -> None: # Ensure the sub-class declares this. 
if (not isinstance(getattr(cls, "is_default", None), bool)) or ("is_default" not in cls.__dict__): - raise Exception("Class %r missing \"is_default\" boolean!" % cls) + raise Exception("Class {!r} missing \"is_default\" boolean!".format(cls)) if getattr(cls, "edit_list_from_file") is EditGenerator.edit_list_from_file: - raise Exception("Class %r missing \"edit_list_from_file\" callback!" % cls) + raise Exception("Class {!r} missing \"edit_list_from_file\" callback!".format(cls)) def __new__(cls, *args: Tuple[Any], **kwargs: Dict[str, Any]) -> Any: - raise RuntimeError("%s should not be instantiated" % cls) + raise RuntimeError("Class {!r} should not be instantiated".format(cls)) @staticmethod def edit_list_from_file(_source: str, _data: str, _shared_edit_data: Any) -> List[Edit]: @@ -488,21 +488,21 @@ class edit_generators: for match in re.finditer(r"sizeof\(([a-zA-Z_]+)\) \* (\d+) \* (\d+)", data): edits.append(Edit( span=match.span(), - content='sizeof(%s[%s][%s])' % (match.group(1), match.group(2), match.group(3)), + content='sizeof({:s}[{:s}][{:s}])'.format(match.group(1), match.group(2), match.group(3)), content_fail='__ALWAYS_FAIL__', )) for match in re.finditer(r"sizeof\(([a-zA-Z_]+)\) \* (\d+)", data): edits.append(Edit( span=match.span(), - content='sizeof(%s[%s])' % (match.group(1), match.group(2)), + content='sizeof({:s}[{:s}])'.format(match.group(1), match.group(2)), content_fail='__ALWAYS_FAIL__', )) for match in re.finditer(r"\b(\d+) \* sizeof\(([a-zA-Z_]+)\)", data): edits.append(Edit( span=match.span(), - content='sizeof(%s[%s])' % (match.group(2), match.group(1)), + content='sizeof({:s}[{:s}])'.format(match.group(2), match.group(1)), content_fail='__ALWAYS_FAIL__', )) return edits @@ -546,7 +546,7 @@ class edit_generators: for match in re.finditer(r"(\(|, | )([a-zA-Z_0-9]+ [a-zA-Z_0-9]+\[)\b([^\n]+ = )", data): edits.append(Edit( span=match.span(), - content='%s const %s%s' % (match.group(1), match.group(2), match.group(3)), + content='{:s} const 
{:s}{:s}'.format(match.group(1), match.group(2), match.group(3)), content_fail='__ALWAYS_FAIL__', )) @@ -554,7 +554,7 @@ class edit_generators: for match in re.finditer(r"(\(|, )([a-zA-Z_0-9]+ [a-zA-Z_0-9]+\[)", data): edits.append(Edit( span=match.span(), - content='%s const %s' % (match.group(1), match.group(2)), + content='{:s} const {:s}'.format(match.group(1), match.group(2)), content_fail='__ALWAYS_FAIL__', )) @@ -570,7 +570,9 @@ class edit_generators: ): edits.append(Edit( span=match.span(), - content='%sconst %s%s%s' % (match.group(1), match.group(2), match.group(3), match.group(4)), + content='{:s}const {:s}{:s}{:s}'.format( + match.group(1), match.group(2), match.group(3), match.group(4), + ), content_fail='__ALWAYS_FAIL__', )) @@ -600,7 +602,7 @@ class edit_generators: for match in re.finditer(r"\b(\d+)\.([fF])\b", data): edits.append(Edit( span=match.span(), - content='%s.0%s' % (match.group(1), match.group(2)), + content='{:s}.0{:s}'.format(match.group(1), match.group(2)), content_fail='__ALWAYS_FAIL__', )) @@ -608,7 +610,7 @@ class edit_generators: for match in re.finditer(r"\b(\d+\.\d+)F\b", data): edits.append(Edit( span=match.span(), - content='%sf' % (match.group(1),), + content='{:s}f'.format(match.group(1)), content_fail='__ALWAYS_FAIL__', )) @@ -646,7 +648,7 @@ class edit_generators: edits.append(Edit( span=match.span(), - content='u%s' % match.group(2), + content='u{:s}'.format(match.group(2)), content_fail='__ALWAYS_FAIL__', )) @@ -765,8 +767,8 @@ class edit_generators: ): edits.append(Edit( span=match.span(), - content='/*%s*/%s' % (match.group(2), match.group(3)), - content_fail='__ALWAYS_FAIL__(%s%s)' % (match.group(2), match.group(3)), + content='/*{:s}*/{:s}'.format(match.group(2), match.group(3)), + content_fail='__ALWAYS_FAIL__({:s}{:s})'.format(match.group(2), match.group(3)), )) return edits @@ -832,14 +834,14 @@ class edit_generators: if found: edits.append(Edit( span=match.span(), - content='(%sELEM(%s, %s))' % ( + 
content='({:s}ELEM({:s}, {:s}))'.format( ('' if is_equal else '!'), var, ', '.join(var_rest), ), # Use same expression otherwise this can change values # inside assert when it shouldn't. - content_fail='(%s__ALWAYS_FAIL__(%s, %s))' % ( + content_fail='({:s}__ALWAYS_FAIL__({:s}, {:s}))'.format( ('' if is_equal else '!'), var, ', '.join(var_rest), @@ -914,14 +916,14 @@ class edit_generators: if found: edits.append(Edit( span=match.span(), - content='(%sSTR_ELEM(%s, %s))' % ( + content='({:s}STR_ELEM({:s}, {:s}))'.format( ('' if is_equal else '!'), var, ', '.join(var_rest), ), # Use same expression otherwise this can change values # inside assert when it shouldn't. - content_fail='(%s__ALWAYS_FAIL__(%s, %s))' % ( + content_fail='({:s}__ALWAYS_FAIL__({:s}, {:s}))'.format( ('' if is_equal else '!'), var, ', '.join(var_rest), @@ -951,14 +953,14 @@ class edit_generators: # for match in re.finditer(r"( [a-zA-Z0-9_]+ [a-zA-Z0-9_]+ = [A-Z][A-Z_0-9_]*;)", data): # edits.append(Edit( # span=match.span(), - # content='const %s' % (match.group(1).lstrip()), + # content='const {:s}'.format(match.group(1).lstrip()), # content_fail='__ALWAYS_FAIL__', # )) for match in re.finditer(r"( [a-zA-Z0-9_]+ [a-zA-Z0-9_]+ = .*;)", data): edits.append(Edit( span=match.span(), - content='const %s' % (match.group(1).lstrip()), + content='const {:s}'.format(match.group(1).lstrip()), content_fail='__ALWAYS_FAIL__', )) @@ -1021,7 +1023,7 @@ class edit_generators: for match in re.finditer(r"return \(([a-zA-Z_0-9]+)\);", data): edits.append(Edit( span=match.span(), - content='return %s;' % (match.group(1)), + content='return {:s};'.format(match.group(1)), content_fail='return __ALWAYS_FAIL__;', )) return edits @@ -1050,13 +1052,13 @@ class edit_generators: for match in re.finditer(r"\bstrcmp\((.*)\) == 0", data): edits.append(Edit( span=match.span(), - content='STREQ(%s)' % (match.group(1)), + content='STREQ({:s})'.format(match.group(1)), content_fail='__ALWAYS_FAIL__', )) for match in 
re.finditer(r"!strcmp\((.*)\)", data): edits.append(Edit( span=match.span(), - content='STREQ(%s)' % (match.group(1)), + content='STREQ({:s})'.format(match.group(1)), content_fail='__ALWAYS_FAIL__', )) @@ -1064,13 +1066,13 @@ class edit_generators: for match in re.finditer(r"\bstrcmp\((.*)\) != 0", data): edits.append(Edit( span=match.span(), - content='!STREQ(%s)' % (match.group(1)), + content='!STREQ({:s})'.format(match.group(1)), content_fail='__ALWAYS_FAIL__', )) for match in re.finditer(r"\bstrcmp\((.*)\)", data): edits.append(Edit( span=match.span(), - content='!STREQ(%s)' % (match.group(1)), + content='!STREQ({:s})'.format(match.group(1)), content_fail='__ALWAYS_FAIL__', )) @@ -1152,7 +1154,7 @@ class edit_generators: ): edits.append(Edit( span=match.span(), - content='%s(%s)' % (dst, match.group(1)), + content='{:s}({:s})'.format(dst, match.group(1)), content_fail='__ALWAYS_FAIL__', )) @@ -1170,7 +1172,7 @@ class edit_generators: ): edits.append(Edit( span=match.span(), - content='%s(%s,' % (dst, match.group(1)), + content='{:s}({:s},'.format(dst, match.group(1)), content_fail='__ALWAYS_FAIL__', )) @@ -1195,7 +1197,7 @@ class edit_generators: for match in re.finditer(r"\bsizeof\((.*)\) / sizeof\([^\)]+\)", data): edits.append(Edit( span=match.span(), - content='ARRAY_SIZE(%s)' % match.group(1), + content='ARRAY_SIZE({:s})'.format(match.group(1)), content_fail='__ALWAYS_FAIL__', )) @@ -1317,7 +1319,7 @@ class edit_generators: edits.append(Edit( # Span covers `for (...)` { span=(for_beg, for_paren_end + 1), - content='%s (%s, %s, %s%s)' % ( + content='{:s} ({:s}, {:s}, {:s}{:s})'.format( "LISTBASE_FOREACH" if is_forward else "LISTBASE_FOREACH_BACKWARD", ty, var, @@ -1475,7 +1477,7 @@ class edit_generators: @staticmethod def _header_guard_from_filename(f: str) -> str: - return '__%s__' % os.path.basename(f).replace('.', '_').upper() + return '__{:s}__'.format(os.path.basename(f).replace('.', '_').upper()) @classmethod def setup(cls) -> Any: @@ -1501,9 +1503,9 
@@ class edit_generators: start, end = match.span() src = data[start:end] dst = ( - '#ifndef %s\n#define %s' % (header_guard, header_guard) + '#ifndef {:s}\n#define {:s}'.format(header_guard, header_guard) ) - dst_footer = '\n#endif /* %s */\n' % header_guard + dst_footer = '\n#endif /* {:s} */\n'.format(header_guard) files.append((f, src, dst, dst_footer)) data = data[:start] + dst + data[end:] + dst_footer with open(f, 'w', encoding='utf-8') as fh: @@ -1542,7 +1544,7 @@ class edit_generators: edits.append(Edit( span=match.span(), content='', # Remove the header. - content_fail='%s__ALWAYS_FAIL__%s' % (match.group(2), match.group(4)), + content_fail='{:s}__ALWAYS_FAIL__{:s}'.format(match.group(2), match.group(4)), extra_build_args=('-D' + header_guard, ), )) @@ -1933,8 +1935,7 @@ def run_edits_on_directory( args = find_build_args_make(build_dir) else: sys.stderr.write( - "Can't find Ninja or Makefile (%r or %r), aborting" % - (build_file_ninja, build_file_make) + "Can't find Ninja or Makefile ({!r} or {!r}), aborting".format(build_file_ninja, build_file_make) ) return 1 @@ -2005,7 +2006,7 @@ def run_edits_on_directory( if test_path(c) ] del args - print("Operating on %d of %d files..." 
% (len(args_with_cwd), args_orig_len)) + print("Operating on {:d} of {:d} files...".format(len(args_with_cwd), args_orig_len)) for (c, build_args, build_cwd) in args_with_cwd: print(" ", c) del args_orig_len @@ -2019,7 +2020,7 @@ def run_edits_on_directory( edits_to_apply_grouped = [[edit] for edit in edits_to_apply] for i, edits_group in enumerate(edits_to_apply_grouped): - print("Applying edit:", edits_group, "(%d of %d)" % (i + 1, len(edits_to_apply_grouped))) + print("Applying edit:", edits_group, "({:d} of {:d})".format(i + 1, len(edits_to_apply_grouped))) edit_generator_class = edit_class_from_id(edits_group[0]) shared_edit_data = edit_generator_class.setup() @@ -2071,7 +2072,7 @@ def create_parser(edits_all: Sequence[str], edits_all_default: Sequence[str]) -> for edit in edits_all: # `%` -> `%%` is needed for `--help` not to interpret these as formatting arguments. edits_all_docs.append( - " %s\n%s" % ( + " {:s}\n{:s}".format( edit, indent(edit_docstring_from_id(edit).replace("%", "%%"), ' '), ) @@ -2082,7 +2083,7 @@ def create_parser(edits_all: Sequence[str], edits_all_default: Sequence[str]) -> for verbose_id, verbose_doc in VERBOSE_INFO: # `%` -> `%%` is needed for `--help` not to interpret these as formatting arguments. 
verbose_all_docs.append( - " %s\n%s" % ( + " {:s}\n{:s}".format( verbose_id, indent(verbose_doc.replace("%", "%%"), " "), ) @@ -2181,7 +2182,7 @@ def main() -> int: for edit in edits_all_from_args: if edit not in edits_all: - print("Error, unrecognized '--edits' argument '%s', expected a value in {%s}" % ( + print("Error, unrecognized '--edits' argument '{:s}', expected a value in {{{:s}}}".format( edit, ", ".join(edits_all), )) @@ -2198,7 +2199,7 @@ def main() -> int: case "edit_actions": verbose_edit_actions = True case _: - print("Error, unrecognized '--verbose' argument '%s', expected a value in {%s}" % ( + print("Error, unrecognized '--verbose' argument '{:s}', expected a value in {{{:s}}}".format( verbose_id, ", ".join(verbose_all), )) @@ -2207,7 +2208,7 @@ def main() -> int: if len(edits_all_from_args) > 1: for edit in edits_all: if edit not in edits_all_from_args: - print("Skipping edit: %s, default=%d" % (edit, getattr(edit_generators, edit).is_default)) + print("Skipping edit: {:s}, default={:d}".format(edit, getattr(edit_generators, edit).is_default)) return run_edits_on_directory( build_dir=build_dir, diff --git a/tools/utils_maintenance/trailing_space_clean.py b/tools/utils_maintenance/trailing_space_clean.py index cfb2fa39e8a..c19af7205d7 100755 --- a/tools/utils_maintenance/trailing_space_clean.py +++ b/tools/utils_maintenance/trailing_space_clean.py @@ -69,7 +69,7 @@ def rstrip_file(filename): len_strip = len(data_src) - len(data_dst) if len_strip != 0: - reports.append("STRIP=%d" % len_strip) + reports.append("STRIP={:d}".format(len_strip)) if len_strip: with open(filename, "w", encoding="utf-8") as fh: @@ -81,7 +81,7 @@ def main(): for f in path_expand(PATHS, is_source): report = rstrip_file(f) if report: - print("Strip (%s): %s" % (', '.join(report), f)) + print("Strip ({:s}): {:s}".format(', '.join(report), f)) if __name__ == "__main__":