Various edits preparing addons' translation tools (not everything is functional/tested yet, though).

Also work around a nasty bug where unregistered py classes remain listed in relevant __subclasses__() calls, which would lead to crashes with the python addons i18n tools (main translation was not affected, as message extraction tools are executed in a brand-new "factory startup" Blender ;) ).
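
For illustration, a minimal sketch of the workaround's core idea (the `_rna` and `_rna_clss_ids` names match the diff below; the `stale` variable exists only for this example): only classes whose `__name__` or RNA identifier still resolves through bpy.types are trusted, everything else returned by `__subclasses__()` is blacklisted.

    import bpy

    # Everything currently registered is reachable through bpy.types...
    _rna = {getattr(bpy.types, cls) for cls in dir(bpy.types)}
    _rna_clss_ids = {cls.__name__ for cls in _rna} | {cls.bl_rna.identifier for cls in _rna}

    # ...so any __subclasses__() entry whose name is unknown to bpy.types is a
    # stale, unregistered leftover and must not be touched (accessing its RNA
    # data, even the identifier, can segfault).
    stale = {cls for cls in bpy.types.Operator.__subclasses__()
             if cls.__name__ not in _rna_clss_ids}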
This commit is contained in:
Bastien Montagne 2013-04-09 08:56:35 +00:00
parent 2f5eaf3fcf
commit 6382f6bedd
2 changed files with 225 additions and 154 deletions

@@ -62,6 +62,17 @@ def _gen_check_ctxt(settings):
"spell_errors": {},
}
def _diff_check_ctxt(check_ctxt, minus_check_ctxt):
"""Returns check_ctxt - minus_check_ctxt"""
for key in check_ctxt:
if isinstance(check_ctxt[key], set):
for warning in minus_check_ctxt[key]:
if warning in check_ctxt[key]:
check_ctxt[key].remove(warning)
elif isinstance(check_ctxt[key], dict):
for warning in minus_check_ctxt[key]:
if warning in check_ctxt[key]:
del check_ctxt[key][warning]
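# A hedged usage sketch of _diff_check_ctxt, with toy check contexts (not real ones):
#     check_ctxt = {"multi_lines": {("msg_a", "src_1"), ("msg_b", "src_2")}, "spell_errors": {"msg_a": ["tpyo"]}}
#     minus_check_ctxt = {"multi_lines": {("msg_a", "src_1")}, "spell_errors": {"msg_a": ["tpyo"]}}
#     _diff_check_ctxt(check_ctxt, minus_check_ctxt)  # Mutates check_ctxt in place.
#     # check_ctxt is now {"multi_lines": {("msg_b", "src_2")}, "spell_errors": {}}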
def _gen_reports(check_ctxt):
return {
@@ -176,45 +187,6 @@ def print_info(reports, pot):
_print("\t\t{}".format("\n\t\t".join(pot.msgs[key].sources)))
def enable_addons(addons={}, support={}, disable=False):
"""
Enable (or disable) addons based either on a set of names, or a set of 'support' types.
Returns the list of all affected addons (as fake modules)!
"""
import addon_utils
userpref = bpy.context.user_preferences
used_ext = {ext.module for ext in userpref.addons}
ret = [mod for mod in addon_utils.modules(addon_utils.addons_fake_modules)
if ((addons and mod.__name__ in addons) or
(not addons and addon_utils.module_bl_info(mod)["support"] in support))]
for mod in ret:
module_name = mod.__name__
if disable:
if module_name not in used_ext:
continue
print(" Disabling module ", module_name)
bpy.ops.wm.addon_disable(module=module_name)
else:
if module_name in used_ext:
continue
print(" Enabling module ", module_name)
bpy.ops.wm.addon_enable(module=module_name)
# XXX There are currently some problems with bpy/rna...
# *Very* tricky to solve!
# So this is a hack to make all newly added operators visible by
# bpy.types.OperatorProperties.__subclasses__()
for cat in dir(bpy.ops):
cat = getattr(bpy.ops, cat)
for op in dir(cat):
getattr(cat, op).get_rna()
return ret
def process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt, settings):
if filter_message(msgid):
reports["messages_skipped"].add((msgid, msgsrc))
@@ -235,50 +207,72 @@ def process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt, settings):
##### RNA #####
def dump_messages_rna(msgs, reports, settings):
def dump_rna_messages(msgs, reports, settings):
"""
Dump into messages dict all RNA-defined UI messages (labels and tooltips).
"""
def class_blacklist():
blacklist_rna_class = [
blacklist_rna_class = {getattr(bpy.types, cls_id) for cls_id in (
# core classes
"Context", "Event", "Function", "UILayout", "UnknownType",
"Context", "Event", "Function", "UILayout", "UnknownType", "Property", "Struct",
# registerable classes
"Panel", "Menu", "Header", "RenderEngine", "Operator", "OperatorMacro", "Macro", "KeyingSetInfo",
# window classes
"Window",
]
)
}
# Collect internal operators
# extend with all internal operators
# note that this uses internal api introspection functions
# all possible operator names
op_ids = set(cls.bl_rna.identifier for cls in bpy.types.OperatorProperties.__subclasses__()) | \
set(cls.bl_rna.identifier for cls in bpy.types.Operator.__subclasses__()) | \
set(cls.bl_rna.identifier for cls in bpy.types.OperatorMacro.__subclasses__())
# More builtin classes we don't need to parse.
blacklist_rna_class |= {cls for cls in bpy.types.Property.__subclasses__()}
get_instance = __import__("_bpy").ops.get_instance
# path_resolve = type(bpy.context).__base__.path_resolve
for idname in op_ids:
op = get_instance(idname)
# XXX Do not skip INTERNAL's anymore, some of those ops show up in UI now!
# if 'INTERNAL' in path_resolve(op, "bl_options"):
# blacklist_rna_class.append(idname)
_rna = {getattr(bpy.types, cls) for cls in dir(bpy.types)}
# Collect builtin classes we don't need to doc
blacklist_rna_class.append("Property")
blacklist_rna_class.extend([cls.__name__ for cls in bpy.types.Property.__subclasses__()])
# Collect classes which are attached to collections, these are api access only.
collection_props = set()
for cls_id in dir(bpy.types):
cls = getattr(bpy.types, cls_id)
# Classes which are attached to collections can be skipped too; these are api access only.
for cls in _rna:
for prop in cls.bl_rna.properties:
if prop.type == 'COLLECTION':
prop_cls = prop.srna
if prop_cls is not None:
collection_props.add(prop_cls.identifier)
blacklist_rna_class.extend(sorted(collection_props))
blacklist_rna_class.add(prop_cls.__class__)
# Now here is the *ugly* hack!
# Unfortunately, not all classes we want to access are available from bpy.types (OperatorProperties subclasses
# are not there, as they have the same name as their matching Operator ones :( ). So we use __subclasses__()
# calls to walk through the whole rna hierarchy.
# But unregistered classes remain listed by relevant __subclasses__() calls (be it a Py or BPY/RNA bug),
# and obviously the matching RNA struct exists no more, so trying to access their data (even the identifier)
# quickly leads to a segfault!
# To address this, we have to blacklist classes whose __name__ does not match any __name__ from bpy.types
# (we can't use only RNA identifiers, as some py-defined classes have a different name than their rna id,
# and we can't use class objects themselves, because OperatorProperties subclasses are not in bpy.types!)...
_rna_clss_ids = {cls.__name__ for cls in _rna} | {cls.bl_rna.identifier for cls in _rna}
# All registrable types.
blacklist_rna_class |= {cls for cls in bpy.types.OperatorProperties.__subclasses__() +
bpy.types.Operator.__subclasses__() +
bpy.types.OperatorMacro.__subclasses__() +
bpy.types.Header.__subclasses__() +
bpy.types.Panel.__subclasses__() +
bpy.types.Menu.__subclasses__() +
bpy.types.UIList.__subclasses__()
if cls.__name__ not in _rna_clss_ids}
# Collect internal operators
# extend with all internal operators
# note that this uses internal api introspection functions
# XXX Do not skip INTERNAL's anymore, some of those ops show up in UI now!
# all possible operator names
#op_ids = (set(cls.bl_rna.identifier for cls in bpy.types.OperatorProperties.__subclasses__()) |
#set(cls.bl_rna.identifier for cls in bpy.types.Operator.__subclasses__()) |
#set(cls.bl_rna.identifier for cls in bpy.types.OperatorMacro.__subclasses__()))
#get_instance = __import__("_bpy").ops.get_instance
#path_resolve = type(bpy.context).__base__.path_resolve
#for idname in op_ids:
#op = get_instance(idname)
#if 'INTERNAL' in path_resolve(op, "bl_options"):
#blacklist_rna_class.add(idname)
return blacklist_rna_class
@@ -337,17 +331,6 @@ def dump_messages_rna(msgs, reports, settings):
def walk_class(cls):
bl_rna = cls.bl_rna
reports["rna_structs"].append(cls)
if bl_rna.identifier in blacklist_rna_class:
reports["rna_structs_skipped"].append(cls)
return
# XXX translation_context of Operator sub-classes are not "good"!
# So ignore those Operator sub-classes (anyway, will get the same from OperatorProperties sub-classes!)...
if issubclass(cls, bpy.types.Operator):
reports["rna_structs_skipped"].append(cls)
return
msgsrc = "bpy.types." + bl_rna.identifier
msgctxt = bl_rna.translation_context or default_context
@@ -388,6 +371,11 @@ def dump_messages_rna(msgs, reports, settings):
cls_list.sort(key=full_class_id)
for cls in cls_list:
reports["rna_structs"].append(cls)
# Ignore those Operator sub-classes (anyway, will get the same from OperatorProperties sub-classes!)...
if (cls in blacklist_rna_class) or issubclass(cls, bpy.types.Operator):
reports["rna_structs_skipped"].append(cls)
else:
walk_class(cls)
# Recursively process subclasses.
process_cls_list(cls.__subclasses__())
@@ -796,14 +784,14 @@ def dump_messages(do_messages, do_checks, settings):
# Enable all wanted addons.
# For now, enable all official addons, before extracting msgids.
addons = enable_addons(support={"OFFICIAL"})
addons = utils.enable_addons(support={"OFFICIAL"})
# Note this is not needed if we have been started with factory settings, but just in case...
enable_addons(support={"COMMUNITY", "TESTING"}, disable=True)
utils.enable_addons(support={"COMMUNITY", "TESTING"}, disable=True)
reports = _gen_reports(_gen_check_ctxt(settings) if do_checks else None)
# Get strings from RNA.
dump_messages_rna(msgs, reports, settings)
dump_rna_messages(msgs, reports, settings)
# Get strings from UI layout definitions text="..." args.
dump_py_messages(msgs, reports, addons, settings)
@@ -836,40 +824,51 @@ def dump_messages(do_messages, do_checks, settings):
print("Finished extracting UI messages!")
return pot # Not used currently, but may be useful later (and to be consistent with dump_addon_messages!).
def dump_addon_messages(module_name, messages_formats, do_checks, settings):
# Enable our addon and get strings from RNA.
addon = enable_addons(addons={module_name})[0]
addon_info = addon_utils.module_bl_info(addon)
ver = addon_info.name + " " + ".".join(addon_info.version)
rev = "???"
date = datetime.datetime()
pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, ver, rev, date, date.year,
settings=settings)
msgs = pot.msgs
minus_msgs = copy.deepcopy(msgs)
check_ctxt = _gen_check_ctxt(settings) if do_checks else None
minus_check_ctxt = _gen_check_ctxt(settings) if do_checks else None
def dump_addon_messages(module_name, do_checks, settings):
import addon_utils
# Get current addon state (loaded or not):
was_loaded = addon_utils.check(module_name)[1]
# Enable our addon and get strings from RNA.
addons = enable_addons(addons={module_name})
# Enable our addon.
addon = utils.enable_addons(addons={module_name})[0]
addon_info = addon_utils.module_bl_info(addon)
ver = addon_info["name"] + " " + ".".join(str(v) for v in addon_info["version"])
rev = 0
date = datetime.datetime.now()
pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, ver, rev, date, date.year,
settings=settings)
msgs = pot.msgs
minus_pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, ver, rev, date, date.year,
settings=settings)
minus_msgs = minus_pot.msgs
check_ctxt = _gen_check_ctxt(settings) if do_checks else None
minus_check_ctxt = _gen_check_ctxt(settings) if do_checks else None
# Get strings from RNA, our addon being enabled.
print("A")
reports = _gen_reports(check_ctxt)
dump_messages_rna(msgs, reports, settings)
print("B")
dump_rna_messages(msgs, reports, settings)
print("C")
# Now disable our addon, and rescan RNA.
enable_addons(addons={module_name}, disable=True)
utils.enable_addons(addons={module_name}, disable=True)
print("D")
reports["check_ctxt"] = minus_check_ctxt
dump_messages_rna(minus_msgs, reports, settings)
print("E")
dump_rna_messages(minus_msgs, reports, settings)
print("F")
# Restore previous state if needed!
if was_loaded:
enable_addons(addons={module_name})
utils.enable_addons(addons={module_name})
# and make the diff!
for key in minus_msgs:
@@ -877,11 +876,10 @@ def dump_addon_messages(module_name, messages_formats, do_checks, settings):
del msgs[key]
if check_ctxt:
for key in check_ctxt:
for warning in minus_check_ctxt[key]:
check_ctxt[key].remove(warning)
_diff_check_ctxt(check_ctxt, minus_check_ctxt)
# and we are done with those!
del minus_pot
del minus_msgs
del minus_check_ctxt
@@ -889,8 +887,11 @@ def dump_addon_messages(module_name, messages_formats, do_checks, settings):
reports["check_ctxt"] = check_ctxt
dump_py_messages(msgs, reports, {addon}, settings, addons_only=True)
pot.unescape() # Strings gathered in py/C source code may contain escaped chars...
print_info(reports, pot)
print("Finished extracting UI messages!")
return pot
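# A hedged usage sketch (the module name is hypothetical):
#     pot = dump_addon_messages("my_addon_module", do_checks=True, settings=settings)
#     # pot now holds only the messages the addon itself adds (full dump minus the "minus" dump).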

@@ -122,6 +122,52 @@ def locale_match(loc1, loc2):
return ...
def find_best_isocode_matches(uid, iso_codes):
tmp = ((e, locale_match(e, uid)) for e in iso_codes)
return tuple(e[0] for e in sorted((e for e in tmp if e[1] is not ... and e[1] >= 0), key=lambda e: e[1]))
def enable_addons(addons={}, support={}, disable=False, check_only=False):
"""
Enable (or disable) addons based either on a set of names, or a set of 'support' types.
Returns the list of all affected addons (as fake modules)!
If "check_only" is set, no addon will be enabled nor disabled.
"""
import addon_utils
userpref = bpy.context.user_preferences
used_ext = {ext.module for ext in userpref.addons}
ret = [mod for mod in addon_utils.modules(addon_utils.addons_fake_modules)
if ((addons and mod.__name__ in addons) or
(not addons and addon_utils.module_bl_info(mod)["support"] in support))]
if not check_only:
for mod in ret:
module_name = mod.__name__
if disable:
if module_name not in used_ext:
continue
print(" Disabling module ", module_name)
bpy.ops.wm.addon_disable(module=module_name)
else:
if module_name in used_ext:
continue
print(" Enabling module ", module_name)
bpy.ops.wm.addon_enable(module=module_name)
# XXX There are currently some problems with bpy/rna...
# *Very* tricky to solve!
# So this is a hack to make all newly added operators visible by
# bpy.types.OperatorProperties.__subclasses__()
for cat in dir(bpy.ops):
cat = getattr(bpy.ops, cat)
for op in dir(cat):
getattr(cat, op).get_rna()
return ret
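# A hedged usage sketch (the addon module name is hypothetical):
#     enabled = enable_addons(support={"OFFICIAL"})  # Enable all official addons.
#     enable_addons(addons={"my_addon_module"}, disable=True)  # Disable one addon by module name.
#     affected = enable_addons(support={"COMMUNITY"}, check_only=True)  # Only list, change nothing.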
##### Main Classes #####
class I18nMessage:
@@ -1021,7 +1067,9 @@ class I18n:
"""
@staticmethod
def _parser_check_file(path, maxsize=settings.PARSER_MAX_FILE_SIZE, _begin_marker=None, _end_marker=None):
def _parser_check_file(path, maxsize=settings.PARSER_MAX_FILE_SIZE,
_begin_marker=settings.PARSER_PY_MARKER_BEGIN,
_end_marker=settings.PARSER_PY_MARKER_END):
if os.stat(path).st_size > maxsize:
# Security, else we could read arbitrary huge files!
print("WARNING: skipping file {}, too huge!".format(path))
@@ -1040,8 +1088,16 @@ class I18n:
if _end_marker in txt:
_out = txt.index(_end_marker)
if _in is not None and _out is not None:
return txt[:_in], txt[_in:_out], txt[_out:]
return txt, None, None
in_txt, txt, out_txt = txt[:_in], txt[_in:_out], txt[_out:]
elif _in is not None:
in_txt, txt, out_txt = txt[:_in], txt[_in:], None
elif _out is not None:
in_txt, txt, out_txt = None, txt[:_out], txt[_out:]
else:
in_txt, txt, out_txt = None, txt, None
if "translations_tuple" not in txt:
return None, None, None
return in_txt, txt, out_txt
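# A hedged sketch of the expected splitting (marker strings shortened for the example):
#     txt = "head\n" + _begin_marker + "\ntranslations_tuple = (...)\n" + _end_marker + "\ntail"
#     in_txt == "head\n"; txt == everything from _begin_marker up to (not including) _end_marker;
#     out_txt == _end_marker + "\ntail". With no markers found and no "translations_tuple"
#     in the text, (None, None, None) is returned.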
@staticmethod
def _dst(self, path, uid, kind):
@@ -1148,6 +1204,35 @@ class I18n:
)
print(prefix.join(lines))
@classmethod
def check_py_module_has_translations(clss, src, settings=settings):
"""
Check whether a given src (a py module, either a directory or a py file) has some i18n translation data,
and return a (src_file, translations_tuple) tuple if so, else (None, None).
"""
txts = []
if os.path.isdir(src):
for root, dnames, fnames in os.walk(src):
for fname in fnames:
if not fname.endswith(".py"):
continue
path = os.path.join(root, fname)
_1, txt, _2 = clss._parser_check_file(path)
if txt is not None:
txts.append((path, txt))
elif src.endswith(".py") and os.path.isfile(src):
_1, txt, _2 = clss._parser_check_file(src)
if txt is not None:
txts.append((src, txt))
for path, txt in txts:
tuple_id = "translations_tuple"
env = globals().copy()
exec(txt, env)
if tuple_id in env:
return path, env[tuple_id]
return None, None # No data...
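# A hedged usage sketch (the path is hypothetical):
#     path, translations_tuple = I18n.check_py_module_has_translations("/path/to/my_addon")
#     if path is not None:
#         print("Found i18n data in", path, "({} keys)".format(len(translations_tuple)))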
def parse(self, kind, src, langs=set()):
self.parsers[kind](self, src, langs)
@@ -1193,28 +1278,9 @@ class I18n:
If the langs set is empty, all languages found are loaded.
"""
default_context = self.settings.DEFAULT_CONTEXT
txt = None
if os.path.isdir(src):
for root, dnames, fnames in os.walk(src):
for fname in fnames:
path = os.path.join(root, fname)
_1, txt, _2 = self._parser_check_file(path)
if txt is not None:
self.src[self.settings.PARSER_PY_ID] = path
break
if txt is not None:
break
elif src.endswith(".py") and os.path.isfile(src):
_1, txt, _2 = _check_file(src, self.settings.PARSER_PY_MARKER_BEGIN, self.settings.PARSER_PY_MARKER_END)
if txt is not None:
self.src[self.settings.PARSER_PY_ID] = src
if txt is None:
self.src[self.settings.PARSER_PY_ID], msgs = self.check_py_module_has_translations(src, self.settings)
if msgs is None:
return
env = globals()
exec(txt, env)
if "translations_tuple" not in env:
return # No data...
msgs = env["translations_tuple"]
for key, (sources, gen_comments), *translations in msgs:
if self.settings.PARSER_TEMPLATE_ID not in self.trans:
self.trans[self.settings.PARSER_TEMPLATE_ID] = I18nMessages(self.settings.PARSER_TEMPLATE_ID,
@@ -1239,6 +1305,10 @@ class I18n:
comment_lines = [self.settings.PO_COMMENT_PREFIX + c for c in user_comments] + common_comment_lines
self.trans[uid].msgs[key] = I18nMessage(ctxt, [key[1]], [msgstr], comment_lines, False, is_fuzzy,
settings=self.settings)
#key = self.settings.PO_HEADER_KEY
#for uid, trans in self.trans.items():
#if key not in trans.msgs:
#trans.msgs[key]
self.unescape()
def write(self, kind, langs=set()):
@@ -1261,7 +1331,7 @@ class I18n:
def write_to_py(self, langs=set()):
"""
Write all translations as python code, either in a "translations.py" file under same dir as source(s), or in
specified file is self.py_file is set (default, as usual can be customized with self.dst callable!).
specified file if self.py_file is set (default, as usual can be customized with self.dst callable!).
Note: If langs is set and you want to export the pot template as well, langs must contain PARSER_TEMPLATE_ID
({} currently).
""".format(self.settings.PARSER_TEMPLATE_ID)
@@ -1282,16 +1352,16 @@
]
# First gather all keys (msgctxt, msgid) - theoretically, all translations should share the same, but...
keys = set()
for trans in self.trans.items:
keys |= trans.msgs.keys()
for trans in self.trans.values():
keys |= set(trans.msgs.keys())
# Get the ref translation (ideally the PARSER_TEMPLATE_ID one, else the first one that pops up!).
# Ref translation will be used to generate sources "comments"
ref = self.trans.get(self.settings.PARSER_TEMPLATE_ID) or self.trans[list(self.trans.keys())[0]]
# Get all languages (uids) and sort them (PARSER_TEMPLATE_ID excluded!)
translations = self.trans.keys() - {self.settings.PARSER_TEMPLATE_ID}
# Get all languages (uids) and sort them (PARSER_TEMPLATE_ID and PARSER_PY_ID excluded!)
translations = self.trans.keys() - {self.settings.PARSER_TEMPLATE_ID, self.settings.PARSER_PY_ID}
if langs:
translations &= langs
translations = [('"' + lng + '"', " " * len(lng) + 4, self.trans[lng]) for lng in sorted(translations)]
translations = [('"' + lng + '"', " " * (len(lng) + 4), self.trans[lng]) for lng in sorted(translations)]
for key in keys:
if ref.msgs[key].is_commented:
continue
@@ -1340,9 +1410,9 @@ class I18n:
if len(comments) > 1:
ret.append(tab + lngsp + " (\"" + comments[0] + "\",")
ret += [tab + lngsp + " \"" + s + "\"," for s in comments[1:-1]]
ret.append(tab + lngsp + " \"" + comments[-1] + "\"))),")
ret.append(tab + lngsp + " \"" + comments[-1] + "\")),")
else:
ret[-1] = ret[-1] + " " + ('"' + comments[0] + '",' if comments else "") + "))),"
ret[-1] = ret[-1] + " " + ('"' + comments[0] + '",' if comments else "") + ")),"
ret.append(tab + "),")
ret += [
")",
@@ -1359,19 +1429,19 @@ class I18n:
self.escape(True)
dst = self.dst(self, self.src.get(self.settings.PARSER_PY_ID, ""), self.settings.PARSER_PY_ID, 'PY')
prev = txt = next = ""
print(dst)
prev = txt = nxt = ""
if os.path.exists(dst):
if not os.path.isfile(dst):
print("WARNING: trying to write as python code into {}, which is not a file! Aborting.".format(dst))
return
prev, txt, next = self._parser_check_file(dst, self.settings.PARSER_MAX_FILE_SIZE,
self.settings.PARSER_PY_MARKER_BEGIN,
self.settings.PARSER_PY_MARKER_END)
if prev is None:
return
if txt is None:
print("WARNING: given python file {} has no auto-generated translations yet, will be added at "
"the end of the file, you can move that section later if needed...".format(dst))
prev, txt, nxt = self._parser_check_file(dst)
if prev is None and nxt is None:
print("WARNING: Looks like given python file {} has no auto-generated translations yet, will be added "
"at the end of the file, you can move that section later if needed...".format(dst))
txt = [txt] + _gen_py(self, langs)
else:
# We completely replace the text found between start and end markers...
txt = _gen_py(self, langs)
else:
print("Creating python file {} containing translations.".format(dst))
@@ -1403,7 +1473,7 @@ class I18n:
self.settings.PARSER_PY_MARKER_END,
]
with open(dst, 'w') as f:
f.write(prev + "\n".join(txt) + (next or ""))
f.write(prev + "\n".join(txt) + (nxt or ""))
self.unescape()
parsers = {