svn merge ^/trunk/blender -r48489:48527

This commit is contained in:
Campbell Barton 2012-07-03 11:03:39 +00:00
commit 259e78997a
63 changed files with 3940 additions and 204 deletions

@ -1298,7 +1298,7 @@ void FLUID_3D::addVorticity(int zBegin, int zEnd)
N[2] = (_vorticity[out] - _vorticity[in]) * dz; N[2] = (_vorticity[out] - _vorticity[in]) * dz;
float magnitude = sqrtf(N[0] * N[0] + N[1] * N[1] + N[2] * N[2]); float magnitude = sqrtf(N[0] * N[0] + N[1] * N[1] + N[2] * N[2]);
if (magnitude > 0.0f) if (magnitude > FLT_EPSILON)
{ {
magnitude = 1.0f / magnitude; magnitude = 1.0f / magnitude;
N[0] *= magnitude; N[0] *= magnitude;
@ -1306,7 +1306,7 @@ void FLUID_3D::addVorticity(int zBegin, int zEnd)
N[2] *= magnitude; N[2] *= magnitude;
_xForce[index] += (N[1] * _zVorticity[vIndex] - N[2] * _yVorticity[vIndex]) * _dx * eps; _xForce[index] += (N[1] * _zVorticity[vIndex] - N[2] * _yVorticity[vIndex]) * _dx * eps;
_yForce[index] -= (N[0] * _zVorticity[vIndex] - N[2] * _xVorticity[vIndex]) * _dx * eps; _yForce[index] += (N[2] * _xVorticity[vIndex] - N[0] * _zVorticity[vIndex]) * _dx * eps;
_zForce[index] += (N[0] * _yVorticity[vIndex] - N[1] * _xVorticity[vIndex]) * _dx * eps; _zForce[index] += (N[0] * _yVorticity[vIndex] - N[1] * _xVorticity[vIndex]) * _dx * eps;
} }
} // if } // if

@ -234,7 +234,7 @@ inline void MTRand::seed( uint32 *const bigSeed, const uint32 seedLength )
initialize(19650218UL); initialize(19650218UL);
register int i = 1; register int i = 1;
register uint32 j = 0; register uint32 j = 0;
register int k = ( N > seedLength ? N : seedLength ); register int k = ( (uint32)N > seedLength ? (uint32)N : seedLength );
for( ; k; --k ) for( ; k; --k )
{ {
state[i] = state[i] =

@ -198,8 +198,8 @@ extern "C" void smoke_export(FLUID_3D *fluid, float *dt, float *dx, float **dens
*vyold = fluid->_yVelocityOld; *vyold = fluid->_yVelocityOld;
*vzold = fluid->_zVelocityOld; *vzold = fluid->_zVelocityOld;
*obstacles = fluid->_obstacles; *obstacles = fluid->_obstacles;
dt = &(fluid->_dt); *dt = fluid->_dt;
dx = &(fluid->_dx); *dx = fluid->_dx;
} }

@ -157,8 +157,8 @@ def find_path_new(id_data, data_path, rna_update_dict, rna_update_from_map):
def update_data_paths(rna_update): def update_data_paths(rna_update):
''' rna_update triple [(class_name, from, to), ...] """ rna_update triple [(class_name, from, to), ...]
''' """
# make a faster lookup dict # make a faster lookup dict
rna_update_dict = {} rna_update_dict = {}

@ -31,16 +31,16 @@ op_get_instance = ops_module.get_instance
class BPyOps(object): class BPyOps(object):
''' """
Fake module like class. Fake module like class.
bpy.ops bpy.ops
''' """
def __getattr__(self, module): def __getattr__(self, module):
''' """
gets a bpy.ops submodule gets a bpy.ops submodule
''' """
if module.startswith('__'): if module.startswith('__'):
raise AttributeError(module) raise AttributeError(module)
return BPyOpsSubMod(module) return BPyOpsSubMod(module)
@ -69,20 +69,20 @@ class BPyOps(object):
class BPyOpsSubMod(object): class BPyOpsSubMod(object):
''' """
Utility class to fake submodules. Utility class to fake submodules.
eg. bpy.ops.object eg. bpy.ops.object
''' """
__keys__ = ("module",) __keys__ = ("module",)
def __init__(self, module): def __init__(self, module):
self.module = module self.module = module
def __getattr__(self, func): def __getattr__(self, func):
''' """
gets a bpy.ops.submodule function gets a bpy.ops.submodule function
''' """
if func.startswith('__'): if func.startswith('__'):
raise AttributeError(func) raise AttributeError(func)
return BPyOpsSubModOp(self.module, func) return BPyOpsSubModOp(self.module, func)
@ -105,11 +105,11 @@ class BPyOpsSubMod(object):
class BPyOpsSubModOp(object): class BPyOpsSubModOp(object):
''' """
Utility class to fake submodule operators. Utility class to fake submodule operators.
eg. bpy.ops.object.somefunc eg. bpy.ops.object.somefunc
''' """
__keys__ = ("module", "func") __keys__ = ("module", "func")

@ -319,7 +319,7 @@ def edge_loops_from_edges(mesh, edges=None):
def ngon_tessellate(from_data, indices, fix_loops=True): def ngon_tessellate(from_data, indices, fix_loops=True):
''' """
Takes a polyline of indices (fgon) and returns a list of face Takes a polyline of indices (fgon) and returns a list of face
indicie lists. Designed to be used for importers that need indices for an indicie lists. Designed to be used for importers that need indices for an
fgon to create from existing verts. fgon to create from existing verts.
@ -329,7 +329,7 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
to fill, and can be a subset of the data given. to fill, and can be a subset of the data given.
fix_loops: If this is enabled polylines that use loops to make multiple fix_loops: If this is enabled polylines that use loops to make multiple
polylines are delt with correctly. polylines are delt with correctly.
''' """
from mathutils.geometry import tessellate_polygon from mathutils.geometry import tessellate_polygon
from mathutils import Vector from mathutils import Vector
@ -352,9 +352,9 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
return v1[1], v2[1] return v1[1], v2[1]
if not fix_loops: if not fix_loops:
''' """
Normal single concave loop filling Normal single concave loop filling
''' """
if type(from_data) in {tuple, list}: if type(from_data) in {tuple, list}:
verts = [Vector(from_data[i]) for ii, i in enumerate(indices)] verts = [Vector(from_data[i]) for ii, i in enumerate(indices)]
else: else:
@ -368,10 +368,10 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
fill = tessellate_polygon([verts]) fill = tessellate_polygon([verts])
else: else:
''' """
Seperate this loop into multiple loops be finding edges that are Seperate this loop into multiple loops be finding edges that are
used twice. This is used by lightwave LWO files a lot used twice. This is used by lightwave LWO files a lot
''' """
if type(from_data) in {tuple, list}: if type(from_data) in {tuple, list}:
verts = [vert_treplet(Vector(from_data[i]), ii) verts = [vert_treplet(Vector(from_data[i]), ii)

@ -85,10 +85,10 @@ def _call_recursive(context, base, py_node):
class BPyML_BaseUI(): class BPyML_BaseUI():
''' """
This is a mix-in class that defines a draw function This is a mix-in class that defines a draw function
which checks for draw_data which checks for draw_data
''' """
def draw(self, context): def draw(self, context):
layout = self.layout layout = self.layout

@ -48,14 +48,14 @@ def replace_help(namespace):
def get_console(console_id): def get_console(console_id):
''' """
helper function for console operators helper function for console operators
currently each text data block gets its own currently each text data block gets its own
console - code.InteractiveConsole() console - code.InteractiveConsole()
...which is stored in this function. ...which is stored in this function.
console_id can be any hashable type console_id can be any hashable type
''' """
from code import InteractiveConsole from code import InteractiveConsole
consoles = getattr(get_console, "consoles", None) consoles = getattr(get_console, "consoles", None)

@ -0,0 +1,21 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""Package for translation (i18n) tools."""

@ -0,0 +1,546 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8-80 compliant>
# Write out messages.txt from Blender.
# XXX: This script is meant to be used from inside Blender!
# You should not directly use this script, rather use update_msg.py!
import os
# Quite an ugly hack… But the simplest solution for now!
#import sys
#sys.path.append(os.path.abspath(os.path.dirname(__file__)))
import i18n.settings as settings
#classes = set()
SOURCE_DIR = settings.SOURCE_DIR
CUSTOM_PY_UI_FILES = [os.path.abspath(os.path.join(SOURCE_DIR, p))
for p in settings.CUSTOM_PY_UI_FILES]
FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
COMMENT_PREFIX = settings.COMMENT_PREFIX
CONTEXT_PREFIX = settings.CONTEXT_PREFIX
CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
UNDOC_OPS_STR = settings.UNDOC_OPS_STR
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
def check(check_ctxt, messages, key, msgsrc):
if check_ctxt is None:
return
multi_rnatip = check_ctxt.get("multi_rnatip")
multi_lines = check_ctxt.get("multi_lines")
py_in_rna = check_ctxt.get("py_in_rna")
not_capitalized = check_ctxt.get("not_capitalized")
end_point = check_ctxt.get("end_point")
undoc_ops = check_ctxt.get("undoc_ops")
if multi_rnatip is not None:
if key in messages and key not in multi_rnatip:
multi_rnatip.add(key)
if multi_lines is not None:
if '\n' in key[1]:
multi_lines.add(key)
if py_in_rna is not None:
if key in py_in_rna[1]:
py_in_rna[0].add(key)
if not_capitalized is not None:
if(key[1] not in NC_ALLOWED and key[1][0].isalpha() and
not key[1][0].isupper()):
not_capitalized.add(key)
if end_point is not None:
if key[1].strip().endswith('.'):
end_point.add(key)
if undoc_ops is not None:
if key[1] == UNDOC_OPS_STR:
undoc_ops.add(key)
def dump_messages_rna(messages, check_ctxt):
import bpy
def classBlackList():
blacklist_rna_class = [# core classes
"Context", "Event", "Function", "UILayout",
"BlendData",
# registerable classes
"Panel", "Menu", "Header", "RenderEngine",
"Operator", "OperatorMacro", "Macro",
"KeyingSetInfo", "UnknownType",
# window classes
"Window",
]
# ---------------------------------------------------------------------
# Collect internal operators
# extend with all internal operators
# note that this uses internal api introspection functions
# all possible operator names
op_ids = set(cls.bl_rna.identifier for cls in
bpy.types.OperatorProperties.__subclasses__()) | \
set(cls.bl_rna.identifier for cls in
bpy.types.Operator.__subclasses__()) | \
set(cls.bl_rna.identifier for cls in
bpy.types.OperatorMacro.__subclasses__())
get_instance = __import__("_bpy").ops.get_instance
path_resolve = type(bpy.context).__base__.path_resolve
for idname in op_ids:
op = get_instance(idname)
if 'INTERNAL' in path_resolve(op, "bl_options"):
blacklist_rna_class.append(idname)
# ---------------------------------------------------------------------
# Collect builtin classes we don't need to doc
blacklist_rna_class.append("Property")
blacklist_rna_class.extend(
[cls.__name__ for cls in
bpy.types.Property.__subclasses__()])
# ---------------------------------------------------------------------
# Collect classes which are attached to collections, these are api
# access only.
collection_props = set()
for cls_id in dir(bpy.types):
cls = getattr(bpy.types, cls_id)
for prop in cls.bl_rna.properties:
if prop.type == 'COLLECTION':
prop_cls = prop.srna
if prop_cls is not None:
collection_props.add(prop_cls.identifier)
blacklist_rna_class.extend(sorted(collection_props))
return blacklist_rna_class
blacklist_rna_class = classBlackList()
def filterRNA(bl_rna):
rid = bl_rna.identifier
if rid in blacklist_rna_class:
print(" skipping", rid)
return True
return False
check_ctxt_rna = check_ctxt_rna_tip = None
if check_ctxt:
check_ctxt_rna = {"multi_lines": check_ctxt.get("multi_lines"),
"not_capitalized": check_ctxt.get("not_capitalized"),
"end_point": check_ctxt.get("end_point"),
"undoc_ops": check_ctxt.get("undoc_ops")}
check_ctxt_rna_tip = check_ctxt_rna
check_ctxt_rna_tip["multi_rnatip"] = check_ctxt.get("multi_rnatip")
# -------------------------------------------------------------------------
# Function definitions
def walkProperties(bl_rna):
import bpy
# Get our parents' properties, to not export them multiple times.
bl_rna_base = bl_rna.base
if bl_rna_base:
bl_rna_base_props = bl_rna_base.properties.values()
else:
bl_rna_base_props = ()
for prop in bl_rna.properties:
# Only write this property if our parent hasn't got it.
if prop in bl_rna_base_props:
continue
if prop.identifier == "rna_type":
continue
msgsrc = "bpy.types.{}.{}".format(bl_rna.identifier, prop.identifier)
context = getattr(prop, "translation_context", CONTEXT_DEFAULT)
if prop.name and (prop.name != prop.identifier or context):
key = (context, prop.name)
check(check_ctxt_rna, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if prop.description:
key = (CONTEXT_DEFAULT, prop.description)
check(check_ctxt_rna_tip, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if isinstance(prop, bpy.types.EnumProperty):
for item in prop.enum_items:
msgsrc = "bpy.types.{}.{}:'{}'".format(bl_rna.identifier,
prop.identifier,
item.identifier)
if item.name and item.name != item.identifier:
key = (CONTEXT_DEFAULT, item.name)
check(check_ctxt_rna, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if item.description:
key = (CONTEXT_DEFAULT, item.description)
check(check_ctxt_rna_tip, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
def walkRNA(bl_rna):
if filterRNA(bl_rna):
return
msgsrc = ".".join(("bpy.types", bl_rna.identifier))
context = getattr(bl_rna, "translation_context", CONTEXT_DEFAULT)
if bl_rna.name and (bl_rna.name != bl_rna.identifier or context):
key = (context, bl_rna.name)
check(check_ctxt_rna, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if bl_rna.description:
key = (CONTEXT_DEFAULT, bl_rna.description)
check(check_ctxt_rna_tip, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
if hasattr(bl_rna, 'bl_label') and bl_rna.bl_label:
key = (context, bl_rna.bl_label)
check(check_ctxt_rna, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
walkProperties(bl_rna)
def walkClass(cls):
walkRNA(cls.bl_rna)
def walk_keymap_hierarchy(hier, msgsrc_prev):
for lvl in hier:
msgsrc = "{}.{}".format(msgsrc_prev, lvl[1])
messages.setdefault((CONTEXT_DEFAULT, lvl[0]), []).append(msgsrc)
if lvl[3]:
walk_keymap_hierarchy(lvl[3], msgsrc)
# -------------------------------------------------------------------------
# Dump Messages
def process_cls_list(cls_list):
if not cls_list:
return 0
def full_class_id(cls):
""" gives us 'ID.Lamp.AreaLamp' which is best for sorting.
"""
cls_id = ""
bl_rna = cls.bl_rna
while bl_rna:
cls_id = "{}.{}".format(bl_rna.identifier, cls_id)
bl_rna = bl_rna.base
return cls_id
cls_list.sort(key=full_class_id)
processed = 0
for cls in cls_list:
walkClass(cls)
# classes.add(cls)
# Recursively process subclasses.
processed += process_cls_list(cls.__subclasses__()) + 1
return processed
# Parse everything (recursively parsing from bpy_struct "class"...).
processed = process_cls_list(type(bpy.context).__base__.__subclasses__())
print("{} classes processed!".format(processed))
# import pickle
# global classes
# classes = {str(c) for c in classes}
# with open("/home/i7deb64/Bureau/tpck_2", "wb") as f:
# pickle.dump(classes, f, protocol=0)
from bpy_extras.keyconfig_utils import KM_HIERARCHY
walk_keymap_hierarchy(KM_HIERARCHY, "KM_HIERARCHY")
def dump_messages_pytext(messages, check_ctxt):
""" dumps text inlined in the python user interface: eg.
layout.prop("someprop", text="My Name")
"""
import ast
# -------------------------------------------------------------------------
# Gather function names
import bpy
# key: func_id
# val: [(arg_kw, arg_pos), (arg_kw, arg_pos), ...]
func_translate_args = {}
# so far only 'text' keywords, but we may want others translated later
translate_kw = ("text", )
# Break recursive nodes look up on some kind of nodes.
# E.g. we dont want to get strings inside subscripts (blah["foo"])!
stopper_nodes = {ast.Subscript,}
for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
# check it has a 'text' argument
for (arg_pos, (arg_kw, arg)) in enumerate(func.parameters.items()):
if ((arg_kw in translate_kw) and
(arg.is_output == False) and
(arg.type == 'STRING')):
func_translate_args.setdefault(func_id, []).append((arg_kw,
arg_pos))
# print(func_translate_args)
check_ctxt_py = None
if check_ctxt:
check_ctxt_py = {"py_in_rna": (check_ctxt["py_in_rna"], messages.copy()),
"multi_lines": check_ctxt["multi_lines"],
"not_capitalized": check_ctxt["not_capitalized"],
"end_point": check_ctxt["end_point"]}
# -------------------------------------------------------------------------
# Function definitions
def extract_strings(fp_rel, node):
""" Recursively get strings, needed in case we have "Blah" + "Blah",
passed as an argument in that case it wont evaluate to a string.
However, break on some kind of stopper nodes, like e.g. Subscript.
"""
if type(node) == ast.Str:
eval_str = ast.literal_eval(node)
if eval_str:
key = (CONTEXT_DEFAULT, eval_str)
msgsrc = "{}:{}".format(fp_rel, node.lineno)
check(check_ctxt_py, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
return
for nd in ast.iter_child_nodes(node):
if type(nd) not in stopper_nodes:
extract_strings(fp_rel, nd)
def extract_strings_from_file(fp):
filedata = open(fp, 'r', encoding="utf8")
root_node = ast.parse(filedata.read(), fp, 'exec')
filedata.close()
fp_rel = os.path.relpath(fp, SOURCE_DIR)
for node in ast.walk(root_node):
if type(node) == ast.Call:
# print("found function at")
# print("%s:%d" % (fp, node.lineno))
# lambda's
if type(node.func) == ast.Name:
continue
# getattr(self, con.type)(context, box, con)
if not hasattr(node.func, "attr"):
continue
translate_args = func_translate_args.get(node.func.attr, ())
# do nothing if not found
for arg_kw, arg_pos in translate_args:
if arg_pos < len(node.args):
extract_strings(fp_rel, node.args[arg_pos])
else:
for kw in node.keywords:
if kw.arg == arg_kw:
extract_strings(fp_rel, kw.value)
# -------------------------------------------------------------------------
# Dump Messages
mod_dir = os.path.join(SOURCE_DIR,
"release",
"scripts",
"startup",
"bl_ui")
files = [os.path.join(mod_dir, fn)
for fn in sorted(os.listdir(mod_dir))
if not fn.startswith("_")
if fn.endswith("py")
]
# Dummy Cycles has its py addon in its own dir!
files += CUSTOM_PY_UI_FILES
for fp in files:
extract_strings_from_file(fp)
def dump_messages(do_messages, do_checks):
import collections
def enable_addons():
"""For now, enable all official addons, before extracting msgids."""
import addon_utils
import bpy
userpref = bpy.context.user_preferences
used_ext = {ext.module for ext in userpref.addons}
support = {"OFFICIAL"}
# collect the categories that can be filtered on
addons = [(mod, addon_utils.module_bl_info(mod)) for mod in
addon_utils.modules(addon_utils.addons_fake_modules)]
for mod, info in addons:
module_name = mod.__name__
if module_name in used_ext or info["support"] not in support:
continue
print(" Enabling module ", module_name)
bpy.ops.wm.addon_enable(module=module_name)
# XXX There are currently some problems with bpy/rna...
# *Very* tricky to solve!
# So this is a hack to make all newly added operator visible by
# bpy.types.OperatorProperties.__subclasses__()
for cat in dir(bpy.ops):
cat = getattr(bpy.ops, cat)
for op in dir(cat):
getattr(cat, op).get_rna()
# check for strings like ": %d"
ignore = ("%d", "%f", "%s", "%r", # string formatting
"*", ".", "(", ")", "-", "/", "\\", "+", ":", "#", "%"
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
"x", # used on its own eg: 100x200
"X", "Y", "Z", "W", # used alone. no need to include
)
def filter_message(msg):
msg_tmp = msg
for ign in ignore:
msg_tmp = msg_tmp.replace(ign, "")
if not msg_tmp.strip():
return True
# we could filter out different strings here
return False
if hasattr(collections, 'OrderedDict'):
messages = collections.OrderedDict()
else:
messages = {}
messages[(CONTEXT_DEFAULT, "")] = []
# Enable all wanted addons.
enable_addons()
check_ctxt = None
if do_checks:
check_ctxt = {"multi_rnatip": set(),
"multi_lines": set(),
"py_in_rna": set(),
"not_capitalized": set(),
"end_point": set(),
"undoc_ops": set()}
# get strings from RNA
dump_messages_rna(messages, check_ctxt)
# get strings from UI layout definitions text="..." args
dump_messages_pytext(messages, check_ctxt)
del messages[(CONTEXT_DEFAULT, "")]
if do_checks:
print("WARNINGS:")
keys = set()
for c in check_ctxt.values():
keys |= c
# XXX Temp, see below
c -= check_ctxt["multi_rnatip"]
for key in keys:
if key in check_ctxt["undoc_ops"]:
print("\tThe following operators are undocumented:")
else:
print("\t{}”|“{}”:".format(*key))
if key in check_ctxt["multi_lines"]:
print("\t\t-> newline in this message!")
if key in check_ctxt["not_capitalized"]:
print("\t\t-> message not capitalized!")
if key in check_ctxt["end_point"]:
print("\t\t-> message with endpoint!")
# XXX Hide this one for now, too much false positives.
# if key in check_ctxt["multi_rnatip"]:
# print("\t\t-> tip used in several RNA items")
if key in check_ctxt["py_in_rna"]:
print("\t\t-> RNA message also used in py UI code:")
print("\t\t{}".format("\n\t\t".join(messages[key])))
if do_messages:
print("Writing messages…")
num_written = 0
num_filtered = 0
with open(FILE_NAME_MESSAGES, 'w', encoding="utf8") as message_file:
for (ctx, key), value in messages.items():
# filter out junk values
if filter_message(key):
num_filtered += 1
continue
# Remove newlines in key and values!
message_file.write("\n".join(COMMENT_PREFIX + msgsrc.replace("\n", "") for msgsrc in value))
message_file.write("\n")
if ctx:
message_file.write(CONTEXT_PREFIX + ctx.replace("\n", "") + "\n")
message_file.write(key.replace("\n", "") + "\n")
num_written += 1
print("Written {} messages to: {} ({} were filtered out)." \
"".format(num_written, FILE_NAME_MESSAGES, num_filtered))
def main():
try:
import bpy
except ImportError:
print("This script must run from inside blender")
return
import sys
back_argv = sys.argv
sys.argv = sys.argv[sys.argv.index("--") + 1:]
import argparse
parser = argparse.ArgumentParser(description="Process UI messages " \
"from inside Blender.")
parser.add_argument('-c', '--no_checks', default=True,
action="store_false",
help="No checks over UI messages.")
parser.add_argument('-m', '--no_messages', default=True,
action="store_false",
help="No export of UI messages.")
parser.add_argument('-o', '--output', help="Output messages file path.")
args = parser.parse_args()
if args.output:
global FILE_NAME_MESSAGES
FILE_NAME_MESSAGES = args.output
dump_messages(do_messages=args.no_messages, do_checks=args.no_checks)
sys.argv = back_argv
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
main()

@ -0,0 +1,175 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Check pos in branches (or in trunk) for missing/unneeded messages.
import os
import sys
from codecs import open
import settings
import utils
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
BRANCHES_DIR = settings.BRANCHES_DIR
FILE_NAME_POT = settings.FILE_NAME_POT
def print_diff(ref_messages, messages, states):
# Remove comments from messages list!
messages = set(messages.keys()) - states["comm_msg"]
unneeded = (messages - ref_messages)
for msgid in unneeded:
print('\tUnneeded message id "{}"'.format(msgid))
missing = (ref_messages - messages)
for msgid in missing:
print('\tMissing message id "{}"'.format(msgid))
for msgid in states["comm_msg"]:
print('\tCommented message id "{}"'.format(msgid))
print("\t{} unneeded messages, {} missing messages, {} commented messages." \
"".format(len(unneeded), len(missing), len(states["comm_msg"])))
return 0
def process_po(ref_messages, po, glob_stats, do_stats, do_messages):
print("Checking {}...".format(po))
ret = 0
messages, states, stats = utils.parse_messages(po)
if do_messages:
t = print_diff(ref_messages, messages, states)
if t:
ret = t
if do_stats:
print("\tStats:")
t = utils.print_stats(stats, glob_stats, prefix=" ")
if t:
ret = t
if states["is_broken"]:
print("\tERROR! This .po is broken!")
ret = 1
return ret
def main():
import argparse
parser = argparse.ArgumentParser(description="Check pos in branches " \
"(or in trunk) for missing" \
"/unneeded messages.")
parser.add_argument('-s', '--stats', action="store_true",
help="Print pos stats.")
parser.add_argument('-m', '--messages', action="store_true",
help="Print pos missing/unneeded/commented messages.")
parser.add_argument('-t', '--trunk', action="store_true",
help="Check pos in /trunk/po rather than /branches.")
parser.add_argument('-p', '--pot',
help="Specify the .pot file used as reference.")
parser.add_argument('langs', metavar='ISO_code', nargs='*',
help="Restrict processed languages to those.")
args = parser.parse_args()
if args.pot:
global FILE_NAME_POT
FILE_NAME_POT = args.pot
glob_stats = {"nbr" : 0.0,
"lvl" : 0.0,
"lvl_ttips" : 0.0,
"lvl_trans_ttips" : 0.0,
"lvl_ttips_in_trans": 0.0,
"lvl_comm" : 0.0,
"nbr_signs" : 0,
"nbr_trans_signs" : 0,
"contexts" : set()}
ret = 0
pot_messages = None
if args.messages:
pot_messages, u1, pot_stats = utils.parse_messages(FILE_NAME_POT)
pot_messages = set(pot_messages.keys())
glob_stats["nbr_signs"] = pot_stats["nbr_signs"]
if args.langs:
for lang in args.langs:
if args.trunk:
po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
else:
po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
if os.path.exists(po):
t = process_po(pot_messages, po, glob_stats,
args.stats, args.messages)
if t:
ret = t
elif args.trunk:
for po in os.listdir(TRUNK_PO_DIR):
if po.endswith(".po"):
po = os.path.join(TRUNK_PO_DIR, po)
t = process_po(pot_messages, po, glob_stats,
args.stats, args.messages)
if t:
ret = t
else:
for lang in os.listdir(BRANCHES_DIR):
for po in os.listdir(os.path.join(BRANCHES_DIR, lang)):
if po.endswith(".po"):
po = os.path.join(BRANCHES_DIR, lang, po)
t = process_po(pot_messages, po, glob_stats,
args.stats, args.messages)
if t:
ret = t
if args.stats and glob_stats["nbr"] != 0.0:
nbr_contexts = len(glob_stats["contexts"]-{""})
if nbr_contexts != 1:
if nbr_contexts == 0:
nbr_contexts = "No"
_ctx_txt = "s are"
else:
_ctx_txt = " is"
print("\nAverage stats for all {:.0f} processed files:\n" \
" {:>6.1%} done!\n" \
" {:>6.1%} of messages are tooltips.\n" \
" {:>6.1%} of tooltips are translated.\n" \
" {:>6.1%} of translated messages are tooltips.\n" \
" {:>6.1%} of messages are commented.\n" \
" The org msgids are currently made of {} signs.\n" \
" All processed translations are currently made of {} signs.\n" \
" {} specific context{} present:\n {}\n" \
"".format(glob_stats["nbr"], glob_stats["lvl"]/glob_stats["nbr"],
glob_stats["lvl_ttips"]/glob_stats["nbr"],
glob_stats["lvl_trans_ttips"]/glob_stats["nbr"],
glob_stats["lvl_ttips_in_trans"]/glob_stats["nbr"],
glob_stats["lvl_comm"]/glob_stats["nbr"], glob_stats["nbr_signs"],
glob_stats["nbr_trans_signs"], nbr_contexts, _ctx_txt,
"\n ".join(glob_stats["contexts"]-{""})))
return ret
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
print(" *** WARNING! Number of tooltips is only an estimation! ***\n")
sys.exit(main())

@ -0,0 +1,97 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Clean (i.e. remove commented messages) pos in branches or trunk.
import os
import sys
import collections
from codecs import open
import settings
import utils
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
BRANCHES_DIR = settings.BRANCHES_DIR
def do_clean(po, strict):
print("Cleaning {}...".format(po))
messages, states, u1 = utils.parse_messages(po)
if strict and states["is_broken"]:
print("ERROR! This .po file is broken!")
return 1
for msgkey in states["comm_msg"]:
del messages[msgkey]
utils.write_messages(po, messages, states["comm_msg"], states["fuzzy_msg"])
print("Removed {} commented messages.".format(len(states["comm_msg"])))
return 0
def main():
import argparse
parser = argparse.ArgumentParser(description="Clean pos in branches " \
"or trunk (i.e. remove " \
"all commented messages).")
parser.add_argument('-t', '--trunk', action="store_true",
help="Clean pos in trunk rather than branches.")
parser.add_argument('-s', '--strict', action="store_true",
help="Raise an error if a po is broken.")
parser.add_argument('langs', metavar='ISO_code', nargs='*',
help="Restrict processed languages to those.")
args = parser.parse_args()
ret = 0
if args.langs:
for lang in args.langs:
if args.trunk:
po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
else:
po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
if os.path.exists(po):
t = do_clean(po, args.strict)
if t:
ret = t
elif args.trunk:
for po in os.listdir(TRUNK_PO_DIR):
if po.endswith(".po"):
po = os.path.join(TRUNK_PO_DIR, po)
t = do_clean(po, args.strict)
if t:
ret = t
else:
for lang in os.listdir(BRANCHES_DIR):
for po in os.listdir(os.path.join(BRANCHES_DIR, lang)):
if po.endswith(".po"):
po = os.path.join(BRANCHES_DIR, lang, po)
t = do_clean(po, args.strict)
if t:
ret = t
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
sys.exit(main())

@ -0,0 +1,119 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Import in trunk/po all po from branches translated above the given threshold.
import os
import shutil
import sys
import subprocess
from codecs import open
import settings
import utils
import rtl_preprocess
# Module-level shortcuts for the settings values used throughout this script.
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
BRANCHES_DIR = settings.BRANCHES_DIR
RTL_PREPROCESS_FILE = settings.RTL_PREPROCESS_FILE
PY3 = settings.PYTHON3_EXEC  # NOTE(review): not referenced in this script — confirm before removing.
def main():
    """Copy each /branches po translated above the threshold into /trunk,
    rtl-preprocessing it first when the language dir carries the rtl marker
    file.

    Returns 1 when at least one po was skipped because it is broken (only
    in --strict mode), 0 otherwise.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Import advanced enough pos "
                                                 "from branches to trunk.")
    parser.add_argument('-t', '--threshold', type=int,
                        help="Import threshold, as a percentage.")
    parser.add_argument('-s', '--strict', action="store_true",
                        help="Raise an error if a po is broken.")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    ret = 0

    # Thresholds are expressed as percentages; the settings default of -1
    # means "import everything".
    level = settings.IMPORT_MIN_LEVEL if args.threshold is None \
        else args.threshold
    threshold = float(level) / 100.0

    for lang in os.listdir(BRANCHES_DIR):
        if args.langs and lang not in args.langs:
            continue
        po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
        if not os.path.exists(po):
            continue

        po_is_rtl = os.path.join(BRANCHES_DIR, lang, RTL_PREPROCESS_FILE)
        msgs, state, stats = utils.parse_messages(po)
        lvl = float(stats["trans_msg"]) / float(stats["tot_msg"]) \
            if stats["tot_msg"] else 0.0

        if lvl <= threshold:
            # Not enough translated messages for this language.
            if state["is_broken"] and args.strict:
                print("{:<10}: {:>6.1%} done, BROKEN and not enough "
                      "translated messages, skipped".format(lang, lvl))
                ret = 1
            else:
                print("{:<10}: {:>6.1%} done, not enough translated "
                      "messages, skipped.".format(lang, lvl))
            continue

        if state["is_broken"] and args.strict:
            print("{:<10}: {:>6.1%} done, but BROKEN, skipped.".format(lang, lvl))
            ret = 1
            continue

        if os.path.exists(po_is_rtl):
            # rtl language: run every translation through fribidi before
            # writing to trunk; keep the untouched po as <lang>_raw.po.
            out_po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
            out_raw_po = os.path.join(TRUNK_PO_DIR, "_".join((lang, "raw.po")))
            keys = list(msgs.keys())
            raw_trans = ["".join(msgs[k]["msgstr_lines"]) for k in keys]
            for k, processed in zip(keys, rtl_preprocess.log2vis(raw_trans)):
                # Mono-line for now...
                msgs[k]["msgstr_lines"] = [processed]
            utils.write_messages(out_po, msgs, state["comm_msg"],
                                 state["fuzzy_msg"])
            # Also copies org po!
            shutil.copy(po, out_raw_po)
            print("{:<10}: {:>6.1%} done, enough translated "
                  "messages, processed and copied to trunk.".format(lang, lvl))
        else:
            shutil.copy(po, TRUNK_PO_DIR)
            print("{:<10}: {:>6.1%} done, enough translated "
                  "messages, copied to trunk.".format(lang, lvl))
    return ret
# Allow running this script standalone; exit code is main()'s return value.
if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

@ -0,0 +1,156 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Merge one or more .po files into the first dest one.
# If a msgkey is present in more than one merged po, the one in the first file wins, unless
# its marked as fuzzy and one later is not.
# The fuzzy flag is removed if necessary.
# All other comments are never modified.
# However, commented messages in dst will always remain commented, and commented messages are
# never merged from sources.
import sys
from codecs import open
import utils
def main():
    # Merge every src po into args.dst, then rewrite args.dst in place.
    # Precedence: the first file providing a translated (non-fuzzy) msgstr
    # wins; a fuzzy msgstr is kept only until a translated one shows up.
    # Returns 1 if any po (dst or src) was broken, 0 otherwise.
    import argparse
    parser = argparse.ArgumentParser(description="" \
                        "Merge one or more .po files into the first dest one.\n" \
                        "If a msgkey (msgid, msgctxt) is present in more than " \
                        "one merged po, the one in the first file wins, unless " \
                        "its marked as fuzzy and one later is not.\n" \
                        "The fuzzy flag is removed if necessary.\n" \
                        "All other comments are never modified.\n" \
                        "Commented messages in dst will always remain " \
                        "commented, and commented messages are never merged " \
                        "from sources.")
    parser.add_argument('-s', '--stats', action="store_true",
                        help="Show statistics info.")
    parser.add_argument('-r', '--replace', action="store_true",
                        help="Replace existing messages of same \"level\" already in dest po.")
    parser.add_argument('dst', metavar='dst.po',
                        help="The dest po into which merge the others.")
    parser.add_argument('src', metavar='src.po', nargs='+',
                        help="The po's to merge into the dst.po one.")
    args = parser.parse_args()

    ret = 0
    # msgkeys whose translation is final — later src files may not override.
    done_msgkeys = set()
    # msgkeys merged as fuzzy — a later *translated* src may still override.
    done_fuzzy_msgkeys = set()
    nbr_merged = 0
    nbr_replaced = 0
    nbr_added = 0
    nbr_unfuzzied = 0

    dst_messages, dst_states, dst_stats = utils.parse_messages(args.dst)
    if dst_states["is_broken"]:
        print("Dest po is BROKEN, aborting.")
        return 1
    if args.stats:
        print("Dest po, before merging:")
        utils.print_stats(dst_stats, prefix="\t")
    # If we dont want to replace existing valid translations, pre-populate
    # done_msgkeys and done_fuzzy_msgkeys.
    if not args.replace:
        done_msgkeys = dst_states["trans_msg"].copy()
        done_fuzzy_msgkeys = dst_states["fuzzy_msg"].copy()

    for po in args.src:
        messages, states, stats = utils.parse_messages(po)
        if states["is_broken"]:
            print("\tSrc po {} is BROKEN, skipping.".format(po))
            ret = 1
            continue
        print("\tMerging {}...".format(po))
        if args.stats:
            print("\t\tMerged po stats:")
            utils.print_stats(stats, prefix="\t\t\t")
        for msgkey, val in messages.items():
            msgctxt, msgid = msgkey
            # This msgkey has already been completely merged, or is a commented one,
            # or the new message is commented, skip it.
            if msgkey in (done_msgkeys | dst_states["comm_msg"] | states["comm_msg"]):
                continue
            is_ttip = utils.is_tooltip(msgid)
            # New messages does not yet exists in dest.
            if msgkey not in dst_messages:
                dst_messages[msgkey] = messages[msgkey]
                if msgkey in states["fuzzy_msg"]:
                    done_fuzzy_msgkeys.add(msgkey)
                    dst_states["fuzzy_msg"].add(msgkey)
                elif msgkey in states["trans_msg"]:
                    done_msgkeys.add(msgkey)
                    dst_states["trans_msg"].add(msgkey)
                    dst_stats["trans_msg"] += 1
                    if is_ttip:
                        dst_stats["trans_ttips"] += 1
                nbr_added += 1
                dst_stats["tot_msg"] += 1
                if is_ttip:
                    dst_stats["tot_ttips"] += 1
            # From now on, the new messages is already in dst.
            # New message is neither translated nor fuzzy, skip it.
            elif msgkey not in (states["trans_msg"] | states["fuzzy_msg"]):
                continue
            # From now on, the new message is either translated or fuzzy!
            # The new message is translated.
            elif msgkey in states["trans_msg"]:
                dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
                done_msgkeys.add(msgkey)
                done_fuzzy_msgkeys.discard(msgkey)
                if msgkey in dst_states["fuzzy_msg"]:
                    dst_states["fuzzy_msg"].remove(msgkey)
                    nbr_unfuzzied += 1
                if msgkey not in dst_states["trans_msg"]:
                    dst_states["trans_msg"].add(msgkey)
                    dst_stats["trans_msg"] += 1
                    if is_ttip:
                        dst_stats["trans_ttips"] += 1
                else:
                    nbr_replaced += 1
                nbr_merged += 1
            # The new message is fuzzy, org one is fuzzy too,
            # and this msgkey has not yet been merged.
            elif msgkey not in (dst_states["trans_msg"] | done_fuzzy_msgkeys):
                dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
                done_fuzzy_msgkeys.add(msgkey)
                dst_states["fuzzy_msg"].add(msgkey)
                nbr_merged += 1
                # NOTE(review): counted as "replaced" even when dst's previous
                # msgstr was empty/untranslated — confirm this is intended.
                nbr_replaced += 1

    utils.write_messages(args.dst, dst_messages, dst_states["comm_msg"], dst_states["fuzzy_msg"])

    print("Merged completed. {} messages were merged (among which {} were replaced), " \
          "{} were added, {} were \"un-fuzzied\"." \
          "".format(nbr_merged, nbr_replaced, nbr_added, nbr_unfuzzied))
    if args.stats:
        print("Final merged po stats:")
        utils.print_stats(dst_stats, prefix="\t")
    return ret
# Allow running this script standalone; exit code is main()'s return value.
if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

@ -0,0 +1,231 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Preprocess right-to-left languages.
# You can use it either standalone, or through import_po_from_branches or
# update_trunk.
#
# Notes: This has been tested on Linux, not 100% it will work nicely on
# Windows or OsX.
# This uses ctypes, as there is no py3 binding for fribidi currently.
# This implies you only need the compiled C library to run it.
# Finally, note that it handles some formating/escape codes (like
# \", %s, %x12, %.4f, etc.), protecting them from ugly (evil) fribidi,
# which seems completely unaware of such things (as unicode is...).
import sys
import ctypes
import settings
import utils
# Path of the compiled fribidi shared library (configured in settings.py).
FRIBIDI_LIB = settings.FRIBIDI_LIB


###### Import C library and recreate "defines". #####

fbd = ctypes.CDLL(FRIBIDI_LIB)


# The constants below mirror the #define's of fribidi's public headers;
# the original C lines are quoted in the comments for reference.

#define FRIBIDI_MASK_NEUTRAL	0x00000040L	/* Is neutral */
FRIBIDI_PAR_ON = 0x00000040

#define FRIBIDI_FLAG_SHAPE_MIRRORING	0x00000001
#define FRIBIDI_FLAG_REORDER_NSM	0x00000002
#define FRIBIDI_FLAG_SHAPE_ARAB_PRES	0x00000100
#define FRIBIDI_FLAG_SHAPE_ARAB_LIGA	0x00000200
#define FRIBIDI_FLAG_SHAPE_ARAB_CONSOLE	0x00000400
#define FRIBIDI_FLAG_REMOVE_BIDI	0x00010000
#define FRIBIDI_FLAG_REMOVE_JOINING	0x00020000
#define FRIBIDI_FLAG_REMOVE_SPECIALS	0x00040000
#define FRIBIDI_FLAGS_DEFAULT		( \
#	FRIBIDI_FLAG_SHAPE_MIRRORING	| \
#	FRIBIDI_FLAG_REORDER_NSM	| \
#	FRIBIDI_FLAG_REMOVE_SPECIALS	)
#define FRIBIDI_FLAGS_ARABIC		( \
#	FRIBIDI_FLAG_SHAPE_ARAB_PRES	| \
#	FRIBIDI_FLAG_SHAPE_ARAB_LIGA	)
FRIBIDI_FLAG_SHAPE_MIRRORING = 0x00000001
FRIBIDI_FLAG_REORDER_NSM = 0x00000002
FRIBIDI_FLAG_REMOVE_SPECIALS = 0x00040000

FRIBIDI_FLAG_SHAPE_ARAB_PRES = 0x00000100
FRIBIDI_FLAG_SHAPE_ARAB_LIGA = 0x00000200

FRIBIDI_FLAGS_DEFAULT = FRIBIDI_FLAG_SHAPE_MIRRORING | \
                        FRIBIDI_FLAG_REORDER_NSM | \
                        FRIBIDI_FLAG_REMOVE_SPECIALS

FRIBIDI_FLAGS_ARABIC = FRIBIDI_FLAG_SHAPE_ARAB_PRES | \
                       FRIBIDI_FLAG_SHAPE_ARAB_LIGA
##### Kernel processing funcs. #####
def protect_format_seq(msg):
    """
    Find some specific escaping/formating sequences (like \\", %s, %x12,
    %.4f, etc.) and protect them from any modification, by wrapping each
    one in an LRE...PDF (left-to-right embedding / pop directional
    formatting) pair so that fribidi's reordering leaves it alone.

    Returns the protected string.
    """
    LRE = "\u202A"
    PDF = "\u202C"
    # Most likely incomplete, but seems to cover current needs.
    format_codes = set("tslfd")
    digits = set(".0123456789")

    idx = 0
    ret = []
    ln = len(msg)
    while idx < ln:
        dlt = 1  # Number of chars consumed; > 1 means "protect this run".
        # \" or \'
        if idx < (ln - 1) and msg[idx] == '\\' and msg[idx + 1] in "\"\'":
            dlt = 2
        # %x12
        elif idx < (ln - 2) and msg[idx] == '%' and msg[idx + 1] in "x" and \
             msg[idx + 2] in digits:
            # msg[idx + dlt] is the next not-yet-consumed char; extend over
            # all trailing digits. (The old "idx + dlt + 1" check was
            # off-by-one: "%x12" ended up protected as just "%x1".)
            dlt = 2
            while (idx + dlt) < ln and msg[idx + dlt] in digits:
                dlt += 1
        # %.4f
        elif idx < (ln - 3) and msg[idx] == '%' and msg[idx + 1] in digits:
            dlt = 2
            while (idx + dlt) < ln and msg[idx + dlt] in digits:
                dlt += 1
            if (idx + dlt) < ln and msg[idx + dlt] in format_codes:
                # Include the final format code ("f" of "%.4f") in the run.
                dlt += 1
            else:
                # Not a format sequence after all — protect nothing.
                dlt = 1
        # %s
        elif idx < (ln - 1) and msg[idx] == '%' and \
             msg[idx + 1] in format_codes:
            dlt = 2

        if dlt > 1:
            ret.append(LRE)
        ret.append(msg[idx:idx + dlt])
        idx += dlt
        if dlt > 1:
            ret.append(PDF)
    return "".join(ret)
def log2vis(msgs):
    """
    Globally mimics deprecated fribidi_log2vis.
    msgs should be an iterable of messages to rtl-process.

    This is a generator: it lazily yields one visually-reordered (and, for
    arabic, shaped) string per input message.
    """
    for msg in msgs:
        # Protect formatting/escape sequences (%s, \", ...) before handing
        # the string to fribidi, which would otherwise mangle them.
        msg = protect_format_seq(msg)

        fbc_str = ctypes.create_unicode_buffer(msg)
        # The buffer length includes the terminating NUL, hence the -1.
        ln = len(fbc_str) - 1
#        print(fbc_str.value, ln)
        btypes = (ctypes.c_int * ln)()
        embed_lvl = (ctypes.c_uint8 * ln)()
        # FRIBIDI_PAR_ON: let fribidi auto-detect the paragraph direction.
        pbase_dir = ctypes.c_int(FRIBIDI_PAR_ON)
        jtypes = (ctypes.c_uint8 * ln)()
        flags = FRIBIDI_FLAGS_DEFAULT | FRIBIDI_FLAGS_ARABIC

        # Find out direction of each char.
        fbd.fribidi_get_bidi_types(fbc_str, ln, ctypes.byref(btypes))
#        print(*btypes)
        fbd.fribidi_get_par_embedding_levels(btypes, ln,
                                             ctypes.byref(pbase_dir),
                                             embed_lvl)
#        print(*embed_lvl)

        # Joinings for arabic chars.
        fbd.fribidi_get_joining_types(fbc_str, ln, jtypes)
#        print(*jtypes)
        fbd.fribidi_join_arabic(btypes, ln, embed_lvl, jtypes)
#        print(*jtypes)

        # Final Shaping! (Mutates fbc_str in place.)
        fbd.fribidi_shape(flags, embed_lvl, ln, jtypes, fbc_str)

#        print(fbc_str.value)
#        print(*(ord(c) for c in fbc_str))
        # And now, the reordering.
        # Note that here, we expect a single line, so no need to do
        # fancy things...
        fbd.fribidi_reorder_line(flags, btypes, ln, 0, pbase_dir, embed_lvl,
                                 fbc_str, None)
#        print(fbc_str.value)
#        print(*(ord(c) for c in fbc_str))

        yield fbc_str.value
##### Command line stuff. #####
def main():
    """Standalone entry point: rtl-preprocess every message of src.po and
    write the result into dst.po. Returns 1 when src.po is broken, else 0."""
    import argparse
    arg_parser = argparse.ArgumentParser(description="" \
                        "Preprocesses right-to-left languages.\n" \
                        "You can use it either standalone, or through " \
                        "import_po_from_branches or update_trunk.\n\n" \
                        "Note: This has been tested on Linux, not 100% it will " \
                        "work nicely on Windows or OsX.\n" \
                        "Note: This uses ctypes, as there is no py3 binding for " \
                        "fribidi currently. This implies you only need the " \
                        "compiled C library to run it.\n" \
                        "Note: It handles some formating/escape codes (like " \
                        "\\\", %s, %x12, %.4f, etc.), protecting them from ugly " \
                        "(evil) fribidi, which seems completely unaware of such " \
                        "things (as unicode is...).")
    arg_parser.add_argument('dst', metavar='dst.po',
                            help="The dest po into which write the " \
                                 "pre-processed messages.")
    arg_parser.add_argument('src', metavar='src.po',
                            help="The po's to pre-process messages.")
    opts = arg_parser.parse_args()

    msgs, state, u1 = utils.parse_messages(opts.src)
    if state["is_broken"]:
        print("Source po is BROKEN, aborting.")
        return 1

    # Flatten each translation to a single string, push the whole batch
    # through fribidi, then store the results back (mono-line for now...).
    msgkeys = list(msgs.keys())
    flattened = ["".join(msgs[k]["msgstr_lines"]) for k in msgkeys]
    for k, processed in zip(msgkeys, log2vis(flattened)):
        msgs[k]["msgstr_lines"] = [processed]

    utils.write_messages(opts.dst, msgs, state["comm_msg"], state["fuzzy_msg"])

    print("RTL pre-process completed.")
    return 0
# Allow running this script standalone; exit code is main()'s return value.
if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

@ -0,0 +1,286 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Global settings used by all scripts in this dir.
# XXX Before any use of the tools in this dir, please make a copy of this file
# named "setting.py"
# XXX This is a template, most values should be OK, but some youll have to
# edit (most probably, BLENDER_EXEC and SOURCE_DIR).
import os.path


###############################################################################
# MISC
###############################################################################

# The min level of completeness for a po file to be imported from /branches
# into /trunk, as a percentage. -1 means "import everything".
IMPORT_MIN_LEVEL = -1

# The comment prefix used in generated messages.txt file.
COMMENT_PREFIX = "#~ "

# The comment prefix used to mark sources of msgids, in po's.
COMMENT_PREFIX_SOURCE = "#: "

# The context prefix used in generated messages.txt file (marks msgctxt lines).
CONTEXT_PREFIX = "MSGCTXT:"

# Default context.
CONTEXT_DEFAULT = ""

# Undocumented operator placeholder string.
UNDOC_OPS_STR = "(undocumented operator)"

# The gettext domain.
DOMAIN = "blender"

# Our own "gettext" stuff.
# File type (ext) to parse.
PYGETTEXT_ALLOWED_EXTS = {".c", ".cpp", ".cxx", ".hpp", ".hxx", ".h"}

# Where to search contexts definitions, relative to SOURCE_DIR (defined below).
PYGETTEXT_CONTEXTS_DEFSRC = os.path.join("source", "blender", "blenfont",
                                         "BLF_translation.h")

# Regex to extract contexts defined in BLF_translation.h
# XXX Not full-proof, but should be enough here!
PYGETTEXT_CONTEXTS = "#define\\s+(BLF_I18NCONTEXT_[A-Z_0-9]+)\\s+\"([^\"]*)\""

# Keywords' regex.
# XXX Most unfortunately, we can't use named backreferences inside character
#     sets, which makes the regexes even more twisty... :/
# NOTE: "{_}" is a str.format() placeholder, so the same base pattern can be
#       reused several times with distinct group names (re forbids duplicate
#       group names within one pattern).
_str_base = (
    # Match void string
    "(?P<{_}1>[\"'])(?P={_}1)"  # Get opening quote (' or "), and closing immediately.
    "|"
    # Or match non-void string
    "(?P<{_}2>[\"'])"  # Get opening quote (' or ").
    "(?{capt}(?:"
    # This one is for crazy things like "hi \\\\\" folks!"...
    r"(?:(?!<\\)(?:\\\\)*\\(?=(?P={_}2)))|"
    # The most common case.
    ".(?!(?P={_}2))"
    ")+.)"  # Don't forget the last char!
    "(?P={_}2)"  # And closing quote.
)
str_clean_re = _str_base.format(_="g", capt="P<clean>")

# Here we have to consider two different cases (empty string and other).
_str_whole_re = (
    _str_base.format(_="{_}1_", capt=":") +
    # Optional loop start, this handles "split" strings...
    "(?:(?<=[\"'])\\s*(?=[\"'])(?:"
    + _str_base.format(_="{_}2_", capt=":") +
    # End of loop.
    "))*"
)
_ctxt_re = r"(?P<ctxt_raw>(?:" + _str_whole_re.format(_="_ctxt") + r")|(?:[A-Z_0-9]+))"
_msg_re = r"(?P<msg_raw>" + _str_whole_re.format(_="_msg") + r")"
# One regex per translation macro: IFACE_(msg), TIP_(msg) and N_(msg), plus
# the CTX_* variants which take a context argument first.
PYGETTEXT_KEYWORDS = (() +
    tuple((r"{}\(\s*" + _msg_re + r"\s*\)").format(it)
          for it in ("IFACE_", "TIP_", "N_")) +
    tuple((r"{}\(\s*" + _ctxt_re + r"\s*,\s*" + _msg_re + r"\s*\)").format(it)
          for it in ("CTX_IFACE_", "CTX_TIP_", "CTX_N_"))
)

#GETTEXT_KEYWORDS = ("IFACE_", "CTX_IFACE_:1c,2", "TIP_", "CTX_TIP_:1c,2",
#                    "N_", "CTX_N_:1c,2")
# Should po parser warn when finding a first letter not capitalized?
WARN_MSGID_NOT_CAPITALIZED = True

# Strings that should not raise above warning!
# (Technical terms, locale codes, fragments of multi-line messages, addon
# field names, etc. — all legitimately starting with a lower-case letter.)
WARN_MSGID_NOT_CAPITALIZED_ALLOWED = {
    "",  # Simplifies things... :p
    "sin(x) / x",
    "fBM",
    "sqrt(x*x+y*y+z*z)",
    "iTaSC",
    "bItasc",
    "px",
    "mm",
    "fStop",
    "sRGB",
    "iso-8859-15",
    "utf-8",
    "ascii",
    "re",
    "y",
    "ac3",
    "flac",
    "mkv",
    "mp2",
    "mp3",
    "ogg",
    "wav",
    "iTaSC parameters",
    "vBVH",
    "rv",
    "en_US",
    "fr_FR",
    "it_IT",
    "ru_RU",
    "zh_CN",
    "es",
    "zh_TW",
    "ar_EG",
    "pt",
    "bg_BG",
    "ca_AD",
    "hr_HR",
    "cs_CZ",
    "nl_NL",
    "fi_FI",
    "de_DE",
    "el_GR",
    "id_ID",
    "ja_JP",
    "ky_KG",
    "ko_KR",
    "ne_NP",
    "fa_IR",
    "pl_PL",
    "ro_RO",
    "sr_RS",
    "sr_RS@latin",
    "sv_SE",
    "uk_UA",
    "tr_TR",
    "hu_HU",
    "available with",                # Is part of multi-line msg.
    "virtual parents",               # Is part of multi-line msg.
    "description",                   # Addons' field. :/
    "location",                      # Addons' field. :/
    "author",                        # Addons' field. :/
    "in memory to enable editing!",  # Is part of multi-line msg.
    "iScale",
    "dx",
    "p0",
    "res",
}
###############################################################################
# PATHS
###############################################################################

# The tools path, should be OK.
TOOLS_DIR = os.path.join(os.path.dirname(__file__))

# The Python3 executable. You'll likely have to edit it in your
# user_settings.py if you're under Windows.
PYTHON3_EXEC = "python3"

# The Blender executable!
# This is just an example, you'll most likely have to edit it in your
# user_settings.py!
BLENDER_EXEC = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..",
                                            "blender"))

# The xgettext tool. You'll likely have to edit it in your user_settings.py
# if you're under Windows.
GETTEXT_XGETTEXT_EXECUTABLE = "xgettext"

# The gettext msgmerge tool. You'll likely have to edit it in your
# user_settings.py if you're under Windows.
GETTEXT_MSGMERGE_EXECUTABLE = "msgmerge"

# The gettext msgfmt "compiler". You'll likely have to edit it in your
# user_settings.py if you're under Windows.
GETTEXT_MSGFMT_EXECUTABLE = "msgfmt"

# The svn binary... You'll likely have to edit it in your
# user_settings.py if you're under Windows.
SVN_EXECUTABLE = "svn"

# The FriBidi C compiled library (.so under Linux, .dll under windows...).
# You'll likely have to edit it in your user_settings.py if you're under
# Windows, e.g. using the included one:
# FRIBIDI_LIB = os.path.join(TOOLS_DIR, "libfribidi.dll")
FRIBIDI_LIB = "libfribidi.so.0"

# The name of the (currently empty) file that must be present in a po's
# directory to enable rtl-preprocess.
RTL_PREPROCESS_FILE = "is_rtl"

# The Blender source root path.
# This is just an example, you'll most likely have to override it in your
# user_settings.py!
SOURCE_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..",
                                          "..", "..", "blender_msgs"))

# The bf-translation repository (you'll likely have to override this in your
# user_settings.py).
I18N_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..",
                                        "..", "..", "i18n"))

# The /branches path (overriden in bf-translation's i18n_override_settings.py).
BRANCHES_DIR = os.path.join(I18N_DIR, "branches")

# The /trunk path (overriden in bf-translation's i18n_override_settings.py).
TRUNK_DIR = os.path.join(I18N_DIR, "trunk")

# The /trunk/po path (overriden in bf-translation's i18n_override_settings.py).
TRUNK_PO_DIR = os.path.join(TRUNK_DIR, "po")

# The /trunk/mo path (overriden in bf-translation's i18n_override_settings.py).
TRUNK_MO_DIR = os.path.join(TRUNK_DIR, "locale")

# The file storing Blender-generated messages.
FILE_NAME_MESSAGES = os.path.join(TRUNK_PO_DIR, "messages.txt")

# The Blender source path to check for i18n macros.
POTFILES_SOURCE_DIR = os.path.join(SOURCE_DIR, "source")

# The "source" file storing which files should be processed by xgettext,
# used to create FILE_NAME_POTFILES
FILE_NAME_SRC_POTFILES = os.path.join(TRUNK_PO_DIR, "_POTFILES.in")

# The final (generated) file storing which files
# should be processed by xgettext.
FILE_NAME_POTFILES = os.path.join(TRUNK_PO_DIR, "POTFILES.in")

# The template messages file.
FILE_NAME_POT = os.path.join(TRUNK_PO_DIR, ".".join((DOMAIN, "pot")))

# Other py files that should be searched for ui strings, relative to SOURCE_DIR.
# Needed for Cycles, currently...
CUSTOM_PY_UI_FILES = [os.path.join("intern", "cycles", "blender",
                                   "addon", "ui.py"),
                      ]

# A cache storing validated msgids, to avoid re-spellchecking them.
SPELL_CACHE = os.path.join("/tmp", ".spell_cache")


# Custom override settings must be one dir above i18n tools itself!
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
try:
    from i18n_override_settings import *
except ImportError:  # If no i18n_override_settings available, it's no error!
    pass

# Override with custom user settings, if available.
try:
    from user_settings import *
except ImportError:  # If no user_settings available, it's no error!
    pass

@ -0,0 +1,490 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import re
# A "word" is an optional run of capitals followed by an optional run of
# lower-case letters, bounded by whitespace/quotes/punctuation (or a
# letter+slash/dash pair, for things like "and/or").
_valid_before = r"(?<=[\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
_valid_after = r"(?=[\s'\"`.!?,;:])|(?=[/-]\s*[a-zA-Z])|(?=$)"
_valid_words = "(?:%s)(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:%s)" \
               % (_valid_before, _valid_after)
_reg = re.compile(_valid_words)


def split_words(text):
    """Return the list of non-empty words found in text (empty matches of
    the boundary regex are discarded)."""
    return list(filter(None, _reg.findall(text)))
# These must be all lower case for comparisons
dict_uimsgs = {
# OK words
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
"decrement",
"doesn", # doesn't
"fader",
"hoc", # ad-hoc
"indices",
"iridas",
"isn", # isn't
"iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"vertices",
# Merged words
"addon", "addons",
"antialiasing",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoname",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitrate",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"chunksize",
"de",
"defocus",
"denoise",
"despill", "despilling",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline",
"hemi",
"inscatter",
"lightless",
"lookup", "lookups",
"mathutils",
"midlevel",
"midground",
"mixdown",
"multi",
"multifractal",
"multires", "multiresolution",
"multisampling",
"multitexture",
"namespace",
"keyconfig",
"playhead",
"polyline",
"popup", "popups",
"pre",
"precalculate",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing",
"preseek",
"readonly",
"realtime",
"rekey",
"remesh",
"reprojection",
"resize",
"restpose",
"retarget", "retargets", "retargeting", "retargeted",
"ringnoise",
"rolloff",
"screencast", "screenshot", "screenshots",
"selfcollision",
"singletexture",
"startup",
"stateful",
"starfield",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"un",
"unbake",
"uncomment",
"undeformed",
"undistort",
"ungroup",
"unhide",
"unindent",
"unkeyed",
"unpremultiply",
"unprojected",
"unreacted",
"unregister",
"unselected",
"unsubdivided",
"unshadowed",
"unspill",
"unstitchable",
"vectorscope",
"worldspace",
"workflow",
# Neologisms, slangs
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"editability",
"keyer",
"lacunarity",
"numerics",
"occluder",
"passepartout",
"perspectively",
"polygonization",
"selectability",
"slurph",
"trackability",
"transmissivity",
"rasterized", "rasterization",
"renderer", "renderable", "renderability",
# Abbreviations
"aero",
"amb",
"anim",
"bool",
"calc",
"config", "configs",
"const",
"coord", "coords",
"dof",
"dupli", "duplis",
"eg",
"esc",
"fac",
"grless",
"http",
"init",
"kbit",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"multicam",
"num",
"ok",
"ortho",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"spec",
"struct", "structs",
"tex",
"tri", "tris",
"uv", "uvs", "uvw", "uw", "uvmap",
"vec",
"vert", "verts",
"vis",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# General computer/science terms
"boid", "boids",
"equisolid",
"euler", "eulers",
"hashable",
"intrinsics",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"nand", "xnor",
"normals",
"numpad",
"octree",
"opengl",
"pulldown", "pulldowns",
"quantized",
"samplerate",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"tooltip", "tooltips",
"trackpad",
"unicode",
"viewport", "viewports",
"viscoelastic",
"wildcard", "wildcards",
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chrominance",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"deinterlace",
"dropoff",
"eigenvectors",
"equirectangular",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"lightmap",
"lossless", "lossy",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"nurb", "nurbs",
"perlin",
"phong",
"radiosity",
"raytrace", "raytracing", "raytraced",
"renderfarm",
"shader", "shaders",
"specular", "specularity",
"spillmap",
"sobel",
"tonemap",
"toon",
"timecode",
"voronoi",
"voxel", "voxels",
"wireframe",
"zmask",
"ztransp",
# Blender terms
"bbone",
"breakdowner",
"bspline",
"bweight",
"datablock", "datablocks",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"fcurve", "fcurves",
"fluidsim",
"frameserver",
"enum",
"keyframe", "keyframes", "keyframing", "keyframed",
"metaball", "metaballs",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"nabla",
"navmesh",
"outliner",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"sunsky",
"subsurf",
"texface",
"timeline", "timelines",
"tosphere",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"wetmap", "wetmaps",
"wpaint",
# Algorithm names
"beckmann",
"catmull",
"catrom",
"chebychev",
"kutta",
"lennard",
"minkowsky",
"minnaert",
"musgrave",
"nayar",
"netravali",
"oren",
"prewitt",
"runge",
"verlet",
"worley",
# Acronyms
"aa", "msaa",
"api",
"asc", "cdl",
"ascii",
"atrac",
"bw",
"ccd",
"cmd",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"fh",
"fov",
"fft",
"gfx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdr",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva",
"id",
"itu",
"lhs",
"lmb", "mmb", "rmb",
"mux",
"ndof",
"ppc",
"px",
"qmc",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"svn",
"ui",
"unix",
"vbo", "vbos",
"ycc", "ycca",
"yuv", "yuva",
# Blender acronyms
"bge",
"bli",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
# CG acronyms
"ao",
"bsdf",
"ior",
"mocap",
# Files types/formats
"avi",
"attrac",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dxf",
"eps",
"exr",
"fbx",
"ffmpeg",
"flac",
"gzip",
"ico",
"jpg", "jpeg",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"piz",
"png",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"wav",
"xiph",
"xml",
"xna",
"xvid",
}

@ -0,0 +1,104 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update all branches:
# * Generate a temp messages.txt file.
# * Use it to generate a temp .pot file.
# * Use it to update all .pos in /branches.
import subprocess
import os
import sys
import tempfile
import settings
# Python 3 interpreter used to spawn the helper update scripts below.
PY3 = settings.PYTHON3_EXEC
def main():
    """Regenerate the message template and update every po in /branches.

    Steps (each delegated to a helper script run with PY3):
      * Generate a temp messages.txt file (update_msg.py).
      * Use it to generate a temp .pot file (update_pot.py).
      * Use it to update all .pos in /branches (update_po.py).

    Returns the exit code of the last helper that failed, or 0 on success.
    """
    import argparse
    parser = argparse.ArgumentParser(description="" \
                        "Update all branches:\n" \
                        "* Generate a temp messages.txt file.\n" \
                        "* Use it to generate a temp .pot file.\n" \
                        "* Use it to update all .pos in /branches.")
    parser.add_argument('--pproc-contexts', action="store_true",
                        help="Pre-process pos to avoid having plenty of "
                             "fuzzy msgids just because a context was "
                             "added/changed!")
    parser.add_argument('-c', '--no_checks', default=True,
                        action="store_false",
                        help="No checks over UI messages.")
    parser.add_argument('-a', '--add', action="store_true",
                        help="Add missing pos (useful only when one or " \
                             "more languages are given!).")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    ret = 0
    msgfile = potfile = None
    try:
        # Generate a temp messages file.
        dummy, msgfile = tempfile.mkstemp(suffix=".txt",
                                          prefix="blender_messages_")
        os.close(dummy)
        cmd = (PY3, "./update_msg.py", "-o", msgfile)
        t = subprocess.call(cmd)
        if t:
            ret = t

        # Regenerate POTFILES.in.
#        cmd = (PY3, "./update_potinput.py")
#        t = subprocess.call(cmd)
#        if t:
#            ret = t

        # Generate a temp pot file.
        dummy, potfile = tempfile.mkstemp(suffix=".pot",
                                          prefix="blender_pot_")
        os.close(dummy)
        cmd = [PY3, "./update_pot.py", "-i", msgfile, "-o", potfile]
        if not args.no_checks:
            cmd.append("-c")
        t = subprocess.call(cmd)
        if t:
            ret = t

        # Update branches po files.
        cmd = [PY3, "./update_po.py", "-i", potfile]
        if args.langs:
            if args.add:
                cmd.append("-a")
            cmd += args.langs
        if args.pproc_contexts:
            cmd.append("--pproc-contexts")
        t = subprocess.call(cmd)
        if t:
            ret = t
    finally:
        # The mkstemp files were previously never deleted — remove them so
        # repeated runs do not litter the temp directory.
        for tmp_path in (msgfile, potfile):
            if tmp_path and os.path.exists(tmp_path):
                os.remove(tmp_path)
    return ret
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
sys.exit(main())

@ -0,0 +1,91 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Create or update mos under /trunk/locale/…
import subprocess
import os
import sys
import settings
import utils
# Local aliases for the shared i18n settings this script uses.
GETTEXT_MSGFMT_EXECUTABLE = settings.GETTEXT_MSGFMT_EXECUTABLE
SOURCE_DIR = settings.SOURCE_DIR
TRUNK_MO_DIR = settings.TRUNK_MO_DIR
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
DOMAIN = settings.DOMAIN
def process_po(po, lang):
    """Compile one po file into <DOMAIN>.mo under the trunk mo tree for the
    given language, printing msgfmt statistics.
    Returns msgfmt's exit code."""
    lc_messages_dir = os.path.join(TRUNK_MO_DIR, lang, "LC_MESSAGES")
    # Create dirs if not existing!
    os.makedirs(lc_messages_dir, exist_ok=True)
    mo_path = os.path.join(lc_messages_dir, ".".join((DOMAIN, "mo")))
    # show stats
    cmd = (GETTEXT_MSGFMT_EXECUTABLE, "--statistics", po, "-o", mo_path)
    print("Running ", " ".join(cmd))
    result = subprocess.call(cmd)
    print("Finished.")
    return result
def main():
    """Create or update the mos for the languages given on the command line,
    or for every (non-raw) po under the trunk po dir when none are given.
    Returns the last failing msgfmt exit code, or 0."""
    import argparse
    parser = argparse.ArgumentParser(description="Create or update mos "
                                     "under {}.".format(TRUNK_MO_DIR))
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    ret = 0
    if args.langs:
        # Explicit language list: look each po up under the trunk po dir,
        # silently skipping languages that have no po there.
        candidates = ((os.path.join(TRUNK_PO_DIR, lang + ".po"), lang)
                      for lang in args.langs)
        targets = [(po, lang) for po, lang in candidates
                   if os.path.exists(po)]
    else:
        # No languages given: every real po (skip the *_raw.po files).
        targets = []
        for fname in os.listdir(TRUNK_PO_DIR):
            if fname.endswith(".po") and not fname.endswith("_raw.po"):
                targets.append((os.path.join(TRUNK_PO_DIR, fname),
                                fname[:-3]))
    for po, lang in targets:
        t = process_po(po, lang)
        if t:
            ret = t
    return ret


if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

@ -0,0 +1,69 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8-80 compliant>
# Write out messages.txt from Blender.
import os
import sys
import subprocess
import settings
# Command line used to run Blender headless and make it dump its UI
# messages via the bl_process_msg.py helper.  Element [0] (the executable)
# and trailing options may be patched by main() below before use.
BLENDER_ARGS = [
    settings.BLENDER_EXEC,
    "--background",
    "--factory-startup",
    "--python",
    os.path.join(settings.TOOLS_DIR, "bl_process_msg.py"),
    "--",
    "-m",
]
def main():
    """Run Blender in background mode to write out the messages file.
    Returns Blender's exit code."""
    import argparse
    parser = argparse.ArgumentParser(description="Write out messages.txt "
                                     "from Blender.")
    parser.add_argument('-c', '--no_checks', default=True,
                        action="store_false",
                        help="No checks over UI messages.")
    parser.add_argument('-b', '--blender', help="Blender executable path.")
    parser.add_argument('-o', '--output', help="Output messages file path.")
    args = parser.parse_args()

    if args.blender:
        # Override the default executable taken from settings.
        BLENDER_ARGS[0] = args.blender
    if not args.no_checks:
        BLENDER_ARGS.append("-c")
    if args.output:
        BLENDER_ARGS.extend(("-o", args.output))
    return subprocess.call(BLENDER_ARGS)


if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    ret = main()
    if ret:
        raise(Exception(ret))

@ -0,0 +1,166 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update pos in the branches from blender.pot in /trunk/po dir.
import subprocess
import os
import sys
from codecs import open
import shutil
import settings
import utils
# Local aliases for the shared i18n settings this script uses.
# FILE_NAME_POT may be overridden at runtime by main()'s -i option.
GETTEXT_MSGMERGE_EXECUTABLE = settings.GETTEXT_MSGMERGE_EXECUTABLE
BRANCHES_DIR = settings.BRANCHES_DIR
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
FILE_NAME_POT = settings.FILE_NAME_POT
def pproc_newcontext_po(po, pot_messages, pot_stats):
    """Pre-process a po before merging the new pot into it.

    For every (context, msgid) of the pot whose context is not yet known to
    this po, seed the po with an existing valid translation of the same
    msgid found under another context — so that msgmerge does not mark all
    those messages fuzzy just because their context changed.

    po: path of the po file, rewritten in place.
    pot_messages: messages dict as returned by utils.parse_messages().
    pot_stats: stats dict from utils.parse_messages() (currently unused).
    Returns 0 (always "succeeds").
    """
    print("Adding new contexts to {}...".format(po))
    messages, state, stats = utils.parse_messages(po)
    known_ctxt = stats["contexts"]
    print("Already known (present) context(s): {}".format(str(known_ctxt)))

    new_ctxt = set()
    added = 0
    # Only use valid already translated messages!
    allowed_keys = state["trans_msg"] - state["fuzzy_msg"] - state["comm_msg"]
    for key in pot_messages.keys():
        ctxt, msgid = key
        if ctxt in known_ctxt:
            continue
        new_ctxt.add(ctxt)
        for t_ctxt in known_ctxt:
            # XXX The first match will win, this might not be optimal...
            t_key = (t_ctxt, msgid)
            if t_key in allowed_keys:
                # Wrong comments (sources) will be removed by msgmerge...
                # NOTE(review): this aliases the existing entry dict (no
                # copy) before overriding its msgctxt_lines — so the donor
                # entry's context lines are mutated too; msgmerge rewrites
                # the file right after, but confirm this is intended.
                messages[key] = messages[t_key]
                messages[key]["msgctxt_lines"] = [ctxt]
                added += 1
    utils.write_messages(po, messages, state["comm_msg"], state["fuzzy_msg"])
    print("Finished!\n {} new context(s) was/were added {}, adding {} new "
          "messages.\n".format(len(new_ctxt), str(new_ctxt), added))
    return 0
def process_po(po, lang):
    """Merge the (possibly overridden) FILE_NAME_POT into the given po file
    via msgmerge, in place. Returns msgmerge's exit code."""
    merge_cmd = (
        GETTEXT_MSGMERGE_EXECUTABLE,
        "--update",
        "--no-wrap",
        "--backup=none",
        "--lang={}".format(lang),
        po,
        FILE_NAME_POT,
    )
    print("Updating {}...".format(po))
    print("Running ", " ".join(merge_cmd))
    result = subprocess.call(merge_cmd)
    print("Finished!\n")
    return result
def main():
    """Merge the pot into po files, in /branches (default) or /trunk/po
    (-t), optionally creating missing pos (-a) and pre-seeding new contexts
    (--pproc-contexts). Returns the last failing step's exit code, or 0."""
    import argparse
    # NOTE(review): description is a copy-paste from update_msg.py — this
    # script updates pos, it does not "write out messages.txt"; consider
    # fixing the help text.
    parser = argparse.ArgumentParser(description="Write out messages.txt "
                                     "from Blender.")
    parser.add_argument('-t', '--trunk', action="store_true",
                        help="Update pos in /trunk/po rather than /branches.")
    parser.add_argument('-i', '--input', metavar="File",
                        help="Input pot file path.")
    parser.add_argument('--pproc-contexts', action="store_true",
                        help="Pre-process pos to avoid having plenty of "
                             "fuzzy msgids just because a context was "
                             "added/changed!")
    parser.add_argument('-a', '--add', action="store_true",
                        help="Add missing pos (useful only when one or "
                             "more languages are given!).")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    if args.input:
        global FILE_NAME_POT
        FILE_NAME_POT = args.input

    ret = 0

    # When context pre-processing is requested, parse the pot once up front;
    # otherwise use a no-op stand-in so the loops below stay uniform.
    if args.pproc_contexts:
        _ctxt_proc = pproc_newcontext_po
        pot_messages, _a, pot_stats = utils.parse_messages(FILE_NAME_POT)
    else:
        _ctxt_proc = lambda a, b, c: 0
        pot_messages, pot_stats = None, None

    if args.langs:
        # Explicit language list: per-language po in trunk or its branch dir.
        for lang in args.langs:
            if args.trunk:
                dr = TRUNK_PO_DIR
                po = os.path.join(dr, ".".join((lang, "po")))
            else:
                dr = os.path.join(BRANCHES_DIR, lang)
                po = os.path.join(dr, ".".join((lang, "po")))
            if args.add:
                # Create the branch dir and seed a fresh po from the pot.
                if not os.path.exists(dr):
                    os.makedirs(dr)
                if not os.path.exists(po):
                    shutil.copy(FILE_NAME_POT, po)
            if args.add or os.path.exists(po):
                t = _ctxt_proc(po, pot_messages, pot_stats)
                if t:
                    ret = t
                t = process_po(po, lang)
                if t:
                    ret = t
    elif args.trunk:
        # All pos directly under /trunk/po.
        for po in os.listdir(TRUNK_PO_DIR):
            if po.endswith(".po"):
                lang = os.path.basename(po)[:-3]
                po = os.path.join(TRUNK_PO_DIR, po)
                t = _ctxt_proc(po, pot_messages, pot_stats)
                if t:
                    ret = t
                t = process_po(po, lang)
                if t:
                    ret = t
    else:
        # All existing branch pos (<BRANCHES_DIR>/<lang>/<lang>.po).
        for lang in os.listdir(BRANCHES_DIR):
            po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
            if os.path.exists(po):
                t = _ctxt_proc(po, pot_messages, pot_stats)
                if t:
                    ret = t
                t = process_po(po, lang)
                if t:
                    ret = t
    return ret


if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

@ -0,0 +1,314 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update blender.pot file from messages.txt
import subprocess
import os
import sys
import re
#from codecs import open
import tempfile
import argparse
import time
import pickle
import settings
import utils
# Local aliases for the shared i18n settings this script uses.
# FILE_NAME_MESSAGES and FILE_NAME_POT may be overridden by main()'s
# -i / -o options.
COMMENT_PREFIX = settings.COMMENT_PREFIX
COMMENT_PREFIX_SOURCE = settings.COMMENT_PREFIX_SOURCE
CONTEXT_PREFIX = settings.CONTEXT_PREFIX
FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
#FILE_NAME_POTFILES = settings.FILE_NAME_POTFILES
FILE_NAME_POT = settings.FILE_NAME_POT
SOURCE_DIR = settings.SOURCE_DIR
POTFILES_DIR = settings.POTFILES_SOURCE_DIR
SRC_POTFILES = settings.FILE_NAME_SRC_POTFILES
#GETTEXT_XGETTEXT_EXECUTABLE = settings.GETTEXT_XGETTEXT_EXECUTABLE
#GETTEXT_KEYWORDS = settings.GETTEXT_KEYWORDS
CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
PYGETTEXT_ALLOWED_EXTS = settings.PYGETTEXT_ALLOWED_EXTS
SVN_EXECUTABLE = settings.SVN_EXECUTABLE
WARN_NC = settings.WARN_MSGID_NOT_CAPITALIZED
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
SPELL_CACHE = settings.SPELL_CACHE
#def generate_valid_potfiles(final_potfiles):
# "Generates a temp potfiles.in with aboslute paths."
# with open(FILE_NAME_POTFILES, 'r', 'utf-8') as f, \
# open(final_potfiles, 'w', 'utf-8') as w:
# for line in f:
# line = utils.stripeol(line)
# if line:
# w.write("".join((os.path.join(SOURCE_DIR,
# os.path.normpath(line)), "\n")))
# Do this only once!
# Get contexts defined in blf.
CONTEXTS = {}
with open(os.path.join(SOURCE_DIR, settings.PYGETTEXT_CONTEXTS_DEFSRC)) as f:
    reg = re.compile(settings.PYGETTEXT_CONTEXTS)
    f = f.read()
    # This regex is supposed to yield tuples
    # (key=C_macro_name, value=C_string).
    CONTEXTS = dict(m.groups() for m in reg.finditer(f))

# Build regexes to extract messages (with optinal contexts) from C source.
# Each compiled pattern's bound .search is stored, ready to be applied
# repeatedly by check_file() below.
pygettexts = tuple(re.compile(r).search
                   for r in settings.PYGETTEXT_KEYWORDS)
# clean_str() re-assembles a C string literal's content from its pieces,
# using the "clean" group of settings.str_clean_re for each fragment.
_clean_str = re.compile(settings.str_clean_re).finditer
clean_str = lambda s: "".join(m.group("clean") for m in _clean_str(s))
def check_file(path, rel_path, messages):
    """Scan one source file with all pygettext regexes, adding every found
    (context, message) key to messages, mapped to a growing list of
    "rel_path:line" source references.
    """
    with open(path, encoding="utf-8") as f:
        f = f.read()
        for srch in pygettexts:
            m = srch(f)
            # Running line number and scan offset for this pattern.
            line = pos = 0
            while m:
                d = m.groupdict()
                # Context.
                ctxt = d.get("ctxt_raw")
                if ctxt:
                    if ctxt in CONTEXTS:
                        # A known BLF context macro: use its string value.
                        ctxt = CONTEXTS[ctxt]
                    elif '"' in ctxt or "'" in ctxt:
                        # An inline string literal: strip quotes/concat.
                        ctxt = clean_str(ctxt)
                    else:
                        print("WARNING: raw context “{}” couldnt be resolved!"
                              "".format(ctxt))
                        ctxt = CONTEXT_DEFAULT
                else:
                    ctxt = CONTEXT_DEFAULT
                # Message.
                msg = d.get("msg_raw")
                if msg:
                    if '"' in msg or "'" in msg:
                        msg = clean_str(msg)
                    else:
                        print("WARNING: raw message “{}” couldnt be resolved!"
                              "".format(msg))
                        msg = ""
                else:
                    msg = ""
                # Line.
                # Count newlines between the previous match end and this
                # match start to keep an absolute line number cheaply.
                line += f[pos:m.start()].count('\n')
                # And we are done for this item!
                messages.setdefault((ctxt, msg), []).append(":".join((rel_path, str(line))))
                pos = m.end()
                line += f[m.start():pos].count('\n')
                m = srch(f, pos)
def py_xgettext(messages):
    """Fake xgettext: walk all allowed source files under POTFILES_DIR,
    extracting i18n messages into the messages dict
    ({(ctxt, msgid): ["file:line", ...]}).

    SRC_POTFILES lists exceptions, one path (relative to SOURCE_DIR) per
    line: a leading '-' marks a file to skip, '#' a comment, anything else
    forces inclusion of that file even outside the walked tree.
    """
    with open(SRC_POTFILES) as src:
        forbidden = set()
        forced = set()
        for l in src:
            if l[0] == '-':
                forbidden.add(l[1:].rstrip('\n'))
            elif l[0] != '#':
                forced.add(l.rstrip('\n'))
    for root, dirs, files in os.walk(POTFILES_DIR):
        if "/.svn" in root:
            continue
        for fname in files:
            if os.path.splitext(fname)[1] not in PYGETTEXT_ALLOWED_EXTS:
                continue
            path = os.path.join(root, fname)
            rel_path = os.path.relpath(path, SOURCE_DIR)
            # Forced files are handled below; forbidden ones never.
            if rel_path in forbidden | forced:
                continue
            check_file(path, rel_path, messages)
    for path in forced:
        # Forced paths are relative to SOURCE_DIR — test existence of the
        # actual absolute path (the one we open), not of `path` relative to
        # the CWD, which silently skipped all forced files whenever the
        # script was run from another directory.
        abs_path = os.path.join(SOURCE_DIR, path)
        if os.path.exists(abs_path):
            check_file(abs_path, path, messages)
# Spell checking!
import enchant
# US-English dictionary used to validate every extracted msgid word.
dict_spelling = enchant.Dict("en_US")

from spell_check_utils import (dict_uimsgs,
                               split_words,
                               )

# Lower-cased words already validated once this run, never re-checked.
_spell_checked = set()
def spell_check(txt, cache):
    """Spell-check all words of txt; return a list of warning strings with
    suggestions (empty when everything is clean).

    cache, when not None, is a set of messages already known to be clean:
    cached messages are skipped entirely, and clean ones are added to it.
    """
    ret = []
    if cache is not None and txt in cache:
        return ret
    for w in split_words(txt):
        w_lower = w.lower()
        # Two direct membership tests — the original allocated a brand new
        # union set (dict_uimsgs | _spell_checked) for every single word.
        if w_lower in dict_uimsgs or w_lower in _spell_checked:
            continue
        if not dict_spelling.check(w):
            ret.append("{}: suggestions are ({})"
                       .format(w, "'" + "', '".join(dict_spelling.suggest(w))
                                  + "'"))
        else:
            _spell_checked.add(w_lower)
    if not ret:
        if cache is not None:
            cache.add(txt)
    return ret
def get_svnrev():
    """Return the svn revision of the Blender source tree, as bytes."""
    cmd = [SVN_EXECUTABLE,
           "info",
           "--xml",
           SOURCE_DIR,
           ]
    xml = subprocess.check_output(cmd)
    # Raw bytes literal: '\d' in a plain literal only works through the
    # unknown-escape fallback and raises DeprecationWarning on newer Py3.
    return re.search(rb'revision="(\d+)"', xml).group(1)
def gen_empty_pot():
    """Build an empty pot (header-only messages/states), stamped with the
    current UTC time and the svn revision of the source tree."""
    revision = get_svnrev()
    now_utc = time.gmtime()
    creation_date = time.strftime("%Y-%m-%d %H:%M+0000", now_utc)
    year = time.strftime("%Y", now_utc)
    return utils.gen_empty_messages(revision, creation_date, year)
def merge_messages(msgs, states, messages, do_checks, spell_cache):
    """Merge extracted messages ({(ctxt, msgid): [src, ...]}) into the pot
    dict msgs, optionally spell-checking each msgid first.
    Returns (number of newly added messages, number already present)."""
    num_added = 0
    num_present = 0
    for (context, msgid), srcs in messages.items():
        if do_checks:
            err = spell_check(msgid, spell_cache)
            if err:
                print("WARNING: spell check failed on “" + msgid + "”:")
                print("\t\t" + "\n\t\t".join(err))
                print("\tFrom:\n\t\t" + "\n\t\t".join(srcs))
        # Escape some chars in msgid! (backslashes first, of course)
        for raw, escaped in (("\\", "\\\\"), ("\"", "\\\""), ("\t", "\\t")):
            msgid = msgid.replace(raw, escaped)
        srcs = [COMMENT_PREFIX_SOURCE + s for s in srcs]
        key = (context, msgid)
        if key in msgs:
            # Already known entry: we only need to merge the comments!
            msgs[key]["comment_lines"].extend(srcs)
            num_present += 1
        else:
            msgs[key] = {"msgid_lines": [msgid],
                         "msgstr_lines": [""],
                         "comment_lines": srcs,
                         "msgctxt_lines": [context]}
            num_added += 1
    return num_added, num_present
def main():
    """Build FILE_NAME_POT: scan the C/py sources with the fake py gettext,
    then merge in the RNA-generated FILE_NAME_MESSAGES, spell-checking all
    msgids along the way. Returns 0."""
    parser = argparse.ArgumentParser(description="Update blender.pot file "
                                     "from messages.txt")
    parser.add_argument('-w', '--warning', action="store_true",
                        help="Show warnings.")
    parser.add_argument('-i', '--input', metavar="File",
                        help="Input messages file path.")
    parser.add_argument('-o', '--output', metavar="File",
                        help="Output pot file path.")
    args = parser.parse_args()

    if args.input:
        global FILE_NAME_MESSAGES
        FILE_NAME_MESSAGES = args.input
    if args.output:
        global FILE_NAME_POT
        FILE_NAME_POT = args.output

    print("Running fake py gettext…")
    # Not using any more xgettext, simpler to do it ourself!
    messages = {}
    py_xgettext(messages)
    print("Finished, found {} messages.".format(len(messages)))

    # Load the spell-check cache (set of already-validated msgids), if any.
    # (Two leftover debug print(len(spell_cache)) calls were removed here
    # and before the final dump below.)
    if SPELL_CACHE and os.path.exists(SPELL_CACHE):
        with open(SPELL_CACHE, 'rb') as f:
            spell_cache = pickle.load(f)
    else:
        spell_cache = set()

    print("Generating POT file {}".format(FILE_NAME_POT))
    msgs, states = gen_empty_pot()
    tot_messages, _a = merge_messages(msgs, states, messages,
                                      True, spell_cache)

    # add messages collected automatically from RNA
    print("\tMerging RNA messages from {}".format(FILE_NAME_MESSAGES))
    messages = {}
    with open(FILE_NAME_MESSAGES, encoding="utf-8") as f:
        srcs = []
        context = ""
        for line in f:
            line = utils.stripeol(line)
            if line.startswith(COMMENT_PREFIX):
                srcs.append(line[len(COMMENT_PREFIX):].strip())
            elif line.startswith(CONTEXT_PREFIX):
                context = line[len(CONTEXT_PREFIX):].strip()
            else:
                # Any other line is a msgid; the comments/context collected
                # just above belong to it.
                key = (context, line)
                messages[key] = srcs
                srcs = []
                context = ""
    num_added, num_present = merge_messages(msgs, states, messages,
                                            True, spell_cache)
    tot_messages += num_added
    print("\tMerged {} messages ({} were already present)."
          "".format(num_added, num_present))

    # Write back all messages into blender.pot.
    utils.write_messages(FILE_NAME_POT, msgs, states["comm_msg"],
                         states["fuzzy_msg"])

    if SPELL_CACHE and spell_cache:
        with open(SPELL_CACHE, 'wb') as f:
            pickle.dump(spell_cache, f)

    # -1: the header entry from gen_empty_pot() is not a real message.
    print("Finished, total: {} messages!".format(tot_messages - 1))
    return 0


if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

@ -0,0 +1,132 @@
#!/usr/bin/python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Update trunk from branches:
# * Remove pos in trunk.
# * Copy pos from branches advanced enough.
# * Clean pos in trunk.
# * Compile pos in trunk in mos, keeping track of those failing.
# * Remove pos, mos (and their dirs) that failed to compile or
# are no more present in trunk.
import subprocess
import os
import sys
import shutil
import settings
# Local aliases for the shared i18n settings this script uses.
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
TRUNK_MO_DIR = settings.TRUNK_MO_DIR
PY3 = settings.PYTHON3_EXEC
def main():
    """Update trunk from branches:
    * Remove pos in trunk.
    * Copy pos from branches advanced enough.
    * Clean pos in trunk.
    * Compile pos in trunk in mos, keeping track of those failing.
    * Remove pos/mos (and their dirs) that failed or are gone from trunk.

    Returns the last failing sub-step's exit code, or 0.
    """
    import argparse
    parser = argparse.ArgumentParser(description=""
                                     "Update trunk from branches:\n"
                                     "* Remove pos in trunk.\n"
                                     "* Copy pos from branches advanced enough.\n"
                                     "* Clean pos in trunk.\n"
                                     "* Compile pos in trunk in mos, keeping "
                                     "track of those failing.\n"
                                     "* Remove pos and mos (and their dirs) that "
                                     "failed to compile or are no more present in trunk.")
    parser.add_argument('-t', '--threshold', type=int,
                        help="Import threshold, as a percentage.")
    parser.add_argument('-p', '--po', action="store_false",
                        help="Do not remove failing pos.")
    parser.add_argument('-m', '--mo', action="store_false",
                        help="Do not remove failing mos.")
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
                        help="Restrict processed languages to those.")
    args = parser.parse_args()

    ret = 0
    failed = set()

    # Remove pos in trunk.
    for po in os.listdir(TRUNK_PO_DIR):
        if po.endswith(".po"):
            lang = os.path.basename(po)[:-3]
            if args.langs and lang not in args.langs:
                continue
            po = os.path.join(TRUNK_PO_DIR, po)
            os.remove(po)

    # Copy pos from branches.
    cmd = [PY3, "./import_po_from_branches.py", "-s"]
    if args.threshold is not None:
        cmd += ["-t", str(args.threshold)]
    if args.langs:
        cmd += args.langs
    t = subprocess.call(cmd)
    if t:
        ret = t

    # Add in failed all mos no more having relevant pos in trunk.
    for lang in os.listdir(TRUNK_MO_DIR):
        if lang == ".svn":
            continue  # !!!
        if not os.path.exists(os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))):
            failed.add(lang)

    # Check and compile each po separatly, to keep track of those failing.
    # XXX There should not be any failing at this stage, import step is
    #     supposed to have already filtered them out!
    for po in os.listdir(TRUNK_PO_DIR):
        if po.endswith(".po") and not po.endswith("_raw.po"):
            lang = os.path.basename(po)[:-3]
            if args.langs and lang not in args.langs:
                continue
            cmd = [PY3, "./clean_po.py", "-t", "-s", lang]
            t = subprocess.call(cmd)
            if t:
                ret = t
                failed.add(lang)
                continue
            cmd = [PY3, "./update_mo.py", lang]
            t = subprocess.call(cmd)
            if t:
                ret = t
                failed.add(lang)

    # Remove failing pos, mos and related dirs.
    for lang in failed:
        print("Lang “{}” failed, removing it...".format(lang))
        if args.po:
            po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
            if os.path.exists(po):
                os.remove(po)
        if args.mo:
            mo = os.path.join(TRUNK_MO_DIR, lang)
            if os.path.exists(mo):
                shutil.rmtree(mo)

    # Fix: main() previously fell off the end and returned None, so
    # sys.exit(main()) always exited 0 — even when sub-steps had failed.
    return ret


if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    sys.exit(main())

@ -0,0 +1,23 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
import os
import settings

@ -0,0 +1,377 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Some misc utilities...
import os
import sys
import collections
from codecs import open
import settings
# Local aliases for the shared i18n settings this module uses.
COMMENT_PREFIX = settings.COMMENT_PREFIX
WARN_NC = settings.WARN_MSGID_NOT_CAPITALIZED
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
def stripeol(s):
    """Strip any trailing end-of-line characters (LF and/or CR) from s."""
    return s.rstrip("\r\n")
# XXX For now, we assume that all messages > 30 chars are tooltips!
def is_tooltip(msgid):
    """Heuristic: any message longer than 30 characters is a tooltip."""
    threshold = 30
    return len(msgid) > threshold
def parse_messages(fname):
    """
    Returns a tupple (messages, states, stats).
    messages is an odereddict of dicts
        {(ctxt, msgid): {msgid_lines:, msgstr_lines:,
                         comment_lines:, msgctxt_lines:}}.
    states is a dict of three sets of (msgid, ctxt), and a boolean flag
    indicating the .po is somewhat broken
        {trans_msg:, fuzzy_msg:, comm_msg:, is_broken:}.
    stats is a dict of values
        {tot_msg:, trans_msg:, tot_ttips:, trans_ttips:, comm_msg:,
         nbr_signs:, nbr_trans_signs:, contexts: set()}.
    Note: This function will silently "arrange" mis-formated entries, thus
    using afterward write_messages() should always produce a po-valid file,
    though not correct!
    """
    # Global counters, updated by finalize_message() below.
    tot_messages = 0
    tot_tooltips = 0
    trans_messages = 0
    trans_tooltips = 0
    comm_messages = 0
    nbr_signs = 0
    nbr_trans_signs = 0
    contexts = set()

    # Line-parser state: which statement kind is being read, plus status
    # flags and accumulated lines of the entry currently being parsed.
    reading_msgid = False
    reading_msgstr = False
    reading_msgctxt = False
    reading_comment = False
    is_translated = False
    is_fuzzy = False
    is_commented = False
    is_broken = False
    msgid_lines = []
    msgstr_lines = []
    msgctxt_lines = []
    comment_lines = []

    # Fall back to a plain dict on pythons without OrderedDict.
    messages = getattr(collections, 'OrderedDict', dict)()
    translated_messages = set()
    fuzzy_messages = set()
    commented_messages = set()

    def clean_vars():
        # Reset all per-entry state, ready for the next message.
        nonlocal reading_msgid, reading_msgstr, reading_msgctxt, \
                 reading_comment, is_fuzzy, is_translated, is_commented, \
                 msgid_lines, msgstr_lines, msgctxt_lines, comment_lines
        reading_msgid = reading_msgstr = reading_msgctxt = \
                        reading_comment = False
        # NOTE(review): is_tooltip here is NOT in the nonlocal list, so this
        # creates a throwaway local that also shadows the module-level
        # is_tooltip() helper within this function — presumably a leftover;
        # confirm before cleaning up.
        is_tooltip = is_fuzzy = is_translated = is_commented = False
        msgid_lines = []
        msgstr_lines = []
        msgctxt_lines = []
        comment_lines = []

    def finalize_message():
        # Commit the currently-accumulated entry into messages and the
        # stats counters, then reset the parser state.
        nonlocal reading_msgid, reading_msgstr, reading_msgctxt, \
                 reading_comment, is_fuzzy, is_translated, is_commented, \
                 msgid_lines, msgstr_lines, msgctxt_lines, comment_lines, \
                 messages, translated_messages, fuzzy_messages, \
                 commented_messages, \
                 tot_messages, tot_tooltips, trans_messages, trans_tooltips, \
                 comm_messages, nbr_signs, nbr_trans_signs, contexts

        msgid = "".join(msgid_lines)
        msgctxt = "".join(msgctxt_lines)
        msgkey = (msgctxt, msgid)
        is_ttip = is_tooltip(msgid)

        # Never allow overriding existing msgid/msgctxt pairs!
        if msgkey in messages:
            clean_vars()
            return

        nbr_signs += len(msgid)
        if is_commented:
            commented_messages.add(msgkey)
        elif is_fuzzy:
            fuzzy_messages.add(msgkey)
        elif is_translated:
            translated_messages.add(msgkey)
            nbr_trans_signs += len("".join(msgstr_lines))
        messages[msgkey] = {"msgid_lines" : msgid_lines,
                            "msgstr_lines" : msgstr_lines,
                            "comment_lines": comment_lines,
                            "msgctxt_lines": msgctxt_lines}

        if is_commented:
            comm_messages += 1
        else:
            tot_messages += 1
            if is_ttip:
                tot_tooltips += 1
            if not is_fuzzy and is_translated:
                trans_messages += 1
                if is_ttip:
                    trans_tooltips += 1
        if msgctxt not in contexts:
            contexts.add(msgctxt)

        clean_vars()

    with open(fname, 'r', "utf-8") as f:
        for line_nr, line in enumerate(f):
            line = stripeol(line)
            if line == "":
                # Blank line terminates the current entry.
                finalize_message()

            elif line.startswith("msgctxt") or \
                 line.startswith("".join((COMMENT_PREFIX, "msgctxt"))):
                reading_comment = False
                # NOTE(review): this sets reading_ctxt, but the declared
                # state flag is reading_msgctxt — looks like a typo, whose
                # effect is that multi-line msgctxt continuations fall into
                # the final else below and mark the po broken; confirm.
                reading_ctxt = True
                if line.startswith(COMMENT_PREFIX):
                    is_commented = True
                    # 9 == len('msgctxt "'); trailing -1 drops closing '"'.
                    line = line[9 + len(COMMENT_PREFIX):-1]
                else:
                    line = line[9:-1]
                msgctxt_lines.append(line)

            elif line.startswith("msgid") or \
                 line.startswith("".join((COMMENT_PREFIX, "msgid"))):
                reading_comment = False
                reading_msgid = True
                if line.startswith(COMMENT_PREFIX):
                    is_commented = True
                    # 7 == len('msgid "').
                    line = line[7 + len(COMMENT_PREFIX):-1]
                else:
                    line = line[7:-1]
                msgid_lines.append(line)

            elif line.startswith("msgstr") or \
                 line.startswith("".join((COMMENT_PREFIX, "msgstr"))):
                if not reading_msgid:
                    # msgstr with no preceding msgid: malformed po.
                    is_broken = True
                else:
                    reading_msgid = False
                reading_msgstr = True
                if line.startswith(COMMENT_PREFIX):
                    # 8 == len('msgstr "').
                    line = line[8 + len(COMMENT_PREFIX):-1]
                    if not is_commented:
                        # Commented msgstr after an uncommented msgid.
                        is_broken = True
                else:
                    line = line[8:-1]
                    if is_commented:
                        # Uncommented msgstr after a commented msgid.
                        is_broken = True
                msgstr_lines.append(line)
                if line:
                    is_translated = True

            elif line.startswith("#"):
                if reading_msgid:
                    # Continuation line of a (commented?) msgid.
                    if is_commented:
                        msgid_lines.append(line[1 + len(COMMENT_PREFIX):-1])
                    else:
                        msgid_lines.append(line)
                        is_broken = True
                elif reading_msgstr:
                    # Continuation line of a (commented?) msgstr.
                    if is_commented:
                        msgstr_lines.append(line[1 + len(COMMENT_PREFIX):-1])
                    else:
                        msgstr_lines.append(line)
                        is_broken = True
                else:
                    # A real comment line (or the fuzzy marker).
                    if line.startswith("#, fuzzy"):
                        is_fuzzy = True
                    else:
                        comment_lines.append(line)
                    reading_comment = True

            else:
                # Bare quoted continuation lines.
                if reading_msgid:
                    msgid_lines.append(line[1:-1])
                elif reading_msgstr:
                    line = line[1:-1]
                    msgstr_lines.append(line)
                    if not is_translated and line:
                        is_translated = True
                else:
                    is_broken = True

        # If no final empty line, last message is not finalized!
        if reading_msgstr:
            finalize_message()

    return (messages,
            {"trans_msg": translated_messages,
             "fuzzy_msg": fuzzy_messages,
             "comm_msg" : commented_messages,
             "is_broken": is_broken},
            {"tot_msg" : tot_messages,
             "trans_msg" : trans_messages,
             "tot_ttips" : tot_tooltips,
             "trans_ttips" : trans_tooltips,
             "comm_msg" : comm_messages,
             "nbr_signs" : nbr_signs,
             "nbr_trans_signs": nbr_trans_signs,
             "contexts" : contexts})
def write_messages(fname, messages, commented, fuzzy):
    """Write in fname file the content of messages (similar to
    parse_messages returned values). commented and fuzzy are two sets
    containing (msgctxt, msgid) keys. Returns the number of written
    messages."""
    num = 0
    with open(fname, 'w', "utf-8") as f:
        for msgkey, val in messages.items():
            msgctxt, msgid = msgkey
            f.write("\n".join(val["comment_lines"]))
            # Only mark as fuzzy if msgstr is not empty!
            if msgkey in fuzzy and "".join(val["msgstr_lines"]):
                f.write("\n#, fuzzy")
            if msgkey in commented:
                # Commented-out entry: every po line gets COMMENT_PREFIX,
                # including the continuation lines of multi-line strings.
                if msgctxt:
                    f.write("\n{}msgctxt \"".format(COMMENT_PREFIX))
                    f.write("\"\n{}\"".format(COMMENT_PREFIX).join(
                            val["msgctxt_lines"]))
                    f.write("\"")
                f.write("\n{}msgid \"".format(COMMENT_PREFIX))
                f.write("\"\n{}\"".format(COMMENT_PREFIX).join(
                        val["msgid_lines"]))
                f.write("\"\n{}msgstr \"".format(COMMENT_PREFIX))
                f.write("\"\n{}\"".format(COMMENT_PREFIX).join(
                        val["msgstr_lines"]))
                f.write("\"\n\n")
            else:
                # Regular entry: msgctxt (optional), msgid, msgstr, each a
                # possibly multi-line quoted string.
                if msgctxt:
                    f.write("\nmsgctxt \"")
                    f.write("\"\n\"".join(val["msgctxt_lines"]))
                    f.write("\"")
                f.write("\nmsgid \"")
                f.write("\"\n\"".join(val["msgid_lines"]))
                f.write("\"\nmsgstr \"")
                f.write("\"\n\"".join(val["msgstr_lines"]))
                f.write("\"\n\n")
            num += 1
    return num
def gen_empty_messages(blender_rev, time_str, year_str):
    """Generate an empty messages & state data (only header if present!).

    blender_rev: svn revision stamped into Project-Id-Version.
    time_str: POT-Creation-Date value.
    year_str: copyright year for the header comment.
    Returns (messages, states) shaped like parse_messages()' first two
    return values, containing only the po header entry.
    """
    header_key = ("", "")

    messages = getattr(collections, 'OrderedDict', dict)()
    messages[header_key] = {
        "msgid_lines": [""],
        "msgctxt_lines": [],
        "msgstr_lines": [
            "Project-Id-Version: Blender r{}\\n"
            "".format(blender_rev),
            "Report-Msgid-Bugs-To: \\n",
            "POT-Creation-Date: {}\\n"
            "".format(time_str),
            "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n",
            "Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n",
            "Language-Team: LANGUAGE <LL@li.org>\\n",
            "Language: \\n",
            "MIME-Version: 1.0\\n",
            "Content-Type: text/plain; charset=UTF-8\\n",
            "Content-Transfer-Encoding: 8bit\\n"
        ],
        "comment_lines": [
            "# Blender's translation file (po format).",
            "# Copyright (C) {} The Blender Foundation."
            "".format(year_str),
            # Fix: these two lines were accidentally fused by implicit
            # string concatenation into one element ("...the same
            # # license..."), emitting a malformed comment line with a
            # stray '#' in the middle of every generated po header.
            "# This file is distributed under the same",
            "# license as the Blender package.",
            "# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.",
            "#",
        ],
    }
    states = {"trans_msg": set(),
              "fuzzy_msg": {header_key},
              "comm_msg": set(),
              "is_broken": False}
    return messages, states
def print_stats(stats, glob_stats=None, prefix=""):
    """
    Print out some stats about a po file.
    glob_stats is for making global stats over several po's.
    """
    # Raw counters from the per-file stats mapping.
    nr_msgs = stats["tot_msg"]
    nr_trans = stats["trans_msg"]
    nr_ttips = stats["tot_ttips"]
    nr_trans_ttips = stats["trans_ttips"]
    nr_comm = stats["comm_msg"]
    nr_signs = stats["nbr_signs"]
    nr_trans_signs = stats["nbr_trans_signs"]
    ctxts = stats["contexts"]

    # Ratios stay 0.0 whenever their denominator would be zero.
    lvl = lvl_ttips = lvl_trans_ttips = lvl_ttips_in_trans = lvl_comm = 0.0
    if nr_msgs > 0:
        lvl = nr_trans / nr_msgs
        lvl_ttips = nr_ttips / nr_msgs
        lvl_comm = nr_comm / (nr_msgs + nr_comm)
    if nr_ttips > 0:
        lvl_trans_ttips = nr_trans_ttips / nr_ttips
    if nr_trans > 0:
        lvl_ttips_in_trans = nr_trans_ttips / nr_trans

    # Accumulate into the caller's global stats, if any were given.
    if glob_stats:
        glob_stats["nbr"] += 1.0
        glob_stats["lvl"] += lvl
        glob_stats["lvl_ttips"] += lvl_ttips
        glob_stats["lvl_trans_ttips"] += lvl_trans_ttips
        glob_stats["lvl_ttips_in_trans"] += lvl_ttips_in_trans
        glob_stats["lvl_comm"] += lvl_comm
        glob_stats["nbr_trans_signs"] += nr_trans_signs
        # The source sign count is the same for every po: record it once.
        if glob_stats["nbr_signs"] == 0:
            glob_stats["nbr_signs"] = nr_signs
        glob_stats["contexts"] |= ctxts

    # Build the report line by line; the leading "" puts prefix first too.
    report = [""]
    report.append("{:>6.1%} done! ({} translated messages over {}).\n"
                  "".format(lvl, nr_trans, nr_msgs))
    report.append("{:>6.1%} of messages are tooltips ({} over {}).\n"
                  "".format(lvl_ttips, nr_ttips, nr_msgs))
    report.append("{:>6.1%} of tooltips are translated ({} over {}).\n"
                  "".format(lvl_trans_ttips, nr_trans_ttips, nr_ttips))
    report.append("{:>6.1%} of translated messages are tooltips ({} over {}).\n"
                  "".format(lvl_ttips_in_trans, nr_trans_ttips, nr_trans))
    report.append("{:>6.1%} of messages are commented ({} over {}).\n"
                  "".format(lvl_comm, nr_comm, nr_comm + nr_msgs))
    report.append("This translation is currently made of {} signs.\n"
                  "".format(nr_trans_signs))
    print(prefix.join(report))
    return 0

@ -437,9 +437,9 @@ def BuildRNAInfo():
# rna_functions_dict = {} # store all functions directly in this type (not inherited) # rna_functions_dict = {} # store all functions directly in this type (not inherited)
def full_rna_struct_path(rna_struct): def full_rna_struct_path(rna_struct):
''' """
Needed when referencing one struct from another Needed when referencing one struct from another
''' """
nested = rna_struct.nested nested = rna_struct.nested
if nested: if nested:
return "%s.%s" % (full_rna_struct_path(nested), rna_struct.identifier) return "%s.%s" % (full_rna_struct_path(nested), rna_struct.identifier)

@ -84,7 +84,7 @@ def add_torus(major_rad, minor_rad, major_seg, minor_seg):
class AddTorus(Operator, object_utils.AddObjectHelper): class AddTorus(Operator, object_utils.AddObjectHelper):
'''Add a torus mesh''' """Add a torus mesh"""
bl_idname = "mesh.primitive_torus_add" bl_idname = "mesh.primitive_torus_add"
bl_label = "Add Torus" bl_label = "Add Torus"
bl_options = {'REGISTER', 'UNDO', 'PRESET'} bl_options = {'REGISTER', 'UNDO', 'PRESET'}

@ -24,7 +24,7 @@ from bpy.props import StringProperty
class EditExternally(Operator): class EditExternally(Operator):
'''Edit image in an external application''' """Edit image in an external application"""
bl_idname = "image.external_edit" bl_idname = "image.external_edit"
bl_label = "Image Edit Externally" bl_label = "Image Edit Externally"
bl_options = {'REGISTER'} bl_options = {'REGISTER'}

@ -25,7 +25,7 @@ from bpy.props import EnumProperty
class MeshMirrorUV(Operator): class MeshMirrorUV(Operator):
'''Copy mirror UV coordinates on the X axis based on a mirrored mesh''' """Copy mirror UV coordinates on the X axis based on a mirrored mesh"""
bl_idname = "mesh.faces_mirror_uv" bl_idname = "mesh.faces_mirror_uv"
bl_label = "Copy Mirrored UV coords" bl_label = "Copy Mirrored UV coords"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}

@ -27,7 +27,7 @@ from bpy.props import (StringProperty,
class SelectPattern(Operator): class SelectPattern(Operator):
'''Select objects matching a naming pattern''' """Select objects matching a naming pattern"""
bl_idname = "object.select_pattern" bl_idname = "object.select_pattern"
bl_label = "Select Pattern" bl_label = "Select Pattern"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}
@ -105,7 +105,7 @@ class SelectPattern(Operator):
class SelectCamera(Operator): class SelectCamera(Operator):
'''Select the active camera''' """Select the active camera"""
bl_idname = "object.select_camera" bl_idname = "object.select_camera"
bl_label = "Select Camera" bl_label = "Select Camera"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}
@ -131,7 +131,7 @@ class SelectCamera(Operator):
class SelectHierarchy(Operator): class SelectHierarchy(Operator):
"""Select object relative to the active object's position """ """Select object relative to the active object's position """ \
"""in the hierarchy""" """in the hierarchy"""
bl_idname = "object.select_hierarchy" bl_idname = "object.select_hierarchy"
bl_label = "Select Hierarchy" bl_label = "Select Hierarchy"
@ -198,7 +198,7 @@ class SelectHierarchy(Operator):
class SubdivisionSet(Operator): class SubdivisionSet(Operator):
'''Sets a Subdivision Surface Level (1-5)''' """Sets a Subdivision Surface Level (1-5)"""
bl_idname = "object.subdivision_set" bl_idname = "object.subdivision_set"
bl_label = "Subdivision Set" bl_label = "Subdivision Set"
@ -278,7 +278,7 @@ class SubdivisionSet(Operator):
class ShapeTransfer(Operator): class ShapeTransfer(Operator):
"""Copy another selected objects active shape to this one by """ """Copy another selected objects active shape to this one by """ \
"""applying the relative offsets""" """applying the relative offsets"""
bl_idname = "object.shape_key_transfer" bl_idname = "object.shape_key_transfer"
@ -468,7 +468,7 @@ class ShapeTransfer(Operator):
class JoinUVs(Operator): class JoinUVs(Operator):
'''Copy UV Layout to objects with matching geometry''' """Copy UV Layout to objects with matching geometry"""
bl_idname = "object.join_uvs" bl_idname = "object.join_uvs"
bl_label = "Join as UVs" bl_label = "Join as UVs"
@ -547,7 +547,7 @@ class JoinUVs(Operator):
class MakeDupliFace(Operator): class MakeDupliFace(Operator):
'''Make linked objects into dupli-faces''' """Make linked objects into dupli-faces"""
bl_idname = "object.make_dupli_face" bl_idname = "object.make_dupli_face"
bl_label = "Make Dupli-Face" bl_label = "Make Dupli-Face"
@ -642,7 +642,7 @@ class IsolateTypeRender(Operator):
class ClearAllRestrictRender(Operator): class ClearAllRestrictRender(Operator):
'''Reveal all render objects by setting the hide render flag''' """Reveal all render objects by setting the hide render flag"""
bl_idname = "object.hide_render_clear_all" bl_idname = "object.hide_render_clear_all"
bl_label = "Clear All Restrict Render" bl_label = "Clear All Restrict Render"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}
@ -654,7 +654,7 @@ class ClearAllRestrictRender(Operator):
class TransformsToDeltasAnim(Operator): class TransformsToDeltasAnim(Operator):
'''Convert object animation for normal transforms to delta transforms''' """Convert object animation for normal transforms to delta transforms"""
bl_idname = "object.anim_transforms_to_deltas" bl_idname = "object.anim_transforms_to_deltas"
bl_label = "Animated Transforms to Deltas" bl_label = "Animated Transforms to Deltas"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}
@ -700,7 +700,7 @@ class TransformsToDeltasAnim(Operator):
class DupliOffsetFromCursor(Operator): class DupliOffsetFromCursor(Operator):
'''Set offset used for DupliGroup based on cursor position''' """Set offset used for DupliGroup based on cursor position"""
bl_idname = "object.dupli_offset_from_cursor" bl_idname = "object.dupli_offset_from_cursor"
bl_label = "Set Offset From Cursor" bl_label = "Set Offset From Cursor"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}

@ -341,7 +341,7 @@ from bpy.props import EnumProperty, BoolProperty
class AlignObjects(Operator): class AlignObjects(Operator):
'''Align Objects''' """Align Objects"""
bl_idname = "object.align" bl_idname = "object.align"
bl_label = "Align Objects" bl_label = "Align Objects"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}

@ -95,7 +95,7 @@ from bpy.props import (IntProperty,
class RandomizeLocRotSize(Operator): class RandomizeLocRotSize(Operator):
'''Randomize objects loc/rot/scale''' """Randomize objects loc/rot/scale"""
bl_idname = "object.randomize_transform" bl_idname = "object.randomize_transform"
bl_label = "Randomize Transform" bl_label = "Randomize Transform"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}

@ -24,10 +24,10 @@ from bpy.props import StringProperty, BoolProperty
class AddPresetBase(): class AddPresetBase():
'''Base preset class, only for subclassing """Base preset class, only for subclassing
subclasses must define subclasses must define
- preset_values - preset_values
- preset_subdir ''' - preset_subdir """
# bl_idname = "script.preset_base_add" # bl_idname = "script.preset_base_add"
# bl_label = "Add a Python Preset" # bl_label = "Add a Python Preset"
bl_options = {'REGISTER'} # only because invoke_props_popup requires. bl_options = {'REGISTER'} # only because invoke_props_popup requires.
@ -179,7 +179,7 @@ class AddPresetBase():
class ExecutePreset(Operator): class ExecutePreset(Operator):
'''Execute a preset''' """Execute a preset"""
bl_idname = "script.execute_preset" bl_idname = "script.execute_preset"
bl_label = "Execute a Python Preset" bl_label = "Execute a Python Preset"
@ -217,7 +217,7 @@ class ExecutePreset(Operator):
class AddPresetRender(AddPresetBase, Operator): class AddPresetRender(AddPresetBase, Operator):
'''Add a Render Preset''' """Add a Render Preset"""
bl_idname = "render.preset_add" bl_idname = "render.preset_add"
bl_label = "Add Render Preset" bl_label = "Add Render Preset"
preset_menu = "RENDER_MT_presets" preset_menu = "RENDER_MT_presets"
@ -243,7 +243,7 @@ class AddPresetRender(AddPresetBase, Operator):
class AddPresetCamera(AddPresetBase, Operator): class AddPresetCamera(AddPresetBase, Operator):
'''Add a Camera Preset''' """Add a Camera Preset"""
bl_idname = "camera.preset_add" bl_idname = "camera.preset_add"
bl_label = "Add Camera Preset" bl_label = "Add Camera Preset"
preset_menu = "CAMERA_MT_presets" preset_menu = "CAMERA_MT_presets"
@ -262,7 +262,7 @@ class AddPresetCamera(AddPresetBase, Operator):
class AddPresetSSS(AddPresetBase, Operator): class AddPresetSSS(AddPresetBase, Operator):
'''Add a Subsurface Scattering Preset''' """Add a Subsurface Scattering Preset"""
bl_idname = "material.sss_preset_add" bl_idname = "material.sss_preset_add"
bl_label = "Add SSS Preset" bl_label = "Add SSS Preset"
preset_menu = "MATERIAL_MT_sss_presets" preset_menu = "MATERIAL_MT_sss_presets"
@ -290,7 +290,7 @@ class AddPresetSSS(AddPresetBase, Operator):
class AddPresetCloth(AddPresetBase, Operator): class AddPresetCloth(AddPresetBase, Operator):
'''Add a Cloth Preset''' """Add a Cloth Preset"""
bl_idname = "cloth.preset_add" bl_idname = "cloth.preset_add"
bl_label = "Add Cloth Preset" bl_label = "Add Cloth Preset"
preset_menu = "CLOTH_MT_presets" preset_menu = "CLOTH_MT_presets"
@ -312,7 +312,7 @@ class AddPresetCloth(AddPresetBase, Operator):
class AddPresetFluid(AddPresetBase, Operator): class AddPresetFluid(AddPresetBase, Operator):
'''Add a Fluid Preset''' """Add a Fluid Preset"""
bl_idname = "fluid.preset_add" bl_idname = "fluid.preset_add"
bl_label = "Add Fluid Preset" bl_label = "Add Fluid Preset"
preset_menu = "FLUID_MT_presets" preset_menu = "FLUID_MT_presets"
@ -330,7 +330,7 @@ class AddPresetFluid(AddPresetBase, Operator):
class AddPresetSunSky(AddPresetBase, Operator): class AddPresetSunSky(AddPresetBase, Operator):
'''Add a Sky & Atmosphere Preset''' """Add a Sky & Atmosphere Preset"""
bl_idname = "lamp.sunsky_preset_add" bl_idname = "lamp.sunsky_preset_add"
bl_label = "Add Sunsky Preset" bl_label = "Add Sunsky Preset"
preset_menu = "LAMP_MT_sunsky_presets" preset_menu = "LAMP_MT_sunsky_presets"
@ -359,7 +359,7 @@ class AddPresetSunSky(AddPresetBase, Operator):
class AddPresetInteraction(AddPresetBase, Operator): class AddPresetInteraction(AddPresetBase, Operator):
'''Add an Application Interaction Preset''' """Add an Application Interaction Preset"""
bl_idname = "wm.interaction_preset_add" bl_idname = "wm.interaction_preset_add"
bl_label = "Add Interaction Preset" bl_label = "Add Interaction Preset"
preset_menu = "USERPREF_MT_interaction_presets" preset_menu = "USERPREF_MT_interaction_presets"
@ -385,7 +385,7 @@ class AddPresetInteraction(AddPresetBase, Operator):
class AddPresetTrackingCamera(AddPresetBase, Operator): class AddPresetTrackingCamera(AddPresetBase, Operator):
'''Add a Tracking Camera Intrinsics Preset''' """Add a Tracking Camera Intrinsics Preset"""
bl_idname = "clip.camera_preset_add" bl_idname = "clip.camera_preset_add"
bl_label = "Add Camera Preset" bl_label = "Add Camera Preset"
preset_menu = "CLIP_MT_camera_presets" preset_menu = "CLIP_MT_camera_presets"
@ -408,7 +408,7 @@ class AddPresetTrackingCamera(AddPresetBase, Operator):
class AddPresetTrackingTrackColor(AddPresetBase, Operator): class AddPresetTrackingTrackColor(AddPresetBase, Operator):
'''Add a Clip Track Color Preset''' """Add a Clip Track Color Preset"""
bl_idname = "clip.track_color_preset_add" bl_idname = "clip.track_color_preset_add"
bl_label = "Add Track Color Preset" bl_label = "Add Track Color Preset"
preset_menu = "CLIP_MT_track_color_presets" preset_menu = "CLIP_MT_track_color_presets"
@ -426,7 +426,7 @@ class AddPresetTrackingTrackColor(AddPresetBase, Operator):
class AddPresetTrackingSettings(AddPresetBase, Operator): class AddPresetTrackingSettings(AddPresetBase, Operator):
'''Add a motion tracking settings preset''' """Add a motion tracking settings preset"""
bl_idname = "clip.tracking_settings_preset_add" bl_idname = "clip.tracking_settings_preset_add"
bl_label = "Add Tracking Settings Preset" bl_label = "Add Tracking Settings Preset"
preset_menu = "CLIP_MT_tracking_settings_presets" preset_menu = "CLIP_MT_tracking_settings_presets"
@ -453,7 +453,7 @@ class AddPresetTrackingSettings(AddPresetBase, Operator):
class AddPresetNodeColor(AddPresetBase, Operator): class AddPresetNodeColor(AddPresetBase, Operator):
'''Add a Node Color Preset''' """Add a Node Color Preset"""
bl_idname = "node.node_color_preset_add" bl_idname = "node.node_color_preset_add"
bl_label = "Add Node Color Preset" bl_label = "Add Node Color Preset"
preset_menu = "NODE_MT_node_color_presets" preset_menu = "NODE_MT_node_color_presets"
@ -471,7 +471,7 @@ class AddPresetNodeColor(AddPresetBase, Operator):
class AddPresetInterfaceTheme(AddPresetBase, Operator): class AddPresetInterfaceTheme(AddPresetBase, Operator):
'''Add a theme preset''' """Add a theme preset"""
bl_idname = "wm.interface_theme_preset_add" bl_idname = "wm.interface_theme_preset_add"
bl_label = "Add Tracking Settings Preset" bl_label = "Add Tracking Settings Preset"
preset_menu = "USERPREF_MT_interface_theme_presets" preset_menu = "USERPREF_MT_interface_theme_presets"
@ -479,7 +479,7 @@ class AddPresetInterfaceTheme(AddPresetBase, Operator):
class AddPresetKeyconfig(AddPresetBase, Operator): class AddPresetKeyconfig(AddPresetBase, Operator):
'''Add a Key-config Preset''' """Add a Key-config Preset"""
bl_idname = "wm.keyconfig_preset_add" bl_idname = "wm.keyconfig_preset_add"
bl_label = "Add Keyconfig Preset" bl_label = "Add Keyconfig Preset"
preset_menu = "USERPREF_MT_keyconfigs" preset_menu = "USERPREF_MT_keyconfigs"
@ -502,7 +502,7 @@ class AddPresetKeyconfig(AddPresetBase, Operator):
class AddPresetOperator(AddPresetBase, Operator): class AddPresetOperator(AddPresetBase, Operator):
'''Add an Application Interaction Preset''' """Add an Application Interaction Preset"""
bl_idname = "wm.operator_preset_add" bl_idname = "wm.operator_preset_add"
bl_label = "Operator Preset" bl_label = "Operator Preset"
preset_menu = "WM_MT_operator_presets" preset_menu = "WM_MT_operator_presets"

@ -68,7 +68,7 @@ def guess_player_path(preset):
class PlayRenderedAnim(Operator): class PlayRenderedAnim(Operator):
'''Play back rendered frames/movies using an external player''' """Play back rendered frames/movies using an external player"""
bl_idname = "render.play_rendered_anim" bl_idname = "render.play_rendered_anim"
bl_label = "Play Rendered Animation" bl_label = "Play Rendered Animation"
bl_options = {'REGISTER'} bl_options = {'REGISTER'}

@ -25,7 +25,7 @@ from bpy.props import IntProperty
class SequencerCrossfadeSounds(Operator): class SequencerCrossfadeSounds(Operator):
'''Do cross-fading volume animation of two selected sound strips''' """Do cross-fading volume animation of two selected sound strips"""
bl_idname = "sequencer.crossfade_sounds" bl_idname = "sequencer.crossfade_sounds"
bl_label = "Crossfade sounds" bl_label = "Crossfade sounds"
@ -76,7 +76,7 @@ class SequencerCrossfadeSounds(Operator):
class SequencerCutMulticam(Operator): class SequencerCutMulticam(Operator):
'''Cut multi-cam strip and select camera''' """Cut multi-cam strip and select camera"""
bl_idname = "sequencer.cut_multicam" bl_idname = "sequencer.cut_multicam"
bl_label = "Cut multicam" bl_label = "Cut multicam"
@ -118,7 +118,7 @@ class SequencerCutMulticam(Operator):
class SequencerDeinterlaceSelectedMovies(Operator): class SequencerDeinterlaceSelectedMovies(Operator):
'''Deinterlace all selected movie sources''' """Deinterlace all selected movie sources"""
bl_idname = "sequencer.deinterlace_selected_movies" bl_idname = "sequencer.deinterlace_selected_movies"
bl_label = "Deinterlace Movies" bl_label = "Deinterlace Movies"

@ -46,11 +46,11 @@ def extend(obj, operator, EXTEND_MODE):
OTHER_INDEX = 2, 3, 0, 1 OTHER_INDEX = 2, 3, 0, 1
def extend_uvs(face_source, face_target, edge_key): def extend_uvs(face_source, face_target, edge_key):
''' """
Takes 2 faces, Takes 2 faces,
Projects its extends its UV coords onto the face next to it. Projects its extends its UV coords onto the face next to it.
Both faces must share an edge Both faces must share an edge
''' """
def face_edge_vs(vi): def face_edge_vs(vi):
vlen = len(vi) vlen = len(vi)
@ -224,7 +224,7 @@ def main(context, operator):
class FollowActiveQuads(Operator): class FollowActiveQuads(Operator):
'''Follow UVs from active quads along continuous face loops''' """Follow UVs from active quads along continuous face loops"""
bl_idname = "uv.follow_active_quads" bl_idname = "uv.follow_active_quads"
bl_label = "Follow Active Quads" bl_label = "Follow Active Quads"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}

@ -197,12 +197,12 @@ def lightmap_uvpack(meshes,
PREF_BOX_DIV=8, PREF_BOX_DIV=8,
PREF_MARGIN_DIV=512 PREF_MARGIN_DIV=512
): ):
''' """
BOX_DIV if the maximum division of the UV map that BOX_DIV if the maximum division of the UV map that
a box may be consolidated into. a box may be consolidated into.
Basically, a lower value will be slower but waist less space Basically, a lower value will be slower but waist less space
and a higher value will have more clumpy boxes but more wasted space and a higher value will have more clumpy boxes but more wasted space
''' """
import time import time
from math import sqrt from math import sqrt
@ -545,7 +545,7 @@ from bpy.props import BoolProperty, FloatProperty, IntProperty
class LightMapPack(Operator): class LightMapPack(Operator):
'''Follow UVs from active quads along continuous face loops''' """Follow UVs from active quads along continuous face loops"""
bl_idname = "uv.lightmap_pack" bl_idname = "uv.lightmap_pack"
bl_label = "Lightmap Pack" bl_label = "Lightmap Pack"

@ -492,7 +492,7 @@ def mergeUvIslands(islandList):
pass pass
if Intersect == 2: # Source inside target if Intersect == 2: # Source inside target
''' """
We have an intersection, if we are inside the target We have an intersection, if we are inside the target
then move us 1 whole width across, then move us 1 whole width across,
Its possible this is a bad idea since 2 skinny Angular faces Its possible this is a bad idea since 2 skinny Angular faces
@ -500,7 +500,7 @@ def mergeUvIslands(islandList):
since we have already tested for it. since we have already tested for it.
It gives about 10% speedup with minimal errors. It gives about 10% speedup with minimal errors.
''' """
# Move the test along its width + SMALL_NUM # Move the test along its width + SMALL_NUM
#boxLeft += sourceIsland[4] + SMALL_NUM #boxLeft += sourceIsland[4] + SMALL_NUM
boxLeft += sourceIsland[4] boxLeft += sourceIsland[4]
@ -694,11 +694,11 @@ def packIslands(islandList):
islandIdx -=1 islandIdx -=1
continue continue
'''Save the offset to be applied later, """Save the offset to be applied later,
we could apply to the UVs now and allign them to the bottom left hand area we could apply to the UVs now and allign them to the bottom left hand area
of the UV coords like the box packer imagines they are of the UV coords like the box packer imagines they are
but, its quicker just to remember their offset and but, its quicker just to remember their offset and
apply the packing and offset in 1 pass ''' apply the packing and offset in 1 pass """
islandOffsetList.append((minx, miny)) islandOffsetList.append((minx, miny))
# Add to boxList. use the island idx for the BOX id. # Add to boxList. use the island idx for the BOX id.
@ -1104,8 +1104,9 @@ from bpy.props import FloatProperty
class SmartProject(Operator): class SmartProject(Operator):
'''This script projection unwraps the selected faces of a mesh ''' \ """This script projection unwraps the selected faces of a mesh """ \
'''(it operates on all selected mesh objects, and can be used to unwrap selected faces, or all faces)''' """(it operates on all selected mesh objects, and can be used """ \
"""to unwrap selected faces, or all faces)"""
bl_idname = "uv.smart_project" bl_idname = "uv.smart_project"
bl_label = "Smart UV Project" bl_label = "Smart UV Project"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}

@ -31,7 +31,8 @@ from rna_prop_ui import rna_idprop_ui_prop_get, rna_idprop_ui_prop_clear
class MESH_OT_delete_edgeloop(Operator): class MESH_OT_delete_edgeloop(Operator):
'''Delete an edge loop by merging the faces on each side to a single face loop''' """Delete an edge loop by merging the faces on each side """ \
"""to a single face loop"""
bl_idname = "mesh.delete_edgeloop" bl_idname = "mesh.delete_edgeloop"
bl_label = "Delete Edge Loop" bl_label = "Delete Edge Loop"
@ -173,7 +174,7 @@ class BRUSH_OT_active_index_set(Operator):
class WM_OT_context_set_boolean(Operator): class WM_OT_context_set_boolean(Operator):
'''Set a context value''' """Set a context value"""
bl_idname = "wm.context_set_boolean" bl_idname = "wm.context_set_boolean"
bl_label = "Context Set Boolean" bl_label = "Context Set Boolean"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -189,7 +190,7 @@ class WM_OT_context_set_boolean(Operator):
class WM_OT_context_set_int(Operator): # same as enum class WM_OT_context_set_int(Operator): # same as enum
'''Set a context value''' """Set a context value"""
bl_idname = "wm.context_set_int" bl_idname = "wm.context_set_int"
bl_label = "Context Set" bl_label = "Context Set"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -206,7 +207,7 @@ class WM_OT_context_set_int(Operator): # same as enum
class WM_OT_context_scale_int(Operator): class WM_OT_context_scale_int(Operator):
'''Scale an int context value''' """Scale an int context value"""
bl_idname = "wm.context_scale_int" bl_idname = "wm.context_scale_int"
bl_label = "Context Set" bl_label = "Context Set"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -249,7 +250,7 @@ class WM_OT_context_scale_int(Operator):
class WM_OT_context_set_float(Operator): # same as enum class WM_OT_context_set_float(Operator): # same as enum
'''Set a context value''' """Set a context value"""
bl_idname = "wm.context_set_float" bl_idname = "wm.context_set_float"
bl_label = "Context Set Float" bl_label = "Context Set Float"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -266,7 +267,7 @@ class WM_OT_context_set_float(Operator): # same as enum
class WM_OT_context_set_string(Operator): # same as enum class WM_OT_context_set_string(Operator): # same as enum
'''Set a context value''' """Set a context value"""
bl_idname = "wm.context_set_string" bl_idname = "wm.context_set_string"
bl_label = "Context Set String" bl_label = "Context Set String"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -282,7 +283,7 @@ class WM_OT_context_set_string(Operator): # same as enum
class WM_OT_context_set_enum(Operator): class WM_OT_context_set_enum(Operator):
'''Set a context value''' """Set a context value"""
bl_idname = "wm.context_set_enum" bl_idname = "wm.context_set_enum"
bl_label = "Context Set Enum" bl_label = "Context Set Enum"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -298,7 +299,7 @@ class WM_OT_context_set_enum(Operator):
class WM_OT_context_set_value(Operator): class WM_OT_context_set_value(Operator):
'''Set a context value''' """Set a context value"""
bl_idname = "wm.context_set_value" bl_idname = "wm.context_set_value"
bl_label = "Context Set Value" bl_label = "Context Set Value"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -319,7 +320,7 @@ class WM_OT_context_set_value(Operator):
class WM_OT_context_toggle(Operator): class WM_OT_context_toggle(Operator):
'''Toggle a context value''' """Toggle a context value"""
bl_idname = "wm.context_toggle" bl_idname = "wm.context_toggle"
bl_label = "Context Toggle" bl_label = "Context Toggle"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -338,7 +339,7 @@ class WM_OT_context_toggle(Operator):
class WM_OT_context_toggle_enum(Operator): class WM_OT_context_toggle_enum(Operator):
'''Toggle a context value''' """Toggle a context value"""
bl_idname = "wm.context_toggle_enum" bl_idname = "wm.context_toggle_enum"
bl_label = "Context Toggle Values" bl_label = "Context Toggle Values"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -371,7 +372,7 @@ class WM_OT_context_toggle_enum(Operator):
class WM_OT_context_cycle_int(Operator): class WM_OT_context_cycle_int(Operator):
"""Set a context value. Useful for cycling active material, """ """Set a context value. Useful for cycling active material, """ \
"""vertex keys, groups' etc""" """vertex keys, groups' etc"""
bl_idname = "wm.context_cycle_int" bl_idname = "wm.context_cycle_int"
bl_label = "Context Int Cycle" bl_label = "Context Int Cycle"
@ -406,7 +407,7 @@ class WM_OT_context_cycle_int(Operator):
class WM_OT_context_cycle_enum(Operator): class WM_OT_context_cycle_enum(Operator):
'''Toggle a context value''' """Toggle a context value"""
bl_idname = "wm.context_cycle_enum" bl_idname = "wm.context_cycle_enum"
bl_label = "Context Enum Cycle" bl_label = "Context Enum Cycle"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -458,8 +459,8 @@ class WM_OT_context_cycle_enum(Operator):
class WM_OT_context_cycle_array(Operator): class WM_OT_context_cycle_array(Operator):
'''Set a context array value. ''' """Set a context array value """ \
'''Useful for cycling the active mesh edit mode''' """(useful for cycling the active mesh edit mode)"""
bl_idname = "wm.context_cycle_array" bl_idname = "wm.context_cycle_array"
bl_label = "Context Array Cycle" bl_label = "Context Array Cycle"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -519,7 +520,7 @@ class WM_OT_context_menu_enum(Operator):
class WM_OT_context_set_id(Operator): class WM_OT_context_set_id(Operator):
'''Toggle a context value''' """Toggle a context value"""
bl_idname = "wm.context_set_id" bl_idname = "wm.context_set_id"
bl_label = "Set Library ID" bl_label = "Set Library ID"
bl_options = {'UNDO', 'INTERNAL'} bl_options = {'UNDO', 'INTERNAL'}
@ -575,7 +576,7 @@ data_path_item = StringProperty(
class WM_OT_context_collection_boolean_set(Operator): class WM_OT_context_collection_boolean_set(Operator):
'''Set boolean values for a collection of items''' """Set boolean values for a collection of items"""
bl_idname = "wm.context_collection_boolean_set" bl_idname = "wm.context_collection_boolean_set"
bl_label = "Context Collection Boolean Set" bl_label = "Context Collection Boolean Set"
bl_options = {'UNDO', 'REGISTER', 'INTERNAL'} bl_options = {'UNDO', 'REGISTER', 'INTERNAL'}
@ -634,7 +635,7 @@ class WM_OT_context_collection_boolean_set(Operator):
class WM_OT_context_modal_mouse(Operator): class WM_OT_context_modal_mouse(Operator):
'''Adjust arbitrary values with mouse input''' """Adjust arbitrary values with mouse input"""
bl_idname = "wm.context_modal_mouse" bl_idname = "wm.context_modal_mouse"
bl_label = "Context Modal Mouse" bl_label = "Context Modal Mouse"
bl_options = {'GRAB_POINTER', 'BLOCKING', 'UNDO', 'INTERNAL'} bl_options = {'GRAB_POINTER', 'BLOCKING', 'UNDO', 'INTERNAL'}
@ -836,7 +837,7 @@ def _wm_doc_get_id(doc_id, do_url=True, url_prefix=""):
class WM_OT_doc_view_manual(Operator): class WM_OT_doc_view_manual(Operator):
'''Load online manual''' """Load online manual"""
bl_idname = "wm.doc_view_manual" bl_idname = "wm.doc_view_manual"
bl_label = "View Manual" bl_label = "View Manual"
@ -881,7 +882,7 @@ class WM_OT_doc_view_manual(Operator):
class WM_OT_doc_view(Operator): class WM_OT_doc_view(Operator):
'''Load online reference docs''' """Load online reference docs"""
bl_idname = "wm.doc_view" bl_idname = "wm.doc_view"
bl_label = "View Documentation" bl_label = "View Documentation"
@ -905,7 +906,7 @@ class WM_OT_doc_view(Operator):
class WM_OT_doc_edit(Operator): class WM_OT_doc_edit(Operator):
'''Load online reference docs''' """Load online reference docs"""
bl_idname = "wm.doc_edit" bl_idname = "wm.doc_edit"
bl_label = "Edit Documentation" bl_label = "Edit Documentation"
@ -1008,7 +1009,7 @@ rna_max = FloatProperty(
class WM_OT_properties_edit(Operator): class WM_OT_properties_edit(Operator):
'''Internal use (edit a property data_path)''' """Internal use (edit a property data_path)"""
bl_idname = "wm.properties_edit" bl_idname = "wm.properties_edit"
bl_label = "Edit Property" bl_label = "Edit Property"
bl_options = {'REGISTER'} # only because invoke_props_popup requires. bl_options = {'REGISTER'} # only because invoke_props_popup requires.
@ -1094,7 +1095,7 @@ class WM_OT_properties_edit(Operator):
class WM_OT_properties_add(Operator): class WM_OT_properties_add(Operator):
'''Internal use (edit a property data_path)''' """Internal use (edit a property data_path)"""
bl_idname = "wm.properties_add" bl_idname = "wm.properties_add"
bl_label = "Add Property" bl_label = "Add Property"
bl_options = {'UNDO'} bl_options = {'UNDO'}
@ -1137,7 +1138,7 @@ class WM_OT_properties_context_change(Operator):
class WM_OT_properties_remove(Operator): class WM_OT_properties_remove(Operator):
'''Internal use (edit a property data_path)''' """Internal use (edit a property data_path)"""
bl_idname = "wm.properties_remove" bl_idname = "wm.properties_remove"
bl_label = "Remove Property" bl_label = "Remove Property"
bl_options = {'UNDO'} bl_options = {'UNDO'}
@ -1203,7 +1204,7 @@ class WM_OT_appconfig_activate(Operator):
class WM_OT_sysinfo(Operator): class WM_OT_sysinfo(Operator):
'''Generate System Info''' """Generate System Info"""
bl_idname = "wm.sysinfo" bl_idname = "wm.sysinfo"
bl_label = "System Info" bl_label = "System Info"
@ -1214,7 +1215,7 @@ class WM_OT_sysinfo(Operator):
class WM_OT_copy_prev_settings(Operator): class WM_OT_copy_prev_settings(Operator):
'''Copy settings from previous version''' """Copy settings from previous version"""
bl_idname = "wm.copy_prev_settings" bl_idname = "wm.copy_prev_settings"
bl_label = "Copy Previous Settings" bl_label = "Copy Previous Settings"
@ -1251,7 +1252,7 @@ class WM_OT_copy_prev_settings(Operator):
class WM_OT_blenderplayer_start(Operator): class WM_OT_blenderplayer_start(Operator):
'''Launch the blender-player with the current blend-file''' """Launch the blender-player with the current blend-file"""
bl_idname = "wm.blenderplayer_start" bl_idname = "wm.blenderplayer_start"
bl_label = "Start Game In Player" bl_label = "Start Game In Player"

@ -33,7 +33,7 @@ class CurveButtonsPanel():
class CurveButtonsPanelCurve(CurveButtonsPanel): class CurveButtonsPanelCurve(CurveButtonsPanel):
'''Same as above but for curves only''' """Same as above but for curves only"""
@classmethod @classmethod
def poll(cls, context): def poll(cls, context):
@ -41,7 +41,7 @@ class CurveButtonsPanelCurve(CurveButtonsPanel):
class CurveButtonsPanelActive(CurveButtonsPanel): class CurveButtonsPanelActive(CurveButtonsPanel):
'''Same as above but for curves only''' """Same as above but for curves only"""
@classmethod @classmethod
def poll(cls, context): def poll(cls, context):

@ -159,7 +159,7 @@ class OBJECT_PT_groups(ObjectButtonsPanel, Panel):
def draw(self, context): def draw(self, context):
layout = self.layout layout = self.layout
ob = context.object obj = context.object
row = layout.row(align=True) row = layout.row(align=True)
row.operator("object.group_link", text="Add to Group") row.operator("object.group_link", text="Add to Group")
@ -167,8 +167,13 @@ class OBJECT_PT_groups(ObjectButtonsPanel, Panel):
# XXX, this is bad practice, yes, I wrote it :( - campbell # XXX, this is bad practice, yes, I wrote it :( - campbell
index = 0 index = 0
obj_name = obj.name
for group in bpy.data.groups: for group in bpy.data.groups:
if ob.name in group.objects: # XXX this is slow and stupid!, we need 2 checks, one thats fast
# and another that we can be sure its not a name collission
# from linked library data
group_objects = group.objects
if obj_name in group.objects and obj in group_objects[:]:
col = layout.column(align=True) col = layout.column(align=True)
col.context_pointer_set("group", group) col.context_pointer_set("group", group)

@ -113,7 +113,7 @@ struct bActionGroup *get_active_actiongroup(struct bAction *act);
void set_active_action_group(struct bAction *act, struct bActionGroup *agrp, short select); void set_active_action_group(struct bAction *act, struct bActionGroup *agrp, short select);
/* Sync colors used for action/bone group with theme settings */ /* Sync colors used for action/bone group with theme settings */
void action_group_colors_sync(struct bActionGroup *grp); void action_group_colors_sync(struct bActionGroup *grp, const struct bActionGroup *ref_grp);
/* Add a new action group with the given name to the action */ /* Add a new action group with the given name to the action */
struct bActionGroup *action_groups_add_new(struct bAction *act, const char name[]); struct bActionGroup *action_groups_add_new(struct bAction *act, const char name[]);

@ -42,6 +42,7 @@ struct ID;
struct Object; struct Object;
struct Mesh; struct Mesh;
struct MTFace; struct MTFace;
struct Scene;
/* materials */ /* materials */
@ -92,6 +93,9 @@ int material_in_material(struct Material *parmat, struct Material *mat);
void ramp_blend(int type, float r_col[3], const float fac, const float col[3]); void ramp_blend(int type, float r_col[3], const float fac, const float col[3]);
/* driver update hacks */
void material_drivers_update(struct Scene *scene, struct Material *mat, float ctime);
/* copy/paste */ /* copy/paste */
void clear_matcopybuf(void); void clear_matcopybuf(void);
void free_matcopybuf(void); void free_matcopybuf(void);

@ -253,7 +253,7 @@ void set_active_action_group(bAction *act, bActionGroup *agrp, short select)
} }
/* Sync colors used for action/bone group with theme settings */ /* Sync colors used for action/bone group with theme settings */
void action_group_colors_sync(bActionGroup *grp) void action_group_colors_sync(bActionGroup *grp, const bActionGroup *ref_grp)
{ {
/* only do color copying if using a custom color (i.e. not default color) */ /* only do color copying if using a custom color (i.e. not default color) */
if (grp->customCol) { if (grp->customCol) {
@ -265,9 +265,15 @@ void action_group_colors_sync(bActionGroup *grp)
memcpy(&grp->cs, col_set, sizeof(ThemeWireColor)); memcpy(&grp->cs, col_set, sizeof(ThemeWireColor));
} }
else { else {
/* init custom colors with a generic multi-color rgb set, if not initialized already /* if a reference group is provided, use the custom color from there... */
* (for custom color set) */ if (ref_grp) {
if (grp->cs.solid[0] == 0) { /* assumption: reference group has a color set */
memcpy(&grp->cs, &ref_grp->cs, sizeof(ThemeWireColor));
}
/* otherwise, init custom color with a generic/placeholder color set if
* no previous theme color was used that we can just keep using
*/
else if (grp->cs.solid[0] == 0) {
/* define for setting colors in theme below */ /* define for setting colors in theme below */
rgba_char_args_set(grp->cs.solid, 0xff, 0x00, 0x00, 255); rgba_char_args_set(grp->cs.solid, 0xff, 0x00, 0x00, 255);
rgba_char_args_set(grp->cs.select, 0x81, 0xe6, 0x14, 255); rgba_char_args_set(grp->cs.select, 0x81, 0xe6, 0x14, 255);

@ -66,6 +66,7 @@
#include "BKE_library.h" #include "BKE_library.h"
#include "BKE_main.h" #include "BKE_main.h"
#include "BKE_node.h" #include "BKE_node.h"
#include "BKE_material.h"
#include "BKE_mball.h" #include "BKE_mball.h"
#include "BKE_modifier.h" #include "BKE_modifier.h"
#include "BKE_object.h" #include "BKE_object.h"
@ -311,7 +312,7 @@ static void dag_add_driver_relation(AnimData *adt, DagForest *dag, DagNode *node
for (fcu = adt->drivers.first; fcu; fcu = fcu->next) { for (fcu = adt->drivers.first; fcu; fcu = fcu->next) {
ChannelDriver *driver = fcu->driver; ChannelDriver *driver = fcu->driver;
DriverVar *dvar; DriverVar *dvar;
int isdata_fcu = isdata || (fcu->rna_path && strstr(fcu->rna_path, "modifiers[")); int isdata_fcu = (isdata) || (fcu->rna_path && strstr(fcu->rna_path, "modifiers["));
/* loop over variables to get the target relationships */ /* loop over variables to get the target relationships */
for (dvar = driver->variables.first; dvar; dvar = dvar->next) { for (dvar = driver->variables.first; dvar; dvar = dvar->next) {
@ -347,6 +348,48 @@ static void dag_add_driver_relation(AnimData *adt, DagForest *dag, DagNode *node
} }
} }
/* XXX: forward def for material driver handling... */
static void dag_add_material_driver_relations(DagForest *dag, DagNode *node, Material *ma);
/* recursive handling for material nodetree drivers */
static void dag_add_material_nodetree_driver_relations(DagForest *dag, DagNode *node, bNodeTree *ntree)
{
bNode *n;
/* nodetree itself */
if (ntree->adt) {
dag_add_driver_relation(ntree->adt, dag, node, 1);
}
/* nodetree's nodes... */
for (n = ntree->nodes.first; n; n = n->next) {
if (n->id && GS(n->id->name) == ID_MA) {
dag_add_material_driver_relations(dag, node, (Material *)n->id);
}
else if (n->type == NODE_GROUP && n->id) {
dag_add_material_nodetree_driver_relations(dag, node, (bNodeTree *)n->id);
}
}
}
/* recursive handling for material drivers */
static void dag_add_material_driver_relations(DagForest *dag, DagNode *node, Material *ma)
{
/* material itself */
if (ma->adt) {
dag_add_driver_relation(ma->adt, dag, node, 1);
}
/* textures */
// TODO...
//dag_add_texture_driver_relations(DagForest *dag, DagNode *node, ID *id);
/* material's nodetree */
if (ma->nodetree) {
dag_add_material_nodetree_driver_relations(dag, node, ma->nodetree);
}
}
static void dag_add_collision_field_relation(DagForest *dag, Scene *scene, Object *ob, DagNode *node) static void dag_add_collision_field_relation(DagForest *dag, Scene *scene, Object *ob, DagNode *node)
{ {
Base *base; Base *base;
@ -572,6 +615,20 @@ static void build_dag_object(DagForest *dag, DagNode *scenenode, Scene *scene, O
break; break;
} }
/* material drivers */
if (ob->totcol) {
int a;
for (a = 1; a <= ob->totcol; a++) {
Material *ma = give_current_material(ob, a);
if (ma) {
/* recursively figure out if there are drivers, and hook these up to this object */
dag_add_material_driver_relations(dag, node, ma);
}
}
}
/* particles */ /* particles */
psys = ob->particlesystem.first; psys = ob->particlesystem.first;
if (psys) { if (psys) {

@ -36,6 +36,7 @@
#include "MEM_guardedalloc.h" #include "MEM_guardedalloc.h"
#include "DNA_anim_types.h"
#include "DNA_curve_types.h" #include "DNA_curve_types.h"
#include "DNA_material_types.h" #include "DNA_material_types.h"
#include "DNA_mesh_types.h" #include "DNA_mesh_types.h"
@ -1051,6 +1052,52 @@ int material_in_material(Material *parmat, Material *mat)
return 0; return 0;
} }
/* ****************** */
/* Update drivers for materials in a nodetree */
static void material_node_drivers_update(Scene *scene, bNodeTree *ntree, float ctime)
{
bNode *node;
/* nodetree itself */
if (ntree->adt && ntree->adt->drivers.first) {
BKE_animsys_evaluate_animdata(scene, &ntree->id, ntree->adt, ctime, ADT_RECALC_DRIVERS);
}
/* nodes... */
for (node = ntree->nodes.first; node; node = node->next) {
if (node->id && GS(node->id->name) == ID_MA) {
/* TODO: prevent infinite recursion here... */
material_drivers_update(scene, (Material *)node->id, ctime);
}
else if (node->type == NODE_GROUP && node->id) {
material_node_drivers_update(scene, (bNodeTree *)node->id, ctime);
}
}
}
/* Calculate all drivers for materials
* FIXME: this is really a terrible method which may result in some things being calculated
* multiple times. However, without proper despgraph support for these things, we are forced
* into this sort of thing...
*/
void material_drivers_update(Scene *scene, Material *ma, float ctime)
{
//if (G.f & G_DEBUG)
// printf("material_drivers_update(%s, %s)\n", scene->id.name, ma->id.name);
/* material itself */
if (ma->adt && ma->adt->drivers.first) {
BKE_animsys_evaluate_animdata(scene, &ma->id, ma->adt, ctime, ADT_RECALC_DRIVERS);
}
/* nodes */
if (ma->nodetree) {
material_node_drivers_update(scene, ma->nodetree, ctime);
}
}
/* ****************** */ /* ****************** */
#if 0 /* UNUSED */ #if 0 /* UNUSED */
static char colname_array[125][20]= { static char colname_array[125][20]= {

@ -3146,19 +3146,17 @@ void BKE_mesh_translate(Mesh *me, float offset[3], int do_keys)
} }
} }
void BKE_mesh_ensure_navmesh(Mesh *me) void BKE_mesh_ensure_navmesh(Mesh *me)
{ {
if (!CustomData_has_layer(&me->pdata, CD_RECAST)) { if (!CustomData_has_layer(&me->pdata, CD_RECAST)) {
int i; int i;
int numFaces = me->totpoly; int numFaces = me->totpoly;
int *recastData; int *recastData;
CustomData_add_layer_named(&me->pdata, CD_RECAST, CD_CALLOC, NULL, numFaces, "recastData"); recastData = (int *)MEM_mallocN(numFaces * sizeof(int), __func__);
recastData = (int *)CustomData_get_layer(&me->pdata, CD_RECAST);
for (i = 0; i < numFaces; i++) { for (i = 0; i < numFaces; i++) {
recastData[i] = i + 1; recastData[i] = i + 1;
} }
CustomData_add_layer_named(&me->pdata, CD_RECAST, CD_REFERENCE, recastData, numFaces, "recastData"); CustomData_add_layer_named(&me->pdata, CD_RECAST, CD_ASSIGN, recastData, numFaces, "recastData");
} }
} }

@ -2538,7 +2538,7 @@ void BKE_object_handle_update(Scene *scene, Object *ob)
printf("recalcdata %s\n", ob->id.name + 2); printf("recalcdata %s\n", ob->id.name + 2);
if (adt) { if (adt) {
/* evaluate drivers */ /* evaluate drivers - datalevel */
// XXX: for mesh types, should we push this to derivedmesh instead? // XXX: for mesh types, should we push this to derivedmesh instead?
BKE_animsys_evaluate_animdata(scene, data_id, adt, ctime, ADT_RECALC_DRIVERS); BKE_animsys_evaluate_animdata(scene, data_id, adt, ctime, ADT_RECALC_DRIVERS);
} }
@ -2596,7 +2596,25 @@ void BKE_object_handle_update(Scene *scene, Object *ob)
break; break;
} }
/* related materials */
/* XXX: without depsgraph tagging, this will always need to be run, which will be slow!
* However, not doing anything (or trying to hack around this lack) is not an option
* anymore, especially due to Cycles [#31834]
*/
if (ob->totcol) {
int a;
for (a = 1; a <= ob->totcol; a++) {
Material *ma = give_current_material(ob, a);
if (ma) {
/* recursively update drivers for this material */
material_drivers_update(scene, ma, ctime);
}
}
}
/* particles */
if (ob->particlesystem.first) { if (ob->particlesystem.first) {
ParticleSystem *tpsys, *psys; ParticleSystem *tpsys, *psys;
DerivedMesh *dm; DerivedMesh *dm;

@ -528,8 +528,8 @@ static BMOpDefine bmo_transform_def = {
*/ */
static BMOpDefine bmo_object_load_bmesh_def = { static BMOpDefine bmo_object_load_bmesh_def = {
"object_load_bmesh", "object_load_bmesh",
{{BMO_OP_SLOT_PNT, "scene"}, {{BMO_OP_SLOT_PTR, "scene"},
{BMO_OP_SLOT_PNT, "object"}, {BMO_OP_SLOT_PTR, "object"},
{0, /* null-terminating sentinel */}}, {0, /* null-terminating sentinel */}},
bmo_object_load_bmesh_exec, bmo_object_load_bmesh_exec,
0, 0,
@ -543,8 +543,8 @@ static BMOpDefine bmo_object_load_bmesh_def = {
*/ */
static BMOpDefine bmo_bmesh_to_mesh_def = { static BMOpDefine bmo_bmesh_to_mesh_def = {
"bmesh_to_mesh", "bmesh_to_mesh",
{{BMO_OP_SLOT_PNT, "mesh"}, //pointer to a mesh structure to fill in {{BMO_OP_SLOT_PTR, "mesh"}, //pointer to a mesh structure to fill in
{BMO_OP_SLOT_PNT, "object"}, //pointer to an object structure {BMO_OP_SLOT_PTR, "object"}, //pointer to an object structure
{BMO_OP_SLOT_BOOL, "notessellation"}, //don't calculate mfaces {BMO_OP_SLOT_BOOL, "notessellation"}, //don't calculate mfaces
{0, /* null-terminating sentinel */}}, {0, /* null-terminating sentinel */}},
bmo_bmesh_to_mesh_exec, bmo_bmesh_to_mesh_exec,
@ -559,8 +559,8 @@ static BMOpDefine bmo_bmesh_to_mesh_def = {
*/ */
static BMOpDefine bmo_mesh_to_bmesh_def = { static BMOpDefine bmo_mesh_to_bmesh_def = {
"mesh_to_bmesh", "mesh_to_bmesh",
{{BMO_OP_SLOT_PNT, "mesh"}, //pointer to a Mesh structure {{BMO_OP_SLOT_PTR, "mesh"}, //pointer to a Mesh structure
{BMO_OP_SLOT_PNT, "object"}, //pointer to an Object structure {BMO_OP_SLOT_PTR, "object"}, //pointer to an Object structure
{BMO_OP_SLOT_BOOL, "set_shapekey"}, //load active shapekey coordinates into verts {BMO_OP_SLOT_BOOL, "set_shapekey"}, //load active shapekey coordinates into verts
{0, /* null-terminating sentinel */}}, {0, /* null-terminating sentinel */}},
bmo_mesh_to_bmesh_exec, bmo_mesh_to_bmesh_exec,
@ -737,7 +737,7 @@ static BMOpDefine bmo_duplicate_def = {
{BMO_OP_SLOT_MAPPING, "facemap"}, {BMO_OP_SLOT_MAPPING, "facemap"},
{BMO_OP_SLOT_MAPPING, "boundarymap"}, {BMO_OP_SLOT_MAPPING, "boundarymap"},
{BMO_OP_SLOT_MAPPING, "isovertmap"}, {BMO_OP_SLOT_MAPPING, "isovertmap"},
{BMO_OP_SLOT_PNT, "dest"}, /* destination bmesh, if NULL will use current on */ {BMO_OP_SLOT_PTR, "dest"}, /* destination bmesh, if NULL will use current on */
{0} /* null-terminating sentinel */}, {0} /* null-terminating sentinel */},
bmo_duplicate_exec, bmo_duplicate_exec,
0 0
@ -749,7 +749,7 @@ static BMOpDefine bmo_split_def = {
{BMO_OP_SLOT_ELEMENT_BUF, "geomout"}, {BMO_OP_SLOT_ELEMENT_BUF, "geomout"},
{BMO_OP_SLOT_MAPPING, "boundarymap"}, {BMO_OP_SLOT_MAPPING, "boundarymap"},
{BMO_OP_SLOT_MAPPING, "isovertmap"}, {BMO_OP_SLOT_MAPPING, "isovertmap"},
{BMO_OP_SLOT_PNT, "dest"}, /* destination bmesh, if NULL will use current on */ {BMO_OP_SLOT_PTR, "dest"}, /* destination bmesh, if NULL will use current on */
{BMO_OP_SLOT_BOOL, "use_only_faces"}, /* when enabled. don't duplicate loose verts/edges */ {BMO_OP_SLOT_BOOL, "use_only_faces"}, /* when enabled. don't duplicate loose verts/edges */
{0} /* null-terminating sentinel */}, {0} /* null-terminating sentinel */},
bmo_split_exec, bmo_split_exec,

@ -99,7 +99,7 @@ enum {
/* normally store pointers to object, scene, /* normally store pointers to object, scene,
* _never_ store arrays corresponding to mesh elements with this */ * _never_ store arrays corresponding to mesh elements with this */
BMO_OP_SLOT_PNT = 4, BMO_OP_SLOT_PTR = 4,
BMO_OP_SLOT_MAT = 5, BMO_OP_SLOT_MAT = 5,
BMO_OP_SLOT_VEC = 8, BMO_OP_SLOT_VEC = 8,

@ -378,8 +378,8 @@ void BMO_slot_mat3_set(BMOperator *op, const char *slot_name, float r_mat[3][3])
void BMO_slot_ptr_set(BMOperator *op, const char *slot_name, void *p) void BMO_slot_ptr_set(BMOperator *op, const char *slot_name, void *p)
{ {
BMOpSlot *slot = BMO_slot_get(op, slot_name); BMOpSlot *slot = BMO_slot_get(op, slot_name);
BLI_assert(slot->slot_type == BMO_OP_SLOT_PNT); BLI_assert(slot->slot_type == BMO_OP_SLOT_PTR);
if (!(slot->slot_type == BMO_OP_SLOT_PNT)) if (!(slot->slot_type == BMO_OP_SLOT_PTR))
return; return;
slot->data.p = p; slot->data.p = p;
@ -430,8 +430,8 @@ int BMO_slot_bool_get(BMOperator *op, const char *slot_name)
void *BMO_slot_ptr_get(BMOperator *op, const char *slot_name) void *BMO_slot_ptr_get(BMOperator *op, const char *slot_name)
{ {
BMOpSlot *slot = BMO_slot_get(op, slot_name); BMOpSlot *slot = BMO_slot_get(op, slot_name);
BLI_assert(slot->slot_type == BMO_OP_SLOT_PNT); BLI_assert(slot->slot_type == BMO_OP_SLOT_PTR);
if (!(slot->slot_type == BMO_OP_SLOT_PNT)) if (!(slot->slot_type == BMO_OP_SLOT_PTR))
return NULL; return NULL;
return slot->data.p; return slot->data.p;

@ -485,14 +485,18 @@ bool ExecutionGroup::scheduleAreaWhenPossible(ExecutionSystem *graph, rcti *area
float chunkSizef = this->m_chunkSize; float chunkSizef = this->m_chunkSize;
int indexx, indexy; int indexx, indexy;
const int minxchunk = floor(area->xmin / chunkSizef); int minxchunk = floor(area->xmin / chunkSizef);
const int maxxchunk = ceil((area->xmax - 1) / chunkSizef); int maxxchunk = ceil((area->xmax - 1) / chunkSizef);
const int minychunk = floor(area->ymin / chunkSizef); int minychunk = floor(area->ymin / chunkSizef);
const int maxychunk = ceil((area->ymax - 1) / chunkSizef); int maxychunk = ceil((area->ymax - 1) / chunkSizef);
minxchunk = MAX2(minxchunk, 0);
minychunk = MAX2(minychunk, 0);
maxxchunk = MIN2(maxxchunk, this->m_numberOfXChunks);
maxychunk = MIN2(maxychunk, this->m_numberOfYChunks);
bool result = true; bool result = true;
for (indexx = max(minxchunk, 0); indexx < maxxchunk; indexx++) { for (indexx = minxchunk; indexx < maxxchunk; indexx++) {
for (indexy = max(minychunk, 0); indexy < maxychunk; indexy++) { for (indexy = minychunk; indexy < maxychunk; indexy++) {
if (!scheduleChunkWhenPossible(graph, indexx, indexy)) { if (!scheduleChunkWhenPossible(graph, indexx, indexy)) {
result = false; result = false;
} }

@ -48,13 +48,21 @@ void LensDistortionNode::convertToOperations(ExecutionSystem *graph, CompositorC
} }
else { else {
ScreenLensDistortionOperation *operation = new ScreenLensDistortionOperation(); ScreenLensDistortionOperation *operation = new ScreenLensDistortionOperation();
operation->setData(data);
if (!(this->getInputSocket(1)->isConnected() || this->getInputSocket(2)->isConnected()))
{
// no nodes connected to the distortion and dispersion. We can precalculate some values
float distortion = ((const bNodeSocketValueFloat *)this->getInputSocket(1)->getbNodeSocket()->default_value)->value;
float dispersion = ((const bNodeSocketValueFloat *)this->getInputSocket(2)->getbNodeSocket()->default_value)->value;
operation->setDistortionAndDispersion(distortion, dispersion);
}
this->getInputSocket(0)->relinkConnections(operation->getInputSocket(0), 0, graph); this->getInputSocket(0)->relinkConnections(operation->getInputSocket(0), 0, graph);
this->getInputSocket(1)->relinkConnections(operation->getInputSocket(1), 1, graph); this->getInputSocket(1)->relinkConnections(operation->getInputSocket(1), 1, graph);
this->getInputSocket(2)->relinkConnections(operation->getInputSocket(2), 2, graph); this->getInputSocket(2)->relinkConnections(operation->getInputSocket(2), 2, graph);
this->getOutputSocket(0)->relinkConnections(operation->getOutputSocket(0)); this->getOutputSocket(0)->relinkConnections(operation->getOutputSocket(0));
operation->setData(data);
graph->addOperation(operation); graph->addOperation(operation);
} }

@ -36,6 +36,7 @@ ProjectorLensDistortionOperation::ProjectorLensDistortionOperation() : NodeOpera
} }
void ProjectorLensDistortionOperation::initExecution() void ProjectorLensDistortionOperation::initExecution()
{ {
this->initMutex();
this->m_inputProgram = this->getInputSocketReader(0); this->m_inputProgram = this->getInputSocketReader(0);
} }
@ -65,6 +66,7 @@ void ProjectorLensDistortionOperation::executePixel(float *color, int x, int y,
void ProjectorLensDistortionOperation::deinitExecution() void ProjectorLensDistortionOperation::deinitExecution()
{ {
this->deinitMutex();
this->m_inputProgram = NULL; this->m_inputProgram = NULL;
} }
@ -77,16 +79,18 @@ bool ProjectorLensDistortionOperation::determineDependingAreaOfInterest(rcti *in
newInput.xmin = input->xmin - this->m_kr2 - 2; newInput.xmin = input->xmin - this->m_kr2 - 2;
newInput.xmax = input->xmax + this->m_kr2 + 2; newInput.xmax = input->xmax + this->m_kr2 + 2;
} else { } else {
newInput.xmin = 0; newInput.xmin = input->xmin-7; //(0.25f*20*1)+2 == worse case dispersion
newInput.ymin = input->ymin; newInput.ymin = input->ymin;
newInput.ymax = input->ymax; newInput.ymax = input->ymax;
newInput.xmax = this->m_inputProgram->getWidth(); newInput.xmax = input->xmax+7; //(0.25f*20*1)+2 == worse case dispersion
} }
return NodeOperation::determineDependingAreaOfInterest(&newInput, readOperation, output); return NodeOperation::determineDependingAreaOfInterest(&newInput, readOperation, output);
} }
void ProjectorLensDistortionOperation::updateDispersion(MemoryBuffer **inputBuffers) void ProjectorLensDistortionOperation::updateDispersion(MemoryBuffer **inputBuffers)
{ {
if (this->m_dispersionAvailable) return;
this->lockMutex();
if (!this->m_dispersionAvailable) { if (!this->m_dispersionAvailable) {
float result[4]; float result[4];
this->getInputSocketReader(1)->read(result, 0, 0, COM_PS_NEAREST, inputBuffers); this->getInputSocketReader(1)->read(result, 0, 0, COM_PS_NEAREST, inputBuffers);
@ -95,4 +99,5 @@ void ProjectorLensDistortionOperation::updateDispersion(MemoryBuffer **inputBuff
this->m_kr2 = this->m_kr * 20; this->m_kr2 = this->m_kr * 20;
this->m_dispersionAvailable = true; this->m_dispersionAvailable = true;
} }
this->unlockMutex();
} }

@ -42,6 +42,10 @@ ScreenLensDistortionOperation::ScreenLensDistortionOperation() : NodeOperation()
void ScreenLensDistortionOperation::initExecution() void ScreenLensDistortionOperation::initExecution()
{ {
this->m_inputProgram = this->getInputSocketReader(0); this->m_inputProgram = this->getInputSocketReader(0);
this->initMutex();
this->m_cx = 0.5f * (float)getWidth();
this->m_cy = 0.5f * (float)getHeight();
} }
void *ScreenLensDistortionOperation::initializeTileData(rcti *rect, MemoryBuffer **memoryBuffers) void *ScreenLensDistortionOperation::initializeTileData(rcti *rect, MemoryBuffer **memoryBuffers)
@ -139,40 +143,167 @@ void ScreenLensDistortionOperation::executePixel(float *outputColor, int x, int
void ScreenLensDistortionOperation::deinitExecution() void ScreenLensDistortionOperation::deinitExecution()
{ {
this->deinitMutex();
this->m_inputProgram = NULL; this->m_inputProgram = NULL;
} }
void ScreenLensDistortionOperation::determineUV(float result[2], float x, float y) const void ScreenLensDistortionOperation::determineUV(float result[4], float x, float y, float distortion, float dispersion)
{ {
if (!this->m_valuesAvailable) {
updateVariables(distortion, dispersion);
}
determineUV(result, x, y);
}
void ScreenLensDistortionOperation::determineUV(float result[4], float x, float y) const
{
const float height = this->getHeight();
const float width = this->getWidth();
float d, t, ln[6] = {0, 0, 0, 0, 0, 0};
const float v = this->m_sc * ((y + 0.5f) - this->m_cy) / this->m_cy; const float v = this->m_sc * ((y + 0.5f) - this->m_cy) / this->m_cy;
const float u = this->m_sc * ((x + 0.5f) - this->m_cx) / this->m_cx; const float u = this->m_sc * ((x + 0.5f) - this->m_cx) / this->m_cx;
const float t = ABS(MIN3(this->m_kr, this->m_kg, this->m_kb) * 4); const float uv_dot = u * u + v * v;
float d = 1.f / (1.f + sqrtf(t));
result[0] = (u * d + 0.5f) * getWidth() - 0.5f; if ((t = 1.f - this->m_kr4 * uv_dot) >= 0.f) {
result[1] = (v * d + 0.5f) * getHeight() - 0.5f; d = 1.f / (1.f + sqrtf(t));
ln[0] = (u * d + 0.5f) * width - 0.5f, ln[1] = (v * d + 0.5f) * height - 0.5f;
}
if ((t = 1.f - this->m_kg4 * uv_dot) >= 0.f) {
d = 1.f / (1.f + sqrtf(t));
ln[2] = (u * d + 0.5f) * width - 0.5f, ln[3] = (v * d + 0.5f) * height - 0.5f;
}
if ((t = 1.f - this->m_kb4 * uv_dot) >= 0.f) {
d = 1.f / (1.f + sqrtf(t));
ln[4] = (u * d + 0.5f) * width - 0.5f, ln[5] = (v * d + 0.5f) * height - 0.5f;
}
float jit = this->m_data->jit;
float z;
{
// RG
const int dx = ln[2] - ln[0], dy = ln[3] - ln[1];
const float dsf = sqrtf((float)dx * dx + dy * dy) + 1.f;
const int ds = (int)(jit ? ((dsf < 4.f) ? 2.f : sqrtf(dsf)) : dsf);
const float sd = 1.f / (float)ds;
z = ds;
const float tz = ((float)z + (1.0f)) * sd;
t = 1.0f - (this->m_kr4 + tz * this->m_drg) * uv_dot;
d = 1.0f / (1.f + sqrtf(t));
const float nx = (u * d + 0.5f) * width - 0.5f;
const float ny = (v * d + 0.5f) * height - 0.5f;
result[0] = nx;
result[1] = ny;
}
{
// GB
const int dx = ln[4] - ln[2], dy = ln[5] - ln[3];
const float dsf = sqrtf((float)dx * dx + dy * dy) + 1.f;
const int ds = (int)(jit ? ((dsf < 4.f) ? 2.f : sqrtf(dsf)) : dsf);
const float sd = 1.f / (float)ds;
z = ds;
const float tz = ((float)z + (1.0f)) * sd;
t = 1.f - (this->m_kg4 + tz * this->m_dgb) * uv_dot;
d = 1.f / (1.f + sqrtf(t));
const float nx = (u * d + 0.5f) * width - 0.5f;
const float ny = (v * d + 0.5f) * height - 0.5f;
result[2] = nx;
result[3] = ny;
}
} }
bool ScreenLensDistortionOperation::determineDependingAreaOfInterest(rcti *input, ReadBufferOperation *readOperation, rcti *output) bool ScreenLensDistortionOperation::determineDependingAreaOfInterest(rcti *input, ReadBufferOperation *readOperation, rcti *output)
{ {
rcti newInput; rcti newInputValue;
newInput.xmin = 0; newInputValue.xmin = 0;
newInput.ymin = 0; newInputValue.ymin = 0;
newInput.ymax = this->m_inputProgram->getHeight(); newInputValue.xmax = 2;
newInput.xmax = this->m_inputProgram->getWidth(); newInputValue.ymax = 2;
return NodeOperation::determineDependingAreaOfInterest(&newInput, readOperation, output);
NodeOperation *operation = getInputOperation(1);
if (operation->determineDependingAreaOfInterest(&newInputValue, readOperation, output) ) {
return true;
} }
void ScreenLensDistortionOperation::updateDispersionAndDistortion(MemoryBuffer **inputBuffers) operation = getInputOperation(2);
if (operation->determineDependingAreaOfInterest(&newInputValue, readOperation, output) ) {
return true;
}
#define MARGIN 64
#define UPDATE_INPUT \
newInput.xmin = MIN3(newInput.xmin, coords[0], coords[2]); \
newInput.ymin = MIN3(newInput.ymin, coords[1], coords[3]); \
newInput.xmax = MAX3(newInput.xmax, coords[0], coords[2]); \
newInput.ymax = MAX3(newInput.ymax, coords[1], coords[3]);
rcti newInput;
float margin;
float coords[4];
if (m_valuesAvailable) {
determineUV(coords, input->xmin, input->ymin);
newInput.xmin = coords[0];
newInput.ymin = coords[1];
newInput.xmax = coords[0];
newInput.ymax = coords[1];
UPDATE_INPUT;
determineUV(coords, input->xmin, input->ymax);
UPDATE_INPUT;
determineUV(coords, input->xmax, input->ymax);
UPDATE_INPUT;
determineUV(coords, input->xmax, input->ymin);
UPDATE_INPUT;
margin = (ABS(this->m_distortion)+this->m_dispersion)*MARGIN;
}
else
{ {
if (!this->m_valuesAvailable) { determineUV(coords, input->xmin, input->ymin, 1.0f, 1.0f);
float result[4]; newInput.xmin = coords[0];
this->getInputSocketReader(1)->read(result, 0, 0, COM_PS_NEAREST, inputBuffers); newInput.ymin = coords[1];
this->m_distortion = result[0]; newInput.xmax = coords[0];
this->getInputSocketReader(2)->read(result, 0, 0, COM_PS_NEAREST, inputBuffers); newInput.ymax = coords[1];
this->m_dispersion = result[0]; UPDATE_INPUT;
this->m_kg = MAX2(MIN2(this->m_distortion, 1.f), -0.999f); determineUV(coords, input->xmin, input->ymin, -1.0f, 1.0f);
UPDATE_INPUT;
determineUV(coords, input->xmin, input->ymax, -1.0f, 1.0f);
UPDATE_INPUT;
determineUV(coords, input->xmin, input->ymax, 1.0f, 1.0f);
UPDATE_INPUT;
determineUV(coords, input->xmax, input->ymax, -1.0f, 1.0f);
UPDATE_INPUT;
determineUV(coords, input->xmax, input->ymax, 1.0f, 1.0f);
UPDATE_INPUT;
determineUV(coords, input->xmax, input->ymin, -1.0f, 1.0f);
UPDATE_INPUT;
determineUV(coords, input->xmax, input->ymin, 1.0f, 1.0f);
UPDATE_INPUT;
margin=MARGIN;
}
#undef UPDATE_INPUT
newInput.xmin -= margin;
newInput.ymin -= margin;
newInput.xmax += margin;
newInput.ymax += margin;
operation = getInputOperation(0);
if (operation->determineDependingAreaOfInterest(&newInput, readOperation, output) ) {
return true;
}
return false;
}
void ScreenLensDistortionOperation::updateVariables(float distortion, float dispersion)
{
this->m_kg = MAX2(MIN2(distortion, 1.f), -0.999f);
// smaller dispersion range for somewhat more control // smaller dispersion range for somewhat more control
const float d = 0.25f * MAX2(MIN2(this->m_dispersion, 1.f), 0.f); const float d = 0.25f * MAX2(MIN2(dispersion, 1.f), 0.f);
this->m_kr = MAX2(MIN2((this->m_kg + d), 1.0f), -0.999f); this->m_kr = MAX2(MIN2((this->m_kg + d), 1.0f), -0.999f);
this->m_kb = MAX2(MIN2((this->m_kg - d), 1.0f), -0.999f); this->m_kb = MAX2(MIN2((this->m_kg - d), 1.0f), -0.999f);
this->m_maxk = MAX3(this->m_kr, this->m_kg, this->m_kb); this->m_maxk = MAX3(this->m_kr, this->m_kg, this->m_kb);
@ -183,8 +314,21 @@ void ScreenLensDistortionOperation::updateDispersionAndDistortion(MemoryBuffer *
this->m_kr4 = this->m_kr * 4.0f; this->m_kr4 = this->m_kr * 4.0f;
this->m_kg4 = this->m_kg * 4.0f; this->m_kg4 = this->m_kg * 4.0f;
this->m_kb4 = this->m_kb * 4.0f; this->m_kb4 = this->m_kb * 4.0f;
this->m_cx = 0.5f * (float)getWidth(); }
this->m_cy = 0.5f * (float)getHeight();
void ScreenLensDistortionOperation::updateDispersionAndDistortion(MemoryBuffer **inputBuffers)
{
if (this->m_valuesAvailable) return;
this->lockMutex();
if (!this->m_valuesAvailable) {
float result[4];
this->getInputSocketReader(1)->read(result, 0, 0, COM_PS_NEAREST, inputBuffers);
this->m_distortion = result[0];
this->getInputSocketReader(2)->read(result, 0, 0, COM_PS_NEAREST, inputBuffers);
this->m_dispersion = result[0];
updateVariables(this->m_distortion, this->m_dispersion);
this->m_valuesAvailable = true; this->m_valuesAvailable = true;
} }
this->unlockMutex();
} }

@ -66,9 +66,23 @@ public:
bool determineDependingAreaOfInterest(rcti *input, ReadBufferOperation *readOperation, rcti *output); bool determineDependingAreaOfInterest(rcti *input, ReadBufferOperation *readOperation, rcti *output);
/**
* @brief Set the distortion and dispersion and precalc some values
* @param distortion
* @param dispersion
*/
void setDistortionAndDispersion(float distortion, float dispersion) {
this->m_distortion = distortion;
this->m_dispersion = dispersion;
updateVariables(distortion, dispersion);
this->m_valuesAvailable = true;
}
private: private:
void determineUV(float *result, float x, float y) const; void determineUV(float result[4], float x, float y) const;
void determineUV(float result[4], float x, float y, float distortion, float dispersion);
void updateDispersionAndDistortion(MemoryBuffer **inputBuffers); void updateDispersionAndDistortion(MemoryBuffer **inputBuffers);
void updateVariables(float distortion, float dispersion);
}; };
#endif #endif

@ -416,6 +416,7 @@ static char *get_driver_path_hack(bContext *C, PointerRNA *ptr, PropertyRNA *pro
char *basepath = RNA_path_from_ID_to_property(ptr, prop); char *basepath = RNA_path_from_ID_to_property(ptr, prop);
char *path = basepath; /* in case no remapping is needed */ char *path = basepath; /* in case no remapping is needed */
/* Remapping will only be performed in the Properties Editor, as only this /* Remapping will only be performed in the Properties Editor, as only this
* restricts the subspace of options to the 'active' data (a manageable state) * restricts the subspace of options to the 'active' data (a manageable state)
*/ */
@ -426,23 +427,6 @@ static char *get_driver_path_hack(bContext *C, PointerRNA *ptr, PropertyRNA *pro
if (ob && id) { if (ob && id) {
/* only id-types which can be remapped to go through objects should be considered */ /* only id-types which can be remapped to go through objects should be considered */
switch (GS(id->name)) { switch (GS(id->name)) {
case ID_MA: /* materials */
{
Material *ma = give_current_material(ob, ob->actcol);
/* assumes: material will only be shown if it is active objects's active material it's ok */
if ((ID *)ma == id) {
/* create new path */
// TODO: use RNA path functions to construct instead?
path = BLI_sprintfN("material_slots[\"%s\"].material.%s",
ma->id.name + 2, basepath);
/* free old one */
MEM_freeN(basepath);
}
}
break;
case ID_TE: /* textures */ case ID_TE: /* textures */
{ {
Material *ma = give_current_material(ob, ob->actcol); Material *ma = give_current_material(ob, ob->actcol);
@ -452,6 +436,7 @@ static char *get_driver_path_hack(bContext *C, PointerRNA *ptr, PropertyRNA *pro
if ((ID *)tex == id) { if ((ID *)tex == id) {
/* create new path */ /* create new path */
// TODO: use RNA path functions to construct step by step instead? // TODO: use RNA path functions to construct step by step instead?
// FIXME: maybe this isn't even needed anymore...
path = BLI_sprintfN("material_slots[\"%s\"].material.texture_slots[\"%s\"].texture.%s", path = BLI_sprintfN("material_slots[\"%s\"].material.texture_slots[\"%s\"].texture.%s",
ma->id.name + 2, tex->id.name + 2, basepath); ma->id.name + 2, tex->id.name + 2, basepath);

@ -201,7 +201,7 @@ FCurve *verify_fcurve(bAction *act, const char group[], PointerRNA *ptr,
grp = (bActionGroup *)BLI_findlink(&pose->agroups, (pchan->agrp_index - 1)); grp = (bActionGroup *)BLI_findlink(&pose->agroups, (pchan->agrp_index - 1));
if (grp) { if (grp) {
agrp->customCol = grp->customCol; agrp->customCol = grp->customCol;
action_group_colors_sync(agrp); action_group_colors_sync(agrp, grp);
} }
} }
} }

@ -237,7 +237,7 @@ static int group_objects_remove_exec(bContext *C, wmOperator *op)
/* can be called with C == NULL */ /* can be called with C == NULL */
static EnumPropertyItem *group_objects_remove_itemf(bContext *C, PointerRNA *UNUSED(ptr), PropertyRNA *UNUSED(prop), int *free) static EnumPropertyItem *group_objects_remove_itemf(bContext *C, PointerRNA *UNUSED(ptr), PropertyRNA *UNUSED(prop), int *free)
{ {
Object *ob = ED_object_context(C); Object *ob;
EnumPropertyItem *item = NULL, item_tmp = {0}; EnumPropertyItem *item = NULL, item_tmp = {0};
int totitem = 0; int totitem = 0;
@ -245,6 +245,8 @@ static EnumPropertyItem *group_objects_remove_itemf(bContext *C, PointerRNA *UNU
return DummyRNA_NULL_items; return DummyRNA_NULL_items;
} }
ob = ED_object_context(C);
/* check that the action exists */ /* check that the action exists */
if (ob) { if (ob) {
Group *group = NULL; Group *group = NULL;

@ -1312,7 +1312,10 @@ static void node_shader_buts_tex_image(uiLayout *layout, bContext *C, PointerRNA
uiTemplateID(layout, C, ptr, "image", NULL, "IMAGE_OT_open", NULL); uiTemplateID(layout, C, ptr, "image", NULL, "IMAGE_OT_open", NULL);
uiItemR(layout, ptr, "color_space", 0, "", ICON_NONE); uiItemR(layout, ptr, "color_space", 0, "", ICON_NONE);
node_buts_image_user(layout, C, ptr, &imaptr, &iuserptr); /* note: image user properties used directly here, unlike compositor image node,
* which redefines them in the node struct RNA to get proper updates.
*/
node_buts_image_user(layout, C, &iuserptr, &imaptr, &iuserptr);
} }
static void node_shader_buts_tex_environment(uiLayout *layout, bContext *C, PointerRNA *ptr) static void node_shader_buts_tex_environment(uiLayout *layout, bContext *C, PointerRNA *ptr)

@ -144,7 +144,7 @@ void rna_ActionGroup_colorset_set(PointerRNA *ptr, int value)
grp->customCol = value; grp->customCol = value;
/* sync colors stored with theme colors based on the index specified */ /* sync colors stored with theme colors based on the index specified */
action_group_colors_sync(grp); action_group_colors_sync(grp, NULL);
} }
} }

@ -163,6 +163,7 @@ static PyObject *pyrna_op_call(BPy_BMeshOpFunc *self, PyObject *args, PyObject *
PyErr_Format(PyExc_TypeError, PyErr_Format(PyExc_TypeError,
"%.200s: keyword \"%.200s\" expected an int, not %.200s", "%.200s: keyword \"%.200s\" expected an int, not %.200s",
self->opname, slot_name, Py_TYPE(value)->tp_name); self->opname, slot_name, Py_TYPE(value)->tp_name);
return NULL;
} }
else { else {
slot->data.i = (int)param; slot->data.i = (int)param;
@ -176,12 +177,47 @@ static PyObject *pyrna_op_call(BPy_BMeshOpFunc *self, PyObject *args, PyObject *
PyErr_Format(PyExc_TypeError, PyErr_Format(PyExc_TypeError,
"%.200s: keyword \"%.200s\" expected a float, not %.200s", "%.200s: keyword \"%.200s\" expected a float, not %.200s",
self->opname, slot_name, Py_TYPE(value)->tp_name); self->opname, slot_name, Py_TYPE(value)->tp_name);
return NULL;
} }
else { else {
slot->data.f = param; slot->data.f = param;
} }
break; break;
} }
case BMO_OP_SLOT_MAT:
{
/* XXX - BMesh operator design is crappy here, operator slot should define matrix size,
* not the caller! */
unsigned short size;
if (!MatrixObject_Check(value)) {
PyErr_Format(PyExc_TypeError,
"%.200s: keyword \"%.200s\" expected a Matrix, not %.200s",
self->opname, slot_name, Py_TYPE(value)->tp_name);
return NULL;
}
else if (BaseMath_ReadCallback((MatrixObject *)value) == -1) {
return NULL;
}
else if (((size = ((MatrixObject *)value)->num_col) != ((MatrixObject *)value)->num_row) ||
(ELEM(size, 3, 4) == FALSE))
{
PyErr_Format(PyExc_TypeError,
"%.200s: keyword \"%.200s\" expected a 3x3 or 4x4 matrix Matrix",
self->opname, slot_name);
return NULL;
}
BMO_slot_mat_set(&bmop, slot_name, ((MatrixObject *)value)->matrix, size);
break;
}
case BMO_OP_SLOT_VEC:
{
/* passing slot name here is a bit non-descriptive */
if (mathutils_array_parse(slot->data.vec, 3, 3, value, slot_name) == -1) {
return NULL;
}
break;
}
case BMO_OP_SLOT_ELEMENT_BUF: case BMO_OP_SLOT_ELEMENT_BUF:
{ {
/* there are many ways we could interpret arguments, for now... /* there are many ways we could interpret arguments, for now...
@ -258,6 +294,7 @@ static PyObject *pyrna_op_call(BPy_BMeshOpFunc *self, PyObject *args, PyObject *
"%.200s: keyword \"%.200s\" expected " "%.200s: keyword \"%.200s\" expected "
"a bmesh sequence, list, (htype, flag) pair, not %.200s", "a bmesh sequence, list, (htype, flag) pair, not %.200s",
self->opname, slot_name, Py_TYPE(value)->tp_name); self->opname, slot_name, Py_TYPE(value)->tp_name);
return NULL;
} }
#undef BPY_BM_GENERIC_MESH_TEST #undef BPY_BM_GENERIC_MESH_TEST

@ -3508,8 +3508,8 @@ static PyObject *pyrna_struct_getattro(BPy_StructRNA *self, PyObject *pyname)
PyList_Append(ret, linkptr); PyList_Append(ret, linkptr);
Py_DECREF(linkptr); Py_DECREF(linkptr);
} }
}
break; break;
}
default: default:
/* should never happen */ /* should never happen */
BLI_assert(!"Invalid context type"); BLI_assert(!"Invalid context type");