commit 13a3603578 (parent 3b975dddcc)
scripts/addons_core/bl_pkg/Makefile  (new file, 136 lines)
@@ -0,0 +1,136 @@
# SPDX-FileCopyrightText: 2011-2023 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later

# note: this isn't needed for building,
# it's just for some convenience targets.

# Needed for when tests are run from another directory: `make -C ./path/to/this/directory`
BASE_DIR := ${CURDIR}

PY_FILES=$(shell find ./ -type f -name '*.py')
# Filter out files which use `bpy`.
PY_FILES_MYPY=$(filter-out \
	./__init__.py \
	./bl_extension_cli.py \
	./bl_extension_local.py \
	./bl_extension_monkeypatch.py \
	./bl_extension_notify.py \
	./bl_extension_ops.py \
	./bl_extension_ui.py \
	./bl_extension_utils.py \
	./wheel_manager.py \
	./example_extension/__init__.py \
	./example_extension/foo.py \
	,$(PY_FILES))

PY_FILES_MYPY_STANDALONE= \
	./bl_extension_utils.py \
	./bl_extension_cli.py \
	./wheel_manager.py

EXTRA_WATCH_FILES=Makefile

# For tests that launch Blender directly.
BLENDER_BIN?=$(shell which blender)
PYTHON_BIN?=$(shell which python3)

pep8: FORCE
	@flake8 $(PY_FILES) --ignore=E501,E302,E123,E126,E128,E129,E124,E122,W504

# `--no-namespace-packages` is needed, otherwise `./cli/blender_ext.py` loads in parent modules
# (the Blender add-on which imports `bpy`).
check_mypy: FORCE
	@mypy --no-namespace-packages --strict $(PY_FILES_MYPY)
	@mypy --strict --follow-imports=skip $(PY_FILES_MYPY_STANDALONE)

check_ruff: FORCE
	@env --chdir="$(BASE_DIR)" ruff check $(PY_FILES_MYPY)
	@env --chdir="$(BASE_DIR)" ruff check $(PY_FILES_MYPY_STANDALONE)

check_pylint:
	@env --chdir="$(BASE_DIR)" \
		pylint $(PY_FILES) \
		--disable=C0111,C0301,C0302,C0103,C0415,R1705,R0902,R0903,R0913,E0611,E0401,I1101,R0801,C0209,W0511,W0718,W0719,C0413,R0911,R0912,R0914,R0915

# python3 ./tests/test_cli.py
test: FORCE
	@env --chdir="$(BASE_DIR)" \
		USE_HTTP=0 \
		$(PYTHON_BIN) ./tests/test_cli.py
	@env --chdir="$(BASE_DIR)" \
		USE_HTTP=1 \
		$(PYTHON_BIN) ./tests/test_cli.py

# NOTE: these rely on the Blender binary.
test_blender: FORCE
	@env --chdir="$(BASE_DIR)" \
		ASAN_OPTIONS=check_initialization_order=0:leak_check_at_exit=0 \
		$(BLENDER_BIN) --background --factory-startup -noaudio --python ./tests/test_blender.py -- --verbose

watch_test_blender: FORCE
	@cd "$(BASE_DIR)" && \
	while true; do \
		$(MAKE) test_blender; \
		inotifywait -q -e close_write $(EXTRA_WATCH_FILES) $(PY_FILES) ; \
		tput clear; \
	done

test_cli_blender: FORCE
	@env BLENDER_BIN=$(BLENDER_BIN) \
		$(PYTHON_BIN) ./tests/test_cli_blender.py

watch_test_cli_blender: FORCE
	@while true; do \
		env BLENDER_BIN=$(BLENDER_BIN) \
			$(MAKE) test_cli_blender; \
		inotifywait -q -e close_write $(EXTRA_WATCH_FILES) $(PY_FILES) ; \
		tput clear; \
	done


# https://www.cyberciti.biz/faq/howto-create-linux-ram-disk-filesystem/
# mkfs -q /dev/ram1 8192
# mkdir -p /ramcache
# sudo mount /dev/ram1 /ramcache
# sudo chmod 777 /ramcache
# mkdir /ramcache/tmp

watch_test: FORCE
	@cd "$(BASE_DIR)" && \
	while true; do \
		$(MAKE) test; \
		inotifywait -q -e close_write $(EXTRA_WATCH_FILES) $(PY_FILES) ; \
		tput clear; \
	done

watch_check_mypy:
	@cd "$(BASE_DIR)" && \
	while true; do \
		$(MAKE) check_mypy; \
		inotifywait -q -e close_write $(EXTRA_WATCH_FILES) \
			$(PY_FILES_MYPY) \
			./bl_extension_utils.py ; \
		tput clear; \
	done

watch_check_ruff:
	@cd "$(BASE_DIR)" && \
	while true; do \
		$(MAKE) check_ruff; \
		inotifywait -q -e close_write $(EXTRA_WATCH_FILES) \
			$(PY_FILES_MYPY) \
			./bl_extension_utils.py ; \
		tput clear; \
	done

watch_check_pylint:
	@cd "$(BASE_DIR)" && \
	while true; do \
		$(MAKE) check_pylint; \
		inotifywait -q -e close_write $(EXTRA_WATCH_FILES) $(PY_FILES) ; \
		tput clear; \
	done


FORCE:
scripts/addons_core/bl_pkg/__init__.py  (new file, 545 lines)
@@ -0,0 +1,545 @@
# SPDX-FileCopyrightText: 2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

bl_info = {
    "name": "Blender Extensions",
    "author": "Campbell Barton",
    "version": (0, 0, 1),
    "blender": (4, 0, 0),
    "location": "Edit -> Preferences -> Extensions",
    "description": "Extension repository support for remote repositories",
    "warning": "",
    # "doc_url": "{BLENDER_MANUAL_URL}/addons/bl_pkg/bl_pkg.html",
    "support": 'OFFICIAL',
    "category": "System",
}

if "bpy" in locals():
    import importlib
    from . import (
        bl_extension_ops,
        bl_extension_ui,
        bl_extension_utils,
    )
    importlib.reload(bl_extension_ops)
    importlib.reload(bl_extension_ui)
    importlib.reload(bl_extension_utils)
    del (
        bl_extension_ops,
        bl_extension_ui,
        bl_extension_utils,
    )
    del importlib

import bpy

from bpy.props import (
    BoolProperty,
    EnumProperty,
    IntProperty,
    StringProperty,
)

from bpy.types import (
    AddonPreferences,
)


class BlExtPreferences(AddonPreferences):
    bl_idname = __name__
    timeout: IntProperty(
        name="Time Out",
        default=10,
    )
    show_development_reports: BoolProperty(
        name="Show Development Reports",
        description=(
            "Show the result of running commands in the main interface. "
            "This has the advantage that multiple processes that run at once have their errors properly grouped, "
            "which is not the case for reports which are mixed together"
        ),
        default=False,
    )


class StatusInfoUI:
    __slots__ = (
        # The title of the status/notification.
        "title",
        # The result of an operation.
        "log",
        # Set to true when running (via a modal operator).
        "running",
    )

    def __init__(self):
        self.log = []
        self.title = ""
        self.running = False

    def from_message(self, title, text):
        log_new = []
        for line in text.split("\n"):
            if not (line := line.rstrip()):
                continue
            # Don't show any prefix for "Info" since this is implied.
            log_new.append(('STATUS', line.removeprefix("Info: ")))
        if not log_new:
            return

        self.title = title
        self.running = False
        self.log = log_new


def cookie_from_session():
    # This path is a unique string for this session.
    # Don't use a constant as it may be changed at run-time.
    return bpy.app.tempdir


# -----------------------------------------------------------------------------
# Shared Low Level Utilities

def repo_paths_or_none(repo_item):
    if (directory := repo_item.directory) == "":
        return None, None
    if repo_item.use_remote_url:
        if not (remote_url := repo_item.remote_url):
            return None, None
    else:
        remote_url = ""
    return directory, remote_url


def repo_active_or_none():
    prefs = bpy.context.preferences
    if not prefs.experimental.use_extension_repos:
        return

    extensions = prefs.extensions
    active_extension_index = extensions.active_repo
    try:
        active_repo = None if active_extension_index < 0 else extensions.repos[active_extension_index]
    except IndexError:
        active_repo = None
    return active_repo


def print_debug(*args, **kw):
    if not bpy.app.debug:
        return
    print(*args, **kw)


use_repos_to_notify = False


def repos_to_notify():
    repos_notify = []
    if not bpy.app.background:
        # To use notifications on startup requires:
        # - The splash displayed.
        # - The status bar displayed.
        #
        # Since it's not all that common to disable the status bar, just run notifications
        # if any repositories are marked to run notifications.

        prefs = bpy.context.preferences
        if prefs.experimental.use_extension_repos:
            extension_repos = prefs.extensions.repos
            for repo_item in extension_repos:
                if not repo_item.enabled:
                    continue
                if not repo_item.use_sync_on_startup:
                    continue
                if not repo_item.use_remote_url:
                    continue
                # Invalid; if there is no remote path this can't update.
                if not repo_item.remote_url:
                    continue
                repos_notify.append(repo_item)
    return repos_notify


# -----------------------------------------------------------------------------
# Handlers

@bpy.app.handlers.persistent
def extenion_repos_sync(*_):
    # This is called from operators (create or an explicit call to sync),
    # so calling a modal operator is "safe".
    if (active_repo := repo_active_or_none()) is None:
        return

    print_debug("SYNC:", active_repo.name)
    # There may be nothing to upgrade.

    from contextlib import redirect_stdout
    import io
    stdout = io.StringIO()

    with redirect_stdout(stdout):
        bpy.ops.bl_pkg.repo_sync_all('INVOKE_DEFAULT', use_active_only=True)

    if text := stdout.getvalue():
        repo_status_text.from_message("Sync \"{:s}\"".format(active_repo.name), text)
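Both this sync handler and the upgrade handler below capture the operator's
printed reports with `contextlib.redirect_stdout` and re-route them into the
status UI. A minimal standalone sketch of that capture pattern, with a
hypothetical `noisy_operation` standing in for the `bpy.ops` call:

import io
from contextlib import redirect_stdout

def noisy_operation():
    # Stands in for an operator call that prints its reports.
    print("Info: 2 packages updated")

buffer = io.StringIO()
with redirect_stdout(buffer):
    noisy_operation()

# The captured text can then be parsed or shown in a UI
# rather than being lost in the console.
captured = buffer.getvalue()
assert "packages updated" in captured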
@bpy.app.handlers.persistent
def extenion_repos_upgrade(*_):
    # This is called from operators (create or an explicit call to sync),
    # so calling a modal operator is "safe".
    if (active_repo := repo_active_or_none()) is None:
        return

    print_debug("UPGRADE:", active_repo.name)

    from contextlib import redirect_stdout
    import io
    stdout = io.StringIO()

    with redirect_stdout(stdout):
        bpy.ops.bl_pkg.pkg_upgrade_all('INVOKE_DEFAULT', use_active_only=True)

    if text := stdout.getvalue():
        repo_status_text.from_message("Upgrade \"{:s}\"".format(active_repo.name), text)


@bpy.app.handlers.persistent
def extenion_repos_files_clear(directory, _):
    # Perform a "safe" file deletion by only removing files known to be either
    # packages or known extension meta-data.
    #
    # Safer because removing a repository which points to an arbitrary path
    # has the potential to wipe user data, see #119481.
    import shutil
    import os
    from .bl_extension_utils import scandir_with_demoted_errors
    # Unlikely but possible a new repository is immediately removed before initializing,
    # avoid errors in this case.
    if not os.path.isdir(directory):
        return

    if os.path.isdir(path := os.path.join(directory, ".blender_ext")):
        try:
            shutil.rmtree(path)
        except BaseException as ex:
            print("Failed to remove files", ex)

    for entry in scandir_with_demoted_errors(directory):
        if not entry.is_dir():
            continue
        path = entry.path
        if not os.path.exists(os.path.join(path, "blender_manifest.toml")):
            continue
        try:
            shutil.rmtree(path)
        except BaseException as ex:
            print("Failed to remove files", ex)


# -----------------------------------------------------------------------------
# Wrap Handlers

_monkeypatch_extenions_repos_update_dirs = set()


def monkeypatch_extenions_repos_update_pre_impl():
    _monkeypatch_extenions_repos_update_dirs.clear()

    extension_repos = bpy.context.preferences.extensions.repos
    for repo_item in extension_repos:
        if not repo_item.enabled:
            continue
        directory, _repo_path = repo_paths_or_none(repo_item)
        if directory is None:
            continue

        _monkeypatch_extenions_repos_update_dirs.add(directory)


def monkeypatch_extenions_repos_update_post_impl():
    import os
    from . import bl_extension_ops

    bl_extension_ops.repo_cache_store_refresh_from_prefs()

    # Refresh newly added directories.
    extension_repos = bpy.context.preferences.extensions.repos
    for repo_item in extension_repos:
        if not repo_item.enabled:
            continue
        directory, _repo_path = repo_paths_or_none(repo_item)
        if directory is None:
            continue
        # Happens for newly added extension directories.
        if not os.path.exists(directory):
            continue
        if directory in _monkeypatch_extenions_repos_update_dirs:
            continue
        # Ignore missing because the new repo might not have a JSON file.
        repo_cache_store.refresh_remote_from_directory(directory=directory, error_fn=print, force=True)
        repo_cache_store.refresh_local_from_directory(directory=directory, error_fn=print, ignore_missing=True)

    _monkeypatch_extenions_repos_update_dirs.clear()


@bpy.app.handlers.persistent
def monkeypatch_extensions_repos_update_pre(*_):
    print_debug("PRE:")
    try:
        monkeypatch_extenions_repos_update_pre_impl()
    except BaseException as ex:
        print_debug("ERROR", str(ex))
    try:
        monkeypatch_extensions_repos_update_pre._fn_orig()
    except BaseException as ex:
        print_debug("ERROR", str(ex))


@bpy.app.handlers.persistent
def monkeypatch_extenions_repos_update_post(*_):
    print_debug("POST:")
    try:
        monkeypatch_extenions_repos_update_post._fn_orig()
    except BaseException as ex:
        print_debug("ERROR", str(ex))
    try:
        monkeypatch_extenions_repos_update_post_impl()
    except BaseException as ex:
        print_debug("ERROR", str(ex))


def monkeypatch_install():
    import addon_utils

    handlers = bpy.app.handlers._extension_repos_update_pre
    fn_orig = addon_utils._initialize_extension_repos_pre
    fn_override = monkeypatch_extensions_repos_update_pre
    for i, fn in enumerate(handlers):
        if fn is fn_orig:
            handlers[i] = fn_override
            fn_override._fn_orig = fn_orig
            break

    handlers = bpy.app.handlers._extension_repos_update_post
    fn_orig = addon_utils._initialize_extension_repos_post
    fn_override = monkeypatch_extenions_repos_update_post
    for i, fn in enumerate(handlers):
        if fn is fn_orig:
            handlers[i] = fn_override
            fn_override._fn_orig = fn_orig
            break


def monkeypatch_uninstall():
    handlers = bpy.app.handlers._extension_repos_update_pre
    fn_override = monkeypatch_extensions_repos_update_pre
    for i in range(len(handlers)):
        fn = handlers[i]
        if fn is fn_override:
            handlers[i] = fn_override._fn_orig
            del fn_override._fn_orig
            break

    handlers = bpy.app.handlers._extension_repos_update_post
    fn_override = monkeypatch_extenions_repos_update_post
    for i in range(len(handlers)):
        fn = handlers[i]
        if fn is fn_override:
            handlers[i] = fn_override._fn_orig
            del fn_override._fn_orig
            break
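The install/uninstall pair above swaps a callback in a handler list by
identity, stashing the original function on the override so it can be called
through and restored later. A minimal standalone sketch of the same pattern
(all names hypothetical):

def _original(*args):
    print("original handler")

def _override(*args):
    print("wrapped handler")
    # Call through to the swapped-out original.
    _override._fn_orig(*args)

handlers = [_original]

# Install: replace by identity, keeping a reference for restoring.
for i, fn in enumerate(handlers):
    if fn is _original:
        handlers[i] = _override
        _override._fn_orig = fn
        break

# Uninstall: put the original back and drop the stashed reference.
for i, fn in enumerate(handlers):
    if fn is _override:
        handlers[i] = fn._fn_orig
        del fn._fn_orig
        break

handlers[0]()  # Prints "original handler" again after the uninstall.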
# Text to display in the UI (while running...).
repo_status_text = StatusInfoUI()

# Singleton to cache all repositories' JSON data and handle refreshing.
repo_cache_store = None


# -----------------------------------------------------------------------------
# Theme Integration

def theme_preset_draw(menu, context):
    from .bl_extension_utils import (
        pkg_theme_file_list,
    )
    layout = menu.layout
    repos_all = [
        repo_item for repo_item in context.preferences.extensions.repos
        if repo_item.enabled
    ]
    if not repos_all:
        return
    import os
    menu_idname = type(menu).__name__
    for i, pkg_manifest_local in enumerate(repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print)):
        if pkg_manifest_local is None:
            continue
        repo_item = repos_all[i]
        directory = repo_item.directory
        for pkg_idname, value in pkg_manifest_local.items():
            if value["type"] != "theme":
                continue

            theme_dir, theme_files = pkg_theme_file_list(directory, pkg_idname)
            for filename in theme_files:
                props = layout.operator(menu.preset_operator, text=bpy.path.display_name(filename))
                props.filepath = os.path.join(theme_dir, filename)
                props.menu_idname = menu_idname


def cli_extension(argv):
    from . import bl_extension_cli
    return bl_extension_cli.cli_extension_handler(argv)


# -----------------------------------------------------------------------------
# Registration

classes = (
    BlExtPreferences,
)

cli_commands = []


def register():
    # pylint: disable-next=global-statement
    global repo_cache_store

    from bpy.types import WindowManager
    from . import (
        bl_extension_ops,
        bl_extension_ui,
        bl_extension_utils,
    )

    if repo_cache_store is None:
        repo_cache_store = bl_extension_utils.RepoCacheStore()
    else:
        repo_cache_store.clear()
    bl_extension_ops.repo_cache_store_refresh_from_prefs()

    for cls in classes:
        bpy.utils.register_class(cls)

    bl_extension_ops.register()
    bl_extension_ui.register()

    WindowManager.extension_search = StringProperty(
        name="Filter",
        description="Filter by extension name, author & category",
        options={'TEXTEDIT_UPDATE'},
    )
    WindowManager.extension_type = EnumProperty(
        items=(
            ('ALL', "All", "Show all extensions"),
            None,
            ('ADDON', "Add-ons", "Only show add-ons"),
            ('THEME', "Themes", "Only show themes"),
        ),
        name="Filter by Type",
        description="Show extensions by type",
        default='ALL',
    )
    WindowManager.extension_enabled_only = BoolProperty(
        name="Show Enabled Extensions",
        description="Only show enabled extensions",
    )
    WindowManager.extension_updates_only = BoolProperty(
        name="Show Updates Available",
        description="Only show extensions with updates available",
    )
    WindowManager.extension_installed_only = BoolProperty(
        name="Show Installed Extensions",
        description="Only show installed extensions",
    )
    WindowManager.extension_show_legacy_addons = BoolProperty(
        name="Show Legacy Add-Ons",
        description="Only show extensions, hiding legacy add-ons",
        default=True,
    )

    from bl_ui.space_userpref import USERPREF_MT_interface_theme_presets
    USERPREF_MT_interface_theme_presets.append(theme_preset_draw)

    handlers = bpy.app.handlers._extension_repos_sync
    handlers.append(extenion_repos_sync)

    handlers = bpy.app.handlers._extension_repos_upgrade
    handlers.append(extenion_repos_upgrade)

    handlers = bpy.app.handlers._extension_repos_files_clear
    handlers.append(extenion_repos_files_clear)

    cli_commands.append(bpy.utils.register_cli_command("extension", cli_extension))

    global use_repos_to_notify
    if (repos_notify := repos_to_notify()):
        use_repos_to_notify = True
        from . import bl_extension_notify
        bl_extension_notify.register(repos_notify)
    del repos_notify

    monkeypatch_install()


def unregister():
    # pylint: disable-next=global-statement
    global repo_cache_store

    from bpy.types import WindowManager
    from . import (
        bl_extension_ops,
        bl_extension_ui,
    )

    bl_extension_ops.unregister()
    bl_extension_ui.unregister()

    del WindowManager.extension_search
    del WindowManager.extension_type
    del WindowManager.extension_enabled_only
    # Registered in `register`; removing it here keeps the two functions symmetrical.
    del WindowManager.extension_updates_only
    del WindowManager.extension_installed_only
    del WindowManager.extension_show_legacy_addons

    for cls in classes:
        bpy.utils.unregister_class(cls)

    if repo_cache_store is None:
        pass
    else:
        repo_cache_store.clear()
        repo_cache_store = None

    from bl_ui.space_userpref import USERPREF_MT_interface_theme_presets
    USERPREF_MT_interface_theme_presets.remove(theme_preset_draw)

    handlers = bpy.app.handlers._extension_repos_sync
    if extenion_repos_sync in handlers:
        handlers.remove(extenion_repos_sync)

    handlers = bpy.app.handlers._extension_repos_upgrade
    if extenion_repos_upgrade in handlers:
        handlers.remove(extenion_repos_upgrade)

    handlers = bpy.app.handlers._extension_repos_files_clear
    if extenion_repos_files_clear in handlers:
        handlers.remove(extenion_repos_files_clear)

    for cmd in cli_commands:
        bpy.utils.unregister_cli_command(cmd)
    cli_commands.clear()

    global use_repos_to_notify
    if use_repos_to_notify:
        use_repos_to_notify = False
        from . import bl_extension_notify
        bl_extension_notify.unregister()

    monkeypatch_uninstall()
scripts/addons_core/bl_pkg/bl_extension_cli.py  (new file, 829 lines)
@@ -0,0 +1,829 @@
# SPDX-FileCopyrightText: 2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

"""
Command line access for extension operations, see:

    blender --command extension --help
"""

__all__ = (
    "cli_extension_handler",
)

import argparse
import os
import sys

from typing import (
    Any,
    Dict,
    List,
    Optional,
    Tuple,
    Union,
)

show_color = (
    False if os.environ.get("NO_COLOR") else
    sys.stdout.isatty()
)


if show_color:
    color_codes = {
        'black': '\033[0;30m',
        'bright_gray': '\033[0;37m',
        'blue': '\033[0;34m',
        'white': '\033[1;37m',
        'green': '\033[0;32m',
        'bright_blue': '\033[1;34m',
        'cyan': '\033[0;36m',
        'bright_green': '\033[1;32m',
        'red': '\033[0;31m',
        'bright_cyan': '\033[1;36m',
        'purple': '\033[0;35m',
        'bright_red': '\033[1;31m',
        'yellow': '\033[0;33m',
        'bright_purple': '\033[1;35m',
        'dark_gray': '\033[1;30m',
        'bright_yellow': '\033[1;33m',
        'normal': '\033[0m',
    }

    def colorize(text: str, color: str) -> str:
        return (color_codes[color] + text + color_codes["normal"])
else:
    def colorize(text: str, color: str) -> str:
        return text
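The `colorize` pair above follows the informal NO_COLOR convention
(https://no-color.org): escape codes are only emitted when stdout is a TTY and
`NO_COLOR` is unset, so piping the CLI output to a file yields plain text. A
small standalone sketch of the same check:

import os
import sys

use_color = (not os.environ.get("NO_COLOR")) and sys.stdout.isatty()

def paint_green(text: str) -> str:
    # Green on an interactive terminal, plain text when piped or opted out.
    return "\033[0;32m" + text + "\033[0m" if use_color else text

print(paint_green("installed"))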
# -----------------------------------------------------------------------------
# Wrap Operators


def blender_preferences_write() -> bool:
    import bpy  # type: ignore
    try:
        ok = 'FINISHED' in bpy.ops.wm.save_userpref()
    except RuntimeError as ex:
        print("Failed to write preferences: {!r}".format(ex))
        ok = False
    return ok


# -----------------------------------------------------------------------------
# Argument Implementation (Utilities)

class subcmd_utils:

    def __new__(cls) -> Any:
        raise RuntimeError("{:s} should not be instantiated".format(cls.__name__))

    @staticmethod
    def sync(
            *,
            show_done: bool = True,
    ) -> bool:
        import bpy
        try:
            bpy.ops.bl_pkg.repo_sync_all()
            if show_done:
                sys.stdout.write("Done...\n\n")
        except BaseException:
            print("Error synchronizing")
            import traceback
            traceback.print_exc()
            return False
        return True

    @staticmethod
    def _expand_package_ids(
            packages: List[str],
            *,
            use_local: bool,
    ) -> Union[List[Tuple[int, str]], str]:
        # Takes a terse list of package names and expands it to a list of
        # (repository index, name) pairs, returning an error string if any can't be resolved.
        from . import repo_cache_store
        from .bl_extension_ops import extension_repos_read

        repo_map = {}
        errors = []

        repos_all = extension_repos_read()
        for (
                repo_index,
                pkg_manifest,
        ) in enumerate(
                repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print)
                if use_local else
                repo_cache_store.pkg_manifest_from_remote_ensure(error_fn=print)
        ):
            # Show any exceptions created while accessing the JSON.
            repo = repos_all[repo_index]
            repo_map[repo.module] = (repo_index, set(pkg_manifest.keys()))

        repos_and_packages = []

        for pkg_id_full in packages:
            repo_id, pkg_id = pkg_id_full.rpartition(".")[0::2]
            if not pkg_id:
                errors.append("Malformed package name \"{:s}\", expected \"repo_id.pkg_id\"!".format(pkg_id_full))
                continue
            if repo_id:
                repo_index, repo_packages = repo_map.get(repo_id, (-1, ()))
                if repo_index == -1:
                    errors.append("Repository \"{:s}\" not found in [{:s}]!".format(
                        repo_id,
                        ", ".join(sorted("\"{:s}\"".format(x) for x in repo_map.keys()))
                    ))
                    continue
            else:
                repo_index = -1
                for repo_id_iter, (repo_index_iter, repo_packages_iter) in repo_map.items():
                    if pkg_id in repo_packages_iter:
                        repo_index = repo_index_iter
                        break
                if repo_index == -1:
                    if use_local:
                        errors.append("Package \"{:s}\" not installed in local repositories!".format(pkg_id))
                    else:
                        errors.append("Package \"{:s}\" not found in remote repositories!".format(pkg_id))
                    continue
            repos_and_packages.append((repo_index, pkg_id))

        if errors:
            return "\n".join(errors)

        return repos_and_packages

    @staticmethod
    def expand_package_ids_from_remote(packages: List[str]) -> Union[List[Tuple[int, str]], str]:
        return subcmd_utils._expand_package_ids(packages, use_local=False)

    @staticmethod
    def expand_package_ids_from_local(packages: List[str]) -> Union[List[Tuple[int, str]], str]:
        return subcmd_utils._expand_package_ids(packages, use_local=True)
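`_expand_package_ids` accepts both `repo_id.pkg_id` and bare `pkg_id` forms;
the `rpartition(".")[0::2]` slice returns the text before and after the last
dot, with an empty repository part when no dot is present. A quick standalone
illustration (the package names are hypothetical):

# "repo_id.pkg_id" -> both parts; "pkg_id" alone -> empty repo part.
for pkg_id_full in ("user_default.my_tool", "my_tool", "a.b.c"):
    repo_id, pkg_id = pkg_id_full.rpartition(".")[0::2]
    print(repo_id or "<any repo>", pkg_id)
# user_default my_tool
# <any repo> my_tool
# a.b c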
# -----------------------------------------------------------------------------
# Argument Implementation (Queries)

class subcmd_query:

    def __new__(cls) -> Any:
        raise RuntimeError("{:s} should not be instantiated".format(cls.__name__))

    @staticmethod
    def list(
            *,
            sync: bool,
    ) -> bool:

        def list_item(
                pkg_id: str,
                item_remote: Optional[Dict[str, Any]],
                item_local: Optional[Dict[str, Any]],
        ) -> None:
            # Both can't be None.
            assert item_remote is not None or item_local is not None

            if item_remote is not None:
                item_version = item_remote["version"]
                if item_local is None:
                    item_local_version = None
                    is_outdated = False
                else:
                    item_local_version = item_local["version"]
                    is_outdated = item_local_version != item_version

                if item_local is not None:
                    if is_outdated:
                        status_info = " [{:s}]".format(colorize("outdated: {:s} -> {:s}".format(
                            item_local_version,
                            item_version,
                        ), "red"))
                    else:
                        status_info = " [{:s}]".format(colorize("installed", "green"))
                else:
                    status_info = ""
                item = item_remote
            else:
                # All local-only packages are installed.
                status_info = " [{:s}]".format(colorize("installed", "green"))
                assert isinstance(item_local, dict)
                item = item_local

            print(
                " {:s}{:s}: {:s}".format(
                    pkg_id,
                    status_info,
                    colorize("\"{:s}\", {:s}".format(item["name"], item.get("tagline", "<no tagline>")), "dark_gray"),
                ))

        if sync:
            if not subcmd_utils.sync():
                return False

        # NOTE: exactly how this data is extracted is rather arbitrary.
        # This uses the same code paths as drawing code.
        from .bl_extension_ops import extension_repos_read
        from . import repo_cache_store

        repos_all = extension_repos_read()

        for repo_index, (
                pkg_manifest_remote,
                pkg_manifest_local,
        ) in enumerate(zip(
                repo_cache_store.pkg_manifest_from_remote_ensure(error_fn=print),
                repo_cache_store.pkg_manifest_from_local_ensure(error_fn=print),
        )):
            # Show any exceptions created while accessing the JSON.
            repo = repos_all[repo_index]

            print("Repository: \"{:s}\" (id={:s})".format(repo.name, repo.module))
            if pkg_manifest_remote is not None:
                for pkg_id, item_remote in pkg_manifest_remote.items():
                    if pkg_manifest_local is not None:
                        item_local = pkg_manifest_local.get(pkg_id)
                    else:
                        item_local = None
                    list_item(pkg_id, item_remote, item_local)
            else:
                for pkg_id, item_local in pkg_manifest_local.items():
                    list_item(pkg_id, None, item_local)

        return True


# -----------------------------------------------------------------------------
# Argument Implementation (Packages)

class subcmd_pkg:

    def __new__(cls) -> Any:
        raise RuntimeError("{:s} should not be instantiated".format(cls.__name__))

    @staticmethod
    def update(
            *,
            sync: bool,
    ) -> bool:
        if sync:
            if not subcmd_utils.sync():
                return False

        import bpy
        try:
            bpy.ops.bl_pkg.pkg_upgrade_all()
        except RuntimeError:
            return False  # The error will have been printed.
        return True

    @staticmethod
    def install(
            *,
            sync: bool,
            packages: List[str],
            enable_on_install: bool,
            no_prefs: bool,
    ) -> bool:
        if sync:
            if not subcmd_utils.sync():
                return False

        # Expand all package IDs.
        repos_and_packages = subcmd_utils.expand_package_ids_from_remote(packages)
        if isinstance(repos_and_packages, str):
            sys.stderr.write(repos_and_packages)
            sys.stderr.write("\n")
            return False

        import bpy
        for repo_index, pkg_id in repos_and_packages:
            bpy.ops.bl_pkg.pkg_mark_set(
                repo_index=repo_index,
                pkg_id=pkg_id,
            )

        try:
            bpy.ops.bl_pkg.pkg_install_marked(enable_on_install=enable_on_install)
        except RuntimeError:
            return False  # The error will have been printed.

        if not no_prefs:
            if enable_on_install:
                blender_preferences_write()

        return True

    @staticmethod
    def remove(
            *,
            packages: List[str],
            no_prefs: bool,
    ) -> bool:
        # Expand all package IDs.
        repos_and_packages = subcmd_utils.expand_package_ids_from_local(packages)
        if isinstance(repos_and_packages, str):
            sys.stderr.write(repos_and_packages)
            sys.stderr.write("\n")
            return False

        import bpy
        for repo_index, pkg_id in repos_and_packages:
            bpy.ops.bl_pkg.pkg_mark_set(repo_index=repo_index, pkg_id=pkg_id)

        try:
            bpy.ops.bl_pkg.pkg_uninstall_marked()
        except RuntimeError:
            return False  # The error will have been printed.

        if not no_prefs:
            blender_preferences_write()

        return True

    @staticmethod
    def install_file(
            *,
            filepath: str,
            repo_id: str,
            enable_on_install: bool,
            no_prefs: bool,
    ) -> bool:
        import bpy

        # Blender's operator requires an absolute path.
        filepath = os.path.abspath(filepath)

        try:
            bpy.ops.bl_pkg.pkg_install_files(
                filepath=filepath,
                repo=repo_id,
                enable_on_install=enable_on_install,
            )
        except RuntimeError:
            return False  # The error will have been printed.
        except BaseException as ex:
            sys.stderr.write(str(ex))
            sys.stderr.write("\n")

        if not no_prefs:
            if enable_on_install:
                blender_preferences_write()

        return True


# -----------------------------------------------------------------------------
# Argument Implementation (Repositories)

class subcmd_repo:

    def __new__(cls) -> Any:
        raise RuntimeError("{:s} should not be instantiated".format(cls.__name__))

    @staticmethod
    def list() -> bool:
        from .bl_extension_ops import extension_repos_read
        repos_all = extension_repos_read()
        for repo in repos_all:
            print("{:s}:".format(repo.module))
            print(" name: \"{:s}\"".format(repo.name))
            print(" directory: \"{:s}\"".format(repo.directory))
            if url := repo.remote_url:
                print(" url: \"{:s}\"".format(url))

        return True

    @staticmethod
    def add(
            *,
            name: str,
            id: str,
            directory: str,
            url: str,
            cache: bool,
            clear_all: bool,
            no_prefs: bool,
    ) -> bool:
        from bpy import context

        extension_repos = context.preferences.extensions.repos
        if clear_all:
            while extension_repos:
                extension_repos.remove(extension_repos[0])

        repo = extension_repos.new(
            name=name,
            module=id,
            custom_directory=directory,
            remote_url=url,
        )
        repo.use_cache = cache

        if not no_prefs:
            blender_preferences_write()

        return True

    @staticmethod
    def remove(
            *,
            id: str,
            no_prefs: bool,
    ) -> bool:
        from bpy import context
        extension_repos = context.preferences.extensions.repos
        extension_repos_module_map = {repo.module: repo for repo in extension_repos}
        repo = extension_repos_module_map.get(id)
        if repo is None:
            sys.stderr.write("Repository: \"{:s}\" not found in [{:s}]\n".format(
                id,
                ", ".join(["\"{:s}\"".format(x) for x in sorted(extension_repos_module_map.keys())])
            ))
            return False
        extension_repos.remove(repo)
        print("Removed repo \"{:s}\"".format(id))

        if not no_prefs:
            blender_preferences_write()

        return True


# -----------------------------------------------------------------------------
# Command Line Argument Definitions

def arg_handle_int_as_bool(value: str) -> bool:
    result = int(value)
    if result not in {0, 1}:
        raise argparse.ArgumentTypeError("Expected a 0 or 1")
    return bool(result)


def generic_arg_sync(subparse: argparse.ArgumentParser) -> None:
    subparse.add_argument(
        "-s",
        "--sync",
        dest="sync",
        action="store_true",
        default=False,
        help=(
            "Sync the remote directory before performing the action."
        ),
    )


def generic_arg_enable_on_install(subparse: argparse.ArgumentParser) -> None:
    subparse.add_argument(
        "-e",
        "--enable",
        dest="enable",
        action="store_true",
        default=False,
        help=(
            "Enable the extension after installation."
        ),
    )


def generic_arg_no_prefs(subparse: argparse.ArgumentParser) -> None:
    subparse.add_argument(
        "--no-prefs",
        dest="no_prefs",
        action="store_true",
        default=False,
        help=(
            "Treat the user-preferences as read-only,\n"
            "preventing updates for operations that would otherwise modify them.\n"
            "This means removing extensions or repositories, for example, won't update the user-preferences."
        ),
    )


def generic_arg_package_list_positional(subparse: argparse.ArgumentParser) -> None:
    subparse.add_argument(
        dest="packages",
        metavar="PACKAGES",
        type=str,
        help=(
            "The packages to operate on (separated by ``,`` without spaces)."
        ),
    )


def generic_arg_package_file_positional(subparse: argparse.ArgumentParser) -> None:
    subparse.add_argument(
        dest="file",
        metavar="FILE",
        type=str,
        help=(
            "The package file."
        ),
    )


def generic_arg_repo_id(subparse: argparse.ArgumentParser) -> None:
    subparse.add_argument(
        "-r",
        "--repo",
        dest="repo",
        type=str,
        help=(
            "The repository identifier."
        ),
        required=True,
    )


def generic_arg_package_repo_id_positional(subparse: argparse.ArgumentParser) -> None:
    subparse.add_argument(
        dest="id",
        metavar="ID",
        type=str,
        help=(
            "The repository identifier."
        ),
    )


# -----------------------------------------------------------------------------
# Blender Package Manipulation

def cli_extension_args_list(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Implement "list".
    subparse = subparsers.add_parser(
        "list",
        help="List all packages.",
        description=(
            "List packages from all enabled repositories."
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    generic_arg_sync(subparse)

    subparse.set_defaults(
        func=lambda args: subcmd_query.list(
            sync=args.sync,
        ),
    )


def cli_extension_args_sync(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Implement "sync".
    subparse = subparsers.add_parser(
        "sync",
        help="Synchronize with remote repositories.",
        description=(
            "Download package information for remote repositories."
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    subparse.set_defaults(
        func=lambda args: subcmd_utils.sync(show_done=False),
    )


def cli_extension_args_upgrade(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Implement "update".
    subparse = subparsers.add_parser(
        "update",
        help="Upgrade any outdated packages.",
        description=(
            "Download and update any outdated packages."
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    generic_arg_sync(subparse)

    subparse.set_defaults(
        func=lambda args: subcmd_pkg.update(sync=args.sync),
    )


def cli_extension_args_install(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Implement "install".
    subparse = subparsers.add_parser(
        "install",
        help="Install packages.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    generic_arg_sync(subparse)
    generic_arg_package_list_positional(subparse)

    generic_arg_enable_on_install(subparse)
    generic_arg_no_prefs(subparse)

    subparse.set_defaults(
        func=lambda args: subcmd_pkg.install(
            sync=args.sync,
            packages=args.packages.split(","),
            enable_on_install=args.enable,
            no_prefs=args.no_prefs,
        ),
    )


def cli_extension_args_install_file(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Implement "install-file".
    subparse = subparsers.add_parser(
        "install-file",
        help="Install package from file.",
        description=(
            "Install a package file into a local repository."
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )

    generic_arg_package_file_positional(subparse)
    generic_arg_repo_id(subparse)

    generic_arg_enable_on_install(subparse)
    generic_arg_no_prefs(subparse)

    subparse.set_defaults(
        func=lambda args: subcmd_pkg.install_file(
            filepath=args.file,
            repo_id=args.repo,
            enable_on_install=args.enable,
            no_prefs=args.no_prefs,
        ),
    )


def cli_extension_args_remove(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Implement "remove".
    subparse = subparsers.add_parser(
        "remove",
        help="Remove packages.",
        description=(
            "Disable & remove package(s)."
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    generic_arg_package_list_positional(subparse)
    generic_arg_no_prefs(subparse)

    subparse.set_defaults(
        func=lambda args: subcmd_pkg.remove(
            packages=args.packages.split(","),
            no_prefs=args.no_prefs,
        ),
    )


# -----------------------------------------------------------------------------
# Blender Repository Manipulation

def cli_extension_args_repo_list(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Implement "repo-list".
    subparse = subparsers.add_parser(
        "repo-list",
        help="List repositories.",
        description=(
            "List all repositories stored in Blender's preferences."
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    subparse.set_defaults(
        func=lambda args: subcmd_repo.list(),
    )


def cli_extension_args_repo_add(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Implement "repo-add".
    subparse = subparsers.add_parser(
        "repo-add",
        help="Add repository.",
        description=(
            "Add a new local or remote repository."
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    generic_arg_package_repo_id_positional(subparse)

    # Optional.
    subparse.add_argument(
        "--name",
        dest="name",
        type=str,
        default="",
        metavar="NAME",
        help=(
            "The name to display in the interface (optional)."
        ),
    )

    subparse.add_argument(
        "--directory",
        dest="directory",
        type=str,
        default="",
        help=(
            "The directory where the repository stores local files (optional).\n"
            "When omitted a directory in the user's directory is automatically selected."
        ),
    )
    subparse.add_argument(
        "--url",
        dest="url",
        type=str,
        default="",
        metavar="URL",
        help=(
            "The URL, for remote repositories (optional).\n"
            "When omitted the repository is considered \"local\"\n"
            "as it is not connected to an external repository,\n"
            "where packages may be installed by file or managed manually."
        ),
    )

    subparse.add_argument(
        "--cache",
        dest="cache",
        metavar="BOOLEAN",
        type=arg_handle_int_as_bool,
        default=True,
        help=(
            "Use package cache (default=1)."
        ),
    )

    subparse.add_argument(
        "--clear-all",
        dest="clear_all",
        action="store_true",
        help=(
            "Clear all repositories before adding, simplifies test setup."
        ),
    )

    generic_arg_no_prefs(subparse)

    subparse.set_defaults(
        func=lambda args: subcmd_repo.add(
            id=args.id,
            name=args.name,
            directory=args.directory,
            url=args.url,
            cache=args.cache,
            clear_all=args.clear_all,
            no_prefs=args.no_prefs,
        ),
    )


def cli_extension_args_repo_remove(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Implement "repo-remove".
    subparse = subparsers.add_parser(
        "repo-remove",
        help="Remove repository.",
        description=(
            "Remove a repository."
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    generic_arg_package_repo_id_positional(subparse)
    generic_arg_no_prefs(subparse)

    subparse.set_defaults(
        func=lambda args: subcmd_repo.remove(
            id=args.id,
            no_prefs=args.no_prefs,
        ),
    )


# -----------------------------------------------------------------------------
# Implement Additional Arguments

def cli_extension_args_extra(subparsers: "argparse._SubParsersAction[argparse.ArgumentParser]") -> None:
    # Package commands.
    cli_extension_args_list(subparsers)
    cli_extension_args_sync(subparsers)
    cli_extension_args_upgrade(subparsers)
    cli_extension_args_install(subparsers)
    cli_extension_args_install_file(subparsers)
    cli_extension_args_remove(subparsers)

    # Preference commands.
    cli_extension_args_repo_list(subparsers)
    cli_extension_args_repo_add(subparsers)
    cli_extension_args_repo_remove(subparsers)


def cli_extension_handler(args: List[str]) -> int:
    from .cli import blender_ext
    result = blender_ext.main(
        args,
        args_internal=False,
        args_extra_subcommands_fn=cli_extension_args_extra,
        prog="blender --command extension",
    )
    # Needed as the import isn't followed by `mypy`.
    assert isinstance(result, int)
    return result
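The subcommand wiring above is the standard argparse pattern: each
`add_parser` call registers a subcommand, `set_defaults(func=...)` attaches
its handler, and dispatch reduces to `args.func(args)`. A self-contained
sketch of the same structure (the `hello` command is hypothetical, not part of
this patch):

import argparse

parser = argparse.ArgumentParser(prog="demo")
subparsers = parser.add_subparsers(dest="command", required=True)

subparse = subparsers.add_parser("hello", help="Print a greeting.")
subparse.add_argument("--name", default="world")
subparse.set_defaults(func=lambda args: print("hello", args.name))

args = parser.parse_args(["hello", "--name", "extensions"])
args.func(args)  # Prints: hello extensions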
scripts/addons_core/bl_pkg/bl_extension_local.py  (new file, 46 lines)
@@ -0,0 +1,46 @@
# SPDX-FileCopyrightText: 2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

"""
High level API for managing an extension's local site-packages and wheels.

NOTE: this is a standalone module.
"""

__all__ = (
    "sync",
)


import os
import sys

from .wheel_manager import WheelSource

from typing import (
    List,
)


def sync(
        *,
        local_dir: str,
        wheel_list: List[WheelSource],
) -> None:
    from . import wheel_manager
    local_dir_site_packages = os.path.join(
        local_dir,
        "lib",
        "python{:d}.{:d}".format(sys.version_info.major, sys.version_info.minor),
        "site-packages",
    )

    wheel_manager.apply_action(
        local_dir=local_dir,
        local_dir_site_packages=local_dir_site_packages,
        wheel_list=wheel_list,
    )
    if os.path.exists(local_dir_site_packages):
        if local_dir_site_packages not in sys.path:
            sys.path.append(local_dir_site_packages)
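`sync` keys the wheel install location to the running interpreter version, so
wheels unpacked for one Python never leak into another. A short standalone
sketch of the same path scheme (the `/tmp/ext` prefix is only an example):

import os
import sys

local_dir = "/tmp/ext"  # Example prefix only.
site_packages = os.path.join(
    local_dir,
    "lib",
    "python{:d}.{:d}".format(sys.version_info.major, sys.version_info.minor),
    "site-packages",
)
# e.g. "/tmp/ext/lib/python3.11/site-packages" on CPython 3.11.
print(site_packages)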
scripts/addons_core/bl_pkg/bl_extension_notify.py  (new file, 378 lines)
@@ -0,0 +1,378 @@
# SPDX-FileCopyrightText: 2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

"""
Startup notifications.
"""

__all__ = (
    "register",
    "unregister",
)


import os
import bpy

from . import bl_extension_ops
from . import bl_extension_utils

# Request the processes exit, then wait for them to exit.
# NOTE(@ideasman42): This is all well and good but any delays exiting are unwanted,
# only keep this as a reference and in case we can speed up forcing them to exit.
USE_GRACEFUL_EXIT = False


# -----------------------------------------------------------------------------
# Internal Utilities

def sync_status_count_outdated_extensions(repos_notify):
    from . import repo_cache_store

    repos_notify_directories = [repo_item.directory for repo_item in repos_notify]

    package_count = 0

    for (
            pkg_manifest_remote,
            pkg_manifest_local,
    ) in zip(
            repo_cache_store.pkg_manifest_from_remote_ensure(
                error_fn=print,
                directory_subset=repos_notify_directories,
            ),
            repo_cache_store.pkg_manifest_from_local_ensure(
                error_fn=print,
                directory_subset=repos_notify_directories,
                # Needed as these have been updated.
                check_files=True,
            ),
    ):
        if pkg_manifest_remote is None:
            continue
        if pkg_manifest_local is None:
            continue

        for pkg_id, item_remote in pkg_manifest_remote.items():
            item_local = pkg_manifest_local.get(pkg_id)
            if item_local is None:
                # Not installed.
                continue

            if item_remote["version"] != item_local["version"]:
                package_count += 1
    return package_count


# -----------------------------------------------------------------------------
# Update Iterator
#
# This is a black-box which handles running the updates, yielding status text.

def sync_apply_locked(repos_notify, repos_notify_files, unique_ext):
    """
    Move files with a unique extension to their final location
    with a locked repository to ensure multiple Blender instances never overwrite
    repositories at the same time.

    Lock the repositories for the shortest time reasonably possible.
    If locking fails, this is OK as it's possible another Blender got here first.

    Another reason this is needed is that exiting Blender will close the sync sub-processes,
    this is OK as long as the final location of the repositories JSON isn't being written
    to the moment Blender and its sub-processes exit.
    """
    # TODO: handle the case of cruft being left behind, perhaps detect previous
    # files created with a `unique_ext` (`@{HEX}` extension) and remove them.
    # Although this shouldn't happen on a regular basis. Only when exiting immediately after launching
    # Blender, and even then the user would need to be *lucky*.
    from . import cookie_from_session

    any_lock_errors = False
    repo_directories = [repo_item.directory for repo_item in repos_notify]
    with bl_extension_utils.RepoLockContext(
            repo_directories=repo_directories,
            cookie=cookie_from_session(),
    ) as lock_result:
        for directory, repo_files in zip(repo_directories, repos_notify_files):
            repo_files = [os.path.join(directory, filepath_rel) for filepath_rel in repo_files]

            if (lock_result_for_repo := lock_result[directory]) is not None:
                print("Warning \"{:s}\" locking \"{:s}\"".format(lock_result_for_repo, repr(directory)))
                any_lock_errors = True
                for filepath in repo_files:
                    try:
                        os.remove(filepath)
                    except Exception as ex:
                        print("Failed to remove file:", ex)
                continue

            # Locking worked, rename the files.
            for filepath in repo_files:
                filepath_dst = filepath[:-len(unique_ext)]
                try:
                    os.remove(filepath_dst)
                except Exception as ex:
                    print("Failed to remove file before renaming:", ex)
                    continue
                os.rename(filepath, filepath_dst)
    return any_lock_errors
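The scheme above amounts to a two-phase commit for files: sync sub-processes
write to names carrying a per-process suffix (`repo.json@<pid-hex>`), and only
once the repository lock is held are those staged files renamed over their
final names, so a half-written file is never visible under the real name. A
minimal standalone sketch of the staging idea (paths hypothetical; the real
code also locks the repository and handles failures per file):

import os

unique_ext = "@{:x}".format(os.getpid())

final_path = "/tmp/repo.json"          # Example path only.
staged_path = final_path + unique_ext  # What the writer produces.

with open(staged_path, "w") as fh:
    fh.write("{}")  # Write the full content to the staged name first.

# Publishing is a single rename; os.replace() overwrites any existing
# file at the destination.
os.replace(staged_path, final_path)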
def sync_status_generator(repos_notify):

    # Generator results...
    # -> None: do nothing.
    # -> (text, ICON_ID, NUMBER_OF_UPDATES)

    # ################ #
    # Setup The Update #
    # ################ #

    yield None

    # An extension unique to this session.
    unique_ext = "@{:x}".format(os.getpid())

    from functools import partial

    cmd_batch_partial = []
    for repo_item in repos_notify:
        # Local-only repositories should still refresh, but not run the sync.
        assert repo_item.remote_url
        cmd_batch_partial.append(partial(
            bl_extension_utils.repo_sync,
            directory=repo_item.directory,
            remote_url=repo_item.remote_url,
            online_user_agent=bl_extension_ops.online_user_agent_from_blender(),
            # Never sleep while there is no input, as this blocks Blender.
            use_idle=False,
            # Needed so the user can exit Blender without warnings about a broken pipe.
            # TODO: write to a temporary location, once done:
            # there is no chance of corrupt data as the data isn't written directly to the target JSON.
            force_exit_ok=not USE_GRACEFUL_EXIT,
            extension_override=unique_ext,
        ))

    yield None

    # repos_lock = [repo_item.directory for repo_item in self.repos_notify]

    # Lock repositories.
    # self.repo_lock = bl_extension_utils.RepoLock(repo_directories=repos_lock, cookie=cookie_from_session())

    import atexit

    cmd_batch = None

    def cmd_force_quit():
        if cmd_batch is None:
            return
        cmd_batch.exec_non_blocking(request_exit=True)

        if USE_GRACEFUL_EXIT:
            import time
            # Force all commands to close.
            while not cmd_batch.exec_non_blocking(request_exit=True).all_complete:
                # Avoid high CPU usage on exit.
                time.sleep(0.01)

    atexit.register(cmd_force_quit)

    cmd_batch = bl_extension_utils.CommandBatch(
        # Used as a prefix in status.
        title="Update",
        batch=cmd_batch_partial,
    )
    del cmd_batch_partial

    yield None

    # ############## #
    # Run The Update #
    # ############## #

    # The count is unknown.
    update_total = -1
    any_lock_errors = False

    repos_notify_files = [[] for _ in repos_notify]

    is_debug = bpy.app.debug
    while True:
        command_result = cmd_batch.exec_non_blocking(
            # TODO: if Blender requested an exit... this should request exit here.
            request_exit=False,
        )
        # Forward new messages to reports.
        msg_list_per_command = cmd_batch.calc_status_log_since_last_request_or_none()
        if msg_list_per_command is not None:
            for i, msg_list in enumerate(msg_list_per_command):
                for (ty, msg) in msg_list:
                    if ty == 'PATH':
                        if not msg.endswith(unique_ext):
                            print("Unexpected path:", msg)
                        repos_notify_files[i].append(msg)
                        continue

                    if not is_debug:
                        continue

                    # TODO: output this information to a place for users, if they want to debug.
                    if len(msg_list_per_command) > 1:
                        # These reports are flattened, note the process number that fails so
                        # whoever is reading the reports can make sense of the messages.
                        msg = "{:s} (process {:d} of {:d})".format(msg, i + 1, len(msg_list_per_command))
                    if ty == 'STATUS':
                        print('INFO', msg)
                    else:
                        print(ty, msg)

        # TODO: more elegant way to detect changes.
        # Re-calculating the same information each time then checking if it's different isn't great.
        if command_result.status_data_changed:
            if command_result.all_complete:
                any_lock_errors = sync_apply_locked(repos_notify, repos_notify_files, unique_ext)
                update_total = sync_status_count_outdated_extensions(repos_notify)
            yield (cmd_batch.calc_status_data(), update_total, any_lock_errors)
        else:
            yield None

        if command_result.all_complete:
            break

    atexit.unregister(cmd_force_quit)

    # ################### #
    # Finalize The Update #
    # ################### #

    yield None

    # Unlock repositories.
    # lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING')
    # self.repo_lock = None
# -----------------------------------------------------------------------------
|
||||
# Private API
|
||||
|
||||
# The time to wait before running the timer (initially).
|
||||
TIME_WAIT_INIT = 0.05
|
||||
# The time between timer calls.
|
||||
TIME_WAIT_STEP = 0.1
|
||||
|
||||
state_text = (
|
||||
"Checking for updates...",
|
||||
)
|
||||
|
||||
|
||||
class NotifyHandle:
|
||||
__slots__ = (
|
||||
"splash_region",
|
||||
"state",
|
||||
|
||||
"sync_generator",
|
||||
"sync_info",
|
||||
)
|
||||
|
||||
def __init__(self, repos_notify):
|
||||
self.splash_region = None
|
||||
self.state = 0
|
||||
# We could start the generator separately; this seems OK here for now.
|
||||
self.sync_generator = iter(sync_status_generator(repos_notify))
|
||||
# TEXT/ICON_ID/COUNT
|
||||
self.sync_info = None
|
||||
|
||||
|
||||
# When non-null, the timer is running.
|
||||
_notify = None
|
||||
|
||||
|
||||
def _region_exists(region):
|
||||
# TODO: this is a workaround for there being no good way to inspect temporary regions.
|
||||
# A better solution could be to store the `PyObject` in the `ARegion` so that it gets invalidated when freed.
|
||||
# This is a bigger change though - so use the context override as a way to check if a region is valid.
|
||||
exists = False
|
||||
try:
|
||||
with bpy.context.temp_override(region=region):
|
||||
exists = True
|
||||
except TypeError:
|
||||
pass
|
||||
return exists
|
||||
|
||||
|
||||
def _ui_refresh_timer():
|
||||
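# NOTE: `bpy.app.timers` convention: returning None un-registers the timer,
# returning a float re-schedules the next call after that many seconds.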
if _notify is None:
|
||||
return None
|
||||
|
||||
default_wait = TIME_WAIT_STEP
|
||||
|
||||
sync_info = next(_notify.sync_generator, ...)
|
||||
# If the generator exited, exit early here.
|
||||
if sync_info is ...:
|
||||
return None
|
||||
if sync_info is None:
|
||||
# Nothing changed, no action is needed (waiting for a response).
|
||||
return default_wait
|
||||
|
||||
# Re-display.
|
||||
assert isinstance(sync_info, tuple)
|
||||
assert len(sync_info) == 3
|
||||
|
||||
_notify.sync_info = sync_info
|
||||
|
||||
# Check if the splash_region is valid.
|
||||
if _notify.splash_region is not None:
|
||||
if not _region_exists(_notify.splash_region):
|
||||
_notify.splash_region = None
|
||||
return None
|
||||
_notify.splash_region.tag_redraw()
|
||||
_notify.splash_region.tag_refresh_ui()
|
||||
|
||||
# TODO: redraw the status bar.
|
||||
|
||||
return default_wait
|
||||
|
||||
|
||||
def splash_draw_status_fn(self, context):
|
||||
if _notify.splash_region is None:
|
||||
_notify.splash_region = context.region_popup
|
||||
|
||||
if _notify.sync_info is None:
|
||||
self.layout.label(text="Updates starting...")
|
||||
else:
|
||||
status_data, update_count, any_lock_errors = _notify.sync_info
|
||||
text, icon = bl_extension_utils.CommandBatch.calc_status_text_icon_from_data(status_data, update_count)
|
||||
if any_lock_errors:
|
||||
text = text + " - failed to acquire lock!"
|
||||
row = self.layout.row(align=True)
|
||||
if update_count > 0:
|
||||
row.operator("bl_pkg.extensions_show_for_update", text=text, icon=icon)
|
||||
else:
|
||||
row.label(text=text, icon=icon)
|
||||
|
||||
self.layout.separator()
|
||||
self.layout.separator()
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Public API
|
||||
|
||||
|
||||
def register(repos_notify):
|
||||
global _notify
|
||||
_notify = NotifyHandle(repos_notify)
|
||||
bpy.types.WM_MT_splash.append(splash_draw_status_fn)
|
||||
bpy.app.timers.register(_ui_refresh_timer, first_interval=TIME_WAIT_INIT)
|
||||
|
||||
|
||||
def unregister():
|
||||
global _notify
|
||||
assert _notify is not None
|
||||
_notify = None
|
||||
|
||||
bpy.types.WM_MT_splash.remove(splash_draw_status_fn)
|
||||
# This timer is responsible for un-registering itself.
|
||||
# `bpy.app.timers.unregister(_ui_refresh_timer)`
|
2337
scripts/addons_core/bl_pkg/bl_extension_ops.py
Normal file
File diff suppressed because it is too large
807
scripts/addons_core/bl_pkg/bl_extension_ui.py
Normal file
@ -0,0 +1,807 @@
|
||||
# SPDX-FileCopyrightText: 2023 Blender Foundation
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
"""
|
||||
GUI (WARNING) this is a hack!
|
||||
Written to allow a UI without modifying Blender.
|
||||
"""
|
||||
|
||||
__all__ = (
|
||||
"display_errors",
|
||||
"register",
|
||||
"unregister",
|
||||
)
|
||||
|
||||
import bpy
|
||||
|
||||
from bpy.types import (
|
||||
Menu,
|
||||
Panel,
|
||||
)
|
||||
|
||||
from bl_ui.space_userpref import (
|
||||
USERPREF_PT_addons,
|
||||
)
|
||||
|
||||
from . import repo_status_text
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Generic Utilities
|
||||
|
||||
|
||||
def size_as_fmt_string(num: float, *, precision: int = 1) -> str:
|
||||
for unit in ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"):
|
||||
if abs(num) < 1024.0:
|
||||
return "{:3.{:d}f}{:s}".format(num, precision, unit)
|
||||
num /= 1024.0
|
||||
unit = "yb"
|
||||
return "{:.{:d}f}{:s}".format(num, precision, unit)
|
||||
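# For example (illustrative): `size_as_fmt_string(500)` returns "500.0B",
# `size_as_fmt_string(2048)` returns "2.0KB".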
|
||||
|
||||
def sizes_as_percentage_string(size_partial: int, size_final: int) -> str:
|
||||
if size_final == 0:
|
||||
percent = 0.0
|
||||
else:
|
||||
size_partial = min(size_partial, size_final)
|
||||
percent = size_partial / size_final
|
||||
|
||||
return "{:-6.2f}%".format(percent * 100)
|
||||
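# For example (illustrative): `sizes_as_percentage_string(512, 1024)` returns " 50.00%".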
|
||||
|
||||
def license_info_to_text(license_list):
|
||||
# See: https://spdx.org/licenses/
|
||||
# - Note that we could include all, for now only common, GPL compatible licenses.
|
||||
# - Note that many of the human descriptions are not especially more humanly readable
|
||||
# than the short versions, so it's questionable if we should attempt to add all of these.
|
||||
_spdx_id_to_text = {
|
||||
"GPL-2.0-only": "GNU General Public License v2.0 only",
|
||||
"GPL-2.0-or-later": "GNU General Public License v2.0 or later",
|
||||
"GPL-3.0-only": "GNU General Public License v3.0 only",
|
||||
"GPL-3.0-or-later": "GNU General Public License v3.0 or later",
|
||||
}
|
||||
result = []
|
||||
for item in license_list:
|
||||
if item.startswith("SPDX:"):
|
||||
item = item[5:]
|
||||
item = _spdx_id_to_text.get(item, item)
|
||||
result.append(item)
|
||||
return ", ".join(result)
|
||||
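# For example (illustrative): `license_info_to_text(["SPDX:GPL-2.0-or-later"])`
# returns "GNU General Public License v2.0 or later".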
|
||||
|
||||
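# Map a theme's `theme.xml` file-path back to a `(repo_index, pkg_id)` pair,
# returning None when the path isn't inside any known repository.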
def pkg_repo_and_id_from_theme_path(repos_all, filepath):
|
||||
import os
|
||||
if not filepath:
|
||||
return None
|
||||
|
||||
# Strip the `theme.xml` filename.
|
||||
dirpath = os.path.dirname(filepath)
|
||||
repo_directory, pkg_id = os.path.split(dirpath)
|
||||
for repo_index, repo in enumerate(repos_all):
|
||||
if not os.path.samefile(repo_directory, repo.directory):
|
||||
continue
|
||||
return repo_index, pkg_id
|
||||
return None
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Extensions UI (Legacy)
|
||||
|
||||
def extensions_panel_draw_legacy_addons(
|
||||
layout,
|
||||
context,
|
||||
*,
|
||||
search_lower,
|
||||
enabled_only,
|
||||
installed_only,
|
||||
used_addon_module_name_map,
|
||||
):
|
||||
# NOTE: this duplicates logic from `USERPREF_PT_addons`; eventually that logic should be used instead.
|
||||
# Don't de-duplicate the logic as this is a temporary state - as long as extensions remain experimental.
|
||||
import addon_utils
|
||||
from bpy.app.translations import (
|
||||
pgettext_iface as iface_,
|
||||
)
|
||||
from .bl_extension_ops import (
|
||||
pkg_info_check_exclude_filter_ex,
|
||||
)
|
||||
|
||||
addons = [
|
||||
(mod, addon_utils.module_bl_info(mod))
|
||||
for mod in addon_utils.modules(refresh=False)
|
||||
]
|
||||
|
||||
# Initialized on demand.
|
||||
user_addon_paths = []
|
||||
|
||||
for mod, bl_info in addons:
|
||||
module_name = mod.__name__
|
||||
is_extension = addon_utils.check_extension(module_name)
|
||||
if is_extension:
|
||||
continue
|
||||
|
||||
if search_lower and (
|
||||
not pkg_info_check_exclude_filter_ex(
|
||||
bl_info["name"],
|
||||
bl_info["description"],
|
||||
search_lower,
|
||||
)
|
||||
):
|
||||
continue
|
||||
|
||||
is_enabled = module_name in used_addon_module_name_map
|
||||
if enabled_only and (not is_enabled):
|
||||
continue
|
||||
|
||||
col_box = layout.column()
|
||||
box = col_box.box()
|
||||
colsub = box.column()
|
||||
row = colsub.row(align=True)
|
||||
|
||||
row.operator(
|
||||
"preferences.addon_expand",
|
||||
icon='DISCLOSURE_TRI_DOWN' if bl_info["show_expanded"] else 'DISCLOSURE_TRI_RIGHT',
|
||||
emboss=False,
|
||||
).module = module_name
|
||||
|
||||
row.operator(
|
||||
"preferences.addon_disable" if is_enabled else "preferences.addon_enable",
|
||||
icon='CHECKBOX_HLT' if is_enabled else 'CHECKBOX_DEHLT', text="",
|
||||
emboss=False,
|
||||
).module = module_name
|
||||
|
||||
sub = row.row()
|
||||
sub.active = is_enabled
|
||||
sub.label(text="Legacy: " + bl_info["name"])
|
||||
|
||||
if bl_info["warning"]:
|
||||
sub.label(icon='ERROR')
|
||||
|
||||
row_right = row.row()
|
||||
row_right.alignment = 'RIGHT'
|
||||
|
||||
row_right.label(text="Installed ")
|
||||
row_right.active = False
|
||||
|
||||
if bl_info["show_expanded"]:
|
||||
split = box.split(factor=0.15)
|
||||
col_a = split.column()
|
||||
col_b = split.column()
|
||||
if value := bl_info["description"]:
|
||||
col_a.label(text="Description:")
|
||||
col_b.label(text=iface_(value))
|
||||
|
||||
col_a.label(text="File:")
|
||||
col_b.label(text=mod.__file__, translate=False)
|
||||
|
||||
if value := bl_info["author"]:
|
||||
col_a.label(text="Author:")
|
||||
col_b.label(text=value.split("<", 1)[0].rstrip(), translate=False)
|
||||
if value := bl_info["version"]:
|
||||
col_a.label(text="Version:")
|
||||
col_b.label(text=".".join(str(x) for x in value), translate=False)
|
||||
if value := bl_info["warning"]:
|
||||
col_a.label(text="Warning:")
|
||||
col_b.label(text=" " + iface_(value), icon='ERROR')
|
||||
del value
|
||||
|
||||
# Include for consistency.
|
||||
col_a.label(text="Type:")
|
||||
col_b.label(text="add-on")
|
||||
|
||||
user_addon = USERPREF_PT_addons.is_user_addon(mod, user_addon_paths)
|
||||
|
||||
if bl_info["doc_url"] or bl_info.get("tracker_url"):
|
||||
split = box.row().split(factor=0.15)
|
||||
split.label(text="Internet:")
|
||||
sub = split.row()
|
||||
if bl_info["doc_url"]:
|
||||
sub.operator(
|
||||
"wm.url_open", text="Documentation", icon='HELP',
|
||||
).url = bl_info["doc_url"]
|
||||
# Only add "Report a Bug" button if tracker_url is set
|
||||
# or the add-on is bundled (use official tracker then).
|
||||
if bl_info.get("tracker_url"):
|
||||
sub.operator(
|
||||
"wm.url_open", text="Report a Bug", icon='URL',
|
||||
).url = bl_info["tracker_url"]
|
||||
elif not user_addon:
|
||||
addon_info = (
|
||||
"Name: %s %s\n"
|
||||
"Author: %s\n"
|
||||
) % (bl_info["name"], str(bl_info["version"]), bl_info["author"])
|
||||
props = sub.operator(
|
||||
"wm.url_open_preset", text="Report a Bug", icon='URL',
|
||||
)
|
||||
props.type = 'BUG_ADDON'
|
||||
props.id = addon_info
|
||||
|
||||
if user_addon:
|
||||
rowsub = col_b.row()
|
||||
rowsub.alignment = 'RIGHT'
|
||||
rowsub.operator(
|
||||
"preferences.addon_remove", text="Uninstall", icon='CANCEL',
|
||||
).module = module_name
|
||||
|
||||
if is_enabled:
|
||||
if (addon_preferences := used_addon_module_name_map[module_name].preferences) is not None:
|
||||
USERPREF_PT_addons.draw_addon_preferences(layout, context, addon_preferences)
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Extensions UI
|
||||
|
||||
class display_errors:
|
||||
"""
|
||||
This singleton class is used to store errors which are generated while drawing,
|
||||
note that these errors are reasonably obscure, examples are:
|
||||
- Failure to parse the repository JSON file.
|
||||
- Failure to access the file-system for reading where the repository is stored.
|
||||
|
||||
The current and previous state are compared; when they match, no drawing is done,
|
||||
this allows the current display errors to be dismissed.
|
||||
"""
|
||||
errors_prev = []
|
||||
errors_curr = []
|
||||
|
||||
@staticmethod
|
||||
def clear():
|
||||
display_errors.errors_prev = display_errors.errors_curr
|
||||
|
||||
@staticmethod
|
||||
def draw(layout):
|
||||
if display_errors.errors_curr == display_errors.errors_prev:
|
||||
return
|
||||
box_header = layout.box()
|
||||
# Don't clip longer names.
|
||||
row = box_header.split(factor=0.9)
|
||||
row.label(text="Repository Access Errors:", icon='ERROR')
|
||||
rowsub = row.row(align=True)
|
||||
rowsub.alignment = 'RIGHT'
|
||||
rowsub.operator("bl_pkg.pkg_display_errors_clear", text="", icon='X', emboss=False)
|
||||
|
||||
box_contents = box_header.box()
|
||||
for err in display_errors.errors_curr:
|
||||
box_contents.label(text=err)
|
||||
|
||||
|
||||
def extensions_panel_draw_online_extensions_request_impl(
|
||||
self,
|
||||
context,
|
||||
):
|
||||
layout = self.layout
|
||||
layout_header, layout_panel = layout.panel("advanced", default_closed=False)
|
||||
layout_header.label(text="Online Extensions")
|
||||
if layout_panel is not None:
|
||||
# Text wrapping isn't supported, manually wrap.
|
||||
for line in (
|
||||
"Welcome! Access community-made add-ons and themes from the",
|
||||
"extensions.blender.org repository.",
|
||||
"",
|
||||
"This requires internet access.",
|
||||
):
|
||||
layout_panel.label(text=line)
|
||||
|
||||
row = layout.row()
|
||||
row.operator("bl_pkg.extension_online_access", text="Dismiss", icon='X').enable = False
|
||||
row.operator("bl_pkg.extension_online_access", text="Enable Repository", icon='CHECKMARK').enable = True
|
||||
|
||||
|
||||
def extensions_panel_draw_impl(
|
||||
self,
|
||||
context,
|
||||
search_lower,
|
||||
filter_by_type,
|
||||
enabled_only,
|
||||
updates_only,
|
||||
installed_only,
|
||||
show_legacy_addons,
|
||||
show_development,
|
||||
):
|
||||
"""
|
||||
Show all the items... we may want to paginate at some point.
|
||||
"""
|
||||
import os
|
||||
from .bl_extension_ops import (
|
||||
blender_extension_mark,
|
||||
blender_extension_show,
|
||||
extension_repos_read,
|
||||
pkg_info_check_exclude_filter,
|
||||
repo_cache_store_refresh_from_prefs,
|
||||
)
|
||||
|
||||
from . import repo_cache_store
|
||||
|
||||
# This isn't elegant, but the preferences aren't available on registration.
|
||||
if not repo_cache_store.is_init():
|
||||
repo_cache_store_refresh_from_prefs()
|
||||
|
||||
layout = self.layout
|
||||
|
||||
prefs = context.preferences
|
||||
|
||||
if updates_only:
|
||||
installed_only = True
|
||||
show_legacy_addons = False
|
||||
|
||||
# Define a top-most column to place warnings (if-any).
|
||||
# Needed so the warnings aren't mixed in with other content.
|
||||
layout_topmost = layout.column()
|
||||
|
||||
repos_all = extension_repos_read()
|
||||
|
||||
# To access enabled add-ons.
|
||||
show_addons = filter_by_type in {"", "add-on"}
|
||||
show_themes = filter_by_type in {"", "theme"}
|
||||
if show_addons:
|
||||
used_addon_module_name_map = {addon.module: addon for addon in prefs.addons}
|
||||
if show_themes:
|
||||
active_theme_info = pkg_repo_and_id_from_theme_path(repos_all, prefs.themes[0].filepath)
|
||||
|
||||
# Collect exceptions accessing repositories, and optionally show them.
|
||||
errors_on_draw = []
|
||||
|
||||
remote_ex = None
|
||||
local_ex = None
|
||||
|
||||
def error_fn_remote(ex):
|
||||
nonlocal remote_ex
|
||||
remote_ex = ex
|
||||
|
||||
def error_fn_local(ex):
|
||||
nonlocal local_ex
|
||||
local_ex = ex
|
||||
|
||||
for repo_index, (
|
||||
pkg_manifest_remote,
|
||||
pkg_manifest_local,
|
||||
) in enumerate(zip(
|
||||
repo_cache_store.pkg_manifest_from_remote_ensure(error_fn=error_fn_remote),
|
||||
repo_cache_store.pkg_manifest_from_local_ensure(error_fn=error_fn_local),
|
||||
)):
|
||||
# Show any exceptions created while accessing the JSON,
|
||||
# if the JSON has an IO error while being read or if the directory doesn't exist.
|
||||
# In general users should _not_ see these kinds of errors; however, we cannot prevent
|
||||
# IO errors in general and it is better to show a warning than to ignore the error entirely
|
||||
# or cause a trace-back which breaks the UI.
|
||||
if (remote_ex is not None) or (local_ex is not None):
|
||||
repo = repos_all[repo_index]
|
||||
# NOTE: `FileNotFoundError` occurs when a repository has been added but has not yet been updated from its remote.
|
||||
# We may want a way for users to know a repository is missing from the view and they need to run update
|
||||
# to access its extensions.
|
||||
if remote_ex is not None:
|
||||
if isinstance(remote_ex, FileNotFoundError) and (remote_ex.filename == repo.directory):
|
||||
pass
|
||||
else:
|
||||
errors_on_draw.append("Remote of \"{:s}\": {:s}".format(repo.name, str(remote_ex)))
|
||||
remote_ex = None
|
||||
|
||||
if local_ex is not None:
|
||||
if isinstance(local_ex, FileNotFoundError) and (local_ex.filename == repo.directory):
|
||||
pass
|
||||
else:
|
||||
errors_on_draw.append("Local of \"{:s}\": {:s}".format(repo.name, str(local_ex)))
|
||||
local_ex = None
|
||||
continue
|
||||
|
||||
if pkg_manifest_remote is None:
|
||||
repo = repos_all[repo_index]
|
||||
has_remote = (repo.remote_url != "")
|
||||
if has_remote:
|
||||
# NOTE: it would be nice to detect when the repository ran sync and it failed.
|
||||
# This isn't such an important distinction though, the main thing users should be aware of
|
||||
# is that a "sync" is required.
|
||||
errors_on_draw.append("Repository: \"{:s}\" must sync with the remote repository.".format(repo.name))
|
||||
del repo
|
||||
continue
|
||||
else:
|
||||
repo = repos_all[repo_index]
|
||||
has_remote = (repo.remote_url != "")
|
||||
del repo
|
||||
|
||||
for pkg_id, item_remote in pkg_manifest_remote.items():
|
||||
if filter_by_type and (filter_by_type != item_remote["type"]):
|
||||
continue
|
||||
if search_lower and (not pkg_info_check_exclude_filter(item_remote, search_lower)):
|
||||
continue
|
||||
|
||||
item_local = pkg_manifest_local.get(pkg_id)
|
||||
is_installed = item_local is not None
|
||||
|
||||
if installed_only and (not is_installed):
|
||||
continue
|
||||
|
||||
is_addon = False
|
||||
is_theme = False
|
||||
match item_remote["type"]:
|
||||
case "add-on":
|
||||
is_addon = True
|
||||
case "theme":
|
||||
is_theme = True
|
||||
|
||||
if is_addon:
|
||||
if is_installed:
|
||||
# Currently we only need to know the module name once installed.
|
||||
addon_module_name = "bl_ext.{:s}.{:s}".format(repos_all[repo_index].module, pkg_id)
|
||||
is_enabled = addon_module_name in used_addon_module_name_map
|
||||
|
||||
else:
|
||||
is_enabled = False
|
||||
addon_module_name = None
|
||||
elif is_theme:
|
||||
is_enabled = (repo_index, pkg_id) == active_theme_info
|
||||
addon_module_name = None
|
||||
else:
|
||||
# TODO: ability to disable.
|
||||
is_enabled = is_installed
|
||||
addon_module_name = None
|
||||
|
||||
if enabled_only and (not is_enabled):
|
||||
continue
|
||||
|
||||
item_version = item_remote["version"]
|
||||
if item_local is None:
|
||||
item_local_version = None
|
||||
is_outdated = False
|
||||
else:
|
||||
item_local_version = item_local["version"]
|
||||
is_outdated = item_local_version != item_version
|
||||
|
||||
if updates_only:
|
||||
if not is_outdated:
|
||||
continue
|
||||
|
||||
key = (pkg_id, repo_index)
|
||||
if show_development:
|
||||
mark = key in blender_extension_mark
|
||||
show = key in blender_extension_show
|
||||
del key
|
||||
|
||||
box = layout.box()
|
||||
|
||||
# Left align so the operator text isn't centered.
|
||||
colsub = box.column()
|
||||
row = colsub.row(align=True)
|
||||
# row.label
|
||||
if show:
|
||||
props = row.operator("bl_pkg.pkg_show_clear", text="", icon='DISCLOSURE_TRI_DOWN', emboss=False)
|
||||
else:
|
||||
props = row.operator("bl_pkg.pkg_show_set", text="", icon='DISCLOSURE_TRI_RIGHT', emboss=False)
|
||||
props.pkg_id = pkg_id
|
||||
props.repo_index = repo_index
|
||||
del props
|
||||
|
||||
if is_installed:
|
||||
if is_addon:
|
||||
row.operator(
|
||||
"preferences.addon_disable" if is_enabled else "preferences.addon_enable",
|
||||
icon='CHECKBOX_HLT' if is_enabled else 'CHECKBOX_DEHLT',
|
||||
text="",
|
||||
emboss=False,
|
||||
).module = addon_module_name
|
||||
elif is_theme:
|
||||
props = row.operator(
|
||||
"bl_pkg.extension_theme_disable" if is_enabled else "bl_pkg.extension_theme_enable",
|
||||
icon='CHECKBOX_HLT' if is_enabled else 'CHECKBOX_DEHLT',
|
||||
text="",
|
||||
emboss=False,
|
||||
)
|
||||
props.repo_index = repo_index
|
||||
props.pkg_id = pkg_id
|
||||
del props
|
||||
else:
|
||||
# Use a place-holder checkbox icon to avoid odd text alignment when mixing with installed add-ons.
|
||||
# Non add-ons have no concept of "enabled" right now, use installed.
|
||||
row.operator(
|
||||
"bl_pkg.extension_disable",
|
||||
text="",
|
||||
icon='CHECKBOX_HLT',
|
||||
emboss=False,
|
||||
)
|
||||
else:
|
||||
# Not installed, always placeholder.
|
||||
row.operator("bl_pkg.extensions_enable_not_installed", text="", icon='CHECKBOX_DEHLT', emboss=False)
|
||||
|
||||
if show_development:
|
||||
if mark:
|
||||
props = row.operator("bl_pkg.pkg_mark_clear", text="", icon='RADIOBUT_ON', emboss=False)
|
||||
else:
|
||||
props = row.operator("bl_pkg.pkg_mark_set", text="", icon='RADIOBUT_OFF', emboss=False)
|
||||
props.pkg_id = pkg_id
|
||||
props.repo_index = repo_index
|
||||
del props
|
||||
|
||||
sub = row.row()
|
||||
sub.active = is_enabled
|
||||
sub.label(text=item_remote["name"])
|
||||
del sub
|
||||
|
||||
row_right = row.row()
|
||||
row_right.alignment = 'RIGHT'
|
||||
|
||||
if has_remote:
|
||||
if is_installed:
|
||||
# Include uninstall below.
|
||||
if is_outdated:
|
||||
props = row_right.operator("bl_pkg.pkg_install", text="Update")
|
||||
props.repo_index = repo_index
|
||||
props.pkg_id = pkg_id
|
||||
del props
|
||||
else:
|
||||
# Right space for alignment with the button.
|
||||
row_right.label(text="Installed ")
|
||||
row_right.active = False
|
||||
else:
|
||||
props = row_right.operator("bl_pkg.pkg_install", text="Install")
|
||||
props.repo_index = repo_index
|
||||
props.pkg_id = pkg_id
|
||||
del props
|
||||
else:
|
||||
# Right space for alignment with the button.
|
||||
row_right.label(text="Installed ")
|
||||
row_right.active = False
|
||||
|
||||
if show:
|
||||
split = box.split(factor=0.15)
|
||||
col_a = split.column()
|
||||
col_b = split.column()
|
||||
|
||||
col_a.label(text="Description:")
|
||||
# The full description may be multiple lines (not yet supported by Blender's UI).
|
||||
col_b.label(text=item_remote["tagline"])
|
||||
|
||||
if is_installed:
|
||||
col_a.label(text="Path:")
|
||||
col_b.label(text=os.path.join(repos_all[repo_index].directory, pkg_id), translate=False)
|
||||
|
||||
# Remove the maintainer's email: while it's not private, showing it prominently
|
||||
# could cause maintainers to get direct emails instead of issue-tracker reports.
|
||||
col_a.label(text="Maintainer:")
|
||||
col_b.label(text=item_remote["maintainer"].split("<", 1)[0].rstrip(), translate=False)
|
||||
|
||||
col_a.label(text="License:")
|
||||
col_b.label(text=license_info_to_text(item_remote["license"]))
|
||||
|
||||
col_a.label(text="Version:")
|
||||
if is_outdated:
|
||||
col_b.label(text="{:s} ({:s} available)".format(item_local_version, item_version))
|
||||
else:
|
||||
col_b.label(text=item_version)
|
||||
|
||||
if has_remote:
|
||||
col_a.label(text="Size:")
|
||||
col_b.label(text=size_as_fmt_string(item_remote["archive_size"]))
|
||||
|
||||
if not filter_by_type:
|
||||
col_a.label(text="Type:")
|
||||
col_b.label(text=item_remote["type"])
|
||||
|
||||
if len(repos_all) > 1:
|
||||
col_a.label(text="Repository:")
|
||||
col_b.label(text=repos_all[repo_index].name)
|
||||
|
||||
if value := item_remote.get("website"):
|
||||
col_a.label(text="Internet:")
|
||||
# Use a half-size button: legacy add-ons have two buttons, here there is only one,
|
||||
# however a single large button looks silly, so still use the half size.
|
||||
col_b.split(factor=0.5).operator("wm.url_open", text="Website", icon='HELP').url = value
|
||||
del value
|
||||
|
||||
# Note that we could allow removing extensions from non-remote extension repos
|
||||
# although this is destructive, so don't enable this right now.
|
||||
if is_installed:
|
||||
rowsub = col_b.row()
|
||||
rowsub.alignment = 'RIGHT'
|
||||
props = rowsub.operator("bl_pkg.pkg_uninstall", text="Uninstall")
|
||||
props.repo_index = repo_index
|
||||
props.pkg_id = pkg_id
|
||||
del props, rowsub
|
||||
|
||||
# Show addon user preferences.
|
||||
if is_enabled and is_addon:
|
||||
if (addon_preferences := used_addon_module_name_map[addon_module_name].preferences) is not None:
|
||||
USERPREF_PT_addons.draw_addon_preferences(layout, context, addon_preferences)
|
||||
|
||||
if show_addons and show_legacy_addons:
|
||||
extensions_panel_draw_legacy_addons(
|
||||
layout,
|
||||
context,
|
||||
search_lower=search_lower,
|
||||
enabled_only=enabled_only,
|
||||
installed_only=installed_only,
|
||||
used_addon_module_name_map=used_addon_module_name_map,
|
||||
)
|
||||
|
||||
# Finally show any errors in a single panel which can be dismissed.
|
||||
display_errors.errors_curr = errors_on_draw
|
||||
if errors_on_draw:
|
||||
display_errors.draw(layout_topmost)
|
||||
|
||||
|
||||
class USERPREF_PT_extensions_bl_pkg_filter(Panel):
|
||||
bl_label = "Extensions Filter"
|
||||
|
||||
bl_space_type = 'TOPBAR' # dummy.
|
||||
bl_region_type = 'HEADER'
|
||||
bl_ui_units_x = 13
|
||||
|
||||
def draw(self, context):
|
||||
layout = self.layout
|
||||
|
||||
wm = context.window_manager
|
||||
|
||||
col = layout.column(heading="Show Only")
|
||||
col.use_property_split = True
|
||||
col.prop(wm, "extension_enabled_only", text="Enabled Extensions")
|
||||
col.prop(wm, "extension_updates_only", text="Updates Available")
|
||||
sub = col.column()
|
||||
sub.active = (not wm.extension_enabled_only) and (not wm.extension_updates_only)
|
||||
sub.prop(wm, "extension_installed_only", text="Installed Extensions")
|
||||
|
||||
col = layout.column(heading="Show")
|
||||
col.use_property_split = True
|
||||
sub = col.column()
|
||||
sub.active = (not wm.extension_updates_only)
|
||||
sub.prop(wm, "extension_show_legacy_addons", text="Legacy Add-ons")
|
||||
|
||||
|
||||
class USERPREF_MT_extensions_bl_pkg_settings(Menu):
|
||||
bl_label = "Extension Settings"
|
||||
|
||||
def draw(self, context):
|
||||
layout = self.layout
|
||||
|
||||
prefs = context.preferences
|
||||
|
||||
addon_prefs = prefs.addons[__package__].preferences
|
||||
|
||||
layout.operator("bl_pkg.repo_sync_all", text="Check for Updates", icon='FILE_REFRESH')
|
||||
|
||||
layout.separator()
|
||||
|
||||
layout.operator("bl_pkg.pkg_upgrade_all", text="Install Available Updates", icon='IMPORT')
|
||||
layout.operator("bl_pkg.pkg_install_files", text="Install from Disk")
|
||||
layout.operator("preferences.addon_install", text="Install Legacy Add-on")
|
||||
|
||||
if prefs.experimental.use_extension_utils:
|
||||
layout.separator()
|
||||
|
||||
layout.prop(addon_prefs, "show_development_reports")
|
||||
|
||||
layout.separator()
|
||||
|
||||
# We might want to expose this for all users; the purpose of this
|
||||
# is to refresh after changes have been made to the repos outside of Blender,
|
||||
# though it's debatable whether this is a common case.
|
||||
layout.operator("preferences.addon_refresh", text="Refresh (file-system)", icon='FILE_REFRESH')
|
||||
layout.separator()
|
||||
|
||||
layout.operator("bl_pkg.pkg_install_marked", text="Install Marked", icon='IMPORT')
|
||||
layout.operator("bl_pkg.pkg_uninstall_marked", text="Uninstall Marked", icon='X')
|
||||
layout.operator("bl_pkg.obsolete_marked")
|
||||
|
||||
layout.separator()
|
||||
|
||||
layout.operator("bl_pkg.repo_lock")
|
||||
layout.operator("bl_pkg.repo_unlock")
|
||||
|
||||
|
||||
def extensions_panel_draw(panel, context):
|
||||
prefs = context.preferences
|
||||
|
||||
if not prefs.experimental.use_extension_repos:
|
||||
# Unexpected: the extensions feature is disabled but this add-on is enabled.
|
||||
# In this case don't show the UI as it is confusing.
|
||||
return
|
||||
|
||||
from .bl_extension_ops import (
|
||||
blender_filter_by_type_map,
|
||||
)
|
||||
|
||||
addon_prefs = prefs.addons[__package__].preferences
|
||||
|
||||
show_development = prefs.experimental.use_extension_utils
|
||||
show_development_reports = show_development and addon_prefs.show_development_reports
|
||||
|
||||
wm = context.window_manager
|
||||
layout = panel.layout
|
||||
|
||||
row = layout.split(factor=0.5)
|
||||
row_a = row.row()
|
||||
row_a.prop(wm, "extension_search", text="", icon='VIEWZOOM')
|
||||
row_b = row.row(align=True)
|
||||
row_b.prop(wm, "extension_type", text="")
|
||||
row_b.popover("USERPREF_PT_extensions_bl_pkg_filter", text="", icon='FILTER')
|
||||
|
||||
row_b.separator()
|
||||
row_b.popover("USERPREF_PT_extensions_repos", text="Repositories")
|
||||
|
||||
row_b.separator()
|
||||
row_b.menu("USERPREF_MT_extensions_bl_pkg_settings", text="", icon='DOWNARROW_HLT')
|
||||
del row, row_a, row_b
|
||||
|
||||
if show_development_reports:
|
||||
show_status = bool(repo_status_text.log)
|
||||
else:
|
||||
# Only show if running and there is progress to display.
|
||||
show_status = bool(repo_status_text.log) and repo_status_text.running
|
||||
if show_status:
|
||||
show_status = False
|
||||
for ty, msg in repo_status_text.log:
|
||||
if ty == 'PROGRESS':
|
||||
show_status = True
|
||||
|
||||
if show_status:
|
||||
box = layout.box()
|
||||
# Don't clip longer names.
|
||||
row = box.split(factor=0.9, align=True)
|
||||
if repo_status_text.running:
|
||||
row.label(text=repo_status_text.title + "...", icon='INFO')
|
||||
else:
|
||||
row.label(text=repo_status_text.title, icon='INFO')
|
||||
if show_development_reports:
|
||||
rowsub = row.row(align=True)
|
||||
rowsub.alignment = 'RIGHT'
|
||||
rowsub.operator("bl_pkg.pkg_status_clear", text="", icon='X', emboss=False)
|
||||
boxsub = box.box()
|
||||
for ty, msg in repo_status_text.log:
|
||||
if ty == 'STATUS':
|
||||
boxsub.label(text=msg)
|
||||
elif ty == 'PROGRESS':
|
||||
msg_str, progress_unit, progress, progress_range = msg
|
||||
if progress <= progress_range:
|
||||
boxsub.progress(
|
||||
factor=progress / progress_range,
|
||||
text="{:s}, {:s}".format(
|
||||
sizes_as_percentage_string(progress, progress_range),
|
||||
msg_str,
|
||||
),
|
||||
)
|
||||
elif progress_unit == 'BYTE':
|
||||
boxsub.progress(factor=0.0, text="{:s}, {:s}".format(msg_str, size_as_fmt_string(progress)))
|
||||
else:
|
||||
# We might want to support other types.
|
||||
boxsub.progress(factor=0.0, text="{:s}, {:d}".format(msg_str, progress))
|
||||
else:
|
||||
boxsub.label(text="{:s}: {:s}".format(ty, msg))
|
||||
|
||||
# Hide when running.
|
||||
if repo_status_text.running:
|
||||
return
|
||||
|
||||
if not prefs.extensions.use_online_access_handled:
|
||||
extensions_panel_draw_online_extensions_request_impl(panel, context)
|
||||
|
||||
extensions_panel_draw_impl(
|
||||
panel,
|
||||
context,
|
||||
wm.extension_search.lower(),
|
||||
blender_filter_by_type_map[wm.extension_type],
|
||||
wm.extension_enabled_only,
|
||||
wm.extension_updates_only,
|
||||
wm.extension_installed_only,
|
||||
wm.extension_show_legacy_addons,
|
||||
show_development,
|
||||
)
|
||||
|
||||
|
||||
classes = (
|
||||
# Pop-overs.
|
||||
USERPREF_PT_extensions_bl_pkg_filter,
|
||||
USERPREF_MT_extensions_bl_pkg_settings,
|
||||
)
|
||||
|
||||
|
||||
def register():
|
||||
USERPREF_PT_addons.append(extensions_panel_draw)
|
||||
|
||||
for cls in classes:
|
||||
bpy.utils.register_class(cls)
|
||||
|
||||
|
||||
def unregister():
|
||||
USERPREF_PT_addons.remove(extensions_panel_draw)
|
||||
|
||||
for cls in reversed(classes):
|
||||
bpy.utils.unregister_class(cls)
|
1305
scripts/addons_core/bl_pkg/bl_extension_utils.py
Normal file
File diff suppressed because it is too large
2923
scripts/addons_core/bl_pkg/cli/blender_ext.py
Executable file
File diff suppressed because it is too large
5
scripts/addons_core/bl_pkg/example_extension/AUTHORS
Normal file
@ -0,0 +1,5 @@
|
||||
# Authors
|
||||
|
||||
Campbell Barton <campbell@email.com>
|
||||
Sergey Sharybin <sergey@email.com>
|
||||
|
8
scripts/addons_core/bl_pkg/example_extension/__init__.py
Normal file
@ -0,0 +1,8 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
def register():
|
||||
print("Register", __package__)
|
||||
|
||||
|
||||
def unregister():
|
||||
print("UnRegister", __package__)
|
@ -0,0 +1,17 @@
|
||||
# Example
|
||||
schema_version = "1.0.0"
|
||||
|
||||
id = "my_example_package"
|
||||
name = "Test Package"
|
||||
tagline = "Single line description"
|
||||
type = "add-on"
|
||||
tags = ["UV", "Modeling"]
|
||||
version = "0.2.0"
|
||||
blender_version_min = "2.80.0"
|
||||
maintainer = "Maintainer Name"
|
||||
license = [
|
||||
"SPDX:CC-0"
|
||||
]
|
||||
copyright = [
|
||||
"Developer Name"
|
||||
]
|
135
scripts/addons_core/bl_pkg/readme.rst
Normal file
@ -0,0 +1,135 @@
|
||||
|
||||
##################
|
||||
Blender Extensions
|
||||
##################
|
||||
|
||||
Directory Layout
|
||||
================
|
||||
|
||||
``./blender_addon/bl_pkg/cli/``
|
||||
The stand-alone command line utility to manage extensions.
|
||||
|
||||
``./blender_addon/bl_pkg/``
|
||||
The Blender add-on which wraps the command line utility
|
||||
(abstracts details of interacting with the package manager & repositories).
|
||||
|
||||
``./tests/``
|
||||
Automated tests.
|
||||
|
||||
To run tests via the ``Makefile``.
|
||||
|
||||
Test the command line application.
|
||||
|
||||
.. code-block::
|
||||
|
||||
make test PYTHON_BIN=/path/to/bin/python3.11
|
||||
|
||||
If your system Python is v3.11 or newer you may omit ``PYTHON_BIN``.
|
||||
|
||||
.. code-block::
|
||||
|
||||
make test_blender BLENDER_BIN=/path/to/blender
|
||||
|
||||
|
||||
GUI
|
||||
===
|
||||
|
||||
This GUI is a work-in-progress; currently it has been made to work with an unmodified Blender 4.1.
|
||||
|
||||
- Link ``blender_addon/bl_pkg`` into your add-ons directory.
|
||||
- Enable the blender extensions add-on from Blender.
|
||||
- Enable the blender extensions checkbox in the add-ons preference (this is a temporary location).
|
||||
- Repositories can be added/removed from the "Files" section in the preferences.
|
||||
|
||||
|
||||
Hacking
|
||||
=======
|
||||
|
||||
Some useful hints.
|
||||
|
||||
When developing the command line interface, these tests can be set up to run on file-change; run:
|
||||
|
||||
.. code-block::
|
||||
|
||||
make watch_test
|
||||
|
||||
To run the Blender tests:
|
||||
|
||||
.. code-block::
|
||||
|
||||
make watch_test_blender BLENDER_BIN=/path/to/blender
|
||||
|
||||
How to Setup a Test Environment
|
||||
===============================
|
||||
|
||||
Most of the options here use the command-line tool. For a comprehensive list of commands, check the help:
|
||||
|
||||
.. code-block::
|
||||
|
||||
./blender_addon/bl_pkg/cli/blender_ext.py --help
|
||||
|
||||
|
||||
Dummy server
|
||||
------------
|
||||
|
||||
The simplest way to get started is by creating a dummy asset library.
|
||||
|
||||
.. code-block::
|
||||
|
||||
./blender_addon/bl_pkg/cli/blender_ext.py dummy-repo \
|
||||
--repo-dir=/path/to/host/my/repo/files \
|
||||
--package-names="blue,red,green,purple,orange"
|
||||
|
||||
This will populate the directory specified as ``--repo-dir`` with dummy asset packages (``.zip``),
|
||||
and an index (``bl_ext_repo.json``).
|
||||
|
||||
|
||||
Setup an Extensions Repository
|
||||
==============================
|
||||
|
||||
First you need to create an individual package for each extension:
|
||||
|
||||
- Go to the directory of the extension you want to package.
|
||||
- Create a ``bl_ext_pkg.toml`` file with your configuration.
|
||||
- Run the command ``blender_ext.py build``.
|
||||
|
||||
You can look at an example of a dummy extension in the ``example_extension`` directory.
|
||||
|
||||
.. code-block::
|
||||
|
||||
cd ./example_extension
|
||||
../blender_addon/bl_pkg/cli/blender_ext.py build
|
||||
|
||||
This will create a ``my_example_package.zip`` (as specified in the .toml file).
|
||||
|
||||
Now you can move all your ``*.zip`` packages to where they will be hosted on the server.
|
||||
The final step is to create an index file to serve all your packages.
|
||||
|
||||
.. code-block::
|
||||
|
||||
mkdir -p /path/to/host/my/repo/files
|
||||
cp ./example_extension/my_example_package.zip /path/to/host/my/repo/files
|
||||
./blender_addon/bl_pkg/cli/blender_ext.py server-generate --repo-dir /path/to/host/my/repo/files
|
||||
|
||||
This will generate a new file ``bl_ext_repo.json`` in your repository directory.
|
||||
This file is to be used as the entry point to your remote server.
|
||||
|
||||
Alternatively, if you are doing tests locally,
|
||||
you can set the directory containing this file as the ``Remote Path`` of your Extensions Repository.
|
||||
|
||||
|
||||
.. This section could go elsewhere, for now there is only a single note.
|
||||
|
||||
Requirement: Add-Ons
|
||||
====================
|
||||
|
||||
Add-ons packaged as extensions must use relative imports when importing their own sub-modules.
|
||||
This is a requirement of Python module name-spacing.
|
||||
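For example, inside an extension's ``__init__.py``
(an illustrative sketch, the sub-module and package names are hypothetical):

.. code-block::

   # OK: a relative import of the extension's own sub-module.
   from . import foo

   # Broken: an absolute import of the same sub-module, the top-level
   # name isn't importable once the add-on is name-spaced as an extension.
   import my_example_package.foo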
|
||||
|
||||
Requirement: Blender 4.2
|
||||
========================
|
||||
|
||||
This add-on requires a yet-to-be-released version of Blender.
|
||||
|
||||
You can download a `daily build <https://builder.blender.org>`__ of Blender 4.2 for testing and development purposes.
|
103
scripts/addons_core/bl_pkg/tests/modules/http_server_context.py
Normal file
@ -0,0 +1,103 @@
|
||||
# SPDX-FileCopyrightText: 2023 Blender Foundation
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
"""
|
||||
Starts up a web server pointed at a local directory for the purpose of simulating online access,
|
||||
with basic options for PORT/path & verbosity (so tests aren't too noisy).
|
||||
"""
|
||||
__all__ = (
|
||||
"HTTPServerContext",
|
||||
)
|
||||
|
||||
import socketserver
|
||||
import http.server
|
||||
import threading
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
)
|
||||
|
||||
|
||||
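# A minimal usage sketch (directory & port are hypothetical):
#
#   with HTTPServerContext(directory="/tmp/repo", port=8001, wait_tries=10, wait_delay=0.05):
#       ...  # make requests against "http://localhost:8001/".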
class HTTPServerContext:
|
||||
__slots__ = (
|
||||
"_directory",
|
||||
"_port",
|
||||
"_http_thread",
|
||||
"_http_server",
|
||||
"_wait_tries",
|
||||
"_wait_delay",
|
||||
"_verbose",
|
||||
)
|
||||
|
||||
class _TestServer(socketserver.TCPServer):
|
||||
allow_reuse_address = True
|
||||
|
||||
@staticmethod
|
||||
def _is_port_in_use(port: int) -> bool:
|
||||
import socket
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||
return s.connect_ex(("localhost", port)) == 0
|
||||
|
||||
@staticmethod
|
||||
def _test_handler_factory(directory: str, verbose: bool = False) -> type:
|
||||
class TestHandler(http.server.SimpleHTTPRequestHandler):
|
||||
def __init__(self, *args: Any, **kwargs: Any) -> None:
|
||||
super().__init__(*args, directory=directory, **kwargs)
|
||||
# Suppress messages by overriding the function.
|
||||
if not verbose:
|
||||
def log_message(self, *_args: Any, **_kw: Any) -> None:
|
||||
pass
|
||||
return TestHandler
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
directory: str,
|
||||
port: int,
|
||||
*,
|
||||
verbose: bool = False,
|
||||
wait_delay: float = 0.0,
|
||||
wait_tries: int = 0,
|
||||
) -> None:
|
||||
self._directory = directory
|
||||
self._port = port
|
||||
self._wait_delay = wait_delay
|
||||
self._wait_tries = wait_tries
|
||||
self._verbose = verbose
|
||||
|
||||
# Members `_http_thread` & `_http_server` are set when entering the context.
|
||||
|
||||
def __enter__(self) -> None:
|
||||
|
||||
if self._wait_tries:
|
||||
import time
|
||||
for _ in range(self._wait_tries):
|
||||
if not HTTPServerContext._is_port_in_use(self._port):
|
||||
break
|
||||
|
||||
print("Waiting...")
|
||||
time.sleep(self._wait_delay)
|
||||
|
||||
http_server = HTTPServerContext._TestServer(
|
||||
("", self._port),
|
||||
HTTPServerContext._test_handler_factory(
|
||||
self._directory,
|
||||
verbose=self._verbose,
|
||||
),
|
||||
)
|
||||
|
||||
# Use a thread so as not to block.
|
||||
http_thread = threading.Thread(target=http_server.serve_forever)
|
||||
http_thread.daemon = True
|
||||
http_thread.start()
|
||||
|
||||
self._http_thread = http_thread
|
||||
self._http_server = http_server
|
||||
|
||||
def __exit__(self, _type: Any, _value: Any, traceback: Any) -> None:
|
||||
# Needed on WIN32, otherwise exit causes an `OSError`.
|
||||
self._http_server.shutdown()
|
||||
|
||||
self._http_server.server_close()
|
||||
del self._http_server
|
||||
del self._http_thread
|
@ -0,0 +1,153 @@
|
||||
# SPDX-FileCopyrightText: 2023 Blender Foundation
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
"""
|
||||
This module generates a Python wheel (*.whl) for the purpose of running tests.
|
||||
"""
|
||||
__all__ = (
|
||||
"generate_from_file_data",
|
||||
"generate_from_source",
|
||||
)
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from typing import (
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Tuple,
|
||||
)
|
||||
|
||||
|
||||
def _contents_to_filesystem(
|
||||
contents: Dict[str, bytes],
|
||||
directory: str,
|
||||
) -> None:
|
||||
swap_slash = os.sep == "\\"
|
||||
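# Keys use POSIX "/" separators, convert them to native back-slashes on Windows.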
for key, value in contents.items():
|
||||
path = key.replace("/", "\\") if swap_slash else key
|
||||
path_full = os.path.join(directory, path)
|
||||
path_base = os.path.dirname(path_full)
|
||||
os.makedirs(path_base, exist_ok=True)
|
||||
|
||||
with (
|
||||
open(path_full, "wb") if isinstance(value, bytes) else
|
||||
open(path_full, "w", encoding="utf-8")
|
||||
) as fh:
|
||||
fh.write(value)
|
||||
|
||||
|
||||
def search_impl(directory: str, fn: Callable[[os.DirEntry[str]], bool], result: List[str]) -> None:
|
||||
for entry in os.scandir(directory):
|
||||
if entry.is_dir():
|
||||
search_impl(entry.path, fn, result)
|
||||
if fn(entry):
|
||||
result.append(entry.path)
|
||||
|
||||
|
||||
def search(directory: str, fn: Callable[[os.DirEntry[str]], bool]) -> List[str]:
|
||||
result: List[str] = []
|
||||
search_impl(directory, fn, result)
|
||||
return result
|
||||
|
||||
|
||||
def generate_from_file_data(
|
||||
*,
|
||||
module_name: str,
|
||||
version: str,
|
||||
package_contents: Dict[str, bytes],
|
||||
) -> Tuple[str, bytes]:
|
||||
"""
|
||||
:arg package_contents:
|
||||
The package contents.
|
||||
- The key is a path.
|
||||
- The value is file contents.
|
||||
|
||||
Return filename & data.
|
||||
"""
|
||||
|
||||
setup_contents: Dict[str, bytes] = {
|
||||
"setup.py": """
|
||||
from setuptools import setup
|
||||
|
||||
setup()
|
||||
""".encode("utf-8"),
|
||||
"pyproject.toml": """
|
||||
[build-system]
|
||||
requires = ["setuptools >= 61.0"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "{:s}"
|
||||
version = "{:s}"
|
||||
dependencies = []
|
||||
|
||||
requires-python = ">=3.11"
|
||||
authors = [
|
||||
{{name = "Developer Name", email = "name@example.com"}},
|
||||
]
|
||||
maintainers = [
|
||||
{{name = "Developer Name", email = "name@example.com"}}
|
||||
]
|
||||
description = "Dummy description."
|
||||
keywords = ["egg", "bacon", "sausage", "tomatoes", "Lobster Thermidor"]
|
||||
classifiers = [
|
||||
"Development Status :: 4 - Beta",
|
||||
"Programming Language :: Python"
|
||||
]
|
||||
""".format(module_name, version).encode("utf-8"),
|
||||
}
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
_contents_to_filesystem(package_contents, temp_dir)
|
||||
_contents_to_filesystem(setup_contents, temp_dir)
|
||||
|
||||
output = subprocess.run(
|
||||
[sys.executable, "setup.py", "bdist_wheel"],
|
||||
cwd=temp_dir,
|
||||
stderr=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
)
|
||||
|
||||
result = search(temp_dir, lambda entry: entry.name.endswith(".whl"))
|
||||
if len(result) != 1:
|
||||
print(output)
|
||||
raise Exception("failed to create wheel!")
|
||||
|
||||
with open(result[0], 'rb') as fh:
|
||||
data = fh.read()
|
||||
|
||||
filename = os.path.basename(result[0])
|
||||
|
||||
return filename, data
|
||||
|
||||
|
||||
def generate_from_source(
|
||||
*,
|
||||
module_name: str,
|
||||
version: str,
|
||||
source: str,
|
||||
) -> Tuple[str, bytes]:
|
||||
"""
|
||||
Return filename & data.
|
||||
"""
|
||||
return generate_from_file_data(
|
||||
module_name=module_name,
|
||||
version=version,
|
||||
package_contents={
|
||||
"{:s}/__init__.py".format(module_name): source.encode("utf-8"),
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
filename, data = generate_from_source(
|
||||
module_name="blender_example_module",
|
||||
version="0.0.1",
|
||||
source="print(\"Hello World\")"
|
||||
)
|
||||
print(filename, len(data))
|
199
scripts/addons_core/bl_pkg/tests/test_blender.py
Normal file
@ -0,0 +1,199 @@
|
||||
# SPDX-FileCopyrightText: 2023 Blender Foundation
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
"""
|
||||
Test with command:
|
||||
make test_blender BLENDER_BIN=/path/to/blender
|
||||
"""
|
||||
|
||||
# NOTE:
|
||||
# Currently this sets up an environment and runs commands.
|
||||
|
||||
# High-level tests: run operators which manage a repository and ensure they work as expected.
|
||||
# This tests Blender's integration for all areas except for the interactive GUI... for now
|
||||
# perhaps this will be supported in the future.
|
||||
|
||||
|
||||
# Start a web server, connect blender to it, then setup new repos and install extensions.
|
||||
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Sequence,
|
||||
Tuple,
|
||||
)
|
||||
|
||||
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
|
||||
|
||||
CMD = (
|
||||
sys.executable,
|
||||
os.path.normpath(os.path.join(BASE_DIR, "..", "cli", "blender_ext.py")),
|
||||
)
|
||||
|
||||
# Simulate communicating with a web-server.
|
||||
USE_HTTP = os.environ.get("USE_HTTP", "0") != "0"
|
||||
HTTP_PORT = 8002
|
||||
|
||||
VERBOSE = os.environ.get("VERBOSE", "0") != "0"
|
||||
|
||||
sys.path.append(os.path.join(BASE_DIR, "modules"))
|
||||
from http_server_context import HTTPServerContext # noqa: E402
|
||||
|
||||
|
||||
PKG_REPO_LIST_FILENAME = "bl_ext_repo.json"
|
||||
|
||||
# Use an in-memory temp, when available.
|
||||
TEMP_PREFIX = tempfile.gettempdir()
|
||||
if os.path.exists("/ramcache/tmp"):
|
||||
TEMP_PREFIX = "/ramcache/tmp"
|
||||
|
||||
# Useful for debugging; when blank, create dynamically.
|
||||
TEMP_DIR_SOURCE = os.path.join(TEMP_PREFIX, "blender_app_ext_source")
|
||||
TEMP_DIR_REMOTE = os.path.join(TEMP_PREFIX, "blender_app_ext_remote")
|
||||
TEMP_DIR_LOCAL = os.path.join(TEMP_PREFIX, "blender_app_ext_local")
|
||||
|
||||
if TEMP_DIR_SOURCE and not os.path.isdir(TEMP_DIR_SOURCE):
|
||||
os.makedirs(TEMP_DIR_SOURCE)
|
||||
if TEMP_DIR_LOCAL and not os.path.isdir(TEMP_DIR_LOCAL):
|
||||
os.makedirs(TEMP_DIR_LOCAL)
|
||||
if TEMP_DIR_REMOTE and not os.path.isdir(TEMP_DIR_REMOTE):
|
||||
os.makedirs(TEMP_DIR_REMOTE)
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Generic Functions
|
||||
|
||||
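# NOTE: `--output-type=JSON_0` makes the command emit null-byte separated JSON
# messages, each a `[type, value]` pair, e.g. (illustrative): ["STATUS", "Syncing..."].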
def command_output_from_json_0(args: Sequence[str]) -> Sequence[Tuple[str, Any]]:
|
||||
result = []
|
||||
for json_bytes in subprocess.check_output(
|
||||
[*CMD, *args, "--output-type=JSON_0"],
|
||||
).split(b'\0'):
|
||||
if not json_bytes:
|
||||
continue
|
||||
json_str = json_bytes.decode("utf-8")
|
||||
json_data = json.loads(json_str)
|
||||
assert len(json_data) == 2
|
||||
assert isinstance(json_data[0], str)
|
||||
result.append((json_data[0], json_data[1]))
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def ensure_script_directory(script_directory_to_add: str) -> None:
|
||||
import bpy # type: ignore
|
||||
script_directories = bpy.context.preferences.filepaths.script_directories
|
||||
script_dir_empty = None
|
||||
for script_dir in script_directories:
|
||||
dir_test = script_dir.directory
|
||||
if dir_test == script_directory_to_add:
|
||||
return
|
||||
if not dir_test:
|
||||
script_dir_empty = script_dir
|
||||
|
||||
if not script_dir_empty:
|
||||
bpy.ops.preferences.script_directory_add()
|
||||
script_dir_empty = script_directories[-1]
|
||||
|
||||
script_dir_empty.directory = script_directory_to_add
|
||||
|
||||
if script_directory_to_add not in sys.path:
|
||||
sys.path.append(script_directory_to_add)
|
||||
|
||||
|
||||
def blender_test_run(temp_dir_local: str) -> None:
|
||||
import bpy
|
||||
import addon_utils # type: ignore
|
||||
|
||||
preferences = bpy.context.preferences
|
||||
|
||||
preferences.view.show_developer_ui = True
|
||||
preferences.experimental.use_extension_repos = True
|
||||
|
||||
addon_dir = os.path.normpath(os.path.join(BASE_DIR, "..", "blender_addon"))
|
||||
|
||||
ensure_script_directory(addon_dir)
|
||||
|
||||
addon_utils.enable("bl_pkg")
|
||||
|
||||
print("BEG*********************", dir(bpy.ops.bl_pkg))
|
||||
|
||||
# NOTE: it's assumed the URL will expand to JSON, example:
|
||||
# http://extensions.local:8111/add-ons/?format=json
|
||||
# This is not supported by the test server so the file name needs to be added.
|
||||
remote_url = "http://localhost:{:d}/{:s}".format(HTTP_PORT, PKG_REPO_LIST_FILENAME)
|
||||
|
||||
repo = preferences.extensions.repos.new(
|
||||
name="My Test",
|
||||
module="my_repo",
|
||||
custom_directory=temp_dir_local,
|
||||
remote_url=remote_url,
|
||||
)
|
||||
|
||||
bpy.ops.bl_pkg.dummy_progress()
|
||||
|
||||
bpy.ops.bl_pkg.repo_sync(
|
||||
repo_directory=temp_dir_local,
|
||||
)
|
||||
|
||||
bpy.ops.bl_pkg.pkg_install(
|
||||
repo_directory=temp_dir_local,
|
||||
pkg_id="blue",
|
||||
)
|
||||
|
||||
bpy.ops.bl_pkg.pkg_uninstall(
|
||||
repo_directory=temp_dir_local,
|
||||
pkg_id="blue",
|
||||
)
|
||||
|
||||
preferences.extensions.repos.remove(repo)
|
||||
|
||||
print("END*********************")
|
||||
|
||||
|
||||
def main() -> None:
|
||||
package_names = (
|
||||
"blue",
|
||||
"red",
|
||||
"green",
|
||||
"purple",
|
||||
"orange",
|
||||
)
|
||||
with tempfile.TemporaryDirectory(dir=TEMP_DIR_REMOTE) as temp_dir_remote:
|
||||
# Populate repository from source.
|
||||
for msg in command_output_from_json_0([
|
||||
"dummy-repo",
|
||||
"--repo-dir", temp_dir_remote,
|
||||
"--package-names", ",".join(package_names)
|
||||
]):
|
||||
print(msg)
|
||||
|
||||
with HTTPServerContext(
|
||||
directory=temp_dir_remote,
|
||||
port=HTTP_PORT,
|
||||
# Avoid error when running tests quickly,
|
||||
# sometimes the port isn't available yet.
|
||||
wait_tries=10,
|
||||
wait_delay=0.05,
|
||||
):
|
||||
# Where we will put the files.
|
||||
with tempfile.TemporaryDirectory() as temp_dir_local:
|
||||
blender_test_run(temp_dir_local)
|
||||
|
||||
with open(os.path.join(temp_dir_remote, PKG_REPO_LIST_FILENAME), 'r', encoding="utf-8") as fh:
|
||||
print(fh.read())
|
||||
|
||||
# If we want to copy out these.
|
||||
# print(temp_dir_remote)
|
||||
# import time
|
||||
# time.sleep(540)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
420
scripts/addons_core/bl_pkg/tests/test_cli.py
Normal file
@ -0,0 +1,420 @@
|
||||
# SPDX-FileCopyrightText: 2023 Blender Foundation
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
"""
|
||||
Test with command:
|
||||
make test
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
import unittest.util
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Sequence,
|
||||
Dict,
|
||||
    NamedTuple,
    Optional,
    Set,
    Tuple,
)

# For more useful output that isn't clipped.
unittest.util._MAX_LENGTH = 10_000

IS_WIN32 = sys.platform == "win32"

# See the variable with the same name in `blender_ext.py`.
REMOTE_REPO_HAS_JSON_IMPLIED = True

PKG_EXT = ".zip"

# PKG_REPO_LIST_FILENAME = "bl_ext_repo.json"
PKG_MANIFEST_FILENAME = "bl_ext_pkg_manifest.json"

PKG_MANIFEST_FILENAME_TOML = "blender_manifest.toml"

# Use an in-memory temp, when available.
TEMP_PREFIX = tempfile.gettempdir()
if os.path.exists("/ramcache/tmp"):
    TEMP_PREFIX = "/ramcache/tmp"

TEMP_DIR_REMOTE = os.path.join(TEMP_PREFIX, "bl_ext_remote")
TEMP_DIR_LOCAL = os.path.join(TEMP_PREFIX, "bl_ext_local")

if TEMP_DIR_LOCAL and not os.path.isdir(TEMP_DIR_LOCAL):
    os.makedirs(TEMP_DIR_LOCAL)
if TEMP_DIR_REMOTE and not os.path.isdir(TEMP_DIR_REMOTE):
    os.makedirs(TEMP_DIR_REMOTE)


BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# PYTHON_CMD = sys.executable

CMD = (
    sys.executable,
    os.path.normpath(os.path.join(BASE_DIR, "..", "cli", "blender_ext.py")),
)

# Simulate communicating with a web-server.
USE_HTTP = os.environ.get("USE_HTTP", "0") != "0"
HTTP_PORT = 8001

VERBOSE = os.environ.get("VERBOSE", "0") != "0"

sys.path.append(os.path.join(BASE_DIR, "modules"))
from http_server_context import HTTPServerContext  # noqa: E402

STATUS_NON_ERROR = {'STATUS', 'PROGRESS'}


# -----------------------------------------------------------------------------
# Generic Utilities
#


def rmdir_contents(directory: str) -> None:
    """
    Remove all directory contents without removing the directory.
    """
    for entry in os.scandir(directory):
        filepath = os.path.join(directory, entry.name)
        if entry.is_dir():
            shutil.rmtree(filepath)
        else:
            os.unlink(filepath)


# -----------------------------------------------------------------------------
# HTTP Server (simulate remote access)
#

# -----------------------------------------------------------------------------
# Generate Repository
#


def my_create_package(dirpath: str, filename: str, *, metadata: Dict[str, Any], files: Dict[str, bytes]) -> None:
    """
    Create a package using the command line interface.
    """
    assert filename.endswith(PKG_EXT)
    outfile = os.path.join(dirpath, filename)

    # NOTE: use the command line packaging utility to ensure 1:1 behavior with actual packages.
    metadata_copy = metadata.copy()

    with tempfile.TemporaryDirectory() as temp_dir_pkg:
        temp_dir_pkg_manifest_toml = os.path.join(temp_dir_pkg, PKG_MANIFEST_FILENAME_TOML)
        with open(temp_dir_pkg_manifest_toml, "wb") as fh:
            # NOTE: escaping is not supported, this is primitive TOML writing for tests.
            data = "".join((
                """# Example\n""",
                """schema_version = "{:s}"\n""".format(metadata_copy.pop("schema_version")),
                """id = "{:s}"\n""".format(metadata_copy.pop("id")),
                """name = "{:s}"\n""".format(metadata_copy.pop("name")),
                """tagline = "{:s}"\n""".format(metadata_copy.pop("tagline")),
                """version = "{:s}"\n""".format(metadata_copy.pop("version")),
                """type = "{:s}"\n""".format(metadata_copy.pop("type")),
                """tags = [{:s}]\n""".format(", ".join("\"{:s}\"".format(v) for v in metadata_copy.pop("tags"))),
                """blender_version_min = "{:s}"\n""".format(metadata_copy.pop("blender_version_min")),
                """maintainer = "{:s}"\n""".format(metadata_copy.pop("maintainer")),
                """license = [{:s}]\n""".format(", ".join("\"{:s}\"".format(v) for v in metadata_copy.pop("license"))),
            )).encode('utf-8')
            fh.write(data)

        if metadata_copy:
            raise Exception("Unexpected meta-data: {!r}".format(metadata_copy))

        for filename_iter, data in files.items():
            with open(os.path.join(temp_dir_pkg, filename_iter), "wb") as fh:
                fh.write(data)

        output_json = command_output_from_json_0(
            [
                "build",
                "--source-dir", temp_dir_pkg,
                "--output-filepath", outfile,
            ],
            exclude_types={"PROGRESS"},
        )

        output_json_error = command_output_filter_exclude(
            output_json,
            exclude_types=STATUS_NON_ERROR,
        )

        if output_json_error:
            raise Exception("Creating a package produced some error output: {!r}".format(output_json_error))
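
# For reference, the manifest written by the helper above comes out like this for
# the first template in `my_generate_repo` below (a sketch for orientation only,
# this text is not used by the tests):
#
#   # Example
#   schema_version = "1.0.0"
#   id = "foo_bar"
#   name = "Foo Bar"
#   tagline = "This package has a tagline"
#   version = "1.0.5"
#   type = "add-on"
#   tags = ["UV", "Modeling"]
#   blender_version_min = "0.0.0"
#   maintainer = "Some Developer"
#   license = ["SPDX:GPL-2.0-or-later"]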


class PkgTemplate(NamedTuple):
    """Data needed to create a package for testing."""
    idname: str
    name: str
    version: str


def my_generate_repo(
        dirpath: str,
        *,
        templates: Sequence[PkgTemplate],
) -> None:
    for template in templates:
        my_create_package(
            dirpath, template.idname + PKG_EXT,
            metadata={
                "schema_version": "1.0.0",
                "id": template.idname,
                "name": template.name,
                "tagline": """This package has a tagline""",
                "version": template.version,
                "type": "add-on",
                "tags": ["UV", "Modeling"],
                "blender_version_min": "0.0.0",
                "maintainer": "Some Developer",
                "license": ["SPDX:GPL-2.0-or-later"],
            },
            files={
                "__init__.py": b"# This is a script\n",
            },
        )


def command_output_filter_include(
        output_json: Sequence[Tuple[str, Any]],
        include_types: Set[str],
) -> Sequence[Tuple[str, Any]]:
    return [(a, b) for a, b in output_json if a in include_types]


def command_output_filter_exclude(
        output_json: Sequence[Tuple[str, Any]],
        exclude_types: Set[str],
) -> Sequence[Tuple[str, Any]]:
    return [(a, b) for a, b in output_json if a not in exclude_types]


def command_output(
        args: Sequence[str],
        expected_returncode: int = 0,
) -> str:
    proc = subprocess.run(
        [*CMD, *args],
        stdout=subprocess.PIPE,
        check=expected_returncode == 0,
    )
    if proc.returncode != expected_returncode:
        raise subprocess.CalledProcessError(proc.returncode, proc.args, output=proc.stdout, stderr=proc.stderr)
    result = proc.stdout.decode("utf-8")
    if IS_WIN32:
        result = result.replace("\r\n", "\n")
    return result


def command_output_from_json_0(
        args: Sequence[str],
        *,
        exclude_types: Optional[Set[str]] = None,
        expected_returncode: int = 0,
) -> Sequence[Tuple[str, Any]]:
    result = []

    proc = subprocess.run(
        [*CMD, *args, "--output-type=JSON_0"],
        stdout=subprocess.PIPE,
        check=expected_returncode == 0,
    )
    if proc.returncode != expected_returncode:
        raise subprocess.CalledProcessError(proc.returncode, proc.args, output=proc.stdout, stderr=proc.stderr)
    for json_bytes in proc.stdout.split(b'\0'):
        if not json_bytes:
            continue
        json_str = json_bytes.decode("utf-8")
        json_data = json.loads(json_str)
        assert len(json_data) == 2
        assert isinstance(json_data[0], str)
        if (exclude_types is not None) and (json_data[0] in exclude_types):
            continue
        result.append((json_data[0], json_data[1]))

    return result
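
# For orientation, the `JSON_0` output parsed above is a stream of NUL-terminated
# JSON `[type, data]` pairs. A hypothetical raw payload (not real command output):
#
#   b'["STATUS", "Sync complete"]\x00["PROGRESS", 50]\x00'
#
# which `command_output_from_json_0` turns into:
#
#   [("STATUS", "Sync complete"), ("PROGRESS", 50)]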


class TestCLI(unittest.TestCase):

    def test_version(self) -> None:
        self.assertEqual(command_output(["--version"]), "0.1\n")


class TestCLI_WithRepo(unittest.TestCase):
    dirpath = ""
    dirpath_url = ""

    @classmethod
    def setUpClass(cls) -> None:
        if TEMP_DIR_REMOTE:
            cls.dirpath = TEMP_DIR_REMOTE
            if os.path.isdir(cls.dirpath):
                # pylint: disable-next=using-constant-test
                if False:
                    shutil.rmtree(cls.dirpath)
                    os.makedirs(TEMP_DIR_REMOTE)
                else:
                    # Empty the path without removing it,
                    # handy so a developer can remain in the directory.
                    rmdir_contents(TEMP_DIR_REMOTE)
            else:
                os.makedirs(TEMP_DIR_REMOTE)
        else:
            cls.dirpath = tempfile.mkdtemp(prefix="bl_ext_")

        my_generate_repo(
            cls.dirpath,
            templates=(
                PkgTemplate(idname="foo_bar", name="Foo Bar", version="1.0.5"),
                PkgTemplate(idname="another_package", name="Another Package", version="1.5.2"),
                PkgTemplate(idname="test_package", name="Test Package", version="1.5.2"),
            ),
        )

        if USE_HTTP:
            if REMOTE_REPO_HAS_JSON_IMPLIED:
                cls.dirpath_url = "http://localhost:{:d}/bl_ext_repo.json".format(HTTP_PORT)
            else:
                cls.dirpath_url = "http://localhost:{:d}".format(HTTP_PORT)
        else:
            cls.dirpath_url = cls.dirpath

    @classmethod
    def tearDownClass(cls) -> None:
        if not TEMP_DIR_REMOTE:
            shutil.rmtree(cls.dirpath)
        del cls.dirpath
        del cls.dirpath_url

    def test_version(self) -> None:
        self.assertEqual(command_output(["--version"]), "0.1\n")

    def test_server_generate(self) -> None:
        output = command_output(["server-generate", "--repo-dir", self.dirpath])
        self.assertEqual(output, "found 3 packages.\n")

    def test_client_list(self) -> None:
        # TODO: only run once.
        self.test_server_generate()

        output = command_output(["list", "--remote-url", self.dirpath_url, "--local-dir", ""])
        self.assertEqual(
            output, (
                "another_package(1.5.2): Another Package\n"
                "foo_bar(1.0.5): Foo Bar\n"
                "test_package(1.5.2): Test Package\n"
            )
        )
        del output

        # TODO: figure out how to split JSON & TEXT output tests; this test just checks JSON is working at all.
        output_json = command_output_from_json_0(
            ["list", "--remote-url", self.dirpath_url, "--local-dir", ""],
            exclude_types={"PROGRESS"},
        )
        self.assertEqual(
            output_json, [
                ("STATUS", "another_package(1.5.2): Another Package"),
                ("STATUS", "foo_bar(1.0.5): Foo Bar"),
                ("STATUS", "test_package(1.5.2): Test Package"),
            ]
        )

    def test_client_install_and_uninstall(self) -> None:
        with tempfile.TemporaryDirectory(dir=TEMP_DIR_LOCAL) as temp_dir_local:
            # TODO: only run once.
            self.test_server_generate()

            output_json = command_output_from_json_0([
                "sync",
                "--remote-url", self.dirpath_url,
                "--local-dir", temp_dir_local,
            ], exclude_types={"PROGRESS"})
            self.assertEqual(
                output_json, [
                    ('STATUS', 'Sync repo: ' + self.dirpath_url),
                    ('STATUS', 'Sync downloading remote data'),
                    ('STATUS', 'Sync complete: ' + self.dirpath_url),
                ]
            )

            # Install.
            output_json = command_output_from_json_0(
                [
                    "install", "another_package",
                    "--remote-url", self.dirpath_url,
                    "--local-dir", temp_dir_local,
                ],
                exclude_types={"PROGRESS"},
            )
            self.assertEqual(
                output_json, [
                    ("STATUS", "Installed \"another_package\"")
                ]
            )
            self.assertTrue(os.path.isdir(os.path.join(temp_dir_local, "another_package")))

            # Re-Install.
            output_json = command_output_from_json_0(
                [
                    "install", "another_package",
                    "--remote-url", self.dirpath_url,
                    "--local-dir", temp_dir_local,
                ],
                exclude_types={"PROGRESS"},
            )
            self.assertEqual(
                output_json, [
                    ("STATUS", "Re-Installed \"another_package\"")
                ]
            )
            self.assertTrue(os.path.isdir(os.path.join(temp_dir_local, "another_package")))

            # Uninstall (not found).
            output_json = command_output_from_json_0(
                [
                    "uninstall", "another_package_",
                    "--local-dir", temp_dir_local,
                ],
                expected_returncode=1,
            )
            self.assertEqual(
                output_json, [
                    ("ERROR", "Package not found \"another_package_\"")
                ]
            )

            # Uninstall.
            output_json = command_output_from_json_0([
                "uninstall", "another_package",
                "--local-dir", temp_dir_local,
            ])
            self.assertEqual(
                output_json, [
                    ("STATUS", "Removed \"another_package\"")
                ]
            )
            self.assertFalse(os.path.isdir(os.path.join(temp_dir_local, "another_package")))


if __name__ == "__main__":
    if USE_HTTP:
        with HTTPServerContext(directory=TEMP_DIR_REMOTE, port=HTTP_PORT):
            unittest.main()
    else:
        unittest.main()
515
scripts/addons_core/bl_pkg/tests/test_cli_blender.py
Normal file
@@ -0,0 +1,515 @@
# SPDX-FileCopyrightText: 2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

"""
This test emulates running packaging commands with Blender via the command line.

This also happens to test packages with ``*.whl``.

Command to run this test:
   make test_cli_blender BLENDER_BIN=$PWD/../../../blender.bin
"""

import os
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest

from typing import (
    Dict,
    Sequence,
    Tuple,
    Union,
)


PKG_MANIFEST_FILENAME_TOML = "blender_manifest.toml"

VERBOSE_CMD = False


BLENDER_BIN = os.environ.get("BLENDER_BIN")
if BLENDER_BIN is None:
    raise Exception("BLENDER_BIN: environment variable not defined")


# Arguments to ensure extensions are enabled (currently it's an experimental feature).
BLENDER_ENABLE_EXTENSION_ARGS = [
    "--python-exit-code", "1",
    # Code begin/end text because of Blender's chatty reporting of version and that it quit.
    "--python-expr", '''\
from bpy import context
context.preferences.view.show_developer_ui = True
context.preferences.experimental.use_extension_repos = True
''',
]

BASE_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(BASE_DIR, "modules"))
import python_wheel_generate  # noqa: E402


CMD = (
    sys.executable,
    os.path.normpath(os.path.join(BASE_DIR, "..", "cli", "blender_ext.py")),
)

# Write the command to a script, useful so it's possible to manually run commands outside of the test environment.
TEMP_COMMAND_OUTPUT = ""  # os.path.join(tempfile.gettempdir(), "blender_test.sh")

# Handy when developing tests so the paths can be manually inspected.
USE_PAUSE_BEFORE_EXIT = False


# -----------------------------------------------------------------------------
# Utility Functions

def pause_until_keyboard_interrupt() -> None:
    print("Waiting for keyboard interrupt...")
    try:
        time.sleep(10_000)
    except KeyboardInterrupt:
        pass
    print("Exiting!")


def contents_to_filesystem(
        contents: Dict[str, Union[bytes, str]],
        directory: str,
) -> None:
    swap_slash = os.sep == "\\"
    for key, value in contents.items():
        path = key.replace("/", "\\") if swap_slash else key
        path_full = os.path.join(directory, path)
        path_base = os.path.dirname(path_full)
        os.makedirs(path_base, exist_ok=True)

        with (
                open(path_full, "wb") if isinstance(value, bytes) else
                open(path_full, "w", encoding="utf-8")
        ) as fh:
            fh.write(value)


def create_package(
        pkg_src_dir: str,
        pkg_idname: str,
        wheel_module_name: str,
        wheel_module_version: str,
) -> None:
    pkg_name = pkg_idname.replace("_", " ").title()

    wheel_filename, wheel_filedata = python_wheel_generate.generate_from_source(
        module_name=wheel_module_name,
        version=wheel_module_version,
        source=(
            "__version__ = {!r}\n"
            "print(\"The wheel has been found\")\n"
        ).format(wheel_module_version),
    )

    wheel_dir = os.path.join(pkg_src_dir, "wheels")
    os.makedirs(wheel_dir, exist_ok=True)
    path = os.path.join(wheel_dir, wheel_filename)
    with open(path, "wb") as fh:
        fh.write(wheel_filedata)

    with open(os.path.join(pkg_src_dir, PKG_MANIFEST_FILENAME_TOML), "w", encoding="utf-8") as fh:
        fh.write('''# Example\n''')
        fh.write('''schema_version = "1.0.0"\n''')
        fh.write('''id = "{:s}"\n'''.format(pkg_idname))
        fh.write('''name = "{:s}"\n'''.format(pkg_name))
        fh.write('''type = "add-on"\n''')
        fh.write('''tags = ["UV"]\n''')
        fh.write('''maintainer = "Maintainer Name <username@addr.com>"\n''')
        fh.write('''license = ["SPDX:GPL-2.0-or-later"]\n''')
        fh.write('''version = "1.0.0"\n''')
        fh.write('''tagline = "This is a tagline"\n''')
        fh.write('''blender_version_min = "0.0.0"\n''')
        fh.write('''\n''')
        fh.write('''wheels = ["./wheels/{:s}"]\n'''.format(wheel_filename))

    with open(os.path.join(pkg_src_dir, "__init__.py"), "w", encoding="utf-8") as fh:
        fh.write((
            '''import {:s}\n'''
            '''def register():\n'''
            '''    print("Register success:", __name__)\n'''
            '''\n'''
            '''def unregister():\n'''
            '''    print("Unregister success:", __name__)\n'''
        ).format(wheel_module_name))


def run_blender(
        args: Sequence[str],
        force_script_and_pause: bool = False,
) -> Tuple[int, str, str]:
    """
    :arg force_script_and_pause:
       When true, write out a shell script and wait;
       this lets the developer run the command manually, which is useful as the temporary
       directories are removed once the test finishes.
    """
    assert BLENDER_BIN is not None
    cmd: Tuple[str, ...] = (
        BLENDER_BIN,
        # Needed while extensions are experimental.
        *BLENDER_ENABLE_EXTENSION_ARGS,
        *args,
    )
    cwd = TEMP_DIR_LOCAL

    if VERBOSE_CMD:
        print(shlex.join(cmd))

    env_overlay = {
        "TMPDIR": TEMP_DIR_TMPDIR,
        "BLENDER_USER_RESOURCES": TEMP_DIR_BLENDER_USER,
        # Needed for ASAN builds.
        "ASAN_OPTIONS": "log_path={:s}:exitcode=0:{:s}".format(
            # Needed so the `stdout` & `stderr` aren't mixed in with ASAN messages.
            os.path.join(TEMP_DIR_TMPDIR, "blender_asan.txt"),
            # Support using existing configuration (if set).
            os.environ.get("ASAN_OPTIONS", ""),
        ),
    }

    if force_script_and_pause:
        temp_command_output = os.path.join(tempfile.gettempdir(), "blender_test.sh")
    else:
        temp_command_output = TEMP_COMMAND_OUTPUT

    if temp_command_output:
        with open(temp_command_output, "w", encoding="utf-8") as fh:
            fh.write("#!/usr/bin/env bash\n")
            for k, v in env_overlay.items():
                fh.write("export {:s}={:s}\n".format(k, shlex.quote(v)))
            fh.write("\n")

            fh.write("cd {:s}\n\n".format(shlex.quote(cwd)))

            for i, v in enumerate(cmd):
                if i != 0:
                    fh.write(" ")
                fh.write(shlex.quote(v))
                if i + 1 != len(cmd):
                    fh.write(" \\\n")
            fh.write("\n\n")

        if force_script_and_pause:
            print("Written:", temp_command_output)
            time.sleep(10_000)

    output = subprocess.run(
        cmd,
        cwd=cwd,
        env={
            **os.environ,
            **env_overlay,
        },
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")

    if VERBOSE_CMD:
        print(stdout)
        print(stderr)

    return (
        output.returncode,
        stdout,
        stderr,
    )


def run_blender_no_errors(
        args: Sequence[str],
        force_script_and_pause: bool = False,
) -> str:
    returncode, stdout, stderr = run_blender(args, force_script_and_pause=force_script_and_pause)
    if returncode != 0:
        if stdout:
            sys.stdout.write("STDOUT:\n")
            sys.stdout.write(stdout + "\n")
        if stderr:
            sys.stdout.write("STDERR:\n")
            sys.stdout.write(stderr + "\n")
        raise Exception("Expected zero returncode, got {:d}".format(returncode))
    if stderr:
        raise Exception("Expected empty stderr, got {:s}".format(stderr))
    return stdout


def run_blender_extensions(
        args: Sequence[str],
        force_script_and_pause: bool = False,
) -> Tuple[int, str, str]:
    return run_blender(("--command", "extension", *args,), force_script_and_pause=force_script_and_pause)


def run_blender_extensions_no_errors(
        args: Sequence[str],
        force_script_and_pause: bool = False,
) -> str:
    return run_blender_no_errors(("--command", "extension", *args,), force_script_and_pause=force_script_and_pause)
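
# For orientation, `run_blender_extensions(("repo-list",))` executes (roughly) the
# following command line, a sketch with `BLENDER_ENABLE_EXTENSION_ARGS` expanded:
#
#   $BLENDER_BIN --python-exit-code 1 --python-expr '<enable extension repos>' \
#       --command extension repo-list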


# Initialized from `main()`.
TEMP_DIR_BLENDER_USER = ""
TEMP_DIR_REMOTE = ""
TEMP_DIR_LOCAL = ""
# Don't leave temporary files in TMP: `/tmp` (since it's only cleared on restart).
# Instead, have a test-local temporary directory which is removed when the test finishes.
TEMP_DIR_TMPDIR = ""

user_dirs: Tuple[str, ...] = (
    "config",
    "datafiles",
    "extensions",
    "scripts",
)


class TestWithTempBlenderUser_MixIn(unittest.TestCase):

    @classmethod
    def setUpClass(cls) -> None:
        for dirname in user_dirs:
            os.makedirs(os.path.join(TEMP_DIR_BLENDER_USER, dirname), exist_ok=True)

    @classmethod
    def tearDownClass(cls) -> None:
        for dirname in user_dirs:
            shutil.rmtree(os.path.join(TEMP_DIR_BLENDER_USER, dirname))


class TestSimple(TestWithTempBlenderUser_MixIn, unittest.TestCase):

    # Internal utilities.
    def _build_package(
            self,
            *,
            pkg_idname: str,
            wheel_module_name: str,
            wheel_module_version: str,
    ) -> None:
        pkg_output_filepath = os.path.join(TEMP_DIR_REMOTE, pkg_idname + ".zip")
        with tempfile.TemporaryDirectory() as package_build_dir:
            create_package(
                package_build_dir,
                pkg_idname=pkg_idname,
                wheel_module_name=wheel_module_name,
                wheel_module_version=wheel_module_version,
            )
            stdout = run_blender_extensions_no_errors((
                "build",
                "--source-dir", package_build_dir,
                "--output-filepath", pkg_output_filepath,
            ))
            self.assertEqual(
                stdout,
                (
                    "Building {:s}.zip\n"
                    "complete\n"
                    "created \"{:s}\", {:d}\n"
                ).format(pkg_idname, pkg_output_filepath, os.path.getsize(pkg_output_filepath)),
            )

    def test_simple_package(self) -> None:
        """
        Create a simple package and install it.
        """

        repo_id = "test_repo_module_name"

        stdout = run_blender_extensions_no_errors((
            "repo-add",
            "--name", "MyTestRepo",
            "--directory", TEMP_DIR_LOCAL,
            "--url", TEMP_DIR_REMOTE,
            # A bit odd; this argument avoids running so many commands to set up a test.
            "--clear-all",
            repo_id,
        ))
        self.assertEqual(stdout, "Info: Preferences saved\n")

        wheel_module_name = "my_custom_wheel"

        # Create package contents.
        pkg_idname = "my_test_pkg"
        self._build_package(
            pkg_idname=pkg_idname,
            wheel_module_name=wheel_module_name,
            wheel_module_version="1.0.1",
        )

        # Generate the repository.
        stdout = run_blender_extensions_no_errors((
            "server-generate",
            "--repo-dir", TEMP_DIR_REMOTE,
        ))
        self.assertEqual(stdout, "found 1 packages.\n")

        stdout = run_blender_extensions_no_errors((
            "sync",
        ))
        self.assertEqual(stdout.rstrip("\n").split("\n")[-1], "STATUS Sync complete: {:s}".format(TEMP_DIR_REMOTE))

        # Install the package into Blender.

        stdout = run_blender_extensions_no_errors(("repo-list",))
        self.assertEqual(
            stdout,
            (
                '''test_repo_module_name:\n'''
                '''    name: "MyTestRepo"\n'''
                '''    directory: "{:s}"\n'''
                '''    url: "{:s}"\n'''
            ).format(TEMP_DIR_LOCAL, TEMP_DIR_REMOTE))

        stdout = run_blender_extensions_no_errors(("list",))
        self.assertEqual(
            stdout,
            (
                '''Repository: "MyTestRepo" (id=test_repo_module_name)\n'''
                '''  my_test_pkg: "My Test Pkg", This is a tagline\n'''
            )
        )

        stdout = run_blender_extensions_no_errors(("install", pkg_idname, "--enable"))
        self.assertEqual(
            [line for line in stdout.split("\n") if line.startswith("STATUS ")][0],
            "STATUS Installed \"my_test_pkg\""
        )

        # TODO: validate the installation works - that the package does something non-trivial when Blender starts.

        stdout = run_blender_extensions_no_errors(("remove", pkg_idname))
        self.assertEqual(
            [line for line in stdout.split("\n") if line.startswith("STATUS ")][0],
            "STATUS Removed \"my_test_pkg\""
        )

        returncode, _, _ = run_blender((
            "-b",
            "--python-expr",
            # Exit with 64 when the module is *not* found.
            # The module should not exist at this point (the package was just removed).
            (
                '''import sys\n'''
                '''try:\n'''
                '''    import {:s}\n'''
                '''    code = 32\n'''
                '''except ModuleNotFoundError:\n'''
                '''    code = 64\n'''
                '''sys.exit(code)\n'''
            ).format(wheel_module_name)
        ))
        self.assertEqual(returncode, 64)

        # Ensure packages that include conflicting dependencies use the newest wheel.
        packages_to_install = ["my_test_pkg"]
        # This is the maximum wheel version.
        packages_wheel_version_max = "4.0.1"
        # Create package contents (with a different wheel version).
        for pkg_idname, wheel_module_version in (
                ("my_test_pkg_a", "2.0.1"),
                ("my_test_pkg_b", packages_wheel_version_max),
                ("my_test_pkg_c", "3.0.1"),
        ):
            packages_to_install.append(pkg_idname)
            self._build_package(
                pkg_idname=pkg_idname,
                wheel_module_name=wheel_module_name,
                wheel_module_version=wheel_module_version,
            )

        # Generate the repository.
        stdout = run_blender_extensions_no_errors((
            "server-generate",
            "--repo-dir", TEMP_DIR_REMOTE,
        ))
        self.assertEqual(stdout, "found 4 packages.\n")

        stdout = run_blender_extensions_no_errors((
            "sync",
        ))
        self.assertEqual(stdout.rstrip("\n").split("\n")[-1], "STATUS Sync complete: {:s}".format(TEMP_DIR_REMOTE))

        # Install.

        stdout = run_blender_extensions_no_errors(("install", ",".join(packages_to_install), "--enable"))
        self.assertEqual(
            tuple([line for line in stdout.split("\n") if line.startswith("STATUS ")]),
            (
                '''STATUS Installed "my_test_pkg"''',
                '''STATUS Installed "my_test_pkg_a"''',
                '''STATUS Installed "my_test_pkg_b"''',
                '''STATUS Installed "my_test_pkg_c"''',
            )
        )

        returncode, stdout, stderr = run_blender((
            "-b",
            "--python-expr",
            # Exit with 64 only when the module exists with the expected (maximum) version,
            # 32 when the version is unexpected, and 16 when the module is missing.
            (
                '''import sys\n'''
                '''try:\n'''
                '''    import {:s}\n'''
                '''    found = True\n'''
                '''except ModuleNotFoundError:\n'''
                '''    found = False\n'''
                '''if found:\n'''
                '''    if {:s}.__version__ == "{:s}":\n'''
                '''        sys.exit(64)  # Success!\n'''
                '''    else:\n'''
                '''        sys.exit(32)\n'''
                '''else:\n'''
                '''    sys.exit(16)\n'''
            ).format(wheel_module_name, wheel_module_name, packages_wheel_version_max),
        ))

        self.assertEqual(returncode, 64)

        if USE_PAUSE_BEFORE_EXIT:
            print(TEMP_DIR_REMOTE)
            print(TEMP_DIR_BLENDER_USER)
            pause_until_keyboard_interrupt()


def main() -> None:
    global TEMP_DIR_BLENDER_USER
    global TEMP_DIR_REMOTE
    global TEMP_DIR_LOCAL
    global TEMP_DIR_TMPDIR

    with tempfile.TemporaryDirectory() as temp_prefix:
        TEMP_DIR_BLENDER_USER = os.path.join(temp_prefix, "bl_ext_blender")
        TEMP_DIR_REMOTE = os.path.join(temp_prefix, "bl_ext_remote")
        TEMP_DIR_LOCAL = os.path.join(temp_prefix, "bl_ext_local")
        TEMP_DIR_TMPDIR = os.path.join(temp_prefix, "tmp")

        for directory in (
                TEMP_DIR_BLENDER_USER,
                TEMP_DIR_REMOTE,
                TEMP_DIR_LOCAL,
                TEMP_DIR_TMPDIR,
        ):
            os.makedirs(directory, exist_ok=True)

        for dirname in user_dirs:
            os.makedirs(os.path.join(TEMP_DIR_BLENDER_USER, dirname), exist_ok=True)

        unittest.main()


if __name__ == "__main__":
    main()
380
scripts/addons_core/bl_pkg/wheel_manager.py
Normal file
@@ -0,0 +1,380 @@
# SPDX-FileCopyrightText: 2024 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

# Ref: https://peps.python.org/pep-0491/
# Deferred but seems to include valid info for existing wheels.

"""
This module takes wheels and applies them to a "managed" destination directory.
"""

__all__ = (
    "apply_action",
)

import os
import re
import shutil
import zipfile

from typing import (
    Dict,
    List,
    Optional,
    Set,
    Tuple,
)

WheelSource = Tuple[
    # Key - doesn't matter what this is... it's just a handle.
    str,
    # A list of absolute wheel file-paths.
    List[str],
]


def _read_records_csv(filepath: str) -> List[List[str]]:
    import csv
    with open(filepath, encoding="utf8", errors="surrogateescape") as fh:
        return list(csv.reader(fh.read().splitlines()))


def _wheels_from_dir(dirpath: str) -> Tuple[
        # The key is:
        #     wheel_id
        # The values are:
        #     Top level directories.
        Dict[str, List[str]],
        # Unknown paths.
        List[str],
]:
    result: Dict[str, List[str]] = {}
    paths_unused: Set[str] = set()

    if not os.path.exists(dirpath):
        return result, list(paths_unused)

    for entry in os.scandir(dirpath):
        name = entry.name
        paths_unused.add(name)
        if not entry.is_dir():
            continue
        # TODO: is this part of the spec?
        name = entry.name
        if not name.endswith("-info"):
            continue
        filepath_record = os.path.join(entry.path, "RECORD")
        if not os.path.exists(filepath_record):
            continue

        record_rows = _read_records_csv(filepath_record)

        # Build top-level paths.
        toplevel_paths_set: Set[str] = set()
        for row in record_rows:
            if not row:
                continue
            path_text = row[0]
            # Ensure path separators are compatible.
            path_text = path_text.replace("\\", "/")
            # Ensure double slashes and "/./" segments don't complicate checking the head of the path.
            path_split = [
                elem for elem in path_text.split("/")
                if elem not in {"", "."}
            ]
            if not path_split:
                continue
            # These won't have been extracted.
            if path_split[0] in {"..", name}:
                continue

            toplevel_paths_set.add(path_split[0])

        result[name] = list(sorted(toplevel_paths_set))
        del toplevel_paths_set

    for wheel_name, toplevel_paths in result.items():
        paths_unused.discard(wheel_name)
        for name in toplevel_paths:
            paths_unused.discard(name)

    paths_unused_list = list(sorted(paths_unused))

    return result, paths_unused_list
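
# For orientation, RECORD is a CSV of `path, hash, size` rows (PEP 376/491);
# only the path column is used above. Hypothetical rows:
#
#   pip/__init__.py,sha256=...,12345
#   pip-24.0.dist-info/RECORD,,
#
# from which the top-level paths {"pip", "pip-24.0.dist-info"} are derived.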


def _wheel_info_dir_from_zip(filepath_wheel: str) -> Optional[Tuple[str, List[str]]]:
    """
    Return:
    - The "*-info" directory name which contains meta-data.
    - The top-level path list (excluding "..").
    """
    dir_info = ""
    toplevel_paths: Set[str] = set()

    with zipfile.ZipFile(filepath_wheel, mode="r") as zip_fh:
        # This file will always exist.
        for filepath_rel in zip_fh.namelist():
            path_split = [
                elem for elem in filepath_rel.split("/")
                if elem not in {"", "."}
            ]
            if not path_split:
                continue
            if path_split[0] == "..":
                continue

            if len(path_split) == 2:
                if path_split[1].upper() == "RECORD":
                    if path_split[0].endswith("-info"):
                        dir_info = path_split[0]

            toplevel_paths.add(path_split[0])

    if dir_info == "":
        return None
    toplevel_paths.discard(dir_info)
    toplevel_paths_list = list(sorted(toplevel_paths))
    return dir_info, toplevel_paths_list


def _rmtree_safe(dir_remove: str, expected_root: str) -> None:
    if not dir_remove.startswith(expected_root):
        raise Exception("Expected prefix not found")
    shutil.rmtree(dir_remove)


def _zipfile_extractall_safe(
        zip_fh: zipfile.ZipFile,
        path: str,
        path_restrict: str,
) -> None:
    """
    A version of ``ZipFile.extractall`` that won't write to paths outside ``path_restrict``.

    Avoids writing this:
    ``zip_fh.extractall(path)``
    """
    sep = os.sep
    path_restrict = path_restrict.rstrip(sep)
    if sep == "\\":
        path_restrict = path_restrict.rstrip("/")
    path_restrict_with_slash = path_restrict + sep

    # Strip is probably not needed (only if multiple slashes exist).
    path_prefix = path[len(path_restrict_with_slash):].lstrip(sep)
    # Switch slashes forward.
    if sep == "\\":
        path_prefix = path_prefix.replace("\\", "/").rstrip("/") + "/"
    else:
        path_prefix = path_prefix + "/"

    assert len(path) >= len(path_restrict_with_slash)
    if not path.startswith(path_restrict_with_slash):
        raise Exception("Expected the destination path to start with the restricted directory")

    for member in zip_fh.infolist():
        filename_orig = member.filename
        member.filename = path_prefix + filename_orig
        # This isn't likely to happen so accept a noisy print here.
        # If this ends up happening more often, it could be suppressed.
        # (although this hints at bigger problems because we might be excluding necessary files).
        if os.path.normpath(member.filename).startswith(".." + sep):
            print("Skipping path:", member.filename, "that escapes:", path_restrict)
            continue
        zip_fh.extract(member, path_restrict)
        member.filename = filename_orig
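
# Why the `normpath` check above matters: a crafted member name can otherwise
# escape the extraction root ("zip-slip"). An illustrative standalone check:
#
#   >>> import os
#   >>> os.path.normpath("lib/../../etc/evil").startswith(".." + os.sep)
#   True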


WHEEL_VERSION_RE = re.compile(r"(\d+)?(?:\.(\d+))?(?:\.(\d+))")


def wheel_version_from_filename_for_cmp(
        filename: str,
) -> Tuple[int, int, int, str]:
    """
    Extract the version number for comparison.
    Note that this only handles the first 3 numbers;
    the trailing text is compared as a string, which is not technically correct,
    however supporting the full spec is not a priority since extensions should only
    be bundling stable releases, so comparing the first 3 numbers is sufficient.
    The trailing string is just a tie breaker in the unlikely event it differs.

    Supporting the full spec (comparing e.g. "1.1.dev6" with "1.1.6rc6")
    doesn't seem especially important, as extensions should use stable releases.
    """
    filename_split = filename.split("-")
    if len(filename_split) >= 2:
        version = filename_split[1]
        if (version_match := WHEEL_VERSION_RE.match(version)) is not None:
            groups = version_match.groups()
            return (
                int(groups[0]) if groups[0] is not None else 0,
                int(groups[1]) if groups[1] is not None else 0,
                int(groups[2]) if groups[2] is not None else 0,
                version[version_match.end():],
            )
    return (0, 0, 0, "")
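
# A quick sanity check of the parsing above (doctest-style, illustrative values):
#
#   >>> wheel_version_from_filename_for_cmp("my_module-2.0.1-py3-none-any.whl")
#   (2, 0, 1, '')
#   >>> wheel_version_from_filename_for_cmp("my_module-4.0.1rc1-py3-none-any.whl")
#   (4, 0, 1, 'rc1')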


def wheel_list_deduplicate_as_skip_set(
        wheel_list: List[WheelSource],
) -> Set[str]:
    """
    Return all wheel paths to skip.
    """
    wheels_to_skip: Set[str] = set()
    all_wheels: Set[str] = {
        filepath
        for _, wheels in wheel_list
        for filepath in wheels
    }

    # NOTE: this is not optimized.
    # Probably speed is never an issue here, but this could be sped up.

    # Keep a map from the base name to the "best" wheel,
    # the other wheels get added to `wheels_to_skip` to be ignored.
    all_wheels_by_base: Dict[str, str] = {}

    for wheel in all_wheels:
        wheel_filename = os.path.basename(wheel)
        wheel_base = wheel_filename.partition("-")[0]

        wheel_exists = all_wheels_by_base.get(wheel_base)
        if wheel_exists is None:
            all_wheels_by_base[wheel_base] = wheel
            continue

        wheel_exists_filename = os.path.basename(wheel_exists)
        if wheel_exists_filename == wheel_filename:
            # Should never happen because they are converted into a set before looping.
            assert wheel_exists != wheel
            # The same wheel is used in two different locations, use a tie breaker for predictability
            # although the result should be the same.
            if wheel_exists_filename < wheel_filename:
                all_wheels_by_base[wheel_base] = wheel
                wheels_to_skip.add(wheel_exists)
            else:
                wheels_to_skip.add(wheel)
        else:
            wheel_version = wheel_version_from_filename_for_cmp(wheel_filename)
            wheel_exists_version = wheel_version_from_filename_for_cmp(wheel_exists_filename)
            if (
                    (wheel_exists_version < wheel_version) or
                    # Tie breaker for predictability.
                    ((wheel_exists_version == wheel_version) and (wheel_exists_filename < wheel_filename))
            ):
                all_wheels_by_base[wheel_base] = wheel
                wheels_to_skip.add(wheel_exists)
            else:
                wheels_to_skip.add(wheel)

    return wheels_to_skip
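
# Minimal sketch of the contract (hypothetical paths): given two extensions that
# bundle the same wheel at different versions,
#
#   >>> wheel_list_deduplicate_as_skip_set([
#   ...     ("ext_a", ["/repo/a/pip-24.0-py3-none-any.whl"]),
#   ...     ("ext_b", ["/repo/b/pip-22.1-py2-none-any.whl"]),
#   ... ])
#   {'/repo/b/pip-22.1-py2-none-any.whl'}
#
# the older `pip` wheel is skipped and only the newest is extracted.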


def apply_action(
        *,
        local_dir: str,
        local_dir_site_packages: str,
        wheel_list: List[WheelSource],
) -> None:
    """
    :arg local_dir:
       The location wheels are stored.
       Typically: ``~/.config/blender/4.2/extensions/.local``.

       WARNING: files under this directory may be removed.
    :arg local_dir_site_packages:
       The path which wheels are extracted into.
       Typically: ``~/.config/blender/4.2/extensions/.local/lib/python3.11/site-packages``.
    """
    debug = False

    # NOTE: we could avoid scanning the wheel directories, however:
    # recursively removing all paths on the user's system can be considered relatively risky
    # even if this is located in a known location under the user's home directory - better avoid.
    # So build a list of wheel paths and only remove the unused paths from this list.
    wheels_installed, paths_unknown = _wheels_from_dir(local_dir_site_packages)

    # Wheels and their top level directories (which would be installed).
    wheels_packages: Dict[str, List[str]] = {}

    # Map the wheel ID to path.
    wheels_dir_info_to_filepath_map: Dict[str, str] = {}

    # NOTE(@ideasman42): the wheels skip-set only de-duplicates at the level of the base-name of the wheel's filename.
    # So the wheel file-paths:
    # - `pip-24.0-py3-none-any.whl`
    # - `pip-22.1-py2-none-any.whl`
    # Will both extract the *base* name `pip`, de-duplicating by skipping the wheels with an older version number.
    # This is not fool-proof, because it is possible files inside the `.whl` conflict upon extraction.
    # In practice I consider this fairly unlikely because:
    # - Practically all wheels extract to their top-level module names.
    # - Modules are mainly downloaded from the Python package index.
    #
    # Having two modules conflict is possible but this is an issue outside of Blender;
    # as it's most likely quite rare and generally avoided with unique module names,
    # this is not considered a problem to "solve" at the moment.
    #
    # The one exception to this assumption is any extensions that bundle `.whl` files that aren't
    # available on the Python package index. In this case naming collisions are more likely.
    # This probably needs to be handled on a policy level - if the `.whl` author also maintains
    # the extension they can in all likelihood make the module a sub-module of the extension
    # without the need to use `.whl` files.
    wheels_to_skip = wheel_list_deduplicate_as_skip_set(wheel_list)

    for key, wheels in wheel_list:
        for wheel in wheels:
            if wheel in wheels_to_skip:
                continue
            if (wheel_info := _wheel_info_dir_from_zip(wheel)) is None:
                continue
            dir_info, toplevel_paths_list = wheel_info
            wheels_packages[dir_info] = toplevel_paths_list

            wheels_dir_info_to_filepath_map[dir_info] = wheel

    # Now there are two sets of packages: the ones we need and the ones we have.

    # -----
    # Clear

    # First remove installed packages no longer needed:
    for dir_info, toplevel_paths_list in wheels_installed.items():
        if dir_info in wheels_packages:
            continue

        # Remove installed packages which aren't needed any longer.
        for filepath_rel in (dir_info, *toplevel_paths_list):
            filepath_abs = os.path.join(local_dir_site_packages, filepath_rel)
            if not os.path.exists(filepath_abs):
                continue

            if debug:
                print("removing wheel:", filepath_rel)

            if os.path.isdir(filepath_abs):
                _rmtree_safe(filepath_abs, local_dir)
            else:
                os.remove(filepath_abs)

    # -----
    # Setup

    # Install packages that need to be installed:
    for dir_info, toplevel_paths_list in wheels_packages.items():
        if dir_info in wheels_installed:
            continue

        if debug:
            for filepath_rel in toplevel_paths_list:
                print("adding wheel:", filepath_rel)
        filepath = wheels_dir_info_to_filepath_map[dir_info]
        # `ZipFile.extractall` is needed because some wheels contain paths that point to parent directories.
        # Handle this *safely* by allowing extracting to parent directories but limiting this to the `local_dir`.
        with zipfile.ZipFile(filepath, mode="r") as zip_fh:
            _zipfile_extractall_safe(zip_fh, local_dir_site_packages, local_dir)
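
# Hypothetical usage, mirroring how the extension system is expected to drive this
# module (paths and wheel names are illustrative only):
#
#   import wheel_manager
#   wheel_manager.apply_action(
#       local_dir="/home/me/.config/blender/4.2/extensions/.local",
#       local_dir_site_packages=(
#           "/home/me/.config/blender/4.2/extensions/.local/lib/python3.11/site-packages"),
#       wheel_list=[("my_extension", ["/path/to/idna-3.6-py3-none-any.whl"])],
#   )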
1100
scripts/addons_core/copy_global_transform.py
Normal file
File diff suppressed because it is too large
33
scripts/addons_core/hydra_storm/__init__.py
Normal file
@@ -0,0 +1,33 @@
# SPDX-FileCopyrightText: 2011-2022 Blender Foundation
#
# SPDX-License-Identifier: Apache-2.0

bl_info = {
    "name": "Hydra Storm render engine",
    "author": "AMD",
    "version": (1, 0, 0),
    "blender": (4, 0, 0),
    "description": "USD's high performance rasterizing renderer",
    "tracker_url": "",
    "doc_url": "",
    "community": "",
    "downloads": "",
    "main_web": "",
    "support": 'OFFICIAL',
    "category": "Render"
}


from . import engine, properties, ui


def register():
    engine.register()
    properties.register()
    ui.register()


def unregister():
    ui.unregister()
    properties.unregister()
    engine.unregister()
47
scripts/addons_core/hydra_storm/engine.py
Normal file
@@ -0,0 +1,47 @@
# SPDX-FileCopyrightText: 2011-2022 Blender Foundation
#
# SPDX-License-Identifier: Apache-2.0

import bpy


class StormHydraRenderEngine(bpy.types.HydraRenderEngine):
    bl_idname = 'HYDRA_STORM'
    bl_label = "Hydra Storm"
    bl_info = "USD's high performance rasterizing renderer"

    bl_use_preview = True
    bl_use_gpu_context = True
    bl_use_materialx = True

    bl_delegate_id = 'HdStormRendererPlugin'

    def get_render_settings(self, engine_type):
        settings = bpy.context.scene.hydra_storm.viewport if engine_type == 'VIEWPORT' else \
            bpy.context.scene.hydra_storm.final
        result = {
            'enableTinyPrimCulling': settings.use_tiny_prim_culling,
            'maxLights': settings.max_lights,
            'volumeRaymarchingStepSize': settings.volume_raymarching_step_size,
            'volumeRaymarchingStepSizeLighting': settings.volume_raymarching_step_size_lighting,
            'volumeMaxTextureMemoryPerField': settings.volume_max_texture_memory_per_field,
        }

        if engine_type != 'VIEWPORT':
            result |= {
                'aovToken:Combined': "color",
                'aovToken:Depth': "depth",
            }

        return result

    def update_render_passes(self, scene, render_layer):
        if render_layer.use_pass_combined:
            self.register_pass(scene, render_layer, 'Combined', 4, 'RGBA', 'COLOR')
        if render_layer.use_pass_z:
            self.register_pass(scene, render_layer, 'Depth', 1, 'Z', 'VALUE')


register, unregister = bpy.utils.register_classes_factory((
    StormHydraRenderEngine,
))
62
scripts/addons_core/hydra_storm/properties.py
Normal file
@@ -0,0 +1,62 @@
# SPDX-FileCopyrightText: 2011-2022 Blender Foundation
#
# SPDX-License-Identifier: Apache-2.0

import bpy


class Properties(bpy.types.PropertyGroup):
    type = None

    @classmethod
    def register(cls):
        cls.type.hydra_storm = bpy.props.PointerProperty(
            name="Hydra Storm",
            description="Hydra Storm properties",
            type=cls,
        )

    @classmethod
    def unregister(cls):
        del cls.type.hydra_storm


class RenderProperties(bpy.types.PropertyGroup):
    max_lights: bpy.props.IntProperty(
        name="Max Lights",
        description="Limit maximum number of lights",
        default=16, min=0, max=16,
    )
    use_tiny_prim_culling: bpy.props.BoolProperty(
        name="Tiny Prim Culling",
        description="Hide small geometry primitives to improve performance",
        default=False,
    )
    volume_raymarching_step_size: bpy.props.FloatProperty(
        name="Volume Raymarching Step Size",
        description="Step size when raymarching volume",
        default=1.0,
    )
    volume_raymarching_step_size_lighting: bpy.props.FloatProperty(
        name="Volume Raymarching Step Size Lighting",
        description="Step size when raymarching volume for lighting computation",
        default=10.0,
    )
    volume_max_texture_memory_per_field: bpy.props.FloatProperty(
        name="Max Texture Memory Per Field",
        description="Maximum memory for a volume field texture in Mb (unless overridden by field prim)",
        default=128.0,
    )


class SceneProperties(Properties):
    type = bpy.types.Scene

    final: bpy.props.PointerProperty(type=RenderProperties)
    viewport: bpy.props.PointerProperty(type=RenderProperties)


register, unregister = bpy.utils.register_classes_factory((
    RenderProperties,
    SceneProperties,
))
259
scripts/addons_core/hydra_storm/ui.py
Normal file
@@ -0,0 +1,259 @@
# SPDX-FileCopyrightText: 2011-2022 Blender Foundation
#
# SPDX-License-Identifier: Apache-2.0

import bpy

from .engine import StormHydraRenderEngine


class Panel(bpy.types.Panel):
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'render'
    COMPAT_ENGINES = {StormHydraRenderEngine.bl_idname}

    @classmethod
    def poll(cls, context):
        return context.engine in cls.COMPAT_ENGINES


#
# Quality render settings
#
class STORM_HYDRA_RENDER_PT_quality(Panel):
    bl_label = "Quality"

    def draw(self, layout):
        pass


class STORM_HYDRA_RENDER_PT_quality_viewport(Panel):
    bl_label = "Viewport"
    bl_parent_id = "STORM_HYDRA_RENDER_PT_quality"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        settings = context.scene.hydra_storm.viewport
        layout.prop(settings, 'max_lights')
        layout.prop(settings, 'use_tiny_prim_culling')


class STORM_HYDRA_RENDER_PT_quality_render(Panel):
    bl_label = "Render"
    bl_parent_id = "STORM_HYDRA_RENDER_PT_quality"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        settings = context.scene.hydra_storm.final
        layout.prop(settings, 'max_lights')
        layout.prop(settings, 'use_tiny_prim_culling')


#
# Volume render settings
#
class STORM_HYDRA_RENDER_PT_volumes(Panel):
    bl_label = "Volumes"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, layout):
        pass


class STORM_HYDRA_RENDER_PT_volumes_viewport(Panel):
    bl_label = "Viewport"
    bl_parent_id = "STORM_HYDRA_RENDER_PT_volumes"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        settings = context.scene.hydra_storm.viewport

        col = layout.column(align=True)
        col.prop(settings, "volume_raymarching_step_size", text="Step Size")
        col.prop(settings, "volume_raymarching_step_size_lighting", text="Step Size Lighting")
        col.prop(settings, "volume_max_texture_memory_per_field")


class STORM_HYDRA_RENDER_PT_volumes_render(Panel):
    bl_label = "Render"
    bl_parent_id = "STORM_HYDRA_RENDER_PT_volumes"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        settings = context.scene.hydra_storm.final

        col = layout.column(align=True)
        col.prop(settings, "volume_raymarching_step_size", text="Step Size")
        col.prop(settings, "volume_raymarching_step_size_lighting", text="Step Size Lighting")
        col.prop(settings, "volume_max_texture_memory_per_field")


#
# Film settings
#
class STORM_HYDRA_RENDER_PT_film(Panel):
    bl_label = "Film"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        layout.prop(context.scene.render, "film_transparent", text="Transparent Background")


#
# View layer settings
#
class STORM_HYDRA_RENDER_PT_passes(Panel):
    bl_label = "Passes"
    bl_context = "view_layer"

    def draw(self, context):
        pass


class STORM_HYDRA_RENDER_PT_passes_data(Panel):
    bl_label = "Data"
    bl_context = "view_layer"
    bl_parent_id = "STORM_HYDRA_RENDER_PT_passes"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        view_layer = context.view_layer

        col = layout.column(heading="Include", align=True)
        col.prop(view_layer, "use_pass_combined")
        col.prop(view_layer, "use_pass_z")


#
# Light settings
#
class STORM_HYDRA_LIGHT_PT_light(Panel):
    """Physical light sources"""
    bl_label = "Light"
    bl_context = 'data'

    @classmethod
    def poll(cls, context):
        return super().poll(context) and context.light

    def draw(self, context):
        layout = self.layout

        light = context.light

        layout.prop(light, "type", expand=True)

        layout.use_property_split = True
        layout.use_property_decorate = False

        main_col = layout.column()

        main_col.prop(light, "color")
        main_col.prop(light, "energy")
        main_col.separator()

        if light.type == 'POINT':
            row = main_col.row(align=True)
            row.prop(light, "shadow_soft_size", text="Radius")

        elif light.type == 'SPOT':
            col = main_col.column(align=True)
            col.prop(light, 'spot_size', slider=True)
            col.prop(light, 'spot_blend', slider=True)

            main_col.prop(light, 'show_cone')

        elif light.type == 'SUN':
            main_col.prop(light, "angle")

        elif light.type == 'AREA':
            main_col.prop(light, "shape", text="Shape")
            sub = main_col.column(align=True)

            if light.shape in {'SQUARE', 'DISK'}:
                sub.prop(light, "size")
            elif light.shape in {'RECTANGLE', 'ELLIPSE'}:
                sub.prop(light, "size", text="Size X")
                sub.prop(light, "size_y", text="Y")

        else:
            main_col.prop(light, 'size')


register_classes, unregister_classes = bpy.utils.register_classes_factory((
    STORM_HYDRA_RENDER_PT_quality,
    STORM_HYDRA_RENDER_PT_quality_viewport,
    STORM_HYDRA_RENDER_PT_quality_render,
    STORM_HYDRA_RENDER_PT_volumes,
    STORM_HYDRA_RENDER_PT_volumes_viewport,
    STORM_HYDRA_RENDER_PT_volumes_render,
    STORM_HYDRA_RENDER_PT_film,
    STORM_HYDRA_LIGHT_PT_light,
    STORM_HYDRA_RENDER_PT_passes,
    STORM_HYDRA_RENDER_PT_passes_data,
))


def get_panels():
    # Follow the Cycles model of excluding panels we don't want.
    exclude_panels = {
        'RENDER_PT_stamp',
        'DATA_PT_light',
        'DATA_PT_spot',
        'NODE_DATA_PT_light',
        'DATA_PT_falloff_curve',
        'RENDER_PT_post_processing',
        'RENDER_PT_simplify',
        'SCENE_PT_audio',
        'RENDER_PT_freestyle'
    }
    include_eevee_panels = {
        'MATERIAL_PT_preview',
        'EEVEE_MATERIAL_PT_context_material',
        'EEVEE_MATERIAL_PT_surface',
        'EEVEE_MATERIAL_PT_volume',
        'EEVEE_MATERIAL_PT_settings',
        'EEVEE_WORLD_PT_surface',
    }

    for panel_cls in bpy.types.Panel.__subclasses__():
        if hasattr(panel_cls, 'COMPAT_ENGINES') and (
                ('BLENDER_RENDER' in panel_cls.COMPAT_ENGINES and panel_cls.__name__ not in exclude_panels) or
                ('BLENDER_EEVEE' in panel_cls.COMPAT_ENGINES and panel_cls.__name__ in include_eevee_panels)
        ):
            yield panel_cls


def register():
    register_classes()

    for panel_cls in get_panels():
        panel_cls.COMPAT_ENGINES.add(StormHydraRenderEngine.bl_idname)


def unregister():
    unregister_classes()

    for panel_cls in get_panels():
        if StormHydraRenderEngine.bl_idname in panel_cls.COMPAT_ENGINES:
            panel_cls.COMPAT_ENGINES.remove(StormHydraRenderEngine.bl_idname)
388
scripts/addons_core/io_anim_bvh/__init__.py
Normal file
@@ -0,0 +1,388 @@
|
||||
# SPDX-FileCopyrightText: 2011-2022 Blender Foundation
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
bl_info = {
|
||||
"name": "BioVision Motion Capture (BVH) format",
|
||||
"author": "Campbell Barton",
|
||||
"version": (1, 0, 1),
|
||||
"blender": (2, 81, 6),
|
||||
"location": "File > Import-Export",
|
||||
"description": "Import-Export BVH from armature objects",
|
||||
"warning": "",
|
||||
"doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/anim_bvh.html",
|
||||
"support": 'OFFICIAL',
|
||||
"category": "Import-Export",
|
||||
}
|
||||
|
||||
if "bpy" in locals():
|
||||
import importlib
|
||||
if "import_bvh" in locals():
|
||||
importlib.reload(import_bvh)
|
||||
if "export_bvh" in locals():
|
||||
importlib.reload(export_bvh)
|
||||
|
||||
import bpy
|
||||
from bpy.props import (
|
||||
StringProperty,
|
||||
FloatProperty,
|
||||
IntProperty,
|
||||
BoolProperty,
|
||||
EnumProperty,
|
||||
)
|
||||
from bpy_extras.io_utils import (
|
||||
ImportHelper,
|
||||
ExportHelper,
|
||||
orientation_helper,
|
||||
axis_conversion,
|
||||
)
|
||||
|
||||
|
||||
@orientation_helper(axis_forward='-Z', axis_up='Y')
|
||||
class ImportBVH(bpy.types.Operator, ImportHelper):
|
||||
"""Load a BVH motion capture file"""
|
||||
bl_idname = "import_anim.bvh"
|
||||
bl_label = "Import BVH"
|
||||
bl_options = {'REGISTER', 'UNDO'}
|
||||
|
||||
filename_ext = ".bvh"
|
||||
filter_glob: StringProperty(default="*.bvh", options={'HIDDEN'})
|
||||
|
||||
target: EnumProperty(
|
||||
items=(
|
||||
('ARMATURE', "Armature", ""),
|
||||
('OBJECT', "Object", ""),
|
||||
),
|
||||
name="Target",
|
||||
description="Import target type",
|
||||
default='ARMATURE',
|
||||
)
|
||||
global_scale: FloatProperty(
|
||||
name="Scale",
|
||||
description="Scale the BVH by this value",
|
||||
min=0.0001, max=1000000.0,
|
||||
soft_min=0.001, soft_max=100.0,
|
||||
default=1.0,
|
||||
)
|
||||
frame_start: IntProperty(
|
||||
name="Start Frame",
|
||||
description="Starting frame for the animation",
|
||||
default=1,
|
||||
)
|
||||
use_fps_scale: BoolProperty(
|
||||
name="Scale FPS",
|
||||
description=(
|
||||
"Scale the framerate from the BVH to the current scenes, "
|
||||
"otherwise each BVH frame maps directly to a Blender frame"
        ),
        default=False,
    )
    update_scene_fps: BoolProperty(
        name="Update Scene FPS",
        description=(
            "Set the scene framerate to that of the BVH file (note that this "
            "nullifies the 'Scale FPS' option, as the scale will be 1:1)"
        ),
        default=False,
    )
    update_scene_duration: BoolProperty(
        name="Update Scene Duration",
        description="Extend the scene's duration to the BVH duration (never shortens the scene)",
        default=False,
    )
    use_cyclic: BoolProperty(
        name="Loop",
        description="Loop the animation playback",
        default=False,
    )
    rotate_mode: EnumProperty(
        name="Rotation",
        description="Rotation conversion",
        items=(
            ('QUATERNION', "Quaternion",
             "Convert rotations to quaternions"),
            ('NATIVE', "Euler (Native)",
             "Use the rotation order defined in the BVH file"),
            ('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
            ('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
            ('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
            ('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
            ('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
            ('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
        ),
        default='NATIVE',
    )

    def execute(self, context):
        keywords = self.as_keywords(
            ignore=(
                "axis_forward",
                "axis_up",
                "filter_glob",
            )
        )
        global_matrix = axis_conversion(
            from_forward=self.axis_forward,
            from_up=self.axis_up,
        ).to_4x4()

        keywords["global_matrix"] = global_matrix

        from . import import_bvh
        return import_bvh.load(context, report=self.report, **keywords)

    def draw(self, context):
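        # Nothing to draw directly; the import options are laid out by the
        # BVH_PT_import_* file-browser panels below.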
        pass


class BVH_PT_import_main(bpy.types.Panel):
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = ""
    bl_parent_id = "FILE_PT_operator"
    bl_options = {'HIDE_HEADER'}

    @classmethod
    def poll(cls, context):
        sfile = context.space_data
        operator = sfile.active_operator

        return operator.bl_idname == "IMPORT_ANIM_OT_bvh"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        layout.prop(operator, "target")


class BVH_PT_import_transform(bpy.types.Panel):
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Transform"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        sfile = context.space_data
        operator = sfile.active_operator

        return operator.bl_idname == "IMPORT_ANIM_OT_bvh"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        layout.prop(operator, "global_scale")
        layout.prop(operator, "rotate_mode")
        layout.prop(operator, "axis_forward")
        layout.prop(operator, "axis_up")


class BVH_PT_import_animation(bpy.types.Panel):
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Animation"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        sfile = context.space_data
        operator = sfile.active_operator

        return operator.bl_idname == "IMPORT_ANIM_OT_bvh"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        layout.prop(operator, "frame_start")
        layout.prop(operator, "use_fps_scale")
        layout.prop(operator, "use_cyclic")

        layout.prop(operator, "update_scene_fps")
        layout.prop(operator, "update_scene_duration")


class ExportBVH(bpy.types.Operator, ExportHelper):
    """Save a BVH motion capture file from an armature"""
    bl_idname = "export_anim.bvh"
    bl_label = "Export BVH"

    filename_ext = ".bvh"
    filter_glob: StringProperty(
        default="*.bvh",
        options={'HIDDEN'},
    )

    global_scale: FloatProperty(
        name="Scale",
        description="Scale the BVH by this value",
        min=0.0001, max=1000000.0,
        soft_min=0.001, soft_max=100.0,
        default=1.0,
    )
    frame_start: IntProperty(
        name="Start Frame",
        description="Starting frame to export",
        default=0,
    )
    frame_end: IntProperty(
        name="End Frame",
        description="End frame to export",
        default=0,
    )
    rotate_mode: EnumProperty(
        name="Rotation",
        description="Rotation conversion",
        items=(
            ('NATIVE', "Euler (Native)",
             "Use the rotation order defined in the BVH file"),
            ('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
            ('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
            ('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
            ('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
            ('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
            ('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
        ),
        default='NATIVE',
    )
    root_transform_only: BoolProperty(
        name="Root Translation Only",
        description="Only write out translation channels for the root bone",
        default=False,
    )

    @classmethod
    def poll(cls, context):
        obj = context.object
        return obj and obj.type == 'ARMATURE'

    def invoke(self, context, event):
        self.frame_start = context.scene.frame_start
        self.frame_end = context.scene.frame_end

        return super().invoke(context, event)

    def execute(self, context):
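        # When run without invoke() (e.g. from a script) the frame range has
        # not been initialized, so fall back to the scene's frame range.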
        if self.frame_start == 0 and self.frame_end == 0:
            self.frame_start = context.scene.frame_start
            self.frame_end = context.scene.frame_end

        keywords = self.as_keywords(
            ignore=(
                "axis_forward",
                "axis_up",
                "check_existing",
                "filter_glob",
            )
        )

        from . import export_bvh
        return export_bvh.save(context, **keywords)

    def draw(self, context):
        pass


class BVH_PT_export_transform(bpy.types.Panel):
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Transform"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        sfile = context.space_data
        operator = sfile.active_operator

        return operator.bl_idname == "EXPORT_ANIM_OT_bvh"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        layout.prop(operator, "global_scale")
        layout.prop(operator, "rotate_mode")
        layout.prop(operator, "root_transform_only")


class BVH_PT_export_animation(bpy.types.Panel):
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Animation"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        sfile = context.space_data
        operator = sfile.active_operator

        return operator.bl_idname == "EXPORT_ANIM_OT_bvh"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        col = layout.column(align=True)
        col.prop(operator, "frame_start", text="Frame Start")
        col.prop(operator, "frame_end", text="End")


def menu_func_import(self, context):
    self.layout.operator(ImportBVH.bl_idname, text="Motion Capture (.bvh)")


def menu_func_export(self, context):
    self.layout.operator(ExportBVH.bl_idname, text="Motion Capture (.bvh)")


classes = (
    ImportBVH,
    BVH_PT_import_main,
    BVH_PT_import_transform,
    BVH_PT_import_animation,
    ExportBVH,
    BVH_PT_export_transform,
    BVH_PT_export_animation,
)


def register():
    for cls in classes:
        bpy.utils.register_class(cls)

    bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.append(menu_func_export)


def unregister():
    for cls in classes:
        bpy.utils.unregister_class(cls)

    bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)


if __name__ == "__main__":
    register()
294
scripts/addons_core/io_anim_bvh/export_bvh.py
Normal file
@ -0,0 +1,294 @@
# SPDX-FileCopyrightText: 2011 Campbell Barton
#
# SPDX-License-Identifier: GPL-2.0-or-later

import bpy


def write_armature(
        context,
        filepath,
        frame_start,
        frame_end,
        global_scale=1.0,
        rotate_mode='NATIVE',
        root_transform_only=False,
):

    def ensure_rot_order(rot_order_str):
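        # BVH rotation channels must name each of X/Y/Z exactly once; fall back
        # to XYZ for any other pose-bone rotation mode (e.g. QUATERNION).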
        if set(rot_order_str) != {'X', 'Y', 'Z'}:
            rot_order_str = "XYZ"
        return rot_order_str

    from mathutils import Matrix, Euler
    from math import degrees

    file = open(filepath, "w", encoding="utf8", newline="\n")

    obj = context.object
    arm = obj.data

    # Build a dictionary of children.
    # None for parentless
    children = {None: []}

    # initialize with blank lists
    for bone in arm.bones:
        children[bone.name] = []

    # Keep bone order from the armature, no sorting; not essential, but it means
    # we can maintain order from import -> export, which Second Life incorrectly expects.
    for bone in arm.bones:
        children[getattr(bone.parent, "name", None)].append(bone.name)

    # bone name list in the order that the bones are written
    serialized_names = []

    node_locations = {}

    file.write("HIERARCHY\n")

    def write_recursive_nodes(bone_name, indent):
        my_children = children[bone_name]

        indent_str = "\t" * indent

        bone = arm.bones[bone_name]
        pose_bone = obj.pose.bones[bone_name]
        loc = bone.head_local
        node_locations[bone_name] = loc

        if rotate_mode == "NATIVE":
            rot_order_str = ensure_rot_order(pose_bone.rotation_mode)
        else:
            rot_order_str = rotate_mode

        # make relative if we can
        if bone.parent:
            loc = loc - node_locations[bone.parent.name]

        if indent:
            file.write("%sJOINT %s\n" % (indent_str, bone_name))
        else:
            file.write("%sROOT %s\n" % (indent_str, bone_name))

        file.write("%s{\n" % indent_str)
        file.write("%s\tOFFSET %.6f %.6f %.6f\n" % (indent_str, *(loc * global_scale)))
        if (bone.use_connect or root_transform_only) and bone.parent:
            file.write("%s\tCHANNELS 3 %srotation %srotation %srotation\n" % (indent_str, *rot_order_str))
        else:
            file.write(
                "%s\tCHANNELS 6 Xposition Yposition Zposition %srotation %srotation %srotation\n" % (
                    indent_str,
                    *rot_order_str,
                )
            )

        if my_children:
            # store the location for the children
            # to get their relative offset

            # Write children
            for child_bone in my_children:
                serialized_names.append(child_bone)
                write_recursive_nodes(child_bone, indent + 1)

        else:
            # Write the bone end.
            file.write("%s\tEnd Site\n" % indent_str)
            file.write("%s\t{\n" % indent_str)
            loc = bone.tail_local - node_locations[bone_name]
            file.write("%s\t\tOFFSET %.6f %.6f %.6f\n" % (indent_str, *(loc * global_scale)))
            file.write("%s\t}\n" % indent_str)

        file.write("%s}\n" % indent_str)

    if len(children[None]) == 1:
        key = children[None][0]
        serialized_names.append(key)
        indent = 0

        write_recursive_nodes(key, indent)

    else:
        # Write a dummy parent node, with a dummy key name
        # Just be sure it's not used by another bone!
        i = 0
        key = "__%d" % i
        while key in children:
            i += 1
            key = "__%d" % i
        file.write("ROOT %s\n" % key)
        file.write("{\n")
        file.write("\tOFFSET 0.0 0.0 0.0\n")
        file.write("\tCHANNELS 0\n")  # Xposition Yposition Zposition Xrotation Yrotation Zrotation
        indent = 1

        # Write children
        for child_bone in children[None]:
            serialized_names.append(child_bone)
            write_recursive_nodes(child_bone, indent)

        file.write("}\n")

    # redefine bones as sorted by serialized_names
    # so we can write motion

    class DecoratedBone:
        __slots__ = (
            # Bone name, used as key in many places.
            "name",
            "parent",  # decorated bone parent, set in a later loop
            # Blender armature bone.
            "rest_bone",
            # Blender pose bone.
            "pose_bone",
            # Blender pose matrix.
            "pose_mat",
            # Blender rest matrix (armature space).
            "rest_arm_mat",
            # Blender rest matrix (local space).
            "rest_local_mat",
            # Pose_mat inverted.
            "pose_imat",
            # Rest_arm_mat inverted.
            "rest_arm_imat",
            # Rest_local_mat inverted.
            "rest_local_imat",
            # Last used euler to preserve euler compatibility in between keyframes.
            "prev_euler",
            # Is the bone disconnected from the parent bone?
            "skip_position",
            "rot_order",
            "rot_order_str",
            # Needed for the euler order when converting from a matrix.
            "rot_order_str_reverse",
        )

        _eul_order_lookup = {
            'XYZ': (0, 1, 2),
            'XZY': (0, 2, 1),
            'YXZ': (1, 0, 2),
            'YZX': (1, 2, 0),
            'ZXY': (2, 0, 1),
            'ZYX': (2, 1, 0),
        }

        def __init__(self, bone_name):
            self.name = bone_name
            self.rest_bone = arm.bones[bone_name]
            self.pose_bone = obj.pose.bones[bone_name]

            if rotate_mode == "NATIVE":
                self.rot_order_str = ensure_rot_order(self.pose_bone.rotation_mode)
            else:
                self.rot_order_str = rotate_mode
            self.rot_order_str_reverse = self.rot_order_str[::-1]

            self.rot_order = DecoratedBone._eul_order_lookup[self.rot_order_str]

            self.pose_mat = self.pose_bone.matrix

            # mat = self.rest_bone.matrix  # UNUSED
            self.rest_arm_mat = self.rest_bone.matrix_local
            self.rest_local_mat = self.rest_bone.matrix

            # inverted mats
            self.pose_imat = self.pose_mat.inverted()
            self.rest_arm_imat = self.rest_arm_mat.inverted()
            self.rest_local_imat = self.rest_local_mat.inverted()

            self.parent = None
            self.prev_euler = Euler((0.0, 0.0, 0.0), self.rot_order_str_reverse)
            self.skip_position = ((self.rest_bone.use_connect or root_transform_only) and self.rest_bone.parent)

        def update_posedata(self):
            self.pose_mat = self.pose_bone.matrix
            self.pose_imat = self.pose_mat.inverted()

        def __repr__(self):
            if self.parent:
                return "[\"%s\" child on \"%s\"]\n" % (self.name, self.parent.name)
            else:
                return "[\"%s\" root bone]\n" % (self.name)

    bones_decorated = [DecoratedBone(bone_name) for bone_name in serialized_names]

    # Assign parents
    bones_decorated_dict = {dbone.name: dbone for dbone in bones_decorated}
    for dbone in bones_decorated:
        parent = dbone.rest_bone.parent
        if parent:
            dbone.parent = bones_decorated_dict[parent.name]
    del bones_decorated_dict
    # finish assigning parents

    scene = context.scene
    frame_current = scene.frame_current

    file.write("MOTION\n")
    file.write("Frames: %d\n" % (frame_end - frame_start + 1))
    file.write("Frame Time: %.6f\n" % (1.0 / (scene.render.fps / scene.render.fps_base)))

    for frame in range(frame_start, frame_end + 1):
        scene.frame_set(frame)

        for dbone in bones_decorated:
            dbone.update_posedata()

        for dbone in bones_decorated:
            trans = Matrix.Translation(dbone.rest_bone.head_local)
            itrans = Matrix.Translation(-dbone.rest_bone.head_local)

            if dbone.parent:
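                # Express the pose relative to the parent's pose, then move the
                # result back into this bone's own rest space.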
                mat_final = dbone.parent.rest_arm_mat @ dbone.parent.pose_imat @ dbone.pose_mat @ dbone.rest_arm_imat
                mat_final = itrans @ mat_final @ trans
                loc = mat_final.to_translation() + (dbone.rest_bone.head_local - dbone.parent.rest_bone.head_local)
            else:
                mat_final = dbone.pose_mat @ dbone.rest_arm_imat
                mat_final = itrans @ mat_final @ trans
                loc = mat_final.to_translation() + dbone.rest_bone.head

            # keep eulers compatible, no jumping on interpolation.
            rot = mat_final.to_euler(dbone.rot_order_str_reverse, dbone.prev_euler)

            if not dbone.skip_position:
                file.write("%.6f %.6f %.6f " % (loc * global_scale)[:])

            file.write(
                "%.6f %.6f %.6f " % (
                    degrees(rot[dbone.rot_order[0]]),
                    degrees(rot[dbone.rot_order[1]]),
                    degrees(rot[dbone.rot_order[2]]),
                )
            )

            dbone.prev_euler = rot

        file.write("\n")

    file.close()

    scene.frame_set(frame_current)

    print("BVH Exported: %s frames:%d\n" % (filepath, frame_end - frame_start + 1))


def save(
        context, filepath="",
        frame_start=-1,
        frame_end=-1,
        global_scale=1.0,
        rotate_mode="NATIVE",
        root_transform_only=False,
):
    write_armature(
        context, filepath,
        frame_start=frame_start,
        frame_end=frame_end,
        global_scale=global_scale,
        rotate_mode=rotate_mode,
        root_transform_only=root_transform_only,
    )

    return {'FINISHED'}
783
scripts/addons_core/io_anim_bvh/import_bvh.py
Normal file
@ -0,0 +1,783 @@
# SPDX-FileCopyrightText: 2011 Campbell Barton
#
# SPDX-License-Identifier: GPL-2.0-or-later

from math import radians, ceil

import bpy
from bpy.app.translations import pgettext_tip as tip_
from mathutils import Vector, Euler, Matrix


class BVH_Node:
    __slots__ = (
        # Bvh joint name.
        'name',
        # BVH_Node type or None for no parent.
        'parent',
        # A list of children of this type.
        'children',
        # Worldspace rest location for the head of this node.
        'rest_head_world',
        # Localspace rest location for the head of this node.
        'rest_head_local',
        # Worldspace rest location for the tail of this node.
        'rest_tail_world',
        # Localspace rest location for the tail of this node.
        'rest_tail_local',
        # List of 6 ints, -1 for an unused channel,
        # otherwise an index for the BVH motion data lines,
        # loc triple then rot triple.
        'channels',
        # A triple of indices as to the order rotation is applied.
        # [0,1,2] is x/y/z - [None, None, None] if no rotation.
        'rot_order',
        # Same as above but a string 'XYZ' format.
        'rot_order_str',
        # A list of tuples, one for each frame: (locx, locy, locz, rotx, roty, rotz),
        # euler rotation ALWAYS stored xyz order, even when native used.
        'anim_data',
        # Convenience function, bool, same as: (channels[0] != -1 or channels[1] != -1 or channels[2] != -1).
        'has_loc',
        # Convenience function, bool, same as: (channels[3] != -1 or channels[4] != -1 or channels[5] != -1).
        'has_rot',
        # Index from the file, not strictly needed but nice to maintain order.
        'index',
        # Use this for whatever you want.
        'temp',
    )

    _eul_order_lookup = {
        (None, None, None): 'XYZ',  # XXX Dummy one, no rotation anyway!
        (0, 1, 2): 'XYZ',
        (0, 2, 1): 'XZY',
        (1, 0, 2): 'YXZ',
        (1, 2, 0): 'YZX',
        (2, 0, 1): 'ZXY',
        (2, 1, 0): 'ZYX',
    }

    def __init__(self, name, rest_head_world, rest_head_local, parent, channels, rot_order, index):
        self.name = name
        self.rest_head_world = rest_head_world
        self.rest_head_local = rest_head_local
        self.rest_tail_world = None
        self.rest_tail_local = None
        self.parent = parent
        self.channels = channels
        self.rot_order = tuple(rot_order)
        self.rot_order_str = BVH_Node._eul_order_lookup[self.rot_order]
        self.index = index

        # convenience functions
        self.has_loc = channels[0] != -1 or channels[1] != -1 or channels[2] != -1
        self.has_rot = channels[3] != -1 or channels[4] != -1 or channels[5] != -1

        self.children = []

        # List of 6 length tuples: (lx, ly, lz, rx, ry, rz)
        # even if the channels aren't used they will just be zero.
        self.anim_data = [(0, 0, 0, 0, 0, 0)]

    def __repr__(self):
        return (
            "BVH name: '%s', rest_loc:(%.3f,%.3f,%.3f), rest_tail:(%.3f,%.3f,%.3f)" % (
                self.name,
                *self.rest_head_world,
                *self.rest_head_world,
            )
        )


def sorted_nodes(bvh_nodes):
    bvh_nodes_list = list(bvh_nodes.values())
    bvh_nodes_list.sort(key=lambda bvh_node: bvh_node.index)
    return bvh_nodes_list


def read_bvh(context, file_path, rotate_mode='XYZ', global_scale=1.0):
    # File loading stuff
    # Open the file for importing
    file = open(file_path, 'r')

    # Separate into a list of lists, each line a list of words.
    file_lines = file.readlines()
    # Non standard carriage returns?
    if len(file_lines) == 1:
        file_lines = file_lines[0].split('\r')

    # Split by whitespace.
    file_lines = [ll for ll in [l.split() for l in file_lines] if ll]

    # Create hierarchy as empties
    if file_lines[0][0].lower() == 'hierarchy':
        # print 'Importing the BVH Hierarchy for:', file_path
        pass
    else:
        raise Exception("This is not a BVH file")

    bvh_nodes = {None: None}
    bvh_nodes_serial = [None]
    bvh_frame_count = None
    bvh_frame_time = None

    channelIndex = -1

    lineIdx = 0  # An index for the file.
    while lineIdx < len(file_lines) - 1:
        if file_lines[lineIdx][0].lower() in {'root', 'joint'}:

            # Join spaces into 1 word with underscores joining it.
            if len(file_lines[lineIdx]) > 2:
                file_lines[lineIdx][1] = '_'.join(file_lines[lineIdx][1:])
                file_lines[lineIdx] = file_lines[lineIdx][:2]

            # MAY NEED TO SUPPORT MULTIPLE ROOTS HERE! Still unsure whether multiple roots are possible?

            # Make sure the names are unique - Object names will match joint names exactly and both will be unique.
            name = file_lines[lineIdx][1]

            # While unlikely, there exists a user report of duplicate joint names, see: #109399.
            if name in bvh_nodes:
                name_orig = name
                name_index = 1
                while (name := "%s.%03d" % (name_orig, name_index)) in bvh_nodes:
                    name_index += 1
                del name_orig, name_index

            # print '%snode: %s, parent: %s' % (len(bvh_nodes_serial) * ' ', name, bvh_nodes_serial[-1])

            lineIdx += 2  # Increment to the next line (Offset)
            rest_head_local = global_scale * Vector((
                float(file_lines[lineIdx][1]),
                float(file_lines[lineIdx][2]),
                float(file_lines[lineIdx][3]),
            ))
            lineIdx += 1  # Increment to the next line (Channels)

            # newChannel[Xposition, Yposition, Zposition, Xrotation, Yrotation, Zrotation]
            # newChannel references indices into the motion data;
            # unassigned channels are left at -1, which indexes the zero value
            # appended to the end of the motion data, so -1 always reads as zero.
            my_channel = [-1, -1, -1, -1, -1, -1]
            my_rot_order = [None, None, None]
            rot_count = 0
            for channel in file_lines[lineIdx][2:]:
                channel = channel.lower()
                channelIndex += 1  # So the index points to the right channel
                if channel == 'xposition':
                    my_channel[0] = channelIndex
                elif channel == 'yposition':
                    my_channel[1] = channelIndex
                elif channel == 'zposition':
                    my_channel[2] = channelIndex

                elif channel == 'xrotation':
                    my_channel[3] = channelIndex
                    my_rot_order[rot_count] = 0
                    rot_count += 1
                elif channel == 'yrotation':
                    my_channel[4] = channelIndex
                    my_rot_order[rot_count] = 1
                    rot_count += 1
                elif channel == 'zrotation':
                    my_channel[5] = channelIndex
                    my_rot_order[rot_count] = 2
                    rot_count += 1

            channels = file_lines[lineIdx][2:]

            my_parent = bvh_nodes_serial[-1]  # account for none

            # Apply the parents offset accumulatively
            if my_parent is None:
                rest_head_world = Vector(rest_head_local)
            else:
                rest_head_world = my_parent.rest_head_world + rest_head_local

            bvh_node = bvh_nodes[name] = BVH_Node(
                name,
                rest_head_world,
                rest_head_local,
                my_parent,
                my_channel,
                my_rot_order,
                len(bvh_nodes) - 1,
            )

            # If another joint follows then this node can act as its parent.
            bvh_nodes_serial.append(bvh_node)

        # Account for an end node.
        # There is sometimes a name after 'End Site' but we will ignore it.
        if file_lines[lineIdx][0].lower() == 'end' and file_lines[lineIdx][1].lower() == 'site':
            # Increment to the next line (Offset)
            lineIdx += 2
            rest_tail = global_scale * Vector((
                float(file_lines[lineIdx][1]),
                float(file_lines[lineIdx][2]),
                float(file_lines[lineIdx][3]),
            ))

            bvh_nodes_serial[-1].rest_tail_world = bvh_nodes_serial[-1].rest_head_world + rest_tail
            bvh_nodes_serial[-1].rest_tail_local = bvh_nodes_serial[-1].rest_head_local + rest_tail

            # Just so we can remove the parents in a uniform way:
            # the End Site also pushes an entry, so this is a placeholder.
            bvh_nodes_serial.append(None)

        if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0] == '}':  # == ['}']
            bvh_nodes_serial.pop()  # Remove the last item

        # End of the hierarchy. Begin the animation section of the file with
        # the following header.
        #   MOTION
        #   Frames: n
        #   Frame Time: dt
        if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0].lower() == 'motion':
            lineIdx += 1  # Read frame count.
            if (
                    len(file_lines[lineIdx]) == 2 and
                    file_lines[lineIdx][0].lower() == 'frames:'
            ):
                bvh_frame_count = int(file_lines[lineIdx][1])

            lineIdx += 1  # Read frame rate.
            if (
                    len(file_lines[lineIdx]) == 3 and
                    file_lines[lineIdx][0].lower() == 'frame' and
                    file_lines[lineIdx][1].lower() == 'time:'
            ):
                bvh_frame_time = float(file_lines[lineIdx][2])

            lineIdx += 1  # Set the cursor to the first frame

            break

        lineIdx += 1

    # Remove the None value used for easy parent reference
    del bvh_nodes[None]
    # Don't use anymore
    del bvh_nodes_serial

    # Importing works with any order, but it's nicer to maintain the order;
    # Second Life expects it, which isn't to spec.
    bvh_nodes_list = sorted_nodes(bvh_nodes)

    while lineIdx < len(file_lines):
        line = file_lines[lineIdx]
        for bvh_node in bvh_nodes_list:
            # for bvh_node in bvh_nodes_serial:
            lx = ly = lz = rx = ry = rz = 0.0
            channels = bvh_node.channels
            anim_data = bvh_node.anim_data
            if channels[0] != -1:
                lx = global_scale * float(line[channels[0]])

            if channels[1] != -1:
                ly = global_scale * float(line[channels[1]])

            if channels[2] != -1:
                lz = global_scale * float(line[channels[2]])

            if channels[3] != -1 or channels[4] != -1 or channels[5] != -1:

                rx = radians(float(line[channels[3]]))
                ry = radians(float(line[channels[4]]))
                rz = radians(float(line[channels[5]]))

            # Done importing motion data #
            anim_data.append((lx, ly, lz, rx, ry, rz))
        lineIdx += 1

    # Assign children
    for bvh_node in bvh_nodes_list:
        bvh_node_parent = bvh_node.parent
        if bvh_node_parent:
            bvh_node_parent.children.append(bvh_node)

    # Now set the tip of each bvh_node
    for bvh_node in bvh_nodes_list:

        if not bvh_node.rest_tail_world:
            if len(bvh_node.children) == 0:
                # could just fail here, but rare BVH files have childless nodes
                bvh_node.rest_tail_world = Vector(bvh_node.rest_head_world)
                bvh_node.rest_tail_local = Vector(bvh_node.rest_head_local)
            elif len(bvh_node.children) == 1:
                bvh_node.rest_tail_world = Vector(bvh_node.children[0].rest_head_world)
                bvh_node.rest_tail_local = bvh_node.rest_head_local + bvh_node.children[0].rest_head_local
            else:
                # allow this, see above
                # if not bvh_node.children:
                #     raise Exception("bvh node has no end and no children. bad file")

                # Removed temp for now
                rest_tail_world = Vector((0.0, 0.0, 0.0))
                rest_tail_local = Vector((0.0, 0.0, 0.0))
                for bvh_node_child in bvh_node.children:
                    rest_tail_world += bvh_node_child.rest_head_world
                    rest_tail_local += bvh_node_child.rest_head_local

                bvh_node.rest_tail_world = rest_tail_world * (1.0 / len(bvh_node.children))
                bvh_node.rest_tail_local = rest_tail_local * (1.0 / len(bvh_node.children))

        # Make sure tail isn't the same location as the head.
        if (bvh_node.rest_tail_local - bvh_node.rest_head_local).length <= 0.001 * global_scale:
            print("\tzero length node found:", bvh_node.name)
            bvh_node.rest_tail_local.y = bvh_node.rest_tail_local.y + global_scale / 10
            bvh_node.rest_tail_world.y = bvh_node.rest_tail_world.y + global_scale / 10

    return bvh_nodes, bvh_frame_time, bvh_frame_count


def bvh_node_dict2objects(context, bvh_name, bvh_nodes, rotate_mode='NATIVE', frame_start=1, IMPORT_LOOP=False):

    if frame_start < 1:
        frame_start = 1

    scene = context.scene
    for obj in scene.objects:
        obj.select_set(False)

    objects = []

    def add_ob(name):
        obj = bpy.data.objects.new(name, None)
        context.collection.objects.link(obj)
        objects.append(obj)
        obj.select_set(True)

        # nicer drawing.
        obj.empty_display_type = 'CUBE'
        obj.empty_display_size = 0.1

        return obj

    # Add objects
    for name, bvh_node in bvh_nodes.items():
        bvh_node.temp = add_ob(name)
        bvh_node.temp.rotation_mode = bvh_node.rot_order_str[::-1]

    # Parent the objects
    for bvh_node in bvh_nodes.values():
        for bvh_node_child in bvh_node.children:
            bvh_node_child.temp.parent = bvh_node.temp

    # Offset
    for bvh_node in bvh_nodes.values():
        # Make relative to parents offset
        bvh_node.temp.location = bvh_node.rest_head_local

    # Add tail objects
    for name, bvh_node in bvh_nodes.items():
        if not bvh_node.children:
            ob_end = add_ob(name + '_end')
            ob_end.parent = bvh_node.temp
            ob_end.location = bvh_node.rest_tail_world - bvh_node.rest_head_world

    for name, bvh_node in bvh_nodes.items():
        obj = bvh_node.temp

        for frame_current in range(len(bvh_node.anim_data)):

            lx, ly, lz, rx, ry, rz = bvh_node.anim_data[frame_current]

            if bvh_node.has_loc:
                obj.delta_location = Vector((lx, ly, lz)) - bvh_node.rest_head_world
                obj.keyframe_insert("delta_location", index=-1, frame=frame_start + frame_current)

            if bvh_node.has_rot:
                obj.delta_rotation_euler = rx, ry, rz
                obj.keyframe_insert("delta_rotation_euler", index=-1, frame=frame_start + frame_current)

    return objects


def bvh_node_dict2armature(
        context,
        bvh_name,
        bvh_nodes,
        bvh_frame_time,
        rotate_mode='XYZ',
        frame_start=1,
        IMPORT_LOOP=False,
        global_matrix=None,
        use_fps_scale=False,
):
    from bpy.utils import escape_identifier

    if frame_start < 1:
        frame_start = 1

    # Add the new armature,
    scene = context.scene
    for obj in scene.objects:
        obj.select_set(False)

    arm_data = bpy.data.armatures.new(bvh_name)
    arm_ob = bpy.data.objects.new(bvh_name, arm_data)

    context.collection.objects.link(arm_ob)

    arm_ob.select_set(True)
    context.view_layer.objects.active = arm_ob

    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
    bpy.ops.object.mode_set(mode='EDIT', toggle=False)

    bvh_nodes_list = sorted_nodes(bvh_nodes)

    # Get the average bone length for zero length bones, we may not use this.
    average_bone_length = 0.0
    nonzero_count = 0
    for bvh_node in bvh_nodes_list:
        l = (bvh_node.rest_head_local - bvh_node.rest_tail_local).length
        if l:
            average_bone_length += l
            nonzero_count += 1

    # In very rare cases all the bones could be zero length.
    if not average_bone_length:
        average_bone_length = 0.1
    else:
        # Normal operation
        average_bone_length = average_bone_length / nonzero_count

    # XXX, annoying, remove bone.
    while arm_data.edit_bones:
        arm_data.edit_bones.remove(arm_data.edit_bones[-1])

    ZERO_AREA_BONES = []
    for bvh_node in bvh_nodes_list:

        # New editbone
        bone = bvh_node.temp = arm_data.edit_bones.new(bvh_node.name)

        bone.head = bvh_node.rest_head_world
        bone.tail = bvh_node.rest_tail_world

        # Zero Length Bones! (an exceptional case)
        if (bone.head - bone.tail).length < 0.001:
            print("\tzero length bone found:", bone.name)
            if bvh_node.parent:
                ofs = bvh_node.parent.rest_head_local - bvh_node.parent.rest_tail_local
                if ofs.length:  # is our parent zero length also?? unlikely
                    bone.tail = bone.tail - ofs
                else:
                    bone.tail.y = bone.tail.y + average_bone_length
            else:
                bone.tail.y = bone.tail.y + average_bone_length

            ZERO_AREA_BONES.append(bone.name)

    for bvh_node in bvh_nodes_list:
        if bvh_node.parent:
            # bvh_node.temp is the Editbone

            # Set the bone parent
            bvh_node.temp.parent = bvh_node.parent.temp

            # Set the connection state
            if (
                    (not bvh_node.has_loc) and
                    (bvh_node.parent.temp.name not in ZERO_AREA_BONES) and
                    (bvh_node.parent.rest_tail_local == bvh_node.rest_head_local)
            ):
                bvh_node.temp.use_connect = True

    # Replace the editbone with the editbone name,
    # to avoid memory errors accessing the editbone outside editmode
    for bvh_node in bvh_nodes_list:
        bvh_node.temp = bvh_node.temp.name

    # Now Apply the animation to the armature

    # Get armature animation data
    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)

    pose = arm_ob.pose
    pose_bones = pose.bones

    if rotate_mode == 'NATIVE':
        for bvh_node in bvh_nodes_list:
            bone_name = bvh_node.temp  # may not be the same name as the bvh_node, could have been shortened.
            pose_bone = pose_bones[bone_name]
            pose_bone.rotation_mode = bvh_node.rot_order_str

    elif rotate_mode != 'QUATERNION':
        for pose_bone in pose_bones:
            pose_bone.rotation_mode = rotate_mode
    else:
        # Quats default
        pass

    context.view_layer.update()

    arm_ob.animation_data_create()
    action = bpy.data.actions.new(name=bvh_name)
    arm_ob.animation_data.action = action

    # Replace the bvh_node.temp (currently an editbone)
    # With a tuple (pose_bone, armature_bone, bone_rest_matrix, bone_rest_matrix_inv)
    num_frame = 0
    for bvh_node in bvh_nodes_list:
        bone_name = bvh_node.temp  # may not be the same name as the bvh_node, could have been shortened.
        pose_bone = pose_bones[bone_name]
        rest_bone = arm_data.bones[bone_name]
        bone_rest_matrix = rest_bone.matrix_local.to_3x3()

        bone_rest_matrix_inv = Matrix(bone_rest_matrix)
        bone_rest_matrix_inv.invert()

        bone_rest_matrix_inv.resize_4x4()
        bone_rest_matrix.resize_4x4()
        bvh_node.temp = (pose_bone, rest_bone, bone_rest_matrix, bone_rest_matrix_inv)

        if 0 == num_frame:
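            # Every node carries the same number of frames; take it from the first.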
            num_frame = len(bvh_node.anim_data)

    # Choose to skip some frames at the beginning. Frame 0 is the rest pose
    # used internally by this importer. Frame 1, by convention, is also often
    # the rest pose of the skeleton exported by the motion capture system.
    skip_frame = 1
    if num_frame > skip_frame:
        num_frame = num_frame - skip_frame

    # Create a shared time axis for all animation curves.
    time = [float(frame_start)] * num_frame
    if use_fps_scale:
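        # Stretch the BVH time axis so each BVH frame lands on the correct
        # scene frame for the scene's FPS.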
        dt = scene.render.fps * bvh_frame_time
        for frame_i in range(1, num_frame):
            time[frame_i] += float(frame_i) * dt
    else:
        for frame_i in range(1, num_frame):
            time[frame_i] += float(frame_i)

    # print("bvh_frame_time = %f, dt = %f, num_frame = %d"
    #       % (bvh_frame_time, dt, num_frame))

    for i, bvh_node in enumerate(bvh_nodes_list):
        pose_bone, rest_bone, bone_rest_matrix, bone_rest_matrix_inv = bvh_node.temp

        if bvh_node.has_loc:
            # Not sure if there is a way to query this or access it in the
            # PoseBone structure.
            data_path = 'pose.bones["%s"].location' % escape_identifier(pose_bone.name)

            location = [(0.0, 0.0, 0.0)] * num_frame
            for frame_i in range(num_frame):
                bvh_loc = bvh_node.anim_data[frame_i + skip_frame][:3]
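
                # Convert the BVH world-space offset into the bone's local space.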
                bone_translate_matrix = Matrix.Translation(
                    Vector(bvh_loc) - bvh_node.rest_head_local)
                location[frame_i] = (bone_rest_matrix_inv @
                                     bone_translate_matrix).to_translation()

            # For each location x, y, z.
            for axis_i in range(3):
                curve = action.fcurves.new(data_path=data_path, index=axis_i, action_group=bvh_node.name)
                keyframe_points = curve.keyframe_points
                keyframe_points.add(num_frame)

                for frame_i in range(num_frame):
                    keyframe_points[frame_i].co = (
                        time[frame_i],
                        location[frame_i][axis_i],
                    )

        if bvh_node.has_rot:
            data_path = None
            rotate = None

            if 'QUATERNION' == rotate_mode:
                rotate = [(1.0, 0.0, 0.0, 0.0)] * num_frame
                data_path = ('pose.bones["%s"].rotation_quaternion' % escape_identifier(pose_bone.name))
            else:
                rotate = [(0.0, 0.0, 0.0)] * num_frame
                data_path = ('pose.bones["%s"].rotation_euler' % escape_identifier(pose_bone.name))

            prev_euler = Euler((0.0, 0.0, 0.0))
            for frame_i in range(num_frame):
                bvh_rot = bvh_node.anim_data[frame_i + skip_frame][3:]

                # apply rotation order and convert to XYZ
                # note that the rot_order_str is reversed.
                euler = Euler(bvh_rot, bvh_node.rot_order_str[::-1])
                bone_rotation_matrix = euler.to_matrix().to_4x4()
                bone_rotation_matrix = (
                    bone_rest_matrix_inv @
                    bone_rotation_matrix @
                    bone_rest_matrix
                )

                if len(rotate[frame_i]) == 4:
                    rotate[frame_i] = bone_rotation_matrix.to_quaternion()
                else:
                    rotate[frame_i] = bone_rotation_matrix.to_euler(
                        pose_bone.rotation_mode, prev_euler)
                    prev_euler = rotate[frame_i]

            # For each euler angle x, y, z (or quaternion w, x, y, z).
            for axis_i in range(len(rotate[0])):
                curve = action.fcurves.new(data_path=data_path, index=axis_i, action_group=bvh_node.name)
                keyframe_points = curve.keyframe_points
                keyframe_points.add(num_frame)

                for frame_i in range(num_frame):
                    keyframe_points[frame_i].co = (
                        time[frame_i],
                        rotate[frame_i][axis_i],
                    )

    for cu in action.fcurves:
        if IMPORT_LOOP:
            pass  # 2.5 doesn't have cyclic now?

        for bez in cu.keyframe_points:
            bez.interpolation = 'LINEAR'

    # finally apply matrix
    arm_ob.matrix_world = global_matrix
    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)

    return arm_ob


def load(
        context,
        filepath,
        *,
        target='ARMATURE',
        rotate_mode='NATIVE',
        global_scale=1.0,
        use_cyclic=False,
        frame_start=1,
        global_matrix=None,
        use_fps_scale=False,
        update_scene_fps=False,
        update_scene_duration=False,
        report=print,
):
    import time
    t1 = time.time()
    print("\tparsing bvh %r..." % filepath, end="")

    bvh_nodes, bvh_frame_time, bvh_frame_count = read_bvh(
        context, filepath,
        rotate_mode=rotate_mode,
        global_scale=global_scale,
    )

    print("%.4f" % (time.time() - t1))

    scene = context.scene
    frame_orig = scene.frame_current

    # Broken BVH handling: guess frame rate when it is not contained in the file.
    if bvh_frame_time is None:
        report(
            {'WARNING'},
            "The BVH file does not contain frame duration in its MOTION "
            "section, assuming the BVH and Blender scene have the same "
            "frame rate"
        )
        bvh_frame_time = scene.render.fps_base / scene.render.fps
        # No need to scale the frame rate, as they're equal now anyway.
        use_fps_scale = False

    if update_scene_fps:
        _update_scene_fps(context, report, bvh_frame_time)

        # Now that we have a 1-to-1 mapping of Blender frames and BVH frames, there is no need
        # to scale the FPS any more. It's even better not to, to prevent roundoff errors.
        use_fps_scale = False

    if update_scene_duration:
        _update_scene_duration(context, report, bvh_frame_count, bvh_frame_time, frame_start, use_fps_scale)

    t1 = time.time()
    print("\timporting to blender...", end="")

    bvh_name = bpy.path.display_name_from_filepath(filepath)

    if target == 'ARMATURE':
        bvh_node_dict2armature(
            context, bvh_name, bvh_nodes, bvh_frame_time,
            rotate_mode=rotate_mode,
            frame_start=frame_start,
            IMPORT_LOOP=use_cyclic,
            global_matrix=global_matrix,
            use_fps_scale=use_fps_scale,
        )

    elif target == 'OBJECT':
        bvh_node_dict2objects(
            context, bvh_name, bvh_nodes,
            rotate_mode=rotate_mode,
            frame_start=frame_start,
            IMPORT_LOOP=use_cyclic,
            # global_matrix=global_matrix,  # TODO
        )

    else:
        report({'ERROR'}, tip_("Invalid target %r (must be 'ARMATURE' or 'OBJECT')") % target)
        return {'CANCELLED'}

    print('Done in %.4f\n' % (time.time() - t1))

    context.scene.frame_set(frame_orig)

    return {'FINISHED'}


def _update_scene_fps(context, report, bvh_frame_time):
    """Update the scene's FPS settings from the BVH, but only if the BVH contains enough info."""

    # Broken BVH handling: prevent division by zero.
    if bvh_frame_time == 0.0:
        report(
            {'WARNING'},
            "Unable to update scene frame rate, as the BVH file "
            "contains a zero frame duration in its MOTION section",
        )
        return

    scene = context.scene
    scene_fps = scene.render.fps / scene.render.fps_base
    new_fps = 1.0 / bvh_frame_time

    if scene.render.fps != new_fps or scene.render.fps_base != 1.0:
        print("\tupdating scene FPS (was %f) to BVH FPS (%f)" % (scene_fps, new_fps))
        scene.render.fps = int(round(new_fps))
        scene.render.fps_base = scene.render.fps / new_fps


def _update_scene_duration(
        context, report, bvh_frame_count, bvh_frame_time, frame_start,
        use_fps_scale):
    """Extend the scene's duration so that the BVH file fits in its entirety."""

    if bvh_frame_count is None:
        report(
            {'WARNING'},
            "Unable to extend the scene duration, as the BVH file does not "
            "contain the number of frames in its MOTION section",
        )
        return

    # Not likely, but it can happen when a BVH is just used to store an armature.
    if bvh_frame_count == 0:
        return

    if use_fps_scale:
        scene_fps = context.scene.render.fps / context.scene.render.fps_base
        scaled_frame_count = int(ceil(bvh_frame_count * bvh_frame_time * scene_fps))
        bvh_last_frame = frame_start + scaled_frame_count
    else:
        bvh_last_frame = frame_start + bvh_frame_count

    # Only extend the scene, never shorten it.
    if context.scene.frame_end < bvh_last_frame:
        context.scene.frame_end = bvh_last_frame
67
scripts/addons_core/io_curve_svg/__init__.py
Normal file
@ -0,0 +1,67 @@
# SPDX-FileCopyrightText: 2011-2022 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

bl_info = {
    "name": "Scalable Vector Graphics (SVG) 1.1 format",
    "author": "JM Soler, Sergey Sharybin",
    "blender": (2, 80, 0),
    "location": "File > Import > Scalable Vector Graphics (.svg)",
    "description": "Import SVG as curves",
    "warning": "",
    "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/curve_svg.html",
    "support": 'OFFICIAL',
    "category": "Import-Export",
}


# To support reload properly, try to access a package var,
# if it's there, reload everything
if "bpy" in locals():
    import importlib
    if "import_svg" in locals():
        importlib.reload(import_svg)


import bpy
from bpy.props import StringProperty
from bpy_extras.io_utils import ImportHelper


class ImportSVG(bpy.types.Operator, ImportHelper):
    """Load an SVG file"""
    bl_idname = "import_curve.svg"
    bl_label = "Import SVG"
    bl_options = {'UNDO'}

    filename_ext = ".svg"
    filter_glob: StringProperty(default="*.svg", options={'HIDDEN'})

    def execute(self, context):
        from . import import_svg

        return import_svg.load(self, context, filepath=self.filepath)


def menu_func_import(self, context):
    self.layout.operator(ImportSVG.bl_idname,
                         text="Scalable Vector Graphics (.svg)")


def register():
    bpy.utils.register_class(ImportSVG)

    bpy.types.TOPBAR_MT_file_import.append(menu_func_import)


def unregister():
    bpy.utils.unregister_class(ImportSVG)

    bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)

# NOTES
# - blender version is hardcoded


if __name__ == "__main__":
    register()
1904
scripts/addons_core/io_curve_svg/import_svg.py
Normal file
File diff suppressed because it is too large
153
scripts/addons_core/io_curve_svg/svg_colors.py
Normal file
@ -0,0 +1,153 @@
# SPDX-FileCopyrightText: 2004-2009 JM Soler
#
# SPDX-License-Identifier: GPL-2.0-or-later

# Copied and adapted from the paths_svg2obj.py script for Blender 2.49.

SVGColors = {'aliceblue': (240, 248, 255),
             'antiquewhite': (250, 235, 215),
             'aqua': (0, 255, 255),
             'aquamarine': (127, 255, 212),
             'azure': (240, 255, 255),
             'beige': (245, 245, 220),
             'bisque': (255, 228, 196),
             'black': (0, 0, 0),
             'blanchedalmond': (255, 235, 205),
             'blue': (0, 0, 255),
             'blueviolet': (138, 43, 226),
             'brown': (165, 42, 42),
             'burlywood': (222, 184, 135),
             'cadetblue': (95, 158, 160),
             'chartreuse': (127, 255, 0),
             'chocolate': (210, 105, 30),
             'coral': (255, 127, 80),
             'cornflowerblue': (100, 149, 237),
             'cornsilk': (255, 248, 220),
             'crimson': (220, 20, 60),
             'cyan': (0, 255, 255),
             'darkblue': (0, 0, 139),
             'darkcyan': (0, 139, 139),
             'darkgoldenrod': (184, 134, 11),
             'darkgray': (169, 169, 169),
             'darkgreen': (0, 100, 0),
             'darkgrey': (169, 169, 169),
             'darkkhaki': (189, 183, 107),
             'darkmagenta': (139, 0, 139),
             'darkolivegreen': (85, 107, 47),
             'darkorange': (255, 140, 0),
             'darkorchid': (153, 50, 204),
             'darkred': (139, 0, 0),
             'darksalmon': (233, 150, 122),
             'darkseagreen': (143, 188, 143),
             'darkslateblue': (72, 61, 139),
             'darkslategray': (47, 79, 79),
             'darkslategrey': (47, 79, 79),
             'darkturquoise': (0, 206, 209),
             'darkviolet': (148, 0, 211),
             'deeppink': (255, 20, 147),
             'deepskyblue': (0, 191, 255),
             'dimgray': (105, 105, 105),
             'dimgrey': (105, 105, 105),
             'dodgerblue': (30, 144, 255),
             'firebrick': (178, 34, 34),
             'floralwhite': (255, 250, 240),
             'forestgreen': (34, 139, 34),
             'fuchsia': (255, 0, 255),
             'gainsboro': (220, 220, 220),
             'ghostwhite': (248, 248, 255),
             'gold': (255, 215, 0),
             'goldenrod': (218, 165, 32),
             'gray': (128, 128, 128),
             'grey': (128, 128, 128),
             'green': (0, 128, 0),
             'greenyellow': (173, 255, 47),
             'honeydew': (240, 255, 240),
             'hotpink': (255, 105, 180),
             'indianred': (205, 92, 92),
             'indigo': (75, 0, 130),
             'ivory': (255, 255, 240),
             'khaki': (240, 230, 140),
             'lavender': (230, 230, 250),
             'lavenderblush': (255, 240, 245),
             'lawngreen': (124, 252, 0),
             'lemonchiffon': (255, 250, 205),
             'lightblue': (173, 216, 230),
             'lightcoral': (240, 128, 128),
             'lightcyan': (224, 255, 255),
             'lightgoldenrodyellow': (250, 250, 210),
             'lightgray': (211, 211, 211),
             'lightgreen': (144, 238, 144),
             'lightgrey': (211, 211, 211),
             'lightpink': (255, 182, 193),
             'lightsalmon': (255, 160, 122),
             'lightseagreen': (32, 178, 170),
             'lightskyblue': (135, 206, 250),
             'lightslategray': (119, 136, 153),
             'lightslategrey': (119, 136, 153),
             'lightsteelblue': (176, 196, 222),
             'lightyellow': (255, 255, 224),
             'lime': (0, 255, 0),
             'limegreen': (50, 205, 50),
             'linen': (250, 240, 230),
             'magenta': (255, 0, 255),
             'maroon': (128, 0, 0),
             'mediumaquamarine': (102, 205, 170),
             'mediumblue': (0, 0, 205),
             'mediumorchid': (186, 85, 211),
             'mediumpurple': (147, 112, 219),
             'mediumseagreen': (60, 179, 113),
             'mediumslateblue': (123, 104, 238),
             'mediumspringgreen': (0, 250, 154),
             'mediumturquoise': (72, 209, 204),
             'mediumvioletred': (199, 21, 133),
             'midnightblue': (25, 25, 112),
             'mintcream': (245, 255, 250),
             'mistyrose': (255, 228, 225),
             'moccasin': (255, 228, 181),
             'navajowhite': (255, 222, 173),
             'navy': (0, 0, 128),
             'oldlace': (253, 245, 230),
             'olive': (128, 128, 0),
             'olivedrab': (107, 142, 35),
             'orange': (255, 165, 0),
             'orangered': (255, 69, 0),
             'orchid': (218, 112, 214),
             'palegoldenrod': (238, 232, 170),
             'palegreen': (152, 251, 152),
             'paleturquoise': (175, 238, 238),
             'palevioletred': (219, 112, 147),
             'papayawhip': (255, 239, 213),
             'peachpuff': (255, 218, 185),
             'peru': (205, 133, 63),
             'pink': (255, 192, 203),
             'plum': (221, 160, 221),
             'powderblue': (176, 224, 230),
             'purple': (128, 0, 128),
             'red': (255, 0, 0),
             'rosybrown': (188, 143, 143),
             'royalblue': (65, 105, 225),
             'saddlebrown': (139, 69, 19),
             'salmon': (250, 128, 114),
             'sandybrown': (244, 164, 96),
             'seagreen': (46, 139, 87),
             'seashell': (255, 245, 238),
             'sienna': (160, 82, 45),
             'silver': (192, 192, 192),
             'skyblue': (135, 206, 235),
             'slateblue': (106, 90, 205),
             'slategray': (112, 128, 144),
             'slategrey': (112, 128, 144),
             'snow': (255, 250, 250),
             'springgreen': (0, 255, 127),
             'steelblue': (70, 130, 180),
             'tan': (210, 180, 140),
             'teal': (0, 128, 128),
             'thistle': (216, 191, 216),
             'tomato': (255, 99, 71),
             'turquoise': (64, 224, 208),
             'violet': (238, 130, 238),
             'wheat': (245, 222, 179),
             'white': (255, 255, 255),
             'whitesmoke': (245, 245, 245),
             'yellow': (255, 255, 0),
             'yellowgreen': (154, 205, 50)}
107
scripts/addons_core/io_curve_svg/svg_util.py
Normal file
@ -0,0 +1,107 @@
# SPDX-FileCopyrightText: 2010-2022 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

import re


units = {"": 1.0,
         "px": 1.0,
         "in": 90.0,
         "mm": 90.0 / 25.4,
         "cm": 90.0 / 2.54,
         "pt": 1.25,
         "pc": 15.0,
         "em": 1.0,
         "ex": 1.0,
         "INVALID": 1.0,  # some DocBook files contain this
         }


def srgb_to_linearrgb(c):
    if c < 0.04045:
        return 0.0 if c < 0.0 else c * (1.0 / 12.92)
    else:
        return pow((c + 0.055) * (1.0 / 1.055), 2.4)


def check_points_equal(point_a, point_b):
    return (abs(point_a[0] - point_b[0]) < 1e-6 and
            abs(point_a[1] - point_b[1]) < 1e-6)


match_number = r"-?\d+(\.\d+)?([eE][-+]?\d+)?"
match_first_comma = r"^\s*(?=,)"
match_comma_pair = r",\s*(?=,)"
match_last_comma = r",\s*$"

match_number_optional_parts = r"(-?\d+(\.\d*)?([eE][-+]?\d+)?)|(-?\.\d+([eE][-+]?\d+)?)"
re_match_number_optional_parts = re.compile(match_number_optional_parts)
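
# The array pattern also matches bare commas, so omitted values produce empty
# tokens that value_to_float() turns into zeros (e.g. "1,,3" -> 1, 0, 3).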
array_of_floats_pattern = f"({match_number_optional_parts})|{match_first_comma}|{match_comma_pair}|{match_last_comma}"
re_array_of_floats_pattern = re.compile(array_of_floats_pattern)


def parse_array_of_floats(text):
    """
    Accepts comma or space separated list of floats (without units) and returns an array
    of floating point values.
    """
    elements = re_array_of_floats_pattern.findall(text)
    return [value_to_float(v[0]) for v in elements]


def read_float(text: str, start_index: int = 0):
|
||||
"""
|
||||
Reads floating point value from a string. Parsing starts at the given index.
|
||||
|
||||
Returns the value itself (as a string) and index of first character after the value.
|
||||
"""
|
||||
|
||||
n = len(text)
|
||||
|
||||
# Skip leading whitespace characters and characters which we consider ignorable for float
|
||||
# (like values separator).
|
||||
while start_index < n and (text[start_index].isspace() or text[start_index] == ','):
|
||||
start_index += 1
|
||||
if start_index == n:
|
||||
return "0", start_index
|
||||
|
||||
text_part = text[start_index:]
|
||||
match = re_match_number_optional_parts.match(text_part)
|
||||
|
||||
if match is None:
|
||||
raise Exception('Invalid float value near ' + text[start_index:start_index + 10])
|
||||
|
||||
token = match.group(0)
|
||||
endptr = start_index + match.end(0)
|
||||
|
||||
return token, endptr
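
# Example behavior (mirrors svg_util_test.py below): parsing skips spaces and
# commas from `start_index` before matching, e.g.
#   read_float("1.2 3.4 5.6", 3) -> ("3.4", 8)
# and an empty (or exhausted) string yields the token "0".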


def parse_coord(coord, size):
    """
    Parse coordinate component to common basis

    Needed to handle coordinates set in cm, mm, inches.
    """

    token, last_char = read_float(coord)
    val = float(token)
    unit = coord[last_char:].strip()  # strip() in case there is a space

    if unit == '%':
        return float(size) / 100.0 * val
    else:
        return val * units[unit]

    return val
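
# Example behavior (mirrors svg_util_test.py below), using the 90-units-per-inch
# basis from the `units` table above:
#   parse_coord("1.2cm", 200) -> 1.2 * 90.0 / 2.54 ≈ 42.5197
#   parse_coord("1.2%", 200)  -> 200 / 100.0 * 1.2 == 2.4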


def value_to_float(value_encoded: str):
    """
    A simple wrapper around float() which supports empty strings (which are converted to 0).
    """
    if len(value_encoded) == 0:
        return 0
    return float(value_encoded)
166
scripts/addons_core/io_curve_svg/svg_util_test.py
Executable file
@@ -0,0 +1,166 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2019-2022 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

# XXX Not really nice, but that hack is needed to allow execution of that test
# from both automated CTest and by directly running the file manually.
if __name__ == '__main__':
    from svg_util import (parse_array_of_floats, read_float, parse_coord,)
else:
    from .svg_util import (parse_array_of_floats, read_float, parse_coord,)
import unittest


class ParseArrayOfFloatsTest(unittest.TestCase):
    def test_empty(self):
        self.assertEqual(parse_array_of_floats(""), [])
        self.assertEqual(parse_array_of_floats(" "), [])

    def test_single_value(self):
        self.assertEqual(parse_array_of_floats("123"), [123])
        self.assertEqual(parse_array_of_floats(" \t 123 \t"), [123])

    def test_single_value_exponent(self):
        self.assertEqual(parse_array_of_floats("12e+3"), [12000])
        self.assertEqual(parse_array_of_floats("12e-3"), [0.012])

    def test_space_separated_values(self):
        self.assertEqual(parse_array_of_floats("123 45 6 89"),
                         [123, 45, 6, 89])
        self.assertEqual(parse_array_of_floats(" 123 45 6 89 "),
                         [123, 45, 6, 89])

    def test_comma_separated_values(self):
        self.assertEqual(parse_array_of_floats("123,45,6,89"),
                         [123, 45, 6, 89])
        self.assertEqual(parse_array_of_floats(" 123,45,6,89 "),
                         [123, 45, 6, 89])

    def test_mixed_separated_values(self):
        self.assertEqual(parse_array_of_floats("123,45 6,89"),
                         [123, 45, 6, 89])
        self.assertEqual(parse_array_of_floats(" 123 45,6,89 "),
                         [123, 45, 6, 89])

    def test_omitted_value_with_comma(self):
        self.assertEqual(parse_array_of_floats("1,,3"), [1, 0, 3])
        self.assertEqual(parse_array_of_floats(",,3"), [0, 0, 3])

    def test_sign_as_separator(self):
        self.assertEqual(parse_array_of_floats("1-3"), [1, -3])
        self.assertEqual(parse_array_of_floats("1+3"), [1, 3])

    def test_all_commas(self):
        self.assertEqual(parse_array_of_floats(",,,"), [0, 0, 0, 0])

    def test_value_with_decimal_separator(self):
        self.assertEqual(parse_array_of_floats("3.5"), [3.5])

    def test_comma_separated_values_with_decimal_separator(self):
        self.assertEqual(parse_array_of_floats("2.75,8.5"), [2.75, 8.5])

    def test_missing_decimal(self):
        self.assertEqual(parse_array_of_floats(".92"), [0.92])
        self.assertEqual(parse_array_of_floats(".92e+1"), [9.2])

        self.assertEqual(parse_array_of_floats("-.92"), [-0.92])
        self.assertEqual(parse_array_of_floats("-.92e+1"), [-9.2])


class ReadFloatTest(unittest.TestCase):
    def test_empty(self):
        value, endptr = read_float("", 0)
        self.assertEqual(value, "0")
        self.assertEqual(endptr, 0)

    def test_empty_spaces(self):
        value, endptr = read_float("    ", 0)
        self.assertEqual(value, "0")
        self.assertEqual(endptr, 4)

    def test_single_value(self):
        value, endptr = read_float("1.2", 0)
        self.assertEqual(value, "1.2")
        self.assertEqual(endptr, 3)

    def test_scientific_value(self):
        value, endptr = read_float("1.2e+3", 0)
        self.assertEqual(value, "1.2e+3")
        self.assertEqual(endptr, 6)

    def test_scientific_value_no_sign(self):
        value, endptr = read_float("1.2e3", 0)
        self.assertEqual(value, "1.2e3")
        self.assertEqual(endptr, 5)

    def test_middle(self):
        value, endptr = read_float("1.2 3.4 5.6", 3)
        self.assertEqual(value, "3.4")
        self.assertEqual(endptr, 8)

    def test_comma(self):
        value, endptr = read_float("1.2  ,,3.4 5.6", 3)
        self.assertEqual(value, "3.4")
        self.assertEqual(endptr, 10)

    def test_not_a_number(self):
        # TODO(sergey): Make this catch more concrete.
        with self.assertRaises(Exception):
            read_float("1.2eV", 3)

    def test_missing_fractional(self):
        value, endptr = read_float("1.", 0)
        self.assertEqual(value, "1.")
        self.assertEqual(endptr, 2)

        value, endptr = read_float("2. 3", 0)
        self.assertEqual(value, "2.")
        self.assertEqual(endptr, 2)

    def test_missing_decimal(self):
        value, endptr = read_float(".92", 0)
        self.assertEqual(value, ".92")
        self.assertEqual(endptr, 3)

        value, endptr = read_float("-.92", 0)
        self.assertEqual(value, "-.92")
        self.assertEqual(endptr, 4)

        value, endptr = read_float(".92e+3", 0)
        self.assertEqual(value, ".92e+3")
        self.assertEqual(endptr, 6)

        value, endptr = read_float("-.92e+3", 0)
        self.assertEqual(value, "-.92e+3")
        self.assertEqual(endptr, 7)

        # TODO(sergey): Make these catch more concrete.
        with self.assertRaises(Exception):
            read_float(".", 0)
        with self.assertRaises(Exception):
            read_float(".e+1", 0)


class ParseCoordTest(unittest.TestCase):
    def test_empty(self):
        self.assertEqual(parse_coord("", 200), 0)

    def test_empty_spaces(self):
        self.assertEqual(parse_coord("    ", 200), 0)

    def test_no_units(self):
        self.assertEqual(parse_coord("1.2", 200), 1.2)

    def test_unit_cm(self):
        self.assertAlmostEqual(parse_coord("1.2cm", 200), 42.51968503937008)

    def test_unit_ex(self):
        self.assertAlmostEqual(parse_coord("1.2ex", 200), 1.2)

    def test_unit_percentage(self):
        self.assertEqual(parse_coord("1.2%", 200), 2.4)


if __name__ == '__main__':
    unittest.main(verbosity=2)
300
scripts/addons_core/io_mesh_uv_layout/__init__.py
Normal file
@@ -0,0 +1,300 @@
# SPDX-FileCopyrightText: 2011-2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

bl_info = {
    "name": "UV Layout",
    "author": "Campbell Barton, Matt Ebb",
    "version": (1, 2, 0),
    "blender": (3, 0, 0),
    "location": "UV Editor > UV > Export UV Layout",
    "description": "Export the UV layout as a 2D graphic",
    "warning": "",
    "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/mesh_uv_layout.html",
    "support": 'OFFICIAL',
    "category": "Import-Export",
}


# @todo write the wiki page

if "bpy" in locals():
    import importlib
    if "export_uv_eps" in locals():
        importlib.reload(export_uv_eps)
    if "export_uv_png" in locals():
        importlib.reload(export_uv_png)
    if "export_uv_svg" in locals():
        importlib.reload(export_uv_svg)

import os
import bpy

from bpy.app.translations import contexts as i18n_contexts

from bpy.props import (
    StringProperty,
    BoolProperty,
    EnumProperty,
    IntVectorProperty,
    FloatProperty,
)


class ExportUVLayout(bpy.types.Operator):
    """Export UV layout to file"""

    bl_idname = "uv.export_layout"
    bl_label = "Export UV Layout"
    bl_options = {'REGISTER', 'UNDO'}

    filepath: StringProperty(
        subtype='FILE_PATH',
    )
    export_all: BoolProperty(
        name="All UVs",
        description="Export all UVs in this mesh (not just visible ones)",
        default=False,
    )
    export_tiles: EnumProperty(
        name="Export Tiles",
        items=(
            ('NONE', "None",
             "Export only UVs in the [0, 1] range"),
            ('UDIM', "UDIM",
             "Export tiles in the UDIM numbering scheme: 1001 + u_tile + 10*v_tile"),
            ('UV', "UVTILE",
             "Export tiles in the UVTILE numbering scheme: u(u_tile + 1)_v(v_tile + 1)"),
        ),
        description="Choose whether to export only the [0, 1] range, or all UV tiles",
        default='NONE',
    )
    modified: BoolProperty(
        name="Modified",
        description="Exports UVs from the modified mesh",
        default=False,
        translation_context=i18n_contexts.id_mesh,
    )
    mode: EnumProperty(
        items=(
            ('SVG', "Scalable Vector Graphic (.svg)",
             "Export the UV layout to a vector SVG file"),
            ('EPS', "Encapsulated PostScript (.eps)",
             "Export the UV layout to a vector EPS file"),
            ('PNG', "PNG Image (.png)",
             "Export the UV layout to a bitmap image"),
        ),
        name="Format",
        description="File format to export the UV layout to",
        default='PNG',
    )
    size: IntVectorProperty(
        name="Size",
        size=2,
        default=(1024, 1024),
        min=8, max=32768,
        description="Dimensions of the exported file",
    )
    opacity: FloatProperty(
        name="Fill Opacity",
        min=0.0, max=1.0,
        default=0.25,
        description="Set amount of opacity for exported UV layout",
    )
    # For the file-selector.
    check_existing: BoolProperty(
        default=True,
        options={'HIDDEN'},
    )

    @classmethod
    def poll(cls, context):
        obj = context.active_object
        return obj is not None and obj.type == 'MESH' and obj.data.uv_layers

    def invoke(self, context, event):
        self.size = self.get_image_size(context)
        self.filepath = self.get_default_file_name(context) + "." + self.mode.lower()
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}

    def get_default_file_name(self, context):
        AMOUNT = 3
        objects = list(self.iter_objects_to_export(context))
        name = " ".join(sorted([obj.name for obj in objects[:AMOUNT]]))
        if len(objects) > AMOUNT:
            name += " and more"
        return name

    def check(self, context):
        if any(self.filepath.endswith(ext) for ext in (".png", ".eps", ".svg")):
            self.filepath = self.filepath[:-4]

        ext = "." + self.mode.lower()
        self.filepath = bpy.path.ensure_ext(self.filepath, ext)
        return True

    def execute(self, context):
        obj = context.active_object
        is_editmode = (obj.mode == 'EDIT')
        if is_editmode:
            bpy.ops.object.mode_set(mode='OBJECT', toggle=False)

        meshes = list(self.iter_meshes_to_export(context))
        polygon_data = list(self.iter_polygon_data_to_draw(context, meshes))
        different_colors = set(color for _, color in polygon_data)
        if self.modified:
            depsgraph = context.evaluated_depsgraph_get()
            for obj in self.iter_objects_to_export(context):
                obj_eval = obj.evaluated_get(depsgraph)
                obj_eval.to_mesh_clear()

        tiles = self.tiles_to_export(polygon_data)
        export = self.get_exporter()
        dirname, filename = os.path.split(self.filepath)

        # Strip UDIM or UV numbering, and extension
        import re
        name_regex = r"^(.*?)"
        udim_regex = r"(?:\.[0-9]{4})?"
        uv_regex = r"(?:\.u[0-9]+_v[0-9]+)?"
        ext_regex = r"(?:\.png|\.eps|\.svg)?$"
        if self.export_tiles == 'NONE':
            match = re.match(name_regex + ext_regex, filename)
        elif self.export_tiles == 'UDIM':
            match = re.match(name_regex + udim_regex + ext_regex, filename)
        elif self.export_tiles == 'UV':
            match = re.match(name_regex + uv_regex + ext_regex, filename)
        if match:
            filename = match.groups()[0]

        for tile in sorted(tiles):
            filepath = os.path.join(dirname, filename)
            if self.export_tiles == 'UDIM':
                filepath += f".{1001 + tile[0] + tile[1] * 10:04}"
            elif self.export_tiles == 'UV':
                filepath += f".u{tile[0] + 1}_v{tile[1] + 1}"
            filepath = bpy.path.ensure_ext(filepath, "." + self.mode.lower())

            export(filepath, tile, polygon_data, different_colors,
                   self.size[0], self.size[1], self.opacity)

        if is_editmode:
            bpy.ops.object.mode_set(mode='EDIT', toggle=False)

        return {'FINISHED'}

    def iter_meshes_to_export(self, context):
        depsgraph = context.evaluated_depsgraph_get()
        for obj in self.iter_objects_to_export(context):
            if self.modified:
                yield obj.evaluated_get(depsgraph).to_mesh()
            else:
                yield obj.data

    @staticmethod
    def iter_objects_to_export(context):
        for obj in {*context.selected_objects, context.active_object}:
            if obj.type != 'MESH':
                continue
            mesh = obj.data
            if mesh.uv_layers.active is None:
                continue
            yield obj

    def tiles_to_export(self, polygon_data):
        """Get a set of tiles containing UVs.
        This assumes there is no UV edge crossing an otherwise empty tile.
        """
        if self.export_tiles == 'NONE':
            return {(0, 0)}

        from math import floor
        tiles = set()
        for poly in polygon_data:
            for uv in poly[0]:
                # Ignore UVs at corners - precisely touching the right or upper edge
                # of a tile should not load its right/upper neighbor as well.
                # From intern/cycles/scene/attribute.cpp
                u, v = uv[0], uv[1]
                x, y = floor(u), floor(v)
                if x > 0 and u < x + 1e-6:
                    x -= 1
                if y > 0 and v < y + 1e-6:
                    y -= 1
                if x >= 0 and y >= 0:
                    tiles.add((x, y))
        return tiles
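
    # Worked example (illustrative): a UV at (1.25, 2.5) lands in tile (1, 2);
    # with the numbering schemes from `export_tiles` above, `execute()` then
    # appends ".1022" for UDIM (1001 + 1 + 2 * 10) or ".u2_v3" for UVTILE.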

    @staticmethod
    def currently_image_image_editor(context):
        return isinstance(context.space_data, bpy.types.SpaceImageEditor)

    def get_currently_opened_image(self, context):
        if not self.currently_image_image_editor(context):
            return None
        return context.space_data.image

    def get_image_size(self, context):
        # fallback if not in image context
        image_width = self.size[0]
        image_height = self.size[1]

        # get size of "active" image if some exist
        image = self.get_currently_opened_image(context)
        if image is not None:
            width, height = image.size
            if width and height:
                image_width = width
                image_height = height

        return image_width, image_height

    def iter_polygon_data_to_draw(self, context, meshes):
        for mesh in meshes:
            uv_layer = mesh.uv_layers.active.data
            for polygon in mesh.polygons:
                if self.export_all or polygon.select:
                    start = polygon.loop_start
                    end = start + polygon.loop_total
                    uvs = tuple(tuple(uv.uv) for uv in uv_layer[start:end])
                    yield (uvs, self.get_polygon_color(mesh, polygon))

    @staticmethod
    def get_polygon_color(mesh, polygon, default=(0.8, 0.8, 0.8)):
        if polygon.material_index < len(mesh.materials):
            material = mesh.materials[polygon.material_index]
            if material is not None:
                return tuple(material.diffuse_color)[:3]
        return default

    def get_exporter(self):
        if self.mode == 'PNG':
            from . import export_uv_png
            return export_uv_png.export
        elif self.mode == 'EPS':
            from . import export_uv_eps
            return export_uv_eps.export
        elif self.mode == 'SVG':
            from . import export_uv_svg
            return export_uv_svg.export
        else:
            assert False


def menu_func(self, context):
    self.layout.operator(ExportUVLayout.bl_idname)


def register():
    bpy.utils.register_class(ExportUVLayout)
    bpy.types.IMAGE_MT_uvs.append(menu_func)


def unregister():
    bpy.utils.unregister_class(ExportUVLayout)
    bpy.types.IMAGE_MT_uvs.remove(menu_func)


if __name__ == "__main__":
    register()
82
scripts/addons_core/io_mesh_uv_layout/export_uv_eps.py
Normal file
@@ -0,0 +1,82 @@
# SPDX-FileCopyrightText: 2011-2022 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

import bpy


def export(filepath, tile, face_data, colors, width, height, opacity):
    with open(filepath, 'w', encoding='utf-8') as file:
        for text in get_file_parts(tile, face_data, colors, width, height, opacity):
            file.write(text)


def get_file_parts(tile, face_data, colors, width, height, opacity):
    yield from header(width, height)
    if opacity > 0.0:
        name_by_color = {}
        yield from prepare_colors(colors, name_by_color)
        yield from draw_colored_polygons(tile, face_data, name_by_color, width, height)
    yield from draw_lines(tile, face_data, width, height)
    yield from footer()


def header(width, height):
    yield "%!PS-Adobe-3.0 EPSF-3.0\n"
    yield f"%%Creator: Blender {bpy.app.version_string}\n"
    yield "%%Pages: 1\n"
    yield "%%Orientation: Portrait\n"
    yield f"%%BoundingBox: 0 0 {width} {height}\n"
    yield f"%%HiResBoundingBox: 0.0 0.0 {width:.4f} {height:.4f}\n"
    yield "%%EndComments\n"
    yield "%%Page: 1 1\n"
    yield "0 0 translate\n"
    yield "1.0 1.0 scale\n"
    yield "0 0 0 setrgbcolor\n"
    yield "[] 0 setdash\n"
    yield "1 setlinewidth\n"
    yield "1 setlinejoin\n"
    yield "1 setlinecap\n"


def prepare_colors(colors, out_name_by_color):
    for i, color in enumerate(colors):
        name = f"COLOR_{i}"
        yield "/%s {" % name
        out_name_by_color[color] = name

        yield "gsave\n"
        yield "%.3g %.3g %.3g setrgbcolor\n" % color
        yield "fill\n"
        yield "grestore\n"
        yield "0 setgray\n"
        yield "} def\n"


def draw_colored_polygons(tile, face_data, name_by_color, width, height):
    for uvs, color in face_data:
        yield from draw_polygon_path(tile, uvs, width, height)
        yield "closepath\n"
        yield "%s\n" % name_by_color[color]


def draw_lines(tile, face_data, width, height):
    for uvs, _ in face_data:
        yield from draw_polygon_path(tile, uvs, width, height)
        yield "closepath\n"
        yield "stroke\n"


def draw_polygon_path(tile, uvs, width, height):
    yield "newpath\n"
    for j, uv in enumerate(uvs):
        uv_scale = ((uv[0] - tile[0]) * width, (uv[1] - tile[1]) * height)
        if j == 0:
            yield "%.5f %.5f moveto\n" % uv_scale
        else:
            yield "%.5f %.5f lineto\n" % uv_scale


def footer():
    yield "showpage\n"
    yield "%%EOF\n"
119
scripts/addons_core/io_mesh_uv_layout/export_uv_png.py
Normal file
@@ -0,0 +1,119 @@
# SPDX-FileCopyrightText: 2011-2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

import bpy
import gpu
from mathutils import Matrix
from mathutils.geometry import tessellate_polygon
from gpu_extras.batch import batch_for_shader

# Use OIIO if available, else Blender for writing the image.
try:
    import OpenImageIO as oiio
except ImportError:
    oiio = None


def export(filepath, tile, face_data, colors, width, height, opacity):
    offscreen = gpu.types.GPUOffScreen(width, height)
    offscreen.bind()

    try:
        fb = gpu.state.active_framebuffer_get()
        fb.clear(color=(0.0, 0.0, 0.0, 0.0))
        draw_image(tile, face_data, opacity)

        pixel_data = fb.read_color(0, 0, width, height, 4, 0, 'UBYTE')
        pixel_data.dimensions = width * height * 4
        save_pixels(filepath, pixel_data, width, height)
    finally:
        offscreen.unbind()
        offscreen.free()


def draw_image(tile, face_data, opacity):
    gpu.state.blend_set('ALPHA')

    with gpu.matrix.push_pop():
        gpu.matrix.load_matrix(get_normalize_uvs_matrix(tile))
        gpu.matrix.load_projection_matrix(Matrix.Identity(4))

        draw_background_colors(face_data, opacity)
        draw_lines(face_data)

    gpu.state.blend_set('NONE')


def get_normalize_uvs_matrix(tile):
    '''matrix maps x and y coordinates from [0, 1] to [-1, 1]'''
    matrix = Matrix.Identity(4)
    matrix.col[3][0] = -1 - (tile[0] * 2)
    matrix.col[3][1] = -1 - (tile[1] * 2)
    matrix[0][0] = 2
    matrix[1][1] = 2

    # OIIO writes arrays from the left-upper corner.
    if oiio:
        matrix.col[3][1] *= -1.0
        matrix[1][1] *= -1.0

    return matrix
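
# Quick sanity check (illustrative): for tile (0, 0) the matrix maps u=0 to -1
# and u=1 to +1 (2*u - 1); for tile (1, 0) it maps u in [1, 2] onto the same
# [-1, 1] clip-space range (2*u - 3). The OIIO branch flips Y because OIIO
# stores rows top-down.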


def draw_background_colors(face_data, opacity):
    coords = [uv for uvs, _ in face_data for uv in uvs]
    colors = [(*color, opacity) for uvs, color in face_data for _ in range(len(uvs))]

    indices = []
    offset = 0
    for uvs, _ in face_data:
        triangles = tessellate_uvs(uvs)
        indices.extend([index + offset for index in triangle] for triangle in triangles)
        offset += len(uvs)

    shader = gpu.shader.from_builtin('FLAT_COLOR')
    batch = batch_for_shader(
        shader, 'TRIS',
        {"pos": coords, "color": colors},
        indices=indices,
    )
    batch.draw(shader)


def tessellate_uvs(uvs):
    return tessellate_polygon([uvs])


def draw_lines(face_data):
    coords = []
    for uvs, _ in face_data:
        for i in range(len(uvs)):
            start = uvs[i]
            end = uvs[(i + 1) % len(uvs)]
            coords.append((start[0], start[1]))
            coords.append((end[0], end[1]))

    shader = gpu.shader.from_builtin('POLYLINE_UNIFORM_COLOR')
    shader.uniform_float("viewportSize", gpu.state.viewport_get()[2:])
    shader.uniform_float("lineWidth", 1.0)
    shader.uniform_float("color", (0.0, 0.0, 0.0, 1.0))

    batch = batch_for_shader(shader, 'LINES', {"pos": coords})
    batch.draw(shader)


def save_pixels(filepath, pixel_data, width, height):
    if oiio:
        spec = oiio.ImageSpec(width, height, 4, "uint8")
        image = oiio.ImageOutput.create(filepath)
        image.open(filepath, spec)
        image.write_image(pixel_data)
        image.close()
        return

    image = bpy.data.images.new("temp", width, height, alpha=True)
    image.filepath = filepath
    image.pixels = [v / 255 for v in pixel_data]
    image.save()
    bpy.data.images.remove(image)
54
scripts/addons_core/io_mesh_uv_layout/export_uv_svg.py
Normal file
@@ -0,0 +1,54 @@
# SPDX-FileCopyrightText: 2011-2022 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

import bpy
from os.path import basename
from xml.sax.saxutils import escape


def export(filepath, tile, face_data, colors, width, height, opacity):
    with open(filepath, 'w', encoding='utf-8') as file:
        for text in get_file_parts(tile, face_data, colors, width, height, opacity):
            file.write(text)


def get_file_parts(tile, face_data, colors, width, height, opacity):
    yield from header(width, height)
    yield from draw_polygons(tile, face_data, width, height, opacity)
    yield from footer()


def header(width, height):
    yield '<?xml version="1.0" standalone="no"?>\n'
    yield '<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" \n'
    yield '  "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
    yield f'<svg width="{width}" height="{height}" viewBox="0 0 {width} {height}"\n'
    yield '  xmlns="http://www.w3.org/2000/svg" version="1.1">\n'
    desc = f"{basename(bpy.data.filepath)}, (Blender {bpy.app.version_string})"
    yield f'<desc>{escape(desc)}</desc>\n'


def draw_polygons(tile, face_data, width, height, opacity):
    for uvs, color in face_data:
        fill = f'fill="{get_color_string(color)}"'

        yield '<polygon stroke="black" stroke-width="1"'
        yield f' {fill} fill-opacity="{opacity:.2g}"'

        yield ' points="'

        for uv in uvs:
            x, y = uv[0] - tile[0], 1.0 - uv[1] + tile[1]
            yield f'{x*width:.3f},{y*height:.3f} '
        yield '" />\n'


def get_color_string(color):
    r, g, b = color
    return f"rgb({round(r*255)}, {round(g*255)}, {round(b*255)})"
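
# Example (illustrative): the default polygon color (0.8, 0.8, 0.8) becomes
# "rgb(204, 204, 204)". Note the flip in draw_polygons() above: SVG's y axis
# points down, so v is written as 1.0 - uv[1] + tile[1].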


def footer():
    yield '\n'
    yield '</svg>\n'
733
scripts/addons_core/io_scene_fbx/__init__.py
Normal file
@@ -0,0 +1,733 @@
# SPDX-FileCopyrightText: 2011-2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

bl_info = {
    "name": "FBX format",
    "author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
    "version": (5, 12, 3),
    "blender": (4, 2, 0),
    "location": "File > Import-Export",
    "description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",
    "warning": "",
    "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/scene_fbx.html",
    "support": 'OFFICIAL',
    "category": "Import-Export",
}


if "bpy" in locals():
    import importlib
    if "import_fbx" in locals():
        importlib.reload(import_fbx)
    if "export_fbx_bin" in locals():
        importlib.reload(export_fbx_bin)
    if "export_fbx" in locals():
        importlib.reload(export_fbx)


import bpy
from bpy.props import (
    StringProperty,
    BoolProperty,
    FloatProperty,
    EnumProperty,
    CollectionProperty,
)
from bpy_extras.io_utils import (
    ImportHelper,
    ExportHelper,
    orientation_helper,
    path_reference_mode,
    axis_conversion,
    poll_file_object_drop,
)


@orientation_helper(axis_forward='-Z', axis_up='Y')
class ImportFBX(bpy.types.Operator, ImportHelper):
    """Load a FBX file"""
    bl_idname = "import_scene.fbx"
    bl_label = "Import FBX"
    bl_options = {'UNDO', 'PRESET'}

    directory: StringProperty()

    filename_ext = ".fbx"
    filter_glob: StringProperty(default="*.fbx", options={'HIDDEN'})

    files: CollectionProperty(
        name="File Path",
        type=bpy.types.OperatorFileListElement,
    )

    ui_tab: EnumProperty(
        items=(('MAIN', "Main", "Main basic settings"),
               ('ARMATURE', "Armatures", "Armature-related settings"),
               ),
        name="ui_tab",
        description="Import options categories",
    )

    use_manual_orientation: BoolProperty(
        name="Manual Orientation",
        description="Specify orientation and scale, instead of using embedded data in FBX file",
        default=False,
    )
    global_scale: FloatProperty(
        name="Scale",
        min=0.001, max=1000.0,
        default=1.0,
    )
    bake_space_transform: BoolProperty(
        name="Apply Transform",
        description="Bake space transform into object data, avoids getting unwanted rotations to objects when "
                    "target space is not aligned with Blender's space "
                    "(WARNING! experimental option, use at own risk, known to be broken with armatures/animations)",
        default=False,
    )

    use_custom_normals: BoolProperty(
        name="Custom Normals",
        description="Import custom normals, if available (otherwise Blender will recompute them)",
        default=True,
    )
    colors_type: EnumProperty(
        name="Vertex Colors",
        items=(('NONE', "None", "Do not import color attributes"),
               ('SRGB', "sRGB", "Expect file colors in sRGB color space"),
               ('LINEAR', "Linear", "Expect file colors in linear color space"),
               ),
        description="Import vertex color attributes",
        default='SRGB',
    )

    use_image_search: BoolProperty(
        name="Image Search",
        description="Search subdirs for any associated images (WARNING: may be slow)",
        default=True,
    )

    use_alpha_decals: BoolProperty(
        name="Alpha Decals",
        description="Treat materials with alpha as decals (no shadow casting)",
        default=False,
    )
    decal_offset: FloatProperty(
        name="Decal Offset",
        description="Displace geometry of alpha meshes",
        min=0.0, max=1.0,
        default=0.0,
    )

    use_anim: BoolProperty(
        name="Import Animation",
        description="Import FBX animation",
        default=True,
    )
    anim_offset: FloatProperty(
        name="Animation Offset",
        description="Offset to apply to animation during import, in frames",
        default=1.0,
    )

    use_subsurf: BoolProperty(
        name="Subdivision Data",
        description="Import FBX subdivision information as subdivision surface modifiers",
        default=False,
    )

    use_custom_props: BoolProperty(
        name="Custom Properties",
        description="Import user properties as custom properties",
        default=True,
    )
    use_custom_props_enum_as_string: BoolProperty(
        name="Import Enums As Strings",
        description="Store enumeration values as strings",
        default=True,
    )

    ignore_leaf_bones: BoolProperty(
        name="Ignore Leaf Bones",
        description="Ignore the last bone at the end of each chain (used to mark the length of the previous bone)",
        default=False,
    )
    force_connect_children: BoolProperty(
        name="Force Connect Children",
        description="Force connection of children bones to their parent, even if their computed head/tail "
                    "positions do not match (can be useful with pure-joints-type armatures)",
        default=False,
    )
    automatic_bone_orientation: BoolProperty(
        name="Automatic Bone Orientation",
        description="Try to align the major bone axis with the bone children",
        default=False,
    )
    primary_bone_axis: EnumProperty(
        name="Primary Bone Axis",
        items=(('X', "X Axis", ""),
               ('Y', "Y Axis", ""),
               ('Z', "Z Axis", ""),
               ('-X', "-X Axis", ""),
               ('-Y', "-Y Axis", ""),
               ('-Z', "-Z Axis", ""),
               ),
        default='Y',
    )
    secondary_bone_axis: EnumProperty(
        name="Secondary Bone Axis",
        items=(('X', "X Axis", ""),
               ('Y', "Y Axis", ""),
               ('Z', "Z Axis", ""),
               ('-X', "-X Axis", ""),
               ('-Y', "-Y Axis", ""),
               ('-Z', "-Z Axis", ""),
               ),
        default='X',
    )

    use_prepost_rot: BoolProperty(
        name="Use Pre/Post Rotation",
        description="Use pre/post rotation from FBX transform (you may have to disable that in some cases)",
        default=True,
    )

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        import_panel_include(layout, self)
        import_panel_transform(layout, self)
        import_panel_animation(layout, self)
        import_panel_armature(layout, self)

    def execute(self, context):
        keywords = self.as_keywords(ignore=("filter_glob", "directory", "ui_tab", "filepath", "files"))

        from . import import_fbx
        import os

        if self.files:
            ret = {'CANCELLED'}
            dirname = os.path.dirname(self.filepath)
            for file in self.files:
                path = os.path.join(dirname, file.name)
                if import_fbx.load(self, context, filepath=path, **keywords) == {'FINISHED'}:
                    ret = {'FINISHED'}
            return ret
        else:
            return import_fbx.load(self, context, filepath=self.filepath, **keywords)

    def invoke(self, context, event):
        return self.invoke_popup(context)
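
    # Illustrative invocation (assumption; standard Blender operator call
    # syntax, not part of this file):
    #   bpy.ops.import_scene.fbx(filepath="/path/to/model.fbx")
    # Keyword names match the properties declared above,
    # e.g. use_anim=False or global_scale=2.0.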


def import_panel_include(layout, operator):
    header, body = layout.panel("FBX_import_include", default_closed=False)
    header.label(text="Include")
    if body:
        body.prop(operator, "use_custom_normals")
        body.prop(operator, "use_subsurf")
        body.prop(operator, "use_custom_props")
        sub = body.row()
        sub.enabled = operator.use_custom_props
        sub.prop(operator, "use_custom_props_enum_as_string")
        body.prop(operator, "use_image_search")
        body.prop(operator, "colors_type")


def import_panel_transform(layout, operator):
    header, body = layout.panel("FBX_import_transform", default_closed=False)
    header.label(text="Transform")
    if body:
        body.prop(operator, "global_scale")
        body.prop(operator, "decal_offset")
        row = body.row()
        row.prop(operator, "bake_space_transform")
        row.label(text="", icon='ERROR')
        body.prop(operator, "use_prepost_rot")

        import_panel_transform_orientation(body, operator)


def import_panel_transform_orientation(layout, operator):
    header, body = layout.panel("FBX_import_transform_manual_orientation", default_closed=False)
    header.use_property_split = False
    header.prop(operator, "use_manual_orientation", text="")
    header.label(text="Manual Orientation")
    if body:
        body.enabled = operator.use_manual_orientation
        body.prop(operator, "axis_forward")
        body.prop(operator, "axis_up")


def import_panel_animation(layout, operator):
    header, body = layout.panel("FBX_import_animation", default_closed=True)
    header.use_property_split = False
    header.prop(operator, "use_anim", text="")
    header.label(text="Animation")
    if body:
        body.enabled = operator.use_anim
        body.prop(operator, "anim_offset")


def import_panel_armature(layout, operator):
    header, body = layout.panel("FBX_import_armature", default_closed=True)
    header.label(text="Armature")
    if body:
        body.prop(operator, "ignore_leaf_bones")
        body.prop(operator, "force_connect_children"),
        body.prop(operator, "automatic_bone_orientation"),
        sub = body.column()
        sub.enabled = not operator.automatic_bone_orientation
        sub.prop(operator, "primary_bone_axis")
        sub.prop(operator, "secondary_bone_axis")


@orientation_helper(axis_forward='-Z', axis_up='Y')
class ExportFBX(bpy.types.Operator, ExportHelper):
    """Write a FBX file"""
    bl_idname = "export_scene.fbx"
    bl_label = "Export FBX"
    bl_options = {'UNDO', 'PRESET'}

    filename_ext = ".fbx"
    filter_glob: StringProperty(default="*.fbx", options={'HIDDEN'})

    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.

    use_selection: BoolProperty(
        name="Selected Objects",
        description="Export selected and visible objects only",
        default=False,
    )
    use_visible: BoolProperty(
        name='Visible Objects',
        description='Export visible objects only',
        default=False
    )
    use_active_collection: BoolProperty(
        name="Active Collection",
        description="Export only objects from the active collection (and its children)",
        default=False,
    )
    collection: StringProperty(
        name="Source Collection",
        description="Export only objects from this collection (and its children)",
        default="",
    )
    global_scale: FloatProperty(
        name="Scale",
        description="Scale all data (Some importers do not support scaled armatures!)",
        min=0.001, max=1000.0,
        soft_min=0.01, soft_max=1000.0,
        default=1.0,
    )
    apply_unit_scale: BoolProperty(
        name="Apply Unit",
        description="Take into account current Blender units settings (if unset, raw Blender Units values are used as-is)",
        default=True,
    )
    apply_scale_options: EnumProperty(
        items=(('FBX_SCALE_NONE', "All Local",
                "Apply custom scaling and units scaling to each object transformation, FBX scale remains at 1.0"),
               ('FBX_SCALE_UNITS', "FBX Units Scale",
                "Apply custom scaling to each object transformation, and units scaling to FBX scale"),
               ('FBX_SCALE_CUSTOM', "FBX Custom Scale",
                "Apply custom scaling to FBX scale, and units scaling to each object transformation"),
               ('FBX_SCALE_ALL', "FBX All",
                "Apply custom scaling and units scaling to FBX scale"),
               ),
        name="Apply Scalings",
        description="How to apply custom and units scalings in generated FBX file "
                    "(Blender uses FBX scale to detect units on import, "
                    "but many other applications do not handle the same way)",
    )

    use_space_transform: BoolProperty(
        name="Use Space Transform",
        description="Apply global space transform to the object rotations. When disabled "
                    "only the axis space is written to the file and all object transforms are left as-is",
        default=True,
    )
    bake_space_transform: BoolProperty(
        name="Apply Transform",
        description="Bake space transform into object data, avoids getting unwanted rotations to objects when "
                    "target space is not aligned with Blender's space "
                    "(WARNING! experimental option, use at own risk, known to be broken with armatures/animations)",
        default=False,
    )

    object_types: EnumProperty(
        name="Object Types",
        options={'ENUM_FLAG'},
        items=(('EMPTY', "Empty", ""),
               ('CAMERA', "Camera", ""),
               ('LIGHT', "Lamp", ""),
               ('ARMATURE', "Armature", "WARNING: not supported in dupli/group instances"),
               ('MESH', "Mesh", ""),
               ('OTHER', "Other", "Other geometry types, like curve, metaball, etc. (converted to meshes)"),
               ),
        description="Which kind of object to export",
        default={'EMPTY', 'CAMERA', 'LIGHT', 'ARMATURE', 'MESH', 'OTHER'},
    )

    use_mesh_modifiers: BoolProperty(
        name="Apply Modifiers",
        description="Apply modifiers to mesh objects (except Armature ones) - "
                    "WARNING: prevents exporting shape keys",
        default=True,
    )
    use_mesh_modifiers_render: BoolProperty(
        name="Use Modifiers Render Setting",
        description="Use render settings when applying modifiers to mesh objects (DISABLED in Blender 2.8)",
        default=True,
    )
    mesh_smooth_type: EnumProperty(
        name="Smoothing",
        items=(('OFF', "Normals Only", "Export only normals instead of writing edge or face smoothing data"),
               ('FACE', "Face", "Write face smoothing"),
               ('EDGE', "Edge", "Write edge smoothing"),
               ),
        description="Export smoothing information "
                    "(prefer 'Normals Only' option if your target importer understand split normals)",
        default='OFF',
    )
    colors_type: EnumProperty(
        name="Vertex Colors",
        items=(('NONE', "None", "Do not export color attributes"),
               ('SRGB', "sRGB", "Export colors in sRGB color space"),
               ('LINEAR', "Linear", "Export colors in linear color space"),
               ),
        description="Export vertex color attributes",
        default='SRGB',
    )
    prioritize_active_color: BoolProperty(
        name="Prioritize Active Color",
        description="Make sure active color will be exported first. Could be important "
                    "since some other software can discard other color attributes besides the first one",
        default=False,
    )
    use_subsurf: BoolProperty(
        name="Export Subdivision Surface",
        description="Export the last Catmull-Rom subdivision modifier as FBX subdivision "
                    "(does not apply the modifier even if 'Apply Modifiers' is enabled)",
        default=False,
    )
    use_mesh_edges: BoolProperty(
        name="Loose Edges",
        description="Export loose edges (as two-vertices polygons)",
        default=False,
    )
    use_tspace: BoolProperty(
        name="Tangent Space",
        description="Add binormal and tangent vectors, together with normal they form the tangent space "
                    "(will only work correctly with tris/quads only meshes!)",
        default=False,
    )
    use_triangles: BoolProperty(
        name="Triangulate Faces",
        description="Convert all faces to triangles",
        default=False,
    )
    use_custom_props: BoolProperty(
        name="Custom Properties",
        description="Export custom properties",
        default=False,
    )
    add_leaf_bones: BoolProperty(
        name="Add Leaf Bones",
        description="Append a final bone to the end of each chain to specify last bone length "
                    "(use this when you intend to edit the armature from exported data)",
        default=True  # False for commit!
    )
    primary_bone_axis: EnumProperty(
        name="Primary Bone Axis",
        items=(('X', "X Axis", ""),
               ('Y', "Y Axis", ""),
               ('Z', "Z Axis", ""),
               ('-X', "-X Axis", ""),
               ('-Y', "-Y Axis", ""),
               ('-Z', "-Z Axis", ""),
               ),
        default='Y',
    )
    secondary_bone_axis: EnumProperty(
        name="Secondary Bone Axis",
        items=(('X', "X Axis", ""),
               ('Y', "Y Axis", ""),
               ('Z', "Z Axis", ""),
               ('-X', "-X Axis", ""),
               ('-Y', "-Y Axis", ""),
               ('-Z', "-Z Axis", ""),
               ),
        default='X',
    )
    use_armature_deform_only: BoolProperty(
        name="Only Deform Bones",
        description="Only write deforming bones (and non-deforming ones when they have deforming children)",
        default=False,
    )
    armature_nodetype: EnumProperty(
        name="Armature FBXNode Type",
        items=(('NULL', "Null", "'Null' FBX node, similar to Blender's Empty (default)"),
               ('ROOT', "Root", "'Root' FBX node, supposed to be the root of chains of bones..."),
               ('LIMBNODE', "LimbNode", "'LimbNode' FBX node, a regular joint between two bones..."),
               ),
        description="FBX type of node (object) used to represent Blender's armatures "
                    "(use the Null type unless you experience issues with the other app, "
                    "as other choices may not import back perfectly into Blender...)",
        default='NULL',
    )
    bake_anim: BoolProperty(
        name="Baked Animation",
        description="Export baked keyframe animation",
        default=True,
    )
    bake_anim_use_all_bones: BoolProperty(
        name="Key All Bones",
        description="Force exporting at least one key of animation for all bones "
                    "(needed with some target applications, like UE4)",
        default=True,
    )
    bake_anim_use_nla_strips: BoolProperty(
        name="NLA Strips",
        description="Export each non-muted NLA strip as a separated FBX's AnimStack, if any, "
                    "instead of global scene animation",
        default=True,
    )
    bake_anim_use_all_actions: BoolProperty(
        name="All Actions",
        description="Export each action as a separated FBX's AnimStack, instead of global scene animation "
                    "(note that animated objects will get all actions compatible with them, "
                    "others will get no animation at all)",
        default=True,
    )
    bake_anim_force_startend_keying: BoolProperty(
        name="Force Start/End Keying",
        description="Always add a keyframe at start and end of actions for animated channels",
        default=True,
    )
    bake_anim_step: FloatProperty(
        name="Sampling Rate",
        description="How often to evaluate animated values (in frames)",
        min=0.01, max=100.0,
        soft_min=0.1, soft_max=10.0,
        default=1.0,
    )
    bake_anim_simplify_factor: FloatProperty(
        name="Simplify",
        description="How much to simplify baked values (0.0 to disable, the higher the more simplified)",
        min=0.0, max=100.0,  # No simplification to up to 10% of current magnitude tolerance.
        soft_min=0.0, soft_max=10.0,
        default=1.0,  # default: min slope: 0.005, max frame step: 10.
    )
    path_mode: path_reference_mode
    embed_textures: BoolProperty(
        name="Embed Textures",
        description="Embed textures in FBX binary file (only for \"Copy\" path mode!)",
        default=False,
    )
    batch_mode: EnumProperty(
        name="Batch Mode",
        items=(('OFF', "Off", "Active scene to file"),
               ('SCENE', "Scene", "Each scene as a file"),
               ('COLLECTION', "Collection",
                "Each collection (data-block ones) as a file, does not include content of children collections"),
               ('SCENE_COLLECTION', "Scene Collections",
                "Each collection (including master, non-data-block ones) of each scene as a file, "
                "including content from children collections"),
               ('ACTIVE_SCENE_COLLECTION', "Active Scene Collections",
                "Each collection (including master, non-data-block one) of the active scene as a file, "
                "including content from children collections"),
               ),
    )
    use_batch_own_dir: BoolProperty(
        name="Batch Own Dir",
        description="Create a dir for each exported file",
        default=True,
    )
    use_metadata: BoolProperty(
        name="Use Metadata",
        default=True,
        options={'HIDDEN'},
    )

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        # Are we inside the File browser
        is_file_browser = context.space_data.type == 'FILE_BROWSER'

        export_main(layout, self, is_file_browser)
        export_panel_include(layout, self, is_file_browser)
        export_panel_transform(layout, self)
        export_panel_geometry(layout, self)
        export_panel_armature(layout, self)
        export_panel_animation(layout, self)

    @property
    def check_extension(self):
        return self.batch_mode == 'OFF'

    def execute(self, context):
        from mathutils import Matrix
        if not self.filepath:
            raise Exception("filepath not set")

        global_matrix = (axis_conversion(to_forward=self.axis_forward,
                                         to_up=self.axis_up,
                                         ).to_4x4()
                         if self.use_space_transform else Matrix())

        keywords = self.as_keywords(ignore=("check_existing",
                                            "filter_glob",
                                            "ui_tab",
                                            ))

        keywords["global_matrix"] = global_matrix

        from . import export_fbx_bin
        return export_fbx_bin.save(self, context, **keywords)
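
    # Illustrative invocation (assumption; standard Blender operator call
    # syntax, not part of this file):
    #   bpy.ops.export_scene.fbx(filepath="/tmp/out.fbx", use_selection=True)
    # Note that `check_extension` above only auto-appends ".fbx" while
    # batch_mode == 'OFF'.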


def export_main(layout, operator, is_file_browser):
    row = layout.row(align=True)
    row.prop(operator, "path_mode")
    sub = row.row(align=True)
    sub.enabled = (operator.path_mode == 'COPY')
    sub.prop(operator, "embed_textures", text="", icon='PACKAGE' if operator.embed_textures else 'UGLYPACKAGE')
    if is_file_browser:
        row = layout.row(align=True)
        row.prop(operator, "batch_mode")
        sub = row.row(align=True)
        sub.prop(operator, "use_batch_own_dir", text="", icon='NEWFOLDER')


def export_panel_include(layout, operator, is_file_browser):
    header, body = layout.panel("FBX_export_include", default_closed=False)
    header.label(text="Include")
    if body:
        sublayout = body.column(heading="Limit to")
        sublayout.enabled = (operator.batch_mode == 'OFF')
        if is_file_browser:
            sublayout.prop(operator, "use_selection")
            sublayout.prop(operator, "use_visible")
            sublayout.prop(operator, "use_active_collection")

        body.column().prop(operator, "object_types")
        body.prop(operator, "use_custom_props")


def export_panel_transform(layout, operator):
    header, body = layout.panel("FBX_export_transform", default_closed=False)
    header.label(text="Transform")
    if body:
        body.prop(operator, "global_scale")
        body.prop(operator, "apply_scale_options")

        body.prop(operator, "axis_forward")
        body.prop(operator, "axis_up")

        body.prop(operator, "apply_unit_scale")
        body.prop(operator, "use_space_transform")
        row = body.row()
        row.prop(operator, "bake_space_transform")
        row.label(text="", icon='ERROR')


def export_panel_geometry(layout, operator):
    header, body = layout.panel("FBX_export_geometry", default_closed=True)
    header.label(text="Geometry")
    if body:
        body.prop(operator, "mesh_smooth_type")
        body.prop(operator, "use_subsurf")
        body.prop(operator, "use_mesh_modifiers")
        #sub = body.row()
        # sub.enabled = operator.use_mesh_modifiers and False  # disabled in 2.8...
        #sub.prop(operator, "use_mesh_modifiers_render")
        body.prop(operator, "use_mesh_edges")
        body.prop(operator, "use_triangles")
        sub = body.row()
        # ~ sub.enabled = operator.mesh_smooth_type in {'OFF'}
        sub.prop(operator, "use_tspace")
        body.prop(operator, "colors_type")
        body.prop(operator, "prioritize_active_color")


def export_panel_armature(layout, operator):
    header, body = layout.panel("FBX_export_armature", default_closed=True)
    header.label(text="Armature")
    if body:
        body.prop(operator, "primary_bone_axis")
        body.prop(operator, "secondary_bone_axis")
        body.prop(operator, "armature_nodetype")
        body.prop(operator, "use_armature_deform_only")
        body.prop(operator, "add_leaf_bones")


def export_panel_animation(layout, operator):
    header, body = layout.panel("FBX_export_bake_animation", default_closed=True)
    header.use_property_split = False
    header.prop(operator, "bake_anim", text="")
    header.label(text="Animation")
    if body:
        body.enabled = operator.bake_anim
        body.prop(operator, "bake_anim_use_all_bones")
        body.prop(operator, "bake_anim_use_nla_strips")
        body.prop(operator, "bake_anim_use_all_actions")
        body.prop(operator, "bake_anim_force_startend_keying")
        body.prop(operator, "bake_anim_step")
        body.prop(operator, "bake_anim_simplify_factor")


class IO_FH_fbx(bpy.types.FileHandler):
    bl_idname = "IO_FH_fbx"
    bl_label = "FBX"
    bl_import_operator = "import_scene.fbx"
    bl_export_operator = "export_scene.fbx"
    bl_file_extensions = ".fbx"

    @classmethod
    def poll_drop(cls, context):
        return poll_file_object_drop(context)


def menu_func_import(self, context):
    self.layout.operator(ImportFBX.bl_idname, text="FBX (.fbx)")


def menu_func_export(self, context):
    self.layout.operator(ExportFBX.bl_idname, text="FBX (.fbx)")


classes = (
    ImportFBX,
    ExportFBX,
    IO_FH_fbx,
)


def register():
    for cls in classes:
        bpy.utils.register_class(cls)

    bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.append(menu_func_export)


def unregister():
    bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)

    for cls in classes:
        bpy.utils.unregister_class(cls)


if __name__ == "__main__":
    register()
62
scripts/addons_core/io_scene_fbx/data_types.py
Normal file
@@ -0,0 +1,62 @@
# SPDX-FileCopyrightText: 2006-2012 assimp team
# SPDX-FileCopyrightText: 2013 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

BOOL = b'B'[0]
CHAR = b'C'[0]
INT8 = b'Z'[0]
INT16 = b'Y'[0]
INT32 = b'I'[0]
INT64 = b'L'[0]
FLOAT32 = b'F'[0]
FLOAT64 = b'D'[0]
BYTES = b'R'[0]
STRING = b'S'[0]
INT32_ARRAY = b'i'[0]
INT64_ARRAY = b'l'[0]
FLOAT32_ARRAY = b'f'[0]
FLOAT64_ARRAY = b'd'[0]
BOOL_ARRAY = b'b'[0]
BYTE_ARRAY = b'c'[0]

# Some other misc defines
# Known combinations so far - supposed meaning: A = animatable, A+ = animated, U = UserProp
# VALID_NUMBER_FLAGS = {b'A', b'A+', b'AU', b'A+U'}  # Not used...

# array types - actual length may vary (depending on underlying C implementation)!
import array

# For now, bytes and bool are assumed always 1byte.
ARRAY_BOOL = 'b'
ARRAY_BYTE = 'B'

ARRAY_INT32 = None
ARRAY_INT64 = None
for _t in 'ilq':
    size = array.array(_t).itemsize
    if size == 4:
        ARRAY_INT32 = _t
    elif size == 8:
        ARRAY_INT64 = _t
    if ARRAY_INT32 and ARRAY_INT64:
        break
if not ARRAY_INT32:
    raise Exception("Impossible to get a 4-bytes integer type for array!")
if not ARRAY_INT64:
    raise Exception("Impossible to get an 8-bytes integer type for array!")

ARRAY_FLOAT32 = None
ARRAY_FLOAT64 = None
for _t in 'fd':
    size = array.array(_t).itemsize
    if size == 4:
        ARRAY_FLOAT32 = _t
    elif size == 8:
        ARRAY_FLOAT64 = _t
    if ARRAY_FLOAT32 and ARRAY_FLOAT64:
        break
if not ARRAY_FLOAT32:
    raise Exception("Impossible to get a 4-bytes float type for array!")
if not ARRAY_FLOAT64:
    raise Exception("Impossible to get an 8-bytes float type for array!")
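
# Sanity check (illustrative): on a typical 64-bit CPython build 'i' has
# itemsize 4 and 'l' or 'q' has itemsize 8, so ARRAY_INT32 == 'i' and
# ARRAY_INT64 ends up 'l' or 'q'; after this block,
# array.array(ARRAY_FLOAT64).itemsize == 8 is guaranteed.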
434
scripts/addons_core/io_scene_fbx/encode_bin.py
Normal file
@@ -0,0 +1,434 @@
# SPDX-FileCopyrightText: 2013 Campbell Barton
#
# SPDX-License-Identifier: GPL-2.0-or-later

try:
    from . import data_types
    from .fbx_utils_threading import MultiThreadedTaskConsumer
except:
    import data_types
    from fbx_utils_threading import MultiThreadedTaskConsumer

from struct import pack
from contextlib import contextmanager
import array
import numpy as np
import zlib

_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
_ELEM_META_FORMAT = ...
_ELEM_META_SIZE = ...
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'

# fbx has very strict CRC rules, all based on file timestamp
# until we figure these out, write files at a fixed time. (workaround!)

# Assumes: CreationTime
_TIME_ID = b'1970-01-01 10:00:00:000'
_FILE_ID = b'\x28\xb3\x2a\xeb\xb6\x24\xcc\xc2\xbf\xc8\xb0\x2a\xa9\x2b\xfc\xf1'
_FOOT_ID = b'\xfa\xbc\xab\x09\xd0\xc8\xd4\x66\xb1\x76\xfb\x83\x1c\xf7\x26\x7e'

# Awful exceptions: those "classes" of elements seem to need block sentinel even when having no children and some props.
_ELEMS_ID_ALWAYS_BLOCK_SENTINEL = {b"AnimationStack", b"AnimationLayer"}


class FBXElem:
    __slots__ = (
        "id",
        "props",
        "props_type",
        "elems",

        "_props_length",  # combine length of props
        "_end_offset",  # byte offset from the start of the file.
    )

    def __init__(self, id):
        assert(len(id) < 256)  # length must fit in a uint8
        self.id = id
        self.props = []
        self.props_type = bytearray()
        self.elems = []
        self._end_offset = -1
        self._props_length = -1

    @classmethod
    @contextmanager
    def enable_multithreading_cm(cls):
        """Temporarily enable multithreaded array compression.

        The context manager handles starting up and shutting down the threads.

        Only exits once all the threads are done (either all tasks were completed or an error occurred and the threads
        were stopped prematurely).

        Writing to a file is temporarily disabled as a safeguard."""
        # __enter__()
        orig_func = cls._add_compressed_array_helper
        orig_write = cls._write

        def insert_compressed_array(props, insert_at, data, length):
            # zlib.compress releases the GIL, so can be multithreaded.
            data = zlib.compress(data, 1)
            comp_len = len(data)

            encoding = 1
            data = pack('<3I', length, encoding, comp_len) + data
            props[insert_at] = data

        with MultiThreadedTaskConsumer.new_cpu_bound_cm(insert_compressed_array) as wrapped_func:
            try:
                def _add_compressed_array_helper_multi(self, data, length):
                    # Append a dummy value that will be replaced with the compressed array data later.
                    self.props.append(...)
                    # The index to insert the compressed array into.
                    insert_at = len(self.props) - 1
                    # Schedule the array to be compressed on a separate thread and then inserted into the hierarchy at
                    # `insert_at`.
                    wrapped_func(self.props, insert_at, data, length)

                # As an extra safeguard, temporarily replace the `_write` function to raise an error if called.
                def temp_write(*_args, **_kwargs):
                    raise RuntimeError("Writing is not allowed until multithreaded array compression has been disabled")

                cls._add_compressed_array_helper = _add_compressed_array_helper_multi
                cls._write = temp_write

                # Return control back to the caller of __enter__().
                yield
            finally:
                # __exit__()
                # Restore the original functions.
                cls._add_compressed_array_helper = orig_func
                cls._write = orig_write
                # Exiting the MultiThreadedTaskConsumer context manager will wait for all scheduled tasks to complete.
|
||||
|
||||
def add_bool(self, data):
|
||||
assert(isinstance(data, bool))
|
||||
data = pack('?', data)
|
||||
|
||||
self.props_type.append(data_types.BOOL)
|
||||
self.props.append(data)
|
||||
|
||||
def add_char(self, data):
|
||||
assert(isinstance(data, bytes))
|
||||
assert(len(data) == 1)
|
||||
data = pack('<c', data)
|
||||
|
||||
self.props_type.append(data_types.CHAR)
|
||||
self.props.append(data)
|
||||
|
||||
def add_int8(self, data):
|
||||
assert(isinstance(data, int))
|
||||
data = pack('<b', data)
|
||||
|
||||
self.props_type.append(data_types.INT8)
|
||||
self.props.append(data)
|
||||
|
||||
def add_int16(self, data):
|
||||
assert(isinstance(data, int))
|
||||
data = pack('<h', data)
|
||||
|
||||
self.props_type.append(data_types.INT16)
|
||||
self.props.append(data)
|
||||
|
||||
def add_int32(self, data):
|
||||
assert(isinstance(data, int))
|
||||
data = pack('<i', data)
|
||||
|
||||
self.props_type.append(data_types.INT32)
|
||||
self.props.append(data)
|
||||
|
||||
def add_int64(self, data):
|
||||
assert(isinstance(data, int))
|
||||
data = pack('<q', data)
|
||||
|
||||
self.props_type.append(data_types.INT64)
|
||||
self.props.append(data)
|
||||
|
||||
def add_float32(self, data):
|
||||
assert(isinstance(data, float))
|
||||
data = pack('<f', data)
|
||||
|
||||
self.props_type.append(data_types.FLOAT32)
|
||||
self.props.append(data)
|
||||
|
||||
def add_float64(self, data):
|
||||
assert(isinstance(data, float))
|
||||
data = pack('<d', data)
|
||||
|
||||
self.props_type.append(data_types.FLOAT64)
|
||||
self.props.append(data)
|
||||
|
||||
def add_bytes(self, data):
|
||||
assert(isinstance(data, bytes))
|
||||
data = pack('<I', len(data)) + data
|
||||
|
||||
self.props_type.append(data_types.BYTES)
|
||||
self.props.append(data)
|
||||
|
||||
def add_string(self, data):
|
||||
assert(isinstance(data, bytes))
|
||||
data = pack('<I', len(data)) + data
|
||||
|
||||
self.props_type.append(data_types.STRING)
|
||||
self.props.append(data)
|
||||
|
||||
def add_string_unicode(self, data):
|
||||
assert(isinstance(data, str))
|
||||
data = data.encode('utf8')
|
||||
data = pack('<I', len(data)) + data
|
||||
|
||||
self.props_type.append(data_types.STRING)
|
||||
self.props.append(data)
|
||||
|
||||
def _add_compressed_array_helper(self, data, length):
|
||||
"""Note: This function may be swapped out by enable_multithreading_cm with an equivalent that supports
|
||||
multithreading."""
|
||||
data = zlib.compress(data, 1)
|
||||
comp_len = len(data)
|
||||
|
||||
encoding = 1
|
||||
data = pack('<3I', length, encoding, comp_len) + data
|
||||
self.props.append(data)
|
||||
|
||||
def _add_array_helper(self, data, prop_type, length):
|
||||
self.props_type.append(prop_type)
|
||||
# mimic behavior of fbxconverter (also common sense)
|
||||
# we could make this configurable.
|
||||
encoding = 0 if len(data) <= 128 else 1
|
||||
if encoding == 0:
|
||||
data = pack('<3I', length, encoding, len(data)) + data
|
||||
self.props.append(data)
|
||||
elif encoding == 1:
|
||||
self._add_compressed_array_helper(data, length)
|
||||
|
||||
def _add_parray_helper(self, data, array_type, prop_type):
|
||||
assert (isinstance(data, array.array))
|
||||
assert (data.typecode == array_type)
|
||||
|
||||
length = len(data)
|
||||
|
||||
if _IS_BIG_ENDIAN:
|
||||
data = data[:]
|
||||
data.byteswap()
|
||||
data = data.tobytes()
|
||||
|
||||
self._add_array_helper(data, prop_type, length)
|
||||
|
||||
def _add_ndarray_helper(self, data, dtype, prop_type):
|
||||
assert (isinstance(data, np.ndarray))
|
||||
assert (data.dtype == dtype)
|
||||
|
||||
length = data.size
|
||||
|
||||
if _IS_BIG_ENDIAN and data.dtype.isnative:
|
||||
data = data.byteswap()
|
||||
data = data.tobytes()
|
||||
|
||||
self._add_array_helper(data, prop_type, length)
|
||||
|
||||
def add_int32_array(self, data):
|
||||
if isinstance(data, np.ndarray):
|
||||
self._add_ndarray_helper(data, np.int32, data_types.INT32_ARRAY)
|
||||
else:
|
||||
if not isinstance(data, array.array):
|
||||
data = array.array(data_types.ARRAY_INT32, data)
|
||||
self._add_parray_helper(data, data_types.ARRAY_INT32, data_types.INT32_ARRAY)
|
||||
|
||||
def add_int64_array(self, data):
|
||||
if isinstance(data, np.ndarray):
|
||||
self._add_ndarray_helper(data, np.int64, data_types.INT64_ARRAY)
|
||||
else:
|
||||
if not isinstance(data, array.array):
|
||||
data = array.array(data_types.ARRAY_INT64, data)
|
||||
self._add_parray_helper(data, data_types.ARRAY_INT64, data_types.INT64_ARRAY)
|
||||
|
||||
def add_float32_array(self, data):
|
||||
if isinstance(data, np.ndarray):
|
||||
self._add_ndarray_helper(data, np.float32, data_types.FLOAT32_ARRAY)
|
||||
else:
|
||||
if not isinstance(data, array.array):
|
||||
data = array.array(data_types.ARRAY_FLOAT32, data)
|
||||
self._add_parray_helper(data, data_types.ARRAY_FLOAT32, data_types.FLOAT32_ARRAY)
|
||||
|
||||
def add_float64_array(self, data):
|
||||
if isinstance(data, np.ndarray):
|
||||
self._add_ndarray_helper(data, np.float64, data_types.FLOAT64_ARRAY)
|
||||
else:
|
||||
if not isinstance(data, array.array):
|
||||
data = array.array(data_types.ARRAY_FLOAT64, data)
|
||||
self._add_parray_helper(data, data_types.ARRAY_FLOAT64, data_types.FLOAT64_ARRAY)
|
||||
|
||||
def add_bool_array(self, data):
|
||||
if isinstance(data, np.ndarray):
|
||||
self._add_ndarray_helper(data, bool, data_types.BOOL_ARRAY)
|
||||
else:
|
||||
if not isinstance(data, array.array):
|
||||
data = array.array(data_types.ARRAY_BOOL, data)
|
||||
self._add_parray_helper(data, data_types.ARRAY_BOOL, data_types.BOOL_ARRAY)
|
||||
|
||||
def add_byte_array(self, data):
|
||||
if isinstance(data, np.ndarray):
|
||||
self._add_ndarray_helper(data, np.byte, data_types.BYTE_ARRAY)
|
||||
else:
|
||||
if not isinstance(data, array.array):
|
||||
data = array.array(data_types.ARRAY_BYTE, data)
|
||||
self._add_parray_helper(data, data_types.ARRAY_BYTE, data_types.BYTE_ARRAY)
|
||||
|
||||
# -------------------------
|
||||
# internal helper functions
|
||||
|
||||
def _calc_offsets(self, offset, is_last):
|
||||
"""
|
||||
Call before writing, calculates fixed offsets.
|
||||
"""
|
||||
assert(self._end_offset == -1)
|
||||
assert(self._props_length == -1)
|
||||
|
||||
offset += _ELEM_META_SIZE # 3 uints (or 3 ulonglongs for FBX 7500 and later)
|
||||
offset += 1 + len(self.id) # len + idname
|
||||
|
||||
props_length = 0
|
||||
for data in self.props:
|
||||
# 1 byte for the prop type
|
||||
props_length += 1 + len(data)
|
||||
self._props_length = props_length
|
||||
offset += props_length
|
||||
|
||||
offset = self._calc_offsets_children(offset, is_last)
|
||||
|
||||
self._end_offset = offset
|
||||
return offset
|
||||
|
||||
def _calc_offsets_children(self, offset, is_last):
|
||||
if self.elems:
|
||||
elem_last = self.elems[-1]
|
||||
for elem in self.elems:
|
||||
offset = elem._calc_offsets(offset, (elem is elem_last))
|
||||
offset += _BLOCK_SENTINEL_LENGTH
|
||||
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
|
||||
offset += _BLOCK_SENTINEL_LENGTH
|
||||
|
||||
return offset
|
||||
|
||||
def _write(self, write, tell, is_last):
|
||||
assert(self._end_offset != -1)
|
||||
assert(self._props_length != -1)
|
||||
|
||||
write(pack(_ELEM_META_FORMAT, self._end_offset, len(self.props), self._props_length))
|
||||
|
||||
write(bytes((len(self.id),)))
|
||||
write(self.id)
|
||||
|
||||
for i, data in enumerate(self.props):
|
||||
write(bytes((self.props_type[i],)))
|
||||
write(data)
|
||||
|
||||
self._write_children(write, tell, is_last)
|
||||
|
||||
if tell() != self._end_offset:
|
||||
raise IOError("scope length not reached, "
|
||||
"something is wrong (%d)" % (self._end_offset - tell()))
|
||||
|
||||
def _write_children(self, write, tell, is_last):
|
||||
if self.elems:
|
||||
elem_last = self.elems[-1]
|
||||
for elem in self.elems:
|
||||
assert(elem.id != b'')
|
||||
elem._write(write, tell, (elem is elem_last))
|
||||
write(_BLOCK_SENTINEL_DATA)
|
||||
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
|
||||
write(_BLOCK_SENTINEL_DATA)
|
||||
|
||||
|
||||
def _write_timedate_hack(elem_root):
|
||||
# perform 2 changes
|
||||
# - set the FileID
|
||||
# - set the CreationTime
|
||||
|
||||
ok = 0
|
||||
for elem in elem_root.elems:
|
||||
if elem.id == b'FileId':
|
||||
assert(elem.props_type[0] == b'R'[0])
|
||||
assert(len(elem.props_type) == 1)
|
||||
elem.props.clear()
|
||||
elem.props_type.clear()
|
||||
|
||||
elem.add_bytes(_FILE_ID)
|
||||
ok += 1
|
||||
elif elem.id == b'CreationTime':
|
||||
assert(elem.props_type[0] == b'S'[0])
|
||||
assert(len(elem.props_type) == 1)
|
||||
elem.props.clear()
|
||||
elem.props_type.clear()
|
||||
|
||||
elem.add_string(_TIME_ID)
|
||||
ok += 1
|
||||
|
||||
if ok == 2:
|
||||
break
|
||||
|
||||
if ok != 2:
|
||||
print("Missing fields!")
|
||||
|
||||
|
||||
# FBX 7500 (aka FBX2016) introduces incompatible changes at binary level:
|
||||
# * The NULL block marking end of nested stuff switches from 13 bytes long to 25 bytes long.
|
||||
# * The FBX element metadata (end_offset, prop_count and prop_length) switch from uint32 to uint64.
|
||||
def init_version(fbx_version):
|
||||
global _BLOCK_SENTINEL_LENGTH, _BLOCK_SENTINEL_DATA, _ELEM_META_FORMAT, _ELEM_META_SIZE
|
||||
|
||||
_BLOCK_SENTINEL_LENGTH = ...
|
||||
_BLOCK_SENTINEL_DATA = ...
|
||||
_ELEM_META_FORMAT = ...
|
||||
_ELEM_META_SIZE = ...
|
||||
|
||||
if fbx_version < 7500:
|
||||
_ELEM_META_FORMAT = '<3I'
|
||||
_ELEM_META_SIZE = 12
|
||||
else:
|
||||
_ELEM_META_FORMAT = '<3Q'
|
||||
_ELEM_META_SIZE = 24
|
||||
_BLOCK_SENTINEL_LENGTH = _ELEM_META_SIZE + 1
|
||||
_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)
|
||||
|
||||
|
||||
def write(fn, elem_root, version):
|
||||
assert(elem_root.id == b'')
|
||||
|
||||
with open(fn, 'wb') as f:
|
||||
write = f.write
|
||||
tell = f.tell
|
||||
|
||||
init_version(version)
|
||||
|
||||
write(_HEAD_MAGIC)
|
||||
write(pack('<I', version))
|
||||
|
||||
# hack since we don't decode time.
|
||||
# ideally we would _not_ modify this data.
|
||||
_write_timedate_hack(elem_root)
|
||||
|
||||
elem_root._calc_offsets_children(tell(), False)
|
||||
elem_root._write_children(write, tell, False)
|
||||
|
||||
write(_FOOT_ID)
|
||||
write(b'\x00' * 4)
|
||||
|
||||
# padding for alignment (values between 1 & 16 observed)
|
||||
# if already aligned to 16, add a full 16 bytes padding.
|
||||
ofs = tell()
|
||||
pad = ((ofs + 15) & ~15) - ofs
|
||||
if pad == 0:
|
||||
pad = 16
|
||||
|
||||
write(b'\0' * pad)
|
||||
|
||||
write(pack('<I', version))
|
||||
|
||||
# unknown magic (always the same)
|
||||
write(b'\0' * 120)
|
||||
write(b'\xf8\x5a\x8c\x6a\xde\xf5\xd9\x7e\xec\xe9\x0c\xe3\x75\x8f\x29\x0b')
|
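For orientation, a minimal sketch of driving the encoder above when the module is used standalone (element ids and the file name are hypothetical; a real FBX additionally needs header elements such as FileId and CreationTime, so _write_timedate_hack() will report missing fields for this toy tree):

    import encode_bin

    with encode_bin.FBXElem.enable_multithreading_cm():
        root = encode_bin.FBXElem(b'')          # the root element must have an empty id
        node = encode_bin.FBXElem(b'Vertices')  # hypothetical element id
        node.add_float64_array([0.0] * 1000)    # arrays over 128 bytes are zlib-compressed, here on worker threads
        root.elems.append(node)
    # Writing is blocked while the context manager is active, so write afterwards.
    encode_bin.write('minimal.fbx', root, 7400)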
3742
scripts/addons_core/io_scene_fbx/export_fbx_bin.py
Normal file
File diff suppressed because it is too large
341
scripts/addons_core/io_scene_fbx/fbx2json.py
Executable file
@ -0,0 +1,341 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2006-2012 assimp team
# SPDX-FileCopyrightText: 2013 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

"""
Usage
=====

   fbx2json [FILES]...

This script will write a JSON file for each FBX argument given.


Output
======

The JSON data is formatted into a list of nested lists of 4 items:

   ``[id, [data, ...], "data_types", [subtree, ...]]``

Where each list may be empty, and the items in
the subtree are formatted the same way.

data_types is a string, aligned with data that specifies a type
for each property.

The types are as follows:

* 'Z': - INT8
* 'Y': - INT16
* 'B': - BOOL
* 'C': - CHAR
* 'I': - INT32
* 'F': - FLOAT32
* 'D': - FLOAT64
* 'L': - INT64
* 'R': - BYTES
* 'S': - STRING
* 'f': - FLOAT32_ARRAY
* 'i': - INT32_ARRAY
* 'd': - FLOAT64_ARRAY
* 'l': - INT64_ARRAY
* 'b': - BOOL ARRAY
* 'c': - BYTE ARRAY

Note that key:value pairs aren't used since the ids are not
ensured to be unique.
"""


# ----------------------------------------------------------------------------
# FBX Binary Parser

from struct import unpack
import array
import zlib

# at the end of each nested block, there is a NUL record to indicate
# that the sub-scope exists (i.e. to distinguish between P: and P : {})
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
read_fbx_elem_uint = ...
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
from collections import namedtuple
FBXElem = namedtuple("FBXElem", ("id", "props", "props_type", "elems"))
del namedtuple


def read_uint(read):
    return unpack(b'<I', read(4))[0]


def read_uint64(read):
    return unpack(b'<Q', read(8))[0]


def read_ubyte(read):
    return unpack(b'B', read(1))[0]


def read_string_ubyte(read):
    size = read_ubyte(read)
    data = read(size)
    return data


def unpack_array(read, array_type, array_stride, array_byteswap):
    length = read_uint(read)
    encoding = read_uint(read)
    comp_len = read_uint(read)

    data = read(comp_len)

    if encoding == 0:
        pass
    elif encoding == 1:
        data = zlib.decompress(data)

    assert(length * array_stride == len(data))

    data_array = array.array(array_type, data)
    if array_byteswap and _IS_BIG_ENDIAN:
        data_array.byteswap()
    return data_array


read_data_dict = {
    b'Z'[0]: lambda read: unpack(b'<b', read(1))[0],  # 8 bit int
    b'Y'[0]: lambda read: unpack(b'<h', read(2))[0],  # 16 bit int
    b'B'[0]: lambda read: unpack(b'?', read(1))[0],  # 1 bit bool (yes/no)
    b'C'[0]: lambda read: unpack(b'<c', read(1))[0],  # char
    b'I'[0]: lambda read: unpack(b'<i', read(4))[0],  # 32 bit int
    b'F'[0]: lambda read: unpack(b'<f', read(4))[0],  # 32 bit float
    b'D'[0]: lambda read: unpack(b'<d', read(8))[0],  # 64 bit float
    b'L'[0]: lambda read: unpack(b'<q', read(8))[0],  # 64 bit int
    b'R'[0]: lambda read: read(read_uint(read)),  # binary data
    b'S'[0]: lambda read: read(read_uint(read)),  # string data
    b'f'[0]: lambda read: unpack_array(read, 'f', 4, False),  # array (float)
    b'i'[0]: lambda read: unpack_array(read, 'i', 4, True),  # array (int)
    b'd'[0]: lambda read: unpack_array(read, 'd', 8, False),  # array (double)
    b'l'[0]: lambda read: unpack_array(read, 'q', 8, True),  # array (long)
    b'b'[0]: lambda read: unpack_array(read, 'b', 1, False),  # array (bool)
    b'c'[0]: lambda read: unpack_array(read, 'B', 1, False),  # array (ubyte)
}


# FBX 7500 (aka FBX2016) introduces incompatible changes at binary level:
# * The NULL block marking end of nested stuff switches from 13 bytes long to 25 bytes long.
# * The FBX element metadata (end_offset, prop_count and prop_length) switch from uint32 to uint64.
def init_version(fbx_version):
    global _BLOCK_SENTINEL_LENGTH, _BLOCK_SENTINEL_DATA, read_fbx_elem_uint

    assert(_BLOCK_SENTINEL_LENGTH == ...)
    assert(_BLOCK_SENTINEL_DATA == ...)

    if fbx_version < 7500:
        _BLOCK_SENTINEL_LENGTH = 13
        read_fbx_elem_uint = read_uint
    else:
        _BLOCK_SENTINEL_LENGTH = 25
        read_fbx_elem_uint = read_uint64
    _BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)


def read_elem(read, tell, use_namedtuple):
    # [0] the offset at which this block ends
    # [1] the number of properties in the scope
    # [2] the length of the property list
    end_offset = read_fbx_elem_uint(read)
    if end_offset == 0:
        return None

    prop_count = read_fbx_elem_uint(read)
    prop_length = read_fbx_elem_uint(read)

    elem_id = read_string_ubyte(read)        # elem name of the scope/key
    elem_props_type = bytearray(prop_count)  # elem property types
    elem_props_data = [None] * prop_count    # elem properties (if any)
    elem_subtree = []                        # elem children (if any)

    for i in range(prop_count):
        data_type = read(1)[0]
        elem_props_data[i] = read_data_dict[data_type](read)
        elem_props_type[i] = data_type

    if tell() < end_offset:
        while tell() < (end_offset - _BLOCK_SENTINEL_LENGTH):
            elem_subtree.append(read_elem(read, tell, use_namedtuple))

        if read(_BLOCK_SENTINEL_LENGTH) != _BLOCK_SENTINEL_DATA:
            raise IOError("failed to read nested block sentinel, "
                          "expected all bytes to be 0")

    if tell() != end_offset:
        raise IOError("scope length not reached, something is wrong")

    args = (elem_id, elem_props_data, elem_props_type, elem_subtree)
    return FBXElem(*args) if use_namedtuple else args


def parse_version(fn):
    """
    Return the FBX version,
    if the file isn't a binary FBX return zero.
    """
    with open(fn, 'rb') as f:
        read = f.read

        if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
            return 0

        return read_uint(read)


def parse(fn, use_namedtuple=True):
    root_elems = []

    with open(fn, 'rb') as f:
        read = f.read
        tell = f.tell

        if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
            raise IOError("Invalid header")

        fbx_version = read_uint(read)
        init_version(fbx_version)

        while True:
            elem = read_elem(read, tell, use_namedtuple)
            if elem is None:
                break
            root_elems.append(elem)

    args = (b'', [], bytearray(0), root_elems)
    return FBXElem(*args) if use_namedtuple else args, fbx_version


# ----------------------------------------------------------------------------
# Inline Modules

# pyfbx.data_types
data_types = type(array)("data_types")
data_types.__dict__.update(
    dict(
        INT8=b'Z'[0],
        INT16=b'Y'[0],
        BOOL=b'B'[0],
        CHAR=b'C'[0],
        INT32=b'I'[0],
        FLOAT32=b'F'[0],
        FLOAT64=b'D'[0],
        INT64=b'L'[0],
        BYTES=b'R'[0],
        STRING=b'S'[0],
        FLOAT32_ARRAY=b'f'[0],
        INT32_ARRAY=b'i'[0],
        FLOAT64_ARRAY=b'd'[0],
        INT64_ARRAY=b'l'[0],
        BOOL_ARRAY=b'b'[0],
        BYTE_ARRAY=b'c'[0],
    ))

# pyfbx.parse_bin
parse_bin = type(array)("parse_bin")
parse_bin.__dict__.update(
    dict(
        parse=parse
    ))


# ----------------------------------------------------------------------------
# JSON Converter
# from pyfbx import parse_bin, data_types
import json
import array


def fbx2json_property_as_string(prop, prop_type):
    if prop_type == data_types.STRING:
        prop_str = prop.decode('utf-8')
        prop_str = prop_str.replace('\x00\x01', '::')
        return json.dumps(prop_str)
    else:
        prop_py_type = type(prop)
        if prop_py_type == bytes:
            return json.dumps(repr(prop)[2:-1])
        elif prop_py_type == bool:
            return json.dumps(prop)
        elif prop_py_type == array.array:
            return repr(list(prop))

    return repr(prop)


def fbx2json_properties_as_string(fbx_elem):
    return ", ".join(fbx2json_property_as_string(*prop_item)
                     for prop_item in zip(fbx_elem.props,
                                          fbx_elem.props_type))


def fbx2json_recurse(fw, fbx_elem, ident, is_last):
    fbx_elem_id = fbx_elem.id.decode('utf-8')
    fw('%s["%s", ' % (ident, fbx_elem_id))
    fw('[%s], ' % fbx2json_properties_as_string(fbx_elem))
    fw('"%s", ' % (fbx_elem.props_type.decode('ascii')))

    fw('[')
    if fbx_elem.elems:
        fw('\n')
        ident_sub = ident + "    "
        for fbx_elem_sub in fbx_elem.elems:
            fbx2json_recurse(fw, fbx_elem_sub, ident_sub,
                             fbx_elem_sub is fbx_elem.elems[-1])
    fw(']')

    fw(']%s' % ('' if is_last else ',\n'))


def fbx2json(fn):
    import os

    fn_json = "%s.json" % os.path.splitext(fn)[0]
    print("Writing: %r " % fn_json, end="")
    fbx_root_elem, fbx_version = parse(fn, use_namedtuple=True)
    print("(Version %d) ..." % fbx_version)

    with open(fn_json, 'w', encoding="ascii", errors='xmlcharrefreplace') as f:
        fw = f.write
        fw('[\n')
        ident_sub = "    "
        for fbx_elem_sub in fbx_root_elem.elems:
            fbx2json_recurse(f.write, fbx_elem_sub, ident_sub,
                             fbx_elem_sub is fbx_root_elem.elems[-1])
        fw(']\n')


# ----------------------------------------------------------------------------
# Command Line

def main():
    import sys

    if "--help" in sys.argv:
        print(__doc__)
        return

    for arg in sys.argv[1:]:
        try:
            fbx2json(arg)
        except:
            print("Failed to convert %r, error:" % arg)

            import traceback
            traceback.print_exc()


if __name__ == "__main__":
    main()
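As a sketch of the resulting JSON shape (element ids and values hypothetical, and whitespace approximated), a header subtree serialized by this script looks like:

    ["FBXHeaderExtension", [], "", [
        ["FBXHeaderVersion", [1003], "I", []],
        ["FBXVersion", [7400], "I", []]
    ]],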
1931
scripts/addons_core/io_scene_fbx/fbx_utils.py
Normal file
File diff suppressed because it is too large
194
scripts/addons_core/io_scene_fbx/fbx_utils_threading.py
Normal file
@ -0,0 +1,194 @@
# SPDX-FileCopyrightText: 2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

from contextlib import contextmanager, nullcontext
import os
from queue import SimpleQueue

# Note: `bpy` cannot be imported here because this module is also used by the fbx2json.py and json2fbx.py scripts.

# For debugging/profiling purposes, can be modified at runtime to force single-threaded execution.
_MULTITHREADING_ENABLED = True
# The concurrent.futures module may not work or may not be available on WebAssembly platforms wasm32-emscripten and
# wasm32-wasi.
try:
    from concurrent.futures import ThreadPoolExecutor
except ModuleNotFoundError:
    _MULTITHREADING_ENABLED = False
    ThreadPoolExecutor = None
else:
    try:
        # The module may be available, but not be fully functional. An error may be raised when attempting to start a
        # new thread.
        with ThreadPoolExecutor() as tpe:
            # Attempt to start a thread by submitting a callable.
            tpe.submit(lambda: None)
    except Exception:
        # Assume that multithreading is not supported and fall back to single-threaded execution.
        _MULTITHREADING_ENABLED = False


def get_cpu_count():
    """Get the number of cpus assigned to the current process if that information is available on this system.
    If not available, get the total number of cpus.
    If the cpu count is indeterminable, it is assumed that there is only 1 cpu available."""
    sched_getaffinity = getattr(os, "sched_getaffinity", None)
    if sched_getaffinity is not None:
        # Return the number of cpus assigned to the current process.
        return len(sched_getaffinity(0))
    count = os.cpu_count()
    return count if count is not None else 1


class MultiThreadedTaskConsumer:
    """Helper class that encapsulates everything needed to run a function on separate threads, with a single-threaded
    fallback if multithreading is not available.

    Lower overhead than typical use of ThreadPoolExecutor because no Future objects are returned, which makes this class
    more suitable to running many smaller tasks.

    As with any threaded parallelization, because of Python's Global Interpreter Lock, only one thread can execute
    Python code at a time, so threaded parallelization is only useful when the functions used release the GIL, such as
    many IO related functions."""
    # A special task value used to signal task consumer threads to shut down.
    _SHUT_DOWN_THREADS = object()

    __slots__ = ("_consumer_function", "_shared_task_queue", "_task_consumer_futures", "_executor",
                 "_max_consumer_threads", "_shutting_down", "_max_queue_per_consumer")

    def __init__(self, consumer_function, max_consumer_threads, max_queue_per_consumer=5):
        # It's recommended to use MultiThreadedTaskConsumer.new_cpu_bound_cm() instead of creating new instances
        # directly.
        # __init__ should only be called after checking _MULTITHREADING_ENABLED.
        assert(_MULTITHREADING_ENABLED)
        # The function that will be called on separate threads to consume tasks.
        self._consumer_function = consumer_function
        # All the threads share a single queue. This is a simplistic approach, but it is unlikely to be problematic
        # unless the main thread is expected to wait a long time for the consumer threads to finish.
        self._shared_task_queue = SimpleQueue()
        # Reference to each thread is kept through the returned Future objects. This is used as part of determining when
        # new threads should be started and is used to be able to receive and handle exceptions from the threads.
        self._task_consumer_futures = []
        # Create the executor.
        self._executor = ThreadPoolExecutor(max_workers=max_consumer_threads)
        # Technically the max workers of the executor is accessible through its `._max_workers`, but since it's private,
        # meaning it could be changed without warning, we'll store the max workers/consumers ourselves.
        self._max_consumer_threads = max_consumer_threads
        # The maximum task queue size (before another consumer thread is started) increases by this amount with every
        # additional consumer thread.
        self._max_queue_per_consumer = max_queue_per_consumer
        # When shutting down the threads, this is set to True as an extra safeguard to prevent new tasks being
        # scheduled.
        self._shutting_down = False

    @classmethod
    def new_cpu_bound_cm(cls, consumer_function, other_cpu_bound_threads_in_use=1, hard_max_threads=32):
        """Return a context manager that, when entered, returns a wrapper around `consumer_function` that schedules
        `consumer_function` to be run on a separate thread.

        If the system can't use multithreading, then the context manager's returned function will instead be the input
        `consumer_function` argument, causing tasks to be run immediately on the calling thread.

        When exiting the context manager, it waits for all scheduled tasks to complete and prevents the creation of new
        tasks, similar to calling ThreadPoolExecutor.shutdown(). For these reasons, the wrapped function should only be
        called from the thread that entered the context manager, otherwise there is no guarantee that all tasks will get
        scheduled before the context manager exits.

        Any task that fails with an exception will cause all task consumer threads to stop.

        The maximum number of threads used matches the number of cpus available up to a maximum of `hard_max_threads`.
        `hard_max_threads`'s default of 32 matches ThreadPoolExecutor's default behaviour.

        The maximum number of threads used is decreased by `other_cpu_bound_threads_in_use`. Defaulting to `1`, assuming
        that the calling thread will also be doing CPU-bound work.

        Most IO-bound tasks can probably use a ThreadPoolExecutor directly instead because there will typically be fewer
        tasks and, on average, each individual task will take longer.
        If needed, `cls.new_cpu_bound_cm(consumer_function, -4)` could be suitable for lots of small IO-bound tasks,
        because it ensures a minimum of 5 threads, like the default ThreadPoolExecutor."""
        if _MULTITHREADING_ENABLED:
            max_threads = get_cpu_count() - other_cpu_bound_threads_in_use
            max_threads = min(max_threads, hard_max_threads)
            if max_threads > 0:
                return cls(consumer_function, max_threads)._wrap_executor_cm()
        # Fall back to single-threaded.
        return nullcontext(consumer_function)

    def _task_consumer_callable(self):
        """Callable that is run by each task consumer thread.
        Signals the other task consumer threads to stop when stopped intentionally or when an exception occurs."""
        try:
            while True:
                # Blocks until it can get a task.
                task_args = self._shared_task_queue.get()

                if task_args is self._SHUT_DOWN_THREADS:
                    # This special value signals that it's time for all the threads to stop.
                    break
                else:
                    # Call the task consumer function.
                    self._consumer_function(*task_args)
        finally:
            # Either the thread has been told to shut down because it received _SHUT_DOWN_THREADS or an exception has
            # occurred.
            # Add _SHUT_DOWN_THREADS to the queue so that the other consumer threads will also shut down.
            self._shared_task_queue.put(self._SHUT_DOWN_THREADS)

    def _schedule_task(self, *args):
        """Task consumer threads are only started as tasks are added.

        To mitigate starting lots of threads if many tasks are scheduled in quick succession, new threads are only
        started if the number of queued tasks grows too large.

        This function is a slight misuse of ThreadPoolExecutor. Normally each task to be scheduled would be submitted
        through ThreadPoolExecutor.submit, but doing so is noticeably slower for small tasks. We could start new Thread
        instances manually without using ThreadPoolExecutor, but ThreadPoolExecutor gives us a higher level API for
        waiting for threads to finish and handling exceptions without having to implement an API using Thread ourselves.
        """
        if self._shutting_down:
            # Shouldn't occur through normal usage.
            raise RuntimeError("Cannot schedule new tasks after shutdown")
        # Schedule the task by adding it to the task queue.
        self._shared_task_queue.put(args)
        # Check if more consumer threads need to be added to account for the rate at which tasks are being scheduled
        # compared to the rate at which tasks are being consumed.
        current_consumer_count = len(self._task_consumer_futures)
        if current_consumer_count < self._max_consumer_threads:
            # The max queue size increases as new threads are added, otherwise, by the time the next task is added, it's
            # likely that the queue size will still be over the max, causing another new thread to be added immediately.
            # Increasing the max queue size whenever a new thread is started gives some time for the new thread to start
            # up and begin consuming tasks before it's determined that another thread is needed.
            max_queue_size_for_current_consumers = self._max_queue_per_consumer * current_consumer_count

            if self._shared_task_queue.qsize() > max_queue_size_for_current_consumers:
                # Add a new consumer thread because the queue has grown too large.
                self._task_consumer_futures.append(self._executor.submit(self._task_consumer_callable))

    @contextmanager
    def _wrap_executor_cm(self):
        """Wrap the executor's context manager to instead return self._schedule_task and such that the threads
        automatically start shutting down before the executor itself starts shutting down."""
        # .__enter__()
        # Exiting the context manager of the executor will wait for all threads to finish and prevent new
        # threads from being created, as if its shutdown() method had been called.
        with self._executor:
            try:
                yield self._schedule_task
            finally:
                # .__exit__()
                self._shutting_down = True
                # Signal all consumer threads to finish up and shut down so that the executor can shut down.
                # When this is run on the same thread that schedules new tasks, this guarantees that no more tasks will
                # be scheduled after the consumer threads start to shut down.
                self._shared_task_queue.put(self._SHUT_DOWN_THREADS)

                # Because `self._executor` was entered with a context manager, it will wait for all the consumer threads
                # to finish even if we propagate an exception from one of the threads here.
                for future in self._task_consumer_futures:
                    # .exception() waits for the future to finish and returns its raised exception or None.
                    ex = future.exception()
                    if ex is not None:
                        # If one of the threads raised an exception, propagate it to the main thread.
                        # Only the first exception will be propagated if there were multiple.
                        raise ex
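A minimal usage sketch of the consumer above (the worker function, key, and payload are hypothetical; the pattern only pays off because zlib.compress releases the GIL while compressing):

    import zlib

    results = {}

    def compress_task(key, payload):
        # Runs on a worker thread; dict assignment is atomic under the GIL.
        results[key] = zlib.compress(payload, 1)

    with MultiThreadedTaskConsumer.new_cpu_bound_cm(compress_task) as schedule:
        for i in range(100):
            schedule(i, b'\x00' * 100000)  # tasks queue up; threads start as the queue grows
    # On exit: all tasks have completed and the first worker exception, if any, is re-raised.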
4024
scripts/addons_core/io_scene_fbx/import_fbx.py
Normal file
File diff suppressed because it is too large
165
scripts/addons_core/io_scene_fbx/json2fbx.py
Executable file
@ -0,0 +1,165 @@
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2014-2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

"""
Usage
=====

   json2fbx [FILES]...

This script will write a binary FBX file for each JSON argument given.


Input
======

The JSON data is formatted into a list of nested lists of 4 items:

   ``[id, [data, ...], "data_types", [subtree, ...]]``

Where each list may be empty, and the items in
the subtree are formatted the same way.

data_types is a string, aligned with data that specifies a type
for each property.

The types are as follows:

* 'Z': - INT8
* 'Y': - INT16
* 'B': - BOOL
* 'C': - CHAR
* 'I': - INT32
* 'F': - FLOAT32
* 'D': - FLOAT64
* 'L': - INT64
* 'R': - BYTES
* 'S': - STRING
* 'f': - FLOAT32_ARRAY
* 'i': - INT32_ARRAY
* 'd': - FLOAT64_ARRAY
* 'l': - INT64_ARRAY
* 'b': - BOOL ARRAY
* 'c': - BYTE ARRAY

Note that key:value pairs aren't used since the ids are not
ensured to be unique.
"""


def elem_empty(elem, name):
    import encode_bin
    sub_elem = encode_bin.FBXElem(name)
    if elem is not None:
        elem.elems.append(sub_elem)
    return sub_elem


def parse_json_rec(fbx_root, json_node):
    name, data, data_types, children = json_node
    ver = 0

    assert(len(data_types) == len(data))

    e = elem_empty(fbx_root, name.encode())
    for d, dt in zip(data, data_types):
        if dt == "B":
            e.add_bool(d)
        elif dt == "C":
            d = eval('b"""' + d + '"""')
            e.add_char(d)
        elif dt == "Z":
            e.add_int8(d)
        elif dt == "Y":
            e.add_int16(d)
        elif dt == "I":
            e.add_int32(d)
        elif dt == "L":
            e.add_int64(d)
        elif dt == "F":
            e.add_float32(d)
        elif dt == "D":
            e.add_float64(d)
        elif dt == "R":
            d = eval('b"""' + d + '"""')
            e.add_bytes(d)
        elif dt == "S":
            d = d.encode().replace(b"::", b"\x00\x01")
            e.add_string(d)
        elif dt == "i":
            e.add_int32_array(d)
        elif dt == "l":
            e.add_int64_array(d)
        elif dt == "f":
            e.add_float32_array(d)
        elif dt == "d":
            e.add_float64_array(d)
        elif dt == "b":
            e.add_bool_array(d)
        elif dt == "c":
            e.add_byte_array(d)

    if name == "FBXVersion":
        assert(data_types == "I")
        ver = int(data[0])

    for child in children:
        _ver = parse_json_rec(e, child)
        if _ver:
            ver = _ver

    return ver


def parse_json(json_root):
    root = elem_empty(None, b"")
    ver = 0

    for n in json_root:
        _ver = parse_json_rec(root, n)
        if _ver:
            ver = _ver

    return root, ver


def json2fbx(fn):
    import os
    import json

    import encode_bin

    fn_fbx = "%s.fbx" % os.path.splitext(fn)[0]
    print("Writing: %r " % fn_fbx, end="")
    with open(fn) as f_json:
        json_root = json.load(f_json)
    with encode_bin.FBXElem.enable_multithreading_cm():
        fbx_root, fbx_version = parse_json(json_root)
    print("(Version %d) ..." % fbx_version)
    encode_bin.write(fn_fbx, fbx_root, fbx_version)


# ----------------------------------------------------------------------------
# Command Line

def main():
    import sys

    if "--help" in sys.argv:
        print(__doc__)
        return

    for arg in sys.argv[1:]:
        try:
            json2fbx(arg)
        except:
            print("Failed to convert %r, error:" % arg)

            import traceback
            traceback.print_exc()


if __name__ == "__main__":
    main()
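Together with fbx2json.py this gives a text round-trip for binary FBX files (lossless apart from the fixed FileId/CreationTime that encode_bin rewrites), which is convenient for diffing and hand-patching them. A sketch with a hypothetical file name:

    python3 fbx2json.py cube.fbx     # writes cube.json
    python3 json2fbx.py cube.json    # writes cube.fbx back from the JSON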
274
scripts/addons_core/io_scene_fbx/parse_fbx.py
Normal file
@ -0,0 +1,274 @@
# SPDX-FileCopyrightText: 2006-2012 assimp team
# SPDX-FileCopyrightText: 2013 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later

__all__ = (
    "parse",
    "data_types",
    "parse_version",
    "FBXElem",
)

from struct import unpack
import array
import zlib
from io import BytesIO

from . import data_types
from .fbx_utils_threading import MultiThreadedTaskConsumer

# at the end of each nested block, there is a NUL record to indicate
# that the sub-scope exists (i.e. to distinguish between P: and P : {})
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
read_fbx_elem_start = ...
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
from collections import namedtuple
FBXElem = namedtuple("FBXElem", ("id", "props", "props_type", "elems"))
del namedtuple


def read_uint(read):
    return unpack(b'<I', read(4))[0]


def read_ubyte(read):
    return unpack(b'B', read(1))[0]


def read_string_ubyte(read):
    size = read_ubyte(read)
    data = read(size)
    return data


def read_array_params(read):
    return unpack(b'<III', read(12))


def read_elem_start32(read):
    end_offset, prop_count, _prop_length, elem_id_size = unpack(b'<IIIB', read(13))
    elem_id = read(elem_id_size) if elem_id_size else b""
    return end_offset, prop_count, elem_id


def read_elem_start64(read):
    end_offset, prop_count, _prop_length, elem_id_size = unpack(b'<QQQB', read(25))
    elem_id = read(elem_id_size) if elem_id_size else b""
    return end_offset, prop_count, elem_id


def _create_array(data, length, array_type, array_stride, array_byteswap):
    """Create an array from FBX data."""
    # If size of the data does not match the expected size of the array, then something is wrong with the code or the
    # FBX file.
    assert(length * array_stride == len(data))

    data_array = array.array(array_type, data)
    if array_byteswap and _IS_BIG_ENDIAN:
        data_array.byteswap()
    return data_array


def _decompress_and_insert_array(elem_props_data, index_to_set, compressed_array_args):
    """Decompress array data and insert the created array into the FBX tree being parsed.

    This is usually called from a separate thread to the main thread."""
    compressed_data, length, array_type, array_stride, array_byteswap = compressed_array_args

    # zlib.decompress releases the Global Interpreter Lock, so another thread can run code while waiting for the
    # decompression to complete.
    data = zlib.decompress(compressed_data, bufsize=length * array_stride)

    # Create and insert the array into the parsed FBX hierarchy.
    elem_props_data[index_to_set] = _create_array(data, length, array_type, array_stride, array_byteswap)


def unpack_array(read, array_type, array_stride, array_byteswap):
    """Unpack an array from an FBX file being parsed.

    If the array data is compressed, the compressed data is combined with the other arguments into a tuple to prepare
    for decompressing on a separate thread if possible.

    If the array data is not compressed, the array is created.

    Returns (tuple, True) or (array, False)."""
    length, encoding, comp_len = read_array_params(read)

    data = read(comp_len)

    if encoding == 1:
        # Array data requires decompression, which is done in a separate thread if possible.
        return (data, length, array_type, array_stride, array_byteswap), True
    else:
        return _create_array(data, length, array_type, array_stride, array_byteswap), False


read_array_dict = {
    b'b'[0]: lambda read: unpack_array(read, data_types.ARRAY_BOOL, 1, False),  # bool
    b'c'[0]: lambda read: unpack_array(read, data_types.ARRAY_BYTE, 1, False),  # ubyte
    b'i'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT32, 4, True),  # int
    b'l'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT64, 8, True),  # long
    b'f'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT32, 4, False),  # float
    b'd'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT64, 8, False),  # double
}

read_data_dict = {
    b'Z'[0]: lambda read: unpack(b'<b', read(1))[0],  # byte
    b'Y'[0]: lambda read: unpack(b'<h', read(2))[0],  # 16 bit int
    b'B'[0]: lambda read: unpack(b'?', read(1))[0],  # 1 bit bool (yes/no)
    b'C'[0]: lambda read: unpack(b'<c', read(1))[0],  # char
    b'I'[0]: lambda read: unpack(b'<i', read(4))[0],  # 32 bit int
    b'F'[0]: lambda read: unpack(b'<f', read(4))[0],  # 32 bit float
    b'D'[0]: lambda read: unpack(b'<d', read(8))[0],  # 64 bit float
    b'L'[0]: lambda read: unpack(b'<q', read(8))[0],  # 64 bit int
    b'R'[0]: lambda read: read(read_uint(read)),  # binary data
    b'S'[0]: lambda read: read(read_uint(read)),  # string data
}


# FBX 7500 (aka FBX2016) introduces incompatible changes at binary level:
# * The NULL block marking end of nested stuff switches from 13 bytes long to 25 bytes long.
# * The FBX element metadata (end_offset, prop_count and prop_length) switch from uint32 to uint64.
def init_version(fbx_version):
    global _BLOCK_SENTINEL_LENGTH, _BLOCK_SENTINEL_DATA, read_fbx_elem_start

    _BLOCK_SENTINEL_LENGTH = ...
    _BLOCK_SENTINEL_DATA = ...

    if fbx_version < 7500:
        _BLOCK_SENTINEL_LENGTH = 13
        read_fbx_elem_start = read_elem_start32
    else:
        _BLOCK_SENTINEL_LENGTH = 25
        read_fbx_elem_start = read_elem_start64
    _BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)


def read_elem(read, tell, use_namedtuple, decompress_array_func, tell_file_offset=0):
    # [0] the offset at which this block ends
    # [1] the number of properties in the scope
    # [2] the length of the property list
    # [3] elem name length
    # [4] elem name of the scope/key
    # read_fbx_elem_start does not return [2] because we don't use it and does not return [3] because it is only used to
    # get [4].
    end_offset, prop_count, elem_id = read_fbx_elem_start(read)
    if end_offset == 0:
        return None

    elem_props_type = bytearray(prop_count)  # elem property types
    elem_props_data = [None] * prop_count    # elem properties (if any)
    elem_subtree = []                        # elem children (if any)

    for i in range(prop_count):
        data_type = read(1)[0]
        if data_type in read_array_dict:
            val, needs_decompression = read_array_dict[data_type](read)
            if needs_decompression:
                # Array decompression releases the GIL, so can be multithreaded (if possible on the current system) for
                # performance.
                # After decompressing, the array is inserted into elem_props_data[i].
                decompress_array_func(elem_props_data, i, val)
            else:
                elem_props_data[i] = val
        else:
            elem_props_data[i] = read_data_dict[data_type](read)
        elem_props_type[i] = data_type

    pos = tell()
    local_end_offset = end_offset - tell_file_offset

    if pos < local_end_offset:
        # The default BufferedReader used when `open()`-ing files in 'rb' mode has to get the raw stream position from
        # the OS every time its tell() function is called. This is about 10 times slower than the tell() function of
        # BytesIO objects, so reading chunks of bytes from the file into memory at once and exposing them through
        # BytesIO can give better performance. We know the total size of each element's subtree so can read entire
        # subtrees into memory at a time.
        # The "Objects" element's subtree, however, usually makes up most of the file, so we specifically avoid reading
        # all its sub-elements into memory at once to reduce memory requirements at the cost of slightly worse
        # performance when memory is not a concern.
        # If we're currently reading directly from the opened file, then tell_file_offset will be zero.
        if tell_file_offset == 0 and elem_id != b"Objects":
            block_bytes_remaining = local_end_offset - pos

            # Read the entire subtree
            sub_elem_bytes = read(block_bytes_remaining)
            num_bytes_read = len(sub_elem_bytes)
            if num_bytes_read != block_bytes_remaining:
                raise IOError("failed to read complete nested block, expected %i bytes, but only got %i"
                              % (block_bytes_remaining, num_bytes_read))

            # BytesIO provides IO API for reading bytes in memory, so we can use the same code as reading bytes directly
            # from a file.
            f = BytesIO(sub_elem_bytes)
            tell = f.tell
            read = f.read
            # The new `tell` function starts at zero and is offset by `pos` bytes from the start of the file.
            start_sub_pos = 0
            tell_file_offset = pos
            sub_tree_end = block_bytes_remaining - _BLOCK_SENTINEL_LENGTH
        else:
            # The `tell` function is unchanged, so starts at the value returned by `tell()`, which is still `pos`
            # because no reads have been made since then.
            start_sub_pos = pos
            sub_tree_end = local_end_offset - _BLOCK_SENTINEL_LENGTH

        sub_pos = start_sub_pos
        while sub_pos < sub_tree_end:
            elem_subtree.append(read_elem(read, tell, use_namedtuple, decompress_array_func, tell_file_offset))
            sub_pos = tell()

        # At the end of each subtree there should be a sentinel (an empty element with all bytes set to zero).
        if read(_BLOCK_SENTINEL_LENGTH) != _BLOCK_SENTINEL_DATA:
            raise IOError("failed to read nested block sentinel, "
                          "expected all bytes to be 0")

        # Update `pos` for the number of bytes that have been read.
        pos += (sub_pos - start_sub_pos) + _BLOCK_SENTINEL_LENGTH

    if pos != local_end_offset:
        raise IOError("scope length not reached, something is wrong")

    args = (elem_id, elem_props_data, elem_props_type, elem_subtree)
    return FBXElem(*args) if use_namedtuple else args


def parse_version(fn):
    """
    Return the FBX version,
    if the file isn't a binary FBX return zero.
    """
    with open(fn, 'rb') as f:
        read = f.read

        if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
            return 0

        return read_uint(read)


def parse(fn, use_namedtuple=True):
    root_elems = []

    multithread_decompress_array_cm = MultiThreadedTaskConsumer.new_cpu_bound_cm(_decompress_and_insert_array)
    with open(fn, 'rb') as f, multithread_decompress_array_cm as decompress_array_func:
        read = f.read
        tell = f.tell

        if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
            raise IOError("Invalid header")

        fbx_version = read_uint(read)
        init_version(fbx_version)

        while True:
            elem = read_elem(read, tell, use_namedtuple, decompress_array_func)
            if elem is None:
                break
            root_elems.append(elem)

    args = (b'', [], bytearray(0), root_elems)
    return FBXElem(*args) if use_namedtuple else args, fbx_version
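A minimal sketch of driving this parser (file name hypothetical; the module uses relative imports, so it needs to be imported via the io_scene_fbx package for them to resolve):

    from io_scene_fbx import parse_fbx

    root, version = parse_fbx.parse("cube.fbx")
    print("FBX version:", version)
    for elem in root.elems:  # FBXElem is a namedtuple: (id, props, props_type, elems)
        print(elem.id, len(elem.props), len(elem.elems))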
1972
scripts/addons_core/io_scene_gltf2/__init__.py
Executable file
File diff suppressed because it is too large
3
scripts/addons_core/io_scene_gltf2/blender/__init__.py
Normal file
@ -0,0 +1,3 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0
215
scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_conversion.py
Executable file
@ -0,0 +1,215 @@
|
||||
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from math import sin, cos, tan, atan
|
||||
from mathutils import Matrix, Vector
|
||||
import numpy as np
|
||||
from ...io.com import gltf2_io_constants
|
||||
|
||||
PBR_WATTS_TO_LUMENS = 683
|
||||
# Industry convention, biological peak at 555nm, scientific standard as part of SI candela definition.
|
||||
|
||||
|
||||
# This means use the inverse of the TRS transform.
|
||||
def inverted_trs_mapping_node(mapping_transform):
|
||||
offset = mapping_transform["offset"]
|
||||
rotation = mapping_transform["rotation"]
|
||||
scale = mapping_transform["scale"]
|
||||
|
||||
# Inverse of a TRS is not always a TRS. This function will be right
|
||||
# at least when the following don't occur.
|
||||
if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5:
|
||||
return None
|
||||
if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5:
|
||||
return None
|
||||
|
||||
new_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector((-offset[0], -offset[1], 1))
|
||||
new_offset[0] /= scale[0]
|
||||
new_offset[1] /= scale[1]
|
||||
return {
|
||||
"offset": new_offset[0:2],
|
||||
"rotation": -rotation,
|
||||
"scale": [1 / scale[0], 1 / scale[1]],
|
||||
}
|
||||
|
||||
|
||||
def texture_transform_blender_to_gltf(mapping_transform):
|
||||
"""
|
||||
Converts the offset/rotation/scale from a Mapping node applied in Blender's
|
||||
UV space to the equivalent KHR_texture_transform.
|
||||
"""
|
||||
offset = mapping_transform.get('offset', [0, 0])
|
||||
rotation = mapping_transform.get('rotation', 0)
|
||||
scale = mapping_transform.get('scale', [1, 1])
|
||||
return {
|
||||
'offset': [
|
||||
offset[0] - scale[1] * sin(rotation),
|
||||
1 - offset[1] - scale[1] * cos(rotation),
|
||||
],
|
||||
'rotation': rotation,
|
||||
'scale': [scale[0], scale[1]],
|
||||
}
|
||||
|
||||
|
||||
def texture_transform_gltf_to_blender(texture_transform):
|
||||
"""
|
||||
Converts a KHR_texture_transform into the equivalent offset/rotation/scale
|
||||
for a Mapping node applied in Blender's UV space.
|
||||
"""
|
||||
offset = texture_transform.get('offset', [0, 0])
|
||||
rotation = texture_transform.get('rotation', 0)
|
||||
scale = texture_transform.get('scale', [1, 1])
|
||||
return {
|
||||
'offset': [
|
||||
offset[0] + scale[1] * sin(rotation),
|
||||
1 - offset[1] - scale[1] * cos(rotation),
|
||||
],
|
||||
'rotation': rotation,
|
||||
'scale': [scale[0], scale[1]],
|
||||
}


def get_target(property):
    return {
        "delta_location": "translation",
        "delta_rotation_euler": "rotation",
        "delta_rotation_quaternion": "rotation",
        "delta_scale": "scale",
        "location": "translation",
        "rotation_axis_angle": "rotation",
        "rotation_euler": "rotation",
        "rotation_quaternion": "rotation",
        "scale": "scale",
        "value": "weights"
    }.get(property, None)


def get_component_type(attribute_component_type):
    return {
        "INT8": gltf2_io_constants.ComponentType.Float,
        "BYTE_COLOR": gltf2_io_constants.ComponentType.UnsignedShort,
        "FLOAT2": gltf2_io_constants.ComponentType.Float,
        "FLOAT_COLOR": gltf2_io_constants.ComponentType.Float,
        "FLOAT_VECTOR": gltf2_io_constants.ComponentType.Float,
        "FLOAT_VECTOR_4": gltf2_io_constants.ComponentType.Float,
        "INT": gltf2_io_constants.ComponentType.Float,  # No signed int in glTF accessors
        "FLOAT": gltf2_io_constants.ComponentType.Float,
        "BOOLEAN": gltf2_io_constants.ComponentType.Float
    }.get(attribute_component_type)


def get_channel_from_target(target):
    return {
        "rotation": "rotation_quaternion",
        "translation": "location",
        "scale": "scale"
    }.get(target)


def get_data_type(attribute_component_type):
    return {
        "INT8": gltf2_io_constants.DataType.Scalar,
        "BYTE_COLOR": gltf2_io_constants.DataType.Vec4,
        "FLOAT2": gltf2_io_constants.DataType.Vec2,
        "FLOAT_COLOR": gltf2_io_constants.DataType.Vec4,
        "FLOAT_VECTOR": gltf2_io_constants.DataType.Vec3,
        "FLOAT_VECTOR_4": gltf2_io_constants.DataType.Vec4,
        "INT": gltf2_io_constants.DataType.Scalar,
        "FLOAT": gltf2_io_constants.DataType.Scalar,
        "BOOLEAN": gltf2_io_constants.DataType.Scalar,
    }.get(attribute_component_type)


def get_data_length(attribute_component_type):
    return {
        "INT8": 1,
        "BYTE_COLOR": 4,
        "FLOAT2": 2,
        "FLOAT_COLOR": 4,
        "FLOAT_VECTOR": 3,
        "FLOAT_VECTOR_4": 4,
        "INT": 1,
        "FLOAT": 1,
        "BOOLEAN": 1
    }.get(attribute_component_type)


def get_numpy_type(attribute_component_type):
    return {
        "INT8": np.float32,
        "BYTE_COLOR": np.float32,
        "FLOAT2": np.float32,
        "FLOAT_COLOR": np.float32,
        "FLOAT_VECTOR": np.float32,
        "FLOAT_VECTOR_4": np.float32,
        "INT": np.float32,  # Signed integers are not supported by glTF
        "FLOAT": np.float32,
        "BOOLEAN": np.float32
    }.get(attribute_component_type)


def get_attribute_type(component_type, data_type):
    if gltf2_io_constants.DataType.num_elements(data_type) == 1:
        return {
            gltf2_io_constants.ComponentType.Float: "FLOAT",
            gltf2_io_constants.ComponentType.UnsignedByte: "INT"  # What is the best for compatibility?
        }.get(component_type, None)
    elif gltf2_io_constants.DataType.num_elements(data_type) == 2:
        return {
            gltf2_io_constants.ComponentType.Float: "FLOAT2"
        }.get(component_type, None)
    elif gltf2_io_constants.DataType.num_elements(data_type) == 3:
        return {
            gltf2_io_constants.ComponentType.Float: "FLOAT_VECTOR"
        }.get(component_type, None)
    elif gltf2_io_constants.DataType.num_elements(data_type) == 4:
        return {
            gltf2_io_constants.ComponentType.Float: "FLOAT_COLOR",
            gltf2_io_constants.ComponentType.UnsignedShort: "BYTE_COLOR",
            gltf2_io_constants.ComponentType.UnsignedByte: "BYTE_COLOR"  # What is the best for compatibility?
        }.get(component_type, None)
    else:
        return None


def get_attribute(attributes, name, data_type, domain):
    attribute = attributes.get(name)
    if attribute is not None and attribute.data_type == data_type and attribute.domain == domain:
        return attribute
    else:
        return None


def get_gltf_interpolation(interpolation):
    return {
        "BEZIER": "CUBICSPLINE",
        "LINEAR": "LINEAR",
        "CONSTANT": "STEP"
    }.get(interpolation, "LINEAR")


def get_anisotropy_rotation_gltf_to_blender(rotation):
    # glTF rotation is in radians, Blender in 0 to 1
    return rotation / (2 * np.pi)


def get_anisotropy_rotation_blender_to_gltf(rotation):
    # glTF rotation is in radians, Blender in 0 to 1
    return rotation * (2 * np.pi)


def yvof_blender_to_gltf(angle, width, height, sensor_fit):

    aspect_ratio = width / height

    if width >= height:
        if sensor_fit != 'VERTICAL':
            return 2.0 * atan(tan(angle * 0.5) / aspect_ratio)
        else:
            return angle
    else:
        if sensor_fit != 'HORIZONTAL':
            return angle
        else:
            return 2.0 * atan(tan(angle * 0.5) / aspect_ratio)
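# A worked example with hypothetical values: for a 2:1 viewport with
# horizontal sensor fit, a 90-degree horizontal FOV becomes
# yfov = 2 * atan(tan(pi/4) / 2) ~= 0.9273 rad (~53.13 degrees):
#
#   >>> from math import pi, isclose
#   >>> isclose(yvof_blender_to_gltf(pi / 2, 200, 100, 'HORIZONTAL'), 0.9272952180016122)
#   True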
80
scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_data_path.py
Executable file
@ -0,0 +1,80 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0


def get_target_property_name(data_path: str) -> str:
    """Retrieve the target property."""

    if data_path.endswith("]"):
        return None
    else:
        return data_path.rsplit('.', 1)[-1]


def get_target_object_path(data_path: str) -> str:
    """Retrieve the target object data path, without the property."""
    if data_path.endswith("]"):
        return data_path.rsplit('[', 1)[0]
    elif data_path.startswith("pose.bones["):
        return data_path[:data_path.find('"]')] + '"]'
    path_split = data_path.rsplit('.', 1)
    self_targeting = len(path_split) < 2
    if self_targeting:
        return ""
    return path_split[0]
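# Worked examples (hypothetical data paths, doctest style):
#
#   >>> get_target_property_name('pose.bones["Bone"].location')
#   'location'
#   >>> get_target_object_path('pose.bones["Bone"].location')
#   'pose.bones["Bone"]'
#   >>> get_target_object_path('location')  # self-targeting path
#   ''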


def get_rotation_modes(target_property: str):
    """Retrieve the rotation modes based on target_property."""
    if target_property in ["rotation_euler", "delta_rotation_euler"]:
        return True, ["XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX"]
    elif target_property in ["rotation_quaternion", "delta_rotation_quaternion"]:
        return True, ["QUATERNION"]
    elif target_property in ["rotation_axis_angle"]:
        return True, ["AXIS_ANGLE"]
    else:
        return False, []


def is_location(target_property):
    return "location" in target_property


def is_rotation(target_property):
    return "rotation" in target_property


def is_scale(target_property):
    return "scale" in target_property


def get_delta_modes(target_property: str) -> bool:
    """Retrieve whether target_property is a delta transform."""
    return target_property.startswith("delta_")


def is_bone_anim_channel(data_path: str) -> bool:
    return data_path[:10] == "pose.bones"


def get_sk_exported(key_blocks):
    return [
        k
        for k in key_blocks
        if not skip_sk(key_blocks, k)
    ]


def skip_sk(key_blocks, k):
    # Do not export:
    # - if muted
    # - if the relative key is the SK itself (this avoids exporting the Basis too, if the user didn't change the order)
    # - the Basis (the first SK of the list)
    return k == k.relative_key \
        or k.mute \
        or is_first_index(key_blocks, k) is True


def is_first_index(key_blocks, k):
    return key_blocks[0].name == k.name
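# A minimal usage sketch (hypothetical `mesh`, any Blender mesh whose shape
# keys are exported as glTF morph target weights):
#
#   exported = get_sk_exported(mesh.shape_keys.key_blocks)
#   # `exported` now excludes the Basis, muted keys, and self-relative keys.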
@ -0,0 +1,15 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

BLENDER_IOR = 1.45
BLENDER_SPECULAR = 0.5
BLENDER_SPECULAR_TINT = 0.0

BLENDER_GLTF_SPECIAL_COLLECTION = "glTF_not_exported"

LIGHTS = {
    "POINT": "point",
    "SUN": "directional",
    "SPOT": "spot"
}
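# A usage sketch (hypothetical `blender_light`): map a Blender light type to
# its glTF punctual type; unsupported types such as 'AREA' yield None.
#
#   gltf_light_type = LIGHTS.get(blender_light.type)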
@ -0,0 +1,87 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0


import bpy
from .gltf2_blender_json import is_json_convertible


# Custom properties, which are in most cases present and should not be imported/exported.
BLACK_LIST = ['cycles', 'cycles_visibility', 'cycles_curves', 'glTF2ExportSettings']


def generate_extras(blender_element):
    """Filter and create a custom property, which is stored in the glTF extras field."""
    if not blender_element:
        return None

    extras = {}

    for custom_property in blender_element.keys():
        if custom_property in BLACK_LIST:
            continue

        value = __to_json_compatible(blender_element[custom_property])

        if value is not None:
            extras[custom_property] = value

    if not extras:
        return None

    return extras


def __to_json_compatible(value):
    """Make a value (usually a custom property) compatible with JSON."""

    if isinstance(value, bpy.types.ID):
        return value

    elif isinstance(value, str):
        return value

    elif isinstance(value, (int, float)):
        return value

    # for list classes
    elif isinstance(value, list):
        value = list(value)
        # make sure the contents are json-compatible too
        for index in range(len(value)):
            value[index] = __to_json_compatible(value[index])
        return value

    # for IDPropertyArray classes
    elif hasattr(value, "to_list"):
        value = value.to_list()
        return value

    elif hasattr(value, "to_dict"):
        value = value.to_dict()
        if is_json_convertible(value):
            return value

    return None


def set_extras(blender_element, extras, exclude=[]):
    """Copy extras onto a Blender object."""
    if not extras or not isinstance(extras, dict):
        return

    for custom_property, value in extras.items():
        if custom_property in BLACK_LIST:
            continue
        if custom_property in exclude:
            continue

        try:
            blender_element[custom_property] = value
        except Exception:
            # Try to convert to a string
            try:
                blender_element[custom_property] = str(value)
            except Exception:
                print('Error setting property %s to value of type %s' % (custom_property, type(value)))
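# A round-trip sketch (hypothetical objects `src_obj` and `dst_obj`):
#
#   extras = generate_extras(src_obj)   # None if nothing exportable
#   if extras is not None:
#       set_extras(dst_obj, extras)     # blacklisted keys are skipped again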
27
scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_json.py
Executable file
@ -0,0 +1,27 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import json
import bpy


class BlenderJSONEncoder(json.JSONEncoder):
    """Blender JSON Encoder."""

    def default(self, obj):
        if isinstance(obj, bpy.types.ID):
            return dict(
                name=obj.name,
                type=obj.__class__.__name__
            )
        return super(BlenderJSONEncoder, self).default(obj)


def is_json_convertible(data):
    """Test if a data set can be expressed as JSON."""
    try:
        json.dumps(data, cls=BlenderJSONEncoder)
        return True
    except Exception:
        return False
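# A usage sketch (hypothetical `obj`, any bpy.types.ID datablock): datablock
# references serialize as {"name": ..., "type": ...} instead of raising.
#
#   payload = {"target": obj, "strength": 0.5}
#   if is_json_convertible(payload):
#       text = json.dumps(payload, cls=BlenderJSONEncoder)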
@ -0,0 +1,32 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy

# Kept for compatibility at export time with old files


def get_gltf_node_old_name():
    return "glTF Settings"

# Old group name


def get_gltf_old_group_node_name():
    return "glTF Metallic Roughness"


def get_gltf_node_name():
    return "glTF Material Output"


def create_settings_group(name):
    gltf_node_group = bpy.data.node_groups.new(name, 'ShaderNodeTree')
    gltf_node_group.interface.new_socket("Occlusion", socket_type="NodeSocketFloat")
    thickness_factor = gltf_node_group.interface.new_socket("Thickness", socket_type="NodeSocketFloat")
    thickness_factor.default_value = 0.0
    gltf_node_group.nodes.new('NodeGroupOutput')
    gltf_node_group_input = gltf_node_group.nodes.new('NodeGroupInput')
    gltf_node_group_input.location = -200, 0
    return gltf_node_group
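# A usage sketch, mirroring the lookup done by the glTF settings operator
# later in this diff: fetch the group by its canonical name, or create it.
#
#   name = get_gltf_node_name()
#   group = bpy.data.node_groups.get(name) or create_settings_group(name)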
209
scripts/addons_core/io_scene_gltf2/blender/com/gltf2_blender_math.py
Executable file
@ -0,0 +1,209 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import typing
import math
from mathutils import Matrix, Vector, Quaternion, Euler

from .gltf2_blender_data_path import get_target_property_name


def list_to_mathutils(values: typing.List[float], data_path: str) -> typing.Union[Vector, Quaternion, Euler]:
    """Transform a list to a Blender py object."""
    target = get_target_property_name(data_path)

    if target == 'delta_location':
        return Vector(values)  # TODO Should be Vector(values) - Vector(something)?
    elif target == 'delta_rotation_euler':
        return Euler(values).to_quaternion()  # TODO Should be Euler(values).to_quaternion() @ something?
    elif target == 'location':
        return Vector(values)
    elif target == 'rotation_axis_angle':
        angle = values[0]
        axis = values[1:]
        return Quaternion(axis, math.radians(angle))
    elif target == 'rotation_euler':
        return Euler(values).to_quaternion()
    elif target == 'rotation_quaternion':
        return Quaternion(values)
    elif target == 'scale':
        return Vector(values)
    elif target == 'value':
        return Vector(values)

    return values


def mathutils_to_gltf(x: typing.Union[Vector, Quaternion]) -> typing.List[float]:
    """Transform a py object to a glTF list."""
    if isinstance(x, Vector):
        return list(x)
    if isinstance(x, Quaternion):
        # Blender has w-first quaternion notation
        return [x[1], x[2], x[3], x[0]]
    else:
        return list(x)


def to_yup() -> Matrix:
    """Transform to Y-up."""
    return Matrix(
        ((1.0, 0.0, 0.0, 0.0),
         (0.0, 0.0, 1.0, 0.0),
         (0.0, -1.0, 0.0, 0.0),
         (0.0, 0.0, 0.0, 1.0))
    )


to_zup = to_yup


def swizzle_yup(v: typing.Union[Vector, Quaternion], data_path: str) -> typing.Union[Vector, Quaternion]:
    """Manage Y-up."""
    target = get_target_property_name(data_path)
    swizzle_func = {
        "delta_location": swizzle_yup_location,
        "delta_rotation_euler": swizzle_yup_rotation,
        "location": swizzle_yup_location,
        "rotation_axis_angle": swizzle_yup_rotation,
        "rotation_euler": swizzle_yup_rotation,
        "rotation_quaternion": swizzle_yup_rotation,
        "scale": swizzle_yup_scale,
        "value": swizzle_yup_value
    }.get(target)

    if swizzle_func is None:
        raise RuntimeError("Cannot transform values at {}".format(data_path))

    return swizzle_func(v)


def swizzle_yup_location(loc: Vector) -> Vector:
    """Manage Y-up location."""
    return Vector((loc[0], loc[2], -loc[1]))


def swizzle_yup_rotation(rot: Quaternion) -> Quaternion:
    """Manage Y-up rotation."""
    return Quaternion((rot[0], rot[1], rot[3], -rot[2]))


def swizzle_yup_scale(scale: Vector) -> Vector:
    """Manage Y-up scale."""
    return Vector((scale[0], scale[2], scale[1]))


def swizzle_yup_value(value: typing.Any) -> typing.Any:
    """Manage Y-up value."""
    return value


def transform(v: typing.Union[Vector, Quaternion], data_path: str, transform: Matrix = Matrix.Identity(4),
              need_rotation_correction: bool = False) -> typing.Union[Vector, Quaternion]:
    """Manage transformations."""
    target = get_target_property_name(data_path)
    transform_func = {
        "delta_location": transform_location,
        "delta_rotation_euler": transform_rotation,
        "location": transform_location,
        "rotation_axis_angle": transform_rotation,
        "rotation_euler": transform_rotation,
        "rotation_quaternion": transform_rotation,
        "scale": transform_scale,
        "value": transform_value
    }.get(target)

    if transform_func is None:
        raise RuntimeError("Cannot transform values at {}".format(data_path))

    return transform_func(v, transform, need_rotation_correction)


def transform_location(location: Vector, transform: Matrix = Matrix.Identity(4),
                       need_rotation_correction: bool = False) -> Vector:
    """Transform location."""
    correction = Quaternion((2**0.5 / 2, -2**0.5 / 2, 0.0, 0.0))
    m = Matrix.Translation(location)
    if need_rotation_correction:
        m @= correction.to_matrix().to_4x4()
    m = transform @ m
    return m.to_translation()


def transform_rotation(rotation: Quaternion, transform: Matrix = Matrix.Identity(4),
                       need_rotation_correction: bool = False) -> Quaternion:
    """Transform rotation."""
    rotation.normalize()
    correction = Quaternion((2**0.5 / 2, -2**0.5 / 2, 0.0, 0.0))
    m = rotation.to_matrix().to_4x4()
    if need_rotation_correction:
        m @= correction.to_matrix().to_4x4()
    m = transform @ m
    return m.to_quaternion()


def transform_scale(scale: Vector, transform: Matrix = Matrix.Identity(4),
                    need_rotation_correction: bool = False) -> Vector:
    """Transform scale."""
    m = Matrix.Identity(4)
    m[0][0] = scale.x
    m[1][1] = scale.y
    m[2][2] = scale.z
    m = transform @ m

    return m.to_scale()


def transform_value(value: Vector, _: Matrix = Matrix.Identity(4), need_rotation_correction: bool = False) -> Vector:
    """Transform value."""
    return value


def round_if_near(value: float, target: float) -> float:
    """If value is very close to target, round to target."""
    return value if abs(value - target) > 2.0e-6 else target


def scale_rot_swap_matrix(rot):
    """Returns a matrix m s.t. Scale[s] Rot[rot] = Rot[rot] Scale[m s].

    If rot.to_matrix() is a signed permutation matrix, this works for any s.
    Otherwise it works only if s is a uniform scaling.
    """
    m = nearby_signed_perm_matrix(rot)  # snap to a signed perm matrix
    m.transpose()  # invert the permutation
    for i in range(3):
        for j in range(3):
            m[i][j] = abs(m[i][j])  # discard the sign
    return m


def nearby_signed_perm_matrix(rot):
    """Returns a signed permutation matrix close to rot.to_matrix().

    (A signed permutation matrix is like a permutation matrix, except
    the non-zero entries can be ±1.)
    """
    m = rot.to_matrix()
    x, y, z = m[0], m[1], m[2]

    # Set the largest entry in the first row to ±1
    a, b, c = abs(x[0]), abs(x[1]), abs(x[2])
    i = 0 if a >= b and a >= c else 1 if b >= c else 2
    x[i] = 1 if x[i] > 0 else -1
    x[(i + 1) % 3] = 0
    x[(i + 2) % 3] = 0

    # Same for the second row: only two columns to consider now.
    a, b = abs(y[(i + 1) % 3]), abs(y[(i + 2) % 3])
    j = (i + 1) % 3 if a >= b else (i + 2) % 3
    y[j] = 1 if y[j] > 0 else -1
    y[(j + 1) % 3] = 0
    y[(j + 2) % 3] = 0

    # Same for the third row: only one column left
    k = (0 + 1 + 2) - i - j
    z[k] = 1 if z[k] > 0 else -1
    z[(k + 1) % 3] = 0
    z[(k + 2) % 3] = 0

    return m
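# A small consistency check (hypothetical values): a 90-degree rotation about
# Z is already a signed permutation, so snapping leaves it unchanged and the
# resulting swap matrix simply exchanges the X and Y scale factors.
#
#   >>> from mathutils import Quaternion
#   >>> m = scale_rot_swap_matrix(Quaternion((0.0, 0.0, 1.0), math.radians(90)))
#   >>> [round(v) for v in (m[0][0], m[0][1], m[1][0], m[1][1], m[2][2])]
#   [0, 1, 1, 0, 1]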
@ -0,0 +1,734 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
from ..com.gltf2_blender_material_helpers import get_gltf_node_name, create_settings_group

################ glTF Material Output node ###########################################


def create_gltf_ao_group(operator, group_name):

    # create a new group
    gltf_ao_group = bpy.data.node_groups.new(group_name, "ShaderNodeTree")

    return gltf_ao_group


class NODE_OT_GLTF_SETTINGS(bpy.types.Operator):
    bl_idname = "node.gltf_settings_node_operator"
    bl_label = "glTF Material Output"
    bl_description = "Add a node to the active tree for glTF export"

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return (
            space is not None
            and space.type == "NODE_EDITOR"
            and context.object and context.object.active_material
            and context.object.active_material.use_nodes is True
            and bpy.context.preferences.addons['io_scene_gltf2'].preferences.settings_node_ui is True
        )

    def execute(self, context):
        gltf_settings_node_name = get_gltf_node_name()
        if gltf_settings_node_name in bpy.data.node_groups:
            my_group = bpy.data.node_groups[get_gltf_node_name()]
        else:
            my_group = create_settings_group(gltf_settings_node_name)
        node_tree = context.object.active_material.node_tree
        new_node = node_tree.nodes.new("ShaderNodeGroup")
        new_node.node_tree = bpy.data.node_groups[my_group.name]
        return {"FINISHED"}


def add_gltf_settings_to_menu(self, context):
    if bpy.context.preferences.addons['io_scene_gltf2'].preferences.settings_node_ui is True:
        self.layout.operator("node.gltf_settings_node_operator")

################################### KHR_materials_variants ####################

# Global UI panel


class gltf2_KHR_materials_variants_variant(bpy.types.PropertyGroup):
    variant_idx: bpy.props.IntProperty()
    name: bpy.props.StringProperty(name="Variant Name")


class SCENE_UL_gltf2_variants(bpy.types.UIList):
    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):

        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            layout.prop(item, "name", text="", emboss=False)

        elif self.layout_type in {'GRID'}:
            layout.alignment = 'CENTER'


class SCENE_PT_gltf2_variants(bpy.types.Panel):
    bl_label = "glTF Material Variants"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "glTF Variants"

    @classmethod
    def poll(self, context):
        return bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui is True

    def draw(self, context):
        layout = self.layout
        row = layout.row()

        if bpy.data.scenes[0].get('gltf2_KHR_materials_variants_variants') and len(
                bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0:

            row.template_list(
                "SCENE_UL_gltf2_variants",
                "",
                bpy.data.scenes[0],
                "gltf2_KHR_materials_variants_variants",
                bpy.data.scenes[0],
                "gltf2_active_variant")
            col = row.column()
            row = col.column(align=True)
            row.operator("scene.gltf2_variant_add", icon="ADD", text="")
            row.operator("scene.gltf2_variant_remove", icon="REMOVE", text="")

            row = layout.row()
            row.operator("scene.gltf2_display_variant", text="Display Variant")
            row = layout.row()
            row.operator("scene.gltf2_assign_to_variant", text="Assign To Variant")
            row = layout.row()
            row.operator("scene.gltf2_reset_to_original", text="Reset To Original")
            row.operator("scene.gltf2_assign_as_original", text="Assign as Original")
        else:
            row.operator("scene.gltf2_variant_add", text="Add Material Variant")


class SCENE_OT_gltf2_variant_add(bpy.types.Operator):
    """Add a new Material Variant"""
    bl_idname = "scene.gltf2_variant_add"
    bl_label = "Add Material Variant"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return True

    def execute(self, context):
        var = bpy.data.scenes[0].gltf2_KHR_materials_variants_variants.add()
        var.variant_idx = len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) - 1
        var.name = "VariantName"
        bpy.data.scenes[0].gltf2_active_variant = len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) - 1
        return {'FINISHED'}


class SCENE_OT_gltf2_variant_remove(bpy.types.Operator):
    """Remove the active Material Variant"""
    bl_idname = "scene.gltf2_variant_remove"
    bl_label = "Remove Variant"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0

    def execute(self, context):
        bpy.data.scenes[0].gltf2_KHR_materials_variants_variants.remove(bpy.data.scenes[0].gltf2_active_variant)

        # loop over all meshes
        for obj in [o for o in bpy.data.objects if o.type == "MESH"]:
            mesh = obj.data
            remove_idx_data = []
            for idx, i in enumerate(mesh.gltf2_variant_mesh_data):
                remove_idx_variants = []
                for idx_var, v in enumerate(i.variants):
                    if v.variant.variant_idx == bpy.data.scenes[0].gltf2_active_variant:
                        remove_idx_variants.append(idx_var)
                    elif v.variant.variant_idx > bpy.data.scenes[0].gltf2_active_variant:
                        v.variant.variant_idx -= 1

                if len(remove_idx_variants) > 0:
                    for idx_var in remove_idx_variants:
                        i.variants.remove(idx_var)

                if len(i.variants) == 0:
                    remove_idx_data.append(idx)

            if len(remove_idx_data) > 0:
                for idx_data in remove_idx_data:
                    mesh.gltf2_variant_mesh_data.remove(idx_data)

        return {'FINISHED'}


# Operator to display a variant
class SCENE_OT_gltf2_display_variant(bpy.types.Operator):
    bl_idname = "scene.gltf2_display_variant"
    bl_label = "Display Variant"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0

    def execute(self, context):

        gltf2_active_variant = bpy.data.scenes[0].gltf2_active_variant

        # loop over all meshes
        for obj in [o for o in bpy.data.objects if o.type == "MESH"]:
            mesh = obj.data
            for i in mesh.gltf2_variant_mesh_data:
                if i.variants and gltf2_active_variant in [v.variant.variant_idx for v in i.variants]:
                    mat = i.material
                    slot = i.material_slot_index
                    if slot < len(obj.material_slots):  # The user seems to have removed some slots...
                        obj.material_slots[slot].material = mat

        return {'FINISHED'}

# Operator to assign current mesh materials to a variant


class SCENE_OT_gltf2_assign_to_variant(bpy.types.Operator):
    bl_idname = "scene.gltf2_assign_to_variant"
    bl_label = "Assign To Variant"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return len(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0 \
            and bpy.context.object and bpy.context.object.type == "MESH"

    def execute(self, context):
        gltf2_active_variant = bpy.data.scenes[0].gltf2_active_variant
        obj = bpy.context.object

        # loop over material slots (primitives)
        for mat_slot_idx, s in enumerate(obj.material_slots):
            # Check if there is already data for this slot
            found = False
            for i in obj.data.gltf2_variant_mesh_data:
                if i.material_slot_index == mat_slot_idx and i.material == s.material:
                    found = True
                    variant_primitive = i

            if found is False:
                variant_primitive = obj.data.gltf2_variant_mesh_data.add()
                variant_primitive.material_slot_index = mat_slot_idx
                variant_primitive.material = s.material

            vari = variant_primitive.variants.add()
            vari.variant.variant_idx = bpy.data.scenes[0].gltf2_active_variant

        return {'FINISHED'}

# Operator to reset the mesh to original (using the default material when it exists)


class SCENE_OT_gltf2_reset_to_original(bpy.types.Operator):
    bl_idname = "scene.gltf2_reset_to_original"
    bl_label = "Reset to Original"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return bpy.context.object and bpy.context.object.type == "MESH" and len(
            context.object.data.gltf2_variant_default_materials) > 0

    def execute(self, context):
        obj = bpy.context.object

        # loop over material slots (primitives)
        for mat_slot_idx, s in enumerate(obj.material_slots):
            # Check if there is a default material for this slot
            found = False
            for i in obj.data.gltf2_variant_default_materials:
                if i.material_slot_index == mat_slot_idx:
                    s.material = i.default_material
                    break

        return {'FINISHED'}

# Operator to assign current materials as default materials


class SCENE_OT_gltf2_assign_as_original(bpy.types.Operator):
    bl_idname = "scene.gltf2_assign_as_original"
    bl_label = "Assign as Original"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return bpy.context.object and bpy.context.object.type == "MESH"

    def execute(self, context):
        obj = bpy.context.object

        # loop over material slots (primitives)
        for mat_slot_idx, s in enumerate(obj.material_slots):
            # Check if there is a default material for this slot
            found = False
            for i in obj.data.gltf2_variant_default_materials:
                if i.material_slot_index == mat_slot_idx:
                    found = True
                    # Update if needed
                    i.default_material = s.material
                    break

            if found is False:
                default_mat = obj.data.gltf2_variant_default_materials.add()
                default_mat.material_slot_index = mat_slot_idx
                default_mat.default_material = s.material

        return {'FINISHED'}

# Mesh Panel


class gltf2_KHR_materials_variant_pointer(bpy.types.PropertyGroup):
    variant: bpy.props.PointerProperty(type=gltf2_KHR_materials_variants_variant)


class gltf2_KHR_materials_variants_default_material(bpy.types.PropertyGroup):
    material_slot_index: bpy.props.IntProperty(name="Material Slot Index")
    default_material: bpy.props.PointerProperty(type=bpy.types.Material)


class gltf2_KHR_materials_variants_primitive(bpy.types.PropertyGroup):
    material_slot_index: bpy.props.IntProperty(name="Material Slot Index")
    material: bpy.props.PointerProperty(type=bpy.types.Material)
    variants: bpy.props.CollectionProperty(type=gltf2_KHR_materials_variant_pointer)
    active_variant_idx: bpy.props.IntProperty()


class MESH_UL_gltf2_mesh_variants(bpy.types.UIList):
    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):

        vari = item.variant
        layout.context_pointer_set("id", vari)

        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            layout.prop(bpy.data.scenes[0].gltf2_KHR_materials_variants_variants[vari.variant_idx],
                        "name", text="", emboss=False)
        elif self.layout_type in {'GRID'}:
            layout.alignment = 'CENTER'


class MESH_PT_gltf2_mesh_variants(bpy.types.Panel):
    bl_label = "glTF Material Variants"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"

    @classmethod
    def poll(self, context):
        return bpy.context.preferences.addons['io_scene_gltf2'].preferences.KHR_materials_variants_ui is True \
            and len(bpy.context.object.material_slots) > 0

    def draw(self, context):
        layout = self.layout

        active_material_slots = bpy.context.object.active_material_index

        found = False
        if 'gltf2_variant_mesh_data' in bpy.context.object.data.keys():
            for idx, prim in enumerate(bpy.context.object.data.gltf2_variant_mesh_data):
                if prim.material_slot_index == active_material_slots and id(prim.material) == id(
                        bpy.context.object.material_slots[active_material_slots].material):
                    found = True
                    break

        row = layout.row()
        if found is True:
            row.template_list("MESH_UL_gltf2_mesh_variants", "", prim, "variants", prim, "active_variant_idx")
            col = row.column()
            row = col.column(align=True)
            row.operator("scene.gltf2_variants_slot_add", icon="ADD", text="")
            row.operator("scene.gltf2_remove_material_variant", icon="REMOVE", text="")

            row = layout.row()
            if 'gltf2_KHR_materials_variants_variants' in bpy.data.scenes[0].keys() and len(
                    bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0:
                row.prop_search(
                    context.object.data,
                    "gltf2_variant_pointer",
                    bpy.data.scenes[0],
                    "gltf2_KHR_materials_variants_variants",
                    text="Variant")
                row = layout.row()
                row.operator("scene.gltf2_material_to_variant", text="Assign To Variant")
            else:
                row.label(text="Please Create a Variant First")
        else:
            if 'gltf2_KHR_materials_variants_variants' in bpy.data.scenes[0].keys() and len(
                    bpy.data.scenes[0].gltf2_KHR_materials_variants_variants) > 0:
                row.operator("scene.gltf2_variants_slot_add", text="Add a new Variant Slot")
            else:
                row.label(text="Please Create a Variant First")


class SCENE_OT_gltf2_variant_slot_add(bpy.types.Operator):
    """Add a new Slot"""
    bl_idname = "scene.gltf2_variants_slot_add"
    bl_label = "Add new Slot"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return len(bpy.context.object.material_slots) > 0

    def execute(self, context):
        mesh = context.object.data
        # Check if there is already data for this slot_idx + material

        found = False
        for i in mesh.gltf2_variant_mesh_data:
            if i.material_slot_index == context.object.active_material_index and i.material == context.object.material_slots[
                    context.object.active_material_index].material:
                found = True
                variant_primitive = i

        if found is False:
            variant_primitive = mesh.gltf2_variant_mesh_data.add()
            variant_primitive.material_slot_index = context.object.active_material_index
            variant_primitive.material = context.object.material_slots[context.object.active_material_index].material

        vari = variant_primitive.variants.add()
        vari.variant.variant_idx = bpy.data.scenes[0].gltf2_active_variant

        return {'FINISHED'}


class SCENE_OT_gltf2_material_to_variant(bpy.types.Operator):
    """Assign Variant to Slot"""
    bl_idname = "scene.gltf2_material_to_variant"
    bl_label = "Assign Material To Variant"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return len(bpy.context.object.material_slots) > 0 and context.object.data.gltf2_variant_pointer != ""

    def execute(self, context):
        mesh = context.object.data

        found = False
        for i in mesh.gltf2_variant_mesh_data:
            if i.material_slot_index == context.object.active_material_index and i.material == context.object.material_slots[
                    context.object.active_material_index].material:
                found = True
                variant_primitive = i

        if found is False:
            return {'CANCELLED'}

        vari = variant_primitive.variants[variant_primitive.active_variant_idx]

        # Retrieve the variant idx
        found = False
        for v in bpy.data.scenes[0].gltf2_KHR_materials_variants_variants:
            if v.name == context.object.data.gltf2_variant_pointer:
                found = True
                break

        if found is False:
            return {'CANCELLED'}

        vari.variant.variant_idx = v.variant_idx

        return {'FINISHED'}


class SCENE_OT_gltf2_remove_material_variant(bpy.types.Operator):
    """Remove a variant Slot"""
    bl_idname = "scene.gltf2_remove_material_variant"
    bl_label = "Remove a variant Slot"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return len(bpy.context.object.material_slots) > 0 and len(bpy.context.object.data.gltf2_variant_mesh_data) > 0

    def execute(self, context):
        mesh = context.object.data

        found = False
        found_idx = -1
        for idx, i in enumerate(mesh.gltf2_variant_mesh_data):
            if i.material_slot_index == context.object.active_material_index and i.material == context.object.material_slots[
                    context.object.active_material_index].material:
                found = True
                variant_primitive = i
                found_idx = idx

        if found is False:
            return {'CANCELLED'}

        variant_primitive.variants.remove(variant_primitive.active_variant_idx)

        if len(variant_primitive.variants) == 0:
            mesh.gltf2_variant_mesh_data.remove(found_idx)

        return {'FINISHED'}


################ glTF Animation ###########################################

class gltf2_animation_NLATrackNames(bpy.types.PropertyGroup):
    name: bpy.props.StringProperty(name="NLA Track Name")


class SCENE_UL_gltf2_animation_track(bpy.types.UIList):
    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):

        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            row = layout.row()
            icon = 'SOLO_ON' if index == bpy.data.scenes[0].gltf2_animation_applied else 'SOLO_OFF'
            row.prop(item, "name", text="", emboss=False)
            op = row.operator("scene.gltf2_animation_apply", text='', icon=icon)
            op.index = index

        elif self.layout_type in {'GRID'}:
            layout.alignment = 'CENTER'


class SCENE_OT_gltf2_animation_apply(bpy.types.Operator):
    """Apply glTF animations"""
    bl_idname = "scene.gltf2_animation_apply"
    bl_label = "Apply glTF animation"
    bl_options = {'REGISTER'}

    index: bpy.props.IntProperty()

    @classmethod
    def poll(self, context):
        return True

    def execute(self, context):

        track_name = bpy.data.scenes[0].gltf2_animation_tracks[self.index].name

        # remove all actions from objects
        for obj in bpy.context.scene.objects:
            if obj.animation_data:
                obj.animation_data.action = None
                obj.matrix_world = obj.gltf2_animation_rest

                for track in [track for track in obj.animation_data.nla_tracks if track.name ==
                              track_name and len(track.strips) > 0 and track.strips[0].action is not None]:
                    obj.animation_data.action = track.strips[0].action

            if obj.type == "MESH" and obj.data and obj.data.shape_keys and obj.data.shape_keys.animation_data:
                obj.data.shape_keys.animation_data.action = None
                for idx, data in enumerate(obj.gltf2_animation_weight_rest):
                    obj.data.shape_keys.key_blocks[idx + 1].value = data.val

                for track in [track for track in obj.data.shape_keys.animation_data.nla_tracks if track.name ==
                              track_name and len(track.strips) > 0 and track.strips[0].action is not None]:
                    obj.data.shape_keys.animation_data.action = track.strips[0].action

            if obj.type in ["LIGHT", "CAMERA"] and obj.data and obj.data.animation_data:
                obj.data.animation_data.action = None
                for track in [track for track in obj.data.animation_data.nla_tracks if track.name ==
                              track_name and len(track.strips) > 0 and track.strips[0].action is not None]:
                    obj.data.animation_data.action = track.strips[0].action

        for mat in bpy.data.materials:
            if not mat.node_tree:
                continue
            if mat.node_tree.animation_data:
                mat.node_tree.animation_data.action = None
                for track in [track for track in mat.node_tree.animation_data.nla_tracks if track.name ==
                              track_name and len(track.strips) > 0 and track.strips[0].action is not None]:
                    mat.node_tree.animation_data.action = track.strips[0].action

        bpy.data.scenes[0].gltf2_animation_applied = self.index
        return {'FINISHED'}


class SCENE_PT_gltf2_animation(bpy.types.Panel):
    bl_label = "glTF Animations"
    bl_space_type = 'DOPESHEET_EDITOR'
    bl_region_type = 'UI'
    bl_category = "glTF"

    @classmethod
    def poll(self, context):
        return bpy.context.preferences.addons['io_scene_gltf2'].preferences.animation_ui is True

    def draw(self, context):
        layout = self.layout
        row = layout.row()

        if len(bpy.data.scenes[0].gltf2_animation_tracks) > 0:
            row.template_list(
                "SCENE_UL_gltf2_animation_track",
                "",
                bpy.data.scenes[0],
                "gltf2_animation_tracks",
                bpy.data.scenes[0],
                "gltf2_animation_active")
        else:
            row.label(text="No glTF Animation")


class GLTF2_weight(bpy.types.PropertyGroup):
    val: bpy.props.FloatProperty(name="weight")

################################### Filtering animation ####################


class SCENE_OT_gltf2_action_filter_refresh(bpy.types.Operator):
    """Refresh the list of actions"""
    bl_idname = "scene.gltf2_action_filter_refresh"
    bl_label = "Refresh action list"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(self, context):
        return True

    def execute(self, context):
        for action in bpy.data.actions:
            if id(action) in [id(i.action) for i in bpy.data.scenes[0].gltf_action_filter]:
                continue
            item = bpy.data.scenes[0].gltf_action_filter.add()
            item.action = action
            item.keep = True

        return {'FINISHED'}


class SCENE_UL_gltf2_filter_action(bpy.types.UIList):
    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):

        action = item.action
        layout.context_pointer_set("id", action)

        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            layout.prop(item.action, "name", text="", emboss=False)
            layout.prop(item, "keep", text="", emboss=True)

        elif self.layout_type in {'GRID'}:
            layout.alignment = 'CENTER'


def export_panel_animation_action_filter(layout, operator):
    if operator.export_animation_mode not in ["ACTIONS", "ACTIVE_ACTIONS", "BROADCAST"]:
        return

    header, body = layout.panel("GLTF_export_action_filter", default_closed=True)
    header.use_property_split = False
    header.prop(operator, "export_action_filter", text="")
    header.label(text="Action Filter")
    if body and operator.export_action_filter:
        body.active = operator.export_animations and operator.export_action_filter

        row = body.row()

        if len(bpy.data.actions) > 0:
            row.template_list(
                "SCENE_UL_gltf2_filter_action",
                "",
                bpy.data.scenes[0],
                "gltf_action_filter",
                bpy.data.scenes[0],
                "gltf_action_filter_active")
            col = row.column()
            row = col.column(align=True)
            row.operator("scene.gltf2_action_filter_refresh", icon="FILE_REFRESH", text="")
        else:
            row.label(text="No Actions in .blend file")

###############################################################################


def register():
    bpy.utils.register_class(NODE_OT_GLTF_SETTINGS)
    bpy.types.NODE_MT_category_shader_output.append(add_gltf_settings_to_menu)
    bpy.utils.register_class(SCENE_OT_gltf2_action_filter_refresh)
    bpy.utils.register_class(SCENE_UL_gltf2_filter_action)


def variant_register():
    bpy.utils.register_class(SCENE_OT_gltf2_display_variant)
    bpy.utils.register_class(SCENE_OT_gltf2_assign_to_variant)
    bpy.utils.register_class(SCENE_OT_gltf2_reset_to_original)
    bpy.utils.register_class(SCENE_OT_gltf2_assign_as_original)
    bpy.utils.register_class(SCENE_OT_gltf2_remove_material_variant)
    bpy.utils.register_class(gltf2_KHR_materials_variants_variant)
    bpy.utils.register_class(gltf2_KHR_materials_variant_pointer)
    bpy.utils.register_class(gltf2_KHR_materials_variants_primitive)
    bpy.utils.register_class(gltf2_KHR_materials_variants_default_material)
    bpy.utils.register_class(SCENE_UL_gltf2_variants)
    bpy.utils.register_class(SCENE_PT_gltf2_variants)
    bpy.utils.register_class(MESH_UL_gltf2_mesh_variants)
    bpy.utils.register_class(MESH_PT_gltf2_mesh_variants)
    bpy.utils.register_class(SCENE_OT_gltf2_variant_add)
    bpy.utils.register_class(SCENE_OT_gltf2_variant_remove)
    bpy.utils.register_class(SCENE_OT_gltf2_material_to_variant)
    bpy.utils.register_class(SCENE_OT_gltf2_variant_slot_add)
    bpy.types.Mesh.gltf2_variant_mesh_data = bpy.props.CollectionProperty(type=gltf2_KHR_materials_variants_primitive)
    bpy.types.Mesh.gltf2_variant_default_materials = bpy.props.CollectionProperty(
        type=gltf2_KHR_materials_variants_default_material)
    bpy.types.Mesh.gltf2_variant_pointer = bpy.props.StringProperty()
    bpy.types.Scene.gltf2_KHR_materials_variants_variants = bpy.props.CollectionProperty(
        type=gltf2_KHR_materials_variants_variant)
    bpy.types.Scene.gltf2_active_variant = bpy.props.IntProperty()


def unregister():
    bpy.utils.unregister_class(NODE_OT_GLTF_SETTINGS)
    bpy.utils.unregister_class(SCENE_UL_gltf2_filter_action)
    bpy.utils.unregister_class(SCENE_OT_gltf2_action_filter_refresh)


def variant_unregister():
    bpy.utils.unregister_class(SCENE_OT_gltf2_variant_add)
    bpy.utils.unregister_class(SCENE_OT_gltf2_variant_remove)
    bpy.utils.unregister_class(SCENE_OT_gltf2_material_to_variant)
    bpy.utils.unregister_class(SCENE_OT_gltf2_variant_slot_add)
    bpy.utils.unregister_class(SCENE_OT_gltf2_display_variant)
    bpy.utils.unregister_class(SCENE_OT_gltf2_assign_to_variant)
    bpy.utils.unregister_class(SCENE_OT_gltf2_reset_to_original)
    bpy.utils.unregister_class(SCENE_OT_gltf2_assign_as_original)
    bpy.utils.unregister_class(SCENE_OT_gltf2_remove_material_variant)
    bpy.utils.unregister_class(SCENE_PT_gltf2_variants)
    bpy.utils.unregister_class(SCENE_UL_gltf2_variants)
    bpy.utils.unregister_class(MESH_PT_gltf2_mesh_variants)
    bpy.utils.unregister_class(MESH_UL_gltf2_mesh_variants)
    bpy.utils.unregister_class(gltf2_KHR_materials_variants_default_material)
    bpy.utils.unregister_class(gltf2_KHR_materials_variants_primitive)
    bpy.utils.unregister_class(gltf2_KHR_materials_variants_variant)
    bpy.utils.unregister_class(gltf2_KHR_materials_variant_pointer)


def anim_ui_register():
    bpy.utils.register_class(GLTF2_weight)
    bpy.utils.register_class(SCENE_OT_gltf2_animation_apply)
    bpy.utils.register_class(gltf2_animation_NLATrackNames)
    bpy.utils.register_class(SCENE_UL_gltf2_animation_track)
    bpy.types.Scene.gltf2_animation_tracks = bpy.props.CollectionProperty(type=gltf2_animation_NLATrackNames)
    bpy.types.Scene.gltf2_animation_active = bpy.props.IntProperty()
    bpy.types.Scene.gltf2_animation_applied = bpy.props.IntProperty()
    bpy.types.Object.gltf2_animation_rest = bpy.props.FloatVectorProperty(name="Rest", size=[4, 4], subtype="MATRIX")
    bpy.types.Object.gltf2_animation_weight_rest = bpy.props.CollectionProperty(type=GLTF2_weight)
    bpy.utils.register_class(SCENE_PT_gltf2_animation)


def anim_ui_unregister():
    bpy.utils.unregister_class(SCENE_PT_gltf2_animation)
    del bpy.types.Scene.gltf2_animation_active
    del bpy.types.Scene.gltf2_animation_tracks
    del bpy.types.Scene.gltf2_animation_applied
    del bpy.types.Object.gltf2_animation_rest
    del bpy.types.Object.gltf2_animation_weight_rest
    bpy.utils.unregister_class(SCENE_UL_gltf2_animation_track)
    bpy.utils.unregister_class(gltf2_animation_NLATrackNames)
    bpy.utils.unregister_class(SCENE_OT_gltf2_animation_apply)
    bpy.utils.unregister_class(GLTF2_weight)
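# A usage sketch (assumes the importer has already populated the track list):
#
#   bpy.data.scenes[0].gltf2_animation_tracks      # NLA track names from import
#   bpy.ops.scene.gltf2_animation_apply(index=0)   # solo the first glTF animation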
@ -0,0 +1,70 @@
# SPDX-FileCopyrightText: 2018-2024 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import numpy as np


def fast_structured_np_unique(arr, *args, **kwargs):
    """
    np.unique optimized for structured arrays when a sorted result is not required.

    np.unique works through sorting, but sorting a structured array requires as many sorts as there are fields in the
    structured dtype.

    By viewing the array as a single non-structured dtype that sorts according to its bytes, unique elements can be
    found with a single sort. Since the values are viewed as a different type to their original, this means that the
    returned array of unique values may not be sorted according to their original type.

    Float field caveats:
    All elements of -0.0 in the input array will be replaced with 0.0 to ensure that both values are collapsed into one.
    NaN values can have lots of different byte representations (e.g. signalling/quiet and custom payloads). Only the
    duplicates of each unique byte representation will be collapsed into one.

    Nested structured dtypes are not supported.
    The behaviour of structured dtypes with overlapping fields is undefined.
    """
    structured_dtype = arr.dtype
    fields = structured_dtype.fields
    if fields is None:
        raise RuntimeError('%s is not a structured dtype' % structured_dtype)

    for field_name, (field_dtype, *_offset_and_optional_title) in fields.items():
        if field_dtype.subdtype is not None:
            raise RuntimeError('Nested structured types are not supported in %s' % structured_dtype)
        if field_dtype.kind == 'f':
            # Replace all -0.0 in the array with 0.0 because -0.0 and 0.0 have different byte representations.
            arr[field_name][arr[field_name] == -0.0] = 0.0
        elif field_dtype.kind not in "iuUSV":
            # Signed integer, unsigned integer, unicode string, byte string (bytes) and raw bytes (void) can be left
            # as they are. Everything else is unsupported.
            raise RuntimeError('Unsupported structured field type %s for field %s' % (field_dtype, field_name))

    structured_itemsize = structured_dtype.itemsize

    # Integer types sort the fastest, but are only available for specific itemsizes.
    uint_dtypes_by_itemsize = {1: np.uint8, 2: np.uint16, 4: np.uint32, 8: np.uint64}
    # Signed/unsigned makes no noticeable speed difference, but using unsigned will result in ordering according to
    # individual bytes like the other, non-integer types.
    if structured_itemsize in uint_dtypes_by_itemsize:
        entire_structure_dtype = uint_dtypes_by_itemsize[structured_itemsize]
    else:
        # Construct a flexible size dtype with matching itemsize to the entire structured dtype.
        # Should always be 4 because each character in a unicode string is UCS4.
        str_itemsize = np.dtype((np.str_, 1)).itemsize
        if structured_itemsize % str_itemsize == 0:
            # Unicode strings seem to be slightly faster to sort than bytes.
            entire_structure_dtype = np.dtype((np.str_, structured_itemsize // str_itemsize))
        else:
            # Bytes seem to be slightly faster to sort than raw bytes (np.void).
            entire_structure_dtype = np.dtype((np.bytes_, structured_itemsize))

    result = np.unique(arr.view(entire_structure_dtype), *args, **kwargs)

    unique = result[0] if isinstance(result, tuple) else result
    # View in the original dtype.
    unique = unique.view(arr.dtype)
    if isinstance(result, tuple):
        return (unique,) + result[1:]
    else:
        return unique
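# A usage sketch (hypothetical packed vertex data): deduplicate in one sort
# instead of one sort per field.
#
#   verts = np.zeros(4, dtype=np.dtype([('pos', np.float32), ('uv', np.float32)]))
#   verts['pos'] = [0.0, 1.0, 0.0, 1.0]
#   unique, inverse = fast_structured_np_unique(verts, return_inverse=True)
#   # len(unique) == 2 and `inverse` maps each vertex to its unique row.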
55
scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_animation.py
Normal file
@ -0,0 +1,55 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
from .....io.com import gltf2_io
from .....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....com.gltf2_blender_extras import generate_extras
from .gltf2_blender_gather_fcurves_channels import gather_animation_fcurves_channels


def gather_animation_fcurves(
        obj_uuid: str,
        blender_action: bpy.types.Action,
        export_settings
):

    name = __gather_name(blender_action, export_settings)

    channels, to_be_sampled, extra_samplers = __gather_channels_fcurves(obj_uuid, blender_action, export_settings)

    animation = gltf2_io.Animation(
        channels=channels,
        extensions=None,
        extras=__gather_extras(blender_action, export_settings),
        name=name,
        samplers=[]
    )

    if not animation.channels:
        return None, to_be_sampled, extra_samplers

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
    export_user_extensions('animation_gather_fcurve', export_settings, blender_object, blender_action)

    return animation, to_be_sampled, extra_samplers


def __gather_name(blender_action: bpy.types.Action,
                  export_settings
                  ) -> str:
    return blender_action.name


def __gather_channels_fcurves(
        obj_uuid: str,
        blender_action: bpy.types.Action,
        export_settings):
    return gather_animation_fcurves_channels(obj_uuid, blender_action, export_settings)


def __gather_extras(blender_action, export_settings):
    if export_settings['gltf_extras']:
        return generate_extras(blender_action)
    return None
53
scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channel_target.py
Normal file
@ -0,0 +1,53 @@
|
||||
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from .....io.com import gltf2_io
from .....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....com.gltf2_blender_conversion import get_target
from ...gltf2_blender_gather_cache import cached
from ...gltf2_blender_gather_joints import gather_joint_vnode


@cached
def gather_fcurve_channel_target(
        obj_uuid: str,
        channels: typing.Tuple[bpy.types.FCurve],
        bone: typing.Optional[str],
        export_settings
) -> gltf2_io.AnimationChannelTarget:

    animation_channel_target = gltf2_io.AnimationChannelTarget(
        extensions=None,
        extras=None,
        node=__gather_node(obj_uuid, bone, export_settings),
        path=__gather_path(channels, export_settings)
    )

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
    export_user_extensions('animation_gather_fcurve_channel_target', export_settings, blender_object, bone)

    return animation_channel_target


def __gather_node(obj_uuid: str,
                  bone: typing.Union[str, None],
                  export_settings
                  ) -> gltf2_io.Node:

    if bone is not None:
        return gather_joint_vnode(export_settings['vtree'].nodes[obj_uuid].bones[bone], export_settings)
    else:
        return export_settings['vtree'].nodes[obj_uuid].node


def __gather_path(channels: typing.Tuple[bpy.types.FCurve],
                  export_settings
                  ) -> str:

    # Note: channels may contain None items (only for SK, when some SK are not animated),
    # so pick a non-None channel item
    target = [c for c in channels if c is not None][0].data_path.split('.')[-1]

    return get_target(target)
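A minimal, dependency-free sketch of what __gather_path() above does with a Blender data path; the mapping dict stands in for get_target() and is illustrative, not the add-on's actual table:

def gather_path_sketch(data_path: str) -> str:
    # 'pose.bones["Bone"].rotation_quaternion' -> 'rotation_quaternion'
    prop = data_path.split('.')[-1]
    # Stand-in for get_target(): Blender property name -> glTF animation path
    return {
        "location": "translation",
        "rotation_quaternion": "rotation",
        "rotation_euler": "rotation",
        "scale": "scale",
        "value": "weights",  # shape key channels drive morph target weights
    }.get(prop, prop)

assert gather_path_sketch('pose.bones["Bone"].rotation_quaternion') == "rotation"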
377
scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channels.py
Normal file
@ -0,0 +1,377 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from .....io.exp.gltf2_io_user_extensions import export_user_extensions
from .....blender.com.gltf2_blender_data_path import skip_sk
from .....io.com import gltf2_io
from ....exp.gltf2_blender_gather_cache import cached
from ....com.gltf2_blender_data_path import get_target_object_path, get_target_property_name, get_rotation_modes
from ....com.gltf2_blender_conversion import get_target, get_channel_from_target
from ...gltf2_blender_get import get_object_from_datapath
from .gltf2_blender_gather_fcurves_channel_target import gather_fcurve_channel_target
from .gltf2_blender_gather_fcurves_sampler import gather_animation_fcurves_sampler


@cached
def gather_animation_fcurves_channels(
        obj_uuid: int,
        blender_action: bpy.types.Action,
        export_settings
):

    channels_to_perform, to_be_sampled, extra_channels_to_perform = get_channel_groups(
        obj_uuid, blender_action, export_settings)

    custom_range = None
    if blender_action.use_frame_range:
        custom_range = (blender_action.frame_start, blender_action.frame_end)

    channels = []
    extra_samplers = []

    for chan in [chan for chan in channels_to_perform.values() if len(chan['properties']) != 0]:
        for channel_group in chan['properties'].values():
            channel = __gather_animation_fcurve_channel(
                chan['obj_uuid'], channel_group, chan['bone'], custom_range, export_settings)
            if channel is not None:
                channels.append(channel)

    if export_settings['gltf_export_extra_animations']:
        for chan in [chan for chan in extra_channels_to_perform.values() if len(chan['properties']) != 0]:
            for channel_group_name, channel_group in chan['properties'].items():

                # No glTF channel here, as we don't have any target
                # Trying to retrieve sampler directly
                sampler = __gather_sampler(obj_uuid, tuple(channel_group), None, custom_range, True, export_settings)
                if sampler is not None:
                    extra_samplers.append((channel_group_name, sampler, "OBJECT", None))

    return channels, to_be_sampled, extra_samplers


def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_settings, no_sample_option=False):
    # no_sample_option is used when we want to retrieve all SK channels, to be evaluated.
    targets = {}
    targets_extra = {}

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

    # When multiple rotation modes are detected, keep only the one currently used
    multiple_rotation_mode_detected = {}

    # When both normal and delta transforms are used --> add to the to_be_sampled list
    to_be_sampled = []  # (object_uuid, type, prop, optional(bone.name))

    for fcurve in blender_action.fcurves:
        type_ = None
        # In some invalid files, a channel has no keyframes at all ... such a channel needs to be ignored
        if len(fcurve.keyframe_points) == 0:
            continue
        try:
            # example of target_property : location, rotation_quaternion, value
            target_property = get_target_property_name(fcurve.data_path)
        except:
            export_settings['log'].warning(
                "Invalid animation fcurve data path on action {}".format(
                    blender_action.name))
            continue
        object_path = get_target_object_path(fcurve.data_path)

        # find the object affected by this action
        # object_path : blank for blender_object itself, key_blocks["<name>"] for SK, pose.bones["<name>"] for bones
        if not object_path:
            if fcurve.data_path.startswith("["):
                target = blender_object
                type_ = "EXTRA"
            else:
                target = blender_object
                type_ = "OBJECT"
        else:
            try:
                target = get_object_from_datapath(blender_object, object_path)

                if blender_object.type == "ARMATURE" and fcurve.data_path.startswith("pose.bones["):
                    if target_property is not None:
                        if get_target(target_property) is not None:
                            type_ = "BONE"
                        else:
                            type_ = "EXTRA"
                    else:
                        type_ = "EXTRA"

                else:
                    type_ = "EXTRA"
                if blender_object.type == "MESH" and object_path.startswith("key_blocks"):
                    shape_key = blender_object.data.shape_keys.path_resolve(object_path)
                    if skip_sk(blender_object.data.shape_keys.key_blocks, shape_key):
                        continue
                    target = blender_object.data.shape_keys
                    type_ = "SK"
            except ValueError as e:
                # if the object is a mesh and the action target path can not be resolved, we know that this is a morph
                # animation.
                if blender_object.type == "MESH":
                    try:
                        shape_key = blender_object.data.shape_keys.path_resolve(object_path)
                        if skip_sk(blender_object.data.shape_keys.key_blocks, shape_key):
                            continue
                        target = blender_object.data.shape_keys
                        type_ = "SK"
                    except:
                        # Something is wrong, for example a bone animation is linked to an object mesh...
                        export_settings['log'].warning(
                            "Invalid animation fcurve data path on action {}".format(
                                blender_action.name))
                        continue
                else:
                    export_settings['log'].warning("Animation target {} not found".format(object_path))
                    continue

        # Detect that object or bone are not multiple keyed for euler and quaternion
        # Keep only the current rotation mode used by the object
        rotation, rotation_modes = get_rotation_modes(target_property)
        if rotation and target.rotation_mode not in rotation_modes:
            multiple_rotation_mode_detected[target] = True
            continue

        if type_ == "EXTRA":
            # No group by property, because we are going to export each fcurve separately
            # We are going to evaluate the fcurve, so no check whether it needs to be sampled
            if target_property is None:
                target_property = fcurve.data_path
            if not target_property.startswith("pose.bones["):
                target_property = fcurve.data_path
            target_data = targets_extra.get(target, {})
            target_data['type'] = type_
            target_data['bone'] = target.name
            target_data['obj_uuid'] = obj_uuid
            target_properties = target_data.get('properties', {})
            channels = target_properties.get(target_property, [])
            channels.append(fcurve)
            target_properties[target_property] = channels
            target_data['properties'] = target_properties
            targets_extra[target] = target_data
            continue

        # group channels by target object and affected property of the target
        target_data = targets.get(target, {})
        target_data['type'] = type_
        target_data['obj_uuid'] = obj_uuid
        target_data['bone'] = target.name if type_ == "BONE" else None

        target_properties = target_data.get('properties', {})
        channels = target_properties.get(target_property, [])
        channels.append(fcurve)
        target_properties[target_property] = channels
        target_data['properties'] = target_properties
        targets[target] = target_data

    for targ in multiple_rotation_mode_detected.keys():
        export_settings['log'].warning("Multiple rotation mode detected for {}".format(targ.name))

    # Now that all curves are extracted,
    # - check that there is no normal + delta transforms
    # - check that each group can be exported not sampled
    # - be sure that shapekeys curves are correctly sorted

    for obj, target_data in targets.items():
        properties = target_data['properties'].keys()
        properties = [get_target(prop) for prop in properties]
        if len(properties) != len(set(properties)):
            new_properties = {}
            # There are some transformation + delta transformation
            # We can't use fcurves, so the property will be sampled
            for prop in target_data['properties'].keys():
                if len([get_target(p) for p in target_data['properties'] if get_target(p) == get_target(prop)]) > 1:
                    # normal + delta
                    to_be_sampled.append((obj_uuid, target_data['type'], get_channel_from_target(
                        get_target(prop)), None))  # None, because no delta exists on Bones
                else:
                    new_properties[prop] = target_data['properties'][prop]

            target_data['properties'] = new_properties

        # Check if the property can be exported without sampling
        new_properties = {}
        for prop in target_data['properties'].keys():
            if no_sample_option is False and needs_baking(
                    obj_uuid, target_data['properties'][prop], export_settings) is True:
                to_be_sampled.append((obj_uuid, target_data['type'], get_channel_from_target(
                    get_target(prop)), target_data['bone']))  # bone can be None if not a bone :)
            else:
                new_properties[prop] = target_data['properties'][prop]

        target_data['properties'] = new_properties

        # Make sure sort is correct for shapekeys
        if target_data['type'] == "SK":
            for prop in target_data['properties'].keys():
                target_data['properties'][prop] = tuple(
                    __get_channel_group_sorted(
                        target_data['properties'][prop],
                        export_settings['vtree'].nodes[obj_uuid].blender_object))
        else:
            for prop in target_data['properties'].keys():
                target_data['properties'][prop] = tuple(target_data['properties'][prop])

    to_be_sampled = list(set(to_be_sampled))

    return targets, to_be_sampled, targets_extra


def __get_channel_group_sorted(channels: typing.Tuple[bpy.types.FCurve], blender_object: bpy.types.Object):
    # if this is a shapekey animation, we need to sort in the same order as the shapekeys
    # else, no need to sort
    if blender_object.type == "MESH":
        first_channel = channels[0]
        object_path = get_target_object_path(first_channel.data_path)
        if object_path:
            if not blender_object.data.shape_keys:
                # Something is wrong. Maybe the user assigned an armature action
                # to a mesh object. Returning without sorting
                return channels

            # These are shapekeys, we need to sort channels
            shapekeys_idx = {}
            cpt_sk = 0
            for sk in blender_object.data.shape_keys.key_blocks:
                if skip_sk(blender_object.data.shape_keys.key_blocks, sk):
                    continue
                shapekeys_idx[sk.name] = cpt_sk
                cpt_sk += 1

            # Note: channels will have some None items only for SK if some SK are not animated
            idx_channel_mapping = []
            all_sorted_channels = []
            for sk_c in channels:
                try:
                    sk_name = blender_object.data.shape_keys.path_resolve(get_target_object_path(sk_c.data_path)).name
                    idx = shapekeys_idx[sk_name]
                    idx_channel_mapping.append((shapekeys_idx[sk_name], sk_c))
                except:
                    # Something is wrong. For example, an armature action linked to a mesh object
                    continue

            existing_idx = dict(idx_channel_mapping)
            for i in range(0, cpt_sk):
                if i not in existing_idx.keys():
                    all_sorted_channels.append(None)
                else:
                    all_sorted_channels.append(existing_idx[i])

            if all([i is None for i in all_sorted_channels]):  # all channels in error, and some non keyed SK
                return channels  # This happens when an armature action is linked to a mesh object with non keyed SK

            return tuple(all_sorted_channels)

    # if not shapekeys, stay in same order, because order doesn't matter
    return channels


def __gather_animation_fcurve_channel(obj_uuid: str,
                                      channel_group: typing.Tuple[bpy.types.FCurve],
                                      bone: typing.Optional[str],
                                      custom_range: typing.Optional[set],
                                      export_settings
                                      ) -> typing.Union[gltf2_io.AnimationChannel, None]:

    __target = __gather_target(obj_uuid, channel_group, bone, export_settings)
    if __target.path is not None:
        sampler = __gather_sampler(obj_uuid, channel_group, bone, custom_range, False, export_settings)

        if sampler is None:
            # After check, no need to animate this node for this channel
            return None

        animation_channel = gltf2_io.AnimationChannel(
            extensions=None,
            extras=None,
            sampler=sampler,
            target=__target
        )

        blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
        export_user_extensions('animation_gather_fcurve_channel_target', export_settings, blender_object, bone)

        return animation_channel
    return None


def __gather_target(obj_uuid: str,
                    channel_group: typing.Tuple[bpy.types.FCurve],
                    bone: typing.Optional[str],
                    export_settings
                    ) -> gltf2_io.AnimationChannelTarget:

    return gather_fcurve_channel_target(obj_uuid, channel_group, bone, export_settings)


def __gather_sampler(obj_uuid: str,
                     channel_group: typing.Tuple[bpy.types.FCurve],
                     bone: typing.Optional[str],
                     custom_range: typing.Optional[set],
                     extra_mode: bool,
                     export_settings) -> gltf2_io.AnimationSampler:

    return gather_animation_fcurves_sampler(obj_uuid, channel_group, bone, custom_range, extra_mode, export_settings)


def needs_baking(obj_uuid: str,
                 channels: typing.Tuple[bpy.types.FCurve],
                 export_settings
                 ) -> bool:
    """
    Check if baking is needed.

    Some blender animations need to be baked as they cannot directly be expressed in glTF.
    """
    def all_equal(lst):
        return lst[1:] == lst[:-1]

    # Note: channels has some None items only for SK if some SK are not animated
    # Sampling due to unsupported interpolation
    interpolation = [c for c in channels if c is not None][0].keyframe_points[0].interpolation
    if interpolation not in ["BEZIER", "LINEAR", "CONSTANT"]:
        export_settings['log'].warning(
            "Baking animation because of an unsupported interpolation method: {}".format(interpolation)
        )
        return True

    if any(any(k.interpolation != interpolation for k in c.keyframe_points) for c in channels if c is not None):
        # There are different interpolation methods in one action group
        export_settings['log'].warning(
            "Baking animation because there are keyframes with different "
            "interpolation methods in one channel"
        )
        return True

    if not all_equal([len(c.keyframe_points) for c in channels if c is not None]):
        export_settings['log'].warning(
            "Baking animation because the number of keyframes is not "
            "equal for all channel tracks"
        )
        return True

    if len([c for c in channels if c is not None][0].keyframe_points) <= 1:
        # we need to bake to 'STEP', as at least two keyframes are required to interpolate
        return True

    if not all_equal(list(zip([[k.co[0] for k in c.keyframe_points] for c in channels if c is not None]))):
        # The channels have differently located keyframes
        export_settings['log'].warning("Baking animation because of differently located keyframes in one channel")
        return True

    if export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE":
        animation_target = get_object_from_datapath(
            export_settings['vtree'].nodes[obj_uuid].blender_object, [
                c for c in channels if c is not None][0].data_path)
        if isinstance(animation_target, bpy.types.PoseBone):
            if len(animation_target.constraints) != 0:
                # Constraints such as IK act on the bone -> cannot be represented in glTF atm
                export_settings['log'].warning("Baking animation because of unsupported constraints acting on the bone")
                return True

    return False
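To make the needs_baking() rules above concrete, here is a dependency-free illustration of the "same keyframe layout" check, using simple stand-in objects instead of bpy.types.FCurve (the names are hypothetical, not the add-on's API):

from collections import namedtuple

Key = namedtuple("Key", ["co", "interpolation"])
Curve = namedtuple("Curve", ["keyframe_points"])

def all_equal(lst):
    return lst[1:] == lst[:-1]

x = Curve([Key((1.0, 0.0), "LINEAR"), Key((10.0, 1.0), "LINEAR")])
y = Curve([Key((1.0, 0.0), "LINEAR")])  # fewer keyframes than x

# Different keyframe counts across one channel group -> the group must be baked.
assert not all_equal([len(c.keyframe_points) for c in (x, y)])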
208
scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_keyframes.py
Normal file
@ -0,0 +1,208 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from .....blender.com.gltf2_blender_data_path import get_sk_exported
from ....com.gltf2_blender_data_path import get_target_object_path
from ...gltf2_blender_gather_cache import cached
from ..gltf2_blender_gather_keyframes import Keyframe


@cached
def gather_fcurve_keyframes(
        obj_uuid: str,
        channel_group: typing.Tuple[bpy.types.FCurve],
        bone: typing.Optional[str],
        custom_range: typing.Optional[set],
        extra_mode: bool,
        export_settings):

    keyframes = []

    non_keyed_values = gather_non_keyed_values(obj_uuid, channel_group, bone, extra_mode, export_settings)

    # Just use the keyframes as they are specified in blender
    # Note: channels has some None items only for SK if some SK are not animated
    frames = [keyframe.co[0] for keyframe in [c for c in channel_group if c is not None][0].keyframe_points]
    # some invalid files have duplicate frames at the same time; remove them
    frames = sorted(set(frames))

    if export_settings['gltf_negative_frames'] == "CROP":
        frames = [f for f in frames if f >= 0]

    if export_settings['gltf_frame_range'] is True:
        frames = [f for f in frames if f >= bpy.context.scene.frame_start and f <= bpy.context.scene.frame_end]

    if custom_range is not None:
        frames = [f for f in frames if f >= custom_range[0] and f <= custom_range[1]]

    if len(frames) == 0:
        return None

    for i, frame in enumerate(frames):
        key = Keyframe(channel_group, frame, None)
        key.value = [c.evaluate(frame) for c in channel_group if c is not None]
        # Complete key with non keyed values, if needed
        if len([c for c in channel_group if c is not None]) != key.get_target_len():
            complete_key(key, non_keyed_values)

        # compute tangents for cubic spline interpolation
        if [c for c in channel_group if c is not None][0].keyframe_points[0].interpolation == "BEZIER":
            # Construct the in tangent
            if frame == frames[0]:
                # start in-tangent should become all zero
                key.set_first_tangent()
            else:
                # otherwise construct an in tangent coordinate from the keyframe's control points. We intermediately
                # use a point at t+1 to define the tangent. This allows the tangent control point to be transformed
                # normally, but only works for locally linear transformation. The more non-linear a transform, the
                # more imprecise this method is.
                # We could use any other (v1, t1) for which (v1 - v0) / (t1 - t0) equals the tangent. By using t+1
                # for both in and out tangents, we guarantee that (even if there are errors or numerical imprecisions)
                # symmetrical control points translate to symmetrical tangents.
                # Note: I am not sure that linearity is never broken with quaternions and their normalization.
                # Especially at sign swap it might occur that the value gets negated but the control point not.
                # I have however not once encountered an issue with this.
                key.in_tangent = [c.keyframe_points[i].co[1] +
                                  (c.keyframe_points[i].handle_left[1] -
                                   c.keyframe_points[i].co[1]) /
                                  (c.keyframe_points[i].handle_left[0] -
                                   c.keyframe_points[i].co[0]) for c in channel_group if c is not None]
            # Construct the out tangent
            if frame == frames[-1]:
                # end out-tangent should become all zero
                key.set_last_tangent()
            else:
                # otherwise construct an out tangent coordinate from the keyframe's control points.
                # This happens the same way the in tangents are handled above.
                key.out_tangent = [c.keyframe_points[i].co[1] +
                                   (c.keyframe_points[i].handle_right[1] -
                                    c.keyframe_points[i].co[1]) /
                                   (c.keyframe_points[i].handle_right[0] -
                                    c.keyframe_points[i].co[0]) for c in channel_group if c is not None]

            __complete_key_tangents(key, non_keyed_values)

        keyframes.append(key)

    return keyframes


def gather_non_keyed_values(
        obj_uuid: str,
        channel_group: typing.Tuple[bpy.types.FCurve],
        bone: typing.Optional[str],
        extra_mode: bool,
        export_settings
) -> typing.Tuple[typing.Optional[float]]:

    if extra_mode is True:
        # No need to check if there are non keyed values, as we export fcurves independently
        return [None]

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

    non_keyed_values = []

    # Note: channels has some None items only for SK if some SK are not animated
    if None not in channel_group:
        # classic case for object TRS or bone TRS
        # Or if all morph targets are animated

        target = channel_group[0].data_path.split('.')[-1]
        if target == "value":
            # All morph targets are animated
            return tuple([None] * len(channel_group))

        indices = [c.array_index for c in channel_group]
        indices.sort()
        length = {
            "delta_location": 3,
            "delta_rotation_euler": 3,
            "delta_rotation_quaternion": 4,
            "delta_scale": 3,
            "location": 3,
            "rotation_axis_angle": 4,
            "rotation_euler": 3,
            "rotation_quaternion": 4,
            "scale": 3,
            "value": len(channel_group)
        }.get(target)

        if length is None:
            # This is not a known target
            return ()

        for i in range(0, length):
            if i in indices:
                non_keyed_values.append(None)
            else:
                if bone is None:
                    non_keyed_values.append({
                        "delta_location": blender_object.delta_location,
                        "delta_rotation_euler": blender_object.delta_rotation_euler,
                        "delta_rotation_quaternion": blender_object.delta_rotation_quaternion,
                        "delta_scale": blender_object.delta_scale,
                        "location": blender_object.location,
                        "rotation_axis_angle": blender_object.rotation_axis_angle,
                        "rotation_euler": blender_object.rotation_euler,
                        "rotation_quaternion": blender_object.rotation_quaternion,
                        "scale": blender_object.scale
                    }[target][i])
                else:
                    # TODO, this is not working if the action is not active (NLA case for example) ?
                    trans, rot, scale = blender_object.pose.bones[bone].matrix_basis.decompose()
                    non_keyed_values.append({
                        "location": trans,
                        "rotation_axis_angle": rot,
                        "rotation_euler": rot,
                        "rotation_quaternion": rot,
                        "scale": scale
                    }[target][i])

        return tuple(non_keyed_values)

    else:
        # We are in the morph target case, where not all targets are animated
        # So channels has some None items
        first_channel = [c for c in channel_group if c is not None][0]
        object_path = get_target_object_path(first_channel.data_path)
        if object_path:
            shapekeys_idx = {}
            cpt_sk = 0
            for sk in get_sk_exported(blender_object.data.shape_keys.key_blocks):
                shapekeys_idx[cpt_sk] = sk.name
                cpt_sk += 1

            for idx_c, channel in enumerate(channel_group):
                if channel is None:
                    non_keyed_values.append(blender_object.data.shape_keys.key_blocks[shapekeys_idx[idx_c]].value)
                else:
                    non_keyed_values.append(None)

        return tuple(non_keyed_values)


def complete_key(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]):
    """
    Complete keyframe with non keyed values
    """
    for i in range(0, key.get_target_len()):
        if i in key.get_indices():
            continue  # this is a keyed array_index or a SK animated
        key.set_value_index(i, non_keyed_values[i])


def __complete_key_tangents(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]):
    """
    Complete keyframe with non keyed values for tangents
    """
    for i in range(0, key.get_target_len()):
        if i in key.get_indices():
            continue  # this is a keyed array_index or a SK animated
        if key.in_tangent is not None:
            key.set_value_index_in(i, non_keyed_values[i])
        if key.out_tangent is not None:
            key.set_value_index_out(i, non_keyed_values[i])
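The in-tangent formula above is easier to follow with numbers. A dependency-free worked example: the left Bezier handle defines a slope, and evaluating that slope one frame away (the t+1 trick described in the comments) gives the intermediate control point the exporter stores (values invented):

co = (10.0, 2.0)           # keyframe (frame, value)
handle_left = (8.0, 1.0)   # left Bezier handle (frame, value)

# slope = (1.0 - 2.0) / (8.0 - 10.0) = 0.5
slope = (handle_left[1] - co[1]) / (handle_left[0] - co[0])
in_tangent_point = co[1] + slope  # value reached at t+1: 2.5

assert in_tangent_point == 2.5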
231
scripts/addons_core/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_sampler.py
Normal file
@ -0,0 +1,231 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
import mathutils
from .....io.com import gltf2_io
from .....io.com import gltf2_io_constants
from .....blender.com.gltf2_blender_conversion import get_gltf_interpolation
from .....io.exp import gltf2_io_binary_data
from .....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....com.gltf2_blender_data_path import get_target_property_name
from ....com import gltf2_blender_math
from ...gltf2_blender_gather_cache import cached
from ...gltf2_blender_gather_accessors import gather_accessor
from ...gltf2_blender_gather_tree import VExportNode
from .gltf2_blender_gather_fcurves_keyframes import gather_fcurve_keyframes


@cached
def gather_animation_fcurves_sampler(
        obj_uuid: str,
        channel_group: typing.Tuple[bpy.types.FCurve],
        bone: typing.Optional[str],
        custom_range: typing.Optional[set],
        extra_mode: bool,
        export_settings
) -> gltf2_io.AnimationSampler:

    # matrix_parent_inverse needed for fcurves?

    keyframes = __gather_keyframes(
        obj_uuid,
        channel_group,
        bone,
        custom_range,
        extra_mode,
        export_settings)

    if keyframes is None:
        # After check, no need to animate this node for this channel
        return None

    # Now we have raw input/output; we need to convert them to glTF data
    input, output = __convert_keyframes(obj_uuid, channel_group, bone, keyframes, extra_mode, export_settings)

    sampler = gltf2_io.AnimationSampler(
        extensions=None,
        extras=None,
        input=input,
        interpolation=__gather_interpolation(channel_group, export_settings),
        output=output
    )

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
    export_user_extensions('animation_gather_fcurve_channel_sampler', export_settings, blender_object, bone)

    return sampler


@cached
def __gather_keyframes(
        obj_uuid: str,
        channel_group: typing.Tuple[bpy.types.FCurve],
        bone: typing.Optional[str],
        custom_range: typing.Optional[set],
        extra_mode: bool,
        export_settings
):

    return gather_fcurve_keyframes(obj_uuid, channel_group, bone, custom_range, extra_mode, export_settings)


def __convert_keyframes(
        obj_uuid: str,
        channel_group: typing.Tuple[bpy.types.FCurve],
        bone_name: typing.Optional[str],
        keyframes,
        extra_mode: bool,
        export_settings):

    times = [k.seconds for k in keyframes]
    input = gather_accessor(
        gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float),
        gltf2_io_constants.ComponentType.Float,
        len(times),
        tuple([max(times)]),
        tuple([min(times)]),
        gltf2_io_constants.DataType.Scalar,
        export_settings)

    is_yup = export_settings['gltf_yup']

    need_rotation_correction = (
        export_settings['gltf_cameras'] and export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.CAMERA) or (
        export_settings['gltf_lights'] and export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.LIGHT)

    target_datapath = [c for c in channel_group if c is not None][0].data_path

    if bone_name is not None:
        bone = export_settings['vtree'].nodes[obj_uuid].blender_object.pose.bones[bone_name]
        if bone.parent is None:
            # bone at root of armature
            axis_basis_change = mathutils.Matrix.Identity(4)
            if is_yup:
                axis_basis_change = mathutils.Matrix(
                    ((1.0, 0.0, 0.0, 0.0),
                     (0.0, 0.0, 1.0, 0.0),
                     (0.0, -1.0, 0.0, 0.0),
                     (0.0, 0.0, 0.0, 1.0)))
            correction_matrix_local = axis_basis_change @ bone.bone.matrix_local
        else:
            # Bone is not at root of armature
            # There are 2 cases :
            parent_uuid = export_settings['vtree'].nodes[export_settings['vtree']
                                                         .nodes[obj_uuid].bones[bone_name]].parent_uuid
            if parent_uuid is not None and export_settings['vtree'].nodes[parent_uuid].blender_type == VExportNode.BONE:
                # exported bone is not at the root of the armature either
                blender_bone_parent = export_settings['vtree'].nodes[parent_uuid].blender_bone
                correction_matrix_local = (
                    blender_bone_parent.bone.matrix_local.inverted_safe() @
                    bone.bone.matrix_local
                )
            else:
                # exported bone (after filter) is at the root of the armature
                axis_basis_change = mathutils.Matrix.Identity(4)
                if is_yup:
                    axis_basis_change = mathutils.Matrix(
                        ((1.0, 0.0, 0.0, 0.0),
                         (0.0, 0.0, 1.0, 0.0),
                         (0.0, -1.0, 0.0, 0.0),
                         (0.0, 0.0, 0.0, 1.0)))
                correction_matrix_local = axis_basis_change

        transform = correction_matrix_local

    else:
        if export_settings['vtree'].nodes[obj_uuid].blender_object.parent is not None:
            matrix_parent_inverse = export_settings['vtree'].nodes[obj_uuid].blender_object.matrix_parent_inverse.copy(
            ).freeze()
        else:
            matrix_parent_inverse = mathutils.Matrix.Identity(4).freeze()
        transform = matrix_parent_inverse

    values = []
    fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base)
    for keyframe in keyframes:

        if extra_mode is True:
            # Export as is, without trying to convert
            keyframe_value = keyframe.value
            if keyframe.in_tangent is not None:
                keyframe_value = keyframe.in_tangent + keyframe_value
            if keyframe.out_tangent is not None:
                keyframe_value = keyframe_value + keyframe.out_tangent
            values += keyframe_value
            continue

        # Transform the data and build glTF control points
        value = gltf2_blender_math.transform(keyframe.value, target_datapath, transform, need_rotation_correction)
        if is_yup and bone_name is None:
            value = gltf2_blender_math.swizzle_yup(value, target_datapath)
        keyframe_value = gltf2_blender_math.mathutils_to_gltf(value)

        if keyframe.in_tangent is not None:
            # we can directly transform the tangent as it currently is represented by a control point
            in_tangent = gltf2_blender_math.transform(
                keyframe.in_tangent, target_datapath, transform, need_rotation_correction)
            if is_yup and bone_name is None:
                in_tangent = gltf2_blender_math.swizzle_yup(in_tangent, target_datapath)
            # the tangent in glTF is relative to the keyframe value and uses seconds
            if not isinstance(value, list):
                in_tangent = fps * (in_tangent - value)
            else:
                in_tangent = [fps * (in_tangent[i] - value[i]) for i in range(len(value))]
            keyframe_value = gltf2_blender_math.mathutils_to_gltf(in_tangent) + keyframe_value  # append

        if keyframe.out_tangent is not None:
            # we can directly transform the tangent as it currently is represented by a control point
            out_tangent = gltf2_blender_math.transform(
                keyframe.out_tangent, target_datapath, transform, need_rotation_correction)
            if is_yup and bone_name is None:
                out_tangent = gltf2_blender_math.swizzle_yup(out_tangent, target_datapath)
            # the tangent in glTF is relative to the keyframe value and uses seconds
            if not isinstance(value, list):
                out_tangent = fps * (out_tangent - value)
            else:
                out_tangent = [fps * (out_tangent[i] - value[i]) for i in range(len(value))]
            keyframe_value = keyframe_value + gltf2_blender_math.mathutils_to_gltf(out_tangent)  # append

        values += keyframe_value

    # store the keyframe data in a binary buffer
    component_type = gltf2_io_constants.ComponentType.Float
    if get_target_property_name(target_datapath) == "value":
        # channels with 'weight' targets must have scalar accessors
        data_type = gltf2_io_constants.DataType.Scalar
    else:
        data_type = gltf2_io_constants.DataType.vec_type_from_num(len(keyframes[0].value))

    output = gltf2_io.Accessor(
        buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type),
        byte_offset=None,
        component_type=component_type,
        count=len(values) // gltf2_io_constants.DataType.num_elements(data_type),
        extensions=None,
        extras=None,
        max=None,
        min=None,
        name=None,
        normalized=None,
        sparse=None,
        type=data_type
    )

    return input, output


def __gather_interpolation(
        channel_group: typing.Tuple[bpy.types.FCurve],
        export_settings,
) -> str:

    # Note: channels has some None items only for SK if some SK are not animated
    # Non-sampled keyframes imply that all keys are of the same type, and that the
    # type is supported by glTF (because we checked in needs_baking).
    blender_keyframe = [c for c in channel_group if c is not None][0].keyframe_points[0]

    # Select the interpolation method.
    return get_gltf_interpolation(blender_keyframe.interpolation)
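And the final conversion done in __convert_keyframes() above, in isolation: glTF cubic-spline tangents are per-second slopes relative to the keyframe value, so the intermediate "value at t+1 frame" point is re-expressed as fps * (point - value). A dependency-free sketch continuing the numbers from the previous example:

fps = 24.0                # frames per second (scene dependent)
value = 2.0               # keyframe value
in_tangent_point = 2.5    # control point one frame away (see previous sketch)

gltf_in_tangent = fps * (in_tangent_point - value)  # slope per second
assert gltf_in_tangent == 12.0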
740
scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_action.py
Normal file
@ -0,0 +1,740 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from ....io.com import gltf2_io
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....blender.com.gltf2_blender_conversion import get_gltf_interpolation
from ...com.gltf2_blender_data_path import is_bone_anim_channel
from ...com.gltf2_blender_extras import generate_extras
from ..gltf2_blender_gather_cache import cached
from ..gltf2_blender_gather_tree import VExportNode
from .fcurves.gltf2_blender_gather_fcurves_animation import gather_animation_fcurves
from .sampled.armature.armature_action_sampled import gather_action_armature_sampled
from .sampled.armature.armature_channels import gather_sampled_bone_channel
from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled
from .sampled.shapekeys.gltf2_blender_gather_sk_action_sampled import gather_action_sk_sampled
from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels, gather_sampled_object_channel
from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel
from .gltf2_blender_gather_drivers import get_sk_drivers
from .gltf2_blender_gather_animation_utils import reset_bone_matrix, reset_sk_data, link_samplers, add_slide_data, merge_tracks_perform, bake_animation


def gather_actions_animations(export_settings):

    prepare_actions_range(export_settings)

    animations = []
    merged_tracks = {}

    vtree = export_settings['vtree']
    for obj_uuid in vtree.get_all_objects():

        # Do not manage non-exported objects
        if vtree.nodes[obj_uuid].node is None:
            if export_settings["gltf_armature_object_remove"] is True:
                # Manage armature object, as this is the object that has the animation
                if not vtree.nodes[obj_uuid].blender_object:
                    continue
            else:
                continue

        if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.COLLECTION:
            continue

        animations_, merged_tracks = gather_action_animations(obj_uuid, merged_tracks, len(animations), export_settings)
        animations += animations_

    if export_settings['gltf_animation_mode'] == "ACTIVE_ACTIONS":
        # Fake an animation with all animations of the scene
        merged_tracks = {}
        merged_tracks_name = 'Animation'
        if len(export_settings['gltf_nla_strips_merged_animation_name']) > 0:
            merged_tracks_name = export_settings['gltf_nla_strips_merged_animation_name']
        merged_tracks[merged_tracks_name] = []
        for idx, animation in enumerate(animations):
            merged_tracks[merged_tracks_name].append(idx)

    new_animations = merge_tracks_perform(merged_tracks, animations, export_settings)

    return new_animations


def prepare_actions_range(export_settings):

    track_slide = {}

    vtree = export_settings['vtree']
    for obj_uuid in vtree.get_all_objects():

        if vtree.nodes[obj_uuid].blender_type == VExportNode.COLLECTION:
            continue

        # Do not manage non-exported objects
        if vtree.nodes[obj_uuid].node is None:
            if export_settings["gltf_armature_object_remove"] is True:
                # Manage armature object, as this is the object that has the animation
                if not vtree.nodes[obj_uuid].blender_object:
                    continue
            else:
                continue

        if obj_uuid not in export_settings['ranges']:
            export_settings['ranges'][obj_uuid] = {}

        blender_actions = __get_blender_actions(obj_uuid, export_settings)
        for blender_action, track, type_ in blender_actions:

            # What about frame_range bug for single keyframe animations ? 107030
            start_frame = int(blender_action.frame_range[0])
            end_frame = int(blender_action.frame_range[1])

            if end_frame - start_frame == 1:
                # To workaround Blender bug 107030, check manually
                try:  # Avoid crash in case of strange/buggy fcurves
                    start_frame = int(min([c.range()[0] for c in blender_action.fcurves]))
                    end_frame = int(max([c.range()[1] for c in blender_action.fcurves]))
                except:
                    pass

            export_settings['ranges'][obj_uuid][blender_action.name] = {}

            # If there are negative frames and cropping -> set start at 0
            if start_frame < 0 and export_settings['gltf_negative_frames'] == "CROP":
                start_frame = 0

            if export_settings['gltf_frame_range'] is True:
                start_frame = max(bpy.context.scene.frame_start, start_frame)
                end_frame = min(bpy.context.scene.frame_end, end_frame)

            export_settings['ranges'][obj_uuid][blender_action.name]['start'] = start_frame
            export_settings['ranges'][obj_uuid][blender_action.name]['end'] = end_frame

            if export_settings['gltf_negative_frames'] == "SLIDE":
                if track is not None:
                    if not (track.startswith("NlaTrack") or track.startswith("[Action Stash]")):
                        if track not in track_slide.keys() or (
                                track in track_slide.keys() and start_frame < track_slide[track]):
                            track_slide.update({track: start_frame})
                    else:
                        if start_frame < 0:
                            add_slide_data(start_frame, obj_uuid, blender_action.name, export_settings)
                else:
                    if export_settings['gltf_animation_mode'] == "ACTIVE_ACTIONS":
                        if None not in track_slide.keys() or (
                                None in track_slide.keys() and start_frame < track_slide[None]):
                            track_slide.update({None: start_frame})
                    else:
                        if start_frame < 0:
                            add_slide_data(start_frame, obj_uuid, blender_action.name, export_settings)

            if export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0:
                if track is not None:
                    if not (track.startswith("NlaTrack") or track.startswith("[Action Stash]")):
                        if track not in track_slide.keys() or (
                                track in track_slide.keys() and start_frame < track_slide[track]):
                            track_slide.update({track: start_frame})
                    else:
                        add_slide_data(start_frame, obj_uuid, blender_action.name, export_settings)
                else:
                    if export_settings['gltf_animation_mode'] == "ACTIVE_ACTIONS":
                        if None not in track_slide.keys() or (
                                None in track_slide.keys() and start_frame < track_slide[None]):
                            track_slide.update({None: start_frame})
                    else:
                        add_slide_data(start_frame, obj_uuid, blender_action.name, export_settings)

            if type_ == "SHAPEKEY" and export_settings['gltf_bake_animation']:
                export_settings['ranges'][obj_uuid][obj_uuid] = {}
                export_settings['ranges'][obj_uuid][obj_uuid]['start'] = bpy.context.scene.frame_start
                export_settings['ranges'][obj_uuid][obj_uuid]['end'] = bpy.context.scene.frame_end

            # For baking drivers
            if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.ARMATURE and export_settings['gltf_morph_anim'] is True:
                obj_drivers = get_sk_drivers(obj_uuid, export_settings)
                for obj_dr in obj_drivers:
                    if obj_dr not in export_settings['ranges']:
                        export_settings['ranges'][obj_dr] = {}
                    export_settings['ranges'][obj_dr][obj_uuid + "_" + blender_action.name] = {}
                    export_settings['ranges'][obj_dr][obj_uuid + "_" + blender_action.name]['start'] = start_frame
                    export_settings['ranges'][obj_dr][obj_uuid + "_" + blender_action.name]['end'] = end_frame

        if len(blender_actions) == 0 and export_settings['gltf_bake_animation']:
            # No animation on this object
            # In case of baking animation, we will use scene frame range
            # Will be calculated later if max range. Can be set here if scene frame range
            export_settings['ranges'][obj_uuid][obj_uuid] = {}
            export_settings['ranges'][obj_uuid][obj_uuid]['start'] = bpy.context.scene.frame_start
            export_settings['ranges'][obj_uuid][obj_uuid]['end'] = bpy.context.scene.frame_end

            # For baking drivers
            if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.ARMATURE and export_settings['gltf_morph_anim'] is True:
                obj_drivers = get_sk_drivers(obj_uuid, export_settings)
                for obj_dr in obj_drivers:
                    if obj_dr not in export_settings['ranges']:
                        export_settings['ranges'][obj_dr] = {}
                    export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid] = {}
                    export_settings['ranges'][obj_dr][obj_uuid + "_" +
                                                      obj_uuid]['start'] = bpy.context.scene.frame_start
                    export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid]['end'] = bpy.context.scene.frame_end

    if (export_settings['gltf_negative_frames'] == "SLIDE"
            or export_settings['gltf_anim_slide_to_zero'] is True) \
            and len(track_slide) > 0:
        # Need to store animation slides
        for obj_uuid in vtree.get_all_objects():

            # Do not manage non-exported objects
            if vtree.nodes[obj_uuid].node is None:
                if export_settings['gltf_armature_object_remove'] is True:
                    # Manage armature object, as this is the object that has the animation
                    if not vtree.nodes[obj_uuid].blender_object:
                        continue
                else:
                    continue

            blender_actions = __get_blender_actions(obj_uuid, export_settings)
            for blender_action, track, type_ in blender_actions:
                if track in track_slide.keys():
                    if export_settings['gltf_negative_frames'] == "SLIDE" and track_slide[track] < 0:
                        add_slide_data(track_slide[track], obj_uuid, blender_action.name, export_settings)
                    elif export_settings['gltf_anim_slide_to_zero'] is True:
                        add_slide_data(track_slide[track], obj_uuid, blender_action.name, export_settings)


def gather_action_animations(obj_uuid: int,
                             tracks: typing.Dict[str, typing.List[int]],
                             offset: int,
                             export_settings) -> typing.Tuple[typing.List[gltf2_io.Animation],
                                                              typing.Dict[str, typing.List[int]]]:
    """
    Gather all animations which contribute to the objects property, and corresponding track names

    :param obj_uuid: The UUID of the blender object which is animated
    :param export_settings:
    :return: A list of glTF2 animations and tracks
    """
    animations = []

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

    # Collect all 'actions' affecting this object. There is a direct mapping between blender actions and glTF animations
    blender_actions = __get_blender_actions(obj_uuid, export_settings)

    # When object is not animated at all (no SK)
    # We can create an animation for this object
    if len(blender_actions) == 0:
        animation = bake_animation(obj_uuid, obj_uuid, export_settings)
        if animation is not None:
            animations.append(animation)


    # Keep current situation and prepare export
    current_action = None
    current_sk_action = None
    current_world_matrix = None
    if blender_object and blender_object.animation_data and blender_object.animation_data.action:
        # There is an active action. Storing it, to be able to restore after switching all actions during export
        current_action = blender_object.animation_data.action
    elif len(blender_actions) != 0 and blender_object.animation_data is not None and blender_object.animation_data.action is None:
        # No current action set, storing world matrix of object
        current_world_matrix = blender_object.matrix_world.copy()

    if blender_object and blender_object.type == "MESH" \
            and blender_object.data is not None \
            and blender_object.data.shape_keys is not None \
            and blender_object.data.shape_keys.animation_data is not None \
            and blender_object.data.shape_keys.animation_data.action is not None:
        current_sk_action = blender_object.data.shape_keys.animation_data.action

    # Remove any solo (starred) NLA track. Restored after export
    solo_track = None
    if blender_object and blender_object.animation_data:
        for track in blender_object.animation_data.nla_tracks:
            if track.is_solo:
                solo_track = track
                track.is_solo = False
                break

    # Remove any tweak mode. Restore after export
    if blender_object and blender_object.animation_data:
        restore_tweak_mode = blender_object.animation_data.use_tweak_mode

    # Remove use of NLA. Restore after export
    if blender_object and blender_object.animation_data:
        current_use_nla = blender_object.animation_data.use_nla
        blender_object.animation_data.use_nla = False

    # Try to disable all except armature in viewport, for performance
    if export_settings['gltf_optimize_armature_disable_viewport'] \
            and export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE":

        # If the skinned mesh has driver(s), we can't disable it to bake armature.
        need_to_enable_again = False
        sk_drivers = get_sk_drivers(obj_uuid, export_settings)
        if len(sk_drivers) == 0:
            need_to_enable_again = True
            # Before baking, disabling from viewport all meshes
            for obj in [n.blender_object for n in export_settings['vtree'].nodes.values() if n.blender_type in
                        [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]:
                obj.hide_viewport = True
            export_settings['vtree'].nodes[obj_uuid].blender_object.hide_viewport = False
        else:
            export_settings['log'].warning("Can't disable viewport because of drivers")
            # We changed the option here, so we don't need to re-check it later
            export_settings['gltf_optimize_armature_disable_viewport'] = False

    export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, False)

    # Export

    # Export all collected actions.
    for blender_action, track_name, on_type in blender_actions:

        # Set action as active, to be able to bake if needed
        if on_type == "OBJECT":  # Not for shapekeys!
            if blender_object.animation_data.action is None \
                    or (blender_object.animation_data.action.name != blender_action.name):
                if blender_object.animation_data.is_property_readonly('action'):
                    blender_object.animation_data.use_tweak_mode = False
                try:
                    reset_bone_matrix(blender_object, export_settings)
                    export_user_extensions(
                        'pre_animation_switch_hook',
                        export_settings,
                        blender_object,
                        blender_action,
                        track_name,
                        on_type)
                    blender_object.animation_data.action = blender_action
                    export_user_extensions(
                        'post_animation_switch_hook',
                        export_settings,
                        blender_object,
                        blender_action,
                        track_name,
                        on_type)
                except:
                    error = "Action is readonly. Please check NLA editor"
                    export_settings['log'].warning(
                        "Animation '{}' could not be exported. Cause: {}".format(
                            blender_action.name, error))
                    continue

        if on_type == "SHAPEKEY":
            if blender_object.data.shape_keys.animation_data.action is None \
                    or (blender_object.data.shape_keys.animation_data.action.name != blender_action.name):
                if blender_object.data.shape_keys.animation_data.is_property_readonly('action'):
                    blender_object.data.shape_keys.animation_data.use_tweak_mode = False
                reset_sk_data(blender_object, blender_actions, export_settings)
                export_user_extensions(
                    'pre_animation_switch_hook',
                    export_settings,
                    blender_object,
                    blender_action,
                    track_name,
                    on_type)
                blender_object.data.shape_keys.animation_data.action = blender_action
                export_user_extensions(
                    'post_animation_switch_hook',
                    export_settings,
                    blender_object,
                    blender_action,
                    track_name,
                    on_type)

        if export_settings['gltf_force_sampling'] is True:
            if export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE":
                animation, extra_samplers = gather_action_armature_sampled(
                    obj_uuid, blender_action, None, export_settings)
            elif on_type == "OBJECT":
                animation, extra_samplers = gather_action_object_sampled(
                    obj_uuid, blender_action, None, export_settings)
            else:
                animation = gather_action_sk_sampled(obj_uuid, blender_action, None, export_settings)
        else:
            # Not sampled
            # This returns
            # - animation on fcurves
            # - fcurves that cannot be handled non-sampled, to be sampled
            # to_be_sampled is : (object_uuid, type, prop, optional(bone.name))
            animation, to_be_sampled, extra_samplers = gather_animation_fcurves(
                obj_uuid, blender_action, export_settings)
            for (obj_uuid, type_, prop, bone) in to_be_sampled:
                if type_ == "BONE":
                    channel = gather_sampled_bone_channel(
                        obj_uuid,
                        bone,
                        prop,
                        blender_action.name,
                        True,
                        get_gltf_interpolation("LINEAR"),
                        export_settings)
                elif type_ == "OBJECT":
                    channel = gather_sampled_object_channel(
                        obj_uuid, prop, blender_action.name, True, get_gltf_interpolation("LINEAR"), export_settings)
                elif type_ == "SK":
                    channel = gather_sampled_sk_channel(obj_uuid, blender_action.name, export_settings)
                elif type_ == "EXTRA":
                    channel = None
                else:
                    export_settings['log'].error("Type unknown. Should not happen")

                if animation is None and channel is not None:
                    # If all channels need to be sampled, no animation was created
                    # Need to create animation, and add channel
                    animation = gltf2_io.Animation(
                        channels=[channel],
                        extensions=None,
                        extras=__gather_extras(blender_action, export_settings),
                        name=blender_action.name,
                        samplers=[]
                    )
                else:
                    if channel is not None:
                        animation.channels.append(channel)

        # Add extra samplers
        # Because this is not core glTF specification, you can add extra samplers using a hook
        if export_settings['gltf_export_extra_animations'] and len(extra_samplers) != 0:
            export_user_extensions(
                'extra_animation_manage',
                export_settings,
                extra_samplers,
                obj_uuid,
                blender_object,
                blender_action,
                animation)

        # If we are in a SK animation, and we need to bake (if there is also a TRS anim)
        if len([a for a in blender_actions if a[2] == "OBJECT"]) == 0 and on_type == "SHAPEKEY":
            if export_settings['gltf_bake_animation'] is True and export_settings['gltf_force_sampling'] is True:
                # We also have to check if this is a skinned mesh, because we don't have to force animation baking in this case
                # (skinned meshes TRS must be ignored, says glTF specification)
                if export_settings['vtree'].nodes[obj_uuid].skin is None:
                    if obj_uuid not in export_settings['ranges'].keys():
                        export_settings['ranges'][obj_uuid] = {}
                    export_settings['ranges'][obj_uuid][obj_uuid] = export_settings['ranges'][obj_uuid][blender_action.name]
                    channels, _ = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings)
                    if channels is not None:
                        if animation is None:
                            animation = gltf2_io.Animation(
                                channels=channels,
                                extensions=None,  # as other animations
                                extras=None,  # Because there is no animation to get extras from
                                name=blender_object.name,  # Use object name as animation name
                                samplers=[]
                            )
                        else:
                            animation.channels.extend(channels)

        if len([a for a in blender_actions if a[2] == "SHAPEKEY"]) == 0 \
                and export_settings['gltf_morph_anim'] \
                and blender_object.type == "MESH" \
                and blender_object.data is not None \
                and blender_object.data.shape_keys is not None:
            if export_settings['gltf_bake_animation'] is True and export_settings['gltf_force_sampling'] is True:
                # We need to check that this mesh is not driven by armature parent
                # In that case, no need to bake, because animation is already baked by driven sk armature
                ignore_sk = False
                if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None \
                        and export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.ARMATURE:
                    obj_drivers = get_sk_drivers(export_settings['vtree'].nodes[obj_uuid].parent_uuid, export_settings)
                    if obj_uuid in obj_drivers:
                        ignore_sk = True

                if ignore_sk is False:
                    if obj_uuid not in export_settings['ranges'].keys():
                        export_settings['ranges'][obj_uuid] = {}
                    export_settings['ranges'][obj_uuid][obj_uuid] = export_settings['ranges'][obj_uuid][blender_action.name]
                    channel = gather_sampled_sk_channel(obj_uuid, obj_uuid, export_settings)
                    if channel is not None:
                        if animation is None:
                            animation = gltf2_io.Animation(
                                channels=[channel],
                                extensions=None,  # as other animations
                                extras=None,  # Because there is no animation to get extras from
                                name=blender_object.name,  # Use object name as animation name
                                samplers=[]
                            )
                        else:
                            animation.channels.append(channel)

        if animation is not None:
            link_samplers(animation, export_settings)
            animations.append(animation)

        # Store data for merging animation later
        if track_name is not None:  # Do not take into account animation not in NLA
            # Do not take into account default NLA track names
            if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")):
                if track_name not in tracks.keys():
                    tracks[track_name] = []
                tracks[track_name].append(offset + len(animations) - 1)  # Store index of animation in animations


    # Restoring current situation

    # Restore action status
    # TODO: do this in a finally
    if blender_object and blender_object.animation_data:
        if blender_object.animation_data.action is not None:
            if current_action is None:
                # remove last exported action
                reset_bone_matrix(blender_object, export_settings)
                blender_object.animation_data.action = None
            elif blender_object.animation_data.action.name != current_action.name:
                # Restore action that was active at start of exporting
                reset_bone_matrix(blender_object, export_settings)
                blender_object.animation_data.action = current_action
        if solo_track is not None:
            solo_track.is_solo = True
        blender_object.animation_data.use_tweak_mode = restore_tweak_mode
        blender_object.animation_data.use_nla = current_use_nla

    if blender_object and blender_object.type == "MESH" \
            and blender_object.data is not None \
            and blender_object.data.shape_keys is not None \
            and blender_object.data.shape_keys.animation_data is not None:
        reset_sk_data(blender_object, blender_actions, export_settings)
        blender_object.data.shape_keys.animation_data.action = current_sk_action

    if blender_object and current_world_matrix is not None:
        blender_object.matrix_world = current_world_matrix

    if export_settings['gltf_optimize_armature_disable_viewport'] \
            and export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE":
        if need_to_enable_again is True:
            # And now, restoring meshes in viewport
            for node, obj in [(n, n.blender_object) for n in export_settings['vtree'].nodes.values()
                              if n.blender_type in [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]:
                obj.hide_viewport = node.default_hide_viewport
            export_settings['vtree'].nodes[obj_uuid].blender_object.hide_viewport = export_settings['vtree'].nodes[obj_uuid].default_hide_viewport

    export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, True)

    return animations, tracks


@cached
def __get_blender_actions(obj_uuid: str,
                          export_settings
                          ) -> typing.List[typing.Tuple[bpy.types.Action, str, str]]:
    blender_actions = []
    blender_tracks = {}
    action_on_type = {}

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

    export_user_extensions('pre_gather_actions_hook', export_settings, blender_object)

    if export_settings['gltf_animation_mode'] == "BROADCAST":
        return __get_blender_actions_broadcast(obj_uuid, export_settings)

    if blender_object and blender_object.animation_data is not None:
|
||||
# Collect active action.
|
||||
if blender_object.animation_data.action is not None:
|
||||
|
||||
# Check the action is not in list of actions to ignore
|
||||
if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(blender_object.animation_data.action) in [
|
||||
id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
|
||||
pass # We ignore this action
|
||||
else:
|
||||
blender_actions.append(blender_object.animation_data.action)
|
||||
blender_tracks[blender_object.animation_data.action.name] = None
|
||||
action_on_type[blender_object.animation_data.action.name] = "OBJECT"
|
||||
|
||||
# Collect associated strips from NLA tracks.
|
||||
if export_settings['gltf_animation_mode'] == "ACTIONS":
|
||||
for track in blender_object.animation_data.nla_tracks:
|
||||
# Multi-strip tracks do not export correctly yet (they need to be baked),
|
||||
# so skip them for now and only write single-strip tracks.
|
||||
non_muted_strips = [strip for strip in track.strips if strip.action is not None and strip.mute is False]
|
||||
if track.strips is None or len(non_muted_strips) != 1:
|
||||
export_settings['log'].warning(
|
||||
"NLA track '{}' has {} strips, but only single-strip tracks are supported in 'actions' mode.".format(
|
||||
track.name, len(
|
||||
track.strips)), popup=True)
|
||||
continue
|
||||
for strip in non_muted_strips:
|
||||
|
||||
# Check the action is not in list of actions to ignore
|
||||
if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(strip.action) in [
|
||||
id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
|
||||
continue # We ignore this action
|
||||
|
||||
blender_actions.append(strip.action)
|
||||
# Always set after possible active action -> None will be overwrite
|
||||
blender_tracks[strip.action.name] = track.name
|
||||
action_on_type[strip.action.name] = "OBJECT"
|
||||
|
||||
# For caching, actions linked to SK must be after actions about TRS
|
||||
if export_settings['gltf_morph_anim'] and blender_object and blender_object.type == "MESH" \
|
||||
and blender_object.data is not None \
|
||||
and blender_object.data.shape_keys is not None \
|
||||
and blender_object.data.shape_keys.animation_data is not None:
|
||||
|
||||
if blender_object.data.shape_keys.animation_data.action is not None:
|
||||
|
||||
# Check the action is not in list of actions to ignore
|
||||
if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(blender_object.data.shape_keys.animation_data.action) in [
|
||||
id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
|
||||
pass # We ignore this action
|
||||
else:
|
||||
blender_actions.append(blender_object.data.shape_keys.animation_data.action)
|
||||
blender_tracks[blender_object.data.shape_keys.animation_data.action.name] = None
|
||||
action_on_type[blender_object.data.shape_keys.animation_data.action.name] = "SHAPEKEY"
|
||||
|
||||
if export_settings['gltf_animation_mode'] == "ACTIONS":
|
||||
for track in blender_object.data.shape_keys.animation_data.nla_tracks:
|
||||
# Multi-strip tracks do not export correctly yet (they need to be baked),
|
||||
# so skip them for now and only write single-strip tracks.
|
||||
non_muted_strips = [strip for strip in track.strips if strip.action is not None and strip.mute is False]
|
||||
if track.strips is None or len(non_muted_strips) != 1:
|
||||
continue
|
||||
for strip in non_muted_strips:
|
||||
# Check the action is not in list of actions to ignore
|
||||
if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(strip.action) in [
|
||||
id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
|
||||
continue # We ignore this action
|
||||
|
||||
blender_actions.append(strip.action)
|
||||
# Always set after possible active action -> None will be overwrite
|
||||
blender_tracks[strip.action.name] = track.name
|
||||
action_on_type[strip.action.name] = "SHAPEKEY"
|
||||
|
||||
# If there are only 1 armature, include all animations, even if not in NLA
|
||||
# But only if armature has already some animation_data
|
||||
# If not, we says that this armature is never animated, so don't add these additional actions
|
||||
if export_settings['gltf_export_anim_single_armature'] is True:
|
||||
if blender_object and blender_object.type == "ARMATURE" and blender_object.animation_data is not None:
|
||||
if len(export_settings['vtree'].get_all_node_of_type(VExportNode.ARMATURE)) == 1:
|
||||
# Keep all actions on objects (no Shapekey animation)
|
||||
for act in [a for a in bpy.data.actions if a.id_root == "OBJECT"]:
|
||||
# We need to check this is an armature action
|
||||
# Checking that at least 1 bone is animated
|
||||
if not __is_armature_action(act):
|
||||
continue
|
||||
# Check if this action is already taken into account
|
||||
if act.name in blender_tracks.keys():
|
||||
continue
|
||||
|
||||
# Check the action is not in list of actions to ignore
|
||||
if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(act) in [id(item.action)
|
||||
for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
|
||||
continue # We ignore this action
|
||||
|
||||
blender_actions.append(act)
|
||||
blender_tracks[act.name] = None
|
||||
action_on_type[act.name] = "OBJECT"
|
||||
|
||||
# Use a class to get parameters, to be able to modify them
|
||||
class GatherActionHookParameters:
|
||||
def __init__(self, blender_actions, blender_tracks, action_on_type):
|
||||
self.blender_actions = blender_actions
|
||||
self.blender_tracks = blender_tracks
|
||||
self.action_on_type = action_on_type
|
||||
|
||||
gatheractionhookparams = GatherActionHookParameters(blender_actions, blender_tracks, action_on_type)
|
||||
|
||||
export_user_extensions('gather_actions_hook', export_settings, blender_object, gatheractionhookparams)
|
||||
|
||||
# Get params back from hooks
|
||||
blender_actions = gatheractionhookparams.blender_actions
|
||||
blender_tracks = gatheractionhookparams.blender_tracks
|
||||
action_on_type = gatheractionhookparams.action_on_type
|
||||
|
||||
# Remove duplicate actions.
|
||||
blender_actions = list(set(blender_actions))
|
||||
# sort animations alphabetically (case insensitive) so they have a defined order and match Blender's Action list
|
||||
blender_actions.sort(key=lambda a: a.name.lower())
|
||||
|
||||
return [(blender_action, blender_tracks[blender_action.name], action_on_type[blender_action.name])
|
||||
for blender_action in blender_actions]
|
||||
|
||||
|
||||
def __is_armature_action(blender_action) -> bool:
|
||||
for fcurve in blender_action.fcurves:
|
||||
if is_bone_anim_channel(fcurve.data_path):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def __gather_extras(blender_action, export_settings):
|
||||
if export_settings['gltf_extras']:
|
||||
return generate_extras(blender_action)
|
||||
return None
|
||||
|
||||
|
||||
def __get_blender_actions_broadcast(obj_uuid, export_settings):
|
||||
blender_actions = []
|
||||
blender_tracks = {}
|
||||
action_on_type = {}
|
||||
|
||||
blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
|
||||
|
||||
# Note : Like in FBX exporter:
|
||||
# - Object with animation data will get all actions
|
||||
# - Object without animation will not get any action
|
||||
|
||||
# Collect all actions
|
||||
for blender_action in bpy.data.actions:
|
||||
if hasattr(bpy.data.scenes[0], "gltf_action_filter") and id(blender_action) in [
|
||||
id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
|
||||
continue # We ignore this action
|
||||
|
||||
# Keep all actions on objects (no Shapekey animation, No armature animation (on bones))
|
||||
if blender_action.id_root == "OBJECT": # TRS and Bone animations
|
||||
if blender_object.animation_data is None:
|
||||
continue
|
||||
if blender_object and blender_object.type == "ARMATURE" and __is_armature_action(blender_action):
|
||||
blender_actions.append(blender_action)
|
||||
blender_tracks[blender_action.name] = None
|
||||
action_on_type[blender_action.name] = "OBJECT"
|
||||
elif blender_object.type == "MESH":
|
||||
if not __is_armature_action(blender_action):
|
||||
blender_actions.append(blender_action)
|
||||
blender_tracks[blender_action.name] = None
|
||||
action_on_type[blender_action.name] = "OBJECT"
|
||||
elif blender_action.id_root == "KEY":
|
||||
if blender_object.type != "MESH" or blender_object.data is None or blender_object.data.shape_keys is None or blender_object.data.shape_keys.animation_data is None:
|
||||
continue
|
||||
# Checking that the object has some SK and some animation on it
|
||||
if blender_object is None:
|
||||
continue
|
||||
if blender_object.type != "MESH":
|
||||
continue
|
||||
if blender_object.data is None or blender_object.data.shape_keys is None:
|
||||
continue
|
||||
blender_actions.append(blender_action)
|
||||
blender_tracks[blender_action.name] = None
|
||||
action_on_type[blender_action.name] = "SHAPEKEY"
|
||||
|
||||
# Use a class to get parameters, to be able to modify them
|
||||
|
||||
class GatherActionHookParameters:
|
||||
def __init__(self, blender_actions, blender_tracks, action_on_type):
|
||||
self.blender_actions = blender_actions
|
||||
self.blender_tracks = blender_tracks
|
||||
self.action_on_type = action_on_type
|
||||
|
||||
gatheractionhookparams = GatherActionHookParameters(blender_actions, blender_tracks, action_on_type)
|
||||
|
||||
export_user_extensions('gather_actions_hook', export_settings, blender_object, gatheractionhookparams)
|
||||
|
||||
# Get params back from hooks
|
||||
blender_actions = gatheractionhookparams.blender_actions
|
||||
blender_tracks = gatheractionhookparams.blender_tracks
|
||||
action_on_type = gatheractionhookparams.action_on_type
|
||||
|
||||
# Remove duplicate actions.
|
||||
blender_actions = list(set(blender_actions))
|
||||
# sort animations alphabetically (case insensitive) so they have a defined order and match Blender's Action list
|
||||
blender_actions.sort(key=lambda a: a.name.lower())
|
||||
|
||||
return [(blender_action, blender_tracks[blender_action.name], action_on_type[blender_action.name])
|
||||
for blender_action in blender_actions]
|
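The GatherActionHookParameters object above is handed to user extensions via the 'gather_actions_hook' call, so an add-on can filter or extend the gathered actions before export continues. A minimal sketch of such an add-on class (the class body and the filtering rule are illustrative, not part of the exporter; the method signature assumes the exporter's usual hook convention of passing the call-site arguments followed by export_settings):

class glTF2ExportUserExtension:
    # Hypothetical user extension; the exporter picks this class up from enabled add-ons.
    def gather_actions_hook(self, blender_object, params, export_settings):
        # params is the GatherActionHookParameters instance built by the exporter:
        # it exposes blender_actions, blender_tracks and action_on_type.
        # Illustrative rule: drop every action whose name starts with "WIP_".
        kept = [a for a in params.blender_actions if not a.name.startswith("WIP_")]
        kept_names = {a.name for a in kept}
        params.blender_tracks = {n: t for n, t in params.blender_tracks.items() if n in kept_names}
        params.action_on_type = {n: t for n, t in params.action_on_type.items() if n in kept_names}
        params.blender_actions = kept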
291
scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animation_utils.py
Normal file
@@ -0,0 +1,291 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from mathutils import Matrix
from ....blender.com.gltf2_blender_data_path import get_sk_exported
from ....io.com import gltf2_io
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ..gltf2_blender_gather_tree import VExportNode
from .sampled.armature.armature_action_sampled import gather_action_armature_sampled
from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled
from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel
from .sampled.data.gltf2_blender_gather_data_channels import gather_data_sampled_channels
from .gltf2_blender_gather_drivers import get_sk_drivers


def link_samplers(animation: gltf2_io.Animation, export_settings):
    """
    Move animation samplers to their own list and store their indices at their previous locations.

    After gathering, samplers are stored in the channels properties of the animation and need to be moved
    to their own list, while storing an index into this list at the position where they previously were.
    This behaviour is similar to that of the glTF exporter that traverses all nodes.
    :param animation:
    :param export_settings:
    :return:
    """
    # TODO: move this to some util module and update the gltf2 exporter as well
    T = typing.TypeVar('T')

    def __append_unique_and_get_index(l: typing.List[T], item: T):
        if item in l:
            return l.index(item)
        else:
            index = len(l)
            l.append(item)
            return index

    for i, channel in enumerate(animation.channels):
        animation.channels[i].sampler = __append_unique_and_get_index(animation.samplers, channel.sampler)
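The nested helper deduplicates samplers: channels that share the same sampler object end up pointing at a single list index. A standalone sketch of that append-unique behaviour, with plain strings standing in for sampler objects:

def append_unique_and_get_index(items, item):
    # Same logic as the nested helper above, outside the exporter types.
    if item in items:
        return items.index(item)
    items.append(item)
    return len(items) - 1

samplers = []
channel_samplers = ["sampler_A", "sampler_B", "sampler_A"]  # two channels share one sampler
indices = [append_unique_and_get_index(samplers, s) for s in channel_samplers]
assert indices == [0, 1, 0]                    # the shared sampler is stored only once
assert samplers == ["sampler_A", "sampler_B"]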
def reset_bone_matrix(blender_object, export_settings) -> None:
    if export_settings['gltf_export_reset_pose_bones'] is False:
        return

    # Only for armatures
    if blender_object.type != "ARMATURE":
        return

    # Remove the current action, if any
    if blender_object.animation_data and blender_object.animation_data.action:
        blender_object.animation_data.action = None

    # Reset bone TRS, to avoid keeping unkeyed values when a future action is set
    for bone in blender_object.pose.bones:
        bone.matrix_basis = Matrix()


def reset_sk_data(blender_object, blender_actions, export_settings) -> None:
    # Using the NLA for SK is not so common
    # Reset to 0.0 happens here only if there are at least 2 tracks to export
    if export_settings['gltf_export_reset_sk_data'] is False:
        return

    if len([i for i in blender_actions if i[2] == "SHAPEKEY"]) <= 1:
        return

    if blender_object.type != "MESH":
        return

    # Reset
    for sk in get_sk_exported(blender_object.data.shape_keys.key_blocks):
        sk.value = 0.0


def add_slide_data(start_frame, uuid: int, key: str, export_settings, add_drivers=True):

    if uuid not in export_settings['slide'].keys():
        export_settings['slide'][uuid] = {}
    export_settings['slide'][uuid][key] = start_frame

    # Add slide info for driven SK too
    if add_drivers is True:
        obj_drivers = get_sk_drivers(uuid, export_settings)
        for obj_dr in obj_drivers:
            if obj_dr not in export_settings['slide'].keys():
                export_settings['slide'][obj_dr] = {}
            export_settings['slide'][obj_dr][uuid + "_" + key] = start_frame


def merge_tracks_perform(merged_tracks, animations, export_settings):
    to_delete_idx = []
    for merged_anim_track in merged_tracks.keys():
        if len(merged_tracks[merged_anim_track]) < 2:

            # There is only 1 animation in the track
            # If the name of the track is not a default name, use it for the action
            if len(merged_tracks[merged_anim_track]) != 0:
                animations[merged_tracks[merged_anim_track][0]].name = merged_anim_track

            continue

        base_animation_idx = None
        offset_sampler = 0

        for idx, anim_idx in enumerate(merged_tracks[merged_anim_track]):
            if idx == 0:
                base_animation_idx = anim_idx
                animations[anim_idx].name = merged_anim_track
                already_animated = []
                for channel in animations[anim_idx].channels:
                    already_animated.append((channel.target.node, channel.target.path))
                continue

            to_delete_idx.append(anim_idx)

            # Merge extensions
            # Provide a hook to handle extension merging, since there is no way to know the author's intent
            export_user_extensions(
                'merge_animation_extensions_hook',
                export_settings,
                animations[anim_idx],
                animations[base_animation_idx])

            # Merge extras
            # Warning: some values can be overwritten if present in multiple merged animations
            if animations[anim_idx].extras is not None:
                for k in animations[anim_idx].extras.keys():
                    if animations[base_animation_idx].extras is None:
                        animations[base_animation_idx].extras = {}
                    animations[base_animation_idx].extras[k] = animations[anim_idx].extras[k]

            offset_sampler = len(animations[base_animation_idx].samplers)
            for sampler in animations[anim_idx].samplers:
                animations[base_animation_idx].samplers.append(sampler)

            for channel in animations[anim_idx].channels:
                if (channel.target.node, channel.target.path) in already_animated:
                    export_settings['log'].warning(
                        "Some strips have the same channel animation ({}), on node {}!".format(
                            channel.target.path, channel.target.node.name))
                    continue
                animations[base_animation_idx].channels.append(channel)
                animations[base_animation_idx].channels[-1].sampler = animations[base_animation_idx].channels[-1].sampler + offset_sampler
                already_animated.append((channel.target.node, channel.target.path))

    new_animations = []
    if len(to_delete_idx) != 0:
        for idx, animation in enumerate(animations):
            if idx in to_delete_idx:
                continue
            new_animations.append(animation)
    else:
        new_animations = animations

    # If some strips have the same channel animations, we already ignored some of them.
    # But if the channels were exactly the same, we already picked the sampler index,
    # so animation.samplers now holds a mix of sampler objects and sampler indices.
    # So get back to a list of objects only.
    # This can lead to unused samplers... but keep them, as the data is not exported properly anyway.
    for anim in new_animations:
        new_samplers = []
        for s in anim.samplers:
            if isinstance(s, int):
                new_samplers.append(anim.samplers[s])
            else:
                new_samplers.append(s)
        anim.samplers = new_samplers

    return new_animations
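The subtle part of the merge is the sampler offset: channels copied from a merged animation keep sampler indices that are local to that animation, so they must be shifted by the number of samplers the base animation already holds. A toy model of that step, with plain lists standing in for the glTF animation objects:

# Each "animation" here is a (channels, samplers) pair; a channel stores a sampler index.
base_samplers = ["s0", "s1"]
base_channels = [0, 1]

merged_samplers = ["t0"]
merged_channels = [0]                  # index 0 refers to "t0" locally

offset = len(base_samplers)            # 2
base_samplers.extend(merged_samplers)
base_channels.extend(c + offset for c in merged_channels)

assert base_samplers == ["s0", "s1", "t0"]
assert base_channels == [0, 1, 2]      # the merged channel now points at "t0"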
def bake_animation(obj_uuid: str, animation_key: str, export_settings, mode=None):

    # The bake situation does not export any extra animation channels, as we bake TRS + weights
    # at track or scene level, without direct access to fcurve and action data

    # If there is no animation in the file => no need to bake
    if len(bpy.data.actions) == 0:
        return None

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

    # No TRS animation was found for this object.
    # But we may need to bake
    # (only when force sampling is ON)
    # If force sampling is OFF, this can lead to an inconsistent export anyway
    if (export_settings['gltf_bake_animation'] is True
            or export_settings['gltf_animation_mode'] == "NLA_TRACKS") \
            and blender_object and blender_object.type != "ARMATURE" and export_settings['gltf_force_sampling'] is True:
        animation = None
        # We also have to check whether this is a skinned mesh, because we don't force animation baking in that case
        # (skinned mesh TRS must be ignored, says the glTF specification)
        if export_settings['vtree'].nodes[obj_uuid].skin is None:
            if mode is None or mode == "OBJECT":
                animation, _ = gather_action_object_sampled(obj_uuid, None, animation_key, export_settings)

        # Need to bake SK only if not linked to a driven SK by an armature parent
        # In case of NLA track export, no baking of SK
        if export_settings['gltf_morph_anim'] \
                and blender_object \
                and blender_object.type == "MESH" \
                and blender_object.data is not None \
                and blender_object.data.shape_keys is not None:

            ignore_sk = False
            if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None \
                    and export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.ARMATURE:
                obj_drivers = get_sk_drivers(export_settings['vtree'].nodes[obj_uuid].parent_uuid, export_settings)
                if obj_uuid in obj_drivers:
                    ignore_sk = True

            if mode == "OBJECT":
                ignore_sk = True

            if ignore_sk is False:
                channel = gather_sampled_sk_channel(obj_uuid, animation_key, export_settings)
                if channel is not None:
                    if animation is None:
                        animation = gltf2_io.Animation(
                            channels=[channel],
                            extensions=None,  # as other animations
                            extras=None,  # Because there is no animation to get extras from
                            name=blender_object.name,  # Use object name as animation name
                            samplers=[]
                        )
                    else:
                        animation.channels.append(channel)

        if animation is not None and animation.channels:
            link_samplers(animation, export_settings)
            return animation

    elif (export_settings['gltf_bake_animation'] is True
            or export_settings['gltf_animation_mode'] == "NLA_TRACKS") \
            and blender_object \
            and blender_object.type == "ARMATURE" \
            and (mode is None or mode == "OBJECT"):
        # We need to bake all bones, because some bones can have constraints
        # linking to other armature bones, for example

        animation, _ = gather_action_armature_sampled(obj_uuid, None, animation_key, export_settings)
        link_samplers(animation, export_settings)
        if animation is not None:
            return animation
    return None


def bake_data_animation(blender_type_data, blender_id, animation_key, on_type, export_settings):
    # If there is no animation in the file => no need to bake
    if len(bpy.data.actions) == 0:
        return None

    total_channels = []
    animation = None

    if (export_settings['gltf_bake_animation'] is True
            or export_settings['gltf_animation_mode'] == "NLA_TRACKS"):

        if blender_type_data == "materials":
            blender_data_object = [i for i in bpy.data.materials if id(i) == blender_id][0]
        elif blender_type_data == "cameras":
            blender_data_object = [i for i in bpy.data.cameras if id(i) == blender_id][0]
        elif blender_type_data == "lights":
            blender_data_object = [i for i in bpy.data.lights if id(i) == blender_id][0]
        else:
            pass  # Should not happen

        # Now export KHR_animation_pointer for materials / lights / cameras
        for i in [a for a in export_settings['KHR_animation_pointer'][blender_type_data].keys() if a == blender_id]:
            if len(export_settings['KHR_animation_pointer'][blender_type_data][i]['paths']) == 0:
                continue

            channels = gather_data_sampled_channels(blender_type_data, i, animation_key, on_type, export_settings)
            if channels is not None:
                total_channels.extend(channels)

        if len(total_channels) > 0:
            animation = gltf2_io.Animation(
                channels=total_channels,
                extensions=None,  # as other animations
                extras=None,  # Because there is no animation to get extras from
                name=blender_data_object.name,  # Use object name as animation name
                samplers=[]
            )

    if animation is not None and animation.channels:
        link_samplers(animation, export_settings)
        return animation
22
scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animations.py
Normal file
@@ -0,0 +1,22 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0


from .gltf2_blender_gather_action import gather_actions_animations
from .gltf2_blender_gather_scene_animation import gather_scene_animations
from .gltf2_blender_gather_tracks import gather_tracks_animations


def gather_animations(export_settings):

    # Reinitialize stored data
    export_settings['ranges'] = {}
    export_settings['slide'] = {}

    if export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]:
        return gather_actions_animations(export_settings)
    elif export_settings['gltf_animation_mode'] == "SCENE":
        return gather_scene_animations(export_settings)
    elif export_settings['gltf_animation_mode'] == "NLA_TRACKS":
        return gather_tracks_animations(export_settings)
79
scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_drivers.py
Normal file
@@ -0,0 +1,79 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

from ....blender.com.gltf2_blender_data_path import get_sk_exported, skip_sk
from ...com.gltf2_blender_data_path import get_target_object_path
from ..gltf2_blender_gather_cache import skdriverdiscovercache


@skdriverdiscovercache
def get_sk_drivers(blender_armature_uuid, export_settings):

    # If no SK are exported --> no driver animation to export
    if export_settings['gltf_morph_anim'] is False:
        return []

    drivers = []

    # Take into account skinned meshes, and meshes parented to a bone of the armature
    children_list = export_settings['vtree'].nodes[blender_armature_uuid].children.copy()
    for bone in export_settings['vtree'].get_all_bones(blender_armature_uuid):
        children_list.extend(export_settings['vtree'].nodes[bone].children)

    for child_uuid in children_list:

        if export_settings['vtree'].nodes[child_uuid].blender_type == "BONE":
            continue

        child = export_settings['vtree'].nodes[child_uuid].blender_object

        if not child.data:
            continue
        # child.data can be an armature - which has no shapekeys
        if not hasattr(child.data, 'shape_keys'):
            continue
        if not child.data.shape_keys:
            continue
        if not child.data.shape_keys.animation_data:
            continue
        if not child.data.shape_keys.animation_data.drivers:
            continue

        shapekeys_idx = {}
        cpt_sk = 0
        for sk in get_sk_exported(child.data.shape_keys.key_blocks):
            shapekeys_idx[sk.name] = cpt_sk
            cpt_sk += 1

        # Note: channels will have some None items, only for SK, if some SK are not animated
        idx_channel_mapping = []
        all_sorted_channels = []
        for sk_c in child.data.shape_keys.animation_data.drivers:
            # Check if the driver is valid. If not, ignore this driver channel
            try:
                # Try/Except is no longer a sufficient check: starting with Blender 3.0,
                # Blender crashes when trying to resolve the path of an invalid driver
                if not sk_c.is_valid:
                    continue
                sk_name = child.data.shape_keys.path_resolve(get_target_object_path(sk_c.data_path)).name
            except Exception:
                continue
            if skip_sk(child.data.shape_keys.key_blocks, child.data.shape_keys.key_blocks[sk_name]):
                continue
            idx_channel_mapping.append((shapekeys_idx[sk_name], sk_c))
        existing_idx = dict(idx_channel_mapping)
        for i in range(0, cpt_sk):
            if i not in existing_idx.keys():
                all_sorted_channels.append(None)
            else:
                all_sorted_channels.append(existing_idx[i])

        # Check that there are some drivers on SK, and that there are not only invalid drivers
        if len(all_sorted_channels) > 0 and not all([i is None for i in all_sorted_channels]):
            drivers.append(child_uuid)

    return drivers
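The resulting channel list mirrors the exported shape-key order: each discovered driver is slotted at the index of the shape key it targets, and shape keys without a driver become None entries. A standalone sketch of that ordering step (toy names, no bpy):

shapekeys_idx = {"Smile": 0, "Blink": 1, "Frown": 2}    # exported SK name -> index
found = [("Blink", "driver_b")]                         # only "Blink" is driven

existing_idx = {shapekeys_idx[name]: drv for name, drv in found}
all_sorted_channels = [existing_idx.get(i) for i in range(len(shapekeys_idx))]

assert all_sorted_channels == [None, "driver_b", None]
# The mesh counts as driven because at least one entry is not None:
assert not all(c is None for c in all_sorted_channels)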
127
scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py
Normal file
@@ -0,0 +1,127 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import typing
import bpy
import mathutils
from ...com import gltf2_blender_math


class Keyframe:
    def __init__(self, channels: typing.Tuple[bpy.types.FCurve], frame: float, bake_channel: typing.Union[str, None]):
        self.seconds = frame / (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base)
        self.frame = frame
        self.fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base)
        self.__length_morph = 0
        # Note: channels has some None items, only for SK, if some SK are not animated
        if bake_channel is None:
            if not all([c is None for c in channels]):
                self.target = [c for c in channels if c is not None][0].data_path.split('.')[-1]
                if self.target != "value":
                    self.__indices = [c.array_index for c in channels]
                else:
                    self.__indices = [i for i, c in enumerate(channels) if c is not None]
                    self.__length_morph = len(channels)
            else:
                # If all channels are None (baking evaluated SK case)
                self.target = "value"
                self.__indices = []
                self.__length_morph = len(channels)
                for i in range(self.get_target_len()):
                    self.__indices.append(i)

        else:
            if bake_channel == "value":
                self.__length_morph = len(channels)
            self.target = bake_channel
            self.__indices = []
            for i in range(self.get_target_len()):
                self.__indices.append(i)

        # Data holders for virtual properties
        self.__value = None
        self.__in_tangent = None
        self.__out_tangent = None

    def get_target_len(self):
        length = {
            "delta_location": 3,
            "delta_rotation_euler": 3,
            "delta_rotation_quaternion": 4,
            "delta_scale": 3,
            "location": 3,
            "rotation_axis_angle": 4,
            "rotation_euler": 3,
            "rotation_quaternion": 4,
            "scale": 3,
            "value": self.__length_morph
        }.get(self.target, 1)

        return length

    def __set_indexed(self, value):
        # Sometimes Blender animations only reference a subset of components of a data target. A Keyframe should always
        # contain a complete Vector / Quaternion --> use the array_index value of the keyframe to set components in such
        # structures.
        # For SK, it must contain all SK values
        result = [0.0] * self.get_target_len()
        for i, v in zip(self.__indices, value):
            result[i] = v
        return result

    def get_indices(self):
        return self.__indices

    def set_value_index(self, idx, val):
        self.__value[idx] = val

    def set_value_index_in(self, idx, val):
        self.__in_tangent[idx] = val

    def set_value_index_out(self, idx, val):
        self.__out_tangent[idx] = val

    def set_first_tangent(self):
        self.__in_tangent = self.__value

    def set_last_tangent(self):
        self.__out_tangent = self.__value

    @property
    def value(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        if self.target == "value":
            return self.__value
        return gltf2_blender_math.list_to_mathutils(self.__value, self.target)

    @value.setter
    def value(self, value: typing.List[float]):
        self.__value = self.__set_indexed(value)

    @value.setter
    def value_total(self, value: typing.List[float]):
        # Defines a separate `value_total` property that stores the full value list
        # directly, bypassing the index mapping done by the `value` setter.
        self.__value = value

    @property
    def in_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        if self.__in_tangent is None:
            return None
        if self.target == "value":
            return self.__in_tangent
        return gltf2_blender_math.list_to_mathutils(self.__in_tangent, self.target)

    @in_tangent.setter
    def in_tangent(self, value: typing.List[float]):
        self.__in_tangent = self.__set_indexed(value)

    @property
    def out_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
        if self.__out_tangent is None:
            return None
        if self.target == "value":
            return self.__out_tangent
        return gltf2_blender_math.list_to_mathutils(self.__out_tangent, self.target)

    @out_tangent.setter
    def out_tangent(self, value: typing.List[float]):
        self.__out_tangent = self.__set_indexed(value)
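Keyframe.__set_indexed is what turns partially keyed targets into complete vectors: the indices recorded from the fcurves decide where each sampled value lands, and unkeyed components stay at 0.0. A standalone illustration with toy numbers:

target_len = 3          # e.g. "location" has 3 components, per get_target_len()
indices = [2]           # only the Z fcurve is keyed
values = [0.75]         # value sampled from that fcurve at this frame

result = [0.0] * target_len
for i, v in zip(indices, values):
    result[i] = v

assert result == [0.0, 0.0, 0.75]   # a complete vector, unkeyed components at 0.0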
231
scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_scene_animation.py
Normal file
@@ -0,0 +1,231 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
from ....io.com import gltf2_io
from ...com.gltf2_blender_extras import generate_extras
from ..gltf2_blender_gather_tree import VExportNode
from .gltf2_blender_gather_drivers import get_sk_drivers
from .sampled.armature.armature_channels import gather_armature_sampled_channels
from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels
from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sk_sampled_channels
from .sampled.data.gltf2_blender_gather_data_channels import gather_data_sampled_channels
from .gltf2_blender_gather_animation_utils import link_samplers, add_slide_data


def gather_scene_animations(export_settings):

    # If there is no animation in the file => no need to bake, except if we are trying to bake GN instances
    if len(bpy.data.actions) == 0 and export_settings['gltf_gn_mesh'] is False:
        # TODO: get a better filter by checking we really have some GN instances...
        return []

    total_channels = []
    animations = []

    start_frame = bpy.context.scene.frame_start
    end_frame = bpy.context.scene.frame_end

    # The following options have no impact:
    # - We force sampling & baking
    # - Export_frame_range --> because in SCENE mode we bake the whole scene frame range
    # - CROP or SLIDE --> scenes don't have negative frames

    # This mode bakes all objects as they are in the scene
    vtree = export_settings['vtree']
    for obj_uuid in vtree.get_all_objects():

        # Skip objects that are not exported
        if vtree.nodes[obj_uuid].node is None:
            if export_settings['gltf_armature_object_remove'] is True:
                # Manage the armature object, as this is the object that has the animation
                if not vtree.nodes[obj_uuid].blender_object:
                    continue
            else:
                continue

        if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.COLLECTION:
            continue

        # blender_object can be None for GN instances
        blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

        export_settings['ranges'][obj_uuid] = {}
        export_settings['ranges'][obj_uuid][obj_uuid] = {'start': start_frame, 'end': end_frame}
        if blender_object and blender_object.type == "ARMATURE":
            # Manage SK drivers
            obj_drivers = get_sk_drivers(obj_uuid, export_settings)
            for obj_dr in obj_drivers:
                if obj_dr not in export_settings['ranges']:
                    export_settings['ranges'][obj_dr] = {}
                export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid] = {}
                export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid]['start'] = start_frame
                export_settings['ranges'][obj_dr][obj_uuid + "_" + obj_uuid]['end'] = end_frame

        if export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0:
            add_slide_data(start_frame, obj_uuid, obj_uuid, export_settings)

        # Perform the baking animation export

        if blender_object and blender_object.type != "ARMATURE":
            # We have to check whether this is a skinned mesh, because we don't force animation baking in that case
            if export_settings['vtree'].nodes[obj_uuid].skin is None:
                channels, _ = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings)
                if channels is not None:
                    total_channels.extend(channels)
            if export_settings['gltf_morph_anim'] and blender_object.type == "MESH" \
                    and blender_object.data is not None \
                    and blender_object.data.shape_keys is not None:

                # We must ignore SK for meshes that are driven by an armature parent
                ignore_sk = False
                if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None \
                        and export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.ARMATURE:
                    obj_drivers = get_sk_drivers(export_settings['vtree'].nodes[obj_uuid].parent_uuid, export_settings)
                    if obj_uuid in obj_drivers:
                        ignore_sk = True

                if ignore_sk is False:
                    channels = gather_sk_sampled_channels(obj_uuid, obj_uuid, export_settings)
                    if channels is not None:
                        total_channels.extend(channels)
        elif blender_object is None:
            # These are GN instances
            # Currently not checking whether this instance is skinned... TODO
            channels, _ = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings)
            if channels is not None:
                total_channels.extend(channels)
        else:
            channels, _ = gather_armature_sampled_channels(obj_uuid, obj_uuid, export_settings)
            if channels is not None:
                total_channels.extend(channels)

        if export_settings['gltf_anim_scene_split_object'] is True:
            if len(total_channels) > 0:
                animation = gltf2_io.Animation(
                    channels=total_channels,
                    extensions=None,
                    extras=__gather_extras(blender_object, export_settings),
                    name=blender_object.name if blender_object else "GN Instance",
                    samplers=[]
                )
                link_samplers(animation, export_settings)
                animations.append(animation)

            total_channels = []

    if export_settings['gltf_export_anim_pointer'] is True:
        # Now export KHR_animation_pointer for materials
        for mat in export_settings['KHR_animation_pointer']['materials'].keys():
            if len(export_settings['KHR_animation_pointer']['materials'][mat]['paths']) == 0:
                continue

            blender_material = [m for m in bpy.data.materials if id(m) == mat][0]

            export_settings['ranges'][id(blender_material)] = {}
            export_settings['ranges'][id(blender_material)][id(blender_material)] = {
                'start': start_frame, 'end': end_frame}

            if export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0:
                add_slide_data(start_frame, mat, mat, export_settings, add_drivers=False)

            channels = gather_data_sampled_channels('materials', mat, mat, None, export_settings)
            if channels is not None:
                total_channels.extend(channels)

            if export_settings['gltf_anim_scene_split_object'] is True:
                if len(total_channels) > 0:
                    animation = gltf2_io.Animation(
                        channels=total_channels,
                        extensions=None,
                        extras=__gather_extras(blender_material, export_settings),
                        name=blender_material.name,
                        samplers=[]
                    )
                    link_samplers(animation, export_settings)
                    animations.append(animation)

                total_channels = []

        # Now export KHR_animation_pointer for lights
        for light in export_settings['KHR_animation_pointer']['lights'].keys():
            if len(export_settings['KHR_animation_pointer']['lights'][light]['paths']) == 0:
                continue

            blender_light = [l for l in bpy.data.lights if id(l) == light][0]

            export_settings['ranges'][id(blender_light)] = {}
            export_settings['ranges'][id(blender_light)][id(blender_light)] = {'start': start_frame, 'end': end_frame}

            if export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0:
                add_slide_data(start_frame, light, light, export_settings, add_drivers=False)

            channels = gather_data_sampled_channels('lights', light, light, None, export_settings)
            if channels is not None:
                total_channels.extend(channels)

            if export_settings['gltf_anim_scene_split_object'] is True:
                if len(total_channels) > 0:
                    animation = gltf2_io.Animation(
                        channels=total_channels,
                        extensions=None,
                        extras=__gather_extras(blender_light, export_settings),
                        name=blender_light.name,
                        samplers=[]
                    )
                    link_samplers(animation, export_settings)
                    animations.append(animation)

                total_channels = []

        # Now export KHR_animation_pointer for cameras
        for cam in export_settings['KHR_animation_pointer']['cameras'].keys():
            if len(export_settings['KHR_animation_pointer']['cameras'][cam]['paths']) == 0:
                continue

            blender_camera = [c for c in bpy.data.cameras if id(c) == cam][0]

            export_settings['ranges'][id(blender_camera)] = {}
            export_settings['ranges'][id(blender_camera)][id(blender_camera)] = {'start': start_frame, 'end': end_frame}

            if export_settings['gltf_anim_slide_to_zero'] is True and start_frame > 0:
                add_slide_data(start_frame, cam, cam, export_settings, add_drivers=False)

            channels = gather_data_sampled_channels('cameras', cam, cam, None, export_settings)
            if channels is not None:
                total_channels.extend(channels)

            if export_settings['gltf_anim_scene_split_object'] is True:
                if len(total_channels) > 0:
                    animation = gltf2_io.Animation(
                        channels=total_channels,
                        extensions=None,
                        extras=__gather_extras(blender_camera, export_settings),
                        name=blender_camera.name,
                        samplers=[]
                    )
                    link_samplers(animation, export_settings)
                    animations.append(animation)

                total_channels = []

    if export_settings['gltf_anim_scene_split_object'] is False:
        if len(total_channels) > 0:
            animation = gltf2_io.Animation(
                channels=total_channels,
                extensions=None,
                extras=__gather_extras(bpy.context.scene, export_settings),
                name=bpy.context.scene.name,
                samplers=[]
            )
            link_samplers(animation, export_settings)
            animations.append(animation)

    return animations


def __gather_extras(blender_asset, export_settings):
    if export_settings['gltf_extras']:
        return generate_extras(blender_asset)
    return None
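Both export_settings['ranges'] and export_settings['slide'] built above are plain nested dicts keyed by object uuid (or datablock id). A short sketch of the shapes involved, with illustrative values:

export_settings = {'ranges': {}, 'slide': {}}
obj_uuid = "uuid-cube"              # illustrative uuid string
start_frame, end_frame = 10, 120

export_settings['ranges'][obj_uuid] = {obj_uuid: {'start': start_frame, 'end': end_frame}}

# With gltf_anim_slide_to_zero, the original start frame is remembered so the
# exported animation can later be shifted to start at time 0.
export_settings['slide'][obj_uuid] = {obj_uuid: start_frame}

assert export_settings['ranges'][obj_uuid][obj_uuid] == {'start': 10, 'end': 120}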
718
scripts/addons_core/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_tracks.py
Normal file
@@ -0,0 +1,718 @@
|
||||
# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import bpy
|
||||
import typing
|
||||
from ....io.com import gltf2_io
|
||||
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
|
||||
from ..gltf2_blender_gather_cache import cached
|
||||
from ..gltf2_blender_gather_tree import VExportNode
|
||||
from .gltf2_blender_gather_animation_utils import merge_tracks_perform, bake_animation, bake_data_animation, add_slide_data, reset_bone_matrix, reset_sk_data
|
||||
from .gltf2_blender_gather_drivers import get_sk_drivers
|
||||
from .sampled.gltf2_blender_gather_animation_sampling_cache import get_cache_data
|
||||
|
||||
|
||||
def gather_tracks_animations(export_settings):
|
||||
|
||||
animations = []
|
||||
merged_tracks = {}
|
||||
|
||||
vtree = export_settings['vtree']
|
||||
for obj_uuid in vtree.get_all_objects():
|
||||
|
||||
# Do not manage not exported objects
|
||||
if vtree.nodes[obj_uuid].node is None:
|
||||
if export_settings['gltf_armature_object_remove'] is True:
|
||||
# Manage armature object, as this is the object that has the animation
|
||||
if not vtree.nodes[obj_uuid].blender_object:
|
||||
continue
|
||||
else:
|
||||
continue
|
||||
|
||||
if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.COLLECTION:
|
||||
continue
|
||||
|
||||
animations_, merged_tracks = gather_track_animations(obj_uuid, merged_tracks, len(animations), export_settings)
|
||||
animations += animations_
|
||||
|
||||
if export_settings['gltf_export_anim_pointer'] is True:
|
||||
# Manage Material tracks (for KHR_animation_pointer)
|
||||
for mat in export_settings['KHR_animation_pointer']['materials'].keys():
|
||||
animations_, merged_tracks = gather_data_track_animations(
|
||||
'materials', mat, merged_tracks, len(animations), export_settings)
|
||||
animations += animations_
|
||||
|
||||
# Manage Cameras tracks (for KHR_animation_pointer)
|
||||
for cam in export_settings['KHR_animation_pointer']['cameras'].keys():
|
||||
animations_, merged_tracks = gather_data_track_animations(
|
||||
'cameras', cam, merged_tracks, len(animations), export_settings)
|
||||
animations += animations_
|
||||
|
||||
# Manage lights tracks (for KHR_animation_pointer)
|
||||
for light in export_settings['KHR_animation_pointer']['lights'].keys():
|
||||
animations_, merged_tracks = gather_data_track_animations(
|
||||
'lights', light, merged_tracks, len(animations), export_settings)
|
||||
animations += animations_
|
||||
|
||||
new_animations = merge_tracks_perform(merged_tracks, animations, export_settings)
|
||||
|
||||
return new_animations
|
||||
|
||||
|
||||
def gather_track_animations(obj_uuid: int,
|
||||
tracks: typing.Dict[str,
|
||||
typing.List[int]],
|
||||
offset: int,
|
||||
export_settings) -> typing.Tuple[typing.List[gltf2_io.Animation],
|
||||
typing.Dict[str,
|
||||
typing.List[int]]]:
|
||||
|
||||
animations = []
|
||||
|
||||
# Bake situation does not export any extra animation channels, as we bake TRS + weights on Track or scene level, without direct
|
||||
# Access to fcurve and action data
|
||||
|
||||
blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
|
||||
# Collect all tracks affecting this object.
|
||||
blender_tracks = __get_blender_tracks(obj_uuid, export_settings)
|
||||
|
||||
# If no tracks, return
|
||||
# This will avoid to set / reset some data
|
||||
if len(blender_tracks) == 0:
|
||||
return animations, tracks
|
||||
|
||||
# Keep current situation
|
||||
current_action = None
|
||||
current_sk_action = None
|
||||
current_world_matrix = None
|
||||
current_use_nla = None
|
||||
current_use_nla_sk = None
|
||||
restore_track_mute = {}
|
||||
restore_track_mute["OBJECT"] = {}
|
||||
restore_track_mute["SHAPEKEY"] = {}
|
||||
|
||||
if blender_object.animation_data:
|
||||
current_action = blender_object.animation_data.action
|
||||
current_use_nla = blender_object.animation_data.use_nla
|
||||
restore_tweak_mode = blender_object.animation_data.use_tweak_mode
|
||||
current_world_matrix = blender_object.matrix_world.copy()
|
||||
|
||||
if blender_object.type == "MESH" \
|
||||
and blender_object.data is not None \
|
||||
and blender_object.data.shape_keys is not None \
|
||||
and blender_object.data.shape_keys.animation_data is not None:
|
||||
current_sk_action = blender_object.data.shape_keys.animation_data.action
|
||||
current_use_nla_sk = blender_object.data.shape_keys.animation_data.use_nla
|
||||
|
||||
# Prepare export for obj
|
||||
solo_track = None
|
||||
if blender_object.animation_data:
|
||||
blender_object.animation_data.action = None
|
||||
blender_object.animation_data.use_nla = True
|
||||
# Remove any solo (starred) NLA track. Restored after export
|
||||
for track in blender_object.animation_data.nla_tracks:
|
||||
if track.is_solo:
|
||||
solo_track = track
|
||||
track.is_solo = False
|
||||
break
|
||||
|
||||
solo_track_sk = None
|
||||
if blender_object.type == "MESH" \
|
||||
and blender_object.data is not None \
|
||||
and blender_object.data.shape_keys is not None \
|
||||
and blender_object.data.shape_keys.animation_data is not None:
|
||||
# Remove any solo (starred) NLA track. Restored after export
|
||||
for track in blender_object.data.shape_keys.animation_data.nla_tracks:
|
||||
if track.is_solo:
|
||||
solo_track_sk = track
|
||||
track.is_solo = False
|
||||
break
|
||||
|
||||
# Mute all channels
|
||||
for track_group in [b[0] for b in blender_tracks if b[2] == "OBJECT"]:
|
||||
for track in track_group:
|
||||
restore_track_mute["OBJECT"][track.idx] = blender_object.animation_data.nla_tracks[track.idx].mute
|
||||
blender_object.animation_data.nla_tracks[track.idx].mute = True
|
||||
for track_group in [b[0] for b in blender_tracks if b[2] == "SHAPEKEY"]:
|
||||
for track in track_group:
|
||||
restore_track_mute["SHAPEKEY"][track.idx] = blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute
|
||||
blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = True
|
||||
|
||||
export_user_extensions('animation_track_switch_loop_hook', export_settings, blender_object, False)
|
||||
|
||||
# Export
|
||||
|
||||
# Export all collected tracks.
|
||||
for bl_tracks, track_name, on_type in blender_tracks:
|
||||
prepare_tracks_range(obj_uuid, bl_tracks, track_name, export_settings)
|
||||
|
||||
if on_type == "OBJECT":
|
||||
# Enable tracks
|
||||
for track in bl_tracks:
|
||||
export_user_extensions(
|
||||
'pre_animation_track_switch_hook',
|
||||
export_settings,
|
||||
blender_object,
|
||||
track,
|
||||
track_name,
|
||||
on_type)
|
||||
blender_object.animation_data.nla_tracks[track.idx].mute = False
|
||||
export_user_extensions(
|
||||
'post_animation_track_switch_hook',
|
||||
export_settings,
|
||||
blender_object,
|
||||
track,
|
||||
track_name,
|
||||
on_type)
|
||||
else:
|
||||
# Enable tracks
|
||||
for track in bl_tracks:
|
||||
export_user_extensions(
|
||||
'pre_animation_track_switch_hook',
|
||||
export_settings,
|
||||
blender_object,
|
||||
track,
|
||||
track_name,
|
||||
on_type)
|
||||
blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = False
|
||||
export_user_extensions(
|
||||
'post_animation_track_switch_hook',
|
||||
export_settings,
|
||||
blender_object,
|
||||
track,
|
||||
track_name,
|
||||
on_type)
|
||||
|
||||
reset_bone_matrix(blender_object, export_settings)
|
||||
if on_type == "SHAPEKEY":
|
||||
reset_sk_data(blender_object, blender_tracks, export_settings)
|
||||
|
||||
# Export animation
|
||||
animation = bake_animation(obj_uuid, track_name, export_settings, mode=on_type)
|
||||
get_cache_data.reset_cache()
|
||||
if animation is not None:
|
||||
animations.append(animation)
|
||||
|
||||
# Store data for merging animation later
|
||||
# Do not take into account default NLA track names
|
||||
if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")):
|
||||
if track_name not in tracks.keys():
|
||||
tracks[track_name] = []
|
||||
tracks[track_name].append(offset + len(animations) - 1) # Store index of animation in animations
|
||||
|
||||
# Restoring muting
|
||||
if on_type == "OBJECT":
|
||||
for track in bl_tracks:
|
||||
blender_object.animation_data.nla_tracks[track.idx].mute = True
|
||||
else:
|
||||
for track in bl_tracks:
|
||||
blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = True
|
||||
|
||||
# Restoring
|
||||
if current_action is not None:
|
||||
blender_object.animation_data.action = current_action
|
||||
if current_sk_action is not None:
|
||||
blender_object.data.shape_keys.animation_data.action = current_sk_action
|
||||
if solo_track is not None:
|
||||
solo_track.is_solo = True
|
||||
if solo_track_sk is not None:
|
||||
solo_track_sk.is_solo = True
|
||||
if blender_object.animation_data:
|
||||
blender_object.animation_data.use_nla = current_use_nla
|
||||
blender_object.animation_data.use_tweak_mode = restore_tweak_mode
|
||||
for track_group in [b[0] for b in blender_tracks if b[2] == "OBJECT"]:
|
||||
for track in track_group:
|
||||
blender_object.animation_data.nla_tracks[track.idx].mute = restore_track_mute["OBJECT"][track.idx]
|
||||
if blender_object.type == "MESH" \
|
||||
and blender_object.data is not None \
|
||||
and blender_object.data.shape_keys is not None \
|
||||
and blender_object.data.shape_keys.animation_data is not None:
|
||||
blender_object.data.shape_keys.animation_data.use_nla = current_use_nla_sk
|
||||
for track_group in [b[0] for b in blender_tracks if b[2] == "SHAPEKEY"]:
|
||||
for track in track_group:
|
||||
blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = restore_track_mute["SHAPEKEY"][track.idx]
|
||||
|
||||
blender_object.matrix_world = current_world_matrix
|
||||
|
||||
export_user_extensions('animation_track_switch_loop_hook', export_settings, blender_object, True)
|
||||
|
||||
return animations, tracks

@cached
def __get_blender_tracks(obj_uuid: str, export_settings):

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
    export_user_extensions('pre_gather_tracks_hook', export_settings, blender_object)

    tracks, names, types = __get_nla_tracks_obj(obj_uuid, export_settings)
    tracks_sk, names_sk, types_sk = __get_nla_tracks_sk(obj_uuid, export_settings)

    tracks.extend(tracks_sk)
    names.extend(names_sk)
    types.extend(types_sk)

    # Wrap the parameters in a class so hooks can modify them in place
    class GatherTrackHookParameters:
        def __init__(self, blender_tracks, blender_tracks_name, track_on_type):
            self.blender_tracks = blender_tracks
            self.blender_tracks_name = blender_tracks_name
            self.track_on_type = track_on_type

    gathertrackhookparams = GatherTrackHookParameters(tracks, names, types)

    export_user_extensions('gather_tracks_hook', export_settings, blender_object, gathertrackhookparams)

    # Get the parameters back from the hooks
    tracks = gathertrackhookparams.blender_tracks
    names = gathertrackhookparams.blender_tracks_name
    types = gathertrackhookparams.track_on_type

    return list(zip(tracks, names, types))
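Because a mutable parameter object is handed to `gather_tracks_hook`, a user extension can rewrite the gathered lists in place rather than return new ones. A hedged sketch of what such an extension could look like (the hook name comes from the call above; the filtering logic and the convention that `export_settings` is passed as the last hook argument are assumptions for illustration):

class glTF2ExportUserExtension:
    def gather_tracks_hook(self, blender_object, params, export_settings):
        # Example only: drop tracks whose name starts with '_' and keep the three lists in sync.
        keep = [i for i, name in enumerate(params.blender_tracks_name) if not name.startswith('_')]
        params.blender_tracks = [params.blender_tracks[i] for i in keep]
        params.blender_tracks_name = [params.blender_tracks_name[i] for i in keep]
        params.track_on_type = [params.track_on_type[i] for i in keep]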

class NLATrack:
    def __init__(self, idx, frame_start, frame_end, default_solo, default_muted):
        self.idx = idx
        self.frame_start = frame_start
        self.frame_end = frame_end
        self.default_solo = default_solo
        self.default_muted = default_muted


def __get_nla_tracks_obj(obj_uuid: str, export_settings):

    obj = export_settings['vtree'].nodes[obj_uuid].blender_object

    if not obj.animation_data:
        return [], [], []
    if len(obj.animation_data.nla_tracks) == 0:
        return [], [], []

    exported_tracks = []

    current_exported_tracks = []

    for idx_track, track in enumerate(obj.animation_data.nla_tracks):
        if len(track.strips) == 0:
            continue

        stored_track = NLATrack(
            idx_track,
            track.strips[0].frame_start,
            track.strips[-1].frame_end,
            track.is_solo,
            track.mute
        )

        # Keep tracks together when their strips blend with each other
        if any([strip.blend_type != 'REPLACE' for strip in track.strips]):
            # There is some blending: keep this track with the previous one
            pass
        else:
            # The previous group can go to the list, if any (not for the first track)
            if len(current_exported_tracks) != 0:
                exported_tracks.append(current_exported_tracks)
                current_exported_tracks = []
        current_exported_tracks.append(stored_track)

    # End of loop: keep the last group
    exported_tracks.append(current_exported_tracks)

    track_names = [obj.animation_data.nla_tracks[tracks_group[0].idx].name for tracks_group in exported_tracks]
    on_types = ['OBJECT'] * len(track_names)
    return exported_tracks, track_names, on_types
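The grouping rule above starts a new group at each track whose strips all use 'REPLACE' blending, and keeps blended tracks attached to the previous group so they are baked together. A standalone sketch of the same rule on plain data (mock tuples instead of NLA tracks, for illustration only):

def group_tracks(tracks):
    # tracks: list of (is_blended, payload) tuples, in NLA stack order.
    groups, current = [], []
    for is_blended, payload in tracks:
        if not is_blended and current:
            groups.append(current)  # A non-blended track starts a new group
            current = []
        current.append(payload)
    groups.append(current)  # Keep the last group
    return groups

# group_tracks([(False, 'A'), (True, 'B'), (False, 'C')]) -> [['A', 'B'], ['C']]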

def __get_nla_tracks_sk(obj_uuid: str, export_settings):

    obj = export_settings['vtree'].nodes[obj_uuid].blender_object

    if not obj.type == "MESH":
        return [], [], []
    if obj.data is None:
        return [], [], []
    if obj.data.shape_keys is None:
        return [], [], []
    if not obj.data.shape_keys.animation_data:
        return [], [], []
    if len(obj.data.shape_keys.animation_data.nla_tracks) == 0:
        return [], [], []

    exported_tracks = []

    current_exported_tracks = []

    for idx_track, track in enumerate(obj.data.shape_keys.animation_data.nla_tracks):
        if len(track.strips) == 0:
            continue

        stored_track = NLATrack(
            idx_track,
            track.strips[0].frame_start,
            track.strips[-1].frame_end,
            track.is_solo,
            track.mute
        )

        # Keep tracks together when their strips blend with each other
        if any([strip.blend_type != 'REPLACE' for strip in track.strips]):
            # There is some blending: keep this track with the previous one
            pass
        else:
            # The previous group can go to the list, if any (not for the first track)
            if len(current_exported_tracks) != 0:
                exported_tracks.append(current_exported_tracks)
                current_exported_tracks = []
        current_exported_tracks.append(stored_track)

    # End of loop: keep the last group
    exported_tracks.append(current_exported_tracks)

    track_names = [obj.data.shape_keys.animation_data.nla_tracks[tracks_group[0].idx].name for tracks_group in exported_tracks]
    on_types = ['SHAPEKEY'] * len(track_names)
    return exported_tracks, track_names, on_types


def prepare_tracks_range(obj_uuid, tracks, track_name, export_settings, with_driver=True):

    track_slide = {}

    for idx, btrack in enumerate(tracks):
        frame_start = btrack.frame_start if idx == 0 else min(frame_start, btrack.frame_start)
        frame_end = btrack.frame_end if idx == 0 else max(frame_end, btrack.frame_end)

    # If there are negative frames and the CROP option is used -> set start at 0
    if frame_start < 0 and export_settings['gltf_negative_frames'] == "CROP":
        frame_start = 0

    if export_settings['gltf_frame_range'] is True:
        frame_start = max(bpy.context.scene.frame_start, frame_start)
        frame_end = min(bpy.context.scene.frame_end, frame_end)

    export_settings['ranges'][obj_uuid] = {}
    export_settings['ranges'][obj_uuid][track_name] = {}
    export_settings['ranges'][obj_uuid][track_name]['start'] = int(frame_start)
    export_settings['ranges'][obj_uuid][track_name]['end'] = int(frame_end)

    if export_settings['gltf_negative_frames'] == "SLIDE":
        if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")):
            if track_name not in track_slide.keys() or (
                    track_name in track_slide.keys() and frame_start < track_slide[track_name]):
                track_slide.update({track_name: frame_start})
        else:
            if frame_start < 0:
                add_slide_data(frame_start, obj_uuid, track_name, export_settings)

    if export_settings['gltf_anim_slide_to_zero'] is True and frame_start > 0:
        if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")):
            if track_name not in track_slide.keys() or (
                    track_name in track_slide.keys() and frame_start < track_slide[track_name]):
                track_slide.update({track_name: frame_start})
        else:
            add_slide_data(frame_start, obj_uuid, track_name, export_settings)

    # For drivers
    if with_driver is True:
        if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.ARMATURE and export_settings['gltf_morph_anim'] is True:
            obj_drivers = get_sk_drivers(obj_uuid, export_settings)
            for obj_dr in obj_drivers:
                if obj_dr not in export_settings['ranges']:
                    export_settings['ranges'][obj_dr] = {}
                export_settings['ranges'][obj_dr][obj_uuid + "_" + track_name] = {}
                export_settings['ranges'][obj_dr][obj_uuid + "_" + track_name]['start'] = frame_start
                export_settings['ranges'][obj_dr][obj_uuid + "_" + track_name]['end'] = frame_end

    if (export_settings['gltf_negative_frames'] == "SLIDE"
            or export_settings['gltf_anim_slide_to_zero'] is True) \
            and len(track_slide) > 0:

        if track_name in track_slide.keys():
            if export_settings['gltf_negative_frames'] == "SLIDE" and track_slide[track_name] < 0:
                add_slide_data(track_slide[track_name], obj_uuid, track_name, export_settings)
            elif export_settings['gltf_anim_slide_to_zero'] is True:
                add_slide_data(track_slide[track_name], obj_uuid, track_name, export_settings)
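A worked example of the range logic above, with made-up numbers: for a track group spanning frames -10..50, CROP clamps the start to 0, so the stored range is 0..50 and negative frames are dropped; SLIDE instead records an offset of -10 via add_slide_data, so keyframes are later shifted by +10 and the full animation exports as 0..60; with gltf_anim_slide_to_zero, a group spanning 20..50 is shifted back by 20 so playback starts at 0. In every case, the integer range written to export_settings['ranges'] is what the samplers iterate over.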

def gather_data_track_animations(
        blender_type_data: str,
        blender_id: str,
        tracks: typing.Dict[str, typing.List[int]],
        offset: int,
        export_settings) -> typing.Tuple[typing.List[gltf2_io.Animation], typing.Dict[str, typing.List[int]]]:

    animations = []

    # Collect all tracks affecting this object.
    blender_tracks = __get_data_blender_tracks(blender_type_data, blender_id, export_settings)

    if blender_type_data == "materials":
        blender_data_object = [mat for mat in bpy.data.materials if id(mat) == blender_id][0]
    elif blender_type_data == "cameras":
        blender_data_object = [cam for cam in bpy.data.cameras if id(cam) == blender_id][0]
    elif blender_type_data == "lights":
        blender_data_object = [light for light in bpy.data.lights if id(light) == blender_id][0]
    else:
        pass  # Should not happen

    # Keep the current situation
    current_action = None
    current_nodetree_action = None
    current_use_nla = None
    current_use_nla_node_tree = None
    restore_track_mute = {}
    restore_track_mute["MATERIAL"] = {}
    restore_track_mute["NODETREE"] = {}
    restore_track_mute["LIGHT"] = {}
    restore_track_mute["CAMERA"] = {}

    if blender_data_object.animation_data:
        current_action = blender_data_object.animation_data.action
        current_use_nla = blender_data_object.animation_data.use_nla
        restore_tweak_mode = blender_data_object.animation_data.use_tweak_mode

    if blender_type_data in ["materials", "lights"] \
            and blender_data_object.node_tree is not None \
            and blender_data_object.node_tree.animation_data is not None:
        current_nodetree_action = blender_data_object.node_tree.animation_data.action
        current_use_nla_node_tree = blender_data_object.node_tree.animation_data.use_nla

    # Prepare the data-block for export
    solo_track = None
    if blender_data_object.animation_data:
        blender_data_object.animation_data.action = None
        blender_data_object.animation_data.use_nla = True
        # Remove any solo (starred) NLA track. Restored after export
        for track in blender_data_object.animation_data.nla_tracks:
            if track.is_solo:
                solo_track = track
                track.is_solo = False
                break

    solo_track_sk = None
if blender_type_data == ["materials", "lights"] \
|
||||
and blender_data_object.node_tree is not None \
|
||||
and blender_data_object.node_tree.animation_data is not None:
        # Remove any solo (starred) NLA track. Restored after export
        for track in blender_data_object.node_tree.animation_data.nla_tracks:
            if track.is_solo:
                solo_track_sk = track
                track.is_solo = False
                break

    # Mute all channels
    if blender_type_data == "materials":
        for track_group in [b[0] for b in blender_tracks if b[2] == "MATERIAL"]:
            for track in track_group:
                restore_track_mute["MATERIAL"][track.idx] = blender_data_object.animation_data.nla_tracks[track.idx].mute
                blender_data_object.animation_data.nla_tracks[track.idx].mute = True
        for track_group in [b[0] for b in blender_tracks if b[2] == "NODETREE"]:
            for track in track_group:
                restore_track_mute["NODETREE"][track.idx] = blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute
                blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = True
    elif blender_type_data == "cameras":
        for track_group in [b[0] for b in blender_tracks if b[2] == "CAMERA"]:
            for track in track_group:
                restore_track_mute["CAMERA"][track.idx] = blender_data_object.animation_data.nla_tracks[track.idx].mute
                blender_data_object.animation_data.nla_tracks[track.idx].mute = True
    elif blender_type_data == "lights":
        for track_group in [b[0] for b in blender_tracks if b[2] == "LIGHT"]:
            for track in track_group:
                restore_track_mute["LIGHT"][track.idx] = blender_data_object.animation_data.nla_tracks[track.idx].mute
                blender_data_object.animation_data.nla_tracks[track.idx].mute = True
        for track_group in [b[0] for b in blender_tracks if b[2] == "NODETREE"]:
            for track in track_group:
                restore_track_mute["NODETREE"][track.idx] = blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute
                blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = True

    # Export

    # Export all collected tracks.
    for bl_tracks, track_name, on_type in blender_tracks:
        prepare_tracks_range(blender_id, bl_tracks, track_name, export_settings, with_driver=False)

        if on_type in ["MATERIAL", "CAMERA", "LIGHT"]:
            # Enable tracks
            for track in bl_tracks:
                blender_data_object.animation_data.nla_tracks[track.idx].mute = False
        elif on_type == "NODETREE":
            # Enable tracks
            for track in bl_tracks:
                blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = False

        # Export animation
        animation = bake_data_animation(blender_type_data, blender_id, track_name, on_type, export_settings)
        get_cache_data.reset_cache()
        if animation is not None:
            animations.append(animation)

            # Store data for merging animations later
            # Do not take default NLA track names into account
            if not (track_name.startswith("NlaTrack") or track_name.startswith("[Action Stash]")):
                if track_name not in tracks.keys():
                    tracks[track_name] = []
                tracks[track_name].append(offset + len(animations) - 1)  # Store the index of the animation in animations

        # Restore muting
        if on_type in ["MATERIAL", "CAMERA", "LIGHT"]:
            for track in bl_tracks:
                blender_data_object.animation_data.nla_tracks[track.idx].mute = True
        elif on_type == "NODETREE":
            for track in bl_tracks:
                blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = True

    # Restore the initial state
    if current_action is not None:
        blender_data_object.animation_data.action = current_action
    if current_nodetree_action is not None:
        blender_data_object.node_tree.animation_data.action = current_nodetree_action
    if solo_track is not None:
        solo_track.is_solo = True
    if solo_track_sk is not None:
        solo_track_sk.is_solo = True
    if blender_data_object.animation_data:
        blender_data_object.animation_data.use_nla = current_use_nla
        blender_data_object.animation_data.use_tweak_mode = restore_tweak_mode
        if blender_type_data == "materials":
            for track_group in [b[0] for b in blender_tracks if b[2] == "MATERIAL"]:
                for track in track_group:
                    blender_data_object.animation_data.nla_tracks[track.idx].mute = restore_track_mute["MATERIAL"][track.idx]
        elif blender_type_data == "cameras":
            for track_group in [b[0] for b in blender_tracks if b[2] == "CAMERA"]:
                for track in track_group:
                    blender_data_object.animation_data.nla_tracks[track.idx].mute = restore_track_mute["CAMERA"][track.idx]
        elif blender_type_data == "lights":
            for track_group in [b[0] for b in blender_tracks if b[2] == "LIGHT"]:
                for track in track_group:
                    blender_data_object.animation_data.nla_tracks[track.idx].mute = restore_track_mute["LIGHT"][track.idx]
    if blender_type_data in ["materials", "lights"] \
            and blender_data_object.node_tree is not None \
            and blender_data_object.node_tree.animation_data is not None:
        blender_data_object.node_tree.animation_data.use_nla = current_use_nla_node_tree
        for track_group in [b[0] for b in blender_tracks if b[2] == "NODETREE"]:
            for track in track_group:
                blender_data_object.node_tree.animation_data.nla_tracks[track.idx].mute = restore_track_mute["NODETREE"][track.idx]

    return animations, tracks


def __get_data_blender_tracks(blender_type_data, blender_id, export_settings):
    tracks, names, types = __get_nla_tracks_material(blender_type_data, blender_id, export_settings)
    if blender_type_data in ["materials", "lights"]:
        tracks_tree, names_tree, types_tree = __get_nla_tracks_material_node_tree(
            blender_type_data, blender_id, export_settings)
    else:
        tracks_tree, names_tree, types_tree = [], [], []

    tracks.extend(tracks_tree)
    names.extend(names_tree)
    types.extend(types_tree)

    return list(zip(tracks, names, types))


def __get_nla_tracks_material(blender_type_data, blender_id, export_settings):
    if blender_type_data == "materials":
        blender_data_object = [mat for mat in bpy.data.materials if id(mat) == blender_id][0]
    elif blender_type_data == "cameras":
        blender_data_object = [cam for cam in bpy.data.cameras if id(cam) == blender_id][0]
    elif blender_type_data == "lights":
        blender_data_object = [light for light in bpy.data.lights if id(light) == blender_id][0]
    else:
        pass  # Should not happen

    if not blender_data_object.animation_data:
        return [], [], []
    if len(blender_data_object.animation_data.nla_tracks) == 0:
        return [], [], []

    exported_tracks = []

    current_exported_tracks = []

    for idx_track, track in enumerate(blender_data_object.animation_data.nla_tracks):
        if len(track.strips) == 0:
            continue

        stored_track = NLATrack(
            idx_track,
            track.strips[0].frame_start,
            track.strips[-1].frame_end,
            track.is_solo,
            track.mute
        )

        # Keep tracks together when their strips blend with each other
        if any([strip.blend_type != 'REPLACE' for strip in track.strips]):
            # There is some blending: keep this track with the previous one
            pass
        else:
            # The previous group can go to the list, if any (not for the first track)
            if len(current_exported_tracks) != 0:
                exported_tracks.append(current_exported_tracks)
                current_exported_tracks = []
        current_exported_tracks.append(stored_track)

    # End of loop: keep the last group
    exported_tracks.append(current_exported_tracks)

    track_names = [blender_data_object.animation_data.nla_tracks[tracks_group[0].idx].name for tracks_group in exported_tracks]
    if blender_type_data == "materials":
        on_types = ['MATERIAL'] * len(track_names)
    elif blender_type_data == "cameras":
        on_types = ['CAMERA'] * len(track_names)
    elif blender_type_data == "lights":
        on_types = ['LIGHT'] * len(track_names)
    else:
        pass  # Should not happen
    return exported_tracks, track_names, on_types

def __get_nla_tracks_material_node_tree(blender_type_data, blender_id, export_settings):
    if blender_type_data == "materials":
        blender_object_data = [mat for mat in bpy.data.materials if id(mat) == blender_id][0]
    elif blender_type_data == "lights":
        blender_object_data = [light for light in bpy.data.lights if id(light) == blender_id][0]

    if not blender_object_data.node_tree:
        return [], [], []
    if not blender_object_data.node_tree.animation_data:
        return [], [], []
    if len(blender_object_data.node_tree.animation_data.nla_tracks) == 0:
        return [], [], []

    exported_tracks = []

    current_exported_tracks = []

    for idx_track, track in enumerate(blender_object_data.node_tree.animation_data.nla_tracks):
        if len(track.strips) == 0:
            continue

        stored_track = NLATrack(
            idx_track,
            track.strips[0].frame_start,
            track.strips[-1].frame_end,
            track.is_solo,
            track.mute
        )

        # Keep tracks together when their strips blend with each other
        if any([strip.blend_type != 'REPLACE' for strip in track.strips]):
            # There is some blending: keep this track with the previous one
            pass
        else:
            # The previous group can go to the list, if any (not for the first track)
            if len(current_exported_tracks) != 0:
                exported_tracks.append(current_exported_tracks)
                current_exported_tracks = []
        current_exported_tracks.append(stored_track)

    # End of loop: keep the last group
    exported_tracks.append(current_exported_tracks)

    track_names = [
        blender_object_data.node_tree.animation_data.nla_tracks[tracks_group[0].idx].name for tracks_group in exported_tracks]
    on_types = ['NODETREE'] * len(track_names)
    return exported_tracks, track_names, on_types
88
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_action_sampled.py
Normal file
@@ -0,0 +1,88 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......io.com import gltf2_io
from .....com.gltf2_blender_extras import generate_extras
from ...fcurves.gltf2_blender_gather_fcurves_sampler import gather_animation_fcurves_sampler
from .armature_channels import gather_armature_sampled_channels


def gather_action_armature_sampled(armature_uuid: str,
                                   blender_action: typing.Optional[bpy.types.Action],
                                   cache_key: str,
                                   export_settings):

    blender_object = export_settings['vtree'].nodes[armature_uuid].blender_object

    name = __gather_name(blender_action, armature_uuid, cache_key, export_settings)

    try:
        channels, extra_channels = __gather_channels(
            armature_uuid, blender_action.name if blender_action else cache_key, export_settings)
        animation = gltf2_io.Animation(
            channels=channels,
            extensions=None,
            extras=__gather_extras(blender_action, export_settings),
            name=name,
            samplers=[]  # We need to gather the samplers after gathering all channels --> populate this list in __link_samplers
        )
    except RuntimeError as error:
        export_settings['log'].warning("Animation '{}' could not be exported. Cause: {}".format(name, error))
        return None

    export_user_extensions('pre_gather_animation_hook', export_settings, animation, blender_action, blender_object)

    extra_samplers = []
    if export_settings['gltf_export_extra_animations']:
        for chan in [chan for chan in extra_channels.values() if len(chan['properties']) != 0]:
            for channel_group_name, channel_group in chan['properties'].items():

                # No glTF channel here, as we don't have any target
                # Try to retrieve the sampler directly
                sampler = gather_animation_fcurves_sampler(
                    armature_uuid, tuple(channel_group), None, None, True, export_settings)
                if sampler is not None:
                    extra_samplers.append((channel_group_name, sampler))

    if not animation.channels:
        return None, extra_samplers

    # To allow reuse of samplers in one animation: this will be done later, when we know all channels are here

    export_user_extensions(
        'gather_animation_hook',
        export_settings,
        animation,
        blender_action,
        blender_object)  # For compatibility with older versions
    export_user_extensions('animation_action_armature_sampled', export_settings,
                           animation, blender_object, blender_action, cache_key)

    return animation, extra_samplers


def __gather_name(blender_action: bpy.types.Action,
                  armature_uuid: str,
                  cache_key: str,
                  export_settings
                  ) -> str:
    if blender_action:
        return blender_action.name
    elif armature_uuid == cache_key:
        return export_settings['vtree'].nodes[armature_uuid].blender_object.name
    else:
        return cache_key


def __gather_channels(armature_uuid, blender_action_name, export_settings) -> typing.List[gltf2_io.AnimationChannel]:
    return gather_armature_sampled_channels(armature_uuid, blender_action_name, export_settings)


def __gather_extras(blender_action, export_settings):
    if export_settings['gltf_extras']:
        return generate_extras(blender_action) if blender_action else None
    return None
54
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channel_target.py
Normal file
@@ -0,0 +1,54 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......io.com import gltf2_io
from ....gltf2_blender_gather_cache import cached
from ....gltf2_blender_gather_joints import gather_joint_vnode


@cached
def gather_armature_sampled_channel_target(
        armature_uuid: str,
        bone: str,
        channel: str,
        export_settings
) -> gltf2_io.AnimationChannelTarget:

    blender_object = export_settings['vtree'].nodes[armature_uuid].blender_object

    animation_channel_target = gltf2_io.AnimationChannelTarget(
        extensions=__gather_extensions(armature_uuid, bone, channel, export_settings),
        extras=__gather_extras(armature_uuid, bone, channel, export_settings),
        node=__gather_node(armature_uuid, bone, export_settings),
        path=__gather_path(channel, export_settings)
    )

    export_user_extensions('gather_animation_bone_sampled_channel_target_hook',
                           export_settings,
                           blender_object,
                           bone,
                           channel)

    return animation_channel_target


def __gather_extensions(armature_uuid, bone, channel, export_settings):
    return None


def __gather_extras(armature_uuid, bone, channel, export_settings):
    return None


def __gather_node(armature_uuid, bone, export_settings):
    return gather_joint_vnode(export_settings['vtree'].nodes[armature_uuid].bones[bone], export_settings)


def __gather_path(channel, export_settings):
    return {
        "location": "translation",
        "rotation_quaternion": "rotation",
        "scale": "scale"
    }.get(channel)
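For example, __gather_path("rotation_quaternion", export_settings) yields "rotation", the glTF target path name, while any unsupported property yields None, which makes the caller skip the channel entirely. Euler rotations never reach this mapping: they are remapped to rotation_quaternion earlier in the channel gathering (see the delta/euler mapping table in armature_channels.py below).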
216
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channels.py
Normal file
@@ -0,0 +1,216 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from ......io.com import gltf2_io
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from .....com.gltf2_blender_conversion import get_gltf_interpolation
from .....com.gltf2_blender_conversion import get_target, get_channel_from_target
from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups
from ...fcurves.gltf2_blender_gather_fcurves_channels import needs_baking
from ...gltf2_blender_gather_drivers import get_sk_drivers
from ..object.gltf2_blender_gather_object_channels import gather_sampled_object_channel
from ..shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel
from .armature_channel_target import gather_armature_sampled_channel_target
from .armature_sampler import gather_bone_sampled_animation_sampler


def gather_armature_sampled_channels(armature_uuid, blender_action_name,
                                     export_settings) -> typing.List[gltf2_io.AnimationChannel]:
    channels = []
    extra_channels = {}

    # Then bake all bones
    bones_to_be_animated = []
    bones_uuid = export_settings["vtree"].get_all_bones(armature_uuid)
    bones_to_be_animated = [
        export_settings["vtree"].nodes[b].blender_bone.name for b in bones_uuid if export_settings["vtree"].nodes[b].leaf_reference is None]

    # The list of really animated bones is needed for optimization decisions
    list_of_animated_bone_channels = {}
    if armature_uuid != blender_action_name and blender_action_name in bpy.data.actions:
        # Not a bake situation
        channels_animated, to_be_sampled, extra_channels = get_channel_groups(
            armature_uuid, bpy.data.actions[blender_action_name], export_settings)
        for chan in [chan for chan in channels_animated.values() if chan['bone'] is not None]:
            for prop in chan['properties'].keys():
                list_of_animated_bone_channels[(chan['bone'], get_channel_from_target(get_target(prop)))] = get_gltf_interpolation(
                    chan['properties'][prop][0].keyframe_points[0].interpolation)  # Could be exported without sampling: keep the interpolation

        for _, _, chan_prop, chan_bone in [chan for chan in to_be_sampled if chan[1] == "BONE"]:
            list_of_animated_bone_channels[
                (
                    chan_bone,
                    chan_prop,
                )
            ] = get_gltf_interpolation("LINEAR")  # If forced to be sampled, keep LINEAR interpolation

    for bone in bones_to_be_animated:
        for p in ["location", "rotation_quaternion", "scale"]:
            channel = gather_sampled_bone_channel(
                armature_uuid,
                bone,
                p,
                blender_action_name,
                (bone, p) in list_of_animated_bone_channels.keys(),
                list_of_animated_bone_channels[(bone, p)] if (bone, p) in list_of_animated_bone_channels.keys() else get_gltf_interpolation("LINEAR"),
                export_settings)
            if channel is not None:
                channels.append(channel)

    # Retrieve animation on the armature object itself, if any
    # If the armature is baked (no animation of the armature), we need to use all channels
    if blender_action_name == armature_uuid or export_settings['gltf_animation_mode'] in ["SCENE", "NLA_TRACKS"]:
        armature_channels = []
    else:
        armature_channels = __gather_armature_object_channel(
            armature_uuid, bpy.data.actions[blender_action_name], export_settings)

    for p in ["location", "rotation_quaternion", "scale"]:
        armature_channel = gather_sampled_object_channel(
            armature_uuid,
            p,
            blender_action_name,
            p in [a[0] for a in armature_channels],
            [c[1] for c in armature_channels if c[0] == p][0] if p in [a[0] for a in armature_channels] else "LINEAR",
            export_settings
        )

        if armature_channel is not None:
            channels.append(armature_channel)

    # Retrieve channels for drivers, if needed
    drivers_to_manage = get_sk_drivers(armature_uuid, export_settings)
    for obj_driver_uuid in drivers_to_manage:
        channel = gather_sampled_sk_channel(obj_driver_uuid, armature_uuid + "_" + blender_action_name, export_settings)
        if channel is not None:
            channels.append(channel)

    return channels, extra_channels


def gather_sampled_bone_channel(
        armature_uuid: str,
        bone: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        node_channel_interpolation: str,
        export_settings
):

    __target = __gather_target(armature_uuid, bone, channel, export_settings)
    if __target.path is not None:
        sampler = __gather_sampler(
            armature_uuid,
            bone,
            channel,
            action_name,
            node_channel_is_animated,
            node_channel_interpolation,
            export_settings)

        if sampler is None:
            # After checking, there is no need to animate this node for this channel
            return None

        animation_channel = gltf2_io.AnimationChannel(
            extensions=None,
            extras=None,
            sampler=sampler,
            target=__target
        )

        export_user_extensions('gather_animation_channel_hook',
                               export_settings,
                               animation_channel,
                               channel,
                               export_settings['vtree'].nodes[armature_uuid].blender_object,
                               bone,
                               action_name,
                               node_channel_is_animated
                               )

        return animation_channel
    return None


def __gather_target(armature_uuid: str,
                    bone: str,
                    channel: str,
                    export_settings
                    ) -> gltf2_io.AnimationChannelTarget:

    return gather_armature_sampled_channel_target(
        armature_uuid, bone, channel, export_settings)


def __gather_sampler(
        armature_uuid,
        bone,
        channel,
        action_name,
        node_channel_is_animated,
        node_channel_interpolation,
        export_settings):
    return gather_bone_sampled_animation_sampler(
        armature_uuid,
        bone,
        channel,
        action_name,
        node_channel_is_animated,
        node_channel_interpolation,
        export_settings
    )


def __gather_armature_object_channel(obj_uuid: str, blender_action, export_settings):
    channels = []

    channels_animated, to_be_sampled, extra_channels = get_channel_groups(obj_uuid, blender_action, export_settings)
    # Remove all channels linked to bones, keep only direct object channels
    channels_animated = [c for c in channels_animated.values() if c['type'] == "OBJECT"]
    to_be_sampled = [c for c in to_be_sampled if c[1] == "OBJECT"]

    original_channels = []
    for c in channels_animated:
        original_channels.extend([(prop, c['properties'][prop][0].keyframe_points[0].interpolation)
                                  for prop in c['properties'].keys()])

    for c, inter in original_channels:
        channels.append(
            (
                {
                    "location": "location",
                    "rotation_quaternion": "rotation_quaternion",
                    "rotation_euler": "rotation_quaternion",
                    "scale": "scale",
                    "delta_location": "location",
                    "delta_scale": "scale",
                    "delta_rotation_euler": "rotation_quaternion",
                    "delta_rotation_quaternion": "rotation_quaternion"
                }.get(c),
                get_gltf_interpolation(inter)
            )
        )

    for c in to_be_sampled:
        channels.append(
            (
                {
                    "location": "location",
                    "rotation_quaternion": "rotation_quaternion",
                    "rotation_euler": "rotation_quaternion",
                    "scale": "scale",
                    "delta_location": "location",
                    "delta_scale": "scale",
                    "delta_rotation_euler": "rotation_quaternion",
                    "delta_rotation_quaternion": "rotation_quaternion"
                }.get(c[2]),
                get_gltf_interpolation("LINEAR")  # Forced to be sampled, so use LINEAR
            )
        )

    return channels
93
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_keyframes.py
Normal file
@@ -0,0 +1,93 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import typing
import numpy as np
from ....gltf2_blender_gather_cache import cached
from ...gltf2_blender_gather_keyframes import Keyframe
from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data


@cached
def gather_bone_sampled_keyframes(
        armature_uuid: str,
        bone: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        export_settings
) -> typing.List[Keyframe]:

    start_frame = export_settings['ranges'][armature_uuid][action_name]['start']
    end_frame = export_settings['ranges'][armature_uuid][action_name]['end']

    keyframes = []

    frame = start_frame
    step = export_settings['gltf_frame_step']

    while frame <= end_frame:
        key = Keyframe(None, frame, channel)

        mat = get_cache_data(
            'bone',
            armature_uuid,
            bone,
            action_name,
            frame,
            step,
            export_settings)

        trans, rot, scale = mat.decompose()

        key.value = {
            "location": trans,
            "rotation_quaternion": rot,
            "scale": scale
        }[channel]

        keyframes.append(key)
        frame += step

    if len(keyframes) == 0:
        # For example, when the CROP option drops negative frames, but all frames are negative
        return None

    if not export_settings['gltf_optimize_animation']:
        # For bones, if all values are the same, keep the data only if it is changing, or if the user wants to keep it
        if node_channel_is_animated is True:
            return keyframes  # Always keep
        else:
            # Baked bones
            if export_settings['gltf_optimize_animation_keep_armature'] is False:
                # Do not keep the channel if the property is not changing
                cst = fcurve_is_constant(keyframes)
                return None if cst is True else keyframes
            else:
                # Keep the data, as requested by the user. Keep all samples, as the user doesn't want to optimize
                return keyframes

    else:

        # For armatures
        # Check if all values are the same
        # In that case, if there is no real keyframe on this channel for this given bone,
        # we can ignore these keyframes
        # If there are some fcurves, we can keep only 2 keyframes, first and last
        cst = fcurve_is_constant(keyframes)

        if node_channel_is_animated is True:  # fcurve on this bone for this property
            # Keep the animation, but keep only 2 keyframes if the data are not changing
            return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes
        else:  # bone is not animated (no fcurve)
            # Do not keep the channel if the property is not changing and the user decided not to keep it
            if export_settings['gltf_optimize_animation_keep_armature'] is False:
                return None if cst is True else keyframes
            else:
                # Keep at least 2 keyframes if the data are not changing
                return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes


def fcurve_is_constant(keyframes):
    return all([j < 0.0001 for j in np.ptp([[k.value[i] for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)])
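fcurve_is_constant above computes the per-component peak-to-peak range (np.ptp, max minus min) across all sampled keyframes and reports the channel as constant when every component varies by less than 0.0001. A standalone illustration with a mock Keyframe stand-in (the FakeKey class and values are made up for clarity, not part of the exporter):

import numpy as np

class FakeKey:
    def __init__(self, value):
        self.value = value

def fcurve_is_constant(keyframes):
    return all([j < 0.0001 for j in np.ptp([[k.value[i] for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)])

print(fcurve_is_constant([FakeKey((1.0, 2.0, 3.0)), FakeKey((1.0, 2.0, 3.0))]))  # True: nothing varies
print(fcurve_is_constant([FakeKey((1.0, 2.0, 3.0)), FakeKey((1.0, 2.5, 3.0))]))  # False: Y varies by 0.5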
231
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_sampler.py
Normal file
@@ -0,0 +1,231 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
import mathutils
from ......io.com import gltf2_io
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......io.com import gltf2_io_constants
from ......io.exp import gltf2_io_binary_data
from .....com import gltf2_blender_math
from ....gltf2_blender_gather_accessors import gather_accessor
from ....gltf2_blender_gather_cache import cached
from ....gltf2_blender_gather_tree import VExportNode
from .armature_keyframes import gather_bone_sampled_keyframes


@cached
def gather_bone_sampled_animation_sampler(
        armature_uuid: str,
        bone: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        node_channel_interpolation: str,
        export_settings
):

    pose_bone = export_settings['vtree'].nodes[armature_uuid].blender_object.pose.bones[bone]

    keyframes = __gather_keyframes(
        armature_uuid,
        bone,
        channel,
        action_name,
        node_channel_is_animated,
        export_settings)

    if keyframes is None:
        # After checking, there is no need to animate this node for this channel
        return None

    # Now that we have the raw input/output, convert it to glTF data
    input, output = __convert_keyframes(armature_uuid, bone, channel, keyframes, action_name, export_settings)

    sampler = gltf2_io.AnimationSampler(
        extensions=None,
        extras=None,
        input=input,
        interpolation=__gather_interpolation(
            node_channel_is_animated,
            node_channel_interpolation,
            keyframes,
            export_settings),
        output=output)

    export_user_extensions('gather_animation_sampler_hook',
                           export_settings,
                           sampler,
                           export_settings['vtree'].nodes[armature_uuid].blender_object,
                           pose_bone,
                           action_name,
                           node_channel_is_animated)

    return sampler


@cached
def __gather_keyframes(
        armature_uuid: str,
        bone: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        export_settings
):

    keyframes = gather_bone_sampled_keyframes(
        armature_uuid,
        bone,
        channel,
        action_name,
        node_channel_is_animated,
        export_settings
    )

    if keyframes is None:
        # After checking, there is no need to animate this node
        return None

    return keyframes


def __convert_keyframes(armature_uuid, bone_name, channel, keyframes, action_name, export_settings):

    # Sliding can come from:
    # - option SLIDE for negative frames
    # - option to start the animation at frame 0 for looping
    if armature_uuid in export_settings['slide'].keys(
    ) and action_name in export_settings['slide'][armature_uuid].keys():
        for k in keyframes:
            k.frame += -export_settings['slide'][armature_uuid][action_name]
            k.seconds = k.frame / (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base)

    times = [k.seconds for k in keyframes]
    input = gather_accessor(
        gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float),
        gltf2_io_constants.ComponentType.Float,
        len(times),
        tuple([max(times)]),
        tuple([min(times)]),
        gltf2_io_constants.DataType.Scalar,
        export_settings)

    is_yup = export_settings['gltf_yup']

    bone = export_settings['vtree'].nodes[armature_uuid].blender_object.pose.bones[bone_name]
    target_datapath = "pose.bones['" + bone_name + "']." + channel

    if bone.parent is None:
        # The bone is at the root of the armature
        axis_basis_change = mathutils.Matrix.Identity(4)
        if is_yup:
            axis_basis_change = mathutils.Matrix(
                ((1.0, 0.0, 0.0, 0.0),
                 (0.0, 0.0, 1.0, 0.0),
                 (0.0, -1.0, 0.0, 0.0),
                 (0.0, 0.0, 0.0, 1.0)))
        correction_matrix_local = axis_basis_change @ bone.bone.matrix_local
    else:
        # The bone is not at the root of the armature
        # There are 2 cases:
        parent_uuid = export_settings['vtree'].nodes[export_settings['vtree']
                                                     .nodes[armature_uuid].bones[bone.name]].parent_uuid
        if parent_uuid is not None and export_settings['vtree'].nodes[parent_uuid].blender_type == VExportNode.BONE:
            # The exported bone is not at the root of the armature either
            blender_bone_parent = export_settings['vtree'].nodes[parent_uuid].blender_bone
            correction_matrix_local = (
                blender_bone_parent.bone.matrix_local.inverted_safe() @
                bone.bone.matrix_local
            )
        else:
            # The exported bone (after filtering) is at the root of the armature
            axis_basis_change = mathutils.Matrix.Identity(4)
            if is_yup:
                axis_basis_change = mathutils.Matrix(
                    ((1.0, 0.0, 0.0, 0.0),
                     (0.0, 0.0, 1.0, 0.0),
                     (0.0, -1.0, 0.0, 0.0),
                     (0.0, 0.0, 0.0, 1.0)))
            correction_matrix_local = axis_basis_change
    transform = correction_matrix_local

    values = []
    fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base)
    for keyframe in keyframes:
        # Transform the data and build glTF control points
        value = gltf2_blender_math.transform(keyframe.value, target_datapath, transform, False)
        keyframe_value = gltf2_blender_math.mathutils_to_gltf(value)

        if keyframe.in_tangent is not None:
            # We can directly transform the tangent, as it is currently represented by a control point
            in_tangent = gltf2_blender_math.transform(keyframe.in_tangent, target_datapath, transform, False)

            # The tangent in glTF is relative to the keyframe value and uses seconds
            if not isinstance(value, list):
                in_tangent = fps * (in_tangent - value)
            else:
                in_tangent = [fps * (in_tangent[i] - value[i]) for i in range(len(value))]
            keyframe_value = gltf2_blender_math.mathutils_to_gltf(in_tangent) + keyframe_value  # append

        if keyframe.out_tangent is not None:
            # We can directly transform the tangent, as it is currently represented by a control point
            out_tangent = gltf2_blender_math.transform(keyframe.out_tangent, target_datapath, transform, False)

            # The tangent in glTF is relative to the keyframe value and uses seconds
            if not isinstance(value, list):
                out_tangent = fps * (out_tangent - value)
            else:
                out_tangent = [fps * (out_tangent[i] - value[i]) for i in range(len(value))]
            keyframe_value = keyframe_value + gltf2_blender_math.mathutils_to_gltf(out_tangent)  # append

        values += keyframe_value

    # Store the keyframe data in a binary buffer
    component_type = gltf2_io_constants.ComponentType.Float
    data_type = gltf2_io_constants.DataType.vec_type_from_num(len(keyframes[0].value))

    output = gltf2_io.Accessor(
        buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type),
        byte_offset=None,
        component_type=component_type,
        count=len(values) // gltf2_io_constants.DataType.num_elements(data_type),
        extensions=None,
        extras=None,
        max=None,
        min=None,
        name=None,
        normalized=None,
        sparse=None,
        type=data_type
    )

    return input, output

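In the tangent branches above, handles are rescaled from control points to glTF's per-second slopes: the keyframe tangent arrives as an absolute control point, while glTF stores a derivative with respect to seconds, hence the fps * (tangent - value) rescale. A small numeric check of that formula (the values are made up): at 25 fps, a keyframe value of 2.0 with an out-handle at 2.4 exports a tangent of 25 * (2.4 - 2.0) = 10.0 units per second, and each keyframe's slot in the output accessor becomes [inTangent, value, outTangent].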
def __gather_interpolation(node_channel_is_animated, node_channel_interpolation, keyframes, export_settings):

    if len(keyframes) > 2:
        # Keep STEP as STEP; others become LINEAR
        return {
            "STEP": "STEP"
        }.get(node_channel_interpolation, "LINEAR")
    elif len(keyframes) == 1:
        if node_channel_is_animated is False:
            return "STEP"
        elif node_channel_interpolation == "CUBICSPLINE":
            return "LINEAR"  # We can't have a single keyframe with CUBICSPLINE
        else:
            return node_channel_interpolation
    else:
        # If we only have 2 keyframes, set the interpolation to STEP if baked
        if node_channel_is_animated is False:
            # Baked => we only have the first and last keyframes
            return "STEP"
        else:
            if keyframes[0].value == keyframes[1].value:
                return "STEP"
            else:
                return "LINEAR"
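In short, __gather_interpolation degrades the source interpolation to what the sampled data can represent: more than two keyframes keep STEP or fall back to LINEAR; a single keyframe on an unanimated channel becomes STEP (one sample, nothing to interpolate); and exactly two keyframes become STEP when they come from baking or hold equal values, LINEAR otherwise. For example, a baked constant bone channel that was reduced to its first and last samples exports as two STEP keyframes, which glTF defines as a constant hold until the next keyframe.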
48
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channel_target.py
Normal file
@@ -0,0 +1,48 @@
# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

from ......io.com import gltf2_io
from ....gltf2_blender_gather_cache import cached


@cached
def gather_data_sampled_channel_target(
        blender_type_data: str,
        blender_id,
        channel: str,
        additional_key: str,  # Used to differentiate between material / material node_tree
        export_settings
) -> gltf2_io.AnimationChannelTarget:

    animation_channel_target = gltf2_io.AnimationChannelTarget(
        extensions=__gather_extensions(blender_type_data, blender_id, channel, export_settings),
        extras=__gather_extras(blender_type_data, blender_id, channel, export_settings),
        node=__gather_node(blender_type_data, blender_id, export_settings),
        path=__gather_path(blender_type_data, blender_id, channel, export_settings)
    )

    return animation_channel_target


def __gather_extensions(blender_type_data, blender_id, channel, export_settings):
    return None


def __gather_extras(blender_type_data, blender_id, channel, export_settings):
    return None


def __gather_node(blender_type_data, blender_id, export_settings):
    if blender_type_data == "materials":
        return export_settings['KHR_animation_pointer']['materials'][blender_id]['glTF_material']
    elif blender_type_data == "lights":
        return export_settings['KHR_animation_pointer']['lights'][blender_id]['glTF_light']
    elif blender_type_data == "cameras":
        return export_settings['KHR_animation_pointer']['cameras'][blender_id]['glTF_camera']
    else:
        pass  # This should never happen


def __gather_path(blender_type_data, blender_id, channel, export_settings):
    return export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][channel]['path']
113
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_channels.py
Normal file
@@ -0,0 +1,113 @@
# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from ......io.com import gltf2_io
from ......blender.com.gltf2_blender_conversion import get_gltf_interpolation
from .gltf2_blender_gather_data_channel_target import gather_data_sampled_channel_target
from .gltf2_blender_gather_data_sampler import gather_data_sampled_animation_sampler


def gather_data_sampled_channels(blender_type_data, blender_id, blender_action_name,
                                 additional_key, export_settings) -> typing.List[gltf2_io.AnimationChannel]:
    channels = []

    list_of_animated_data_channels = {}  # TODOPointer

    baseColorFactor_alpha_merged_already_done = False
    for path in export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'].keys():

        # Do not manage alpha, as it will be managed by the baseColorFactor (merging color and alpha)
        if export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][path][
                'path'] == "/materials/XXX/pbrMetallicRoughness/baseColorFactor" and baseColorFactor_alpha_merged_already_done is True:
            continue

        channel = gather_sampled_data_channel(
            blender_type_data,
            blender_id,
            path,
            blender_action_name,
            path in list_of_animated_data_channels.keys(),
            list_of_animated_data_channels[path] if path in list_of_animated_data_channels.keys() else get_gltf_interpolation("LINEAR"),
            additional_key,
            export_settings)
        if channel is not None:
            channels.append(channel)

        if export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][path]['path'] == "/materials/XXX/pbrMetallicRoughness/baseColorFactor":
            baseColorFactor_alpha_merged_already_done = True

    return channels


def gather_sampled_data_channel(
        blender_type_data: str,
        blender_id: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        node_channel_interpolation: str,
        additional_key: str,  # Used to differentiate between material / material node_tree
        export_settings
):

    __target = __gather_target(blender_type_data, blender_id, channel, additional_key, export_settings)
    if __target.path is not None:
        sampler = __gather_sampler(
            blender_type_data,
            blender_id,
            channel,
            action_name,
            node_channel_is_animated,
            node_channel_interpolation,
            additional_key,
            export_settings)

        if sampler is None:
            # After checking, there is no need to animate this node for this channel
            return None

        animation_channel = gltf2_io.AnimationChannel(
            extensions=None,
            extras=None,
            sampler=sampler,
            target=__target
        )

        return animation_channel
    return None


def __gather_target(
        blender_type_data: str,
        blender_id: str,
        channel: str,
        additional_key: str,  # Used to differentiate between material / material node_tree
        export_settings
) -> gltf2_io.AnimationChannelTarget:

    return gather_data_sampled_channel_target(
        blender_type_data, blender_id, channel, additional_key, export_settings)


def __gather_sampler(
        blender_type_data,
        blender_id,
        channel,
        action_name,
        node_channel_is_animated,
        node_channel_interpolation,
        additional_key,
        export_settings):
    return gather_data_sampled_animation_sampler(
        blender_type_data,
        blender_id,
        channel,
        action_name,
        node_channel_is_animated,
        node_channel_interpolation,
        additional_key,
        export_settings
    )
142
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_keyframes.py
Normal file
@@ -0,0 +1,142 @@
# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import typing
import math
import numpy as np
import bpy
from .....com.gltf2_blender_conversion import PBR_WATTS_TO_LUMENS
from ....gltf2_blender_gather_cache import cached
from ...gltf2_blender_gather_keyframes import Keyframe
from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data


@cached
def gather_data_sampled_keyframes(
        blender_type_data: str,
        blender_id,
        channel,
        action_name,
        node_channel_is_animated: bool,
        additional_key,  # Used to differentiate between material / material node_tree
        export_settings):

    start_frame = export_settings['ranges'][blender_id][action_name]['start']
    end_frame = export_settings['ranges'][blender_id][action_name]['end']

    keyframes = []

    frame = start_frame
    step = export_settings['gltf_frame_step']
    while frame <= end_frame:

        # Retrieve the length of the data to export
        if export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][channel]['path'] != "/materials/XXX/pbrMetallicRoughness/baseColorFactor":
            length = export_settings['KHR_animation_pointer'][blender_type_data][blender_id]['paths'][channel]['length']
        else:
            length = 4

        key = Keyframe([None] * length, frame, 'value')

        value = get_cache_data(
            'value',
            blender_id,
            channel,
            action_name,
            frame,
            step,
            export_settings
        )

        # Convert data if needed
        if blender_type_data == "materials":
            if "attenuationDistance" in export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['path']:
                value = 1.0 / value if value != 0.0 else 1e13

            if export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['path'] == "/materials/XXX/occlusionTexture/strength":
                if export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['reverse'] is True:
                    value = 1.0 - value

            if export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['path'] == "/materials/XXX/emissiveFactor":
                # We need to retrieve the strength of the emissive too
                strength = get_cache_data(
                    'value',
                    blender_id,
                    export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['strength_channel'],
                    action_name,
                    frame,
                    step,
                    export_settings
                )

                value = [f * strength for f in value]
                if any([i > 1.0 for i in value or []]):
                    # Clamp to range [0,1]
                    # Official glTF clamps to range [0,1]
                    # If we are outside, we need to use the extension KHR_materials_emissive_strength
                    strength = max(value)
                    value = [f / strength for f in value]
                else:
                    pass  # Nothing to do, we are already in the range [0,1]

            if export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel][
                    'path'] == "/materials/XXX/extensions/KHR_materials_emissive_strength/emissiveStrength":
                # We need to retrieve the emissive factor
                factor = get_cache_data(
                    'value',
                    blender_id,
                    export_settings['KHR_animation_pointer']['materials'][blender_id]['paths'][channel]['factor_channel'],
                    action_name,
                    frame,
                    step,
                    export_settings
                )

                factor = [f * value for f in factor]
                if any([i > 1.0 for i in factor or []]):
                    # Clamp to range [0,1]
                    # Official glTF clamps to range [0,1]
                    # If we are outside, we need to use the extension KHR_materials_emissive_strength
                    value = max(factor)
                else:
                    value = 1.0  # no need to have an emissiveStrength extension for this frame

            # For specularFactor and specularColorFactor, we already multiplied by 2.0 and clamped to 1.0 (adapting specularColorFactor accordingly)
            # This is done in cache retrieval

        elif blender_type_data == "lights":
            if export_settings['KHR_animation_pointer']['lights'][blender_id]['paths'][channel]['path'] == "/extensions/KHR_lights_punctual/lights/XXX/intensity":
                # Lights need conversion in case quadratic_falloff_node is used, for intensity
                if 'quadratic_falloff_node' in channel:
                    value /= (math.pi * 4.0)

                if export_settings['gltf_lighting_mode'] == 'SPEC' \
                        and export_settings['KHR_animation_pointer']['lights'][blender_id]['paths'][channel]['lamp_type'] != "SUN":
                    value *= PBR_WATTS_TO_LUMENS

            if export_settings['KHR_animation_pointer']['lights'][blender_id]['paths'][channel]['path'] == "/extensions/KHR_lights_punctual/lights/XXX/spot.outerConeAngle":
                value *= 0.5

            # innerConeAngle is handled in cache retrieval, as it requires spot_size and spot_blend

        # Camera yfov is calculated in cache retrieval, as it requires sensor_fit, angle and aspect ratio

        key.value_total = value
        keyframes.append(key)
        frame += step

    if len(keyframes) == 0:
        # For example, with option CROP for negative frames, when all frames are negative
        return None

    cst = fcurve_is_constant(keyframes)
    return None if cst is True else keyframes


def fcurve_is_constant(keyframes):
    if type(keyframes[0].value).__name__ == "float":
        return all([j < 0.0001 for j in np.ptp([[k.value] for k in keyframes], axis=0)])
    else:
        return all([j < 0.0001 for j in np.ptp([[k.value[i]
                                                 for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)])
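Review note: the constant-channel check above relies on `np.ptp` (peak-to-peak, i.e. max minus min per component) across all sampled frames. A minimal standalone sketch of the same idea, with a hypothetical stand-in for the exporter's Keyframe objects:

import numpy as np

class FakeKey:
    # Stand-in: only .value matters for the constant check.
    def __init__(self, value):
        self.value = value

def is_constant(keyframes, eps=0.0001):
    # A channel is "constant" when every component varies less than eps.
    values = np.array([k.value for k in keyframes], dtype=float)
    return bool(np.all(np.ptp(values, axis=0) < eps))

keys = [FakeKey([1.0, 0.5, 0.25]) for _ in range(10)]
print(is_constant(keys))   # True: identical values on every frame
keys.append(FakeKey([1.0, 0.6, 0.25]))
print(is_constant(keys))   # False: the second component moves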
136
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/data/gltf2_blender_gather_data_sampler.py
Normal file
@@ -0,0 +1,136 @@
# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from ......io.com import gltf2_io
from ......io.exp import gltf2_io_binary_data
from ......io.com import gltf2_io_constants
from ....gltf2_blender_gather_cache import cached
from ....gltf2_blender_gather_accessors import gather_accessor
from .gltf2_blender_gather_data_keyframes import gather_data_sampled_keyframes


@cached
def gather_data_sampled_animation_sampler(
        blender_type_data: str,
        blender_id: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        node_channel_interpolation: str,
        additional_key: str,  # Used to differentiate between material / material node_tree
        export_settings
):

    keyframes = __gather_keyframes(
        blender_type_data,
        blender_id,
        channel,
        action_name,
        node_channel_is_animated,
        additional_key,
        export_settings)

    if keyframes is None:
        # After check, no need to animate this node for this channel
        return None

    # Now that we have the raw input/output, we need to convert them to glTF data
    input, output = __convert_keyframes(blender_type_data, blender_id, channel, keyframes, action_name, export_settings)

    sampler = gltf2_io.AnimationSampler(extensions=None, extras=None, input=input, interpolation=__gather_interpolation(
        blender_type_data, node_channel_is_animated, node_channel_interpolation, keyframes, export_settings), output=output)

    return sampler


def __gather_keyframes(
        blender_type_data,
        blender_id,
        channel,
        action_name,
        node_channel_is_animated,
        additional_key,  # Used to differentiate between material / material node_tree
        export_settings):

    keyframes = gather_data_sampled_keyframes(
        blender_type_data,
        blender_id,
        channel,
        action_name,
        node_channel_is_animated,
        additional_key,
        export_settings
    )

    if keyframes is None:
        # After check, no need to animate this node
        return None

    return keyframes


def __convert_keyframes(blender_type_data, blender_id, channel, keyframes, action_name, export_settings):

    # Sliding can come from:
    # - option SLIDE for negative frames
    # - option to start animation at frame 0 for looping
    if blender_id in export_settings['slide'].keys() and action_name in export_settings['slide'][blender_id].keys():
        for k in keyframes:
            k.frame += -export_settings['slide'][blender_id][action_name]
            k.seconds = k.frame / bpy.context.scene.render.fps

    times = [k.seconds for k in keyframes]
    input = gather_accessor(
        gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float),
        gltf2_io_constants.ComponentType.Float,
        len(times),
        tuple([max(times)]),
        tuple([min(times)]),
        gltf2_io_constants.DataType.Scalar,
        export_settings)

    values = []
    for keyframe in keyframes:
        keyframe_value = __convert_to_gltf(keyframe.value)
        values += keyframe_value

    # Store the keyframe data in a binary buffer
    component_type = gltf2_io_constants.ComponentType.Float
    if type(keyframes[0].value).__name__ != "float":
        data_type = gltf2_io_constants.DataType.vec_type_from_num(len(keyframes[0].value))
    else:
        data_type = gltf2_io_constants.DataType.vec_type_from_num(1)

    output = gltf2_io.Accessor(
        buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type),
        byte_offset=None,
        component_type=component_type,
        count=len(values) // gltf2_io_constants.DataType.num_elements(data_type),
        extensions=None,
        extras=None,
        max=None,
        min=None,
        name=None,
        normalized=None,
        sparse=None,
        type=data_type
    )

    return input, output


def __gather_interpolation(
        blender_type_data,
        node_channel_is_animated,
        node_channel_interpolation,
        keyframes,
        export_settings):
    # TODOPointer
    return 'LINEAR'


def __convert_to_gltf(value):
    return value if type(value).__name__ != "float" else [value]
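Review note: `__convert_to_gltf` flattens scalars into one-element lists so scalar and vector channels share the same packing path, and the accessor count is then the flat length divided by the component count of the data type. A small sketch of that arithmetic (plain Python, illustrative values only):

def flatten(value):
    # Scalars become 1-element lists so everything packs the same way.
    return value if not isinstance(value, float) else [value]

scalars = [0.5, 0.75, 1.0]                # scalar channel, 3 keyframes
vectors = [[1, 0, 0], [0, 1, 0]]          # VEC3 channel, 2 keyframes

flat_scalar = [x for v in scalars for x in flatten(v)]
flat_vec = [x for v in vectors for x in flatten(v)]

num_components = {"SCALAR": 1, "VEC3": 3}
assert len(flat_scalar) // num_components["SCALAR"] == 3   # accessor count == keyframe count
assert len(flat_vec) // num_components["VEC3"] == 2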
662
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/gltf2_blender_gather_animation_sampling_cache.py
Normal file
@@ -0,0 +1,662 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import mathutils
import bpy
import typing
from .....blender.com.gltf2_blender_data_path import get_sk_exported
from .....blender.com.gltf2_blender_conversion import inverted_trs_mapping_node, texture_transform_blender_to_gltf, yvof_blender_to_gltf
from ...gltf2_blender_gather_cache import datacache
from ...gltf2_blender_gather_tree import VExportNode
from ..gltf2_blender_gather_drivers import get_sk_drivers

# Warning: if you change any parameter here, the cache system needs to be changed accordingly


@datacache
def get_cache_data(path: str,
                   blender_obj_uuid: str,
                   bone: typing.Optional[str],
                   action_name: str,
                   current_frame: int,
                   step: int,
                   export_settings,
                   only_gather_provided=False
                   ):

    data = {}

    min_, max_ = get_range(blender_obj_uuid, action_name, export_settings)

    if only_gather_provided:
        # If the object is not in the vtree, this is a material or light for pointers
        obj_uuids = [blender_obj_uuid] if blender_obj_uuid in export_settings['vtree'].nodes.keys() else []
    else:
        obj_uuids = [uid for (uid, n) in export_settings['vtree'].nodes.items()
                     if n.blender_type not in [VExportNode.BONE]]

    # For TRACK mode, we reset the cache after each track export, so we don't need to keep other objects
    if export_settings['gltf_animation_mode'] == "NLA_TRACKS":
        # If the object is not in the vtree, this is a material or light for pointers
        obj_uuids = [blender_obj_uuid] if blender_obj_uuid in export_settings['vtree'].nodes.keys() else []

    # If there is only 1 object to cache, we can disable the viewport for other objects (for performance)
    # This can happen in these cases:
    # - TRACK mode
    # - Only one object to cache (but here, not really useful for performance)
    # - Action mode, where some objects have multiple actions
    #   - In this case, on the first call, we cache the active action for all objects
    #   - On next calls, we cache only the action of the current object, so we can disable the viewport for others
    # For armatures: we already checked that we can disable the viewport (in case
    # of drivers, this is currently not possible)

    need_to_enable_again = False
    if export_settings['gltf_optimize_armature_disable_viewport'] is True and len(obj_uuids) == 1:
        need_to_enable_again = True
        # Before baking, disable all meshes from the viewport
        for obj in [n.blender_object for n in export_settings['vtree'].nodes.values() if n.blender_type in
                    [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]:
            if obj is None:
                continue
            obj.hide_viewport = True
        export_settings['vtree'].nodes[obj_uuids[0]].blender_object.hide_viewport = False

    depsgraph = bpy.context.evaluated_depsgraph_get()

    frame = min_
    while frame <= max_:
        bpy.context.scene.frame_set(int(frame))
        current_instance = {}  # For GN instances, we track instances by their order in the instance iterator

        object_caching(data, obj_uuids, current_instance, action_name, frame, depsgraph, export_settings)

        # KHR_animation_pointer caching for materials, lights, cameras
        if export_settings['gltf_export_anim_pointer'] is True:
            material_nodetree_caching(data, action_name, frame, export_settings)
            material_caching(data, action_name, frame, export_settings)
            light_nodetree_caching(data, action_name, frame, export_settings)
            camera_caching(data, action_name, frame, export_settings)

        frame += step

    # And now, restore meshes in the viewport
    for node, obj in [(n, n.blender_object) for n in export_settings['vtree'].nodes.values() if n.blender_type in
                      [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]:
        obj.hide_viewport = node.default_hide_viewport

    return data
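Review note: the cache built here is a plain nested dict, keyed roughly as data[id][action]["value" | "matrix" | "sk" | "bone"][path_or_name][frame]. A tiny sketch of that shape and how a consumer reads it back (hypothetical ids and paths, no Blender required):

# Hypothetical miniature of the cache shape built by get_cache_data().
data = {}
mat_id, action = 140399, "CubeAction"   # mat_id stands in for id(material)
path = 'node_tree.nodes["Emission"].inputs[0].default_value'

data.setdefault(mat_id, {}).setdefault(action, {}).setdefault("value", {}).setdefault(path, {})
for frame in (1, 2, 3):
    data[mat_id][action]["value"][path][frame] = [1.0, 0.5, 0.25, 1.0]  # RGBA sampled at this frame

# A consumer such as gather_data_sampled_keyframes() then reads one frame at a time:
print(data[mat_id][action]["value"][path][2])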
# For performance, we could be more precise and get a list of ranges to export that includes all needed frames


def get_range(obj_uuid, key, export_settings):
    if export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
        return export_settings['ranges'][obj_uuid][key]['start'], export_settings['ranges'][obj_uuid][key]['end']
    else:
        min_ = None
        max_ = None
        for obj in export_settings['ranges'].keys():
            for anim in export_settings['ranges'][obj].keys():
                if min_ is None or min_ > export_settings['ranges'][obj][anim]['start']:
                    min_ = export_settings['ranges'][obj][anim]['start']
                if max_ is None or max_ < export_settings['ranges'][obj][anim]['end']:
                    max_ = export_settings['ranges'][obj][anim]['end']
        return min_, max_


def initialize_data_dict(data, key1, key2, key3, key4):
    # No check on key1, this is already done before calling this function
    if key2 not in data[key1].keys():
        data[key1][key2] = {}
        data[key1][key2][key3] = {}
        data[key1][key2][key3][key4] = {}
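Review note: outside NLA_TRACKS mode, get_range collapses every object's action ranges into one global [min, max] baking window. A worked example under that reading (purely illustrative numbers):

ranges = {
    "cube-uuid": {"CubeAction": {"start": 1, "end": 40}},
    "lamp-uuid": {"LampAction": {"start": 10, "end": 120}},
}

min_, max_ = None, None
for obj in ranges:
    for anim in ranges[obj]:
        if min_ is None or ranges[obj][anim]["start"] < min_:
            min_ = ranges[obj][anim]["start"]
        if max_ is None or ranges[obj][anim]["end"] > max_:
            max_ = ranges[obj][anim]["end"]

print(min_, max_)  # 1 120: one window wide enough to bake every animated datablock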
def material_caching(data, action_name, frame, export_settings):
    for mat in export_settings['KHR_animation_pointer']['materials'].keys():
        if len(export_settings['KHR_animation_pointer']['materials'][mat]['paths']) == 0:
            continue

        blender_material = [m for m in bpy.data.materials if id(m) == mat]
        if len(blender_material) == 0:
            # This is not a material from Blender (it comes from a Geometry Node for example, so there is no animation on it)
            continue
        else:
            blender_material = blender_material[0]
        if mat not in data.keys():
            data[mat] = {}

        if blender_material and blender_material.animation_data and blender_material.animation_data.action \
                and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:
            key1, key2, key3 = mat, blender_material.animation_data.action.name, "value"
        elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
            key1, key2, key3 = mat, action_name, "value"
        else:
            # Case of baking materials (scene export).
            # There is no animation, so use the id as key
            key1, key2, key3 = mat, mat, "value"

        if key2 not in data[key1].keys():
            data[key1][key2] = {}
            data[key1][key2][key3] = {}

            for path in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys():
                data[key1][key2][key3][path] = {}

        for path in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys():

            if path.startswith("node_tree"):
                continue

            val = blender_material.path_resolve(path)
            if type(val).__name__ == "float":
                data[key1][key2][key3][path][frame] = val
            else:
                data[key1][key2][key3][path][frame] = list(val)
def material_nodetree_caching(data, action_name, frame, export_settings):
    # After caching objects, cache materials, for KHR_animation_pointer
    for mat in export_settings['KHR_animation_pointer']['materials'].keys():
        if len(export_settings['KHR_animation_pointer']['materials'][mat]['paths']) == 0:
            continue

        blender_material = [m for m in bpy.data.materials if id(m) == mat]
        if len(blender_material) == 0:
            # This is not a material from Blender (it comes from a Geometry Node for example, so there is no animation on it)
            continue
        else:
            blender_material = blender_material[0]
        if mat not in data.keys():
            data[mat] = {}

        if blender_material.node_tree and blender_material.node_tree.animation_data and blender_material.node_tree.animation_data.action \
                and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:

            key1, key2, key3 = mat, blender_material.node_tree.animation_data.action.name, "value"
        elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
            key1, key2, key3 = mat, action_name, "value"
        else:
            # Case of baking materials (scene export).
            # There is no animation, so use the id as key
            key1, key2, key3 = mat, mat, "value"

        if key2 not in data[key1].keys():
            data[key1][key2] = {}
            data[key1][key2][key3] = {}
            for path in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys():
                data[key1][key2][key3][path] = {}

        baseColorFactor_alpha_merged_already_done = False
        for path in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys():

            if not path.startswith("node_tree"):
                continue

            # Manage the special case where we merge baseColorFactor and alpha
            if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] == "/materials/XXX/pbrMetallicRoughness/baseColorFactor" \
                    and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['length'] == 3:
                if baseColorFactor_alpha_merged_already_done is True:
                    continue
                val_color = blender_material.path_resolve(path)
                data_color = list(val_color)[:export_settings['KHR_animation_pointer']
                                             ['materials'][mat]['paths'][path]['length']]
                if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['additional_path'] is not None:
                    val_alpha = blender_material.path_resolve(
                        export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['additional_path'])
                else:
                    val_alpha = 1.0
                data[key1][key2][key3][path][frame] = data_color + [val_alpha]
                baseColorFactor_alpha_merged_already_done = True
            # Manage the special case where we merge baseColorFactor and alpha
            elif export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] == "/materials/XXX/pbrMetallicRoughness/baseColorFactor" \
                    and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['length'] == 1:
                if baseColorFactor_alpha_merged_already_done is True:
                    continue
                val_alpha = blender_material.path_resolve(path)
                if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['additional_path'] is not None:
                    val_color = blender_material.path_resolve(
                        export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['additional_path'])
                    data_color = list(val_color)[:export_settings['KHR_animation_pointer']
                                                 ['materials'][mat]['paths']['additional_path']['length']]
                else:
                    data_color = [1.0, 1.0, 1.0]
                data[key1][key2][key3][path][frame] = data_color + [val_alpha]
                baseColorFactor_alpha_merged_already_done = True

            # Manage the special case for KHR_texture_transform offset, which needs
            # rotation and scale too (and not only translation)
            elif "KHR_texture_transform" in export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] \
                    and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].endswith("offset"):

                val_offset = blender_material.path_resolve(path)
                rotation_path = [
                    i for i in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys()
                    if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit("/", 1)[0]
                    == export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].rsplit("/", 1)[0]
                    and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit("/", 1)[1] == "rotation"][0]
                val_rotation = blender_material.path_resolve(rotation_path)
                scale_path = [
                    i for i in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys()
                    if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit("/", 1)[0]
                    == export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].rsplit("/", 1)[0]
                    and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit("/", 1)[1] == "scale"][0]
                val_scale = blender_material.path_resolve(scale_path)

                mapping_transform = {}
                mapping_transform["offset"] = [val_offset[0], val_offset[1]]
                mapping_transform["rotation"] = val_rotation
                mapping_transform["scale"] = [val_scale[0], val_scale[1]]

                if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['vector_type'] == "TEXTURE":
                    mapping_transform = inverted_trs_mapping_node(mapping_transform)
                    if mapping_transform is None:
                        # Can not be converted to TRS, so keeping default values
                        export_settings['log'].warning(
                            "Can not convert texture transform to TRS. Keeping default values.")
                        mapping_transform = {}
                        mapping_transform["offset"] = [0.0, 0.0]
                        mapping_transform["rotation"] = 0.0
                        mapping_transform["scale"] = [1.0, 1.0]
                elif export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['vector_type'] == "VECTOR":
                    # Vectors don't get translated
                    mapping_transform["offset"] = [0, 0]

                texture_transform = texture_transform_blender_to_gltf(mapping_transform)

                data[key1][key2][key3][path][frame] = texture_transform['offset']
                data[key1][key2][key3][rotation_path][frame] = texture_transform['rotation']
                data[key1][key2][key3][scale_path][frame] = texture_transform['scale']
            # Manage the special case for KHR_texture_transform rotation
            elif "KHR_texture_transform" in export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] \
                    and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].endswith("rotation"):
                if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['vector_type'] != "VECTOR":
                    # Already handled by offset
                    continue
                else:
                    val = blender_material.path_resolve(path)
                    mapping_transform = {}
                    mapping_transform["offset"] = [0, 0]  # Placeholder, not needed
                    mapping_transform["rotation"] = val
                    mapping_transform["scale"] = [1, 1]  # Placeholder, not needed
                    texture_transform = texture_transform_blender_to_gltf(mapping_transform)
                    data[key1][key2][key3][path][frame] = texture_transform['rotation']
            elif "KHR_texture_transform" in export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] \
                    and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].endswith("scale"):
                if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['vector_type'] != "VECTOR":
                    # Already handled by offset
                    continue
                else:
                    val = blender_material.path_resolve(path)
                    mapping_transform = {}
                    mapping_transform["offset"] = [0, 0]  # Placeholder, not needed
                    mapping_transform["rotation"] = 0.0  # Placeholder, not needed
                    mapping_transform["scale"] = [val[0], val[1]]
                    texture_transform = texture_transform_blender_to_gltf(mapping_transform)
                    data[key1][key2][key3][path][frame] = texture_transform['scale']

            # Manage the special cases for specularFactor & specularColorFactor
            elif export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] == "/materials/XXX/extensions/KHR_materials_specular/specularFactor":
                val = blender_material.path_resolve(path)
                val = val * 2.0
                if val > 1.0:
                    fac = val
                    val = 1.0
                else:
                    fac = 1.0

                data[key1][key2][key3][path][frame] = val

                # Retrieve specularColorFactor
                colorfactor_path = [
                    i for i in export_settings['KHR_animation_pointer']['materials'][mat]['paths'].keys()
                    if export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit("/", 1)[0]
                    == export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'].rsplit("/", 1)[0]
                    and export_settings['KHR_animation_pointer']['materials'][mat]['paths'][i]['path'].rsplit("/", 1)[1] == "specularColorFactor"][0]
                val_colorfactor = blender_material.path_resolve(colorfactor_path)
                if fac > 1.0:
                    val_colorfactor = [i * fac for i in val_colorfactor]
                data[key1][key2][key3][colorfactor_path][frame] = val_colorfactor
            elif export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['path'] == "/materials/XXX/extensions/KHR_materials_specular/specularColorFactor":
                # Already handled by specularFactor
                continue

            # Classic case
            else:
                val = blender_material.path_resolve(path)
                if type(val).__name__ == "float":
                    data[key1][key2][key3][path][frame] = val
                else:
                    data[key1][key2][key3][path][frame] = list(val)[
                        :export_settings['KHR_animation_pointer']['materials'][mat]['paths'][path]['length']]
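Review note: the wrapped list comprehensions above pair the offset path with its sibling rotation/scale entries by comparing everything before the last "/" of the glTF pointer. A compact restatement of that matching rule (illustrative paths only, helper name invented):

paths = {
    'node_tree.nodes["Mapping"].inputs[1].default_value': {"path": "/materials/XXX/ext/KHR_texture_transform/offset"},
    'node_tree.nodes["Mapping"].inputs[2].default_value': {"path": "/materials/XXX/ext/KHR_texture_transform/rotation"},
    'node_tree.nodes["Mapping"].inputs[3].default_value': {"path": "/materials/XXX/ext/KHR_texture_transform/scale"},
}

def sibling(paths, current, leaf):
    # Same parent pointer (prefix before the last "/"), requested leaf name.
    prefix = paths[current]["path"].rsplit("/", 1)[0]
    return [k for k, v in paths.items()
            if v["path"].rsplit("/", 1)[0] == prefix
            and v["path"].rsplit("/", 1)[1] == leaf][0]

offset_key = 'node_tree.nodes["Mapping"].inputs[1].default_value'
print(sibling(paths, offset_key, "rotation"))  # the Blender data path driving rotation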
def armature_caching(data, obj_uuid, blender_obj, action_name, frame, export_settings):
    bones = export_settings['vtree'].get_all_bones(obj_uuid)
    if blender_obj.animation_data and blender_obj.animation_data.action \
            and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]:
        key1, key2, key3 = obj_uuid, blender_obj.animation_data.action.name, "bone"
    elif blender_obj.animation_data \
            and export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
        key1, key2, key3 = obj_uuid, action_name, "bone"
    else:
        key1, key2, key3 = obj_uuid, obj_uuid, "bone"

    if key3 not in data[key1][key2].keys():
        data[key1][key2][key3] = {}

    for bone_uuid in [bone for bone in bones if export_settings['vtree'].nodes[bone].leaf_reference is None]:
        blender_bone = export_settings['vtree'].nodes[bone_uuid].blender_bone

        if export_settings['vtree'].nodes[bone_uuid].parent_uuid is not None and export_settings['vtree'].nodes[
                export_settings['vtree'].nodes[bone_uuid].parent_uuid].blender_type == VExportNode.BONE:
            blender_bone_parent = export_settings['vtree'].nodes[export_settings['vtree']
                                                                 .nodes[bone_uuid].parent_uuid].blender_bone
            rest_mat = blender_bone_parent.bone.matrix_local.inverted_safe() @ blender_bone.bone.matrix_local
            matrix = rest_mat.inverted_safe() @ blender_bone_parent.matrix.inverted_safe() @ blender_bone.matrix
        else:
            if blender_bone.parent is None:
                matrix = blender_bone.bone.matrix_local.inverted_safe() @ blender_bone.matrix
            else:
                # The bone has a parent, but after filtering during export it sits at the root of the armature
                matrix = blender_bone.matrix.copy()

            # Because there is no armature object, we need to apply the TRS of the armature to the root bone
            if export_settings['gltf_armature_object_remove'] is True:
                matrix = matrix @ blender_obj.matrix_world

        if blender_bone.name not in data[key1][key2][key3].keys():
            data[key1][key2][key3][blender_bone.name] = {}
        data[key1][key2][key3][blender_bone.name][frame] = matrix
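Review note: for a parented bone, the stored matrix is the pose transform re-expressed relative to the parent's rest and pose transforms. A minimal sketch of that composition with plain mathutils matrices (runs in Blender's Python or with the standalone mathutils module; the transforms are made up):

import mathutils

# Made-up rest and pose transforms standing in for bone.matrix_local / pose-bone .matrix.
parent_rest = mathutils.Matrix.Translation((0.0, 1.0, 0.0))
child_rest = mathutils.Matrix.Translation((0.0, 2.0, 0.0))
parent_pose = mathutils.Matrix.Translation((0.0, 1.0, 0.5))
child_pose = mathutils.Matrix.Translation((0.0, 2.0, 1.0))

# Rest-relative offset of the child under its parent...
rest_mat = parent_rest.inverted_safe() @ child_rest
# ...then the pose expressed in that local space, as armature_caching() does.
local = rest_mat.inverted_safe() @ parent_pose.inverted_safe() @ child_pose
print(local.to_translation())  # the animated local translation, here (0, 0, 0.5)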
def object_caching(data, obj_uuids, current_instance, action_name, frame, depsgraph, export_settings):
    for obj_uuid in obj_uuids:
        blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object
        if blender_obj is None:  # GN instance
            if export_settings['vtree'].nodes[obj_uuid].parent_uuid not in current_instance.keys():
                current_instance[export_settings['vtree'].nodes[obj_uuid].parent_uuid] = 0

        # TODO: we may want to avoid looping on all objects, but an accurate filter must be found

        # Calculate the local matrix
        if export_settings['vtree'].nodes[obj_uuid].parent_uuid is None:
            parent_mat = mathutils.Matrix.Identity(4).freeze()
        else:
            if export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type not in [
                    VExportNode.BONE]:
                if export_settings['vtree'].nodes[export_settings['vtree']
                                                  .nodes[obj_uuid].parent_uuid].blender_type != VExportNode.COLLECTION:
                    parent_mat = export_settings['vtree'].nodes[export_settings['vtree']
                                                                .nodes[obj_uuid].parent_uuid].blender_object.matrix_world
                else:
                    parent_mat = export_settings['vtree'].nodes[export_settings['vtree']
                                                                .nodes[obj_uuid].parent_uuid].matrix_world
            else:
                # The animated object is parented to a bone
                blender_bone = export_settings['vtree'].nodes[export_settings['vtree']
                                                              .nodes[obj_uuid].parent_bone_uuid].blender_bone
                armature_object = export_settings['vtree'].nodes[export_settings['vtree']
                                                                 .nodes[export_settings['vtree'].nodes[obj_uuid].parent_bone_uuid].armature].blender_object
                axis_basis_change = mathutils.Matrix(
                    ((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)))

                parent_mat = armature_object.matrix_world @ blender_bone.matrix @ axis_basis_change

        # For an object inside a collection (at root), the world matrix is already expressed relative to the collection parent
        if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None and export_settings['vtree'].nodes[
                export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.INST_COLLECTION:
            parent_mat = mathutils.Matrix.Identity(4).freeze()

        if blender_obj:
            if export_settings['vtree'].nodes[obj_uuid].blender_type != VExportNode.COLLECTION:
                mat = parent_mat.inverted_safe() @ blender_obj.matrix_world
            else:
                mat = parent_mat.inverted_safe()
        else:
            eval = export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_object.evaluated_get(
                depsgraph)
            cpt_inst = 0
            for inst in depsgraph.object_instances:  # use only as an iterator
                if inst.parent == eval:
                    if current_instance[export_settings['vtree'].nodes[obj_uuid].parent_uuid] == cpt_inst:
                        mat = inst.matrix_world.copy()
                        current_instance[export_settings['vtree'].nodes[obj_uuid].parent_uuid] += 1
                        break
                    cpt_inst += 1

        if obj_uuid not in data.keys():
            data[obj_uuid] = {}

        if export_settings['vtree'].nodes[obj_uuid].blender_type != VExportNode.COLLECTION:
            if blender_obj and blender_obj.animation_data and blender_obj.animation_data.action \
                    and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]:
                key1, key2, key3, key4 = obj_uuid, blender_obj.animation_data.action.name, "matrix", None
            elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
                key1, key2, key3, key4 = obj_uuid, action_name, "matrix", None
            else:
                # Case of baking object.
                # There is no animation, so use the uuid of the object as key
                key1, key2, key3, key4 = obj_uuid, obj_uuid, "matrix", None
        else:
            key1, key2, key3, key4 = obj_uuid, obj_uuid, "matrix", None
        initialize_data_dict(data, key1, key2, key3, key4)
        data[key1][key2][key3][key4][frame] = mat

        # Store data for all bones, if the object is an armature

        if blender_obj and blender_obj.type == "ARMATURE":
            armature_caching(data, obj_uuid, blender_obj, action_name, frame, export_settings)

        elif blender_obj is None:  # GN instances
            # Case of baking object, for GN instances
            # There is no animation, so use the uuid of the object as key
            key1, key2, key3, key4 = obj_uuid, obj_uuid, "matrix", None
            initialize_data_dict(data, key1, key2, key3, key4)
            data[key1][key2][key3][key4][frame] = mat

        # Check SK animation here, as we are caching data
        # This avoids having to redo it when exporting the SK animation
        cache_sk = False
        if export_settings['gltf_morph_anim'] and blender_obj and blender_obj.type == "MESH" \
                and blender_obj.data is not None \
                and blender_obj.data.shape_keys is not None \
                and blender_obj.data.shape_keys.animation_data is not None \
                and blender_obj.data.shape_keys.animation_data.action is not None \
                and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]:

            key1, key2, key3, key4 = obj_uuid, blender_obj.data.shape_keys.animation_data.action.name, "sk", None
            cache_sk = True

        elif export_settings['gltf_morph_anim'] and blender_obj and blender_obj.type == "MESH" \
                and blender_obj.data is not None \
                and blender_obj.data.shape_keys is not None \
                and blender_obj.data.shape_keys.animation_data is not None \
                and export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:

            key1, key2, key3, key4 = obj_uuid, action_name, "sk", None
            cache_sk = True

        elif export_settings['gltf_morph_anim'] and blender_obj and blender_obj.type == "MESH" \
                and blender_obj.data is not None \
                and blender_obj.data.shape_keys is not None:
            key1, key2, key3, key4 = obj_uuid, obj_uuid, "sk", None
            cache_sk = True

        if cache_sk:
            initialize_data_dict(data, key1, key2, key3, key4)
            if key3 not in data[key1][key2].keys():
                data[key1][key2][key3] = {}
                data[key1][key2][key3][key4] = {}
            data[key1][key2][key3][key4][frame] = [
                k.value for k in get_sk_exported(
                    blender_obj.data.shape_keys.key_blocks)]
            cache_sk = False

        # Cache meshes with driven shape keys
        # This avoids having to redo it when exporting the SK animation
        if blender_obj and blender_obj.type == "ARMATURE":
            sk_drivers = get_sk_drivers(obj_uuid, export_settings)
            for dr_obj in sk_drivers:
                cache_sk = False
                driver_object = export_settings['vtree'].nodes[dr_obj].blender_object
                if dr_obj not in data.keys():
                    data[dr_obj] = {}
                if blender_obj.animation_data and blender_obj.animation_data.action \
                        and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]:
                    key1, key2, key3, key4 = dr_obj, obj_uuid + "_" + blender_obj.animation_data.action.name, "sk", None
                    cache_sk = True
                elif blender_obj.animation_data \
                        and export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
                    key1, key2, key3, key4 = dr_obj, obj_uuid + "_" + action_name, "sk", None
                    cache_sk = True
                else:
                    key1, key2, key3, key4 = dr_obj, obj_uuid + "_" + obj_uuid, "sk", None
                    cache_sk = True

                if cache_sk:
                    initialize_data_dict(data, key1, key2, key3, key4)
                    data[key1][key2][key3][key4][frame] = [
                        k.value for k in get_sk_exported(
                            driver_object.data.shape_keys.key_blocks)]
                    cache_sk = False
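Review note: because geometry-node instances have no object of their own, the loop above re-walks depsgraph.object_instances every frame and counts instances per parent to find "the n-th instance" again. A stripped-down sketch of that counting pattern (pure-Python stand-ins; the real iterator is bpy's depsgraph):

# Stand-ins: each entry is (parent_name, matrix); the depsgraph iterator yields
# instances in a stable order, which is what makes index-based matching work.
object_instances = [("Emitter", "M0"), ("Other", "MX"), ("Emitter", "M1"), ("Emitter", "M2")]

def nth_instance_matrix(instances, parent, wanted_index):
    cpt_inst = 0
    for inst_parent, matrix in instances:  # use only as an iterator, like depsgraph.object_instances
        if inst_parent == parent:
            if cpt_inst == wanted_index:
                return matrix
            cpt_inst += 1
    return None

print(nth_instance_matrix(object_instances, "Emitter", 2))  # "M2"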
def light_nodetree_caching(data, action_name, frame, export_settings):
    # After caching materials, cache lights, for KHR_animation_pointer
    for light in export_settings['KHR_animation_pointer']['lights'].keys():
        if len(export_settings['KHR_animation_pointer']['lights'][light]['paths']) == 0:
            continue

        blender_light = [m for m in bpy.data.lights if id(m) == light][0]
        if light not in data.keys():
            data[light] = {}

        if blender_light.node_tree and blender_light.node_tree.animation_data and blender_light.node_tree.animation_data.action \
                and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:
            key1, key2, key3 = light, blender_light.node_tree.animation_data.action.name, "value"
        elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
            key1, key2, key3 = light, action_name, "value"
        else:
            # Case of baking lights (scene export).
            # There is no animation, so use the id as key
            key1, key2, key3 = light, light, "value"

        if key2 not in data[key1].keys():
            data[key1][key2] = {}
            data[key1][key2][key3] = {}
            for path in export_settings['KHR_animation_pointer']['lights'][light]['paths'].keys():
                data[key1][key2][key3][path] = {}

        for path in export_settings['KHR_animation_pointer']['lights'][light]['paths'].keys():
            val = blender_light.path_resolve(path)
            if type(val).__name__ == "float":
                data[key1][key2][key3][path][frame] = val
            else:
                data[key1][key2][key3][path][frame] = list(val)
def light_caching(data, action_name, frame, export_settings):
    # After caching materials, cache lights, for KHR_animation_pointer
    for light in export_settings['KHR_animation_pointer']['lights'].keys():
        if len(export_settings['KHR_animation_pointer']['lights'][light]['paths']) == 0:
            continue

        blender_light = [m for m in bpy.data.lights if id(m) == light][0]
        if light not in data.keys():
            data[light] = {}

        if blender_light and blender_light.animation_data and blender_light.animation_data.action \
                and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:
            key1, key2, key3 = light, blender_light.animation_data.action.name, "value"
        elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
            key1, key2, key3 = light, action_name, "value"
        else:
            # Case of baking lights (scene export).
            # There is no animation, so use the id as key
            key1, key2, key3 = light, light, "value"

        if key2 not in data[key1].keys():
            data[key1][key2] = {}
            data[key1][key2][key3] = {}
            for path in export_settings['KHR_animation_pointer']['lights'][light]['paths'].keys():
                data[key1][key2][key3][path] = {}

        for path in export_settings['KHR_animation_pointer']['lights'][light]['paths'].keys():
            # Manage the special case for innerConeAngle because it requires spot_size & spot_blend
            if export_settings['KHR_animation_pointer']['lights'][light]['paths'][path]['path'] == "/extensions/KHR_lights_punctual/lights/XXX/spot.innerConeAngle":
                val = blender_light.path_resolve(path)
                val_size = blender_light.path_resolve(
                    export_settings['KHR_animation_pointer']['lights'][light]['paths'][path]['additional_path'])
                data[key1][key2][key3][path][frame] = (val_size * 0.5) - ((val_size * 0.5) * val)
            else:
                # Classic case
                val = blender_light.path_resolve(path)
                if type(val).__name__ == "float":
                    data[key1][key2][key3][path][frame] = val
                else:
                    # When the color comes from a node, it has 4 values (RGBA), so we need to convert it to 3 values (RGB)
                    if export_settings['KHR_animation_pointer']['lights'][light]['paths'][path]['length'] == 3 and len(
                            val) == 4:
                        val = val[:3]
                    data[key1][key2][key3][path][frame] = list(val)
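Review note: the innerConeAngle line computes half_spot_size - half_spot_size * spot_blend, i.e. Blender's blend factor shrinks the inner cone inward from the outer one. Worked numbers under that reading:

import math

spot_size = math.radians(60.0)   # Blender's full cone angle
spot_blend = 0.25                # 0 = hard edge, 1 = fully blended to the center

outer = spot_size * 0.5          # glTF outerConeAngle is the half-angle
inner = (spot_size * 0.5) - (spot_size * 0.5) * spot_blend

print(round(math.degrees(outer), 2))  # 30.0
print(round(math.degrees(inner), 2))  # 22.5: the blend carves 25% off the half-angle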
def camera_caching(data, action_name, frame, export_settings):
    # After caching lights, cache cameras, for KHR_animation_pointer
    for cam in export_settings['KHR_animation_pointer']['cameras'].keys():
        if len(export_settings['KHR_animation_pointer']['cameras'][cam]['paths']) == 0:
            continue

        blender_camera = [m for m in bpy.data.cameras if id(m) == cam][0]
        if cam not in data.keys():
            data[cam] = {}

        if blender_camera and blender_camera.animation_data and blender_camera.animation_data.action \
                and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:
            key1, key2, key3 = cam, blender_camera.animation_data.action.name, "value"
        elif export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
            key1, key2, key3 = cam, action_name, "value"
        else:
            # Case of baking cameras (scene export).
            # There is no animation, so use the id as key
            key1, key2, key3 = cam, cam, "value"

        if key2 not in data[key1].keys():
            data[key1][key2] = {}
            data[key1][key2][key3] = {}
            for path in export_settings['KHR_animation_pointer']['cameras'][cam]['paths'].keys():
                data[key1][key2][key3][path] = {}

        for path in export_settings['KHR_animation_pointer']['cameras'][cam]['paths'].keys():
            _render = bpy.context.scene.render
            width = _render.pixel_aspect_x * _render.resolution_x
            height = _render.pixel_aspect_y * _render.resolution_y
            del _render
            # Manage the special case for yfov because it requires sensor_fit, aspect ratio and angle
            if export_settings['KHR_animation_pointer']['cameras'][cam]['paths'][path]['path'] == "/cameras/XXX/perspective/yfov":
                val = yvof_blender_to_gltf(blender_camera.angle, width, height, blender_camera.sensor_fit)
                data[key1][key2][key3][path][frame] = val
            # Manage the special case for xmag because it requires ortho_scale & scene data
            elif export_settings['KHR_animation_pointer']['cameras'][cam]['paths'][path]['path'] == "/cameras/XXX/orthographic/xmag":
                val = blender_camera.ortho_scale
                data[key1][key2][key3][path][frame] = val * (width / max(width, height)) / 2.0
            # Manage the special case for ymag because it requires ortho_scale & scene data
            elif export_settings['KHR_animation_pointer']['cameras'][cam]['paths'][path]['path'] == "/cameras/XXX/orthographic/ymag":
                val = blender_camera.ortho_scale
                data[key1][key2][key3][path][frame] = val * (height / max(width, height)) / 2.0
            else:
                # Classic case
                val = blender_camera.path_resolve(path)
                if type(val).__name__ == "float":
                    data[key1][key2][key3][path][frame] = val
                else:
                    data[key1][key2][key3][path][frame] = list(val)
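Review note: xmag/ymag derive from ortho_scale, with the larger render dimension taking the full half-scale and the other axis scaled by the aspect ratio. Worked numbers under that formula:

ortho_scale = 6.0
width, height = 1920.0, 1080.0   # pixel-aspect-corrected render size

xmag = ortho_scale * (width / max(width, height)) / 2.0
ymag = ortho_scale * (height / max(width, height)) / 2.0

print(xmag)  # 3.0: the wide axis gets the full half ortho_scale
print(ymag)  # 1.6875: the short axis is scaled by the aspect ratio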
78
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_action_sampled.py
Normal file
@@ -0,0 +1,78 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from ......io.com import gltf2_io
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from .....com.gltf2_blender_extras import generate_extras
from ...fcurves.gltf2_blender_gather_fcurves_sampler import gather_animation_fcurves_sampler
from .gltf2_blender_gather_object_channels import gather_object_sampled_channels


def gather_action_object_sampled(object_uuid: str,
                                 blender_action: typing.Optional[bpy.types.Action],
                                 cache_key: str,
                                 export_settings):

    extra_samplers = []

    # If there is no animation in the file, there is no need to bake
    if len(bpy.data.actions) == 0:
        return None, extra_samplers

    channels, extra_channels = __gather_channels(
        object_uuid, blender_action.name if blender_action else cache_key, export_settings)
    animation = gltf2_io.Animation(
        channels=channels,
        extensions=None,
        extras=__gather_extras(blender_action, export_settings),
        name=__gather_name(object_uuid, blender_action, cache_key, export_settings),
        samplers=[]
    )

    if export_settings['gltf_export_extra_animations']:
        for chan in [chan for chan in extra_channels.values() if len(chan['properties']) != 0]:
            for channel_group_name, channel_group in chan['properties'].items():

                # No glTF channel here, as we don't have any target
                # Trying to retrieve the sampler directly
                sampler = gather_animation_fcurves_sampler(
                    object_uuid, tuple(channel_group), None, None, True, export_settings)
                if sampler is not None:
                    extra_samplers.append((channel_group_name, sampler, "OBJECT", None))

    if not animation.channels:
        return None, extra_samplers

    blender_object = export_settings['vtree'].nodes[object_uuid].blender_object
    export_user_extensions(
        'animation_action_object_sampled',
        export_settings,
        animation,
        blender_object,
        blender_action,
        cache_key)

    return animation, extra_samplers


def __gather_name(object_uuid: str, blender_action: typing.Optional[bpy.types.Action], cache_key: str, export_settings):
    if blender_action:
        return blender_action.name
    elif cache_key == object_uuid:
        return export_settings['vtree'].nodes[object_uuid].blender_object.name
    else:
        return cache_key


def __gather_channels(object_uuid: str, blender_action_name: str,
                      export_settings) -> typing.List[gltf2_io.AnimationChannel]:
    return gather_object_sampled_channels(object_uuid, blender_action_name, export_settings)


def __gather_extras(blender_action, export_settings):
    if export_settings['gltf_extras']:
        return generate_extras(blender_action) if blender_action else None
    return None
51
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channel_target.py
Normal file
@@ -0,0 +1,51 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......io.com import gltf2_io
from ....gltf2_blender_gather_cache import cached


@cached
def gather_object_sampled_channel_target(
        obj_uuid: str,
        channel: str,
        export_settings
) -> gltf2_io.AnimationChannelTarget:

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

    animation_channel_target = gltf2_io.AnimationChannelTarget(
        extensions=__gather_extensions(obj_uuid, channel, export_settings),
        extras=__gather_extras(obj_uuid, channel, export_settings),
        node=__gather_node(obj_uuid, export_settings),
        path=__gather_path(channel, export_settings)
    )

    export_user_extensions('gather_animation_object_sampled_channel_target_hook',
                           export_settings,
                           blender_object,
                           channel)

    return animation_channel_target


def __gather_extensions(armature_uuid, channel, export_settings):
    return None


def __gather_extras(armature_uuid, channel, export_settings):
    return None


def __gather_node(obj_uuid: str, export_settings):
    return export_settings['vtree'].nodes[obj_uuid].node


def __gather_path(channel, export_settings):
    return {
        "location": "translation",
        "rotation_quaternion": "rotation",
        "scale": "scale"
    }.get(channel)
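Review note: the final mapping turns Blender channel names into glTF target paths, with .get returning None for anything unexpected; gather_sampled_object_channel (below) then checks target.path before building a sampler. A one-liner check of that behavior:

to_gltf_path = {
    "location": "translation",
    "rotation_quaternion": "rotation",
    "scale": "scale",
}

print(to_gltf_path.get("location"))        # "translation"
print(to_gltf_path.get("rotation_euler"))  # None: callers must handle unmapped channels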
125
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channels.py
Normal file
@@ -0,0 +1,125 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from ......io.com import gltf2_io
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......blender.com.gltf2_blender_conversion import get_gltf_interpolation
from .....com.gltf2_blender_conversion import get_target, get_channel_from_target
from ....gltf2_blender_gather_cache import cached
from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups
from .gltf2_blender_gather_object_sampler import gather_object_sampled_animation_sampler
from .gltf2_blender_gather_object_channel_target import gather_object_sampled_channel_target


def gather_object_sampled_channels(object_uuid: str, blender_action_name: str,
                                   export_settings) -> typing.List[gltf2_io.AnimationChannel]:
    channels = []
    extra_channels = {}

    # The bake situation does not export any extra animation channels, as we bake TRS + weights at track or scene level,
    # without direct access to fcurve and action data

    list_of_animated_channels = {}
    if object_uuid != blender_action_name and blender_action_name in bpy.data.actions:
        # Not a bake situation
        channels_animated, to_be_sampled, extra_channels = get_channel_groups(
            object_uuid, bpy.data.actions[blender_action_name], export_settings)
        for chan in [chan for chan in channels_animated.values() if chan['bone'] is None]:
            for prop in chan['properties'].keys():
                list_of_animated_channels[get_channel_from_target(get_target(prop))] = get_gltf_interpolation(
                    chan['properties'][prop][0].keyframe_points[0].interpolation)  # Could be exported without sampling: keep the interpolation

        for _, _, chan_prop, _ in [chan for chan in to_be_sampled if chan[1] == "OBJECT"]:
            list_of_animated_channels[chan_prop] = get_gltf_interpolation(
                "LINEAR")  # If forced to be sampled, keep LINEAR interpolation

    for p in ["location", "rotation_quaternion", "scale"]:
        channel = gather_sampled_object_channel(
            object_uuid,
            p,
            blender_action_name,
            p in list_of_animated_channels.keys(),
            list_of_animated_channels[p] if p in list_of_animated_channels.keys() else get_gltf_interpolation("LINEAR"),
            export_settings
        )
        if channel is not None:
            channels.append(channel)

    blender_object = export_settings['vtree'].nodes[object_uuid].blender_object
    export_user_extensions('animation_gather_object_channel', export_settings, blender_object, blender_action_name)

    return channels if len(channels) > 0 else None, extra_channels


@cached
def gather_sampled_object_channel(
        obj_uuid: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        node_channel_interpolation: str,
        export_settings
):

    __target = __gather_target(obj_uuid, channel, export_settings)
    if __target.path is not None:
        sampler = __gather_sampler(
            obj_uuid,
            channel,
            action_name,
            node_channel_is_animated,
            node_channel_interpolation,
            export_settings)

        if sampler is None:
            # After check, no need to animate this node for this channel
            return None

        animation_channel = gltf2_io.AnimationChannel(
            extensions=None,
            extras=None,
            sampler=sampler,
            target=__target
        )

        export_user_extensions('gather_animation_channel_hook',
                               export_settings,
                               animation_channel,
                               channel,
                               export_settings['vtree'].nodes[obj_uuid].blender_object,
                               node_channel_is_animated
                               )

        return animation_channel
    return None


def __gather_target(
        obj_uuid: str,
        channel: str,
        export_settings
):

    return gather_object_sampled_channel_target(
        obj_uuid, channel, export_settings)


def __gather_sampler(
        obj_uuid: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        node_channel_interpolation: str,
        export_settings):

    return gather_object_sampled_animation_sampler(
        obj_uuid,
        channel,
        action_name,
        node_channel_is_animated,
        node_channel_interpolation,
        export_settings
    )
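Review note: each TRS channel is exported with the interpolation of its first keyframe when real fcurves exist, and falls back to LINEAR when the channel is only baked. A condensed sketch of that selection (helper name invented):

def pick_interpolation(channel, animated_channels):
    # animated_channels maps channel name -> interpolation found on its fcurve;
    # anything absent is baked-only and defaults to LINEAR.
    return animated_channels.get(channel, "LINEAR")

animated = {"location": "CUBICSPLINE"}   # e.g. detected from keyframe_points[0]
for p in ["location", "rotation_quaternion", "scale"]:
    print(p, "->", pick_interpolation(p, animated))
# location -> CUBICSPLINE; the two baked channels -> LINEAR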
86
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_keyframes.py
Normal file
@@ -0,0 +1,86 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import numpy as np
from ....gltf2_blender_gather_tree import VExportNode
from ....gltf2_blender_gather_cache import cached
from ...gltf2_blender_gather_keyframes import Keyframe
from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data


@cached
def gather_object_sampled_keyframes(
        obj_uuid: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        export_settings
):

    start_frame = export_settings['ranges'][obj_uuid][action_name]['start']
    end_frame = export_settings['ranges'][obj_uuid][action_name]['end']

    keyframes = []

    frame = start_frame
    step = export_settings['gltf_frame_step']

    while frame <= end_frame:
        key = Keyframe(None, frame, channel)

        mat = get_cache_data(
            'matrix',
            obj_uuid,
            None,
            action_name,
            frame,
            step,
            export_settings)

        trans, rot, sca = mat.decompose()
        key.value_total = {
            "location": trans,
            "rotation_quaternion": rot,
            "scale": sca,
        }[channel]

        keyframes.append(key)
        frame += step

    if len(keyframes) == 0:
        # For example, with option CROP for negative frames, when all frames are negative
        return None

    if not export_settings['gltf_optimize_animation']:
        # For objects, keep the data only if values change, or if the user wants to keep it
        if node_channel_is_animated is True:
            return keyframes  # Always keep
        else:
            # Baked object
            if export_settings['gltf_optimize_animation_keep_object'] is False:
                # Not keeping if the property does not change
                cst = fcurve_is_constant(keyframes)
                return None if cst is True else keyframes
            else:
                # Keep the data, as requested by the user. We keep all samples, as the user doesn't want to optimize
                return keyframes

    else:

        # For objects, if all values are the same, we keep only the first and last
        cst = fcurve_is_constant(keyframes)
        if node_channel_is_animated is True:
            return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes
        else:
            # Baked object
            # Not keeping if the property does not change, if the user decided not to keep it
            if export_settings['gltf_optimize_animation_keep_object'] is False:
                return None if cst is True else keyframes
            else:
                # Keep at least 2 keyframes if the data is not changing
                return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes


def fcurve_is_constant(keyframes):
    return all([j < 0.0001 for j in np.ptp([[k.value[i] for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)])
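Review note: with optimization on, a constant channel collapses to its first and last samples; with optimization off, a constant baked channel can be dropped entirely. A small decision-table sketch of those branches (plain Python with simplified boolean flags, not the exporter's API):

def reduce_keyframes(keyframes, constant, animated, optimize, keep_baked):
    # Mirrors the branch structure above with plain booleans.
    if not optimize:
        if animated:
            return keyframes                       # always kept as-is
        return keyframes if keep_baked or not constant else None
    if constant and len(keyframes) >= 2:
        if animated or keep_baked:
            return [keyframes[0], keyframes[-1]]   # 2 samples are enough
        return None
    return keyframes

print(reduce_keyframes([1, 1, 1, 1], constant=True, animated=False, optimize=True, keep_baked=False))  # None
print(reduce_keyframes([1, 1, 1, 1], constant=True, animated=True, optimize=True, keep_baked=False))   # [1, 1]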
171
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_sampler.py
Normal file
@@ -0,0 +1,171 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import mathutils
from ......io.com import gltf2_io
from ......io.com import gltf2_io_constants
from ......io.exp import gltf2_io_binary_data
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from .....com.gltf2_blender_data_path import get_target_object_path
from .....com import gltf2_blender_math
from ....gltf2_blender_gather_tree import VExportNode
from ....gltf2_blender_gather_cache import cached
from ....gltf2_blender_gather_accessors import gather_accessor
from .gltf2_blender_gather_object_keyframes import gather_object_sampled_keyframes


@cached
def gather_object_sampled_animation_sampler(
        obj_uuid: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        node_channel_interpolation: str,
        export_settings
):

    keyframes = __gather_keyframes(
        obj_uuid,
        channel,
        action_name,
        node_channel_is_animated,
        export_settings)

    if keyframes is None:
        # After check, no need to animate this node for this channel
        return None

    # Now that we have the raw input/output, we need to convert them to glTF data
    input, output = __convert_keyframes(obj_uuid, channel, keyframes, action_name, export_settings)

    sampler = gltf2_io.AnimationSampler(
        extensions=None,
        extras=None,
        input=input,
        interpolation=__gather_interpolation(
            node_channel_is_animated,
            node_channel_interpolation,
            keyframes,
            export_settings),
        output=output)

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
    export_user_extensions('animation_gather_object_sampler', export_settings, blender_object, action_name)

    return sampler


def __gather_keyframes(
        obj_uuid: str,
        channel: str,
        action_name: str,
        node_channel_is_animated: bool,
        export_settings
):

    keyframes = gather_object_sampled_keyframes(
        obj_uuid,
        channel,
        action_name,
        node_channel_is_animated,
        export_settings
    )

    return keyframes


def __convert_keyframes(obj_uuid: str, channel: str, keyframes, action_name: str, export_settings):

    # Sliding can come from:
    # - option SLIDE for negative frames
    # - option to start animation at frame 0 for looping
    if obj_uuid in export_settings['slide'].keys() and action_name in export_settings['slide'][obj_uuid].keys():
        for k in keyframes:
            k.frame += -export_settings['slide'][obj_uuid][action_name]
            k.seconds = k.frame / (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base)

    times = [k.seconds for k in keyframes]
    input = gather_accessor(
        gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float),
        gltf2_io_constants.ComponentType.Float,
        len(times),
        tuple([max(times)]),
        tuple([min(times)]),
        gltf2_io_constants.DataType.Scalar,
        export_settings)

    is_yup = export_settings['gltf_yup']

    object_path = get_target_object_path(channel)
    transform = mathutils.Matrix.Identity(4)

    need_rotation_correction = (
        export_settings['gltf_cameras'] and export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.CAMERA) or (
        export_settings['gltf_lights'] and export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.LIGHT)

    values = []
    fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base)
    for keyframe in keyframes:

        # Transform the data and build glTF control points
        value = gltf2_blender_math.transform(keyframe.value, channel, transform, need_rotation_correction)
        if is_yup:
            value = gltf2_blender_math.swizzle_yup(value, channel)
        keyframe_value = gltf2_blender_math.mathutils_to_gltf(value)

        # No tangents when baking, we are using LINEAR interpolation

        values += keyframe_value

    # store the keyframe data in a binary buffer
    component_type = gltf2_io_constants.ComponentType.Float
    data_type = gltf2_io_constants.DataType.vec_type_from_num(len(keyframes[0].value))

    output = gltf2_io.Accessor(
        buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type),
        byte_offset=None,
        component_type=component_type,
        count=len(values) // gltf2_io_constants.DataType.num_elements(data_type),
        extensions=None,
        extras=None,
        max=None,
        min=None,
        name=None,
        normalized=None,
        sparse=None,
        type=data_type
    )

    return input, output


def __gather_interpolation(
        node_channel_is_animated: bool,
        node_channel_interpolation: str,
        keyframes,
        export_settings):

    if len(keyframes) > 2:
        # keep STEP as STEP, others become LINEAR
        return {
            "STEP": "STEP"
        }.get(node_channel_interpolation, "LINEAR")
    elif len(keyframes) == 1:
        if node_channel_is_animated is False:
            return "STEP"
        elif node_channel_interpolation == "CUBICSPLINE":
            return "LINEAR"  # We can't have a single keyframe with CUBICSPLINE
        else:
            return node_channel_interpolation
    else:
        # If we only have 2 keyframes, set interpolation to STEP if baked
        if node_channel_is_animated is False:
            # baked => We have only the first and last keyframes
            return "STEP"
        else:
            if keyframes[0].value == keyframes[1].value:
                return "STEP"
            else:
                return "LINEAR"
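
# Summary of the interpolation choice above, derived from the code paths:
#   > 2 keyframes : STEP stays STEP, everything else becomes LINEAR
#   == 1 keyframe : STEP if baked; LINEAR if the channel was CUBICSPLINE
#                   (a single CUBICSPLINE keyframe is invalid); else unchanged
#   == 2 keyframes: STEP if baked or if both values are equal, else LINEAR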
62
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_action_sampled.py
Normal file
@ -0,0 +1,62 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......io.com import gltf2_io
from .....com.gltf2_blender_extras import generate_extras
from .gltf2_blender_gather_sk_channels import gather_sk_sampled_channels


def gather_action_sk_sampled(object_uuid: str,
                             blender_action: typing.Optional[bpy.types.Action],
                             cache_key: str,
                             export_settings):

    # If there is no animation in the file, there is no need to bake
    if len(bpy.data.actions) == 0:
        return None

    animation = gltf2_io.Animation(
        channels=__gather_channels(object_uuid, blender_action.name if blender_action else cache_key, export_settings),
        extensions=None,
        extras=__gather_extras(blender_action, export_settings),
        name=__gather_name(object_uuid, blender_action, cache_key, export_settings),
        samplers=[]
    )

    if not animation.channels:
        return None

    blender_object = export_settings['vtree'].nodes[object_uuid].blender_object
    export_user_extensions(
        'animation_action_sk_sampled',
        export_settings,
        animation,
        blender_object,
        blender_action,
        cache_key)

    return animation


def __gather_name(object_uuid: str, blender_action: typing.Optional[bpy.types.Action], cache_key: str, export_settings):
    if blender_action:
        return blender_action.name
    elif object_uuid == cache_key:
        return export_settings['vtree'].nodes[object_uuid].blender_object.name
    else:
        return cache_key


def __gather_channels(object_uuid: str, blender_action_name: str,
                      export_settings) -> typing.List[gltf2_io.AnimationChannel]:
    return gather_sk_sampled_channels(object_uuid, blender_action_name, export_settings)


def __gather_extras(blender_action, export_settings):
    if export_settings['gltf_extras']:
        return generate_extras(blender_action) if blender_action else None
    return None
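
# Name resolution in __gather_name above, in order (hypothetical values for illustration):
#   blender_action set       -> the action name, e.g. "KeyAction"
#   object_uuid == cache_key -> the object name from the export tree
#   otherwise                -> the cache_key itself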
38
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channel_target.py
Normal file
@ -0,0 +1,38 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

from ......io.com import gltf2_io
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ....gltf2_blender_gather_cache import cached


@cached
def gather_sk_sampled_channel_target(
        obj_uuid: str,
        export_settings
) -> gltf2_io.AnimationChannelTarget:

    animation_channel_target = gltf2_io.AnimationChannelTarget(
        extensions=__gather_extensions(obj_uuid, export_settings),
        extras=__gather_extras(obj_uuid, export_settings),
        node=__gather_node(obj_uuid, export_settings),
        path='weights'
    )

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
    export_user_extensions('animation_action_sk_sampled_target', export_settings, blender_object)

    return animation_channel_target


def __gather_extensions(armature_uuid, export_settings):
    return None


def __gather_extras(armature_uuid, export_settings):
    return None


def __gather_node(obj_uuid: str, export_settings):
    return export_settings['vtree'].nodes[obj_uuid].node
75
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_channels.py
Normal file
@ -0,0 +1,75 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......io.com import gltf2_io
from ....gltf2_blender_gather_cache import cached
from .gltf2_blender_gather_sk_channel_target import gather_sk_sampled_channel_target
from .gltf2_blender_gather_sk_sampler import gather_sk_sampled_animation_sampler


def gather_sk_sampled_channels(
        object_uuid: str,
        blender_action_name: str,
        export_settings):

    # Only 1 channel when exporting shape keys

    channels = []

    channel = gather_sampled_sk_channel(
        object_uuid,
        blender_action_name,
        export_settings
    )

    if channel is not None:
        channels.append(channel)

    blender_object = export_settings['vtree'].nodes[object_uuid].blender_object
    export_user_extensions('animation_gather_sk_channels', export_settings, blender_object, blender_action_name)

    return channels if len(channels) > 0 else None


@cached
def gather_sampled_sk_channel(
        obj_uuid: str,
        action_name: str,
        export_settings
):

    __target = __gather_target(obj_uuid, export_settings)
    if __target.path is not None:
        sampler = __gather_sampler(obj_uuid, action_name, export_settings)

        if sampler is None:
            # After check, no need to animate this node for this channel
            return None

        animation_channel = gltf2_io.AnimationChannel(
            extensions=None,
            extras=None,
            sampler=sampler,
            target=__target
        )

        blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
        export_user_extensions('animation_gather_sk_channel', export_settings, blender_object, action_name)

        return animation_channel
    return None


def __gather_target(obj_uuid: str, export_settings):
    return gather_sk_sampled_channel_target(
        obj_uuid, export_settings)


def __gather_sampler(obj_uuid: str, action_name: str, export_settings):
    return gather_sk_sampled_animation_sampler(
        obj_uuid,
        action_name,
        export_settings
    )
109
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_keyframes.py
Normal file
@ -0,0 +1,109 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import typing
import numpy as np
from ......blender.com.gltf2_blender_data_path import get_sk_exported
from ....gltf2_blender_gather_cache import cached
from ...gltf2_blender_gather_keyframes import Keyframe
from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups
from ...fcurves.gltf2_blender_gather_fcurves_keyframes import gather_non_keyed_values
from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data


@cached
def gather_sk_sampled_keyframes(obj_uuid,
                                action_name,
                                export_settings):

    start_frame = export_settings['ranges'][obj_uuid][action_name]['start']
    end_frame = export_settings['ranges'][obj_uuid][action_name]['end']

    keyframes = []

    frame = start_frame
    step = export_settings['gltf_frame_step']
    blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object

    if export_settings['gltf_optimize_armature_disable_viewport'] is True:
        # Using this option, we miss the drivers :(
        # No solution exists for now. In the future, we should be able to copy a driver
        if action_name in bpy.data.actions:
            channel_group, _ = get_channel_groups(
                obj_uuid, bpy.data.actions[action_name], export_settings, no_sample_option=True)
        elif blender_obj.data.shape_keys.animation_data and blender_obj.data.shape_keys.animation_data.action:
            channel_group, _ = get_channel_groups(
                obj_uuid, blender_obj.data.shape_keys.animation_data.action, export_settings, no_sample_option=True)
        else:
            channel_group = {}
            channels = [None] * len(get_sk_exported(blender_obj.data.shape_keys.key_blocks))

        # One day, if we are able to bake drivers or evaluate them the right
        # way, we can add the driver fcurves here

        for chan in channel_group.values():
            channels = chan['properties']['value']
            break

        non_keyed_values = gather_non_keyed_values(obj_uuid, channels, None, export_settings)

        while frame <= end_frame:
            key = Keyframe(channels, frame, None)
            key.value = [c.evaluate(frame) for c in channels if c is not None]
            # Complete key with non keyed values, if needed
            if len([c for c in channels if c is not None]) != key.get_target_len():
                complete_key(key, non_keyed_values)

            keyframes.append(key)
            frame += step

    else:
        # Full bake, we will go frame by frame. This can take time (more than using evaluate)

        while frame <= end_frame:
            key = Keyframe([None] * (len(get_sk_exported(blender_obj.data.shape_keys.key_blocks))), frame, 'value')
            key.value_total = get_cache_data(
                'sk',
                obj_uuid,
                None,
                action_name,
                frame,
                step,
                export_settings
            )

            keyframes.append(key)
            frame += step

    if len(keyframes) == 0:
        # For example, with option CROP negative frames, but all frames are negative
        return None

    # In case the shape keys have only a Basis key
    if any([len(k.value) == 0 for k in keyframes]):
        return None

    if not export_settings['gltf_optimize_animation']:
        return keyframes

    # For shape keys, if all values are the same, we keep only the first and last keyframes
    cst = fcurve_is_constant(keyframes)
    return [keyframes[0], keyframes[-1]] if cst is True and len(keyframes) >= 2 else keyframes


def fcurve_is_constant(keyframes):
    return all([j < 0.0001 for j in np.ptp([[k.value[i] for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)])

# TODO de-duplicate, but import issue???


def complete_key(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]):
    """
    Complete keyframe with non keyed values
    """
    for i in range(0, key.get_target_len()):
        if i in key.get_indices():
            continue  # this is a keyed array_index or an animated shape key
        key.set_value_index(i, non_keyed_values[i])
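
# A quick illustration of complete_key (hypothetical channels): with
#   channels = [fcu0, None, fcu2]  ->  key.get_indices() covers {0, 2}
# index 1 has no fcurve, so the loop above fills it from non_keyed_values[1],
# the current non-keyed shape key value gathered earlier.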
112
scripts/addons_core/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_sampler.py
Normal file
@ -0,0 +1,112 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
from ......io.com import gltf2_io, gltf2_io_constants
from ......io.exp import gltf2_io_binary_data
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from .....com.gltf2_blender_math import mathutils_to_gltf
from ....gltf2_blender_gather_accessors import gather_accessor
from .gltf2_blender_gather_sk_keyframes import gather_sk_sampled_keyframes


def gather_sk_sampled_animation_sampler(
        obj_uuid,
        action_name,
        export_settings
):

    keyframes = __gather_keyframes(
        obj_uuid,
        action_name,
        export_settings)

    if keyframes is None:
        # After check, no need to animate this node for this channel
        return None

    # Now that we have the raw input/output, we need to convert them to glTF data
    input, output = __convert_keyframes(obj_uuid, keyframes, action_name, export_settings)

    sampler = gltf2_io.AnimationSampler(
        extensions=None,
        extras=None,
        input=input,
        interpolation=__gather_interpolation(export_settings),
        output=output
    )

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
    export_user_extensions('animation_gather_sk_channels', export_settings, blender_object, action_name)

    return sampler


def __gather_keyframes(
        obj_uuid,
        action_name,
        export_settings):

    keyframes = gather_sk_sampled_keyframes(
        obj_uuid,
        action_name,
        export_settings
    )

    if keyframes is None:
        # After check, no need to animate this node
        return None

    return keyframes


def __convert_keyframes(obj_uuid, keyframes, action_name: str, export_settings):

    # Sliding can come from:
    # - option SLIDE for negative frames
    # - option to start animation at frame 0 for looping
    if obj_uuid in export_settings['slide'].keys() and action_name in export_settings['slide'][obj_uuid].keys():
        for k in keyframes:
            k.frame += -export_settings['slide'][obj_uuid][action_name]
            k.seconds = k.frame / (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base)

    times = [k.seconds for k in keyframes]
    input = gather_accessor(
        gltf2_io_binary_data.BinaryData.from_list(times, gltf2_io_constants.ComponentType.Float),
        gltf2_io_constants.ComponentType.Float,
        len(times),
        tuple([max(times)]),
        tuple([min(times)]),
        gltf2_io_constants.DataType.Scalar,
        export_settings)

    values = []
    for keyframe in keyframes:
        keyframe_value = mathutils_to_gltf(keyframe.value)
        values += keyframe_value

    component_type = gltf2_io_constants.ComponentType.Float
    data_type = gltf2_io_constants.DataType.Scalar

    output = gltf2_io.Accessor(
        buffer_view=gltf2_io_binary_data.BinaryData.from_list(values, component_type),
        byte_offset=None,
        component_type=component_type,
        count=len(values) // gltf2_io_constants.DataType.num_elements(data_type),
        extensions=None,
        extras=None,
        max=None,
        min=None,
        name=None,
        normalized=None,
        sparse=None,
        type=data_type
    )

    return input, output


def __gather_interpolation(export_settings):
    # TODO: check if the SK was animated with CONSTANT
    return 'LINEAR'
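
# Note on the TODO above: baked shape key samples are dense (one per frame step),
# so LINEAR is a safe default; a shape key keyed with CONSTANT interpolation
# would currently still be exported as LINEAR between samples.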
402
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_export.py
Executable file
@ -0,0 +1,402 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import os
import subprocess
import time

import bpy
import sys
import traceback

from ...io.exp import gltf2_io_export
from ...io.exp import gltf2_io_draco_compression_extension
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com import gltf2_blender_json
from . import gltf2_blender_gather
from .gltf2_blender_gltf2_exporter import GlTF2Exporter


def save(context, export_settings):
    """Start the glTF 2.0 export and save the content to either a .gltf or .glb file."""
    if bpy.context.active_object is not None:
        if bpy.context.active_object.mode != "OBJECT":  # For a linked object, you can't force OBJECT mode
            bpy.ops.object.mode_set(mode='OBJECT')

    original_frame = bpy.context.scene.frame_current
    if not export_settings['gltf_current_frame']:
        bpy.context.scene.frame_set(0)

    __notify_start(context, export_settings)
    start_time = time.time()
    pre_export_callbacks = export_settings["pre_export_callbacks"]
    for callback in pre_export_callbacks:
        callback(export_settings)

    json, buffer = __export(export_settings)

    post_export_callbacks = export_settings["post_export_callbacks"]
    for callback in post_export_callbacks:
        callback(export_settings)
    __write_file(json, buffer, export_settings)

    end_time = time.time()
    __notify_end(context, end_time - start_time, export_settings)

    if not export_settings['gltf_current_frame']:
        bpy.context.scene.frame_set(int(original_frame))

    return {'FINISHED'}

def __export(export_settings):
    exporter = GlTF2Exporter(export_settings)
    __gather_gltf(exporter, export_settings)
    buffer = __create_buffer(exporter, export_settings)
    exporter.finalize_images()

    export_user_extensions('gather_gltf_extensions_hook', export_settings, exporter.glTF)
    exporter.traverse_extensions()

    # Detect extensions that are animated
    # If they are not animated, we can remove an extension when it is empty (all default values), and when default values don't change the shader
    # But if they are animated, we need to keep the extension, even if it is empty
    __detect_animated_extensions(exporter.glTF.to_dict(), export_settings)

    # now that addons possibly added some fields in the json, we can fix it if needed
    # Also deleting extensions that are no longer needed, based on what we detected above
    json = __fix_json(exporter.glTF.to_dict(), export_settings)

    # IOR is a special case where we need to export only if some other extensions are used
    __check_ior(json, export_settings)

    # Volume is a special case where we need to export only if transmission is used
    __check_volume(json, export_settings)

    __manage_extension_declaration(json, export_settings)

    # We need to run it again, as we can now have some "extensions" dicts that are empty
    # Or extensionsUsed / extensionsRequired that are empty
    # (because we removed some extensions)
    json = __fix_json(json, export_settings)

    # Convert additional data if needed
    if export_settings['gltf_unused_textures'] is True:
        additional_json_textures = __fix_json([i.to_dict()
                                               for i in exporter.additional_data.additional_textures], export_settings)

        # Now that we have the final json, we can add the additional data
        # We can not do that for everybody, because we don't want this extra to become "a standard"
        # So let's use the "extras" field filled by a user extension

        export_user_extensions('gather_gltf_additional_textures_hook', export_settings, json, additional_json_textures)

        # if len(additional_json_textures) > 0:
        #     if json.get('extras') is None:
        #         json['extras'] = {}
        #     json['extras']['additionalTextures'] = additional_json_textures

    return json, buffer

def __check_ior(json, export_settings):
    if 'materials' not in json.keys():
        return
    for mat in json['materials']:
        if 'extensions' not in mat.keys():
            continue
        if 'KHR_materials_ior' not in mat['extensions'].keys():
            continue
        # We keep IOR only if some other extensions are used
        # And because we may have deleted some extensions, we need to check again
        need_to_export_ior = [
            'KHR_materials_transmission',
            'KHR_materials_volume',
            'KHR_materials_specular'
        ]

        if not any([e in mat['extensions'].keys() for e in need_to_export_ior]):
            del mat['extensions']['KHR_materials_ior']

    # Check if we need to keep the extension declaration
    ior_found = False
    for mat in json['materials']:
        if 'extensions' not in mat.keys():
            continue
        if 'KHR_materials_ior' not in mat['extensions'].keys():
            continue
        ior_found = True
        break
    if not ior_found:
        export_settings['gltf_need_to_keep_extension_declaration'] = [
            e for e in export_settings['gltf_need_to_keep_extension_declaration'] if e != 'KHR_materials_ior']


def __check_volume(json, export_settings):
    if 'materials' not in json.keys():
        return
    for mat in json['materials']:
        if 'extensions' not in mat.keys():
            continue
        if 'KHR_materials_volume' not in mat['extensions'].keys():
            continue
        # We keep volume only if transmission is used
        # And because we may have deleted some extensions, we need to check again
        if 'KHR_materials_transmission' not in mat['extensions'].keys():
            del mat['extensions']['KHR_materials_volume']

    # Check if we need to keep the extension declaration
    volume_found = False
    for mat in json['materials']:
        if 'extensions' not in mat.keys():
            continue
        if 'KHR_materials_volume' not in mat['extensions'].keys():
            continue
        volume_found = True
        break
    if not volume_found:
        export_settings['gltf_need_to_keep_extension_declaration'] = [
            e for e in export_settings['gltf_need_to_keep_extension_declaration'] if e != 'KHR_materials_volume']

def __detect_animated_extensions(obj, export_settings):
    export_settings['gltf_animated_extensions'] = []
    export_settings['gltf_need_to_keep_extension_declaration'] = []
    if 'animations' not in obj.keys():
        return
    for anim in obj['animations']:
        if 'extensions' in anim.keys():
            for channel in anim['channels']:
                if not channel['target']['path'] == "pointer":
                    continue
                pointer = channel['target']['extensions']['KHR_animation_pointer']['pointer']
                if "/KHR" not in pointer:
                    continue
                tab = pointer.split("/")
                tab = [i for i in tab if i.startswith("KHR_")]
                if len(tab) == 0:
                    continue
                if tab[-1] not in export_settings['gltf_animated_extensions']:
                    export_settings['gltf_animated_extensions'].append(tab[-1])
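
# Illustrative example (assumed pointer shape, following KHR_animation_pointer):
#   pointer = "/materials/0/extensions/KHR_materials_emissive_strength/emissiveStrength"
#   pointer.split("/") -> ['', 'materials', '0', 'extensions',
#                          'KHR_materials_emissive_strength', 'emissiveStrength']
#   after filtering for "KHR_" prefixes -> ['KHR_materials_emissive_strength']
# so tab[-1] marks KHR_materials_emissive_strength as an animated extension.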

def __manage_extension_declaration(json, export_settings):
    if 'extensionsUsed' in json.keys():
        new_ext_used = []
        for ext in json['extensionsUsed']:
            if ext not in export_settings['gltf_need_to_keep_extension_declaration']:
                continue
            new_ext_used.append(ext)
        json['extensionsUsed'] = new_ext_used
    if 'extensionsRequired' in json.keys():
        new_ext_required = []
        for ext in json['extensionsRequired']:
            if ext not in export_settings['gltf_need_to_keep_extension_declaration']:
                continue
            new_ext_required.append(ext)
        json['extensionsRequired'] = new_ext_required

def __gather_gltf(exporter, export_settings):
    active_scene_idx, scenes, animations = gltf2_blender_gather.gather_gltf2(export_settings)

    unused_skins = export_settings['vtree'].get_unused_skins()

    if export_settings['gltf_draco_mesh_compression']:
        gltf2_io_draco_compression_extension.encode_scene_primitives(scenes, export_settings)
        exporter.add_draco_extension()

    export_user_extensions('gather_gltf_hook', export_settings, active_scene_idx, scenes, animations)

    for idx, scene in enumerate(scenes):
        exporter.add_scene(scene, idx == active_scene_idx, export_settings=export_settings)
    for animation in animations:
        exporter.add_animation(animation)
    exporter.manage_gpu_instancing_nodes(export_settings)
    exporter.traverse_unused_skins(unused_skins)
    exporter.traverse_additional_textures()
    exporter.traverse_additional_images()


def __create_buffer(exporter, export_settings):
    buffer = bytes()
    if export_settings['gltf_format'] == 'GLB':
        buffer = exporter.finalize_buffer(export_settings['gltf_filedirectory'], is_glb=True)
    else:
        if export_settings['gltf_format'] == 'GLTF_EMBEDDED':
            exporter.finalize_buffer(export_settings['gltf_filedirectory'])
        else:
            exporter.finalize_buffer(export_settings['gltf_filedirectory'],
                                     export_settings['gltf_binaryfilename'])

    return buffer
def __postprocess_with_gltfpack(export_settings):

    gltfpack_binary_file_path = bpy.context.preferences.addons['io_scene_gltf2'].preferences.gltfpack_path_ui

    gltf_file_path = export_settings['gltf_filepath']
    gltf_file_base = os.path.splitext(os.path.basename(gltf_file_path))[0]
    gltf_file_extension = os.path.splitext(os.path.basename(gltf_file_path))[1]
    gltf_file_directory = os.path.dirname(gltf_file_path)
    gltf_output_file_directory = os.path.join(gltf_file_directory, "gltfpacked")
    if (os.path.exists(gltf_output_file_directory) is False):
        os.makedirs(gltf_output_file_directory)

    gltf_input_file_path = gltf_file_path
    gltf_output_file_path = os.path.join(gltf_output_file_directory, gltf_file_base + gltf_file_extension)

    options = []

    if (export_settings['gltf_gltfpack_tc']):
        options.append("-tc")

        if (export_settings['gltf_gltfpack_tq']):
            options.append("-tq")
            options.append(f"{export_settings['gltf_gltfpack_tq']}")

    if (export_settings['gltf_gltfpack_si'] != 1.0):
        options.append("-si")
        options.append(f"{export_settings['gltf_gltfpack_si']}")

    if (export_settings['gltf_gltfpack_sa']):
        options.append("-sa")

    if (export_settings['gltf_gltfpack_slb']):
        options.append("-slb")

    if (export_settings['gltf_gltfpack_noq']):
        options.append("-noq")
    else:
        options.append("-vp")
        options.append(f"{export_settings['gltf_gltfpack_vp']}")
        options.append("-vt")
        options.append(f"{export_settings['gltf_gltfpack_vt']}")
        options.append("-vn")
        options.append(f"{export_settings['gltf_gltfpack_vn']}")
        options.append("-vc")
        options.append(f"{export_settings['gltf_gltfpack_vc']}")

        match export_settings['gltf_gltfpack_vpi']:
            case "Integer":
                options.append("-vpi")
            case "Normalized":
                options.append("-vpn")
            case "Floating-point":
                options.append("-vpf")

    parameters = []
    parameters.append("-i")
    parameters.append(gltf_input_file_path)
    parameters.append("-o")
    parameters.append(gltf_output_file_path)

    try:
        subprocess.run([gltfpack_binary_file_path] + options + parameters, check=True)
    except subprocess.CalledProcessError as e:
        export_settings['log'].error("Calling gltfpack was not successful")

def __fix_json(obj, export_settings):
    # TODO: move to custom JSON encoder
    fixed = obj
    if isinstance(obj, dict):
        fixed = {}
        for key, value in obj.items():
            if key == 'extras' and value is not None:
                fixed[key] = value
                continue
            if not __should_include_json_value(key, value, export_settings):
                continue
            fixed[key] = __fix_json(value, export_settings)
    elif isinstance(obj, list):
        fixed = []
        for value in obj:
            fixed.append(__fix_json(value, export_settings))
    elif isinstance(obj, float):
        # force floats to int, if they are integers (prevent INTEGER_WRITTEN_AS_FLOAT validator warnings)
        if int(obj) == obj:
            return int(obj)
    return fixed
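
# Illustrative example of __fix_json (hypothetical input):
#   __fix_json({'scale': 2.0, 'name': None, 'children': []}, export_settings)
#   -> {'scale': 2}
# 2.0 becomes the int 2, None is dropped, and the empty list is dropped
# (assuming 'children' is not in an allowed-empty-collections list below).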

def __should_include_json_value(key, value, export_settings):
    allowed_empty_collections = ["KHR_materials_unlit"]
    allowed_empty_collections_if_animated = \
        [
            "KHR_materials_specular",
            "KHR_materials_clearcoat",
            "KHR_texture_transform",
            "KHR_materials_emissive_strength",
            "KHR_materials_ior",
            # "KHR_materials_iridescence",
            "KHR_materials_sheen",
            "KHR_materials_specular",
            "KHR_materials_transmission",
            "KHR_materials_volume",
            "KHR_lights_punctual",
            "KHR_materials_anisotropy"
        ]

    if value is None:
        return False
    elif __is_empty_collection(value) and key not in allowed_empty_collections:
        # An empty collection is not allowed, except if it is animated
        if key in allowed_empty_collections_if_animated:
            if key in export_settings['gltf_animated_extensions']:
                # There is an animation, so we can keep this empty collection, and store
                # that this extension declaration needs to be kept
                export_settings['gltf_need_to_keep_extension_declaration'].append(key)
                return True
            else:
                # There is no animation, so we will not keep this empty collection
                return False
        # We can't have this empty collection, because it can't be animated
        return False
    elif not __is_empty_collection(value):
        if key.startswith("KHR_") or key.startswith("EXT_"):
            export_settings['gltf_need_to_keep_extension_declaration'].append(key)
    elif __is_empty_collection(value) and key in allowed_empty_collections:
        # We can have this empty collection for this extension. So keeping it, and
        # store that this extension declaration needs to be kept
        export_settings['gltf_need_to_keep_extension_declaration'].append(key)
    return True


def __is_empty_collection(value):
    return (isinstance(value, dict) or isinstance(value, list)) and len(value) == 0
def __write_file(json, buffer, export_settings):
    try:
        gltf2_io_export.save_gltf(
            json,
            export_settings,
            gltf2_blender_json.BlenderJSONEncoder,
            buffer)
        if (export_settings['gltf_use_gltfpack']):
            __postprocess_with_gltfpack(export_settings)

    except AssertionError as e:
        _, _, tb = sys.exc_info()
        traceback.print_tb(tb)  # Fixed format
        tb_info = traceback.extract_tb(tb)
        for tbi in tb_info:
            filename, line, func, text = tbi
            export_settings['log'].error('An error occurred on line {} in statement {}'.format(line, text))
        export_settings['log'].error(str(e))
        raise e


def __notify_start(context, export_settings):
    export_settings['log'].info('Starting glTF 2.0 export')
    context.window_manager.progress_begin(0, 100)
    context.window_manager.progress_update(0)


def __notify_end(context, elapsed, export_settings):
    export_settings['log'].info('Finished glTF 2.0 export in {} s'.format(elapsed))
    context.window_manager.progress_end()
    print()
126
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather.py
Executable file
@ -0,0 +1,126 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy

from ...io.com import gltf2_io
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras
from .gltf2_blender_gather_cache import cached
from . import gltf2_blender_gather_nodes
from . import gltf2_blender_gather_joints
from . import gltf2_blender_gather_tree
from .animation.sampled.object.gltf2_blender_gather_object_keyframes import get_cache_data
from .animation.gltf2_blender_gather_animations import gather_animations


def gather_gltf2(export_settings):
    """
    Gather glTF properties from the current state of Blender.

    :return: list of scene graphs to be added to the glTF export
    """
    scenes = []
    animations = []  # unfortunately animations in gltf2 are just as 'root' as scenes.
    active_scene = None
    store_user_scene = bpy.context.scene
    scenes_to_export = bpy.data.scenes if export_settings['gltf_active_scene'] is False else [
        scene for scene in bpy.data.scenes if scene.name == store_user_scene.name]
    for blender_scene in scenes_to_export:
        scenes.append(__gather_scene(blender_scene, export_settings))
        if export_settings['gltf_animations']:
            # resetting object cache
            get_cache_data.reset_cache()
            animations += gather_animations(export_settings)
        if bpy.context.scene.name == store_user_scene.name:
            active_scene = len(scenes) - 1

    # restore user scene
    bpy.context.window.scene = store_user_scene
    return active_scene, scenes, animations


@cached
def __gather_scene(blender_scene, export_settings):
    scene = gltf2_io.Scene(
        extensions=None,
        extras=__gather_extras(blender_scene, export_settings),
        name=blender_scene.name,
        nodes=[]
    )

    # Initialize some data needed for animation pointer
    export_settings['KHR_animation_pointer'] = {}
    export_settings['KHR_animation_pointer']['materials'] = {}
    export_settings['KHR_animation_pointer']['lights'] = {}
    export_settings['KHR_animation_pointer']['cameras'] = {}

    vtree = gltf2_blender_gather_tree.VExportTree(export_settings)
    vtree.construct(blender_scene)
    vtree.search_missing_armature()  # In case armatures are not parented correctly
    if export_settings['gltf_armature_object_remove'] is True:
        vtree.check_if_we_can_remove_armature()  # Check if we can remove the armature objects

    export_user_extensions('vtree_before_filter_hook', export_settings, vtree)

    # Now, we can filter the tree if needed
    vtree.filter()

    vtree.bake_armature_bone_list()  # Used in case we remove the armature. Doing it after filter, as filter can remove some bones

    if export_settings['gltf_flatten_bones_hierarchy'] is True:
        vtree.break_bone_hierarchy()
    if export_settings['gltf_flatten_obj_hierarchy'] is True:
        vtree.break_obj_hierarchy()

    vtree.variants_reset_to_original()

    export_user_extensions('vtree_after_filter_hook', export_settings, vtree)

    export_settings['vtree'] = vtree

    # If we don't remove armature objects, we can't have bones directly at the root of the scene
    # So looping only on root nodes, as they are all nodes, not bones
    if export_settings['gltf_armature_object_remove'] is False:
        for r in [vtree.nodes[r] for r in vtree.roots]:
            node = gltf2_blender_gather_nodes.gather_node(
                r, export_settings)
            if node is not None:
                scene.nodes.append(node)
    else:
        # If we remove armature objects, we can have bones at the root of the scene
        armature_root_joints = {}
        for r in [vtree.nodes[r] for r in vtree.roots]:
            # Classic Object/node case
            if r.blender_type != gltf2_blender_gather_tree.VExportNode.BONE:
                node = gltf2_blender_gather_nodes.gather_node(
                    r, export_settings)
                if node is not None:
                    scene.nodes.append(node)
            else:
                # We can have bones at the root of the scene because we removed the armature object
                # and the armature was at the root of the scene
                node = gltf2_blender_gather_joints.gather_joint_vnode(
                    r.uuid, export_settings)
                if node is not None:
                    scene.nodes.append(node)
                    if r.armature not in armature_root_joints.keys():
                        armature_root_joints[r.armature] = []
                    armature_root_joints[r.armature].append(node)

        # Manage objects parented to bones, now we go through all root objects
        for k, v in armature_root_joints.items():
            gltf2_blender_gather_nodes.get_objects_parented_to_bones(k, v, export_settings)

    vtree.add_neutral_bones()

    export_user_extensions('gather_scene_hook', export_settings, scene, blender_scene)

    return scene


def __gather_extras(blender_object, export_settings):
    if export_settings['gltf_extras']:
        return generate_extras(blender_object)
    return None
@ -0,0 +1,189 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import numpy as np

from ...io.com import gltf2_io
from ...io.com import gltf2_io_constants
from ...io.exp import gltf2_io_binary_data
from .gltf2_blender_gather_cache import cached


@cached
def gather_accessor(buffer_view: gltf2_io_binary_data.BinaryData,
                    component_type: gltf2_io_constants.ComponentType,
                    count,
                    max,
                    min,
                    type: gltf2_io_constants.DataType,
                    export_settings) -> gltf2_io.Accessor:
    return gltf2_io.Accessor(
        buffer_view=buffer_view,
        byte_offset=None,
        component_type=component_type,
        count=count,
        extensions=None,
        extras=None,
        max=list(max) if max is not None else None,
        min=list(min) if min is not None else None,
        name=None,
        normalized=None,
        sparse=None,
        type=type
    )


def array_to_accessor(
        array,
        export_settings,
        component_type,
        data_type,
        include_max_and_min=False,
        sparse_type=None,
        normalized=None,
):

    # Not trying to check if sparse is better
    if sparse_type is None:

        buffer_view = gltf2_io_binary_data.BinaryData(
            array.tobytes(),
            gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER,
        )

        amax = None
        amin = None
        if include_max_and_min:
            amax = np.amax(array, axis=0).tolist()
            amin = np.amin(array, axis=0).tolist()

        return gltf2_io.Accessor(
            buffer_view=buffer_view,
            byte_offset=None,
            component_type=component_type,
            count=len(array),
            extensions=None,
            extras=None,
            max=amax,
            min=amin,
            name=None,
            normalized=normalized,
            sparse=None,
            type=data_type,
        )

    # Trying to check if sparse is better (if the user wants it)
    buffer_view = None
    sparse = None

    try_sparse = False
    if sparse_type == "SK":
        try_sparse = export_settings['gltf_try_sparse_sk']

    if try_sparse:
        sparse, omit_sparse = __try_sparse_accessor(array)
    else:
        omit_sparse = False
    if not sparse and omit_sparse is False:
        buffer_view = gltf2_io_binary_data.BinaryData(
            array.tobytes(),
            gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER,
        )
    elif omit_sparse is True:
        if sparse_type == "SK" and export_settings['gltf_try_omit_sparse_sk'] is True:
            sparse = None  # sparse will be None, buffer_view too

    amax = None
    amin = None
    if include_max_and_min:
        amax = np.amax(array, axis=0).tolist()
        amin = np.amin(array, axis=0).tolist()

    return gltf2_io.Accessor(
        buffer_view=buffer_view,
        byte_offset=None,
        component_type=component_type,
        count=len(array),
        extensions=None,
        extras=None,
        max=amax,
        min=amin,
        name=None,
        normalized=None,
        sparse=sparse,
        type=data_type,
    )


def __try_sparse_accessor(array):
    """
    Return a tuple (sparse, omit_sparse):
    sparse is an AccessorSparse for the array, or None if writing a dense
    accessor would be better; omit_sparse is True if the sparse accessor
    could be omitted entirely (all-zero array).
    """

    omit_sparse = False

    # Find indices of non-zero elements
    nonzero_indices = np.where(np.any(array, axis=1))[0]

    # For all-zero arrays, omitting sparse entirely is legal but poorly
    # supported, so force nonzero_indices to be nonempty.
    if len(nonzero_indices) == 0:
        omit_sparse = True
        nonzero_indices = np.array([0])

    # How big of indices do we need?
    if nonzero_indices[-1] <= 255:
        indices_type = gltf2_io_constants.ComponentType.UnsignedByte
    elif nonzero_indices[-1] <= 65535:
        indices_type = gltf2_io_constants.ComponentType.UnsignedShort
    else:
        indices_type = gltf2_io_constants.ComponentType.UnsignedInt

    # Cast indices to the appropriate type (if needed)
    nonzero_indices = nonzero_indices.astype(
        gltf2_io_constants.ComponentType.to_numpy_dtype(indices_type),
        copy=False,
    )

    # Calculate size if we don't use sparse
    one_elem_size = len(array[:1].tobytes())
    dense_size = len(array) * one_elem_size

    # Calculate approximate size if we do use sparse
    indices_size = (
        len(nonzero_indices[:1].tobytes()) *
        len(nonzero_indices)
    )
    values_size = len(nonzero_indices) * one_elem_size
    json_increase = 170  # sparse makes the JSON about this much bigger
    penalty = 64  # further penalty avoids sparse in marginal cases
    sparse_size = indices_size + values_size + json_increase + penalty

    if sparse_size >= dense_size:
        return None, omit_sparse

    return gltf2_io.AccessorSparse(
        count=len(nonzero_indices),
        extensions=None,
        extras=None,
        indices=gltf2_io.AccessorSparseIndices(
            buffer_view=gltf2_io_binary_data.BinaryData(
                nonzero_indices.tobytes()
            ),
            byte_offset=None,
            component_type=indices_type,
            extensions=None,
            extras=None,
        ),
        values=gltf2_io.AccessorSparseValues(
            buffer_view=gltf2_io_binary_data.BinaryData(
                array[nonzero_indices].tobytes()
            ),
            byte_offset=None,
            extensions=None,
            extras=None,
        ),
    ), omit_sparse
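
# Worked size comparison for the heuristic above (hypothetical numbers):
# 100 vec3 float32 values with 5 non-zero entries, max index < 256:
#   dense_size  = 100 * 12 = 1200 bytes
#   sparse_size = 5 * 1 (byte indices) + 5 * 12 (values) + 170 + 64 = 299 bytes
# 299 < 1200, so a sparse accessor is returned.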
153
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cache.py
Executable file
@ -0,0 +1,153 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import functools


def cached_by_key(key):
    """
    Decorates functions whose result should be cached. Use it like:
        @cached_by_key(key=...)
        def func(..., export_settings):
            ...
    The decorated function, func, must always take an "export_settings" arg
    (the cache is stored here).
    The key argument to the decorator is a function that computes the key to
    cache on. It is passed all the arguments to func.
    """
    def inner(func):
        @functools.wraps(func)
        def wrapper_cached(*args, **kwargs):
            if kwargs.get("export_settings"):
                export_settings = kwargs["export_settings"]
            else:
                export_settings = args[-1]

            cache_key = key(*args, **kwargs)

            # invalidate cache if export settings have changed
            if not hasattr(func, "__export_settings") or export_settings != func.__export_settings:
                func.__cache = {}
                func.__export_settings = export_settings
            # use or fill cache
            if cache_key in func.__cache:
                return func.__cache[cache_key]
            else:
                result = func(*args, **kwargs)
                func.__cache[cache_key] = result
                return result

        return wrapper_cached

    return inner


def default_key(*args, **kwargs):
    """
    Default cache key for @cached functions.
    Cache on all arguments (except export_settings).
    """
    assert len(args) >= 2 and 0 <= len(kwargs) <= 1, "Wrong signature for cached function"
    cache_key_args = args
    # make a shallow copy of the keyword arguments so that 'export_settings' can be removed
    cache_key_kwargs = dict(kwargs)
    if kwargs.get("export_settings"):
        del cache_key_kwargs["export_settings"]
    else:
        cache_key_args = args[:-1]

    cache_key = ()
    for i in cache_key_args:
        cache_key += (i,)
    for i in cache_key_kwargs.values():
        cache_key += (i,)

    return cache_key
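
# Illustrative example (hypothetical arguments): for a call like
#   gather_camera(blender_camera, export_settings)
# default_key drops the trailing export_settings and caches on
#   (blender_camera,)
# so a second call with the same camera returns the cached result.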

def cached(func):
    return cached_by_key(key=default_key)(func)


def datacache(func):

    def reset_all_cache():
        func.__cache = {}

    func.reset_cache = reset_all_cache

    @functools.wraps(func)
    def wrapper_objectcache(*args, **kwargs):

        # 0 : path
        # 1 : object_uuid
        # 2 : bone (can be, of course, None for a path other than 'bone')
        # 3 : action_name
        # 4 : current_frame
        # 5 : step
        # 6 : export_settings
        # only_gather_provided : only_gather_provided

        cache_key_args = args
        cache_key_args = args[:-1]

        if not hasattr(func, "__cache"):
            func.reset_cache()

        # object is not cached yet
        if cache_key_args[1] not in func.__cache.keys():
            result = func(*args)
            func.__cache = result
            # Here are the keys used: result[obj_uuid][action_name][path][bone][frame]
            return result[cache_key_args[1]][cache_key_args[3]][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]]
        # object is in cache, but not this action
        # We need to not erase other actions of this object
        elif cache_key_args[3] not in func.__cache[cache_key_args[1]].keys():
            result = func(*args, only_gather_provided=True)
            # The result can contain multiple animations, in case this is an armature with drivers
            # Need to create all newly retrieved animations
            func.__cache.update(result)
            # Here are the keys used: result[obj_uuid][action_name][path][bone][frame]
            return result[cache_key_args[1]][cache_key_args[3]][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]]
        # all is already cached
        else:
            # Here are the keys used: result[obj_uuid][action_name][path][bone][frame]
            return func.__cache[cache_key_args[1]][cache_key_args[3]
                                                   ][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]]
    return wrapper_objectcache


# TODO: replace "cached" with "unique" in all cases where the caching is functional and not only for performance reasons
call_or_fetch = cached
unique = cached


def skdriverdiscovercache(func):

    def reset_cache_skdriverdiscovercache():
        func.__current_armature_uuid = None
        func.__skdriverdiscover = {}

    func.reset_cache = reset_cache_skdriverdiscovercache

    @functools.wraps(func)
    def wrapper_skdriverdiscover(*args, **kwargs):

        # 0 : armature_uuid
        # 1 : export_settings

        cache_key_args = args
        cache_key_args = args[:-1]

        if not hasattr(func, "__current_armature_uuid") or func.__current_armature_uuid is None:
            func.reset_cache()

        if cache_key_args[0] != func.__current_armature_uuid:
            result = func(*args)
            func.__skdriverdiscover[cache_key_args[0]] = result
            func.__current_armature_uuid = cache_key_args[0]
            return result
        else:
            return func.__skdriverdiscover[cache_key_args[0]]
    return wrapper_skdriverdiscover
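
# Illustrative lookup into the datacache layout, following the key order
# documented above (argument values here are hypothetical):
#   get_cache_data('matrix', obj_uuid, None, action_name, frame, step, export_settings)
# resolves to func.__cache[obj_uuid][action_name]['matrix'][None][frame].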
152
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_cameras.py
Executable file
@ -0,0 +1,152 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import math
from ...io.com import gltf2_io
from ...blender.com.gltf2_blender_conversion import yvof_blender_to_gltf
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras
from .gltf2_blender_gather_cache import cached


@cached
def gather_camera(blender_camera, export_settings):
    if not __filter_camera(blender_camera, export_settings):
        return None

    export_settings['current_paths'] = {}  # For KHR_animation_pointer

    camera = gltf2_io.Camera(
        extensions=__gather_extensions(blender_camera, export_settings),
        extras=__gather_extras(blender_camera, export_settings),
        name=__gather_name(blender_camera, export_settings),
        orthographic=__gather_orthographic(blender_camera, export_settings),
        perspective=__gather_perspective(blender_camera, export_settings),
        type=__gather_type(blender_camera, export_settings)
    )

    export_user_extensions('gather_camera_hook', export_settings, camera, blender_camera)

    return camera


def __filter_camera(blender_camera, export_settings):
    return bool(__gather_type(blender_camera, export_settings))


def __gather_extensions(blender_camera, export_settings):
    return None


def __gather_extras(blender_camera, export_settings):
    if export_settings['gltf_extras']:
        return generate_extras(blender_camera)
    return None


def __gather_name(blender_camera, export_settings):
    return blender_camera.name


def __gather_orthographic(blender_camera, export_settings):
    if __gather_type(blender_camera, export_settings) == "orthographic":
        orthographic = gltf2_io.CameraOrthographic(
            extensions=None,
            extras=None,
            xmag=None,
            ymag=None,
            zfar=None,
            znear=None
        )

        _render = bpy.context.scene.render
        scene_x = _render.resolution_x * _render.pixel_aspect_x
        scene_y = _render.resolution_y * _render.pixel_aspect_y
        scene_square = max(scene_x, scene_y)
        del _render

        # `Camera().ortho_scale` (and also the FOV, for the record) maps to the maximum of
        # either image width or image height. This is the box that gets shown from the
        # camera view with the checkbox `.show_sensor = True`.

        orthographic.xmag = blender_camera.ortho_scale * (scene_x / scene_square) / 2
        orthographic.ymag = blender_camera.ortho_scale * (scene_y / scene_square) / 2

        orthographic.znear = blender_camera.clip_start
        orthographic.zfar = blender_camera.clip_end

        # Store data for KHR_animation_pointer
        path_ = {}
        path_['length'] = 1
        path_['path'] = "/cameras/XXX/orthographic/xmag"
        export_settings['current_paths']['ortho_scale_x'] = path_

        path_ = {}
        path_['length'] = 1
        path_['path'] = "/cameras/XXX/orthographic/ymag"
        export_settings['current_paths']['ortho_scale_y'] = path_

        path_ = {}
        path_['length'] = 1
        path_['path'] = "/cameras/XXX/orthographic/zfar"
        export_settings['current_paths']['clip_end'] = path_

        path_ = {}
        path_['length'] = 1
        path_['path'] = "/cameras/XXX/orthographic/znear"
        export_settings['current_paths']['clip_start'] = path_

        return orthographic
    return None
|
||||
|
||||
def __gather_perspective(blender_camera, export_settings):
|
||||
if __gather_type(blender_camera, export_settings) == "perspective":
|
||||
perspective = gltf2_io.CameraPerspective(
|
||||
aspect_ratio=None,
|
||||
extensions=None,
|
||||
extras=None,
|
||||
yfov=None,
|
||||
zfar=None,
|
||||
znear=None
|
||||
)
|
||||
|
||||
_render = bpy.context.scene.render
|
||||
width = _render.pixel_aspect_x * _render.resolution_x
|
||||
height = _render.pixel_aspect_y * _render.resolution_y
|
||||
perspective.aspect_ratio = width / height
|
||||
del _render
|
||||
|
||||
perspective.yfov = yvof_blender_to_gltf(blender_camera.angle, width, height, blender_camera.sensor_fit)
|
||||
|
||||
perspective.znear = blender_camera.clip_start
|
||||
perspective.zfar = blender_camera.clip_end
|
||||
|
||||
path_ = {}
|
||||
path_['length'] = 1
|
||||
path_['path'] = "/cameras/XXX/perspective/zfar"
|
||||
export_settings['current_paths']['clip_end'] = path_
|
||||
|
||||
path_ = {}
|
||||
path_['length'] = 1
|
||||
path_['path'] = "/cameras/XXX/perspective/znear"
|
||||
export_settings['current_paths']['clip_start'] = path_
|
||||
|
||||
path_ = {}
|
||||
path_['length'] = 1
|
||||
path_['path'] = "/cameras/XXX/perspective/yfov"
|
||||
path_['sensor_fit'] = 'sensor_fit'
|
||||
export_settings['current_paths']['angle'] = path_
|
||||
|
||||
# aspect ratio is not animatable in blender
|
||||
|
||||
return perspective
|
||||
return None
|
||||
|
||||
|
||||
def __gather_type(blender_camera, export_settings):
|
||||
if blender_camera.type == 'PERSP':
|
||||
return "perspective"
|
||||
elif blender_camera.type == 'ORTHO':
|
||||
return "orthographic"
|
||||
return None
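Since `ortho_scale` maps to the larger render dimension, `xmag`/`ymag` come out as half-extents scaled by the aspect ratio. A quick numeric check of the formulas above (values are illustrative, not from the source):

# Illustrative check of the xmag/ymag mapping for a 1920x1080 render, ortho_scale = 6.0.
scene_x, scene_y = 1920 * 1.0, 1080 * 1.0   # resolution * pixel aspect
scene_square = max(scene_x, scene_y)        # 1920.0
xmag = 6.0 * (scene_x / scene_square) / 2   # 3.0     (half of ortho_scale)
ymag = 6.0 * (scene_y / scene_square) / 2   # 1.6875  (scaled by 1080/1920)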
110
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py
Executable file
@@ -0,0 +1,110 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

from mathutils import Quaternion, Vector
from ...io.com import gltf2_io
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras
from .gltf2_blender_gather_cache import cached
from . import gltf2_blender_gather_tree


# TODO: these 3 functions should move to a shared file
def __convert_swizzle_location(loc, export_settings):
    """Convert a location from the Blender coordinate system to the glTF coordinate system."""
    if export_settings['gltf_yup']:
        return Vector((loc[0], loc[2], -loc[1]))
    else:
        return Vector((loc[0], loc[1], loc[2]))


def __convert_swizzle_rotation(rot, export_settings):
    """
    Convert a quaternion rotation from the Blender coordinate system to the glTF coordinate system.

    'w' stays in first position.
    """
    if export_settings['gltf_yup']:
        return Quaternion((rot[0], rot[1], rot[3], -rot[2]))
    else:
        return Quaternion((rot[0], rot[1], rot[2], rot[3]))


def __convert_swizzle_scale(scale, export_settings):
    """Convert a scale from the Blender coordinate system to the glTF coordinate system."""
    if export_settings['gltf_yup']:
        return Vector((scale[0], scale[2], scale[1]))
    else:
        return Vector((scale[0], scale[1], scale[2]))


@cached
def gather_joint_vnode(vnode, export_settings):
    """
    Generate a glTF2 node from a blender bone, as joints in glTF2 are simply nodes.

    :param vnode: the uuid (in the export vtree) of a blender PoseBone
    :param export_settings: the settings for this export
    :return: a glTF2 node (acting as a joint)
    """
    vtree = export_settings['vtree']
    blender_bone = vtree.nodes[vnode].blender_bone

    if export_settings['gltf_armature_object_remove'] is True:
        if vtree.nodes[vnode].parent_uuid is not None:
            mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() \
                @ vtree.nodes[vnode].matrix_world
        else:
            mat = vtree.nodes[vnode].matrix_world
    else:
        mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world

    trans, rot, sca = mat.decompose()

    trans = __convert_swizzle_location(trans, export_settings)
    rot = __convert_swizzle_rotation(rot, export_settings)
    sca = __convert_swizzle_scale(sca, export_settings)

    translation, rotation, scale = (None, None, None)
    if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0:
        translation = [trans[0], trans[1], trans[2]]
    if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0:
        rotation = [rot[1], rot[2], rot[3], rot[0]]
    if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0:
        scale = [sca[0], sca[1], sca[2]]

    # Traverse into children
    children = []

    for bone_uuid in [c for c in vtree.nodes[vnode].children if vtree.nodes[c].blender_type ==
                      gltf2_blender_gather_tree.VExportNode.BONE]:
        children.append(gather_joint_vnode(bone_uuid, export_settings))

    # Finally add to the joints array containing all the joints in the hierarchy
    node = gltf2_io.Node(
        camera=None,
        children=children,
        extensions=None,
        extras=__gather_extras(blender_bone, export_settings),
        matrix=None,
        mesh=None,
        name=blender_bone.name if vtree.nodes[vnode].leaf_reference is None
        else vtree.nodes[vtree.nodes[vnode].leaf_reference].blender_bone.name + '_leaf',
        rotation=rotation,
        scale=scale,
        skin=None,
        translation=translation,
        weights=None
    )

    export_user_extensions('gather_joint_hook', export_settings, node, blender_bone)

    vtree.nodes[vnode].node = node

    return node


def __gather_extras(blender_bone, export_settings):
    if export_settings['gltf_extras']:
        return generate_extras(blender_bone.bone)
    return None
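The three swizzle helpers implement the Z-up to Y-up change of basis. A small sanity check, runnable inside Blender's Python (values are illustrative):

# Illustrative: Blender +Z (up) becomes glTF +Y under the location swizzle.
from mathutils import Vector

def swizzle_location_yup(loc):
    return Vector((loc[0], loc[2], -loc[1]))

print(swizzle_location_yup(Vector((0.0, 0.0, 1.0))))  # <Vector (0.0, 1.0, -0.0)>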
47
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_light_spots.py
Executable file
@@ -0,0 +1,47 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

from typing import Optional
from ...io.com import gltf2_io_lights_punctual


def gather_light_spot(blender_lamp, export_settings) -> Optional[gltf2_io_lights_punctual.LightSpot]:

    if not __filter_light_spot(blender_lamp, export_settings):
        return None

    spot = gltf2_io_lights_punctual.LightSpot(
        inner_cone_angle=__gather_inner_cone_angle(blender_lamp, export_settings),
        outer_cone_angle=__gather_outer_cone_angle(blender_lamp, export_settings)
    )
    return spot


def __filter_light_spot(blender_lamp, _) -> bool:
    if blender_lamp.type != "SPOT":
        return False

    return True


def __gather_inner_cone_angle(blender_lamp, export_settings) -> Optional[float]:
    angle = blender_lamp.spot_size * 0.5

    path_ = {}
    path_['length'] = 1
    path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/spot.innerConeAngle"
    path_['additional_path'] = "spot_size"
    export_settings['current_paths']["spot_blend"] = path_

    return angle - angle * blender_lamp.spot_blend


def __gather_outer_cone_angle(blender_lamp, export_settings) -> Optional[float]:

    path_ = {}
    path_['length'] = 1
    path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/spot.outerConeAngle"
    export_settings['current_paths']["spot_size"] = path_

    return blender_lamp.spot_size * 0.5
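glTF cone angles are half-angles, and Blender's `spot_blend` eats inward from the outer cone, which is what the two functions above encode. Worked numbers (illustrative):

import math

spot_size = math.radians(90.0)       # Blender's full cone angle
spot_blend = 0.15

outer = spot_size * 0.5              # ~0.7854 rad (45 degree half-angle)
inner = outer - outer * spot_blend   # ~0.6676 rad (~38.25 degrees)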
196
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py
Executable file
@@ -0,0 +1,196 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
import math
from typing import Optional, List, Dict, Any
from ...io.com import gltf2_io_lights_punctual
from ...io.com import gltf2_io_debug
from ..com.gltf2_blender_extras import generate_extras
from ..com.gltf2_blender_conversion import PBR_WATTS_TO_LUMENS
from ..com.gltf2_blender_default import LIGHTS
from .gltf2_blender_gather_cache import cached
from . import gltf2_blender_gather_light_spots
from .material import gltf2_blender_search_node_tree


@cached
def gather_lights_punctual(blender_lamp, export_settings) -> Optional[Dict[str, Any]]:

    export_settings['current_paths'] = {}  # For KHR_animation_pointer

    if not __filter_lights_punctual(blender_lamp, export_settings):
        return None

    light = gltf2_io_lights_punctual.Light(
        color=__gather_color(blender_lamp, export_settings),
        intensity=__gather_intensity(blender_lamp, export_settings),
        spot=__gather_spot(blender_lamp, export_settings),
        type=__gather_type(blender_lamp, export_settings),
        range=__gather_range(blender_lamp, export_settings),
        name=__gather_name(blender_lamp, export_settings),
        extensions=__gather_extensions(blender_lamp, export_settings),
        extras=__gather_extras(blender_lamp, export_settings)
    )

    return light.to_dict()


def __filter_lights_punctual(blender_lamp, export_settings) -> bool:
    if blender_lamp.type in ["HEMI", "AREA"]:
        export_settings['log'].warning("Unsupported light source {}".format(blender_lamp.type))
        return False

    return True


def __gather_color(blender_lamp, export_settings) -> Optional[List[float]]:
    emission_node = __get_cycles_emission_node(blender_lamp)
    if emission_node is not None:

        # Store data for KHR_animation_pointer
        path_ = {}
        path_['length'] = 3
        path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/color"
        export_settings['current_paths']["node_tree." + emission_node.inputs["Color"].path_from_id() +
                                         ".default_value"] = path_

        return list(emission_node.inputs["Color"].default_value)[:3]

    # Store data for KHR_animation_pointer
    path_ = {}
    path_['length'] = 3
    path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/color"
    export_settings['current_paths']['color'] = path_

    return list(blender_lamp.color)


def __gather_intensity(blender_lamp, export_settings) -> Optional[float]:
    emission_node = __get_cycles_emission_node(blender_lamp)
    if emission_node is not None:
        if blender_lamp.type != 'SUN':
            # When using cycles, the strength should be influenced by a LightFalloff node
            result = gltf2_blender_search_node_tree.from_socket(
                gltf2_blender_search_node_tree.NodeSocket(emission_node.inputs.get("Strength"), blender_lamp.node_tree),
                gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeLightFalloff)
            )
            if result:
                quadratic_falloff_node = result[0].shader_node
                emission_strength = quadratic_falloff_node.inputs["Strength"].default_value / (math.pi * 4.0)

                # Store data for KHR_animation_pointer
                path_ = {}
                path_['length'] = 1
                path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/intensity"
                path_['lamp_type'] = blender_lamp.type
                export_settings['current_paths']["node_tree." +
                                                 quadratic_falloff_node.inputs["Strength"].path_from_id() +
                                                 ".default_value"] = path_

            else:
                export_settings['log'].warning('No quadratic light falloff node attached to emission strength property')

                path_ = {}
                path_['length'] = 1
                path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/intensity"
                path_['lamp_type'] = blender_lamp.type
                export_settings['current_paths']["energy"] = path_

                emission_strength = blender_lamp.energy
        else:
            emission_strength = emission_node.inputs["Strength"].default_value

            path_ = {}
            path_['length'] = 1
            path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/intensity"
            path_['lamp_type'] = blender_lamp.type
            export_settings['current_paths']["node_tree." +
                                             emission_node.inputs["Strength"].path_from_id() +
                                             ".default_value"] = path_

    else:
        emission_strength = blender_lamp.energy

        path_ = {}
        path_['length'] = 1
        path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/intensity"
        path_['lamp_type'] = blender_lamp.type
        export_settings['current_paths']["energy"] = path_

    if export_settings['gltf_lighting_mode'] == 'RAW':
        return emission_strength
    else:
        # Assume at this point the computed strength is still in the appropriate
        # watt-related SI unit, which it hopefully should be if everything up to
        # here was done on a physical basis.
        if blender_lamp.type == 'SUN':  # W/m^2 in Blender to lm/m^2 for glTF/KHR_lights_punctual.
            emission_luminous = emission_strength
        else:
            # Other than directional, only point and spot lamps are supported by glTF.
            # In Blender, points are omnidirectional W, and spots are specified as if they're points.
            # Point and spot should both be lm/sr in glTF.
            emission_luminous = emission_strength / (4 * math.pi)
        if export_settings['gltf_lighting_mode'] == 'SPEC':
            emission_luminous *= PBR_WATTS_TO_LUMENS
        elif export_settings['gltf_lighting_mode'] == 'COMPAT':
            pass  # Just so we have an exhaustive tree to catch bugged values.
        else:
            raise ValueError(export_settings['gltf_lighting_mode'])
        return emission_luminous


def __gather_spot(blender_lamp, export_settings) -> Optional[gltf2_io_lights_punctual.LightSpot]:
    if blender_lamp.type == "SPOT":
        return gltf2_blender_gather_light_spots.gather_light_spot(blender_lamp, export_settings)
    return None


def __gather_type(blender_lamp, _) -> str:
    return LIGHTS[blender_lamp.type]


def __gather_range(blender_lamp, export_settings) -> Optional[float]:
    if blender_lamp.use_custom_distance:

        path_ = {}
        path_['length'] = 1
        path_['path'] = "/extensions/KHR_lights_punctual/lights/XXX/range"
        export_settings['current_paths']["cutoff_distance"] = path_

        return blender_lamp.cutoff_distance
    return None


def __gather_name(blender_lamp, export_settings) -> Optional[str]:
    return blender_lamp.name


def __gather_extensions(blender_lamp, export_settings) -> Optional[dict]:
    return None


def __gather_extras(blender_lamp, export_settings) -> Optional[Any]:
    if export_settings['gltf_extras']:
        return generate_extras(blender_lamp)
    return None


def __get_cycles_emission_node(blender_lamp) -> Optional[bpy.types.ShaderNodeEmission]:
    if blender_lamp.use_nodes and blender_lamp.node_tree:
        for currentNode in blender_lamp.node_tree.nodes:
            is_shadernode_output = isinstance(currentNode, bpy.types.ShaderNodeOutputLight)
            if is_shadernode_output:
                if not currentNode.is_active_output:
                    continue
                result = gltf2_blender_search_node_tree.from_socket(
                    gltf2_blender_search_node_tree.NodeSocket(
                        currentNode.inputs.get("Surface"),
                        blender_lamp.node_tree),
                    gltf2_blender_search_node_tree.FilterByType(
                        bpy.types.ShaderNodeEmission))
                if not result:
                    continue
                return result[0].shader_node
    return None
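For the non-RAW lighting modes, `__gather_intensity` converts Blender's watt-based energy to the photometric units glTF expects. Worked numbers for a point lamp in SPEC mode, assuming `PBR_WATTS_TO_LUMENS` is 683 as in the add-on's conversion module (values illustrative):

import math

energy_watts = 100.0                                # Blender point light, omnidirectional watts
PBR_WATTS_TO_LUMENS = 683                           # luminous efficacy factor used by the exporter
radiant_intensity = energy_watts / (4 * math.pi)    # ~7.96 W/sr
candela = radiant_intensity * PBR_WATTS_TO_LUMENS   # ~5436 cd, the glTF intensity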
160
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_mesh.py
Executable file
@@ -0,0 +1,160 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import bpy
from typing import Optional, Dict, List, Any, Tuple
from ...io.com import gltf2_io
from ...blender.com.gltf2_blender_data_path import get_sk_exported
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras
from . import gltf2_blender_gather_primitives
from .gltf2_blender_gather_cache import cached_by_key


def get_mesh_cache_key(blender_mesh,
                       blender_object,
                       vertex_groups,
                       modifiers,
                       materials,
                       original_mesh,
                       export_settings):
    # Use the id of the original mesh
    # Do not use bpy.types values, which can be unhashable
    # Do not use the mesh name, which may not be unique (e.g. for linked data)

    # If materials are not exported, there is no need to cache by material
    if export_settings['gltf_materials'] is None:
        mats = None
    else:
        mats = tuple(id(m) if m is not None else None for m in materials)

    # TODO check what is really needed for modifiers

    mesh_to_id_cache = blender_mesh if original_mesh is None else original_mesh
    return (
        (id(mesh_to_id_cache),),
        (modifiers,),
        mats
    )


@cached_by_key(key=get_mesh_cache_key)
def gather_mesh(blender_mesh: bpy.types.Mesh,
                uuid_for_skined_data,
                vertex_groups: bpy.types.VertexGroups,
                modifiers: Optional[bpy.types.ObjectModifiers],
                materials: Tuple[bpy.types.Material],
                original_mesh: bpy.types.Mesh,
                export_settings
                ) -> Optional[gltf2_io.Mesh]:
    if not __filter_mesh(blender_mesh, vertex_groups, modifiers, export_settings):
        return None

    mesh = gltf2_io.Mesh(
        extensions=__gather_extensions(blender_mesh, vertex_groups, modifiers, export_settings),
        extras=__gather_extras(blender_mesh, vertex_groups, modifiers, export_settings),
        name=__gather_name(blender_mesh, vertex_groups, modifiers, export_settings),
        weights=__gather_weights(blender_mesh, vertex_groups, modifiers, export_settings),
        primitives=__gather_primitives(blender_mesh, uuid_for_skined_data, vertex_groups, modifiers, materials, export_settings),
    )

    if len(mesh.primitives) == 0:
        export_settings['log'].warning("Mesh '{}' has no primitives and will be omitted.".format(mesh.name))
        return None

    blender_object = None
    if uuid_for_skined_data:
        blender_object = export_settings['vtree'].nodes[uuid_for_skined_data].blender_object

    export_user_extensions('gather_mesh_hook',
                           export_settings,
                           mesh,
                           blender_mesh,
                           blender_object,
                           vertex_groups,
                           modifiers,
                           materials)

    return mesh


def __filter_mesh(blender_mesh: bpy.types.Mesh,
                  vertex_groups: bpy.types.VertexGroups,
                  modifiers: Optional[bpy.types.ObjectModifiers],
                  export_settings
                  ) -> bool:
    return True


def __gather_extensions(blender_mesh: bpy.types.Mesh,
                        vertex_groups: bpy.types.VertexGroups,
                        modifiers: Optional[bpy.types.ObjectModifiers],
                        export_settings
                        ) -> Any:
    return None


def __gather_extras(blender_mesh: bpy.types.Mesh,
                    vertex_groups: bpy.types.VertexGroups,
                    modifiers: Optional[bpy.types.ObjectModifiers],
                    export_settings
                    ) -> Optional[Dict[Any, Any]]:

    extras = {}

    if export_settings['gltf_extras']:
        extras = generate_extras(blender_mesh) or {}

    # Not for GN Instances
    if export_settings['gltf_morph'] and blender_mesh.shape_keys and blender_mesh.users != 0:
        morph_max = len(blender_mesh.shape_keys.key_blocks) - 1
        if morph_max > 0:
            extras['targetNames'] = [k.name for k in get_sk_exported(blender_mesh.shape_keys.key_blocks)]

    if extras:
        return extras

    return None


def __gather_name(blender_mesh: bpy.types.Mesh,
                  vertex_groups: bpy.types.VertexGroups,
                  modifiers: Optional[bpy.types.ObjectModifiers],
                  export_settings
                  ) -> str:
    return blender_mesh.name


def __gather_primitives(blender_mesh: bpy.types.Mesh,
                        uuid_for_skined_data,
                        vertex_groups: bpy.types.VertexGroups,
                        modifiers: Optional[bpy.types.ObjectModifiers],
                        materials: Tuple[bpy.types.Material],
                        export_settings
                        ) -> List[gltf2_io.MeshPrimitive]:
    return gltf2_blender_gather_primitives.gather_primitives(blender_mesh,
                                                             uuid_for_skined_data,
                                                             vertex_groups,
                                                             modifiers,
                                                             materials,
                                                             export_settings)


def __gather_weights(blender_mesh: bpy.types.Mesh,
                     vertex_groups: bpy.types.VertexGroups,
                     modifiers: Optional[bpy.types.ObjectModifiers],
                     export_settings
                     ) -> Optional[List[float]]:
    if not export_settings['gltf_morph'] or not blender_mesh.shape_keys:
        return None

    # Not for GN Instances
    if blender_mesh.users == 0:
        return None

    morph_max = len(blender_mesh.shape_keys.key_blocks) - 1
    if morph_max <= 0:
        return None

    return [k.value for k in get_sk_exported(blender_mesh.shape_keys.key_blocks)]
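Because the cache key is built from `id()` of the original mesh datablock (plus modifier and material ids), two objects sharing one mesh collapse into a single glTF mesh. A stripped-down illustration of that keying idea, not the add-on's actual `cached_by_key` implementation:

# Hypothetical reduction of cached_by_key: memoize on a caller-supplied key function.
def cached_by_key(key):
    def decorator(func):
        cache = {}

        def wrapper(*args, **kwargs):
            k = key(*args, **kwargs)      # e.g. ((id(mesh),), (modifiers,), material_ids)
            if k not in cache:
                cache[k] = func(*args, **kwargs)
            return cache[k]
        return wrapper
    return decorator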
530
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py
Executable file
@@ -0,0 +1,530 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import math
import bpy
from mathutils import Matrix, Quaternion, Vector

from ...io.com import gltf2_io
from ...io.com import gltf2_io_extensions
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras
from ..com.gltf2_blender_default import LIGHTS
from ..com import gltf2_blender_math
from . import gltf2_blender_gather_tree
from . import gltf2_blender_gather_skins
from . import gltf2_blender_gather_cameras
from . import gltf2_blender_gather_mesh
from . import gltf2_blender_gather_joints
from . import gltf2_blender_gather_lights
from .gltf2_blender_gather_tree import VExportNode


def gather_node(vnode, export_settings):

    blender_object = vnode.blender_object

    skin = gather_skin(vnode.uuid, export_settings)
    if skin is not None:
        vnode.skin = skin

    # Hook to check if we should export the mesh or not (force it to None)

    class GltfHookNodeMesh:
        def __init__(self):
            self.export_mesh = True

    gltf_hook_node_mesh = GltfHookNodeMesh()

    export_user_extensions('gather_node_mesh_hook', export_settings, gltf_hook_node_mesh, blender_object)
    if gltf_hook_node_mesh.export_mesh is True:
        mesh = __gather_mesh(vnode, blender_object, export_settings)
    else:
        mesh = None

    node = gltf2_io.Node(
        camera=__gather_camera(vnode, export_settings),
        children=__gather_children(vnode, export_settings),
        extensions=__gather_extensions(vnode, export_settings),
        extras=__gather_extras(blender_object, export_settings),
        matrix=__gather_matrix(blender_object, export_settings),
        mesh=mesh,
        name=__gather_name(blender_object, export_settings),
        rotation=None,
        scale=None,
        skin=skin,
        translation=None,
        weights=__gather_weights(blender_object, export_settings)
    )

    # If the node's mesh is skinned, transforms should be ignored at import, so there is no need to set them here
    if node.skin is None:
        node.translation, node.rotation, node.scale = __gather_trans_rot_scale(vnode, export_settings)

    export_user_extensions('gather_node_hook', export_settings, node, blender_object)

    vnode.node = node

    return node


def __gather_camera(vnode, export_settings):
    if not vnode.blender_object:
        return
    if vnode.blender_type == VExportNode.COLLECTION:
        return None
    if vnode.blender_object.type != 'CAMERA':
        return None

    cam = gltf2_blender_gather_cameras.gather_camera(vnode.blender_object.data, export_settings)

    if len(export_settings['current_paths']) > 0:
        export_settings['KHR_animation_pointer']['cameras'][id(vnode.blender_object.data)] = {}
        export_settings['KHR_animation_pointer']['cameras'][id(vnode.blender_object.data)]['paths'] = \
            export_settings['current_paths'].copy()
        export_settings['KHR_animation_pointer']['cameras'][id(vnode.blender_object.data)]['glTF_camera'] = cam

    export_settings['current_paths'] = {}

    return cam


def __gather_children(vnode, export_settings):
    children = []

    vtree = export_settings['vtree']

    armature_object_uuid = None

    # Standard children / collection
    if export_settings['gltf_armature_object_remove'] is False:
        for c in [vtree.nodes[c]
                  for c in vnode.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE]:
            node = gather_node(c, export_settings)
            if node is not None:
                children.append(node)
    else:
        root_joints = []
        for c in [vtree.nodes[c] for c in vnode.children]:
            if c.blender_type != gltf2_blender_gather_tree.VExportNode.BONE:
                node = gather_node(c, export_settings)
                if node is not None:
                    children.append(node)
            else:
                # We get here because the armature was removed, and a bone can be a child of any object
                joint = gltf2_blender_gather_joints.gather_joint_vnode(c.uuid, export_settings)
                children.append(joint)
                armature_object_uuid = c.armature
                root_joints.append(joint)

    # Now that we have all bone children (root joints), we can get the objects parented to bones

    # Armature --> Retrieve Blender bones
    # This can't happen if we removed the Armature object
    if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE:
        armature_object_uuid = vnode.uuid
        root_joints = []
        root_bones_uuid = export_settings['vtree'].get_root_bones_uuid(vnode.uuid)
        for bone_uuid in root_bones_uuid:
            joint = gltf2_blender_gather_joints.gather_joint_vnode(bone_uuid, export_settings)
            children.append(joint)
            root_joints.append(joint)

    if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE \
            or armature_object_uuid is not None:

        # Objects parented to bones
        get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings)

    return children


def get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings):
    vtree = export_settings['vtree']
    direct_bone_children = []
    for n in [vtree.nodes[i] for i in vtree.get_all_bones(armature_object_uuid)]:
        direct_bone_children.extend([c for c in n.children if vtree.nodes[c].blender_type !=
                                     gltf2_blender_gather_tree.VExportNode.BONE])

    for child in direct_bone_children:  # List of objects that are parented to bones
        # Find the parent joint
        parent_joint = __find_parent_joint(root_joints, vtree.nodes[child].blender_object.parent_bone)
        if not parent_joint:
            continue
        child_node = gather_node(vtree.nodes[child], export_settings)
        if child_node is None:
            continue

        mat = vtree.nodes[vtree.nodes[child].parent_bone_uuid].matrix_world.inverted_safe() \
            @ vtree.nodes[child].matrix_world
        loc, rot_quat, scale = mat.decompose()

        trans = __convert_swizzle_location(loc, export_settings)
        rot = __convert_swizzle_rotation(rot_quat, export_settings)
        sca = __convert_swizzle_scale(scale, export_settings)

        translation, rotation, scale = (None, None, None)
        if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0:
            translation = [trans[0], trans[1], trans[2]]
        if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0:
            rotation = [rot[1], rot[2], rot[3], rot[0]]
        if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0:
            scale = [sca[0], sca[1], sca[2]]

        child_node.translation = translation
        child_node.rotation = rotation
        child_node.scale = scale

        parent_joint.children.append(child_node)


def __find_parent_joint(joints, name):
    for joint in joints:
        if joint.name == name:
            return joint
        parent_joint = __find_parent_joint(joint.children, name)
        if parent_joint:
            return parent_joint
    return None


def __gather_extensions(vnode, export_settings):
    blender_object = vnode.blender_object
    extensions = {}

    blender_lamp = None
    if vnode.blender_type == VExportNode.COLLECTION:
        return None

    if export_settings["gltf_lights"] and vnode.blender_type == VExportNode.INSTANCE and vnode.data is not None:
        if vnode.data.type in LIGHTS:
            blender_lamp = vnode.data
    elif export_settings["gltf_lights"] and blender_object is not None and (blender_object.type == "LAMP" or blender_object.type == "LIGHT"):
        blender_lamp = blender_object.data

    if blender_lamp is not None:
        light = gltf2_blender_gather_lights.gather_lights_punctual(
            blender_lamp,
            export_settings
        )
        if light is not None:
            light_extension = gltf2_io_extensions.ChildOfRootExtension(
                name="KHR_lights_punctual",
                path=["lights"],
                extension=light
            )
            extensions["KHR_lights_punctual"] = gltf2_io_extensions.Extension(
                name="KHR_lights_punctual",
                extension={
                    "light": light_extension
                }
            )
            if len(export_settings['current_paths']) > 0:
                export_settings['KHR_animation_pointer']['lights'][id(blender_lamp)] = {}
                export_settings['KHR_animation_pointer']['lights'][id(blender_lamp)]['paths'] = \
                    export_settings['current_paths'].copy()
                export_settings['KHR_animation_pointer']['lights'][id(blender_lamp)]['glTF_light'] = light_extension

            export_settings['current_paths'] = {}

    return extensions if extensions else None


def __gather_extras(blender_object, export_settings):
    if export_settings['gltf_extras']:
        return generate_extras(blender_object)
    return None


def __gather_matrix(blender_object, export_settings):
    # return blender_object.matrix_local
    return []


def __gather_mesh(vnode, blender_object, export_settings):
    if vnode.blender_type == VExportNode.COLLECTION:
        return None
    if blender_object and blender_object.type in ['CURVE', 'SURFACE', 'FONT']:
        return __gather_mesh_from_nonmesh(blender_object, export_settings)
    if blender_object is None and type(vnode.data).__name__ not in ["Mesh"]:
        return None  # TODO
    if blender_object is None:
        # GN instance
        blender_mesh = vnode.data
        # Keep materials from the tmp mesh, but if there is no material, keep them from the object
        materials = tuple(mat for mat in blender_mesh.materials)
        if len(materials) == 1 and materials[0] is None:
            materials = tuple(ms.material for ms in vnode.original_object.material_slots)

        uuid_for_skined_data = None
        modifiers = None

        if blender_mesh is None:
            return None

    else:
        if blender_object.type != "MESH":
            return None
        # For a duplis instancer, when show is off -> export as empty
        if vnode.force_as_empty is True:
            return None
        # Be sure that the object is valid (no NaN for example)
        res = blender_object.data.validate()
        if res is True:
            export_settings['log'].warning("Mesh " + blender_object.data.name +
                                           " is not valid, and may be exported wrongly")

        modifiers = blender_object.modifiers
        if len(modifiers) == 0:
            modifiers = None

        if export_settings['gltf_apply']:
            if modifiers is None:  # If there is no modifier, use the original mesh; it will instance all shared meshes in a single glTF mesh
                blender_mesh = blender_object.data
                # Keep materials from the object, as no modifiers are applied, so there is no risk that
                # modifiers changed them
                materials = tuple(ms.material for ms in blender_object.material_slots)
            else:
                armature_modifiers = {}
                if export_settings['gltf_skins']:
                    # Temporarily disable Armature modifiers if exporting skins
                    for idx, modifier in enumerate(blender_object.modifiers):
                        if modifier.type == 'ARMATURE':
                            armature_modifiers[idx] = modifier.show_viewport
                            modifier.show_viewport = False

                depsgraph = bpy.context.evaluated_depsgraph_get()
                blender_mesh_owner = blender_object.evaluated_get(depsgraph)
                blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph)
                for prop in blender_object.data.keys():
                    blender_mesh[prop] = blender_object.data[prop]

                if export_settings['gltf_skins']:
                    # Restore Armature modifiers
                    for idx, show_viewport in armature_modifiers.items():
                        blender_object.modifiers[idx].show_viewport = show_viewport

                # Keep materials from the newly created tmp mesh, but if there are no materials, keep them from the object
                materials = tuple(mat for mat in blender_mesh.materials)
                if len(materials) == 1 and materials[0] is None:
                    materials = tuple(ms.material for ms in blender_object.material_slots)

        else:
            blender_mesh = blender_object.data
            if not export_settings['gltf_skins']:
                modifiers = None
            else:
                # Check if there is an armature modifier
                if len([mod for mod in blender_object.modifiers if mod.type == "ARMATURE"]) == 0:
                    modifiers = None

            # Keep materials from the object, as no modifiers are applied, so there is no risk that
            # modifiers changed them
            materials = tuple(ms.material for ms in blender_object.material_slots)

        # Retrieve the armature
        # Because the mesh data will be transformed to skeleton space,
        # we can't instantiate multiple objects at different locations that are skinned by the same armature
        uuid_for_skined_data = None
        if export_settings['gltf_skins']:
            for idx, modifier in enumerate(blender_object.modifiers):
                if modifier.type == 'ARMATURE':
                    uuid_for_skined_data = vnode.uuid

    result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh,
                                                   uuid_for_skined_data,
                                                   blender_object.vertex_groups if blender_object else None,
                                                   modifiers,
                                                   materials,
                                                   None,
                                                   export_settings)

    if export_settings['gltf_apply'] and modifiers is not None:
        blender_mesh_owner.to_mesh_clear()

    return result


def __gather_mesh_from_nonmesh(blender_object, export_settings):
    """Handle curves, surfaces, text, etc."""
    needs_to_mesh_clear = False
    try:
        # Convert to a mesh
        try:
            if export_settings['gltf_apply']:
                depsgraph = bpy.context.evaluated_depsgraph_get()
                blender_mesh_owner = blender_object.evaluated_get(depsgraph)
                blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph)
                # TODO: do we need preserve_all_data_layers?

            else:
                blender_mesh_owner = blender_object
                blender_mesh = blender_mesh_owner.to_mesh()

            # In some cases (for example a curve with a single vertex), no blender_mesh is created (without a crash)
            if blender_mesh is None:
                return None

        except Exception:
            return None

        needs_to_mesh_clear = True

        materials = tuple([ms.material for ms in blender_object.material_slots if ms.material is not None])
        modifiers = None
        blender_object_for_skined_data = None

        result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh,
                                                       blender_object_for_skined_data,
                                                       blender_object.vertex_groups,
                                                       modifiers,
                                                       materials,
                                                       blender_object.data,
                                                       export_settings)

    finally:
        if needs_to_mesh_clear:
            blender_mesh_owner.to_mesh_clear()

    return result


def __gather_name(blender_object, export_settings):

    new_name = blender_object.name if blender_object else "GN Instance"

    class GltfHookName:
        def __init__(self, name):
            self.name = name
    gltf_hook_name = GltfHookName(new_name)

    export_user_extensions('gather_node_name_hook', export_settings, gltf_hook_name, blender_object)
    return gltf_hook_name.name


def __gather_trans_rot_scale(vnode, export_settings):
    if vnode.parent_uuid is None:
        # No parent, so the matrix is the world matrix
        trans, rot, sca = vnode.matrix_world.decompose()
    else:
        # Calculate the local matrix
        if export_settings['vtree'].nodes[vnode.parent_uuid].skin is None:
            trans, rot, sca = (
                export_settings['vtree'].nodes[vnode.parent_uuid].matrix_world.inverted_safe() @ vnode.matrix_world).decompose()
        else:
            # But ... if the parent has a skin, the parent TRS is not taken into account,
            # so don't get the local matrix from the parent, but from the armature.
            # It also depends on whether the skinned mesh is parented to the armature or not.
            if export_settings['vtree'].nodes[vnode.parent_uuid].parent_uuid is not None and export_settings['vtree'].nodes[
                    export_settings['vtree'].nodes[vnode.parent_uuid].parent_uuid].blender_type == VExportNode.ARMATURE:
                trans, rot, sca = (export_settings['vtree'].nodes[export_settings['vtree'].nodes[vnode.parent_uuid].armature]
                                   .matrix_world.inverted_safe() @ vnode.matrix_world).decompose()
            else:
                trans, rot, sca = vnode.matrix_world.decompose()

    # Make sure the rotation is normalized
    rot.normalize()

    trans = __convert_swizzle_location(trans, export_settings)
    rot = __convert_swizzle_rotation(rot, export_settings)
    sca = __convert_swizzle_scale(sca, export_settings)

    if vnode.blender_object and vnode.blender_type != VExportNode.COLLECTION \
            and vnode.blender_object.instance_type == 'COLLECTION' and vnode.blender_object.instance_collection:
        offset = -__convert_swizzle_location(
            vnode.blender_object.instance_collection.instance_offset, export_settings)

        s = Matrix.Diagonal(sca).to_4x4()
        r = rot.to_matrix().to_4x4()
        t = Matrix.Translation(trans).to_4x4()
        o = Matrix.Translation(offset).to_4x4()
        m = t @ r @ s @ o

        trans = m.translation

    translation, rotation, scale = (None, None, None)
    trans[0] = gltf2_blender_math.round_if_near(trans[0], 0.0)
    trans[1] = gltf2_blender_math.round_if_near(trans[1], 0.0)
    trans[2] = gltf2_blender_math.round_if_near(trans[2], 0.0)
    rot[0] = gltf2_blender_math.round_if_near(rot[0], 1.0)
    rot[1] = gltf2_blender_math.round_if_near(rot[1], 0.0)
    rot[2] = gltf2_blender_math.round_if_near(rot[2], 0.0)
    rot[3] = gltf2_blender_math.round_if_near(rot[3], 0.0)
    sca[0] = gltf2_blender_math.round_if_near(sca[0], 1.0)
    sca[1] = gltf2_blender_math.round_if_near(sca[1], 1.0)
    sca[2] = gltf2_blender_math.round_if_near(sca[2], 1.0)
    if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0:
        translation = [trans[0], trans[1], trans[2]]
    if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0:
        rotation = [rot[1], rot[2], rot[3], rot[0]]
    if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0:
        scale = [sca[0], sca[1], sca[2]]
    return translation, rotation, scale


def gather_skin(vnode, export_settings):

    if export_settings['vtree'].nodes[vnode].blender_type == VExportNode.COLLECTION:
        return None

    blender_object = export_settings['vtree'].nodes[vnode].blender_object
    modifiers = {m.type: m for m in blender_object.modifiers} if blender_object else {}
    if "ARMATURE" not in modifiers or modifiers["ARMATURE"].object is None:
        return None

    # No skin is needed when the modifier is linked without having a vertex group
    if len(blender_object.vertex_groups) == 0:
        return None

    # Check if any vertices in the mesh are part of a vertex group
    depsgraph = bpy.context.evaluated_depsgraph_get()
    blender_mesh_owner = blender_object.evaluated_get(depsgraph)
    blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph)
    if not any(vertex.groups is not None and len(vertex.groups) > 0 for vertex in blender_mesh.vertices):
        return None

    # Prevent an infinite recursion error. A mesh can't have an Armature modifier
    # and be bone parented to a bone of this armature.
    # In that case, ignore the armature modifier and keep only the bone parenting.
    if blender_object.parent is not None \
            and blender_object.parent_type == 'BONE' \
            and blender_object.parent.name == modifiers["ARMATURE"].object.name:

        return None

    # Skins and meshes must be in the same glTF node, which is different from how Blender handles armatures
    return gltf2_blender_gather_skins.gather_skin(export_settings['vtree'].nodes[vnode].armature, export_settings)


def __gather_weights(blender_object, export_settings):
    return None


def __convert_swizzle_location(loc, export_settings):
    """Convert a location from the Blender coordinate system to the glTF coordinate system."""
    if export_settings['gltf_yup']:
        return Vector((loc[0], loc[2], -loc[1]))
    else:
        return Vector((loc[0], loc[1], loc[2]))


def __convert_swizzle_rotation(rot, export_settings):
    """
    Convert a quaternion rotation from the Blender coordinate system to the glTF coordinate system.

    'w' stays in first position.
    """
    if export_settings['gltf_yup']:
        return Quaternion((rot[0], rot[1], rot[3], -rot[2]))
    else:
        return Quaternion((rot[0], rot[1], rot[2], rot[3]))


def __convert_swizzle_scale(scale, export_settings):
    """Convert a scale from the Blender coordinate system to the glTF coordinate system."""
    if export_settings['gltf_yup']:
        return Vector((scale[0], scale[2], scale[1]))
    else:
        return Vector((scale[0], scale[1], scale[2]))
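In `__gather_trans_rot_scale`, a collection instance's offset is folded into the node transform by composing T, R, S with the offset translation. A compact check of that composition, runnable in Blender's Python (values are illustrative):

# Illustrative: composing t @ r @ s @ o and reading back the translation,
# as done for collection-instance offsets above.
from mathutils import Matrix, Quaternion, Vector

t = Matrix.Translation(Vector((1.0, 2.0, 3.0)))
r = Quaternion((1.0, 0.0, 0.0, 0.0)).to_matrix().to_4x4()  # identity rotation
s = Matrix.Diagonal(Vector((2.0, 2.0, 2.0))).to_4x4()
o = Matrix.Translation(Vector((-0.5, 0.0, 0.0)))

m = t @ r @ s @ o
print(m.translation)  # <Vector (0.0, 2.0, 3.0)>: the offset is rotated/scaled, then added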
222
scripts/addons_core/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitive_attributes.py
Executable file
@@ -0,0 +1,222 @@
# SPDX-FileCopyrightText: 2018-2021 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0

import numpy as np
from math import ceil

from ...io.com import gltf2_io, gltf2_io_constants, gltf2_io_debug
from ...io.exp import gltf2_io_binary_data
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from .gltf2_blender_gather_accessors import array_to_accessor


def gather_primitive_attributes(blender_primitive, export_settings):
    """
    Gather the attributes, such as POSITION, NORMAL, TANGENT, and all custom attributes, from a blender primitive.

    :return: a dictionary of attributes
    """
    attributes = {}

    # Loop over each extracted attribute.
    # For skinning, all linked attributes (WEIGHTS_ and JOINTS_) need to be calculated
    # in one shot (because of normalization), so we need to check that it is called only once.

    skin_done = False

    for attribute in blender_primitive["attributes"]:
        if (attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_")) and skin_done is True:
            continue
        if attribute.startswith("MORPH_"):
            continue  # Targets for morphs will be managed later
        attributes.update(__gather_attribute(blender_primitive, attribute, export_settings))
        if (attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_")):
            skin_done = True

    return attributes


def __gather_skins(blender_primitive, export_settings):
    attributes = {}

    if not export_settings['gltf_skins']:
        return attributes

    # Retrieve the max set index
    max_bone_set_index = 0
    while blender_primitive["attributes"].get('JOINTS_' + str(max_bone_set_index)) \
            and blender_primitive["attributes"].get('WEIGHTS_' + str(max_bone_set_index)):
        max_bone_set_index += 1
    max_bone_set_index -= 1

    # Here, a set represents a group of 4 weights.
    # So for the max_bone_set_index value:
    # if -1 => no weights
    # if 0 => max 4 weights
    # if 1 => max 8 weights
    # etc...

    # If there is no skinning
    if max_bone_set_index < 0:
        return attributes

    # Retrieve the max set index wanted by the user
    if export_settings['gltf_all_vertex_influences']:
        wanted_max_bone_set_index = max_bone_set_index
    else:
        wanted_max_bone_set_index = ceil(export_settings['gltf_vertex_influences_nb'] / 4) - 1

    # No need to create a set containing only zeros if the user asked for more sets than actually exist.
    if wanted_max_bone_set_index > max_bone_set_index:
        wanted_max_bone_set_index = max_bone_set_index

    # Warn when more groups of 4 weights exist than were requested.
    # The warning for overflow inside the same group is handled later
    # (for example, 3 weights needed, but 2 wanted by the user).
    if max_bone_set_index > wanted_max_bone_set_index:
        export_settings['log'].warning(
            "There are more than {} joint vertex influences. "
            "The {} with highest weight will be used (and normalized).".format(
                export_settings['gltf_vertex_influences_nb'],
                export_settings['gltf_vertex_influences_nb']))

    # Take into account only the wanted sets of 4 weights
    max_bone_set_index = wanted_max_bone_set_index

    # Convert weights to numpy arrays, and set up joints
    weight_arrs = []
    for s in range(0, max_bone_set_index + 1):

        weight_id = 'WEIGHTS_' + str(s)
        weight = blender_primitive["attributes"][weight_id]
        weight = np.array(weight, dtype=np.float32)
        weight = weight.reshape(len(weight) // 4, 4)

        # Warn on overflow inside the same group (for example, 3 weights needed, but 2 wanted by the user),
        # and then zero out the weights that are no longer needed.
        if s == max_bone_set_index and not export_settings['gltf_all_vertex_influences']:
            # Check how many to remove
            to_remove = (wanted_max_bone_set_index + 1) * 4 - export_settings['gltf_vertex_influences_nb']
            if to_remove > 0:
                warning_done = False
                for i in range(0, to_remove):
                    idx = 4 - 1 - i
                    if not all(weight[:, idx]):
                        if warning_done is False:
                            export_settings['log'].warning(
                                "There are more than {} joint vertex influences. "
                                "The {} with highest weight will be used (and normalized).".format(
                                    export_settings['gltf_vertex_influences_nb'],
                                    export_settings['gltf_vertex_influences_nb']))
                            warning_done = True
                    weight[:, idx] = 0.0

        weight_arrs.append(weight)

        # Joints
        joint_id = 'JOINTS_' + str(s)
        internal_joint = blender_primitive["attributes"][joint_id]
        component_type = gltf2_io_constants.ComponentType.UnsignedShort
        if max(internal_joint) < 256:
            component_type = gltf2_io_constants.ComponentType.UnsignedByte
        joints = np.array(internal_joint, dtype=gltf2_io_constants.ComponentType.to_numpy_dtype(component_type))
        joints = joints.reshape(-1, 4)

        if s == max_bone_set_index and not export_settings['gltf_all_vertex_influences']:
            # Check how many to remove
            to_remove = (wanted_max_bone_set_index + 1) * 4 - export_settings['gltf_vertex_influences_nb']
            if to_remove > 0:
                for i in range(0, to_remove):
                    idx = 4 - 1 - i
                    joints[:, idx] = 0.0

        joint = array_to_accessor(
            joints,
            export_settings,
            component_type,
            data_type=gltf2_io_constants.DataType.Vec4,
        )
        attributes[joint_id] = joint

    # Sum the weights for each vertex
    for s in range(0, max_bone_set_index + 1):
        sums = weight_arrs[s].sum(axis=1)
        if s == 0:
            weight_total = sums
        else:
            weight_total += sums

    # Normalize the weights so they sum to 1
    weight_total = weight_total.reshape(-1, 1)
    for s in range(0, max_bone_set_index + 1):
        weight_id = 'WEIGHTS_' + str(s)
        weight_arrs[s] /= weight_total

        weight = array_to_accessor(
            weight_arrs[s],
            export_settings,
            component_type=gltf2_io_constants.ComponentType.Float,
            data_type=gltf2_io_constants.DataType.Vec4,
        )
        attributes[weight_id] = weight

    return attributes


def __gather_attribute(blender_primitive, attribute, export_settings):
    data = blender_primitive["attributes"][attribute]

    include_max_and_mins = {
        "POSITION": True
    }

    if (attribute.startswith("_") or attribute.startswith("COLOR_")) \
            and blender_primitive["attributes"][attribute]['component_type'] == gltf2_io_constants.ComponentType.UnsignedShort:
        # Byte color vertex colors need to be normalized

        data['data'] *= 65535
        data['data'] += 0.5  # bias for rounding
        data['data'] = data['data'].astype(np.uint16)

        export_user_extensions('gather_attribute_change', export_settings, attribute, data, True)

        return {
            attribute: gltf2_io.Accessor(
                buffer_view=gltf2_io_binary_data.BinaryData(
                    data['data'].tobytes(),
                    gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER),
                byte_offset=None,
                component_type=data['component_type'],
                count=len(data['data']),
                extensions=None,
                extras=None,
                max=None,
                min=None,
                name=None,
                normalized=True,
                sparse=None,
                type=data['data_type'],
            )}

    elif attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_"):
        return __gather_skins(blender_primitive, export_settings)

    else:

        export_user_extensions('gather_attribute_change', export_settings, attribute, data, False)

        return {
            attribute: array_to_accessor(
                data['data'],
                export_settings,
                component_type=data['component_type'],
                data_type=data['data_type'],
                include_max_and_min=include_max_and_mins.get(attribute, False),
                normalized=data.get('normalized')
            )
        }
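The normalization step in `__gather_skins` divides every weight set by the per-vertex total across all sets, so the exported WEIGHTS_n values sum to 1. In numpy terms (shapes and values are illustrative):

import numpy as np

# Two sets of 4 weights for 2 vertices (so up to 8 influences per vertex).
w0 = np.array([[0.5, 0.3, 0.0, 0.0], [0.2, 0.2, 0.2, 0.0]], dtype=np.float32)
w1 = np.array([[0.1, 0.1, 0.0, 0.0], [0.4, 0.0, 0.0, 0.0]], dtype=np.float32)

total = (w0.sum(axis=1) + w1.sum(axis=1)).reshape(-1, 1)  # per-vertex sum over all sets
w0 /= total
w1 /= total
assert np.allclose(w0.sum(axis=1) + w1.sum(axis=1), 1.0)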