forked from bartvdbraak/blender
moving io scripts to 'addons' dir in extensions svn, leaving MDD format since I don't maintain this.
This commit is contained in:
parent
439140d6ae
commit
c69ecd3f90
@ -1,121 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# To support reload properly, try to access a package var, if it's there, reload everything
if "bpy" in locals():
    # "bpy" is only in locals() when the add-on has been loaded before,
    # so this re-runs submodule code on F8 / script reload in Blender.
    import imp
    if "import_bvh" in locals():
        imp.reload(import_bvh)
|
||||
|
||||
|
||||
import bpy
|
||||
from bpy.props import *
|
||||
from io_utils import ImportHelper, ExportHelper
|
||||
|
||||
|
||||
class ImportBVH(bpy.types.Operator, ImportHelper):
    '''Load a BVH motion capture file'''
    bl_idname = "import_anim.bvh"
    bl_label = "Import BVH"

    # default extension handled by the ImportHelper file selector
    filename_ext = ".bvh"
    filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})

    # import the mocap data as an armature or as animated empties
    target = EnumProperty(items=(
            ('ARMATURE', "Armature", ""),
            ('OBJECT', "Object", ""),
            ),
                name="Target",
                description="Import target type.",
                default='ARMATURE')

    # uniform scale applied to every location read from the file
    global_scale = FloatProperty(name="Scale", description="Scale the BVH by this value", min=0.0001, max=1000000.0, soft_min=0.001, soft_max=100.0, default=1.0)
    frame_start = IntProperty(name="Start Frame", description="Starting frame for the animation", default=1)
    use_cyclic = BoolProperty(name="Loop", description="Loop the animation playback", default=False)
    # how BVH rotation channels are converted to Blender rotations
    rotate_mode = EnumProperty(items=(
            ('QUATERNION', "Quaternion", "Convert rotations to quaternions"),
            ('NATIVE', "Euler (Native)", "Use the rotation order defined in the BVH file"),
            ('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
            ('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
            ('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
            ('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
            ('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
            ('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
            ),
                name="Rotation",
                description="Rotation conversion.",
                default='NATIVE')

    def execute(self, context):
        # Forward all operator properties (except the UI-only filter_glob)
        # to the importer as keyword arguments.
        from . import import_bvh
        return import_bvh.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
|
||||
|
||||
|
||||
class ExportBVH(bpy.types.Operator, ExportHelper):
    '''Save a BVH motion capture file from an armature'''
    bl_idname = "export_anim.bvh"
    bl_label = "Export BVH"

    # default extension handled by the ExportHelper file selector
    filename_ext = ".bvh"
    filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})

    # uniform scale applied to every location written to the file
    global_scale = FloatProperty(name="Scale", description="Scale the BVH by this value", min=0.0001, max=1000000.0, soft_min=0.001, soft_max=100.0, default=1.0)
    # 0/0 means "use the scene's frame range" (see execute below)
    frame_start = IntProperty(name="Start Frame", description="Starting frame to export", default=0)
    frame_end = IntProperty(name="End Frame", description="End frame to export", default=0)

    @classmethod
    def poll(cls, context):
        # Only available when the active object is an armature.
        obj = context.object
        return obj and obj.type == 'ARMATURE'

    def invoke(self, context, event):
        # Seed the frame range from the scene before showing the file dialog.
        self.frame_start = context.scene.frame_start
        self.frame_end = context.scene.frame_end

        return super().invoke(context, event)

    def execute(self, context):
        # Fall back to the scene range when the operator is called directly
        # (invoke() was skipped) and both frames are still at their defaults.
        if self.frame_start == 0 and self.frame_end == 0:
            self.frame_start = context.scene.frame_start
            self.frame_end = context.scene.frame_end

        from . import export_bvh
        return export_bvh.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
|
||||
|
||||
|
||||
def menu_func_import(self, context):
    """Draw the BVH entry in the File > Import menu."""
    layout = self.layout
    layout.operator(ImportBVH.bl_idname, text="Motion Capture (.bvh)")
|
||||
|
||||
|
||||
def menu_func_export(self, context):
    """Draw the BVH entry in the File > Export menu."""
    layout = self.layout
    layout.operator(ExportBVH.bl_idname, text="Motion Capture (.bvh)")
|
||||
|
||||
|
||||
def register():
    """Add the import/export entries to Blender's File menus."""
    for menu, draw_func in ((bpy.types.INFO_MT_file_import, menu_func_import),
                            (bpy.types.INFO_MT_file_export, menu_func_export)):
        menu.append(draw_func)
|
||||
|
||||
|
||||
def unregister():
    """Remove the menu entries added by register()."""
    for menu, draw_func in ((bpy.types.INFO_MT_file_import, menu_func_import),
                            (bpy.types.INFO_MT_file_export, menu_func_export)):
        menu.remove(draw_func)
|
||||
|
||||
# Allow running this file directly from Blender's text editor.
if __name__ == "__main__":
    register()
|
@ -1,245 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# Script copyright (C) Campbell Barton
|
||||
# fixes from Andrea Rugliancich
|
||||
|
||||
import bpy
|
||||
|
||||
|
||||
def write_armature(context, filepath, frame_start, frame_end, global_scale=1.0):
    """Write the active armature's animation to *filepath* in BVH format.

    frame_start/frame_end are inclusive scene frames; global_scale is
    applied to every location written out.
    """
    from mathutils import Matrix, Vector, Euler
    from math import degrees

    file = open(filepath, "w")

    # the active object is the armature being exported (callers enforce this)
    obj = context.object
    arm = obj.data

    # Build a dictionary of children.
    # None for parentless
    children = {None: []}

    # initialize with blank lists
    for bone in arm.bones:
        children[bone.name] = []

    for bone in arm.bones:
        children[getattr(bone.parent, "name", None)].append(bone.name)

    # sort the children
    for children_list in children.values():
        children_list.sort()

    # bone name list in the order that the bones are written
    serialized_names = []

    # head location (armature space) of every bone written so far,
    # used to compute child offsets relative to their parent
    node_locations = {}

    file.write("HIERARCHY\n")

    def write_recursive_nodes(bone_name, indent):
        # Emit one ROOT/JOINT entry (and its whole subtree) at the given depth.
        my_children = children[bone_name]

        indent_str = "\t" * indent

        bone = arm.bones[bone_name]
        loc = bone.head_local
        node_locations[bone_name] = loc

        # make relative if we can
        if bone.parent:
            loc = loc - node_locations[bone.parent.name]

        if indent:
            file.write("%sJOINT %s\n" % (indent_str, bone_name))
        else:
            file.write("%sROOT %s\n" % (indent_str, bone_name))

        file.write("%s{\n" % indent_str)
        file.write("%s\tOFFSET %.6f %.6f %.6f\n" % (indent_str, loc.x * global_scale, loc.y * global_scale, loc.z * global_scale))
        # connected bones inherit their location from the parent,
        # so only rotation channels are written for them
        if bone.use_connect and bone.parent:
            file.write("%s\tCHANNELS 3 Xrotation Yrotation Zrotation\n" % indent_str)
        else:
            file.write("%s\tCHANNELS 6 Xposition Yposition Zposition Xrotation Yrotation Zrotation\n" % indent_str)

        if my_children:
            # store the location for the children
            # to get their relative offset

            # Write children
            for child_bone in my_children:
                serialized_names.append(child_bone)
                write_recursive_nodes(child_bone, indent + 1)

        else:
            # Write the bone end.
            file.write("%s\tEnd Site\n" % indent_str)
            file.write("%s\t{\n" % indent_str)
            loc = bone.tail_local - node_locations[bone_name]
            file.write("%s\t\tOFFSET %.6f %.6f %.6f\n" % (indent_str, loc.x * global_scale, loc.y * global_scale, loc.z * global_scale))
            file.write("%s\t}\n" % indent_str)

        file.write("%s}\n" % indent_str)

    if len(children[None]) == 1:
        key = children[None][0]
        serialized_names.append(key)
        indent = 0

        write_recursive_nodes(key, indent)

    else:
        # Write a dummy parent node
        # NOTE(review): `key` is referenced here before it is assigned below —
        # this branch (multiple root bones) most likely raises NameError;
        # confirm the intended dummy-root name.
        file.write("ROOT %s\n" % key)
        file.write("{\n")
        file.write("\tOFFSET 0.0 0.0 0.0\n")
        file.write("\tCHANNELS 0\n")  # Xposition Yposition Zposition Xrotation Yrotation Zrotation
        key = None
        indent = 1

        write_recursive_nodes(key, indent)

        file.write("}\n")

    # redefine bones as sorted by serialized_names
    # so we can write motion

    class decorated_bone(object):
        # Caches the per-bone rest/pose matrices so the per-frame loop
        # below only recomputes what changes (the pose matrix).
        __slots__ = (\
        "name",  # bone name, used as key in many places
        "parent",  # decorated bone parent, set in a later loop
        "rest_bone",  # blender armature bone
        "pose_bone",  # blender pose bone
        "pose_mat",  # blender pose matrix
        "rest_arm_mat",  # blender rest matrix (armature space)
        "rest_local_mat",  # blender rest matrix (local space)
        "pose_imat",  # pose_mat inverted
        "rest_arm_imat",  # rest_arm_mat inverted
        "rest_local_imat",  # rest_local_mat inverted
        "prev_euler",  # last used euler to preserve euler compability in between keyframes
        "connected",  # is the bone connected to the parent bone?
        )

        def __init__(self, bone_name):
            self.name = bone_name
            self.rest_bone = arm.bones[bone_name]
            self.pose_bone = obj.pose.bones[bone_name]

            self.pose_mat = self.pose_bone.matrix

            mat = self.rest_bone.matrix  # NOTE(review): unused local
            self.rest_arm_mat = self.rest_bone.matrix_local
            self.rest_local_mat = self.rest_bone.matrix

            # inverted mats
            # NOTE(review): relies on Matrix.invert() returning the matrix
            # (old mathutils API) — confirm against the target Blender build.
            self.pose_imat = self.pose_mat.copy().invert()
            self.rest_arm_imat = self.rest_arm_mat.copy().invert()
            self.rest_local_imat = self.rest_local_mat.copy().invert()

            self.parent = None
            self.prev_euler = Euler((0.0, 0.0, 0.0))
            self.connected = (self.rest_bone.use_connect and self.rest_bone.parent)

        def update_posedata(self):
            # Re-read the pose matrix after the scene frame changed.
            self.pose_mat = self.pose_bone.matrix
            self.pose_imat = self.pose_mat.copy().invert()

        def __repr__(self):
            if self.parent:
                return "[\"%s\" child on \"%s\"]\n" % (self.name, self.parent.name)
            else:
                return "[\"%s\" root bone]\n" % (self.name)

    bones_decorated = [decorated_bone(bone_name) for bone_name in serialized_names]

    # Assign parents
    bones_decorated_dict = {}
    for dbone in bones_decorated:
        bones_decorated_dict[dbone.name] = dbone

    for dbone in bones_decorated:
        parent = dbone.rest_bone.parent
        if parent:
            dbone.parent = bones_decorated_dict[parent.name]
    del bones_decorated_dict
    # finish assigning parents

    scene = bpy.context.scene

    file.write("MOTION\n")
    file.write("Frames: %d\n" % (frame_end - frame_start + 1))
    file.write("Frame Time: %.6f\n" % (1.0 / (scene.render.fps / scene.render.fps_base)))

    for frame in range(frame_start, frame_end + 1):
        scene.frame_set(frame)

        # refresh pose matrices for the new frame before any math
        for dbone in bones_decorated:
            dbone.update_posedata()

        for dbone in bones_decorated:
            trans = Matrix.Translation(dbone.rest_bone.head_local)
            itrans = Matrix.Translation(-dbone.rest_bone.head_local)

            if dbone.parent:
                # express the pose relative to the parent's pose
                mat_final = dbone.parent.rest_arm_mat * dbone.parent.pose_imat * dbone.pose_mat * dbone.rest_arm_imat
                mat_final = itrans * mat_final * trans
                loc = mat_final.translation_part() + (dbone.rest_bone.head_local - dbone.parent.rest_bone.head_local)
            else:
                mat_final = dbone.pose_mat * dbone.rest_arm_imat
                mat_final = itrans * mat_final * trans
                loc = mat_final.translation_part() + dbone.rest_bone.head

            # keep eulers compatible, no jumping on interpolation.
            rot = mat_final.rotation_part().invert().to_euler('XYZ', dbone.prev_euler)

            # connected bones were written with rotation channels only
            if not dbone.connected:
                file.write("%.6f %.6f %.6f " % (loc * global_scale)[:])

            file.write("%.6f %.6f %.6f " % (-degrees(rot[0]), -degrees(rot[1]), -degrees(rot[2])))

            dbone.prev_euler = rot

        file.write("\n")

    file.close()

    print("BVH Exported: %s frames:%d\n" % (filepath, frame_end - frame_start + 1))
|
||||
|
||||
|
||||
def save(operator, context, filepath="",
        frame_start=-1,
        frame_end=-1,
        global_scale=1.0,
        ):
    """Operator-facing entry point: export the active armature to *filepath*.

    Thin wrapper that forwards to write_armature() and returns the status
    set Blender expects from an operator.
    """
    write_armature(context, filepath,
                   frame_start=frame_start,
                   frame_end=frame_end,
                   global_scale=global_scale)
    return {'FINISHED'}
|
||||
|
||||
|
||||
# Allow running this module directly from Blender's text editor.
if __name__ == "__main__":
    # Fix: the original called an undefined name `_read` (NameError), and
    # used str.rstrip(".blend"), which strips a *character set* rather than
    # the suffix (e.g. "scene.blend" -> "scene" but "dnelb.blend" -> "").
    scene = bpy.context.scene
    filepath = bpy.data.filepath
    if filepath.endswith(".blend"):
        filepath = filepath[:-len(".blend")]
    write_armature(bpy.context, filepath + ".bvh",
                   scene.frame_start, scene.frame_end, 1.0)
|
@ -1,550 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# Script copyright (C) Campbell Barton
|
||||
|
||||
import math
|
||||
from math import radians
|
||||
|
||||
import bpy
|
||||
import mathutils
|
||||
from mathutils import Vector, Euler, Matrix
|
||||
|
||||
|
||||
class bvh_node_class(object):
    """In-memory representation of a single BVH joint.

    Built by read_bvh(); consumed by the armature/object builders.
    Head/tail vectors are expected to be mathutils.Vector (any object
    with .x/.y/.z works for construction and repr).
    """
    __slots__ = (
        'name',  # bvh joint name
        'parent',  # bvh_node_class type or None for no parent
        'children',  # a list of children of this type.
        'rest_head_world',  # worldspace rest location for the head of this node
        'rest_head_local',  # localspace rest location for the head of this node
        'rest_tail_world',  # worldspace rest location for the tail of this node
        'rest_tail_local',  # localspace rest location for the tail of this node
        'channels',  # list of 6 ints, -1 for an unused channel, otherwise an index for the BVH motion data lines, loc triple then rot triple
        'rot_order',  # a triple of indices as to the order rotation is applied. [0,1,2] is x/y/z - [None, None, None] if no rotation.
        'rot_order_str',  # same as above but a string 'XYZ' format.
        'anim_data',  # a list of tuples, one for each frame: (locx, locy, locz, rotx, roty, rotz), euler rotation ALWAYS stored xyz order, even when native used.
        'has_loc',  # convenience bool, same as (channels[0]!=-1 or channels[1]!=-1 or channels[2]!=-1)
        'has_rot',  # convenience bool, same as (channels[3]!=-1 or channels[4]!=-1 or channels[5]!=-1)
        'temp')  # use this for whatever you want

    # maps a rotation-order index triple to Blender's euler order string
    _eul_order_lookup = {
        (0, 1, 2): 'XYZ',
        (0, 2, 1): 'XZY',
        (1, 0, 2): 'YXZ',
        (1, 2, 0): 'YZX',
        (2, 0, 1): 'ZXY',
        (2, 1, 0): 'ZYX'}

    def __init__(self, name, rest_head_world, rest_head_local, parent, channels, rot_order):
        self.name = name
        self.rest_head_world = rest_head_world
        self.rest_head_local = rest_head_local
        # tails are filled in later by read_bvh() once children are known
        self.rest_tail_world = None
        self.rest_tail_local = None
        self.parent = parent
        self.channels = channels
        self.rot_order = tuple(rot_order)
        self.rot_order_str = __class__._eul_order_lookup[self.rot_order]

        # convenience functions
        self.has_loc = channels[0] != -1 or channels[1] != -1 or channels[2] != -1
        self.has_rot = channels[3] != -1 or channels[4] != -1 or channels[5] != -1

        self.children = []

        # list of 6 length tuples: (lx,ly,lz, rx,ry,rz)
        # even if the channels aren't used they will just be zero
        self.anim_data = [(0, 0, 0, 0, 0, 0)]

    def __repr__(self):
        # Fix: the second triple was labelled "rest_tail" but printed
        # rest_head_world again; it now prints rest_tail_world.
        return 'BVH name:"%s", rest_loc:(%.3f,%.3f,%.3f), rest_tail:(%.3f,%.3f,%.3f)' %\
            (self.name,
             self.rest_head_world.x, self.rest_head_world.y, self.rest_head_world.z,
             self.rest_tail_world.x, self.rest_tail_world.y, self.rest_tail_world.z)
|
||||
|
||||
|
||||
def read_bvh(context, file_path, rotate_mode='XYZ', global_scale=1.0):
    """Parse the BVH file at *file_path*.

    Returns a dict mapping joint name -> bvh_node_class, with parent/child
    links, rest head/tail positions and per-frame animation data filled in.
    Raises Exception when the file does not start with a HIERARCHY section.
    """
    # File loading stuff
    # Open the file for importing
    file = open(file_path, 'rU')

    # Separate into a list of lists, each line a list of words.
    file_lines = file.readlines()
    # Fix: close the handle once the contents are in memory (was leaked).
    file.close()
    # Non standard carriage returns?
    if len(file_lines) == 1:
        file_lines = file_lines[0].split('\r')

    # Split by whitespace.
    file_lines = [ll for ll in [l.split() for l in file_lines] if ll]

    # Create hierarchy as empties
    if file_lines[0][0].lower() == 'hierarchy':
        #print 'Importing the BVH Hierarchy for:', file_path
        pass
    else:
        # Fix: raising a plain string is a TypeError under Python 3;
        # raise a real exception object instead.
        raise Exception('ERROR: This is not a BVH file')

    bvh_nodes = {None: None}
    bvh_nodes_serial = [None]

    channelIndex = -1

    lineIdx = 0  # An index for the file.
    while lineIdx < len(file_lines) - 1:
        if file_lines[lineIdx][0].lower() == 'root' or file_lines[lineIdx][0].lower() == 'joint':

            # Join spaces into 1 word with underscores joining it.
            if len(file_lines[lineIdx]) > 2:
                file_lines[lineIdx][1] = '_'.join(file_lines[lineIdx][1:])
                file_lines[lineIdx] = file_lines[lineIdx][:2]

            # MAY NEED TO SUPPORT MULTIPLE ROOTS HERE! Still unsure whether multiple roots are possible.

            # Make sure the names are unique - Object names will match joint names exactly and both will be unique.
            name = file_lines[lineIdx][1]

            lineIdx += 2  # Increment to the next line (Offset)
            rest_head_local = Vector((float(file_lines[lineIdx][1]), float(file_lines[lineIdx][2]), float(file_lines[lineIdx][3]))) * global_scale
            lineIdx += 1  # Increment to the next line (Channels)

            # newChannel[Xposition, Yposition, Zposition, Xrotation, Yrotation, Zrotation]
            # newChannel references indices into the motiondata;
            # -1 refers to a zero value appended on loading so it always
            # refers to a valid value.
            my_channel = [-1, -1, -1, -1, -1, -1]
            my_rot_order = [None, None, None]
            rot_count = 0
            for channel in file_lines[lineIdx][2:]:
                channel = channel.lower()
                channelIndex += 1  # So the index points to the right channel
                if channel == 'xposition':
                    my_channel[0] = channelIndex
                elif channel == 'yposition':
                    my_channel[1] = channelIndex
                elif channel == 'zposition':
                    my_channel[2] = channelIndex

                elif channel == 'xrotation':
                    my_channel[3] = channelIndex
                    my_rot_order[rot_count] = 0
                    rot_count += 1
                elif channel == 'yrotation':
                    my_channel[4] = channelIndex
                    my_rot_order[rot_count] = 1
                    rot_count += 1
                elif channel == 'zrotation':
                    my_channel[5] = channelIndex
                    my_rot_order[rot_count] = 2
                    rot_count += 1

            channels = file_lines[lineIdx][2:]

            my_parent = bvh_nodes_serial[-1]  # account for none

            # Apply the parent's offset accumulatively
            if my_parent is None:
                rest_head_world = Vector(rest_head_local)
            else:
                rest_head_world = my_parent.rest_head_world + rest_head_local

            bvh_node = bvh_nodes[name] = bvh_node_class(name, rest_head_world, rest_head_local, my_parent, my_channel, my_rot_order)

            # If we have another child then we can call ourselves a parent, else
            bvh_nodes_serial.append(bvh_node)

        # Account for an end node; there is sometimes a name after
        # 'End Site' but we will ignore it.
        if file_lines[lineIdx][0].lower() == 'end' and file_lines[lineIdx][1].lower() == 'site':
            lineIdx += 2  # Increment to the next line (Offset)
            rest_tail = Vector((float(file_lines[lineIdx][1]), float(file_lines[lineIdx][2]), float(file_lines[lineIdx][3]))) * global_scale

            bvh_nodes_serial[-1].rest_tail_world = bvh_nodes_serial[-1].rest_head_world + rest_tail
            bvh_nodes_serial[-1].rest_tail_local = bvh_nodes_serial[-1].rest_head_local + rest_tail

            # Just so we can remove the parents in a uniform way - the end
            # has kids, so this is a placeholder.
            bvh_nodes_serial.append(None)

        if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0] == '}':  # == ['}']
            bvh_nodes_serial.pop()  # Remove the last item

        if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0].lower() == 'motion':
            #print '\nImporting motion data'
            lineIdx += 3  # Set the cursor to the first frame
            break

        lineIdx += 1

    # Remove the None value used for easy parent reference
    del bvh_nodes[None]
    # Don't use anymore
    del bvh_nodes_serial

    bvh_nodes_list = bvh_nodes.values()

    while lineIdx < len(file_lines):
        line = file_lines[lineIdx]
        for bvh_node in bvh_nodes_list:
            #for bvh_node in bvh_nodes_serial:
            lx = ly = lz = rx = ry = rz = 0.0
            channels = bvh_node.channels
            anim_data = bvh_node.anim_data
            if channels[0] != -1:
                lx = global_scale * float(line[channels[0]])

            if channels[1] != -1:
                ly = global_scale * float(line[channels[1]])

            if channels[2] != -1:
                lz = global_scale * float(line[channels[2]])

            if channels[3] != -1 or channels[4] != -1 or channels[5] != -1:
                rx = radians(float(line[channels[3]]))
                ry = radians(float(line[channels[4]]))
                rz = radians(float(line[channels[5]]))

            # Done importing motion data #
            anim_data.append((lx, ly, lz, rx, ry, rz))
        lineIdx += 1

    # Assign children
    for bvh_node in bvh_nodes.values():
        bvh_node_parent = bvh_node.parent
        if bvh_node_parent:
            bvh_node_parent.children.append(bvh_node)

    # Now set the tip of each bvh_node
    for bvh_node in bvh_nodes.values():

        if not bvh_node.rest_tail_world:
            if len(bvh_node.children) == 0:
                # could just fail here, but rare BVH files have childless nodes
                bvh_node.rest_tail_world = Vector(bvh_node.rest_head_world)
                bvh_node.rest_tail_local = Vector(bvh_node.rest_head_local)
            elif len(bvh_node.children) == 1:
                bvh_node.rest_tail_world = Vector(bvh_node.children[0].rest_head_world)
                bvh_node.rest_tail_local = bvh_node.rest_head_local + bvh_node.children[0].rest_head_local
            else:
                # multiple children: place the tail at the average of
                # the children's heads
                rest_tail_world = Vector((0.0, 0.0, 0.0))
                rest_tail_local = Vector((0.0, 0.0, 0.0))
                for bvh_node_child in bvh_node.children:
                    rest_tail_world += bvh_node_child.rest_head_world
                    rest_tail_local += bvh_node_child.rest_head_local

                bvh_node.rest_tail_world = rest_tail_world * (1.0 / len(bvh_node.children))
                bvh_node.rest_tail_local = rest_tail_local * (1.0 / len(bvh_node.children))

        # Make sure the tail isn't the same location as the head.
        if (bvh_node.rest_tail_local - bvh_node.rest_head_local).length <= 0.001 * global_scale:
            bvh_node.rest_tail_local.y = bvh_node.rest_tail_local.y + global_scale / 10
            bvh_node.rest_tail_world.y = bvh_node.rest_tail_world.y + global_scale / 10

    return bvh_nodes
|
||||
|
||||
|
||||
def bvh_node_dict2objects(context, bvh_name, bvh_nodes, rotate_mode='NATIVE', frame_start=1, IMPORT_LOOP=False):
    """Create one animated empty per BVH joint and keyframe the motion.

    Returns the list of created objects. rotate_mode and IMPORT_LOOP are
    accepted for signature parity with bvh_node_dict2armature but are not
    used here.
    """
    if frame_start < 1:
        frame_start = 1

    scene = context.scene
    # deselect everything so only the new empties end up selected
    for obj in scene.objects:
        obj.select = False

    objects = []

    def add_ob(name):
        # Create and link an empty, track it, and leave it selected.
        obj = bpy.data.objects.new(name, None)
        scene.objects.link(obj)
        objects.append(obj)
        obj.select = True

        # nicer drawing.
        obj.empty_draw_type = 'CUBE'
        obj.empty_draw_size = 0.1

        return obj

    # Add objects
    for name, bvh_node in bvh_nodes.items():
        bvh_node.temp = add_ob(name)
        # rot_order_str is reversed to match object rotation-mode convention
        bvh_node.temp.rotation_mode = bvh_node.rot_order_str[::-1]

    # Parent the objects
    for bvh_node in bvh_nodes.values():
        for bvh_node_child in bvh_node.children:
            bvh_node_child.temp.parent = bvh_node.temp

    # Offset
    for bvh_node in bvh_nodes.values():
        # Make relative to parents offset
        bvh_node.temp.location = bvh_node.rest_head_local

    # Add tail objects
    for name, bvh_node in bvh_nodes.items():
        if not bvh_node.children:
            ob_end = add_ob(name + '_end')
            ob_end.parent = bvh_node.temp
            ob_end.location = bvh_node.rest_tail_world - bvh_node.rest_head_world

    # Keyframe every frame of motion onto the delta transforms.
    for name, bvh_node in bvh_nodes.items():
        obj = bvh_node.temp

        for frame_current in range(len(bvh_node.anim_data)):

            lx, ly, lz, rx, ry, rz = bvh_node.anim_data[frame_current]

            if bvh_node.has_loc:
                obj.delta_location = Vector((lx, ly, lz)) - bvh_node.rest_head_world
                obj.keyframe_insert("delta_location", index=-1, frame=frame_start + frame_current)

            if bvh_node.has_rot:
                obj.delta_rotation_euler = rx, ry, rz
                obj.keyframe_insert("delta_rotation_euler", index=-1, frame=frame_start + frame_current)

    return objects
|
||||
|
||||
|
||||
def bvh_node_dict2armature(context, bvh_name, bvh_nodes, rotate_mode='XYZ', frame_start=1, IMPORT_LOOP=False):
    """Build an armature named *bvh_name* from parsed BVH nodes and
    keyframe the motion onto its pose bones.

    rotate_mode selects quaternion / native / fixed euler rotation;
    returns the new armature object.
    """
    if frame_start < 1:
        frame_start = 1

    # Add the new armature,
    scene = context.scene
    for obj in scene.objects:
        obj.select = False

    arm_data = bpy.data.armatures.new(bvh_name)
    arm_ob = bpy.data.objects.new(bvh_name, arm_data)

    scene.objects.link(arm_ob)

    arm_ob.select = True
    scene.objects.active = arm_ob

    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
    bpy.ops.object.mode_set(mode='EDIT', toggle=False)

    # Get the average bone length for zero length bones, we may not use this.
    average_bone_length = 0.0
    nonzero_count = 0
    for bvh_node in bvh_nodes.values():
        l = (bvh_node.rest_head_local - bvh_node.rest_tail_local).length
        if l:
            average_bone_length += l
            nonzero_count += 1

    # Very rare cases all bones could be zero length???
    if not average_bone_length:
        average_bone_length = 0.1
    else:
        # Normal operation
        average_bone_length = average_bone_length / nonzero_count

    # XXX, annoying, remove bone.
    # Fix: edit_bones live on the armature data, not the object
    # (arm_ob.edit_bones raised AttributeError).
    while arm_data.edit_bones:
        arm_data.edit_bones.remove(arm_data.edit_bones[-1])

    ZERO_AREA_BONES = []
    for name, bvh_node in bvh_nodes.items():
        # New editbone
        bone = bvh_node.temp = arm_data.edit_bones.new(name)

        bone.head = bvh_node.rest_head_world
        bone.tail = bvh_node.rest_tail_world

        # Zero-length bones are invisible and break connectivity checks,
        # so nudge the tail and remember the bone.
        if (bone.head - bone.tail).length < 0.001:
            if bvh_node.parent:
                ofs = bvh_node.parent.rest_head_local - bvh_node.parent.rest_tail_local
                if ofs.length:  # is our parent zero length also?? unlikely
                    bone.tail = bone.tail + ofs
                else:
                    bone.tail.y = bone.tail.y + average_bone_length
            else:
                bone.tail.y = bone.tail.y + average_bone_length

            ZERO_AREA_BONES.append(bone.name)

    for bvh_node in bvh_nodes.values():
        if bvh_node.parent:
            # bvh_node.temp is the Editbone

            # Set the bone parent
            bvh_node.temp.parent = bvh_node.parent.temp

            # Set the connection state: only connect bones with no location
            # channels whose head coincides with the parent's tail.
            if not bvh_node.has_loc and\
            bvh_node.parent and\
            bvh_node.parent.temp.name not in ZERO_AREA_BONES and\
            bvh_node.parent.rest_tail_local == bvh_node.rest_head_local:
                bvh_node.temp.use_connect = True

    # Replace the editbone with the editbone name,
    # to avoid memory errors accessing the editbone outside editmode
    for bvh_node in bvh_nodes.values():
        bvh_node.temp = bvh_node.temp.name

    # Now apply the animation to the armature

    # Get armature animation data
    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)

    pose = arm_ob.pose
    pose_bones = pose.bones

    if rotate_mode == 'NATIVE':
        for bvh_node in bvh_nodes.values():
            bone_name = bvh_node.temp  # may not be the same name as the bvh_node, could have been shortened.
            pose_bone = pose_bones[bone_name]
            pose_bone.rotation_mode = bvh_node.rot_order_str

    elif rotate_mode != 'QUATERNION':
        for pose_bone in pose_bones:
            pose_bone.rotation_mode = rotate_mode
    else:
        # Quats default
        pass

    context.scene.update()

    arm_ob.animation_data_create()
    action = bpy.data.actions.new(name=bvh_name)
    arm_ob.animation_data.action = action

    # Replace the bvh_node.temp (currently a bone name)
    # with a tuple (pose_bone, armature_bone, bone_rest_matrix, bone_rest_matrix_inv)
    for bvh_node in bvh_nodes.values():
        bone_name = bvh_node.temp  # may not be the same name as the bvh_node, could have been shortened.
        pose_bone = pose_bones[bone_name]
        rest_bone = arm_data.bones[bone_name]
        bone_rest_matrix = rest_bone.matrix_local.rotation_part()

        bone_rest_matrix_inv = Matrix(bone_rest_matrix)
        bone_rest_matrix_inv.invert()

        bone_rest_matrix_inv.resize4x4()
        bone_rest_matrix.resize4x4()
        # Fix: the tuple stored the stale loop variable `bone` (the last
        # editbone, invalid outside edit mode) instead of this bone's
        # rest_bone.
        bvh_node.temp = (pose_bone, rest_bone, bone_rest_matrix, bone_rest_matrix_inv)

    # KEYFRAME METHOD, SLOW, USE IPOS DIRECT
    # TODO: use f-point samples instead (Aligorith)
    if rotate_mode != 'QUATERNION':
        prev_euler = [Euler() for i in range(len(bvh_nodes))]

    # Animate the data, the last used bvh_node will do since they all have
    # the same number of frames; skip the first frame (rest frame).
    for frame_current in range(len(bvh_node.anim_data) - 1):

        scene.frame_set(frame_start + frame_current)

        for i, bvh_node in enumerate(bvh_nodes.values()):
            pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv = bvh_node.temp
            lx, ly, lz, rx, ry, rz = bvh_node.anim_data[frame_current + 1]

            if bvh_node.has_rot:
                # apply rotation order and convert to XYZ
                # note that the rot_order_str is reversed.
                bone_rotation_matrix = Euler((rx, ry, rz), bvh_node.rot_order_str[::-1]).to_matrix().resize4x4()
                bone_rotation_matrix = bone_rest_matrix_inv * bone_rotation_matrix * bone_rest_matrix

                if rotate_mode == 'QUATERNION':
                    pose_bone.rotation_quaternion = bone_rotation_matrix.to_quat()
                else:
                    # keep eulers compatible with the previous frame so
                    # interpolation doesn't flip
                    euler = bone_rotation_matrix.to_euler(bvh_node.rot_order_str, prev_euler[i])
                    pose_bone.rotation_euler = euler
                    prev_euler[i] = euler

            if bvh_node.has_loc:
                pose_bone.location = (bone_rest_matrix_inv * Matrix.Translation(Vector((lx, ly, lz)) - bvh_node.rest_head_local)).translation_part()

            if bvh_node.has_loc:
                pose_bone.keyframe_insert("location")
            if bvh_node.has_rot:
                if rotate_mode == 'QUATERNION':
                    pose_bone.keyframe_insert("rotation_quaternion")
                else:
                    pose_bone.keyframe_insert("rotation_euler")

    for cu in action.fcurves:
        if IMPORT_LOOP:
            pass  # 2.5 doesn't have cyclic now?

        for bez in cu.keyframe_points:
            bez.interpolation = 'LINEAR'

    return arm_ob
|
||||
|
||||
|
||||
def load(operator, context, filepath="", target='ARMATURE', rotate_mode='NATIVE', global_scale=1.0, use_cyclic=False, frame_start=1):
    """Parse a BVH motion-capture file and build either an armature or
    an object hierarchy from it.

    target: 'ARMATURE' or 'OBJECT'; anything else raises.
    Returns {'FINISHED'}.
    """
    import time

    time_parse = time.time()
    print('\tparsing bvh %r...' % filepath, end="")

    bvh_nodes = read_bvh(context, filepath,
                         rotate_mode=rotate_mode,
                         global_scale=global_scale)

    print('%.4f' % (time.time() - time_parse))

    # Remember the current frame so it can be restored once keyframes
    # have been inserted (frame_set is called per imported frame).
    frame_orig = context.scene.frame_current

    time_build = time.time()
    print('\timporting to blender...', end="")

    bvh_name = bpy.path.display_name_from_filepath(filepath)

    # Pick the builder for the requested target type.
    if target == 'ARMATURE':
        builder = bvh_node_dict2armature
    elif target == 'OBJECT':
        builder = bvh_node_dict2objects
    else:
        raise Exception("invalid type")

    builder(context, bvh_name, bvh_nodes,
            rotate_mode=rotate_mode,
            frame_start=frame_start,
            IMPORT_LOOP=use_cyclic)

    print('Done in %.4f\n' % (time.time() - time_build))

    context.scene.frame_set(frame_orig)

    return {'FINISHED'}
|
@ -1,100 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# To support reload properly, try to access a package var, if it's there, reload everything
|
||||
# Support Blender's 'Reload Scripts': when this module is re-executed,
# "bpy" is already in locals(), so the submodules must be reloaded
# explicitly for edits to take effect.
if "bpy" in locals():
    import imp
    if "export_ply" in locals():
        imp.reload(export_ply)
    if "import_ply" in locals():
        imp.reload(import_ply)
|
||||
|
||||
|
||||
import bpy
|
||||
from bpy.props import *
|
||||
from io_utils import ImportHelper, ExportHelper
|
||||
|
||||
|
||||
class ImportPLY(bpy.types.Operator, ImportHelper):
    # NOTE: the docstring doubles as the operator tooltip in Blender.
    # (Corrected: previous text was copy-pasted from the BVH importer.)
    '''Load a Stanford PLY geometry file'''
    bl_idname = "import_mesh.ply"
    bl_label = "Import PLY"

    # Default extension and a hidden file filter for the file selector.
    filename_ext = ".ply"
    filter_glob = StringProperty(default="*.ply", options={'HIDDEN'})

    def execute(self, context):
        # Deferred import: the parser is only loaded when actually used.
        from . import import_ply
        return import_ply.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
|
||||
|
||||
|
||||
class ExportPLY(bpy.types.Operator, ExportHelper):
    '''Export a single object as a stanford PLY with normals, colours and texture coordinates.'''
    bl_idname = "export_mesh.ply"
    bl_label = "Export PLY"

    # Default extension and a hidden file filter for the file selector.
    filename_ext = ".ply"
    filter_glob = StringProperty(default="*.ply", options={'HIDDEN'})

    # Export options shown in the file-selector sidebar.
    # (Typo fix: "Exort" -> "Export" in the UV and color descriptions.)
    use_modifiers = BoolProperty(name="Apply Modifiers", description="Apply Modifiers to the exported mesh", default=True)
    use_normals = BoolProperty(name="Normals", description="Export Normals for smooth and hard shaded faces", default=True)
    use_uv_coords = BoolProperty(name="UVs", description="Export the active UV layer", default=True)
    use_colors = BoolProperty(name="Vertex Colors", description="Export the active vertex color layer", default=True)

    @classmethod
    def poll(cls, context):
        # Only available when there is an active object to export.
        return context.active_object is not None

    def execute(self, context):
        filepath = self.filepath
        filepath = bpy.path.ensure_ext(filepath, self.filename_ext)
        # Deferred import: the exporter is only loaded when actually used.
        from . import export_ply
        return export_ply.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))

    def draw(self, context):
        """Lay out the export options two per row."""
        layout = self.layout

        row = layout.row()
        row.prop(self, "use_modifiers")
        row.prop(self, "use_normals")
        row = layout.row()
        row.prop(self, "use_uv_coords")
        row.prop(self, "use_colors")
|
||||
|
||||
|
||||
def menu_func_import(self, context):
    """File > Import menu entry for Stanford PLY."""
    self.layout.operator(ImportPLY.bl_idname, text="Stanford (.ply)")
|
||||
|
||||
|
||||
def menu_func_export(self, context):
    """File > Export menu entry for Stanford PLY."""
    self.layout.operator(ExportPLY.bl_idname, text="Stanford (.ply)")
|
||||
|
||||
|
||||
def register():
    """Hook the PLY operators into the File > Import/Export menus."""
    bpy.types.INFO_MT_file_import.append(menu_func_import)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
|
||||
|
||||
|
||||
def unregister():
    """Remove the PLY menu entries added by register()."""
    bpy.types.INFO_MT_file_import.remove(menu_func_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
|
||||
|
||||
# Allow running this file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
|
@ -1,203 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# Copyright (C) 2004, 2005: Bruce Merry, bmerry@cs.uct.ac.za
|
||||
# Contributors: Bruce Merry, Campbell Barton
|
||||
|
||||
"""
|
||||
This script exports Stanford PLY files from Blender. It supports normals,
|
||||
colours, and texture coordinates per face or per vertex.
|
||||
Only one mesh can be exported at a time.
|
||||
"""
|
||||
|
||||
import bpy
|
||||
import os
|
||||
|
||||
|
||||
def save(operator, context, filepath="", use_modifiers=True, use_normals=True, use_uv_coords=True, use_colors=True):
    """Export the active object's mesh as an ascii Stanford PLY file.

    filepath: output path, opened for text writing.
    use_modifiers: export the modifier-applied mesh ('PREVIEW' settings).
    use_normals: write per-vertex nx/ny/nz columns.
    use_uv_coords: write s/t columns (from face UVs or sticky vertex UVs).
    use_colors: write red/green/blue columns from the active color layer.

    Raises Exception when there is no active object or no mesh data.
    Returns {'FINISHED'}.
    """

    # Rounding helpers: vertices are deduplicated on rounded keys so
    # float noise does not defeat sharing of identical corners.
    def rvec3d(v):
        return round(v[0], 6), round(v[1], 6), round(v[2], 6)

    def rvec2d(v):
        return round(v[0], 6), round(v[1], 6)

    scene = context.scene
    obj = context.object

    if not obj:
        raise Exception("Error, Select 1 active object")

    file = open(filepath, 'w')

    # Leave edit mode so the mesh data read below is up to date.
    if scene.objects.active:
        bpy.ops.object.mode_set(mode='OBJECT')

    if use_modifiers:
        # Temporary evaluated mesh; freed again at the end of export.
        mesh = obj.create_mesh(scene, True, 'PREVIEW')
    else:
        mesh = obj.data

    if not mesh:
        raise Exception("Error, could not get mesh data from active object")

    # mesh.transform(obj.matrix_world) # XXX

    # What UV/color data actually exists on this mesh.
    faceUV = (len(mesh.uv_textures) > 0)
    vertexUV = (len(mesh.sticky) > 0)
    vertexColors = len(mesh.vertex_colors) > 0

    # Downgrade the options when the data simply is not there...
    if (not faceUV) and (not vertexUV):
        use_uv_coords = False
    if not vertexColors:
        use_colors = False

    # ...and ignore present data when the user opted out.
    if not use_uv_coords:
        faceUV = vertexUV = False
    if not use_colors:
        vertexColors = False

    if faceUV:
        active_uv_layer = mesh.uv_textures.active
        if not active_uv_layer:
            use_uv_coords = False
            faceUV = None
        else:
            active_uv_layer = active_uv_layer.data

    if vertexColors:
        active_col_layer = mesh.vertex_colors.active
        if not active_col_layer:
            use_colors = False
            vertexColors = None
        else:
            active_col_layer = active_col_layer.data

    # in case: defaults used when a face provides no uv/color data
    color = uvcoord = uvcoord_key = normal = normal_key = None

    mesh_verts = mesh.vertices  # save a lookup
    ply_verts = []  # list of (blender vert index, normal, uvcoord, color)
    # vdict = {} # (index, normal, uv) -> new index
    vdict = [{} for i in range(len(mesh_verts))]
    ply_faces = [[] for f in range(len(mesh.faces))]
    vert_count = 0

    # Build the PLY vertex list: a blender vertex is split whenever it is
    # used with a different (normal, uv, color) combination.
    for i, f in enumerate(mesh.faces):

        smooth = f.use_smooth
        if not smooth:
            # Flat shading: one face normal shared by all corners.
            normal = tuple(f.normal)
            normal_key = rvec3d(normal)

        if faceUV:
            uv = active_uv_layer[i]
            uv = uv.uv1, uv.uv2, uv.uv3, uv.uv4  # XXX - crufty :/
        if vertexColors:
            col = active_col_layer[i]
            col = col.color1[:], col.color2[:], col.color3[:], col.color4[:]

        f_verts = f.vertices

        pf = ply_faces[i]
        for j, vidx in enumerate(f_verts):
            v = mesh_verts[vidx]

            if smooth:
                # Smooth shading: use the per-vertex normal instead.
                normal = tuple(v.normal)
                normal_key = rvec3d(normal)

            if faceUV:
                uvcoord = uv[j][0], 1.0 - uv[j][1]
                uvcoord_key = rvec2d(uvcoord)
            elif vertexUV:
                uvcoord = v.uvco[0], 1.0 - v.uvco[1]
                uvcoord_key = rvec2d(uvcoord)

            if vertexColors:
                color = col[j]
                # PLY stores colors as uchar 0..255.
                color = int(color[0] * 255.0), int(color[1] * 255.0), int(color[2] * 255.0)

            key = normal_key, uvcoord_key, color

            vdict_local = vdict[vidx]
            pf_vidx = vdict_local.get(key)  # Will be None initially

            if pf_vidx is None:  # same as vdict_local.has_key(key)
                pf_vidx = vdict_local[key] = vert_count
                ply_verts.append((vidx, normal, uvcoord, color))
                vert_count += 1

            pf.append(pf_vidx)

    # ---- header ----
    file.write('ply\n')
    file.write('format ascii 1.0\n')
    file.write('comment Created by Blender %s - www.blender.org, source file: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))

    file.write('element vertex %d\n' % len(ply_verts))

    file.write('property float x\n')
    file.write('property float y\n')
    file.write('property float z\n')

    if use_normals:
        file.write('property float nx\n')
        file.write('property float ny\n')
        file.write('property float nz\n')
    if use_uv_coords:
        file.write('property float s\n')
        file.write('property float t\n')
    if use_colors:
        file.write('property uchar red\n')
        file.write('property uchar green\n')
        file.write('property uchar blue\n')

    file.write('element face %d\n' % len(mesh.faces))
    file.write('property list uchar uint vertex_indices\n')
    file.write('end_header\n')

    # ---- vertex data, columns in the order declared above ----
    for i, v in enumerate(ply_verts):
        file.write('%.6f %.6f %.6f ' % mesh_verts[v[0]].co[:])  # co
        if use_normals:
            file.write('%.6f %.6f %.6f ' % v[1])  # no
        if use_uv_coords:
            file.write('%.6f %.6f ' % v[2])  # uv
        if use_colors:
            file.write('%u %u %u' % v[3])  # col
        file.write('\n')

    # ---- face data: only tris and quads occur here ----
    for pf in ply_faces:
        if len(pf) == 3:
            file.write('3 %d %d %d\n' % tuple(pf))
        else:
            file.write('4 %d %d %d %d\n' % tuple(pf))

    file.close()
    print("writing %r done" % filepath)

    if use_modifiers:
        # Free the temporary evaluated mesh created above.
        bpy.data.meshes.remove(mesh)

    # XXX
    """
    if is_editmode:
        Blender.Window.EditMode(1, '', 0)
    """

    return {'FINISHED'}
|
@ -1,339 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
import re
|
||||
import struct
|
||||
|
||||
|
||||
class element_spec(object):
    """One 'element' declaration from a PLY header (e.g. vertex, face)."""
    __slots__ = ("name",
                 "count",
                 "properties",
                 )

    def __init__(self, name, count):
        self.name = name
        self.count = count  # number of rows of this element in the file
        self.properties = []  # property_spec instances, in declared order

    def load(self, format, stream):
        """Load one row: one value per property, in declared order.

        For ascii files one text line is consumed and split into tokens;
        binary formats read directly from the stream.
        """
        if format == 'ascii':
            # Raw string fix: '\s' is an invalid escape sequence in a plain
            # string literal (SyntaxWarning on modern Python).
            stream = re.split(r'\s+', stream.readline())
        return [x.load(format, stream) for x in self.properties]

    def index(self, name):
        """Return the index of the property called *name*, or -1 if absent."""
        for i, p in enumerate(self.properties):
            if p.name == name:
                return i
        return -1
|
||||
|
||||
|
||||
class property_spec(object):
    """A single property declaration inside a PLY element."""
    __slots__ = ("name",
                 "list_type",
                 "numeric_type",
                 )

    def __init__(self, name, list_type, numeric_type):
        self.name = name
        self.list_type = list_type  # None for scalar properties
        self.numeric_type = numeric_type

    def read_format(self, format, count, num_type, stream):
        """Read *count* values of *num_type*.

        For 'ascii', *stream* is a token list and consumed tokens are
        removed from its front; otherwise *stream* is a binary file and
        *format* is the struct endianness prefix ('<' or '>').
        """
        if format == 'ascii':
            if num_type == 's':
                # Double-quoted tokens; embedded whitespace is unsupported.
                decoded = []
                for i in range(count):
                    token = stream[i]
                    if len(token) < 2 or token[0] != '"' or token[-1] != '"':
                        print('Invalid string', token)
                        print('Note: ply_import.py does not handle whitespace in strings')
                        return None
                    decoded.append(token[1:-1])
                del stream[:count]
                return decoded
            mapper = float if num_type in ('f', 'd') else int
            values = [mapper(tok) for tok in stream[:count]]
            del stream[:count]
            return values
        if num_type == 's':
            # Binary strings: int32 length, then that many bytes including
            # a trailing NULL which is stripped.
            decoded = []
            for i in range(count):
                length_fmt = format + 'i'
                length = struct.unpack(length_fmt, stream.read(struct.calcsize(length_fmt)))[0]
                string_fmt = '%s%is' % (format, length)
                raw = struct.unpack(string_fmt, stream.read(struct.calcsize(string_fmt)))[0]
                decoded.append(raw[:-1])
            return decoded
        packed_fmt = '%s%i%s' % (format, count, num_type)
        return struct.unpack(packed_fmt, stream.read(struct.calcsize(packed_fmt)))

    def load(self, format, stream):
        """Read this property's value; a list property reads its count first."""
        if self.list_type is None:
            return self.read_format(format, 1, self.numeric_type, stream)[0]
        count = int(self.read_format(format, 1, self.list_type, stream)[0])
        return self.read_format(format, count, self.numeric_type, stream)
|
||||
|
||||
|
||||
class object_spec(object):
    """An ordered collection of element_specs describing a whole PLY file."""
    __slots__ = ("specs",
                 )

    def __init__(self):
        self.specs = []

    def load(self, format, stream):
        """Load every element's rows; returns {element name: [rows]}."""
        return {spec.name: [spec.load(format, stream) for _ in range(spec.count)]
                for spec in self.specs}
|
||||
|
||||
|
||||
def read(filepath):
    """Parse a PLY file: the header in text mode, then the payload.

    Returns (obj_spec, obj) where obj maps element names to lists of
    loaded rows, or None when the file is malformed or unreadable.
    """
    format = ''
    version = '1.0'
    format_specs = {'binary_little_endian': '<',
                    'binary_big_endian': '>',
                    'ascii': 'ascii'}
    # PLY scalar type names -> struct format characters.
    type_specs = {'char': 'b',
                  'uchar': 'B',
                  'int8': 'b',
                  'uint8': 'B',
                  'int16': 'h',
                  'uint16': 'H',
                  'ushort': 'H',
                  'int': 'i',
                  'int32': 'i',
                  'uint': 'I',
                  'uint32': 'I',
                  'float': 'f',
                  'float32': 'f',
                  'float64': 'd',
                  'double': 'd',
                  'string': 's'}
    obj_spec = object_spec()

    try:
        # Text mode for header parsing only; binary payloads reopen below.
        # (Fix: the old 'rU' mode was removed in Python 3.11; universal
        # newlines are the default in text mode anyway.)
        file = open(filepath, 'r')
        signature = file.readline()

        if not signature.startswith('ply'):
            print('Signature line was invalid')
            return None

        while 1:
            line = file.readline()
            if not line:
                # Fix: EOF before 'end_header' previously looped forever.
                print('Invalid header ("end_header" not found)')
                return None
            tokens = re.split(r'[ \n]+', line)

            if len(tokens) == 0:
                continue
            if tokens[0] == 'end_header':
                break
            elif tokens[0] == 'comment' or tokens[0] == 'obj_info':
                continue
            elif tokens[0] == 'format':
                if len(tokens) < 3:
                    print('Invalid format line')
                    return None
                if tokens[1] not in format_specs:
                    print('Unknown format', tokens[1])
                    return None
                if tokens[2] != version:
                    print('Unknown version', tokens[2])
                    return None
                format = tokens[1]
            elif tokens[0] == 'element':
                if len(tokens) < 3:
                    print('Invalid element line')
                    return None
                obj_spec.specs.append(element_spec(tokens[1], int(tokens[2])))
            elif tokens[0] == 'property':
                if not len(obj_spec.specs):
                    print('Property without element')
                    return None
                if tokens[1] == 'list':
                    # property list <count-type> <item-type> <name>
                    obj_spec.specs[-1].properties.append(property_spec(tokens[4], type_specs[tokens[2]], type_specs[tokens[3]]))
                else:
                    obj_spec.specs[-1].properties.append(property_spec(tokens[2], None, type_specs[tokens[1]]))

        if format != 'ascii':
            file.close()  # was ascii, now binary
            file = open(filepath, 'rb')

            # Skip the header again on the binary stream. Fix: readline()
            # yields bytes here, so the sentinel must be a bytes literal
            # (a str argument raises TypeError).
            while not file.readline().startswith(b'end_header'):
                pass

        obj = obj_spec.load(format_specs[format], file)

    except IOError:
        # Best-effort close; the file may never have been opened.
        try:
            file.close()
        except Exception:
            pass

        return None
    try:
        file.close()
    except Exception:
        pass

    return obj_spec, obj
|
||||
|
||||
|
||||
import bpy
|
||||
|
||||
|
||||
def load_ply(filepath):
    """Parse the PLY file at *filepath* and build a Blender mesh object.

    Creates the mesh, links it into the current scene and makes it the
    active, selected object. Prints a message and returns early when the
    file cannot be parsed.
    """
    import time
    from io_utils import load_image, unpack_list, unpack_face_list

    t = time.time()
    # Fix: read() returns None on failure, so unpacking its result
    # directly raised TypeError before the None check could run.
    result = read(filepath)
    if result is None:
        print('Invalid file')
        return
    obj_spec, obj = result

    uvindices = colindices = None
    # noindices = None # Ignore normals

    # Locate the columns of interest in the vertex/face elements.
    for el in obj_spec.specs:
        if el.name == 'vertex':
            vindices = vindices_x, vindices_y, vindices_z = (el.index('x'), el.index('y'), el.index('z'))
            # noindices = (el.index('nx'), el.index('ny'), el.index('nz'))
            # if -1 in noindices: noindices = None
            uvindices = (el.index('s'), el.index('t'))
            if -1 in uvindices:
                uvindices = None
            colindices = (el.index('red'), el.index('green'), el.index('blue'))
            if -1 in colindices:
                colindices = None
        elif el.name == 'face':
            findex = el.index('vertex_indices')

    mesh_faces = []
    mesh_uvs = []
    mesh_colors = []

    def add_face(vertices, indices, uvindices, colindices):
        mesh_faces.append(indices)
        if uvindices:
            mesh_uvs.append([(vertices[index][uvindices[0]], 1.0 - vertices[index][uvindices[1]]) for index in indices])
        if colindices:
            mesh_colors.append([(vertices[index][colindices[0]], vertices[index][colindices[1]], vertices[index][colindices[2]]) for index in indices])

    if uvindices or colindices:
        # If we have Cols or UVs then we need to check the face order.
        add_face_simple = add_face

        # EVIL EEKADOODLE - face order annoyance: a zero index must not
        # appear in slot 3/4, so rotate the face indices when it does.
        def add_face(vertices, indices, uvindices, colindices):
            if len(indices) == 4:
                if indices[2] == 0 or indices[3] == 0:
                    indices = indices[2], indices[3], indices[0], indices[1]
            elif len(indices) == 3:
                if indices[2] == 0:
                    indices = indices[1], indices[2], indices[0]

            add_face_simple(vertices, indices, uvindices, colindices)

    verts = obj['vertex']

    if 'face' in obj:
        for f in obj['face']:
            ind = f[findex]
            len_ind = len(ind)
            if len_ind <= 4:
                add_face(verts, ind, uvindices, colindices)
            else:
                # Fan fill the face
                for j in range(len_ind - 2):
                    add_face(verts, (ind[0], ind[j + 1], ind[j + 2]), uvindices, colindices)

    ply_name = bpy.path.display_name_from_filepath(filepath)

    mesh = bpy.data.meshes.new(name=ply_name)

    mesh.vertices.add(len(obj['vertex']))

    mesh.vertices.foreach_set("co", [a for v in obj['vertex'] for a in (v[vindices_x], v[vindices_y], v[vindices_z])])

    if mesh_faces:
        mesh.faces.add(len(mesh_faces))
        mesh.faces.foreach_set("vertices_raw", unpack_face_list(mesh_faces))

        if uvindices or colindices:
            if uvindices:
                uvlay = mesh.uv_textures.new()
            if colindices:
                vcol_lay = mesh.vertex_colors.new()

            if uvindices:
                for i, f in enumerate(uvlay.data):
                    ply_uv = mesh_uvs[i]
                    for j, uv in enumerate(f.uv):
                        uv[:] = ply_uv[j]

            if colindices:
                faces = obj['face']
                for i, f in enumerate(vcol_lay.data):
                    # XXX, colors dont come in right, needs further investigation.
                    ply_col = mesh_colors[i]
                    if len(faces[i]) == 4:
                        f_col = f.color1, f.color2, f.color3, f.color4
                    else:
                        f_col = f.color1, f.color2, f.color3

                    for j, col in enumerate(f_col):
                        col.r, col.g, col.b = ply_col[j]

    mesh.update()

    scn = bpy.context.scene
    #scn.objects.selected = [] # XXX25

    obj = bpy.data.objects.new(ply_name, mesh)
    scn.objects.link(obj)
    scn.objects.active = obj
    obj.select = True

    print('\nSuccessfully imported %r in %.3f sec' % (filepath, time.time() - t))
|
||||
|
||||
|
||||
def load(operator, context, filepath=""):
    """Operator entry point: import the PLY file at *filepath*."""
    load_ply(filepath)
    return {'FINISHED'}
|
@ -1,88 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# To support reload properly, try to access a package var, if it's there, reload everything
|
||||
# Support Blender's 'Reload Scripts': when this module is re-executed,
# "bpy" is already in locals(), so the submodules must be reloaded
# explicitly for edits to take effect.
if "bpy" in locals():
    import imp
    if "import_3ds" in locals():
        imp.reload(import_3ds)
    if "export_3ds" in locals():
        imp.reload(export_3ds)
|
||||
|
||||
|
||||
import bpy
|
||||
from bpy.props import *
|
||||
from io_utils import ImportHelper, ExportHelper
|
||||
|
||||
|
||||
class Import3DS(bpy.types.Operator, ImportHelper):
    '''Import from 3DS file format (.3ds)'''
    bl_idname = "import_scene.autodesk_3ds"
    bl_label = 'Import 3DS'

    # Default extension and a hidden file filter for the file selector.
    filename_ext = ".3ds"
    filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})

    # Import options. (Typo fixes in descriptions: "reacehs" -> "reaches",
    # "assosiated" -> "associated".)
    constrain_size = FloatProperty(name="Size Constraint", description="Scale the model by 10 until it reaches the size constraint. Zero Disables.", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0)
    use_image_search = BoolProperty(name="Image Search", description="Search subdirectories for any associated images (Warning, may be slow)", default=True)
    use_apply_transform = BoolProperty(name="Apply Transform", description="Workaround for object transformations importing incorrectly", default=True)

    def execute(self, context):
        # Deferred import: the parser is only loaded when actually used.
        from . import import_3ds
        return import_3ds.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
|
||||
|
||||
|
||||
class Export3DS(bpy.types.Operator, ExportHelper):
    '''Export to 3DS file format (.3ds)'''
    bl_idname = "export_scene.autodesk_3ds"
    bl_label = 'Export 3DS'

    # Default extension appended by ExportHelper plus a hidden file filter.
    filename_ext = ".3ds"
    filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})

    def execute(self, context):
        # Deferred import: the exporter is only loaded when actually used.
        from . import export_3ds
        return export_3ds.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
|
||||
|
||||
|
||||
# Add to a menu
def menu_func_export(self, context):
    """File > Export menu entry for 3D Studio."""
    self.layout.operator(Export3DS.bl_idname, text="3D Studio (.3ds)")
|
||||
|
||||
|
||||
def menu_func_import(self, context):
    """File > Import menu entry for 3D Studio."""
    self.layout.operator(Import3DS.bl_idname, text="3D Studio (.3ds)")
|
||||
|
||||
|
||||
def register():
    """Hook the 3DS operators into the File > Import/Export menus."""
    bpy.types.INFO_MT_file_import.append(menu_func_import)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
|
||||
|
||||
|
||||
def unregister():
    """Remove the 3DS menu entries added by register()."""
    bpy.types.INFO_MT_file_import.remove(menu_func_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
|
||||
|
||||
# NOTES:
# why add 1 extra vertex? and remove it when done? - "Answer - eekadoodle - would need to re-order UVs without this since face order isn't always what we give Blender; BMesh will solve :D"
# disabled scaling to size; this requires exposing bb (easy) and understanding how it works (needs some time)
|
||||
|
||||
# Allow running this file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
|
File diff suppressed because it is too large
Load Diff
@ -1,894 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# Script copyright (C) Bob Holcomb
|
||||
# Contributors: Bob Holcomb, Richard L?rk?ng, Damien McGinnes, Campbell Barton, Mario Lapin, Dominique Lorre
|
||||
|
||||
import os
|
||||
import time
|
||||
import struct
|
||||
|
||||
from io_utils import load_image
|
||||
|
||||
import bpy
|
||||
import mathutils
|
||||
|
||||
# Accumulated object bounds; populated during import.
BOUNDS_3DS = []


######################################################
# Data Structures
######################################################

# Chunk IDs from the 3DS file format. Each chunk on disk is a 6-byte
# header (uint16 id, uint32 length) followed by its payload.

#Some of the chunks that we will see
#----- Primary Chunk, at the beginning of each file
PRIMARY = int('0x4D4D',16)

#------ Main Chunks
OBJECTINFO = 0x3D3D  # This gives the version of the mesh and is found right before the material and object information
VERSION = 0x0002  # This gives the version of the .3ds file
EDITKEYFRAME= 0xB000  # This is the header for all of the key frame info

#------ sub defines of OBJECTINFO
MATERIAL = 45055  # 0xAFFF // This stored the texture info
OBJECT = 16384  # 0x4000 // This stores the faces, vertices, etc...

#>------ sub defines of MATERIAL
#------ sub defines of MATERIAL_BLOCK
MAT_NAME = 0xA000  # This holds the material name
MAT_AMBIENT = 0xA010  # Ambient color of the object/material
MAT_DIFFUSE = 0xA020  # This holds the color of the object/material
MAT_SPECULAR = 0xA030  # Specular color of the object/material
MAT_SHINESS = 0xA040  # shininess ratio; presumably maps to specular hardness - TODO confirm
MAT_TRANSPARENCY= 0xA050  # Transparency value of material
MAT_SELF_ILLUM = 0xA080  # Self Illumination value of material
MAT_WIRE = 0xA085  # Only render's wireframe

MAT_TEXTURE_MAP = 0xA200  # This is a header for a new texture map
MAT_SPECULAR_MAP= 0xA204  # This is a header for a new specular map
MAT_OPACITY_MAP = 0xA210  # This is a header for a new opacity map
MAT_REFLECTION_MAP= 0xA220  # This is a header for a new reflection map
MAT_BUMP_MAP = 0xA230  # This is a header for a new bump map
MAT_MAP_FILEPATH = 0xA300  # This holds the file name of the texture

MAT_FLOAT_COLOR = 0x0010  # color defined as 3 floats
MAT_24BIT_COLOR = 0x0011  # color defined as 3 bytes

#>------ sub defines of OBJECT
OBJECT_MESH = 0x4100  # This lets us know that we are reading a new object
OBJECT_LAMP = 0x4600  # This lets us know we are reading a light object
OBJECT_LAMP_SPOT = 0x4610  # The light is a spotlight.
OBJECT_LAMP_OFF = 0x4620  # The light is off.
OBJECT_LAMP_ATTENUATE = 0x4625
OBJECT_LAMP_RAYSHADE = 0x4627
OBJECT_LAMP_SHADOWED = 0x4630
OBJECT_LAMP_LOCAL_SHADOW = 0x4640
OBJECT_LAMP_LOCAL_SHADOW2 = 0x4641
OBJECT_LAMP_SEE_CONE = 0x4650
OBJECT_LAMP_SPOT_RECTANGULAR = 0x4651
OBJECT_LAMP_SPOT_OVERSHOOT = 0x4652
OBJECT_LAMP_SPOT_PROJECTOR = 0x4653
OBJECT_LAMP_EXCLUDE = 0x4654
OBJECT_LAMP_RANGE = 0x4655
OBJECT_LAMP_ROLL = 0x4656
OBJECT_LAMP_SPOT_ASPECT = 0x4657
OBJECT_LAMP_RAY_BIAS = 0x4658
OBJECT_LAMP_INNER_RANGE = 0x4659
OBJECT_LAMP_OUTER_RANGE = 0x465A
OBJECT_LAMP_MULTIPLIER = 0x465B
OBJECT_LAMP_AMBIENT_LIGHT = 0x4680



OBJECT_CAMERA= 0x4700  # This lets us know we are reading a camera object

#>------ sub defines of CAMERA
OBJECT_CAM_RANGES= 0x4720  # The camera range values

#>------ sub defines of OBJECT_MESH
OBJECT_VERTICES = 0x4110  # The objects vertices
OBJECT_FACES = 0x4120  # The objects faces
OBJECT_MATERIAL = 0x4130  # This is found if the object has a material, either texture map or color
OBJECT_UV = 0x4140  # The UV texture coordinates
OBJECT_TRANS_MATRIX = 0x4160  # The Object Matrix

#>------ sub defines of EDITKEYFRAME
# ED_KEY_AMBIENT_NODE = 0xB001
ED_KEY_OBJECT_NODE = 0xB002
# ED_KEY_CAMERA_NODE = 0xB003
# ED_KEY_TARGET_NODE = 0xB004
# ED_KEY_LIGHT_NODE = 0xB005
# ED_KEY_L_TARGET_NODE = 0xB006
# ED_KEY_SPOTLIGHT_NODE = 0xB007
#>------ sub defines of ED_KEY_OBJECT_NODE
# EK_OB_KEYFRAME_SEG = 0xB008
# EK_OB_KEYFRAME_CURTIME = 0xB009
# EK_OB_KEYFRAME_HEADER = 0xB00A
EK_OB_NODE_HEADER = 0xB010
EK_OB_INSTANCE_NAME = 0xB011
# EK_OB_PRESCALE = 0xB012
# EK_OB_PIVOT = 0xB013
# EK_OB_BOUNDBOX = 0xB014
# EK_OB_MORPH_SMOOTH = 0xB015
EK_OB_POSITION_TRACK = 0xB020
EK_OB_ROTATION_TRACK = 0xB021
EK_OB_SCALE_TRACK = 0xB022
# EK_OB_CAMERA_FOV_TRACK = 0xB023
# EK_OB_CAMERA_ROLL_TRACK = 0xB024
# EK_OB_COLOR_TRACK = 0xB025
# EK_OB_MORPH_TRACK = 0xB026
# EK_OB_HOTSPOT_TRACK = 0xB027
# EK_OB_FALLOF_TRACK = 0xB028
# EK_OB_HIDE_TRACK = 0xB029
# EK_OB_NODE_ID = 0xB030

ROOT_OBJECT = 0xFFFF

# Module-level import state (the 'global' statements here are no-ops at
# module scope; kept for the original author's documentation intent).
global scn
scn = None
global object_dictionary  # dictionary for object hierarchy
object_dictionary = {}
|
||||
|
||||
|
||||
#the chunk class
|
||||
class chunk:
    """Header record for a single 3DS chunk.

    On disk a chunk starts with a 2-byte ID followed by a 4-byte total
    length; ``bytes_read`` tracks how much of the chunk (header included)
    has been consumed so far.
    """
    ID = 0
    length = 0
    bytes_read = 0

    # On-disk header layout: unsigned short ID + unsigned int length.
    # bytes_read is computed by the reader, never stored in the file.
    binary_format = '<HI'

    def __init__(self):
        self.ID = 0
        self.length = 0
        self.bytes_read = 0

    def dump(self):
        """Print the header fields (debugging aid)."""
        for label, value in (('ID: ', self.ID),
                             ('ID in hex: ', hex(self.ID)),
                             ('length: ', self.length),
                             ('bytes_read: ', self.bytes_read)):
            print(label, value)
||||
|
||||
def read_chunk(file, chunk):
    """Read the next 6-byte chunk header from *file* into *chunk*."""
    header = file.read(struct.calcsize(chunk.binary_format))
    chunk.ID, chunk.length = struct.unpack(chunk.binary_format, header)
    # The 6 header bytes themselves count toward this chunk's consumption.
    chunk.bytes_read = 6

    # uncomment for debugging:
    #chunk.dump()
||||
|
||||
def read_string(file):
    """Read a NUL-terminated string from *file*.

    Returns ``(text, bytes_consumed)`` where bytes_consumed includes the
    terminating NUL byte.
    """
    pieces = []
    while True:
        # struct.unpack raises if the stream ends before the terminator,
        # matching the original behavior on truncated files.
        ch = struct.unpack('<c', file.read(1))[0]
        if ch == b'\x00':
            break
        pieces.append(ch)
    raw = b''.join(pieces)

    # Decode leniently: replace undecodable bytes rather than failing.
    return str(raw, "utf-8", "replace"), len(raw) + 1
||||
|
||||
######################################################
|
||||
# IMPORT
|
||||
######################################################
|
||||
def process_next_object_chunk(file, previous_chunk):
    """Consume every sub-chunk of *previous_chunk* without interpreting it.

    NOTE(review): this helper is not referenced by the visible code — the
    real parsing happens in process_next_chunk().
    """
    new_chunk = chunk()

    while (previous_chunk.bytes_read < previous_chunk.length):
        #read the next chunk
        read_chunk(file, new_chunk)
        # BUG FIX: the original loop read chunk headers forever — it never
        # consumed each sub-chunk's payload nor credited
        # previous_chunk.bytes_read, so the loop condition never became
        # false. Skip the payload and account for the bytes consumed.
        skip_to_end(file, new_chunk)
        previous_chunk.bytes_read += new_chunk.bytes_read
||||
|
||||
def skip_to_end(file, skip_chunk):
    """Discard the unread remainder of *skip_chunk*'s payload.

    Advances the file position past the chunk and marks the chunk fully
    consumed (``bytes_read == length``).
    """
    remaining = skip_chunk.length - skip_chunk.bytes_read
    # '%ic' % n has calcsize n, so this reads exactly the leftover bytes.
    file.read(struct.calcsize('%ic' % remaining))
    skip_chunk.bytes_read += remaining
||||
|
||||
|
||||
def add_texture_to_material(image, texture, material, mapto):
    """Attach *texture* (optionally carrying *image*) to *material*.

    *mapto* selects the influence channel: "COLOR", "SPECULARITY", "ALPHA"
    or "NORMAL". Unknown values fall back to diffuse color with a warning.
    """
    if mapto not in ("COLOR", "SPECULARITY", "ALPHA", "NORMAL"):
        # BUG FIX: the original message started with '/t' — a broken tab escape.
        print('\tError: Cannot map to "%s"\n\tassuming diffuse color. modify material "%s" later.' % (mapto, material.name))
        mapto = "COLOR"

    if image:
        texture.image = image

    # New texture slot on the material; diffuse influence off by default so
    # only the selected channel ends up enabled.
    mtex = material.texture_slots.add()
    mtex.texture = texture
    mtex.texture_coords = 'UV'
    mtex.use_map_color_diffuse = False

    if mapto == 'COLOR':
        mtex.use_map_color_diffuse = True
    elif mapto == 'SPECULARITY':
        mtex.use_map_specular = True
    elif mapto == 'ALPHA':
        mtex.use_map_alpha = True
    elif mapto == 'NORMAL':
        mtex.use_map_normal = True
||||
|
||||
|
||||
def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH):
    """Parse every sub-chunk of *previous_chunk* from the 3DS stream.

    Builds Blender meshes, lamps and materials and appends created objects
    to *importedObjects*. Relies on the module-level SCN (active scene) and
    object_dictionary globals. Recurses once for the OBJECTINFO chunk.
    """
    contextObName = None
    contextLamp = [None, None] # object, Data
    contextMaterial = None
    contextMatrix_rot = None # Blender.mathutils.Matrix(); contextMatrix.identity()
    #contextMatrix_tx = None # Blender.mathutils.Matrix(); contextMatrix.identity()
    contextMesh_vertls = None # flat array: (verts * 3)
    contextMesh_facels = None
    contextMeshMaterials = {} # matname:[face_idxs]
    contextMeshUV = None # flat array (verts * 2)

    TEXTURE_DICT = {}   # material name -> loaded image (or None)
    MATDICT = {}        # raw 3DS material name -> (blender name, material)
    # TEXMODE = Mesh.FaceModes['TEX']

    # Localspace variable names, faster.
    STRUCT_SIZE_1CHAR = struct.calcsize('c')
    STRUCT_SIZE_2FLOAT = struct.calcsize('2f')
    STRUCT_SIZE_3FLOAT = struct.calcsize('3f')
    STRUCT_SIZE_4FLOAT = struct.calcsize('4f')
    STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H')
    STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H')
    STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff')
    _STRUCT_SIZE_4x3MAT = struct.calcsize('fffffffffffff')

    # only init once
    object_list = [] # for hierarchy
    object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent

    def putContextMesh(myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials):
        """Create a Blender mesh object from the accumulated context data."""
        bmesh = bpy.data.meshes.new(contextObName)

        if myContextMesh_facels is None:
            myContextMesh_facels = []

        if myContextMesh_vertls:

            bmesh.vertices.add(len(myContextMesh_vertls)//3)
            bmesh.faces.add(len(myContextMesh_facels))
            bmesh.vertices.foreach_set("co", myContextMesh_vertls)

            # Rotate any face whose last index is 0: a zero in the final slot
            # of vertices_raw means "no 4th vertex" to Blender (eekadoodle).
            eekadoodle_faces = []
            for v1, v2, v3 in myContextMesh_facels:
                eekadoodle_faces.extend([v3, v1, v2, 0] if v3 == 0 else [v1, v2, v3, 0])
            bmesh.faces.foreach_set("vertices_raw", eekadoodle_faces)

            if bmesh.faces and contextMeshUV:
                bmesh.uv_textures.new()
                uv_faces = bmesh.uv_textures.active.data[:]
            else:
                uv_faces = None

            for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials.items()):
                if matName is None:
                    bmat = None
                else:
                    bmat = MATDICT[matName][1]
                    img = TEXTURE_DICT.get(bmat.name)
                # NOTE(review): 'img' is only bound in the else branch; if the
                # first material is None it is read below while unbound — confirm.

                bmesh.materials.append(bmat) # can be None

                if uv_faces and img:
                    for fidx in faces:
                        bmesh.faces[fidx].material_index = mat_idx
                        uf = uv_faces[fidx]
                        uf.image = img
                        uf.use_image = True
                else:
                    for fidx in faces:
                        bmesh.faces[fidx].material_index = mat_idx

            if uv_faces:
                for fidx, uf in enumerate(uv_faces):
                    face = myContextMesh_facels[fidx]
                    v1, v2, v3 = face

                    # eekadoodle: keep UVs consistent with the rotated indices
                    if v3 == 0:
                        v1, v2, v3 = v3, v1, v2

                    uf.uv1 = contextMeshUV[v1 * 2:(v1 * 2) + 2]
                    uf.uv2 = contextMeshUV[v2 * 2:(v2 * 2) + 2]
                    uf.uv3 = contextMeshUV[v3 * 2:(v3 * 2) + 2]
                    # always a tri

        ob = bpy.data.objects.new(contextObName, bmesh)
        object_dictionary[contextObName] = ob
        SCN.objects.link(ob)

        '''
        if contextMatrix_tx:
            ob.setMatrix(contextMatrix_tx)
        '''

        if contextMatrix_rot:
            ob.matrix_local = contextMatrix_rot

        importedObjects.append(ob)
        bmesh.update()

    #a spare chunk
    new_chunk = chunk()
    temp_chunk = chunk()

    # becomes True once an OBJECT chunk has opened a mesh context
    CreateBlenderObject = False

    def read_float_color(temp_chunk):
        # three little-endian floats == 12 bytes
        temp_data = file.read(struct.calcsize('3f'))
        temp_chunk.bytes_read += 12
        return [float(col) for col in struct.unpack('<3f', temp_data)]

    def read_byte_color(temp_chunk):
        temp_data = file.read(struct.calcsize('3B'))
        temp_chunk.bytes_read += 3
        return [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb

    def read_texture(new_chunk, temp_chunk, name, mapto):
        """Read a texture-map chunk and hook the image up to contextMaterial."""
        new_texture = bpy.data.textures.new(name, type='IMAGE')

        img = None
        while (new_chunk.bytes_read < new_chunk.length):
            read_chunk(file, temp_chunk)

            if (temp_chunk.ID == MAT_MAP_FILEPATH):
                texture_name, read_str_len = read_string(file)
                img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
                new_chunk.bytes_read += read_str_len #plus one for the null character that gets removed

            else:
                skip_to_end(file, temp_chunk)

            new_chunk.bytes_read += temp_chunk.bytes_read

        # add the map to the material in the right channel
        if img:
            add_texture_to_material(img, new_texture, contextMaterial, mapto)

    # textures are resolved relative to the .3ds file's directory
    dirname = os.path.dirname(file.name)

    #loop through all the data for this chunk (previous chunk) and see what it is
    while (previous_chunk.bytes_read < previous_chunk.length):
        #read the next chunk
        read_chunk(file, new_chunk)

        #is it a Version chunk?
        if (new_chunk.ID == VERSION):
            #read in the version of the file; it's an unsigned int here
            temp_data = file.read(struct.calcsize('I'))
            version = struct.unpack('<I', temp_data)[0]
            new_chunk.bytes_read += 4 #read the 4 bytes for the version number
            #this loader works with version 3 and below, but may not with 4 and above
            if (version > 3):
                print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version)

        #is it an object info chunk?
        elif (new_chunk.ID == OBJECTINFO):
            # Recurse: everything of interest lives under OBJECTINFO.
            process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH)

            #keep track of how much we read in the main chunk
            # NOTE(review): temp_chunk was not touched by the recursive call
            # (it uses its own locals), so this adds a stale value — confirm.
            new_chunk.bytes_read += temp_chunk.bytes_read

        #is it an object chunk?
        elif (new_chunk.ID == OBJECT):

            if CreateBlenderObject:
                # flush the previous object before starting a new one
                putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
                contextMesh_vertls = []; contextMesh_facels = []

                # prepare to receive the next object
                contextMeshMaterials = {} # matname:[face_idxs]
                contextMeshUV = None
                #contextMesh.vertexUV = 1 # Make sticky coords.
                # Reset matrix
                contextMatrix_rot = None
                #contextMatrix_tx = None

            CreateBlenderObject = True
            contextObName, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len

        #is it a material chunk?
        elif (new_chunk.ID == MATERIAL):
            # real name arrives in the following MAT_NAME chunk
            contextMaterial = bpy.data.materials.new('Material')

        elif (new_chunk.ID == MAT_NAME):
            material_name, read_str_len = read_string(file)

            #plus one for the null character that ended the string
            new_chunk.bytes_read += read_str_len

            contextMaterial.name = material_name.rstrip() # remove trailing whitespace
            MATDICT[material_name]= (contextMaterial.name, contextMaterial)

        elif (new_chunk.ID == MAT_AMBIENT):
            # 3DS ambient color is mapped onto Blender's mirror color
            read_chunk(file, temp_chunk)
            if (temp_chunk.ID == MAT_FLOAT_COLOR):
                contextMaterial.mirror_color = read_float_color(temp_chunk)
            elif (temp_chunk.ID == MAT_24BIT_COLOR):
                contextMaterial.mirror_color = read_byte_color(temp_chunk)
            else:
                skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif (new_chunk.ID == MAT_DIFFUSE):
            read_chunk(file, temp_chunk)
            if (temp_chunk.ID == MAT_FLOAT_COLOR):
                contextMaterial.diffuse_color = read_float_color(temp_chunk)
            elif (temp_chunk.ID == MAT_24BIT_COLOR):
                contextMaterial.diffuse_color = read_byte_color(temp_chunk)
            else:
                skip_to_end(file, temp_chunk)

            new_chunk.bytes_read += temp_chunk.bytes_read

        elif (new_chunk.ID == MAT_SPECULAR):
            read_chunk(file, temp_chunk)
            if (temp_chunk.ID == MAT_FLOAT_COLOR):
                contextMaterial.specular_color = read_float_color(temp_chunk)
            elif (temp_chunk.ID == MAT_24BIT_COLOR):
                contextMaterial.specular_color = read_byte_color(temp_chunk)
            else:
                skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif (new_chunk.ID == MAT_TEXTURE_MAP):
            read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")

        elif (new_chunk.ID == MAT_SPECULAR_MAP):
            read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")

        elif (new_chunk.ID == MAT_OPACITY_MAP):
            read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")

        elif (new_chunk.ID == MAT_BUMP_MAP):
            read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")

        elif (new_chunk.ID == MAT_TRANSPARENCY):
            read_chunk(file, temp_chunk)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)

            temp_chunk.bytes_read += 2
            # 3DS stores transparency as a 0-100 percentage; Blender wants alpha
            contextMaterial.alpha = 1-(float(struct.unpack('<H', temp_data)[0])/100)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif (new_chunk.ID == OBJECT_LAMP): # Basic lamp support.

            temp_data = file.read(STRUCT_SIZE_3FLOAT)

            x,y,z = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT

            # no lamp in dict that would be confusing
            contextLamp[1] = bpy.data.lamps.new("Lamp", 'POINT')
            contextLamp[0] = ob = bpy.data.objects.new("Lamp", contextLamp[1])

            SCN.objects.link(ob)
            importedObjects.append(contextLamp[0])

            contextLamp[0].location = (x, y, z)

            # Reset matrix
            contextMatrix_rot = None
            #contextMatrix_tx = None

        elif (new_chunk.ID == OBJECT_MESH):
            # mesh data arrives in the sub-chunks handled below
            pass

        elif (new_chunk.ID == OBJECT_VERTICES):
            '''
            Worldspace vertex locations
            '''
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_verts = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            # flat tuple of num_verts * 3 floats
            contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(STRUCT_SIZE_3FLOAT * num_verts))
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT * num_verts
            # dummyvert is not used atm!

        elif (new_chunk.ID == OBJECT_FACES):
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_faces = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT * num_faces)
            new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces #4 short ints x 2 bytes each
            contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data)
            # keep only the 3 vertex indices per face, dropping the flags word
            contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)]

        elif (new_chunk.ID == OBJECT_MATERIAL):
            material_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len # remove 1 null character.

            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_faces_using_mat = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT

            # face indices that use this material
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat

            contextMeshMaterials[material_name]= struct.unpack("<%dH" % (num_faces_using_mat), temp_data)

            #look up the material in all the materials

        elif (new_chunk.ID == OBJECT_UV):
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_uv = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            temp_data = file.read(STRUCT_SIZE_2FLOAT * num_uv)
            new_chunk.bytes_read += STRUCT_SIZE_2FLOAT * num_uv
            # flat tuple of num_uv * 2 floats, indexed per-vertex later
            contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)

        elif (new_chunk.ID == OBJECT_TRANS_MATRIX):
            # How do we know the matrix size? 54 == 4x4 48 == 4x3
            temp_data = file.read(STRUCT_SIZE_4x3MAT)
            data = list( struct.unpack('<ffffffffffff', temp_data) )
            new_chunk.bytes_read += STRUCT_SIZE_4x3MAT

            # rows 0-2 are the 3x3 rotation/scale part, row 3 the translation
            contextMatrix_rot = mathutils.Matrix((data[:3] + [0], \
                                                  data[3:6] + [0], \
                                                  data[6:9] + [0], \
                                                  data[9:] + [1], \
                                                  ))

        elif (new_chunk.ID == MAT_MAP_FILEPATH):
            texture_name, read_str_len = read_string(file)
            # only load the image the first time this material is seen
            # NOTE(review): bare except — any lookup failure triggers a load
            try:
                TEXTURE_DICT[contextMaterial.name]
            except:
                #img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILEPATH)
                img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)

            new_chunk.bytes_read += read_str_len #plus one for the null character that gets removed

        elif new_chunk.ID == EDITKEYFRAME:
            pass

        elif new_chunk.ID == ED_KEY_OBJECT_NODE: #another object is being processed
            child = None

        elif new_chunk.ID == EK_OB_NODE_HEADER:
            object_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len
            # two unsigned-short flag words, read and discarded
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
            new_chunk.bytes_read += 4
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            hierarchy = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            child = object_dictionary.get(object_name)

            if child is None:
                child = bpy.data.objects.new(object_name, None) # create an empty object
                SCN.objects.link(child)

            object_list.append(child)
            object_parent.append(hierarchy)

        elif new_chunk.ID == EK_OB_INSTANCE_NAME:
            object_name, read_str_len = read_string(file)
            child.name = object_name
            object_dictionary[object_name] = child
            new_chunk.bytes_read += read_str_len

        elif new_chunk.ID == EK_OB_POSITION_TRACK: # translation
            # skip the 5-short track header
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_3FLOAT)
                loc = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
                # only the rest pose (frame 0) is applied
                if nframe == 0:
                    child.location = loc

        elif new_chunk.ID == EK_OB_ROTATION_TRACK: # rotation
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_4FLOAT)
                # angle (radians) followed by rotation axis
                rad,axis_x,axis_y,axis_z = struct.unpack('<4f', temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_4FLOAT
                if nframe == 0:
                    child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler() # why negative?

        elif new_chunk.ID == EK_OB_SCALE_TRACK: # scale
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_3FLOAT)
                sca = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
                if nframe == 0:
                    child.scale = sca

        else: #(new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
            # unknown chunk: skip its remaining payload
            #print("unknown chunk: "+hex(new_chunk.ID))
            buffer_size = new_chunk.length - new_chunk.bytes_read
            binary_format='%ic' % buffer_size
            temp_data = file.read(struct.calcsize(binary_format))
            new_chunk.bytes_read += buffer_size

        #update the previous chunk bytes read
        previous_chunk.bytes_read += new_chunk.bytes_read

    # FINISHED LOOP
    # There will be a number of objects still not added
    if CreateBlenderObject:
        putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)

    # Assign parents to objects
    for ind, ob in enumerate(object_list):
        parent = object_parent[ind]
        if parent == ROOT_OBJECT:
            ob.parent = None
        else:
            ob.parent = object_list[parent]
||||
|
||||
|
||||
def load_3ds(filepath, context, IMPORT_CONSTRAIN_BOUNDS=10.0, IMAGE_SEARCH=True, APPLY_MATRIX=True):
    """Import the 3DS file at *filepath* into *context*'s scene.

    IMPORT_CONSTRAIN_BOUNDS: intended to scale the model down until it fits
    this bound (0.0 disables) — note the actual constraining code is behind
    ``if 0:`` below and never runs.
    IMAGE_SEARCH: forwarded to the chunk parser for texture lookup.
    APPLY_MATRIX: bake each imported mesh's local matrix into its vertices.
    """
    global SCN

    # XXX
    # if BPyMessages.Error_NoFile(filepath):
    #     return

    print("importing 3DS: %r..." % (filepath), end="")

    time1 = time.clock()

    current_chunk = chunk()

    file = open(filepath, 'rb')

    #here we go!
    read_chunk(file, current_chunk)
    # a valid 3ds file must start with the PRIMARY chunk
    if (current_chunk.ID!=PRIMARY):
        print('\tFatal Error: Not a valid 3ds file: %r' % filepath)
        file.close()
        return


    # IMPORT_AS_INSTANCE = Blender.Draw.Create(0)
    # IMPORT_CONSTRAIN_BOUNDS = Blender.Draw.Create(10.0)
    # IMAGE_SEARCH = Blender.Draw.Create(1)
    # APPLY_MATRIX = Blender.Draw.Create(0)

    # Get USER Options
    # pup_block = [\
    # ('Size Constraint:', IMPORT_CONSTRAIN_BOUNDS, 0.0, 1000.0, 'Scale the model by 10 until it reacehs the size constraint. Zero Disables.'),\
    # ('Image Search', IMAGE_SEARCH, 'Search subdirs for any assosiated images (Warning, may be slow)'),\
    # ('Transform Fix', APPLY_MATRIX, 'Workaround for object transformations importing incorrectly'),\
    # #('Group Instance', IMPORT_AS_INSTANCE, 'Import objects into a new scene and group, creating an instance in the current scene.'),\
    # ]

    # if PREF_UI:
    #     if not Blender.Draw.PupBlock('Import 3DS...', pup_block):
    #         return

    # Blender.Window.WaitCursor(1)

    # IMPORT_CONSTRAIN_BOUNDS = IMPORT_CONSTRAIN_BOUNDS.val
    # # IMPORT_AS_INSTANCE = IMPORT_AS_INSTANCE.val
    # IMAGE_SEARCH = IMAGE_SEARCH.val
    # APPLY_MATRIX = APPLY_MATRIX.val

    # initialise the min/max accumulator used by the (disabled) bounds code
    if IMPORT_CONSTRAIN_BOUNDS:
        BOUNDS_3DS[:]= [1<<30, 1<<30, 1<<30, -1<<30, -1<<30, -1<<30]
    else:
        BOUNDS_3DS[:]= []

    ##IMAGE_SEARCH

    # fixme, make unglobal, clear incase
    object_dictionary.clear()

    scn = context.scene
    # scn = bpy.data.scenes.active
    SCN = scn
    # SCN_OBJECTS = scn.objects
    # SCN_OBJECTS.selected = [] # de select all

    importedObjects = [] # Fill this list with objects
    process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH)

    # fixme, make unglobal
    object_dictionary.clear()

    # Link the objects into this scene.
    # Layers = scn.Layers

    # REMOVE DUMMYVERT, - remove this in the next release when blenders internal are fixed.

    if APPLY_MATRIX:
        for ob in importedObjects:
            if ob.type == 'MESH':
                me = ob.data
                # NOTE(review): relies on Matrix.invert() returning the matrix;
                # in later mathutils versions invert() is in-place and returns
                # None — confirm against the target Blender API version.
                me.transform(ob.matrix_local.copy().invert())

    # Done DUMMYVERT
    """
    if IMPORT_AS_INSTANCE:
        name = filepath.split('\\')[-1].split('/')[-1]
        # Create a group for this import.
        group_scn = Scene.New(name)
        for ob in importedObjects:
            group_scn.link(ob) # dont worry about the layers

        grp = Blender.Group.New(name)
        grp.objects = importedObjects

        grp_ob = Object.New('Empty', name)
        grp_ob.enableDupGroup = True
        grp_ob.DupGroup = grp
        scn.link(grp_ob)
        grp_ob.Layers = Layers
        grp_ob.sel = 1
    else:
        # Select all imported objects.
        for ob in importedObjects:
            scn.link(ob)
            ob.Layers = Layers
            ob.sel = 1
    """

    # dead code: size-constraint logic kept for reference but disabled
    if 0:
# 	if IMPORT_CONSTRAIN_BOUNDS!=0.0:
        # Set bounds from objecyt bounding box
        for ob in importedObjects:
            if ob.type == 'MESH':
# 			if ob.type=='Mesh':
                ob.makeDisplayList() # Why dosnt this update the bounds?
                for v in ob.getBoundBox():
                    for i in (0,1,2):
                        if v[i] < BOUNDS_3DS[i]:
                            BOUNDS_3DS[i]= v[i] # min

                        if v[i] > BOUNDS_3DS[i + 3]:
                            BOUNDS_3DS[i + 3]= v[i] # min

        # Get the max axis x/y/z
        max_axis = max(BOUNDS_3DS[3]-BOUNDS_3DS[0], BOUNDS_3DS[4]-BOUNDS_3DS[1], BOUNDS_3DS[5]-BOUNDS_3DS[2])
        # print max_axis
        if max_axis < 1 << 30: # Should never be false but just make sure.

            # Get a new scale factor if set as an option
            SCALE = 1.0
            while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS:
                SCALE/=10

            # SCALE Matrix
            SCALE_MAT = mathutils.Matrix.Scale(SCALE, 4)

            for ob in importedObjects:
                if ob.parent is None:
                    ob.matrix_world = ob.matrix_world * SCALE_MAT

        # Done constraining to bounds.

    # Select all new objects.
    print(" done in %.4f sec." % (time.clock()-time1))
    file.close()
||||
|
||||
|
||||
def load(operator, context, filepath="", constrain_size=0.0, use_image_search=True, use_apply_transform=True):
    """Operator entry point: delegate to load_3ds and report success."""
    load_3ds(filepath, context,
             IMPORT_CONSTRAIN_BOUNDS=constrain_size,
             IMAGE_SEARCH=use_image_search,
             APPLY_MATRIX=use_apply_transform)
    return {'FINISHED'}
@ -1,109 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# To support reload properly, try to access a package var: if "bpy" is
# already bound, this module is being re-executed by Blender's
# "Reload Scripts", so reload the implementation module too.
if "bpy" in locals():
    import imp
    if "export_fbx" in locals():
        imp.reload(export_fbx)
||||
|
||||
|
||||
import bpy
|
||||
from bpy.props import *
|
||||
from io_utils import ExportHelper
|
||||
|
||||
|
||||
class ExportFBX(bpy.types.Operator, ExportHelper):
|
||||
'''Selection to an ASCII Autodesk FBX'''
|
||||
bl_idname = "export_scene.fbx"
|
||||
bl_label = "Export FBX"
|
||||
bl_options = {'PRESET'}
|
||||
|
||||
filename_ext = ".fbx"
|
||||
filter_glob = StringProperty(default="*.fbx", options={'HIDDEN'})
|
||||
|
||||
# List of operator properties, the attributes will be assigned
|
||||
# to the class instance from the operator settings before calling.
|
||||
|
||||
EXP_OBS_SELECTED = BoolProperty(name="Selected Objects", description="Export selected objects on visible layers", default=True)
|
||||
# EXP_OBS_SCENE = BoolProperty(name="Scene Objects", description="Export all objects in this scene", default=True)
|
||||
TX_SCALE = FloatProperty(name="Scale", description="Scale all data, (Note! some imports dont support scaled armatures)", min=0.01, max=1000.0, soft_min=0.01, soft_max=1000.0, default=1.0)
|
||||
TX_XROT90 = BoolProperty(name="Rot X90", description="Rotate all objects 90 degrees about the X axis", default=True)
|
||||
TX_YROT90 = BoolProperty(name="Rot Y90", description="Rotate all objects 90 degrees about the Y axis", default=False)
|
||||
TX_ZROT90 = BoolProperty(name="Rot Z90", description="Rotate all objects 90 degrees about the Z axis", default=False)
|
||||
EXP_EMPTY = BoolProperty(name="Empties", description="Export empty objects", default=True)
|
||||
EXP_CAMERA = BoolProperty(name="Cameras", description="Export camera objects", default=True)
|
||||
EXP_LAMP = BoolProperty(name="Lamps", description="Export lamp objects", default=True)
|
||||
EXP_ARMATURE = BoolProperty(name="Armatures", description="Export armature objects", default=True)
|
||||
EXP_MESH = BoolProperty(name="Meshes", description="Export mesh objects", default=True)
|
||||
EXP_MESH_APPLY_MOD = BoolProperty(name="Modifiers", description="Apply modifiers to mesh objects", default=True)
|
||||
# EXP_MESH_HQ_NORMALS = BoolProperty(name="HQ Normals", description="Generate high quality normals", default=True)
|
||||
EXP_IMAGE_COPY = BoolProperty(name="Copy Image Files", description="Copy image files to the destination path", default=False)
|
||||
# armature animation
|
||||
ANIM_ENABLE = BoolProperty(name="Enable Animation", description="Export keyframe animation", default=True)
|
||||
ANIM_OPTIMIZE = BoolProperty(name="Optimize Keyframes", description="Remove double keyframes", default=True)
|
||||
ANIM_OPTIMIZE_PRECISSION = FloatProperty(name="Precision", description="Tolerence for comparing double keyframes (higher for greater accuracy)", min=1, max=16, soft_min=1, soft_max=16, default=6.0)
|
||||
# ANIM_ACTION_ALL = BoolProperty(name="Current Action", description="Use actions currently applied to the armatures (use scene start/end frame)", default=True)
|
||||
ANIM_ACTION_ALL = BoolProperty(name="All Actions", description="Use all actions for armatures, if false, use current action", default=False)
|
||||
# batch
|
||||
BATCH_ENABLE = BoolProperty(name="Enable Batch", description="Automate exporting multiple scenes or groups to files", default=False)
|
||||
BATCH_GROUP = BoolProperty(name="Group > File", description="Export each group as an FBX file, if false, export each scene as an FBX file", default=False)
|
||||
BATCH_OWN_DIR = BoolProperty(name="Own Dir", description="Create a dir for each exported file", default=True)
|
||||
BATCH_FILE_PREFIX = StringProperty(name="Prefix", description="Prefix each file with this name", maxlen=1024, default="")
|
||||
|
||||
def execute(self, context):
    """Build the global transform from the operator settings and run the FBX exporter.

    Raises an Exception when no output filepath was set by the file selector.
    """
    import math
    from mathutils import Matrix

    if not self.filepath:
        raise Exception("filepath not set")

    # Start from a 4x4 identity and put the uniform export scale on the diagonal.
    global_matrix = Matrix()
    global_matrix[0][0] = global_matrix[1][1] = global_matrix[2][2] = self.TX_SCALE

    # Pre-multiply a -90 degree rotation for each axis the user enabled,
    # in X, Y, Z order (same order as the original explicit branches).
    for enabled, axis in ((self.TX_XROT90, 'X'),
                          (self.TX_YROT90, 'Y'),
                          (self.TX_ZROT90, 'Z')):
        if enabled:
            global_matrix = Matrix.Rotation(-math.pi / 2.0, 4, axis) * global_matrix

    # Forward every remaining operator property to the exporter; the raw
    # rotation/scale toggles are consumed here and replaced by GLOBAL_MATRIX.
    keywords = self.as_keywords(ignore=("TX_XROT90", "TX_YROT90", "TX_ZROT90", "TX_SCALE", "check_existing", "filter_glob"))
    keywords["GLOBAL_MATRIX"] = global_matrix

    import io_scene_fbx.export_fbx
    return io_scene_fbx.export_fbx.save(self, context, **keywords)
|
||||
|
||||
|
||||
def menu_func(self, context):
    # Adds the FBX exporter entry to the File > Export menu.
    layout = self.layout
    layout.operator(ExportFBX.bl_idname, text="Autodesk FBX (.fbx)")
|
||||
|
||||
|
||||
def register():
    """Hook the FBX export entry into the File > Export menu."""
    export_menu = bpy.types.INFO_MT_file_export
    export_menu.append(menu_func)
|
||||
|
||||
|
||||
def unregister():
    """Remove the FBX export entry from the File > Export menu."""
    export_menu = bpy.types.INFO_MT_file_export
    export_menu.remove(menu_func)
|
||||
|
||||
# Allow running this add-on file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
|
File diff suppressed because it is too large
Load Diff
@ -1,131 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# Reload support: when this package was already imported (a "bpy" name is
# present in the module namespace), refresh the submodules so that script
# edits take effect without restarting Blender.
if "bpy" in locals():
    import imp
    if "import_obj" in locals():
        imp.reload(import_obj)
    if "export_obj" in locals():
        imp.reload(export_obj)
|
||||
|
||||
|
||||
import bpy
|
||||
from bpy.props import *
|
||||
from io_utils import ExportHelper, ImportHelper
|
||||
|
||||
|
||||
class ImportOBJ(bpy.types.Operator, ImportHelper):
    '''Load a Wavefront OBJ File'''
    bl_idname = "import_scene.obj"
    bl_label = "Import OBJ"

    filename_ext = ".obj"
    filter_glob = StringProperty(default="*.obj;*.mtl", options={'HIDDEN'})

    # Geometry options.
    CREATE_SMOOTH_GROUPS = BoolProperty(name="Smooth Groups", description="Surround smooth groups by sharp edges", default=True)
    # Typo fixes in user-facing descriptions: "more then" -> "more than".
    CREATE_FGONS = BoolProperty(name="NGons as FGons", description="Import faces with more than 4 verts as fgons", default=True)
    CREATE_EDGES = BoolProperty(name="Lines as Edges", description="Import lines and faces with 2 verts as edge", default=True)
    SPLIT_OBJECTS = BoolProperty(name="Object", description="Import OBJ Objects into Blender Objects", default=True)
    SPLIT_GROUPS = BoolProperty(name="Group", description="Import OBJ Groups into Blender Objects", default=True)
    # old comment: only used for user feedback
    # disabled this option because in old code a handler for it disabled SPLIT* params, it's not passed to load_obj
    # KEEP_VERT_ORDER = BoolProperty(name="Keep Vert Order", description="Keep vert and face order, disables split options, enable for morph targets", default= True)
    ROTATE_X90 = BoolProperty(name="-X90", description="Rotate X 90.", default=True)
    CLAMP_SIZE = FloatProperty(name="Clamp Scale", description="Clamp the size to this maximum (Zero to Disable)", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=0.0)
    POLYGROUPS = BoolProperty(name="Poly Groups", description="Import OBJ groups as vertex groups.", default=True)
    # Typo fix: "assosiated" -> "associated".
    IMAGE_SEARCH = BoolProperty(name="Image Search", description="Search subdirs for any associated images (Warning, may be slow)", default=True)

    def execute(self, context):
        """Run the OBJ importer with this operator's settings as keywords."""
        # print("Selected: " + context.active_object.name)
        from . import import_obj
        return import_obj.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
|
||||
|
||||
|
||||
class ExportOBJ(bpy.types.Operator, ExportHelper):
    '''Save a Wavefront OBJ File'''

    bl_idname = "export_scene.obj"
    bl_label = 'Export OBJ'
    bl_options = {'PRESET'}

    filename_ext = ".obj"
    filter_glob = StringProperty(default="*.obj;*.mtl", options={'HIDDEN'})

    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.

    # context group
    use_selection = BoolProperty(name="Selection Only", description="Export selected objects only", default=False)
    use_all_scenes = BoolProperty(name="All Scenes", description="", default=False)
    use_animation = BoolProperty(name="Animation", description="", default=False)

    # object group
    use_modifiers = BoolProperty(name="Apply Modifiers", description="Apply modifiers (preview resolution)", default=True)
    use_rotate_x90 = BoolProperty(name="Rotate X90", description="", default=True)

    # extra data group
    use_edges = BoolProperty(name="Edges", description="", default=True)
    use_normals = BoolProperty(name="Normals", description="", default=False)
    use_hq_normals = BoolProperty(name="High Quality Normals", description="", default=True)
    use_uvs = BoolProperty(name="UVs", description="", default=True)
    use_materials = BoolProperty(name="Materials", description="", default=True)
    copy_images = BoolProperty(name="Copy Images", description="", default=False)
    use_triangles = BoolProperty(name="Triangulate", description="", default=False)
    use_vertex_groups = BoolProperty(name="Polygroups", description="", default=False)
    use_nurbs = BoolProperty(name="Nurbs", description="", default=False)

    # grouping group
    use_blen_objects = BoolProperty(name="Objects as OBJ Objects", description="", default=True)
    # Fixed a stray trailing space in the UI label ("OBJ Groups ").
    group_by_object = BoolProperty(name="Objects as OBJ Groups", description="", default=False)
    group_by_material = BoolProperty(name="Material Groups", description="", default=False)
    keep_vertex_order = BoolProperty(name="Keep Vertex Order", description="", default=False)

    def execute(self, context):
        """Run the OBJ exporter with this operator's settings as keywords."""
        from . import export_obj
        return export_obj.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
|
||||
|
||||
|
||||
def menu_func_import(self, context):
    # Adds the OBJ importer entry to the File > Import menu.
    layout = self.layout
    layout.operator(ImportOBJ.bl_idname, text="Wavefront (.obj)")
|
||||
|
||||
|
||||
def menu_func_export(self, context):
    # Adds the OBJ exporter entry to the File > Export menu.
    layout = self.layout
    layout.operator(ExportOBJ.bl_idname, text="Wavefront (.obj)")
|
||||
|
||||
|
||||
def register():
    """Hook the OBJ import/export entries into the File menus."""
    bpy.types.INFO_MT_file_import.append(menu_func_import)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
|
||||
|
||||
|
||||
def unregister():
    """Remove the OBJ import/export entries from the File menus."""
    bpy.types.INFO_MT_file_import.remove(menu_func_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
|
||||
|
||||
|
||||
# CONVERSION ISSUES
|
||||
# - matrix problem
|
||||
# - duplis - only tested dupliverts
|
||||
# - all scenes export
|
||||
# + normals calculation
|
||||
|
||||
# Allow running this add-on file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
|
@ -1,836 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
import os
|
||||
import time
|
||||
import shutil
|
||||
|
||||
import bpy
|
||||
import mathutils
|
||||
|
||||
def fixName(name):
    """Return a name that is safe to write into an OBJ/MTL file.

    None becomes the literal string 'None'; spaces become underscores
    (OBJ names are whitespace-delimited).
    """
    return 'None' if name is None else name.replace(' ', '_')
|
||||
|
||||
def write_mtl(scene, filepath, copy_images, mtl_dict):
    """Write the companion .mtl material library for an OBJ export.

    scene       -- Blender scene (only its world ambient colour is read).
    filepath    -- destination .mtl path.
    copy_images -- when True, referenced images are copied next to the .mtl.
    mtl_dict    -- maps keys to (mtl_mat_name, material, image) triples,
                   built by write_file() during the OBJ pass.
    """

    # World ambient colour scales the Ka entries; fall back to black.
    world = scene.world
    if world:
        worldAmb = world.ambient_color[:]
    else:
        worldAmb = 0.0, 0.0, 0.0

    dest_dir = os.path.dirname(filepath)

    def copy_image(image):
        # Resolve the image's path and return the path string to write
        # into the MTL; optionally copy the file next to the export.
        fn = bpy.path.abspath(image.filepath)
        fn = os.path.normpath(fn)
        fn_strip = os.path.basename(fn)

        if copy_images:
            rel = fn_strip
            fn_abs_dest = os.path.join(dest_dir, fn_strip)
            if not os.path.exists(fn_abs_dest):
                shutil.copy(fn, fn_abs_dest)
        elif bpy.path.is_subdir(fn, dest_dir):
            # Image already lives under the export dir: write a relative path.
            rel = os.path.relpath(fn, dest_dir)
        else:
            rel = fn

        return rel

    file = open(filepath, "w", encoding='utf8')
    file.write('# Blender MTL File: %r\n' % os.path.basename(bpy.data.filepath))
    file.write('# Material Count: %i\n' % len(mtl_dict))
    # Write material/image combinations we have used.
    for key, (mtl_mat_name, mat, img) in mtl_dict.items():

        # Get the Blender data for the material and the image.
        # Having an image named None will make a bug, dont do it :)

        file.write('newmtl %s\n' % mtl_mat_name)  # Define a new material: matname_imgname

        if mat:
            file.write('Ns %.6f\n' % ((mat.specular_hardness - 1) * 1.9607843137254901))  # Hardness, convert blenders 1-511 to MTL's 0-1000 range
            file.write('Ka %.6f %.6f %.6f\n' % tuple(c * mat.ambient for c in worldAmb))  # Ambient, uses world ambient colour
            file.write('Kd %.6f %.6f %.6f\n' % tuple(c * mat.diffuse_intensity for c in mat.diffuse_color))  # Diffuse
            file.write('Ks %.6f %.6f %.6f\n' % tuple(c * mat.specular_intensity for c in mat.specular_color))  # Specular
            # NOTE(review): presumably "ior" is only present on raytrace-transparent
            # materials in this Blender version — hence the hasattr guard.
            if hasattr(mat, "ior"):
                file.write('Ni %.6f\n' % mat.ior)  # Refraction index
            else:
                file.write('Ni %.6f\n' % 1.0)
            file.write('d %.6f\n' % mat.alpha)  # Alpha (obj uses 'd' for dissolve)

            # 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting.
            if mat.use_shadeless:
                file.write('illum 0\n')  # ignore lighting
            elif mat.specular_intensity == 0:
                file.write('illum 1\n')  # no specular.
            else:
                file.write('illum 2\n')  # light normaly

        else:
            # No material: write a neutral grey dummy material.
            file.write('Ns 0\n')
            file.write('Ka %.6f %.6f %.6f\n' % tuple(c for c in worldAmb))  # Ambient, uses world ambient colour
            file.write('Kd 0.8 0.8 0.8\n')
            file.write('Ks 0.8 0.8 0.8\n')
            file.write('d 1\n')  # No alpha
            file.write('illum 2\n')  # light normaly

        # Write images!
        if img:  # We have an image on the face!
            # write relative image path
            rel = copy_image(img)
            file.write('map_Kd %s\n' % rel)  # Diffuse mapping image

        elif mat:  # No face image: if we have a material, search its texture slots for an image texture.
            for mtex in mat.texture_slots:
                if mtex and mtex.texture.type == 'IMAGE':
                    try:
                        filepath = copy_image(mtex.texture.image)
                        # repr()[1:-1] escapes non-ascii/odd chars in the path.
                        file.write('map_Kd %s\n' % repr(filepath)[1:-1])  # Diffuse mapping image
                        break
                    except:
                        # Texture has no image though its an image type, best ignore.
                        pass

        file.write('\n\n')

    file.close()
|
||||
|
||||
# XXX not used
def copy_file(source, dest):
    """Copy the contents of *source* to *dest* (binary-safe, whole-file read).

    Fixed: the original opened both files without any cleanup guarantee, so
    an error while reading or writing leaked the open handles. Context
    managers close them in all cases.
    """
    with open(source, 'rb') as file:
        data = file.read()

    with open(dest, 'wb') as file:
        file.write(data)
|
||||
|
||||
|
||||
# XXX not used
def copy_images(dest_dir, mtl_dict=None):
    """Collect the unique images referenced by the exported materials.

    dest_dir -- target directory; a trailing path separator is appended
                when missing.
    mtl_dict -- maps keys to (matname, material, image) triples.

    Fixed: the original body read a global ``mtl_dict`` that does not exist
    at module level, so any call raised NameError. It is now an optional
    parameter (defaulting to empty) to keep the old call signature working.

    Note: the actual file copying was already commented out upstream, so
    this only gathers the image set and reports a count of 0.
    """
    if mtl_dict is None:
        mtl_dict = {}

    if dest_dir[-1] != os.sep:
        dest_dir += os.sep

    # Get unique image names (dict used as an ordered set of Image datablocks).
    uniqueImages = {}
    for matname, mat, image in mtl_dict.values():  # Only use image name
        # Get Texface images
        if image:
            uniqueImages[image] = image

        # Get MTex images
        if mat:
            for mtex in mat.texture_slots:
                if mtex and mtex.texture.type == 'IMAGE':
                    image_tex = mtex.texture.image
                    if image_tex:
                        uniqueImages[image_tex] = image_tex

    # Now copy images -- the copy loop was disabled in the original code,
    # so the count always reports 0; kept for behavioural compatibility.
    copyCount = 0

    print('\tCopied %d images' % copyCount)
|
||||
|
||||
|
||||
def test_nurbs_compat(ob):
|
||||
if ob.type != 'CURVE':
|
||||
return False
|
||||
|
||||
for nu in ob.data.splines:
|
||||
if nu.point_count_v == 1 and nu.type != 'BEZIER': # not a surface and not bezier
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def write_nurb(file, ob, ob_mat):
    """Write the exportable splines of curve object *ob* into *file* as OBJ
    free-form curves ('cstype bspline' entries).

    ob_mat is the object's world matrix; control points are transformed by
    it before writing. Returns the number of 'v' lines written so the
    caller can keep its global vertex offset in sync.
    """
    tot_verts = 0
    cu = ob.data

    # Control points are referenced with negative (relative) indices so the
    # curve entries are independent of the global vertex count.
    for nu in cu.splines:
        if nu.type == 'POLY':
            DEG_ORDER_U = 1
        else:
            DEG_ORDER_U = nu.order_u - 1  # odd but tested to be correct

        # Bezier curves and surfaces cannot be represented here; skip with a warning.
        if nu.type == 'BEZIER':
            print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported")
            continue

        if nu.point_count_v > 1:
            print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported")
            continue

        if len(nu.points) <= DEG_ORDER_U:
            print("\tWarning, order_u is lower then vert count, skipping:", ob.name)
            continue

        pt_num = 0
        do_closed = nu.use_cyclic_u
        do_endpoints = (do_closed == 0) and nu.use_endpoint_u

        # Write the transformed control points.
        for pt in nu.points:
            pt = ob_mat * pt.co.copy().resize3D()
            file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2]))
            pt_num += 1
        tot_verts += pt_num

        file.write('g %s\n' % (fixName(ob.name)))  # fixName(ob.getData(1)) could use the data name too
        file.write('cstype bspline\n')  # not ideal, hard coded
        file.write('deg %d\n' % DEG_ORDER_U)  # not used for curves but most files have it still

        curve_ls = [-(i + 1) for i in range(pt_num)]

        # 'curv' keyword: closed curves repeat leading control points so the
        # evaluator wraps around.
        if do_closed:
            if DEG_ORDER_U == 1:
                pt_num += 1
                curve_ls.append(-1)
            else:
                pt_num += DEG_ORDER_U
                curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]

        file.write('curv 0.0 1.0 %s\n' % (' '.join([str(i) for i in curve_ls])))  # Blender has no U and V values for the curve

        # 'parm' keyword: a uniform knot vector over [0, 1].
        tot_parm = (DEG_ORDER_U + 1) + pt_num
        tot_parm_div = float(tot_parm - 1)
        parm_ls = [(i / tot_parm_div) for i in range(tot_parm)]

        if do_endpoints:  # end points, force param: clamp the knot vector
            for i in range(DEG_ORDER_U + 1):
                parm_ls[i] = 0.0
                parm_ls[-(1 + i)] = 1.0

        file.write('parm u %s\n' % ' '.join([str(i) for i in parm_ls]))

        file.write('end\n')

    return tot_verts
|
||||
|
||||
def write_file(filepath, objects, scene,
        EXPORT_TRI=False,
        EXPORT_EDGES=False,
        EXPORT_NORMALS=False,
        EXPORT_NORMALS_HQ=False,
        EXPORT_UV=True,
        EXPORT_MTL=True,
        EXPORT_COPY_IMAGES=False,
        EXPORT_APPLY_MODIFIERS=True,
        EXPORT_ROTX90=True,
        EXPORT_BLEN_OBS=True,
        EXPORT_GROUP_BY_OB=False,
        EXPORT_GROUP_BY_MAT=False,
        EXPORT_KEEP_VERT_ORDER=False,
        EXPORT_POLYGROUPS=False,
        EXPORT_CURVE_AS_NURBS=True):
    '''
    Basic write function. The context and options must be already set.
    This can be accessed externally, eg:
    write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.

    Writes *objects* from *scene* into an OBJ file at *filepath* and, when
    EXPORT_MTL is on, a matching .mtl library next to it.
    '''

    # XXX
    import math

    def veckey3d(v):
        # Rounded 3-tuple used to deduplicate normals across faces.
        return round(v.x, 6), round(v.y, 6), round(v.z, 6)

    def veckey2d(v):
        # Rounded 2-tuple used to deduplicate UV coordinates.
        return round(v[0], 6), round(v[1], 6)

    def findVertexGroupName(face, vWeightMap):
        """
        Searches the vertexDict to see what groups is assigned to a given face.
        We use a frequency system in order to sort out the name because a given vetex can
        belong to two or more groups at the same time. To find the right name for the face
        we list all the possible vertex group names with their frequency and then sort by
        frequency in descend order. The top element is the one shared by the highest number
        of vertices is the face's group
        """
        weightDict = {}
        for vert_index in face.vertices:
            vWeights = vWeightMap[vert_index]
            for vGroupName, weight in vWeights:
                weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight

        if weightDict:
            alist = [(weight, vGroupName) for vGroupName, weight in weightDict.items()]  # sort least to greatest amount of weight
            alist.sort()
            return(alist[-1][1])  # highest value last
        else:
            return '(null)'

    print('OBJ Export path: %r' % filepath)
    temp_mesh_name = '~tmp-mesh'  # NOTE(review): appears unused in this function

    time1 = time.clock()

    file = open(filepath, "w")

    # Write Header
    file.write('# Blender v%s OBJ File: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
    file.write('# www.blender.org\n')

    # Tell the obj file what material file to use.
    if EXPORT_MTL:
        mtlfilepath = os.path.splitext(filepath)[0] + ".mtl"
        file.write('mtllib %s\n' % repr(os.path.basename(mtlfilepath))[1:-1])  # filepath can contain non utf8 chars, use repr

    if EXPORT_ROTX90:
        mat_xrot90 = mathutils.Matrix.Rotation(-math.pi / 2, 4, 'X')

    # Initialize totals, these are updated each object.
    # OBJ indices are 1-based, hence starting at 1.
    totverts = totuvco = totno = 1

    face_vert_index = 1

    globalNormals = {}

    # A Dict of Materials:
    # (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
    mtl_dict = {}

    # Get all meshes
    for ob_main in objects:

        # ignore dupli children -- they are emitted via their parent's dupli list.
        if ob_main.parent and ob_main.parent.dupli_type != 'NONE':
            # XXX
            print(ob_main.name, 'is a dupli child - ignoring')
            continue

        obs = []
        if ob_main.dupli_type != 'NONE':
            # XXX
            print('creating dupli_list on', ob_main.name)
            ob_main.create_dupli_list(scene)

            obs = [(dob.object, dob.matrix) for dob in ob_main.dupli_list]

            # XXX debug print
            print(ob_main.name, 'has', len(obs), 'dupli children')
        else:
            obs = [(ob_main, ob_main.matrix_world)]

        for ob, ob_mat in obs:

            # Nurbs curve support: exported as free-form curves, not meshes.
            if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
                if EXPORT_ROTX90:
                    ob_mat = ob_mat * mat_xrot90
                totverts += write_nurb(file, ob, ob_mat)
                continue
            # END NURBS

            if ob.type != 'MESH':
                continue

            # Temporary evaluated mesh; removed again at the end of the loop.
            me = ob.create_mesh(scene, EXPORT_APPLY_MODIFIERS, 'PREVIEW')

            if EXPORT_ROTX90:
                me.transform(mat_xrot90 * ob_mat)
            else:
                me.transform(ob_mat)

            if EXPORT_UV:
                faceuv = len(me.uv_textures) > 0
                if faceuv:
                    uv_layer = me.uv_textures.active.data[:]
            else:
                faceuv = False

            me_verts = me.vertices[:]

            # Make our own list so it can be sorted to reduce context switching
            face_index_pairs = [(face, index) for index, face in enumerate(me.faces)]

            if EXPORT_EDGES:
                edges = me.edges
            else:
                edges = []

            if not (len(face_index_pairs) + len(edges) + len(me.vertices)):  # Make sure there is somthing to write
                # clean up
                bpy.data.meshes.remove(me)
                continue  # dont bother with this mesh.

            # XXX
            # High Quality Normals
            if EXPORT_NORMALS and face_index_pairs:
                me.calc_normals()

            materials = me.materials

            materialNames = []
            materialItems = [m for m in materials]
            if materials:
                for mat in materials:
                    if mat:
                        materialNames.append(mat.name)
                    else:
                        materialNames.append(None)
                # Cant use LC because some materials are None.

            # Possible there null materials, will mess up indicies
            # but at least it will export, wait until Blender gets fixed.
            materialNames.extend((16 - len(materialNames)) * [None])
            materialItems.extend((16 - len(materialItems)) * [None])

            # Sort by Material, then images
            # so we dont over context switch in the obj file.
            if EXPORT_KEEP_VERT_ORDER:
                pass
            elif faceuv:
                face_index_pairs.sort(key=lambda a: (a[0].material_index, hash(uv_layer[a[1]].image), a[0].use_smooth))
            elif len(materials) > 1:
                face_index_pairs.sort(key=lambda a: (a[0].material_index, a[0].use_smooth))
            else:
                # no materials
                face_index_pairs.sort(key=lambda a: a[0].use_smooth)

            # Set the default mat to no material and no image.
            contextMat = (0, 0)  # Can never be this, so we will label a new material the first chance we get.
            contextSmooth = None  # Will either be true or false, set bad to force initialization switch.

            if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
                name1 = ob.name
                name2 = ob.data.name
                if name1 == name2:
                    obnamestring = fixName(name1)
                else:
                    obnamestring = '%s_%s' % (fixName(name1), fixName(name2))

                if EXPORT_BLEN_OBS:
                    file.write('o %s\n' % obnamestring)  # Write Object name
                else:  # if EXPORT_GROUP_BY_OB:
                    file.write('g %s\n' % obnamestring)

            # Vert
            for v in me_verts:
                file.write('v %.6f %.6f %.6f\n' % v.co[:])

            # UV
            if faceuv:
                uv_face_mapping = [[0, 0, 0, 0] for i in range(len(face_index_pairs))]  # a bit of a waste for tri's :/

                uv_dict = {}  # could use a set() here
                uv_layer = me.uv_textures.active.data
                for f, f_index in face_index_pairs:
                    for uv_index, uv in enumerate(uv_layer[f_index].uv):
                        uvkey = veckey2d(uv)
                        try:
                            uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
                        except:
                            # First time this UV is seen: assign it a new index and write it.
                            uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
                            file.write('vt %.6f %.6f\n' % uv[:])

                uv_unique_count = len(uv_dict)
                # Only need uv_unique_count and uv_face_mapping after this point.

            # NORMAL, Smooth/Non smoothed.
            if EXPORT_NORMALS:
                for f, f_index in face_index_pairs:
                    if f.use_smooth:
                        for v_idx in f.vertices:
                            v = me_verts[v_idx]
                            noKey = veckey3d(v.normal)
                            if noKey not in globalNormals:
                                globalNormals[noKey] = totno
                                totno += 1
                                file.write('vn %.6f %.6f %.6f\n' % noKey)
                    else:
                        # Hard, 1 normal from the face.
                        noKey = veckey3d(f.normal)
                        if noKey not in globalNormals:
                            globalNormals[noKey] = totno
                            totno += 1
                            file.write('vn %.6f %.6f %.6f\n' % noKey)

            if not faceuv:
                f_image = None

            # XXX
            if EXPORT_POLYGROUPS:
                # Retrieve the list of vertex groups
                vertGroupNames = [g.name for g in ob.vertex_groups]

                currentVGroup = ''
                # Create a dictionary keyed by vertex id listing, for each vertex, the vertex groups it belongs to
                vgroupsMap = [[] for _i in range(len(me_verts))]
                for v_idx, v in enumerate(me.vertices):
                    for g in v.groups:
                        vgroupsMap[v_idx].append((vertGroupNames[g.group], g.weight))

            for f, f_index in face_index_pairs:
                f_smooth = f.use_smooth
                f_mat = min(f.material_index, len(materialNames) - 1)

                if faceuv:
                    tface = uv_layer[f_index]
                    f_image = tface.image

                # MAKE KEY
                if faceuv and f_image:  # Object is always true.
                    key = materialNames[f_mat], f_image.name
                else:
                    key = materialNames[f_mat], None  # No image, use None instead.

                # Write the vertex group
                if EXPORT_POLYGROUPS:
                    if ob.vertex_groups:
                        # find what vertex group the face belongs to
                        theVGroup = findVertexGroupName(f, vgroupsMap)
                        if theVGroup != currentVGroup:
                            currentVGroup = theVGroup
                            file.write('g %s\n' % theVGroup)

                # CHECK FOR CONTEXT SWITCH
                if key == contextMat:
                    pass  # Context already switched, dont do anything
                else:
                    if key[0] is None and key[1] is None:
                        # Write a null material, since we know the context has changed.
                        if EXPORT_GROUP_BY_MAT:
                            # can be mat_image or (null)
                            file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.data.name)))
                        file.write('usemtl (null)\n')  # mat, image

                    else:
                        mat_data = mtl_dict.get(key)
                        if not mat_data:
                            # First add to global dict so we can export to mtl
                            # Then write mtl

                            # Make a new names from the mat and image name,
                            # converting any spaces to underscores with fixName.

                            # If none image dont bother adding it to the name
                            if key[1] is None:
                                mat_data = mtl_dict[key] = ('%s' % fixName(key[0])), materialItems[f_mat], f_image
                            else:
                                mat_data = mtl_dict[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image

                        if EXPORT_GROUP_BY_MAT:
                            file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.data.name), mat_data[0]))  # can be mat_image or (null)

                        file.write('usemtl %s\n' % mat_data[0])  # can be mat_image or (null)

                    contextMat = key
                if f_smooth != contextSmooth:
                    if f_smooth:  # on now off
                        file.write('s 1\n')
                        contextSmooth = f_smooth
                    else:  # was off now on
                        file.write('s off\n')
                        contextSmooth = f_smooth

                f_v_orig = [(vi, me_verts[v_idx]) for vi, v_idx in enumerate(f.vertices)]

                # Triangulation: a quad becomes two tris sharing the 0-2 diagonal.
                if not EXPORT_TRI or len(f_v_orig) == 3:
                    f_v_iter = (f_v_orig, )
                else:
                    f_v_iter = (f_v_orig[0], f_v_orig[1], f_v_orig[2]), (f_v_orig[0], f_v_orig[2], f_v_orig[3])

                # support for triangulation
                for f_v in f_v_iter:
                    file.write('f')

                    if faceuv:
                        if EXPORT_NORMALS:
                            if f_smooth:  # Smoothed, use vertex normals
                                for vi, v in f_v:
                                    file.write(' %d/%d/%d' % \
                                        (v.index + totverts,
                                         totuvco + uv_face_mapping[f_index][vi],
                                         globalNormals[veckey3d(v.normal)]))  # vert, uv, normal

                            else:  # No smoothing, face normals
                                no = globalNormals[veckey3d(f.normal)]
                                for vi, v in f_v:
                                    file.write(' %d/%d/%d' % \
                                        (v.index + totverts,
                                         totuvco + uv_face_mapping[f_index][vi],
                                         no))  # vert, uv, normal
                        else:  # No Normals
                            for vi, v in f_v:
                                file.write(' %d/%d' % (\
                                    v.index + totverts,\
                                    totuvco + uv_face_mapping[f_index][vi]))  # vert, uv

                        face_vert_index += len(f_v)

                    else:  # No UV's
                        if EXPORT_NORMALS:
                            if f_smooth:  # Smoothed, use vertex normals
                                for vi, v in f_v:
                                    file.write(' %d//%d' %
                                        (v.index + totverts, globalNormals[veckey3d(v.normal)]))
                            else:  # No smoothing, face normals
                                no = globalNormals[veckey3d(f.normal)]
                                for vi, v in f_v:
                                    file.write(' %d//%d' % (v.index + totverts, no))
                        else:  # No Normals
                            for vi, v in f_v:
                                file.write(' %d' % (v.index + totverts))

                    file.write('\n')

            # Write edges.
            if EXPORT_EDGES:
                for ed in edges:
                    if ed.is_loose:
                        file.write('f %d %d\n' % (ed.vertices[0] + totverts, ed.vertices[1] + totverts))

            # Make the indicies global rather then per mesh
            totverts += len(me_verts)
            if faceuv:
                totuvco += uv_unique_count

            # clean up
            bpy.data.meshes.remove(me)

        if ob_main.dupli_type != 'NONE':
            ob_main.free_dupli_list()

    file.close()

    # Now we have all our materials, save them
    if EXPORT_MTL:
        write_mtl(scene, mtlfilepath, EXPORT_COPY_IMAGES, mtl_dict)

    print("OBJ Export time: %.2f" % (time.clock() - time1))
|
||||
|
||||
#
|
||||
def _write(context, filepath,
|
||||
EXPORT_TRI, # ok
|
||||
EXPORT_EDGES,
|
||||
EXPORT_NORMALS, # not yet
|
||||
EXPORT_NORMALS_HQ, # not yet
|
||||
EXPORT_UV, # ok
|
||||
EXPORT_MTL,
|
||||
EXPORT_COPY_IMAGES,
|
||||
EXPORT_APPLY_MODIFIERS, # ok
|
||||
EXPORT_ROTX90, # wrong
|
||||
EXPORT_BLEN_OBS,
|
||||
EXPORT_GROUP_BY_OB,
|
||||
EXPORT_GROUP_BY_MAT,
|
||||
EXPORT_KEEP_VERT_ORDER,
|
||||
EXPORT_POLYGROUPS,
|
||||
EXPORT_CURVE_AS_NURBS,
|
||||
EXPORT_SEL_ONLY, # ok
|
||||
EXPORT_ALL_SCENES, # XXX not working atm
|
||||
EXPORT_ANIMATION): # Not used
|
||||
|
||||
base_name, ext = os.path.splitext(filepath)
|
||||
context_name = [base_name, '', '', ext] # Base name, scene name, frame number, extension
|
||||
|
||||
orig_scene = context.scene
|
||||
|
||||
# Exit edit mode before exporting, so current object states are exported properly.
|
||||
if bpy.ops.object.mode_set.poll():
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
# if EXPORT_ALL_SCENES:
|
||||
# export_scenes = bpy.data.scenes
|
||||
# else:
|
||||
# export_scenes = [orig_scene]
|
||||
|
||||
# XXX only exporting one scene atm since changing
|
||||
# current scene is not possible.
|
||||
# Brecht says that ideally in 2.5 we won't need such a function,
|
||||
# allowing multiple scenes open at once.
|
||||
export_scenes = [orig_scene]
|
||||
|
||||
# Export all scenes.
|
||||
for scene in export_scenes:
|
||||
# scene.makeCurrent() # If already current, this is not slow.
|
||||
# context = scene.getRenderingContext()
|
||||
orig_frame = scene.frame_current
|
||||
|
||||
if EXPORT_ALL_SCENES: # Add scene name into the context_name
|
||||
context_name[1] = '_%s' % bpy.path.clean_name(scene.name) # WARNING, its possible that this could cause a collision. we could fix if were feeling parranoied.
|
||||
|
||||
# Export an animation?
|
||||
if EXPORT_ANIMATION:
|
||||
scene_frames = range(scene.frame_start, scene.frame_end + 1) # Up to and including the end frame.
|
||||
else:
|
||||
scene_frames = [orig_frame] # Dont export an animation.
|
||||
|
||||
# Loop through all frames in the scene and export.
|
||||
for frame in scene_frames:
|
||||
if EXPORT_ANIMATION: # Add frame to the filepath.
|
||||
context_name[2] = '_%.6d' % frame
|
||||
|
||||
scene.frame_set(frame, 0.0)
|
||||
if EXPORT_SEL_ONLY:
|
||||
objects = context.selected_objects
|
||||
else:
|
||||
objects = scene.objects
|
||||
|
||||
full_path = ''.join(context_name)
|
||||
|
||||
# erm... bit of a problem here, this can overwrite files when exporting frames. not too bad.
|
||||
# EXPORT THE FILE.
|
||||
write_file(full_path, objects, scene,
|
||||
EXPORT_TRI,
|
||||
EXPORT_EDGES,
|
||||
EXPORT_NORMALS,
|
||||
EXPORT_NORMALS_HQ,
|
||||
EXPORT_UV,
|
||||
EXPORT_MTL,
|
||||
EXPORT_COPY_IMAGES,
|
||||
EXPORT_APPLY_MODIFIERS,
|
||||
EXPORT_ROTX90,
|
||||
EXPORT_BLEN_OBS,
|
||||
EXPORT_GROUP_BY_OB,
|
||||
EXPORT_GROUP_BY_MAT,
|
||||
EXPORT_KEEP_VERT_ORDER,
|
||||
EXPORT_POLYGROUPS,
|
||||
EXPORT_CURVE_AS_NURBS)
|
||||
|
||||
scene.frame_set(orig_frame, 0.0)
|
||||
|
||||
# Restore old active scene.
|
||||
# orig_scene.makeCurrent()
|
||||
# Window.WaitCursor(0)
|
||||
|
||||
|
||||
'''
|
||||
Currently the exporter lacks these features:
|
||||
* multiple scene export (only active scene is written)
|
||||
* particles
|
||||
'''
|
||||
|
||||
|
||||
def save(operator, context, filepath="",
|
||||
use_triangles=False,
|
||||
use_edges=True,
|
||||
use_normals=False,
|
||||
use_hq_normals=False,
|
||||
use_uvs=True,
|
||||
use_materials=True,
|
||||
copy_images=False,
|
||||
use_modifiers=True,
|
||||
use_rotate_x90=True,
|
||||
use_blen_objects=True,
|
||||
group_by_object=False,
|
||||
group_by_material=False,
|
||||
keep_vertex_order=False,
|
||||
use_vertex_groups=False,
|
||||
use_nurbs=True,
|
||||
use_selection=True,
|
||||
use_all_scenes=False,
|
||||
use_animation=False,
|
||||
):
|
||||
|
||||
_write(context, filepath,
|
||||
EXPORT_TRI=use_triangles,
|
||||
EXPORT_EDGES=use_edges,
|
||||
EXPORT_NORMALS=use_normals,
|
||||
EXPORT_NORMALS_HQ=use_hq_normals,
|
||||
EXPORT_UV=use_uvs,
|
||||
EXPORT_MTL=use_materials,
|
||||
EXPORT_COPY_IMAGES=copy_images,
|
||||
EXPORT_APPLY_MODIFIERS=use_modifiers,
|
||||
EXPORT_ROTX90=use_rotate_x90,
|
||||
EXPORT_BLEN_OBS=use_blen_objects,
|
||||
EXPORT_GROUP_BY_OB=group_by_object,
|
||||
EXPORT_GROUP_BY_MAT=group_by_material,
|
||||
EXPORT_KEEP_VERT_ORDER=keep_vertex_order,
|
||||
EXPORT_POLYGROUPS=use_vertex_groups,
|
||||
EXPORT_CURVE_AS_NURBS=use_nurbs,
|
||||
EXPORT_SEL_ONLY=use_selection,
|
||||
EXPORT_ALL_SCENES=use_all_scenes,
|
||||
EXPORT_ANIMATION=use_animation,
|
||||
)
|
||||
|
||||
return {'FINISHED'}
|
File diff suppressed because it is too large
Load Diff
@ -1,84 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# To support reload properly, try to access a package var, if it's there, reload everything
|
||||
if "bpy" in locals():
|
||||
import imp
|
||||
if "export_x3d" in locals():
|
||||
imp.reload(export_x3d)
|
||||
|
||||
|
||||
import bpy
|
||||
from bpy.props import *
|
||||
from io_utils import ImportHelper, ExportHelper
|
||||
|
||||
|
||||
class ImportX3D(bpy.types.Operator, ImportHelper):
|
||||
'''Load a BVH motion capture file'''
|
||||
bl_idname = "import_scene.x3d"
|
||||
bl_label = "Import X3D/VRML"
|
||||
|
||||
filename_ext = ".x3d"
|
||||
filter_glob = StringProperty(default="*.x3d;*.wrl", options={'HIDDEN'})
|
||||
|
||||
def execute(self, context):
|
||||
from . import import_x3d
|
||||
return import_x3d.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
|
||||
|
||||
|
||||
class ExportX3D(bpy.types.Operator, ExportHelper):
|
||||
'''Export selection to Extensible 3D file (.x3d)'''
|
||||
bl_idname = "export_scene.x3d"
|
||||
bl_label = 'Export X3D'
|
||||
|
||||
filename_ext = ".x3d"
|
||||
filter_glob = StringProperty(default="*.x3d", options={'HIDDEN'})
|
||||
|
||||
use_apply_modifiers = BoolProperty(name="Apply Modifiers", description="Use transformed mesh data from each object", default=True)
|
||||
use_triangulate = BoolProperty(name="Triangulate", description="Triangulate quads.", default=False)
|
||||
use_compress = BoolProperty(name="Compress", description="GZip the resulting file, requires a full python install", default=False)
|
||||
|
||||
def execute(self, context):
|
||||
from . import export_x3d
|
||||
return export_x3d.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
|
||||
|
||||
|
||||
def menu_func_import(self, context):
|
||||
self.layout.operator(ImportX3D.bl_idname, text="X3D Extensible 3D (.x3d/.wrl)")
|
||||
|
||||
|
||||
def menu_func_export(self, context):
|
||||
self.layout.operator(ExportX3D.bl_idname, text="X3D Extensible 3D (.x3d)")
|
||||
|
||||
|
||||
def register():
|
||||
bpy.types.INFO_MT_file_import.append(menu_func_import)
|
||||
bpy.types.INFO_MT_file_export.append(menu_func_export)
|
||||
|
||||
|
||||
def unregister():
|
||||
bpy.types.INFO_MT_file_import.remove(menu_func_import)
|
||||
bpy.types.INFO_MT_file_export.remove(menu_func_export)
|
||||
|
||||
# NOTES
|
||||
# - blender version is hardcoded
|
||||
|
||||
if __name__ == "__main__":
|
||||
register()
|
@ -1,847 +0,0 @@
|
||||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# <pep8 compliant>
|
||||
|
||||
# Contributors: bart:neeneenee*de, http://www.neeneenee.de/vrml, Campbell Barton
|
||||
|
||||
"""
|
||||
This script exports to X3D format.
|
||||
|
||||
Usage:
|
||||
Run this script from "File->Export" menu. A pop-up will ask whether you
|
||||
want to export only selected or all relevant objects.
|
||||
|
||||
Known issues:
|
||||
Doesn't handle multiple materials (don't use material indices);<br>
|
||||
Doesn't handle multiple UV textures on a single mesh (create a mesh for each texture);<br>
|
||||
Can't get the texture array associated with material * not the UV ones;
|
||||
"""
|
||||
|
||||
import math
|
||||
import os
|
||||
|
||||
import bpy
|
||||
import mathutils
|
||||
|
||||
from io_utils import create_derived_objects, free_derived_objects
|
||||
|
||||
|
||||
def round_color(col, cp):
|
||||
return tuple([round(max(min(c, 1.0), 0.0), cp) for c in col])
|
||||
|
||||
|
||||
def matrix_direction(mtx):
|
||||
return (mathutils.Vector((0.0, 0.0, -1.0)) * mtx.rotation_part()).normalize()[:]
|
||||
|
||||
|
||||
##########################################################
|
||||
# Functions for writing output file
|
||||
##########################################################
|
||||
|
||||
|
||||
class x3d_class:
|
||||
|
||||
def __init__(self, filepath):
|
||||
#--- public you can change these ---
|
||||
self.proto = 1
|
||||
self.billnode = 0
|
||||
self.halonode = 0
|
||||
self.collnode = 0
|
||||
self.verbose = 2 # level of verbosity in console 0-none, 1-some, 2-most
|
||||
self.cp = 3 # decimals for material color values 0.000 - 1.000
|
||||
self.vp = 3 # decimals for vertex coordinate values 0.000 - n.000
|
||||
self.tp = 3 # decimals for texture coordinate values 0.000 - 1.000
|
||||
self.it = 3
|
||||
|
||||
self.global_matrix = mathutils.Matrix.Rotation(-(math.pi / 2.0), 4, 'X')
|
||||
|
||||
#--- class private don't touch ---
|
||||
self.indentLevel = 0 # keeps track of current indenting
|
||||
self.filepath = filepath
|
||||
self.file = None
|
||||
if filepath.lower().endswith('.x3dz'):
|
||||
try:
|
||||
import gzip
|
||||
self.file = gzip.open(filepath, "w")
|
||||
except:
|
||||
print("failed to import compression modules, exporting uncompressed")
|
||||
self.filepath = filepath[:-1] # remove trailing z
|
||||
|
||||
if self.file is None:
|
||||
self.file = open(self.filepath, "w", encoding='utf8')
|
||||
|
||||
self.bNav = 0
|
||||
self.nodeID = 0
|
||||
self.namesReserved = ("Anchor", "Appearance", "Arc2D", "ArcClose2D", "AudioClip", "Background", "Billboard",
|
||||
"BooleanFilter", "BooleanSequencer", "BooleanToggle", "BooleanTrigger", "Box", "Circle2D",
|
||||
"Collision", "Color", "ColorInterpolator", "ColorRGBA", "component", "Cone", "connect",
|
||||
"Contour2D", "ContourPolyline2D", "Coordinate", "CoordinateDouble", "CoordinateInterpolator",
|
||||
"CoordinateInterpolator2D", "Cylinder", "CylinderSensor", "DirectionalLight", "Disk2D",
|
||||
"ElevationGrid", "EspduTransform", "EXPORT", "ExternProtoDeclare", "Extrusion", "field",
|
||||
"fieldValue", "FillProperties", "Fog", "FontStyle", "GeoCoordinate", "GeoElevationGrid",
|
||||
"GeoLocationLocation", "GeoLOD", "GeoMetadata", "GeoOrigin", "GeoPositionInterpolator",
|
||||
"GeoTouchSensor", "GeoViewpoint", "Group", "HAnimDisplacer", "HAnimHumanoid", "HAnimJoint",
|
||||
"HAnimSegment", "HAnimSite", "head", "ImageTexture", "IMPORT", "IndexedFaceSet",
|
||||
"IndexedLineSet", "IndexedTriangleFanSet", "IndexedTriangleSet", "IndexedTriangleStripSet",
|
||||
"Inline", "IntegerSequencer", "IntegerTrigger", "IS", "KeySensor", "LineProperties", "LineSet",
|
||||
"LoadSensor", "LOD", "Material", "meta", "MetadataDouble", "MetadataFloat", "MetadataInteger",
|
||||
"MetadataSet", "MetadataString", "MovieTexture", "MultiTexture", "MultiTextureCoordinate",
|
||||
"MultiTextureTransform", "NavigationInfo", "Normal", "NormalInterpolator", "NurbsCurve",
|
||||
"NurbsCurve2D", "NurbsOrientationInterpolator", "NurbsPatchSurface",
|
||||
"NurbsPositionInterpolator", "NurbsSet", "NurbsSurfaceInterpolator", "NurbsSweptSurface",
|
||||
"NurbsSwungSurface", "NurbsTextureCoordinate", "NurbsTrimmedSurface", "OrientationInterpolator",
|
||||
"PixelTexture", "PlaneSensor", "PointLight", "PointSet", "Polyline2D", "Polypoint2D",
|
||||
"PositionInterpolator", "PositionInterpolator2D", "ProtoBody", "ProtoDeclare", "ProtoInstance",
|
||||
"ProtoInterface", "ProximitySensor", "ReceiverPdu", "Rectangle2D", "ROUTE", "ScalarInterpolator",
|
||||
"Scene", "Script", "Shape", "SignalPdu", "Sound", "Sphere", "SphereSensor", "SpotLight", "StaticGroup",
|
||||
"StringSensor", "Switch", "Text", "TextureBackground", "TextureCoordinate", "TextureCoordinateGenerator",
|
||||
"TextureTransform", "TimeSensor", "TimeTrigger", "TouchSensor", "Transform", "TransmitterPdu",
|
||||
"TriangleFanSet", "TriangleSet", "TriangleSet2D", "TriangleStripSet", "Viewpoint", "VisibilitySensor",
|
||||
"WorldInfo", "X3D", "XvlShell", "VertexShader", "FragmentShader", "MultiShaderAppearance", "ShaderAppearance")
|
||||
|
||||
self.namesFog = ("", "LINEAR", "EXPONENTIAL", "")
|
||||
|
||||
##########################################################
|
||||
# Writing nodes routines
|
||||
##########################################################
|
||||
|
||||
def writeHeader(self):
|
||||
#bfile = sys.expandpath( Blender.Get('filepath') ).replace('<', '<').replace('>', '>')
|
||||
bfile = repr(os.path.basename(self.filepath).replace('<', '<').replace('>', '>'))[1:-1] # use outfile name
|
||||
self.file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
|
||||
self.file.write("<!DOCTYPE X3D PUBLIC \"ISO//Web3D//DTD X3D 3.0//EN\" \"http://www.web3d.org/specifications/x3d-3.0.dtd\">\n")
|
||||
self.file.write("<X3D version=\"3.0\" profile=\"Immersive\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema-instance\" xsd:noNamespaceSchemaLocation=\"http://www.web3d.org/specifications/x3d-3.0.xsd\">\n")
|
||||
self.file.write("<head>\n")
|
||||
self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % bfile)
|
||||
# self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % sys.basename(bfile))
|
||||
self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % bpy.app.version_string)
|
||||
# self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % Blender.Get('version'))
|
||||
self.file.write("\t<meta name=\"translator\" content=\"X3D exporter v1.55 (2006/01/17)\" />\n")
|
||||
self.file.write("</head>\n")
|
||||
self.file.write("<Scene>\n")
|
||||
|
||||
# This functionality is poorly defined, disabling for now - campbell
|
||||
'''
|
||||
def writeScript(self):
|
||||
textEditor = Blender.Text.Get()
|
||||
alltext = len(textEditor)
|
||||
for i in xrange(alltext):
|
||||
nametext = textEditor[i].name
|
||||
nlines = textEditor[i].getNLines()
|
||||
if (self.proto == 1):
|
||||
if (nametext == "proto" or nametext == "proto.js" or nametext == "proto.txt") and (nlines != None):
|
||||
nalllines = len(textEditor[i].asLines())
|
||||
alllines = textEditor[i].asLines()
|
||||
for j in xrange(nalllines):
|
||||
self.write_indented(alllines[j] + "\n")
|
||||
elif (self.proto == 0):
|
||||
if (nametext == "route" or nametext == "route.js" or nametext == "route.txt") and (nlines != None):
|
||||
nalllines = len(textEditor[i].asLines())
|
||||
alllines = textEditor[i].asLines()
|
||||
for j in xrange(nalllines):
|
||||
self.write_indented(alllines[j] + "\n")
|
||||
self.write_indented("\n")
|
||||
'''
|
||||
|
||||
def writeViewpoint(self, ob, mat, scene):
|
||||
loc, quat, scale = mat.decompose()
|
||||
self.file.write("<Viewpoint DEF=\"%s\" " % (self.cleanStr(ob.name)))
|
||||
self.file.write("description=\"%s\" " % (ob.name))
|
||||
self.file.write("centerOfRotation=\"0 0 0\" ")
|
||||
self.file.write("position=\"%3.2f %3.2f %3.2f\" " % loc[:])
|
||||
self.file.write("orientation=\"%3.2f %3.2f %3.2f %3.2f\" " % (quat.axis[:] + (quat.angle, )))
|
||||
self.file.write("fieldOfView=\"%.3f\" " % ob.data.angle)
|
||||
self.file.write(" />\n\n")
|
||||
|
||||
def writeFog(self, world):
|
||||
if world:
|
||||
mtype = world.mist_settings.falloff
|
||||
mparam = world.mist_settings
|
||||
else:
|
||||
return
|
||||
if (mtype == 'LINEAR' or mtype == 'INVERSE_QUADRATIC'):
|
||||
mtype = 1 if mtype == 'LINEAR' else 2
|
||||
# if (mtype == 1 or mtype == 2):
|
||||
self.file.write("<Fog fogType=\"%s\" " % self.namesFog[mtype])
|
||||
self.file.write("color=\"%s %s %s\" " % round_color(world.horizon_color, self.cp))
|
||||
self.file.write("visibilityRange=\"%s\" />\n\n" % round(mparam[2], self.cp))
|
||||
else:
|
||||
return
|
||||
|
||||
def writeNavigationInfo(self, scene):
|
||||
self.file.write('<NavigationInfo headlight="false" visibilityLimit="0.0" type=\'"EXAMINE","ANY"\' avatarSize="0.25, 1.75, 0.75" />\n')
|
||||
|
||||
def writeSpotLight(self, ob, mtx, lamp, world):
|
||||
safeName = self.cleanStr(ob.name)
|
||||
if world:
|
||||
ambi = world.ambient_color
|
||||
ambientIntensity = ((ambi[0] + ambi[1] + ambi[2]) / 3.0) / 2.5
|
||||
del ambi
|
||||
else:
|
||||
ambientIntensity = 0.0
|
||||
|
||||
# compute cutoff and beamwidth
|
||||
intensity = min(lamp.energy / 1.75, 1.0)
|
||||
beamWidth = lamp.spot_size * 0.37
|
||||
# beamWidth=((lamp.spotSize*math.pi)/180.0)*.37
|
||||
cutOffAngle = beamWidth * 1.3
|
||||
|
||||
dx, dy, dz = matrix_direction(mtx)
|
||||
|
||||
location = mtx.translation_part()
|
||||
|
||||
radius = lamp.distance * math.cos(beamWidth)
|
||||
# radius = lamp.dist*math.cos(beamWidth)
|
||||
self.file.write("<SpotLight DEF=\"%s\" " % safeName)
|
||||
self.file.write("radius=\"%s\" " % (round(radius, self.cp)))
|
||||
self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity, self.cp)))
|
||||
self.file.write("intensity=\"%s\" " % (round(intensity, self.cp)))
|
||||
self.file.write("color=\"%s %s %s\" " % round_color(lamp.color, self.cp))
|
||||
self.file.write("beamWidth=\"%s\" " % (round(beamWidth, self.cp)))
|
||||
self.file.write("cutOffAngle=\"%s\" " % (round(cutOffAngle, self.cp)))
|
||||
self.file.write("direction=\"%s %s %s\" " % (round(dx, 3), round(dy, 3), round(dz, 3)))
|
||||
self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0], 3), round(location[1], 3), round(location[2], 3)))
|
||||
|
||||
def writeDirectionalLight(self, ob, mtx, lamp, world):
|
||||
safeName = self.cleanStr(ob.name)
|
||||
if world:
|
||||
ambi = world.ambient_color
|
||||
# ambi = world.amb
|
||||
ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2])) / 3.0) / 2.5
|
||||
else:
|
||||
ambi = 0
|
||||
ambientIntensity = 0
|
||||
|
||||
intensity = min(lamp.energy / 1.75, 1.0)
|
||||
dx, dy, dz = matrix_direction(mtx)
|
||||
self.file.write("<DirectionalLight DEF=\"%s\" " % safeName)
|
||||
self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity, self.cp)))
|
||||
self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0], self.cp), round(lamp.color[1], self.cp), round(lamp.color[2], self.cp)))
|
||||
self.file.write("intensity=\"%s\" " % (round(intensity, self.cp)))
|
||||
self.file.write("direction=\"%s %s %s\" />\n\n" % (round(dx, 4), round(dy, 4), round(dz, 4)))
|
||||
|
||||
def writePointLight(self, ob, mtx, lamp, world):
|
||||
safeName = self.cleanStr(ob.name)
|
||||
if world:
|
||||
ambi = world.ambient_color
|
||||
# ambi = world.amb
|
||||
ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2])) / 3) / 2.5
|
||||
else:
|
||||
ambi = 0
|
||||
ambientIntensity = 0
|
||||
|
||||
location = mtx.translation_part()
|
||||
|
||||
self.file.write("<PointLight DEF=\"%s\" " % safeName)
|
||||
self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity, self.cp)))
|
||||
self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0], self.cp), round(lamp.color[1], self.cp), round(lamp.color[2], self.cp)))
|
||||
|
||||
self.file.write("intensity=\"%s\" " % (round(min(lamp.energy / 1.75, 1.0), self.cp)))
|
||||
self.file.write("radius=\"%s\" " % lamp.distance)
|
||||
self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0], 3), round(location[1], 3), round(location[2], 3)))
|
||||
|
||||
def secureName(self, name):
|
||||
name = name + str(self.nodeID)
|
||||
self.nodeID = self.nodeID + 1
|
||||
if len(name) <= 3:
|
||||
newname = "_" + str(self.nodeID)
|
||||
return "%s" % (newname)
|
||||
else:
|
||||
for bad in ('"', '#', "'", ', ', '.', '[', '\\', ']', '{', '}'):
|
||||
name = name.replace(bad, "_")
|
||||
if name in self.namesReserved:
|
||||
newname = name[0:3] + "_" + str(self.nodeID)
|
||||
return "%s" % (newname)
|
||||
elif name[0].isdigit():
|
||||
newname = "_" + name + str(self.nodeID)
|
||||
return "%s" % (newname)
|
||||
else:
|
||||
newname = name
|
||||
return "%s" % (newname)
|
||||
|
||||
def writeIndexedFaceSet(self, ob, mesh, mtx, world, EXPORT_TRI=False):
|
||||
fw = self.file.write
|
||||
mesh_name_x3d = self.cleanStr(ob.name)
|
||||
|
||||
if not mesh.faces:
|
||||
return
|
||||
|
||||
mode = []
|
||||
# mode = 0
|
||||
if mesh.uv_textures.active:
|
||||
# if mesh.faceUV:
|
||||
for face in mesh.uv_textures.active.data:
|
||||
# for face in mesh.faces:
|
||||
if face.use_halo and 'HALO' not in mode:
|
||||
mode += ['HALO']
|
||||
if face.use_billboard and 'BILLBOARD' not in mode:
|
||||
mode += ['BILLBOARD']
|
||||
if face.use_object_color and 'OBJECT_COLOR' not in mode:
|
||||
mode += ['OBJECT_COLOR']
|
||||
if face.use_collision and 'COLLISION' not in mode:
|
||||
mode += ['COLLISION']
|
||||
# mode |= face.mode
|
||||
|
||||
if 'HALO' in mode and self.halonode == 0:
|
||||
# if mode & Mesh.FaceModes.HALO and self.halonode == 0:
|
||||
self.write_indented("<Billboard axisOfRotation=\"0 0 0\">\n", 1)
|
||||
self.halonode = 1
|
||||
elif 'BILLBOARD' in mode and self.billnode == 0:
|
||||
# elif mode & Mesh.FaceModes.BILLBOARD and self.billnode == 0:
|
||||
self.write_indented("<Billboard axisOfRotation=\"0 1 0\">\n", 1)
|
||||
self.billnode = 1
|
||||
elif 'COLLISION' not in mode and self.collnode == 0:
|
||||
# elif not mode & Mesh.FaceModes.DYNAMIC and self.collnode == 0:
|
||||
self.write_indented("<Collision enabled=\"false\">\n", 1)
|
||||
self.collnode = 1
|
||||
|
||||
loc, quat, sca = mtx.decompose()
|
||||
|
||||
self.write_indented("<Transform DEF=\"%s\" " % mesh_name_x3d, 1)
|
||||
fw("translation=\"%.6f %.6f %.6f\" " % loc[:])
|
||||
fw("scale=\"%.6f %.6f %.6f\" " % sca[:])
|
||||
fw("rotation=\"%.6f %.6f %.6f %.6f\" " % (quat.axis[:] + (quat.angle, )))
|
||||
fw(">\n")
|
||||
|
||||
if mesh.tag:
|
||||
self.write_indented("<Group USE=\"G_%s\" />\n" % mesh_name_x3d, 1)
|
||||
else:
|
||||
mesh.tag = True
|
||||
|
||||
self.write_indented("<Group DEF=\"G_%s\">\n" % mesh_name_x3d, 1)
|
||||
|
||||
is_uv = bool(mesh.uv_textures.active)
|
||||
# is_col, defined for each material
|
||||
|
||||
is_coords_written = False
|
||||
|
||||
mesh_materials = mesh.materials[:]
|
||||
if not mesh_materials:
|
||||
mesh_materials = [None]
|
||||
|
||||
mesh_material_tex = [None] * len(mesh_materials)
|
||||
mesh_material_mtex = [None] * len(mesh_materials)
|
||||
mesh_material_images = [None] * len(mesh_materials)
|
||||
|
||||
for i, material in enumerate(mesh_materials):
|
||||
if material:
|
||||
for mtex in material.texture_slots:
|
||||
if mtex:
|
||||
tex = mtex.texture
|
||||
if tex and tex.type == 'IMAGE':
|
||||
image = tex.image
|
||||
if image:
|
||||
mesh_material_tex[i] = tex
|
||||
mesh_material_mtex[i] = mtex
|
||||
mesh_material_images[i] = image
|
||||
break
|
||||
|
||||
mesh_materials_use_face_texture = [getattr(material, "use_face_texture", True) for material in mesh_materials]
|
||||
|
||||
mesh_faces = mesh.faces[:]
|
||||
mesh_faces_materials = [f.material_index for f in mesh_faces]
|
||||
|
||||
if is_uv and True in mesh_materials_use_face_texture:
|
||||
mesh_faces_image = [(fuv.image if (mesh_materials_use_face_texture[mesh_faces_materials[i]] and fuv.use_image) else mesh_material_images[mesh_faces_materials[i]]) for i, fuv in enumerate(mesh.uv_textures.active.data)]
|
||||
mesh_faces_image_unique = set(mesh_faces_image)
|
||||
elif len(set(mesh_material_images) | {None}) > 1: # make sure there is at least one image
|
||||
mesh_faces_image = [mesh_material_images[material_index] for material_index in mesh_faces_materials]
|
||||
mesh_faces_image_unique = set(mesh_faces_image)
|
||||
else:
|
||||
mesh_faces_image = [None] * len(mesh_faces)
|
||||
mesh_faces_image_unique = {None}
|
||||
|
||||
# group faces
|
||||
face_groups = {}
|
||||
for material_index in range(len(mesh_materials)):
|
||||
for image in mesh_faces_image_unique:
|
||||
face_groups[material_index, image] = []
|
||||
del mesh_faces_image_unique
|
||||
|
||||
for i, (material_index, image) in enumerate(zip(mesh_faces_materials, mesh_faces_image)):
|
||||
face_groups[material_index, image].append(i)
|
||||
|
||||
for (material_index, image), face_group in face_groups.items():
|
||||
if face_group:
|
||||
material = mesh_materials[material_index]
|
||||
|
||||
self.write_indented("<Shape>\n", 1)
|
||||
is_smooth = False
|
||||
is_col = (mesh.vertex_colors.active and (material is None or material.use_vertex_color_paint))
|
||||
|
||||
# kludge but as good as it gets!
|
||||
for i in face_group:
|
||||
if mesh_faces[i].use_smooth:
|
||||
is_smooth = True
|
||||
break
|
||||
|
||||
if image:
|
||||
self.write_indented("<Appearance>\n", 1)
|
||||
self.writeImageTexture(image)
|
||||
|
||||
if mesh_materials_use_face_texture[material_index]:
|
||||
if image.use_tiles:
|
||||
self.write_indented("<TextureTransform scale=\"%s %s\" />\n" % (image.tiles_x, image.tiles_y))
|
||||
else:
|
||||
# transform by mtex
|
||||
loc = mesh_material_mtex[material_index].offset[:2]
|
||||
|
||||
# mtex_scale * tex_repeat
|
||||
sca_x, sca_y = mesh_material_mtex[material_index].scale[:2]
|
||||
|
||||
sca_x *= mesh_material_tex[material_index].repeat_x
|
||||
sca_y *= mesh_material_tex[material_index].repeat_y
|
||||
|
||||
# flip x/y is a sampling feature, convert to transform
|
||||
if mesh_material_tex[material_index].use_flip_axis:
|
||||
rot = math.pi / -2.0
|
||||
sca_x, sca_y = sca_y, -sca_x
|
||||
else:
|
||||
rot = 0.0
|
||||
|
||||
self.write_indented("<TextureTransform ", 1)
|
||||
# fw("center=\"%.6f %.6f\" " % (0.0, 0.0))
|
||||
fw("translation=\"%.6f %.6f\" " % loc)
|
||||
fw("scale=\"%.6f %.6f\" " % (sca_x, sca_y))
|
||||
fw("rotation=\"%.6f\" " % rot)
|
||||
fw("/>\n")
|
||||
|
||||
self.write_indented("</Appearance>\n", -1)
|
||||
|
||||
elif material:
|
||||
self.write_indented("<Appearance>\n", 1)
|
||||
self.writeMaterial(material, self.cleanStr(material.name, ""), world)
|
||||
self.write_indented("</Appearance>\n", -1)
|
||||
|
||||
#-- IndexedFaceSet or IndexedLineSet
|
||||
|
||||
self.write_indented("<IndexedFaceSet ", 1)
|
||||
|
||||
# --- Write IndexedFaceSet Attributes
|
||||
if mesh.show_double_sided:
|
||||
fw("solid=\"true\" ")
|
||||
else:
|
||||
fw("solid=\"false\" ")
|
||||
|
||||
if is_smooth:
|
||||
fw("creaseAngle=\"%.4f\" " % mesh.auto_smooth_angle)
|
||||
|
||||
if is_uv:
|
||||
# "texCoordIndex"
|
||||
fw("\n\t\t\ttexCoordIndex=\"")
|
||||
j = 0
|
||||
for i in face_group:
|
||||
if len(mesh_faces[i].vertices) == 4:
|
||||
fw("%d %d %d %d -1, " % (j, j + 1, j + 2, j + 3))
|
||||
j += 4
|
||||
else:
|
||||
fw("%d %d %d -1, " % (j, j + 1, j + 2))
|
||||
j += 3
|
||||
fw("\" ")
|
||||
# --- end texCoordIndex
|
||||
|
||||
if is_col:
|
||||
fw("colorPerVertex=\"false\" ")
|
||||
|
||||
if True:
|
||||
# "coordIndex"
|
||||
fw('coordIndex="')
|
||||
if EXPORT_TRI:
|
||||
for i in face_group:
|
||||
fv = mesh_faces[i].vertices[:]
|
||||
if len(fv) == 3:
|
||||
fw("%i %i %i -1, " % fv)
|
||||
else:
|
||||
fw("%i %i %i -1, " % (fv[0], fv[1], fv[2]))
|
||||
fw("%i %i %i -1, " % (fv[0], fv[2], fv[3]))
|
||||
else:
|
||||
for i in face_group:
|
||||
fv = mesh_faces[i].vertices[:]
|
||||
if len(fv) == 3:
|
||||
fw("%i %i %i -1, " % fv)
|
||||
else:
|
||||
fw("%i %i %i %i -1, " % fv)
|
||||
|
||||
fw("\" ")
|
||||
# --- end coordIndex
|
||||
|
||||
# close IndexedFaceSet
|
||||
fw(">\n")
|
||||
|
||||
# --- Write IndexedFaceSet Elements
|
||||
if True:
|
||||
if is_coords_written:
|
||||
self.write_indented("<Coordinate USE=\"%s%s\" />\n" % ("coord_", mesh_name_x3d))
|
||||
else:
|
||||
self.write_indented("<Coordinate DEF=\"%s%s\" \n" % ("coord_", mesh_name_x3d), 1)
|
||||
fw("\t\t\t\tpoint=\"")
|
||||
for v in mesh.vertices:
|
||||
fw("%.6f %.6f %.6f, " % v.co[:])
|
||||
fw("\" />")
|
||||
self.write_indented("\n", -1)
|
||||
is_coords_written = True
|
||||
|
||||
if is_uv:
|
||||
self.write_indented("<TextureCoordinate point=\"", 1)
|
||||
fw = fw
|
||||
mesh_faces_uv = mesh.uv_textures.active.data
|
||||
for i in face_group:
|
||||
for uv in mesh_faces_uv[i].uv:
|
||||
fw("%.4f %.4f, " % uv[:])
|
||||
del mesh_faces_uv
|
||||
fw("\" />")
|
||||
self.write_indented("\n", -1)
|
||||
|
||||
if is_col:
|
||||
self.write_indented("<Color color=\"", 1)
|
||||
# XXX, 1 color per face, only
|
||||
mesh_faces_col = mesh.vertex_colors.active.data
|
||||
for i in face_group:
|
||||
fw("%.3f %.3f %.3f, " % mesh_faces_col[i].color1[:])
|
||||
del mesh_faces_col
|
||||
fw("\" />")
|
||||
self.write_indented("\n", -1)
|
||||
|
||||
#--- output vertexColors
|
||||
|
||||
#--- output closing braces
|
||||
self.write_indented("</IndexedFaceSet>\n", -1)
|
||||
self.write_indented("</Shape>\n", -1)
|
||||
|
||||
self.write_indented("</Group>\n", -1)
|
||||
|
||||
self.write_indented("</Transform>\n", -1)
|
||||
|
||||
if self.halonode == 1:
|
||||
self.write_indented("</Billboard>\n", -1)
|
||||
self.halonode = 0
|
||||
|
||||
if self.billnode == 1:
|
||||
self.write_indented("</Billboard>\n", -1)
|
||||
self.billnode = 0
|
||||
|
||||
if self.collnode == 1:
|
||||
self.write_indented("</Collision>\n", -1)
|
||||
self.collnode = 0
|
||||
|
||||
fw("\n")
|
||||
|
||||
def writeMaterial(self, mat, matName, world):
|
||||
# look up material name, use it if available
|
||||
if mat.tag:
|
||||
self.write_indented("<Material USE=\"MA_%s\" />\n" % matName)
|
||||
else:
|
||||
mat.tag = True
|
||||
|
||||
emit = mat.emit
|
||||
ambient = mat.ambient / 3.0
|
||||
diffuseColor = tuple(mat.diffuse_color)
|
||||
if world:
|
||||
ambiColor = tuple(((c * mat.ambient) * 2.0) for c in world.ambient_color)
|
||||
else:
|
||||
ambiColor = 0.0, 0.0, 0.0
|
||||
|
||||
emitColor = tuple(((c * emit) + ambiColor[i]) / 2.0 for i, c in enumerate(diffuseColor))
|
||||
shininess = mat.specular_hardness / 512.0
|
||||
specColor = tuple((c + 0.001) / (1.25 / (mat.specular_intensity + 0.001)) for c in mat.specular_color)
|
||||
transp = 1.0 - mat.alpha
|
||||
|
||||
if mat.use_shadeless:
|
||||
ambient = 1.0
|
||||
shininess = 0.0
|
||||
specColor = emitColor = diffuseColor
|
||||
|
||||
self.write_indented("<Material DEF=\"MA_%s\" " % matName, 1)
|
||||
self.file.write("diffuseColor=\"%s %s %s\" " % round_color(diffuseColor, self.cp))
|
||||
self.file.write("specularColor=\"%s %s %s\" " % round_color(specColor, self.cp))
|
||||
self.file.write("emissiveColor=\"%s %s %s\" \n" % round_color(emitColor, self.cp))
|
||||
self.write_indented("ambientIntensity=\"%s\" " % (round(ambient, self.cp)))
|
||||
self.file.write("shininess=\"%s\" " % (round(shininess, self.cp)))
|
||||
self.file.write("transparency=\"%s\" />" % (round(transp, self.cp)))
|
||||
self.write_indented("\n", -1)
|
||||
|
||||
def writeImageTexture(self, image):
    """Write an X3D <ImageTexture> node for *image*.

    The first visit writes a DEF node with the image file name as url;
    later visits (image.tag set) emit a USE reference instead.
    """
    def_name = self.cleanStr(image.name)
    # Only the base file name is exported, not the full path.
    url = os.path.basename(image.filepath)

    if image.tag:
        self.write_indented("<ImageTexture USE=\"%s\" />\n" % def_name)
        return

    image.tag = True
    self.write_indented("<ImageTexture DEF=\"%s\" " % def_name, 1)
    self.file.write("url=\"%s\" />" % url)
    self.write_indented("\n", -1)
|
||||
|
||||
def writeBackground(self, world, alltextures):
    """Write the X3D <Background> node.

    Emits sky/ground color fields chosen by the world's sky-blend flags,
    then scans *alltextures* for image textures named back/bottom/front/
    left/right/top and writes them as the background cube-map URLs.
    Does nothing when the scene has no world.
    """
    if world:
        worldname = world.name
    else:
        # No world configured - there is no background to export.
        return

    # The three sky flags select one of the gradient layouts below.
    blending = world.use_sky_blend, world.use_sky_paper, world.use_sky_real

    # Horizon/zenith colors plus their average (for two-step gradients),
    # each rounded to self.cp decimal places.
    grd_triple = round_color(world.horizon_color, self.cp)
    sky_triple = round_color(world.zenith_color, self.cp)
    mix_triple = round_color(((grd_triple[i] + sky_triple[i]) / 2.0 for i in range(3)), self.cp)

    self.file.write("<Background DEF=\"%s\" " % self.secureName(worldname))
    # No Skytype - just Hor color
    if blending == (False, False, False):
        self.file.write("groundColor=\"%s %s %s\" " % grd_triple)
        self.file.write("skyColor=\"%s %s %s\" " % grd_triple)
    # Blend Gradient
    elif blending == (True, False, False):
        self.file.write("groundColor=\"%s %s %s, " % grd_triple)
        self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " % mix_triple)
        self.file.write("skyColor=\"%s %s %s, " % sky_triple)
        self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " % mix_triple)
    # Blend+Real Gradient Inverse
    elif blending == (True, False, True):
        self.file.write("groundColor=\"%s %s %s, " % sky_triple)
        self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " % mix_triple)
        self.file.write("skyColor=\"%s %s %s, " % grd_triple)
        self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " % mix_triple)
    # Paper - just Zen Color
    elif blending == (False, False, True):
        self.file.write("groundColor=\"%s %s %s\" " % sky_triple)
        self.file.write("skyColor=\"%s %s %s\" " % sky_triple)
    # Blend+Real+Paper - complex gradient
    elif blending == (True, True, True):
        # NOTE(review): this branch uses write_indented() while every other
        # branch uses file.write(); that injects tab indentation inside the
        # attribute list - looks unintentional, confirm before relying on it.
        self.write_indented("groundColor=\"%s %s %s, " % sky_triple)
        self.write_indented("%s %s %s\" groundAngle=\"1.57, 1.57\" " % grd_triple)
        self.write_indented("skyColor=\"%s %s %s, " % sky_triple)
        self.write_indented("%s %s %s\" skyAngle=\"1.57, 1.57\" " % grd_triple)
    # Any Other two colors
    else:
        self.file.write("groundColor=\"%s %s %s\" " % grd_triple)
        self.file.write("skyColor=\"%s %s %s\" " % sky_triple)

    alltexture = len(alltextures)

    for i in range(alltexture):
        tex = alltextures[i]

        # Only image textures with an actual image contribute URLs.
        if tex.type != 'IMAGE' or tex.image is None:
            continue

        # The texture *name* selects which cube face it maps to.
        namemat = tex.name
        # namemat = alltextures[i].name

        pic = tex.image

        # using .expandpath just in case, os.path may not expect //
        basename = os.path.basename(bpy.path.abspath(pic.filepath))

        pic = alltextures[i].image
        if (namemat == "back") and (pic != None):
            self.file.write("\n\tbackUrl=\"%s\" " % basename)
        elif (namemat == "bottom") and (pic != None):
            self.write_indented("bottomUrl=\"%s\" " % basename)
        elif (namemat == "front") and (pic != None):
            self.write_indented("frontUrl=\"%s\" " % basename)
        elif (namemat == "left") and (pic != None):
            self.write_indented("leftUrl=\"%s\" " % basename)
        elif (namemat == "right") and (pic != None):
            self.write_indented("rightUrl=\"%s\" " % basename)
        elif (namemat == "top") and (pic != None):
            self.write_indented("topUrl=\"%s\" " % basename)
    # Close the <Background> element once all face URLs are written.
    self.write_indented("/>\n\n")
|
||||
|
||||
##########################################################
|
||||
# export routine
|
||||
##########################################################
|
||||
|
||||
def export(self, scene, world, alltextures,
           EXPORT_APPLY_MODIFIERS=False,
           EXPORT_TRI=False,
           ):
    """Top-level export driver: write header, environment, then every
    visible object of the scene, and finally close the document.

    EXPORT_APPLY_MODIFIERS - evaluate modifiers into a temporary mesh.
    EXPORT_TRI             - passed through to writeIndexedFaceSet.
    """

    # tag un-exported IDs: the writers use the tag to emit DEF once
    # and USE on later visits of the same datablock.
    bpy.data.meshes.tag(False)
    bpy.data.materials.tag(False)
    bpy.data.images.tag(False)

    print("Info: starting X3D export to %r..." % self.filepath)
    self.writeHeader()
    # self.writeScript()
    self.writeNavigationInfo(scene)
    self.writeBackground(world, alltextures)
    self.writeFog(world)
    self.proto = 0

    # Only objects visible in this scene are exported.
    for ob_main in [o for o in scene.objects if o.is_visible(scene)]:

        # Expand dupli-groups etc. into (object, matrix) pairs.
        free, derived = create_derived_objects(scene, ob_main)

        if derived is None:
            continue

        for ob, ob_mat in derived:
            objType = ob.type
            objName = ob.name
            # Transform into the exporter's global coordinate system.
            ob_mat = self.global_matrix * ob_mat

            if objType == 'CAMERA':
                self.writeViewpoint(ob, ob_mat, scene)
            elif objType in ('MESH', 'CURVE', 'SURF', 'FONT'):
                # Non-mesh types must always be converted to a mesh;
                # meshes only when modifiers are to be applied.
                if EXPORT_APPLY_MODIFIERS or objType != 'MESH':
                    me = ob.create_mesh(scene, EXPORT_APPLY_MODIFIERS, 'PREVIEW')
                else:
                    me = ob.data

                self.writeIndexedFaceSet(ob, me, ob_mat, world, EXPORT_TRI=EXPORT_TRI)

                # free mesh created with create_mesh()
                if me != ob.data:
                    bpy.data.meshes.remove(me)

            elif objType == 'LAMP':
                data = ob.data
                datatype = data.type
                if datatype == 'POINT':
                    self.writePointLight(ob, ob_mat, data, world)
                elif datatype == 'SPOT':
                    self.writeSpotLight(ob, ob_mat, data, world)
                elif datatype == 'SUN':
                    self.writeDirectionalLight(ob, ob_mat, data, world)
                else:
                    # Fallback: export unknown lamp types as directional.
                    self.writeDirectionalLight(ob, ob_mat, data, world)
            else:
                #print "Info: Ignoring [%s], object type [%s] not handle yet" % (object.name,object.getType)
                pass

        if free:
            free_derived_objects(ob_main)

    self.file.write("\n</Scene>\n</X3D>")

    # if EXPORT_APPLY_MODIFIERS:
    #     if containerMesh:
    #         containerMesh.vertices = None

    self.cleanup()
|
||||
|
||||
##########################################################
|
||||
# Utility methods
|
||||
##########################################################
|
||||
|
||||
def cleanup(self):
    """Close the output file and reset the indentation state."""
    # Resetting the level first is safe: nothing is written after close.
    self.indentLevel = 0
    self.file.close()
    print("Info: finished X3D export to %r" % self.filepath)
|
||||
|
||||
def cleanStr(self, name, prefix='rsvd_'):
    """cleanStr(name,prefix) - try to create a valid VRML DEF name from object name"""

    newName = name
    # An empty name gets a generated unique placeholder.
    if len(newName) == 0:
        self.nNodeID += 1
        return "%s%d" % (prefix, self.nNodeID)

    # Reserved words are escaped by prefixing.
    if newName in self.namesReserved:
        newName = '%s%s' % (prefix, newName)

    # DEF identifiers may not start with a digit.
    if newName[0].isdigit():
        newName = "%s%s" % ('_', newName)

    # Replace characters that are illegal in VRML/X3D DEF names.
    # BUGFIX: the list used to contain ', ' (comma+space), which could
    # never match because ' ' is replaced with '_' earlier in the same
    # loop - so bare commas leaked into DEF names.  Use ',' instead.
    for bad in [' ', '"', '#', "'", ',', '.', '[', '\\', ']', '{', '}']:
        newName = newName.replace(bad, '_')
    return newName
|
||||
|
||||
def faceToString(self, face):
    """Print debug information about a face (flags, mode, transparency, image).

    NOTE(review): this references the Blender 2.4x ``Mesh`` module
    (``Mesh.FaceModes`` / ``Mesh.FaceTranspModes``) and 2.4x face
    attributes, none of which exist in this bpy-2.5 style file - calling
    it would raise NameError.  Looks like leftover legacy debug code;
    confirm before use.
    """

    print("Debug: face.flag=0x%x (bitflags)" % face.flag)
    if face.sel:
        print("Debug: face.sel=true")

    print("Debug: face.mode=0x%x (bitflags)" % face.mode)
    if face.mode & Mesh.FaceModes.TWOSIDE:
        print("Debug: face.mode twosided")

    print("Debug: face.transp=0x%x (enum)" % face.blend_type)
    if face.blend_type == Mesh.FaceTranspModes.SOLID:
        print("Debug: face.transp.SOLID")

    if face.image:
        print("Debug: face.image=%s" % face.image.name)
    print("Debug: face.materialIndex=%d" % face.materialIndex)
|
||||
|
||||
def meshToString(self, mesh):
    """Print debug statistics for *mesh*: UV/vertex-color presence and
    vertex/face/material counts.  Returns None."""
    # print("Debug: mesh.hasVertexUV=%d" % mesh.vertexColors)
    print("Debug: mesh.faceUV=%d" % (len(mesh.uv_textures) > 0))
    # print("Debug: mesh.faceUV=%d" % mesh.faceUV)
    print("Debug: mesh.hasVertexColours=%d" % (len(mesh.vertex_colors) > 0))
    # print("Debug: mesh.hasVertexColours=%d" % mesh.hasVertexColours())
    print("Debug: mesh.vertices=%d" % len(mesh.vertices))
    print("Debug: mesh.faces=%d" % len(mesh.faces))
    print("Debug: mesh.materials=%d" % len(mesh.materials))
    # BUGFIX: the function used to end with ``return s`` although ``s``
    # was never assigned (its construction was commented out), so every
    # call raised NameError.  It is a debug dump; it now returns None.
|
||||
|
||||
# For writing well formed VRML code
|
||||
#------------------------------------------------------------------------
|
||||
def write_indented(self, s, inc=0):
    """Write *s* prefixed with the current tab indentation, shifting the
    indent level by *inc*.

    Negative (or zero) increments apply before writing, so a closing tag
    de-dents itself; positive increments apply after, so an opening tag
    indents only what follows it.
    """
    if inc < 1:
        self.indentLevel += inc
    self.file.write("\t" * self.indentLevel + s)
    if inc > 0:
        self.indentLevel += inc
|
||||
|
||||
##########################################################
|
||||
# Callbacks, needed before Main
|
||||
##########################################################
|
||||
|
||||
|
||||
def save(operator, context, filepath="",
         use_apply_modifiers=False,
         use_triangulate=False,
         use_compress=False):
    """Operator entry point: export *context.scene* to X3D at *filepath*.

    use_apply_modifiers - apply modifiers before exporting meshes.
    use_triangulate     - triangulate exported faces.
    use_compress        - write a compressed .x3dz instead of .x3d.
    Returns {'FINISHED'}.
    """
    # Force the correct extension.  BUGFIX: the previous
    # '.'.join(filepath.split('.')[:-1]) discarded the entire file name
    # when it contained no dot ("model" -> ".x3dz");
    # os.path.splitext() keeps the stem intact in that case.
    ext = '.x3dz' if use_compress else '.x3d'
    if not filepath.lower().endswith(ext):
        filepath = os.path.splitext(filepath)[0] + ext

    scene = context.scene
    world = scene.world

    # Leave edit mode so mesh data reflects the latest edits.
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    # XXX these are global textures while .Get() returned only scene's?
    alltextures = bpy.data.textures
    # alltextures = Blender.Texture.Get()

    wrlexport = x3d_class(filepath)
    wrlexport.export(scene,
                     world,
                     alltextures,
                     EXPORT_APPLY_MODIFIERS=use_apply_modifiers,
                     EXPORT_TRI=use_triangulate,
                     )

    return {'FINISHED'}
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue
Block a user