diff --git a/release/scripts/io/export_3ds.py b/release/scripts/io/export_3ds.py deleted file mode 100644 index dea7b6dbfe6..00000000000 --- a/release/scripts/io/export_3ds.py +++ /dev/null @@ -1,1130 +0,0 @@ -#!BPY -# coding: utf-8 -""" -Name: '3D Studio (.3ds)...' -Blender: 243 -Group: 'Export' -Tooltip: 'Export to 3DS file format (.3ds).' -""" - -__author__ = ["Campbell Barton", "Bob Holcomb", "Richard Lärkäng", "Damien McGinnes", "Mark Stijnman"] -__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/") -__version__ = "0.90a" -__bpydoc__ = """\ - -3ds Exporter - -This script Exports a 3ds file. - -Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information -from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode. -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Bob Holcomb -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -###################################################### -# Importing modules -###################################################### - -import struct -import os -import time - -import bpy - -# import Blender -# from BPyMesh import getMeshFromObject -# from BPyObject import getDerivedObjects -# try: -# import struct -# except: -# struct = None - -# also used by X3D exporter -# return a tuple (free, object list), free is True if memory should be freed later with free_derived_objects() -def create_derived_objects(ob): - if ob.parent and ob.parent.dupli_type != 'NONE': - return False, None - - if ob.dupli_type != 'NONE': - ob.create_dupli_list() - return True, [(dob.object, dob.matrix) for dob in ob.dupli_list] - else: - return False, [(ob, ob.matrix)] - -# also used by X3D exporter -def free_derived_objects(ob): - ob.free_dupli_list() - -# So 3ds max can open files, limit names to 12 in length -# this is verry annoying for filenames! 
-name_unique = [] -name_mapping = {} -def sane_name(name): - name_fixed = name_mapping.get(name) - if name_fixed != None: - return name_fixed - - if len(name) > 12: - new_name = name[:12] - else: - new_name = name - - i = 0 - - while new_name in name_unique: - new_name = new_name[:-4] + '.%.3d' % i - i+=1 - - name_unique.append(new_name) - name_mapping[name] = new_name - return new_name - -###################################################### -# Data Structures -###################################################### - -#Some of the chunks that we will export -#----- Primary Chunk, at the beginning of each file -PRIMARY= int("0x4D4D",16) - -#------ Main Chunks -OBJECTINFO = int("0x3D3D",16); #This gives the version of the mesh and is found right before the material and object information -VERSION = int("0x0002",16); #This gives the version of the .3ds file -KFDATA = int("0xB000",16); #This is the header for all of the key frame info - -#------ sub defines of OBJECTINFO -MATERIAL=45055 #0xAFFF // This stored the texture info -OBJECT=16384 #0x4000 // This stores the faces, vertices, etc... - -#>------ sub defines of MATERIAL -MATNAME = int("0xA000",16); # This holds the material name -MATAMBIENT = int("0xA010",16); # Ambient color of the object/material -MATDIFFUSE = int("0xA020",16); # This holds the color of the object/material -MATSPECULAR = int("0xA030",16); # SPecular color of the object/material -MATSHINESS = int("0xA040",16); # ?? -MATMAP = int("0xA200",16); # This is a header for a new material -MATMAPFILE = int("0xA300",16); # This holds the file name of the texture - -RGB1= int("0x0011",16) -RGB2= int("0x0012",16) - -#>------ sub defines of OBJECT -OBJECT_MESH = int("0x4100",16); # This lets us know that we are reading a new object -OBJECT_LIGHT = int("0x4600",16); # This lets un know we are reading a light object -OBJECT_CAMERA= int("0x4700",16); # This lets un know we are reading a camera object - -#>------ sub defines of CAMERA -OBJECT_CAM_RANGES= int("0x4720",16); # The camera range values - -#>------ sub defines of OBJECT_MESH -OBJECT_VERTICES = int("0x4110",16); # The objects vertices -OBJECT_FACES = int("0x4120",16); # The objects faces -OBJECT_MATERIAL = int("0x4130",16); # This is found if the object has a material, either texture map or color -OBJECT_UV = int("0x4140",16); # The UV texture coordinates -OBJECT_TRANS_MATRIX = int("0x4160",16); # The Object Matrix - -#>------ sub defines of KFDATA -KFDATA_KFHDR = int("0xB00A",16); -KFDATA_KFSEG = int("0xB008",16); -KFDATA_KFCURTIME = int("0xB009",16); -KFDATA_OBJECT_NODE_TAG = int("0xB002",16); - -#>------ sub defines of OBJECT_NODE_TAG -OBJECT_NODE_ID = int("0xB030",16); -OBJECT_NODE_HDR = int("0xB010",16); -OBJECT_PIVOT = int("0xB013",16); -OBJECT_INSTANCE_NAME = int("0xB011",16); -POS_TRACK_TAG = int("0xB020",16); -ROT_TRACK_TAG = int("0xB021",16); -SCL_TRACK_TAG = int("0xB022",16); - -def uv_key(uv): - return round(uv[0], 6), round(uv[1], 6) -# return round(uv.x, 6), round(uv.y, 6) - -# size defines: -SZ_SHORT = 2 -SZ_INT = 4 -SZ_FLOAT = 4 - -class _3ds_short(object): - '''Class representing a short (2-byte integer) for a 3ds file. 
- *** This looks like an unsigned short H is unsigned from the struct docs - Cam***''' - __slots__ = 'value' - def __init__(self, val=0): - self.value=val - - def get_size(self): - return SZ_SHORT - - def write(self,file): - file.write(struct.pack("= mat_ls_len: - mat_index = f.mat = 0 - mat = mat_ls[mat_index] - if mat: mat_name = mat.name - else: mat_name = None - # else there alredy set to none - - img = uf.image -# img = f.image - if img: img_name = img.name - else: img_name = None - - materialDict.setdefault((mat_name, img_name), (mat, img) ) - - - else: - for mat in mat_ls: - if mat: # material may be None so check its not. - materialDict.setdefault((mat.name, None), (mat, None) ) - - # Why 0 Why! - for f in data.faces: - if f.material_index >= mat_ls_len: -# if f.mat >= mat_ls_len: - f.material_index = 0 - # f.mat = 0 - - if free: - free_derived_objects(ob) - - - # Make material chunks for all materials used in the meshes: - for mat_and_image in materialDict.values(): - object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1])) - - # Give all objects a unique ID and build a dictionary from object name to object id: - """ - name_to_id = {} - for ob, data in mesh_objects: - name_to_id[ob.name]= len(name_to_id) - #for ob in empty_objects: - # name_to_id[ob.name]= len(name_to_id) - """ - - # Create object chunks for all meshes: - i = 0 - for ob, blender_mesh in mesh_objects: - # create a new object chunk - object_chunk = _3ds_chunk(OBJECT) - - # set the object name - object_chunk.add_variable("name", _3ds_string(sane_name(ob.name))) - - # make a mesh chunk out of the mesh: - object_chunk.add_subchunk(make_mesh_chunk(blender_mesh, materialDict)) - object_info.add_subchunk(object_chunk) - - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - # make a kf object node for the object: - kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) - ''' -# if not blender_mesh.users: - bpy.data.remove_mesh(blender_mesh) -# blender_mesh.verts = None - - i+=i - - # Create chunks for all empties: - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - for ob in empty_objects: - # Empties only require a kf object node: - kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) - pass - ''' - - # Add main object info chunk to primary chunk: - primary.add_subchunk(object_info) - - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - # Add main keyframe data chunk to primary chunk: - primary.add_subchunk(kfdata) - ''' - - # At this point, the chunk hierarchy is completely built. - - # Check the size: - primary.get_size() - # Open the file for writing: - file = open( filename, 'wb' ) - - # Recursively write the chunks to file: - primary.write(file) - - # Close the file: - file.close() - - # Debugging only: report the exporting time: -# Blender.Window.WaitCursor(0) - print("3ds export time: %.2f" % (time.clock() - time1)) -# print("3ds export time: %.2f" % (Blender.sys.time() - time1)) - - # Debugging only: dump the chunk hierarchy: - #primary.dump() - - -# if __name__=='__main__': -# if struct: -# Blender.Window.FileSelector(save_3ds, "Export 3DS", Blender.sys.makename(ext='.3ds')) -# else: -# Blender.Draw.PupMenu("Error%t|This script requires a full python installation") -# # save_3ds('/test_b.3ds') - -class EXPORT_OT_3ds(bpy.types.Operator): - ''' - 3DS Exporter - ''' - __idname__ = "export.3ds" - __label__ = 'Export 3DS' - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. 
- - __props__ = [ - # bpy.props.StringProperty(attr="filename", name="File Name", description="File name used for exporting the 3DS file", maxlen= 1024, default= ""), - bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the 3DS file", maxlen= 1024, default= ""), - ] - - def execute(self, context): - save_3ds(self.path, context) - return ('FINISHED',) - - def invoke(self, context, event): - wm = context.manager - wm.add_fileselect(self.__operator__) - return ('RUNNING_MODAL',) - - def poll(self, context): # Poll isnt working yet - print("Poll") - return context.active_object != None - -bpy.ops.add(EXPORT_OT_3ds) diff --git a/release/scripts/io/export_fbx.py b/release/scripts/io/export_fbx.py deleted file mode 100644 index 21b1388ebfe..00000000000 --- a/release/scripts/io/export_fbx.py +++ /dev/null @@ -1,3453 +0,0 @@ -#!BPY -""" -Name: 'Autodesk FBX (.fbx)...' -Blender: 249 -Group: 'Export' -Tooltip: 'Selection to an ASCII Autodesk FBX ' -""" -__author__ = "Campbell Barton" -__url__ = ['www.blender.org', 'blenderartists.org'] -__version__ = "1.2" - -__bpydoc__ = """\ -This script is an exporter to the FBX file format. - -http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx -""" -# -------------------------------------------------------------------------- -# FBX Export v0.1 by Campbell Barton (AKA Ideasman) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import os -import time -import math # math.pi -import shutil # for file copying - -# try: -# import time -# # import os # only needed for batch export, nbot used yet -# except: -# time = None # use this to check if they have python modules installed - -# for python 2.3 support -try: - set() -except: - try: - from sets import Set as set - except: - set = None # so it complains you dont have a ! 
- -# # os is only needed for batch 'own dir' option -# try: -# import os -# except: -# os = None - -# import Blender -import bpy -import Mathutils -# from Blender.Mathutils import Matrix, Vector, RotationMatrix - -# import BPyObject -# import BPyMesh -# import BPySys -# import BPyMessages - -## This was used to make V, but faster not to do all that -##valid = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_,.()[]{}' -##v = range(255) -##for c in valid: v.remove(ord(c)) -v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,42,43,47,58,59,60,61,62,63,64,92,94,96,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254] -invalid = ''.join([chr(i) for i in v]) -def cleanName(name): - for ch in invalid: name = name.replace(ch, '_') - return name -# del v, i - - -def copy_file(source, dest): - file = open(source, 'rb') - data = file.read() - file.close() - - file = open(dest, 'wb') - file.write(data) - file.close() - - -# XXX not used anymore, images are copied one at a time -def copy_images(dest_dir, textures): - if not dest_dir.endswith(os.sep): - dest_dir += os.sep - - image_paths = set() - for tex in textures: - image_paths.add(Blender.sys.expandpath(tex.filename)) - - # Now copy images - copyCount = 0 - for image_path in image_paths: - if Blender.sys.exists(image_path): - # Make a name for the target path. - dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1] - if not Blender.sys.exists(dest_image_path): # Image isnt alredy there - print('\tCopying "%s" > "%s"' % (image_path, dest_image_path)) - try: - copy_file(image_path, dest_image_path) - copyCount+=1 - except: - print('\t\tWarning, file failed to copy, skipping.') - - print('\tCopied %d images' % copyCount) - -# I guess FBX uses degrees instead of radians (Arystan). -# Call this function just before writing to FBX. 
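For reference, the eulerRadToDeg() helper defined next simply scales each Euler component by 180/pi before the values are written into the FBX file. A minimal standalone check of the same arithmetic (plain Python, deliberately bypassing Mathutils.Euler, so the values shown are illustrative only):

    import math

    eul = (math.pi / 2, 0.0, math.pi)          # radians, as Blender stores rotations
    deg = [180.0 / math.pi * a for a in eul]   # component-wise conversion, as in eulerRadToDeg()
    print(deg)                                 # [90.0, 0.0, 180.0]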
-def eulerRadToDeg(eul): - ret = Mathutils.Euler() - - ret.x = 180 / math.pi * eul[0] - ret.y = 180 / math.pi * eul[1] - ret.z = 180 / math.pi * eul[2] - - return ret - -mtx4_identity = Mathutils.Matrix() - -# testing -mtx_x90 = Mathutils.RotationMatrix( math.pi/2, 3, 'x') # used -#mtx_x90n = RotationMatrix(-90, 3, 'x') -#mtx_y90 = RotationMatrix( 90, 3, 'y') -#mtx_y90n = RotationMatrix(-90, 3, 'y') -#mtx_z90 = RotationMatrix( 90, 3, 'z') -#mtx_z90n = RotationMatrix(-90, 3, 'z') - -#mtx4_x90 = RotationMatrix( 90, 4, 'x') -mtx4_x90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'x') # used -#mtx4_y90 = RotationMatrix( 90, 4, 'y') -mtx4_y90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'y') # used -mtx4_z90 = Mathutils.RotationMatrix( math.pi/2, 4, 'z') # used -mtx4_z90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'z') # used - -# def strip_path(p): -# return p.split('\\')[-1].split('/')[-1] - -# Used to add the scene name into the filename without using odd chars -sane_name_mapping_ob = {} -sane_name_mapping_mat = {} -sane_name_mapping_tex = {} -sane_name_mapping_take = {} -sane_name_mapping_group = {} - -# Make sure reserved names are not used -sane_name_mapping_ob['Scene'] = 'Scene_' -sane_name_mapping_ob['blend_root'] = 'blend_root_' - -def increment_string(t): - name = t - num = '' - while name and name[-1].isdigit(): - num = name[-1] + num - name = name[:-1] - if num: return '%s%d' % (name, int(num)+1) - else: return name + '_0' - - - -# todo - Disallow the name 'Scene' and 'blend_root' - it will bugger things up. -def sane_name(data, dct): - #if not data: return None - - if type(data)==tuple: # materials are paired up with images - data, other = data - use_other = True - else: - other = None - use_other = False - - if data: name = data.name - else: name = None - orig_name = name - - if other: - orig_name_other = other.name - name = '%s #%s' % (name, orig_name_other) - else: - orig_name_other = None - - # dont cache, only ever call once for each data type now, - # so as to avoid namespace collision between types - like with objects <-> bones - #try: return dct[name] - #except: pass - - if not name: - name = 'unnamed' # blank string, ASKING FOR TROUBLE! - else: - #name = BPySys.cleanName(name) - name = cleanName(name) # use our own - - while name in iter(dct.values()): name = increment_string(name) - - if use_other: # even if other is None - orig_name_other will be a string or None - dct[orig_name, orig_name_other] = name - else: - dct[orig_name] = name - - return name - -def sane_obname(data): return sane_name(data, sane_name_mapping_ob) -def sane_matname(data): return sane_name(data, sane_name_mapping_mat) -def sane_texname(data): return sane_name(data, sane_name_mapping_tex) -def sane_takename(data): return sane_name(data, sane_name_mapping_take) -def sane_groupname(data): return sane_name(data, sane_name_mapping_group) - -# def derived_paths(fname_orig, basepath, FORCE_CWD=False): -# ''' -# fname_orig - blender path, can be relative -# basepath - fname_rel will be relative to this -# FORCE_CWD - dont use the basepath, just add a ./ to the filename. -# use when we know the file will be in the basepath. -# ''' -# fname = bpy.sys.expandpath(fname_orig) -# # fname = Blender.sys.expandpath(fname_orig) -# fname_strip = os.path.basename(fname) -# # fname_strip = strip_path(fname) -# if FORCE_CWD: -# fname_rel = '.' + os.sep + fname_strip -# else: -# fname_rel = bpy.sys.relpath(fname, basepath) -# # fname_rel = Blender.sys.relpath(fname, basepath) -# if fname_rel.startswith('//'): fname_rel = '.' 
+ os.sep + fname_rel[2:] -# return fname, fname_strip, fname_rel - - -def mat4x4str(mat): - return '%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f' % tuple([ f for v in mat for f in v ]) - -# XXX not used -# duplicated in OBJ exporter -def getVertsFromGroup(me, group_index): - ret = [] - - for i, v in enumerate(me.verts): - for g in v.groups: - if g.group == group_index: - ret.append((i, g.weight)) - - return ret - -# ob must be OB_MESH -def BPyMesh_meshWeight2List(ob): - ''' Takes a mesh and return its group names and a list of lists, one list per vertex. - aligning the each vert list with the group names, each list contains float value for the weight. - These 2 lists can be modified and then used with list2MeshWeight to apply the changes. - ''' - - me = ob.data - - # Clear the vert group. - groupNames= [g.name for g in ob.vertex_groups] - len_groupNames= len(groupNames) - - if not len_groupNames: - # no verts? return a vert aligned empty list - return [[] for i in range(len(me.verts))], [] - else: - vWeightList= [[0.0]*len_groupNames for i in range(len(me.verts))] - - for i, v in enumerate(me.verts): - for g in v.groups: - vWeightList[i][g.group] = g.weight - - return groupNames, vWeightList - -def meshNormalizedWeights(me): - try: # account for old bad BPyMesh - groupNames, vWeightList = BPyMesh_meshWeight2List(me) -# groupNames, vWeightList = BPyMesh.meshWeight2List(me) - except: - return [],[] - - if not groupNames: - return [],[] - - for i, vWeights in enumerate(vWeightList): - tot = 0.0 - for w in vWeights: - tot+=w - - if tot: - for j, w in enumerate(vWeights): - vWeights[j] = w/tot - - return groupNames, vWeightList - -header_comment = \ -'''; FBX 6.1.0 project file -; Created by Blender FBX Exporter -; for support mail: ideasman42@gmail.com -; ---------------------------------------------------- - -''' - -# This func can be called with just the filename -def write(filename, batch_objects = None, \ - context = None, - EXP_OBS_SELECTED = True, - EXP_MESH = True, - EXP_MESH_APPLY_MOD = True, -# EXP_MESH_HQ_NORMALS = False, - EXP_ARMATURE = True, - EXP_LAMP = True, - EXP_CAMERA = True, - EXP_EMPTY = True, - EXP_IMAGE_COPY = False, - GLOBAL_MATRIX = Mathutils.Matrix(), - ANIM_ENABLE = True, - ANIM_OPTIMIZE = True, - ANIM_OPTIMIZE_PRECISSION = 6, - ANIM_ACTION_ALL = False, - BATCH_ENABLE = False, - BATCH_GROUP = True, - BATCH_FILE_PREFIX = '', - BATCH_OWN_DIR = False - ): - - # ----------------- Batch support! 
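Before the batch-support branch that follows, a hedged sketch of how this write() entry point would typically be invoked for a single, non-batch export. The output path and the context object are placeholders, and every keyword name is taken from the signature above; this assumes the legacy 2.5-era bpy API the deleted script was written against, not current Blender.

    write('/tmp/untitled.fbx',            # placeholder output path
          batch_objects=None,
          context=bpy.context,            # or the context handed in by the export operator
          EXP_OBS_SELECTED=True,          # export only the selected objects
          EXP_MESH_APPLY_MOD=True,        # apply modifiers before writing meshes
          EXP_ARMATURE=True,
          EXP_LAMP=True,
          EXP_CAMERA=True,
          ANIM_ENABLE=False,              # skip writing animation takes
          BATCH_ENABLE=False)             # the per-scene/group batch path below is not taken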
- if BATCH_ENABLE: - if os == None: BATCH_OWN_DIR = False - - fbxpath = filename - - # get the path component of filename - tmp_exists = bpy.sys.exists(fbxpath) -# tmp_exists = Blender.sys.exists(fbxpath) - - if tmp_exists != 2: # a file, we want a path - fbxpath = os.path.dirname(fbxpath) -# while fbxpath and fbxpath[-1] not in ('/', '\\'): -# fbxpath = fbxpath[:-1] - if not fbxpath: -# if not filename: - # XXX - print('Error%t|Directory does not exist!') -# Draw.PupMenu('Error%t|Directory does not exist!') - return - - tmp_exists = bpy.sys.exists(fbxpath) -# tmp_exists = Blender.sys.exists(fbxpath) - - if tmp_exists != 2: - # XXX - print('Error%t|Directory does not exist!') -# Draw.PupMenu('Error%t|Directory does not exist!') - return - - if not fbxpath.endswith(os.sep): - fbxpath += os.sep - del tmp_exists - - - if BATCH_GROUP: - data_seq = bpy.data.groups - else: - data_seq = bpy.data.scenes - - # call this function within a loop with BATCH_ENABLE == False - orig_sce = context.scene -# orig_sce = bpy.data.scenes.active - - - new_fbxpath = fbxpath # own dir option modifies, we need to keep an original - for data in data_seq: # scene or group - newname = BATCH_FILE_PREFIX + cleanName(data.name) -# newname = BATCH_FILE_PREFIX + BPySys.cleanName(data.name) - - - if BATCH_OWN_DIR: - new_fbxpath = fbxpath + newname + os.sep - # path may alredy exist - # TODO - might exist but be a file. unlikely but should probably account for it. - - if bpy.sys.exists(new_fbxpath) == 0: -# if Blender.sys.exists(new_fbxpath) == 0: - os.mkdir(new_fbxpath) - - - filename = new_fbxpath + newname + '.fbx' - - print('\nBatch exporting %s as...\n\t"%s"' % (data, filename)) - - # XXX don't know what to do with this, probably do the same? (Arystan) - if BATCH_GROUP: #group - # group, so objects update properly, add a dummy scene. - sce = bpy.data.scenes.new() - sce.Layers = (1<<20) -1 - bpy.data.scenes.active = sce - for ob_base in data.objects: - sce.objects.link(ob_base) - - sce.update(1) - - # TODO - BUMMER! Armatures not in the group wont animate the mesh - - else:# scene - - - data_seq.active = data - - - # Call self with modified args - # Dont pass batch options since we alredy usedt them - write(filename, data.objects, - context, - False, - EXP_MESH, - EXP_MESH_APPLY_MOD, -# EXP_MESH_HQ_NORMALS, - EXP_ARMATURE, - EXP_LAMP, - EXP_CAMERA, - EXP_EMPTY, - EXP_IMAGE_COPY, - GLOBAL_MATRIX, - ANIM_ENABLE, - ANIM_OPTIMIZE, - ANIM_OPTIMIZE_PRECISSION, - ANIM_ACTION_ALL - ) - - if BATCH_GROUP: - # remove temp group scene - bpy.data.remove_scene(sce) -# bpy.data.scenes.unlink(sce) - - bpy.data.scenes.active = orig_sce - - return # so the script wont run after we have batch exported. - - # end batch support - - # Use this for working out paths relative to the export location - basepath = os.path.dirname(filename) or '.' 
- basepath += os.sep -# basepath = Blender.sys.dirname(filename) - - # ---------------------------------------------- - # storage classes - class my_bone_class: - __slots__ =(\ - 'blenName',\ - 'blenBone',\ - 'blenMeshes',\ - 'restMatrix',\ - 'parent',\ - 'blenName',\ - 'fbxName',\ - 'fbxArm',\ - '__pose_bone',\ - '__anim_poselist') - - def __init__(self, blenBone, fbxArm): - - # This is so 2 armatures dont have naming conflicts since FBX bones use object namespace - self.fbxName = sane_obname(blenBone) - - self.blenName = blenBone.name - self.blenBone = blenBone - self.blenMeshes = {} # fbxMeshObName : mesh - self.fbxArm = fbxArm - self.restMatrix = blenBone.armature_matrix -# self.restMatrix = blenBone.matrix['ARMATURESPACE'] - - # not used yet - # self.restMatrixInv = self.restMatrix.copy().invert() - # self.restMatrixLocal = None # set later, need parent matrix - - self.parent = None - - # not public - pose = fbxArm.blenObject.pose -# pose = fbxArm.blenObject.getPose() - self.__pose_bone = pose.pose_channels[self.blenName] -# self.__pose_bone = pose.bones[self.blenName] - - # store a list if matricies here, (poseMatrix, head, tail) - # {frame:posematrix, frame:posematrix, ...} - self.__anim_poselist = {} - - ''' - def calcRestMatrixLocal(self): - if self.parent: - self.restMatrixLocal = self.restMatrix * self.parent.restMatrix.copy().invert() - else: - self.restMatrixLocal = self.restMatrix.copy() - ''' - def setPoseFrame(self, f): - # cache pose info here, frame must be set beforehand - - # Didnt end up needing head or tail, if we do - here it is. - ''' - self.__anim_poselist[f] = (\ - self.__pose_bone.poseMatrix.copy(),\ - self.__pose_bone.head.copy(),\ - self.__pose_bone.tail.copy() ) - ''' - - self.__anim_poselist[f] = self.__pose_bone.pose_matrix.copy() -# self.__anim_poselist[f] = self.__pose_bone.poseMatrix.copy() - - # get pose from frame. - def getPoseMatrix(self, f):# ---------------------------------------------- - return self.__anim_poselist[f] - ''' - def getPoseHead(self, f): - #return self.__pose_bone.head.copy() - return self.__anim_poselist[f][1].copy() - def getPoseTail(self, f): - #return self.__pose_bone.tail.copy() - return self.__anim_poselist[f][2].copy() - ''' - # end - - def getAnimParRelMatrix(self, frame): - #arm_mat = self.fbxArm.matrixWorld - #arm_mat = self.fbxArm.parRelMatrix() - if not self.parent: - #return mtx4_z90 * (self.getPoseMatrix(frame) * arm_mat) # dont apply arm matrix anymore - return mtx4_z90 * self.getPoseMatrix(frame) - else: - #return (mtx4_z90 * ((self.getPoseMatrix(frame) * arm_mat))) * (mtx4_z90 * (self.parent.getPoseMatrix(frame) * arm_mat)).invert() - return (mtx4_z90 * (self.getPoseMatrix(frame))) * (mtx4_z90 * self.parent.getPoseMatrix(frame)).invert() - - # we need thes because cameras and lights modified rotations - def getAnimParRelMatrixRot(self, frame): - return self.getAnimParRelMatrix(frame) - - def flushAnimData(self): - self.__anim_poselist.clear() - - - class my_object_generic: - # Other settings can be applied for each type - mesh, armature etc. - def __init__(self, ob, matrixWorld = None): - self.fbxName = sane_obname(ob) - self.blenObject = ob - self.fbxGroupNames = [] - self.fbxParent = None # set later on IF the parent is in the selection. 
- if matrixWorld: self.matrixWorld = matrixWorld * GLOBAL_MATRIX - else: self.matrixWorld = ob.matrix * GLOBAL_MATRIX -# else: self.matrixWorld = ob.matrixWorld * GLOBAL_MATRIX - self.__anim_poselist = {} # we should only access this - - def parRelMatrix(self): - if self.fbxParent: - return self.matrixWorld * self.fbxParent.matrixWorld.copy().invert() - else: - return self.matrixWorld - - def setPoseFrame(self, f): - self.__anim_poselist[f] = self.blenObject.matrix.copy() -# self.__anim_poselist[f] = self.blenObject.matrixWorld.copy() - - def getAnimParRelMatrix(self, frame): - if self.fbxParent: - #return (self.__anim_poselist[frame] * self.fbxParent.__anim_poselist[frame].copy().invert() ) * GLOBAL_MATRIX - return (self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert() - else: - return self.__anim_poselist[frame] * GLOBAL_MATRIX - - def getAnimParRelMatrixRot(self, frame): - type = self.blenObject.type - if self.fbxParent: - matrix_rot = (((self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert())).rotationPart() - else: - matrix_rot = (self.__anim_poselist[frame] * GLOBAL_MATRIX).rotationPart() - - # Lamps need to be rotated - if type =='LAMP': - matrix_rot = mtx_x90 * matrix_rot - elif type =='CAMERA': -# elif ob and type =='Camera': - y = Mathutils.Vector(0,1,0) * matrix_rot - matrix_rot = matrix_rot * Mathutils.RotationMatrix(math.pi/2, 3, 'r', y) - - return matrix_rot - - # ---------------------------------------------- - - - - - - print('\nFBX export starting...', filename) - start_time = time.clock() -# start_time = Blender.sys.time() - try: - file = open(filename, 'w') - except: - return False - - sce = context.scene -# sce = bpy.data.scenes.active - world = sce.world - - - # ---------------------------- Write the header first - file.write(header_comment) - if time: - curtime = time.localtime()[0:6] - else: - curtime = (0,0,0,0,0,0) - # - file.write(\ -'''FBXHeaderExtension: { - FBXHeaderVersion: 1003 - FBXVersion: 6100 - CreationTimeStamp: { - Version: 1000 - Year: %.4i - Month: %.2i - Day: %.2i - Hour: %.2i - Minute: %.2i - Second: %.2i - Millisecond: 0 - } - Creator: "FBX SDK/FBX Plugins build 20070228" - OtherFlags: { - FlagPLE: 0 - } -}''' % (curtime)) - - file.write('\nCreationTime: "%.4i-%.2i-%.2i %.2i:%.2i:%.2i:000"' % curtime) - file.write('\nCreator: "Blender3D version 2.5"') -# file.write('\nCreator: "Blender3D version %.2f"' % Blender.Get('version')) - - pose_items = [] # list of (fbxName, matrix) to write pose data for, easier to collect allong the way - - # --------------- funcs for exporting - def object_tx(ob, loc, matrix, matrix_mod = None): - ''' - Matrix mod is so armature objects can modify their bone matricies - ''' - if isinstance(ob, bpy.types.Bone): -# if isinstance(ob, Blender.Types.BoneType): - - # we know we have a matrix - # matrix = mtx4_z90 * (ob.matrix['ARMATURESPACE'] * matrix_mod) - matrix = mtx4_z90 * ob.armature_matrix # dont apply armature matrix anymore -# matrix = mtx4_z90 * ob.matrix['ARMATURESPACE'] # dont apply armature matrix anymore - - parent = ob.parent - if parent: - #par_matrix = mtx4_z90 * (parent.matrix['ARMATURESPACE'] * matrix_mod) - par_matrix = mtx4_z90 * parent.armature_matrix # dont apply armature matrix anymore -# par_matrix = mtx4_z90 * parent.matrix['ARMATURESPACE'] # dont apply armature matrix anymore - matrix = matrix * par_matrix.copy().invert() - - matrix_rot = matrix.rotationPart() - - loc = 
tuple(matrix.translationPart()) - scale = tuple(matrix.scalePart()) - rot = tuple(matrix_rot.toEuler()) - - else: - # This is bad because we need the parent relative matrix from the fbx parent (if we have one), dont use anymore - #if ob and not matrix: matrix = ob.matrixWorld * GLOBAL_MATRIX - if ob and not matrix: raise Exception("error: this should never happen!") - - matrix_rot = matrix - #if matrix: - # matrix = matrix_scale * matrix - - if matrix: - loc = tuple(matrix.translationPart()) - scale = tuple(matrix.scalePart()) - - matrix_rot = matrix.rotationPart() - # Lamps need to be rotated - if ob and ob.type =='Lamp': - matrix_rot = mtx_x90 * matrix_rot - rot = tuple(matrix_rot.toEuler()) - elif ob and ob.type =='Camera': - y = Mathutils.Vector(0,1,0) * matrix_rot - matrix_rot = matrix_rot * Mathutils.RotationMatrix(math.pi/2, 3, 'r', y) - rot = tuple(matrix_rot.toEuler()) - else: - rot = tuple(matrix_rot.toEuler()) - else: - if not loc: - loc = 0,0,0 - scale = 1,1,1 - rot = 0,0,0 - - return loc, rot, scale, matrix, matrix_rot - - def write_object_tx(ob, loc, matrix, matrix_mod= None): - ''' - We have loc to set the location if non blender objects that have a location - - matrix_mod is only used for bones at the moment - ''' - loc, rot, scale, matrix, matrix_rot = object_tx(ob, loc, matrix, matrix_mod) - - file.write('\n\t\t\tProperty: "Lcl Translation", "Lcl Translation", "A+",%.15f,%.15f,%.15f' % loc) - file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % tuple(eulerRadToDeg(rot))) -# file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % rot) - file.write('\n\t\t\tProperty: "Lcl Scaling", "Lcl Scaling", "A+",%.15f,%.15f,%.15f' % scale) - return loc, rot, scale, matrix, matrix_rot - - def write_object_props(ob=None, loc=None, matrix=None, matrix_mod=None): - # if the type is 0 its an empty otherwise its a mesh - # only difference at the moment is one has a color - file.write(''' - Properties60: { - Property: "QuaternionInterpolate", "bool", "",0 - Property: "Visibility", "Visibility", "A+",1''') - - loc, rot, scale, matrix, matrix_rot = write_object_tx(ob, loc, matrix, matrix_mod) - - # Rotation order, note, for FBX files Iv loaded normal order is 1 - # setting to zero. 
- # eEULER_XYZ = 0 - # eEULER_XZY - # eEULER_YZX - # eEULER_YXZ - # eEULER_ZXY - # eEULER_ZYX - - file.write(''' - Property: "RotationOffset", "Vector3D", "",0,0,0 - Property: "RotationPivot", "Vector3D", "",0,0,0 - Property: "ScalingOffset", "Vector3D", "",0,0,0 - Property: "ScalingPivot", "Vector3D", "",0,0,0 - Property: "TranslationActive", "bool", "",0 - Property: "TranslationMin", "Vector3D", "",0,0,0 - Property: "TranslationMax", "Vector3D", "",0,0,0 - Property: "TranslationMinX", "bool", "",0 - Property: "TranslationMinY", "bool", "",0 - Property: "TranslationMinZ", "bool", "",0 - Property: "TranslationMaxX", "bool", "",0 - Property: "TranslationMaxY", "bool", "",0 - Property: "TranslationMaxZ", "bool", "",0 - Property: "RotationOrder", "enum", "",0 - Property: "RotationSpaceForLimitOnly", "bool", "",0 - Property: "AxisLen", "double", "",10 - Property: "PreRotation", "Vector3D", "",0,0,0 - Property: "PostRotation", "Vector3D", "",0,0,0 - Property: "RotationActive", "bool", "",0 - Property: "RotationMin", "Vector3D", "",0,0,0 - Property: "RotationMax", "Vector3D", "",0,0,0 - Property: "RotationMinX", "bool", "",0 - Property: "RotationMinY", "bool", "",0 - Property: "RotationMinZ", "bool", "",0 - Property: "RotationMaxX", "bool", "",0 - Property: "RotationMaxY", "bool", "",0 - Property: "RotationMaxZ", "bool", "",0 - Property: "RotationStiffnessX", "double", "",0 - Property: "RotationStiffnessY", "double", "",0 - Property: "RotationStiffnessZ", "double", "",0 - Property: "MinDampRangeX", "double", "",0 - Property: "MinDampRangeY", "double", "",0 - Property: "MinDampRangeZ", "double", "",0 - Property: "MaxDampRangeX", "double", "",0 - Property: "MaxDampRangeY", "double", "",0 - Property: "MaxDampRangeZ", "double", "",0 - Property: "MinDampStrengthX", "double", "",0 - Property: "MinDampStrengthY", "double", "",0 - Property: "MinDampStrengthZ", "double", "",0 - Property: "MaxDampStrengthX", "double", "",0 - Property: "MaxDampStrengthY", "double", "",0 - Property: "MaxDampStrengthZ", "double", "",0 - Property: "PreferedAngleX", "double", "",0 - Property: "PreferedAngleY", "double", "",0 - Property: "PreferedAngleZ", "double", "",0 - Property: "InheritType", "enum", "",0 - Property: "ScalingActive", "bool", "",0 - Property: "ScalingMin", "Vector3D", "",1,1,1 - Property: "ScalingMax", "Vector3D", "",1,1,1 - Property: "ScalingMinX", "bool", "",0 - Property: "ScalingMinY", "bool", "",0 - Property: "ScalingMinZ", "bool", "",0 - Property: "ScalingMaxX", "bool", "",0 - Property: "ScalingMaxY", "bool", "",0 - Property: "ScalingMaxZ", "bool", "",0 - Property: "GeometricTranslation", "Vector3D", "",0,0,0 - Property: "GeometricRotation", "Vector3D", "",0,0,0 - Property: "GeometricScaling", "Vector3D", "",1,1,1 - Property: "LookAtProperty", "object", "" - Property: "UpVectorProperty", "object", "" - Property: "Show", "bool", "",1 - Property: "NegativePercentShapeSupport", "bool", "",1 - Property: "DefaultAttributeIndex", "int", "",0''') - if ob and not isinstance(ob, bpy.types.Bone): -# if ob and type(ob) != Blender.Types.BoneType: - # Only mesh objects have color - file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8') - file.write('\n\t\t\tProperty: "Size", "double", "",100') - file.write('\n\t\t\tProperty: "Look", "enum", "",1') - - return loc, rot, scale, matrix, matrix_rot - - - # -------------------------------------------- Armatures - #def write_bone(bone, name, matrix_mod): - def write_bone(my_bone): - file.write('\n\tModel: "Model::%s", "Limb" {' % my_bone.fbxName) - 
file.write('\n\t\tVersion: 232') - - #poseMatrix = write_object_props(my_bone.blenBone, None, None, my_bone.fbxArm.parRelMatrix())[3] - poseMatrix = write_object_props(my_bone.blenBone)[3] # dont apply bone matricies anymore - pose_items.append( (my_bone.fbxName, poseMatrix) ) - - - # file.write('\n\t\t\tProperty: "Size", "double", "",%.6f' % ((my_bone.blenData.head['ARMATURESPACE'] - my_bone.blenData.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length) - file.write('\n\t\t\tProperty: "Size", "double", "",1') - - #((my_bone.blenData.head['ARMATURESPACE'] * my_bone.fbxArm.matrixWorld) - (my_bone.blenData.tail['ARMATURESPACE'] * my_bone.fbxArm.parRelMatrix())).length) - - """ - file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %\ - ((my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length) - """ - - file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' % - (my_bone.blenBone.armature_head - my_bone.blenBone.armature_tail).length) -# (my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']).length) - - #file.write('\n\t\t\tProperty: "LimbLength", "double", "",1') - file.write('\n\t\t\tProperty: "Color", "ColorRGB", "",0.8,0.8,0.8') - file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8') - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 1') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - file.write('\n\t\tTypeFlags: "Skeleton"') - file.write('\n\t}') - - def write_camera_switch(): - file.write(''' - Model: "Model::Camera Switcher", "CameraSwitcher" { - Version: 232''') - - write_object_props() - file.write(''' - Property: "Color", "Color", "A",0.8,0.8,0.8 - Property: "Camera Index", "Integer", "A+",100 - } - MultiLayer: 0 - MultiTake: 1 - Hidden: "True" - Shading: W - Culling: "CullingOff" - Version: 101 - Name: "Model::Camera Switcher" - CameraId: 0 - CameraName: 100 - CameraIndexName: - }''') - - def write_camera_dummy(name, loc, near, far, proj_type, up): - file.write('\n\tModel: "Model::%s", "Camera" {' % name ) - file.write('\n\t\tVersion: 232') - write_object_props(None, loc) - - file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8') - file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0') - file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",40') - file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1') - file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1') - file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",0') - file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",0') - file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0.63,0.63,0.63') - file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0') - file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1') - file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1') - file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0') - file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1') - file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0') - file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2') - file.write('\n\t\t\tProperty: "GateFit", "enum", "",0') - file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",21.3544940948486') - file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0') - file.write('\n\t\t\tProperty: "AspectW", "double", "",320') - file.write('\n\t\t\tProperty: "AspectH", 
"double", "",200') - file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",1') - file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0') - file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3') - file.write('\n\t\t\tProperty: "ShowName", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0') - file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0') - file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % near) - file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % far) - file.write('\n\t\t\tProperty: "FilmWidth", "double", "",0.816') - file.write('\n\t\t\tProperty: "FilmHeight", "double", "",0.612') - file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",1.33333333333333') - file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1') - file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",4') - file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1') - file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0') - file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2') - file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100') - file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0') - file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1') - file.write('\n\t\t\tProperty: "LockMode", "bool", "",0') - file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0') - file.write('\n\t\t\tProperty: "FitImage", "bool", "",0') - file.write('\n\t\t\tProperty: "Crop", "bool", "",0') - file.write('\n\t\t\tProperty: "Center", "bool", "",1') - file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1') - file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0') - file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5') - file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1') - file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0') - file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1') - file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",1.33333333333333') - file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0') - file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100') - file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50') - file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50') - file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",%i' % proj_type) - file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0') - file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0') - file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0') - file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5') - file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200') - file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0') - file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777') - file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0') - file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7') - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 0') - file.write('\n\t\tHidden: "True"') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - file.write('\n\t\tTypeFlags: "Camera"') - file.write('\n\t\tGeometryVersion: 124') - 
file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc) - file.write('\n\t\tUp: %i,%i,%i' % up) - file.write('\n\t\tLookAt: 0,0,0') - file.write('\n\t\tShowInfoOnMoving: 1') - file.write('\n\t\tShowAudio: 0') - file.write('\n\t\tAudioColor: 0,1,0') - file.write('\n\t\tCameraOrthoZoom: 1') - file.write('\n\t}') - - def write_camera_default(): - # This sucks but to match FBX converter its easier to - # write the cameras though they are not needed. - write_camera_dummy('Producer Perspective', (0,71.3,287.5), 10, 4000, 0, (0,1,0)) - write_camera_dummy('Producer Top', (0,4000,0), 1, 30000, 1, (0,0,-1)) - write_camera_dummy('Producer Bottom', (0,-4000,0), 1, 30000, 1, (0,0,-1)) - write_camera_dummy('Producer Front', (0,0,4000), 1, 30000, 1, (0,1,0)) - write_camera_dummy('Producer Back', (0,0,-4000), 1, 30000, 1, (0,1,0)) - write_camera_dummy('Producer Right', (4000,0,0), 1, 30000, 1, (0,1,0)) - write_camera_dummy('Producer Left', (-4000,0,0), 1, 30000, 1, (0,1,0)) - - def write_camera(my_cam): - ''' - Write a blender camera - ''' - render = sce.render_data - width = render.resolution_x - height = render.resolution_y -# render = sce.render -# width = render.sizeX -# height = render.sizeY - aspect = float(width)/height - - data = my_cam.blenObject.data - - file.write('\n\tModel: "Model::%s", "Camera" {' % my_cam.fbxName ) - file.write('\n\t\tVersion: 232') - loc, rot, scale, matrix, matrix_rot = write_object_props(my_cam.blenObject, None, my_cam.parRelMatrix()) - - file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0') - file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",%.6f' % data.angle) - file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1') - file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1') - file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",14.0323972702026') - file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shift_x) # not sure if this is in the correct units? -# file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shiftX) # not sure if this is in the correct units? - file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shift_y) # ditto -# file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shiftY) # ditto - file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0,0,0') - file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0') - file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1') - file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1') - file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0') - file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1') - file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0') - file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2') - file.write('\n\t\t\tProperty: "GateFit", "enum", "",0') - file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0') - file.write('\n\t\t\tProperty: "AspectW", "double", "",%i' % width) - file.write('\n\t\t\tProperty: "AspectH", "double", "",%i' % height) - - '''Camera aspect ratio modes. - 0 If the ratio mode is eWINDOW_SIZE, both width and height values aren't relevant. - 1 If the ratio mode is eFIXED_RATIO, the height value is set to 1.0 and the width value is relative to the height value. - 2 If the ratio mode is eFIXED_RESOLUTION, both width and height values are in pixels. - 3 If the ratio mode is eFIXED_WIDTH, the width value is in pixels and the height value is relative to the width value. 
- 4 If the ratio mode is eFIXED_HEIGHT, the height value is in pixels and the width value is relative to the height value. - - Definition at line 234 of file kfbxcamera.h. ''' - - file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",2') - - file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0') - file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3') - file.write('\n\t\t\tProperty: "ShowName", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0') - file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0') - file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clip_start) -# file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clipStart) - file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clip_end) -# file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clipStart) - file.write('\n\t\t\tProperty: "FilmWidth", "double", "",1.0') - file.write('\n\t\t\tProperty: "FilmHeight", "double", "",1.0') - file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",%.6f' % aspect) - file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1') - file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",0') - file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1') - file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0') - file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2') - file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100') - file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0') - file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1') - file.write('\n\t\t\tProperty: "LockMode", "bool", "",0') - file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0') - file.write('\n\t\t\tProperty: "FitImage", "bool", "",0') - file.write('\n\t\t\tProperty: "Crop", "bool", "",0') - file.write('\n\t\t\tProperty: "Center", "bool", "",1') - file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1') - file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0') - file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5') - file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1') - file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0') - file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1') - file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",%.6f' % aspect) - file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0') - file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100') - file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50') - file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50') - file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",0') - file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0') - file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0') - file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0') - file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5') - file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200') - file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0') - file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777') - file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0') - file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", 
"",7') - - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 0') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - file.write('\n\t\tTypeFlags: "Camera"') - file.write('\n\t\tGeometryVersion: 124') - file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc) - file.write('\n\t\tUp: %.6f,%.6f,%.6f' % tuple(Mathutils.Vector(0,1,0) * matrix_rot) ) - file.write('\n\t\tLookAt: %.6f,%.6f,%.6f' % tuple(Mathutils.Vector(0,0,-1)*matrix_rot) ) - - #file.write('\n\t\tUp: 0,0,0' ) - #file.write('\n\t\tLookAt: 0,0,0' ) - - file.write('\n\t\tShowInfoOnMoving: 1') - file.write('\n\t\tShowAudio: 0') - file.write('\n\t\tAudioColor: 0,1,0') - file.write('\n\t\tCameraOrthoZoom: 1') - file.write('\n\t}') - - def write_light(my_light): - light = my_light.blenObject.data - file.write('\n\tModel: "Model::%s", "Light" {' % my_light.fbxName) - file.write('\n\t\tVersion: 232') - - write_object_props(my_light.blenObject, None, my_light.parRelMatrix()) - - # Why are these values here twice?????? - oh well, follow the holy sdk's output - - # Blender light types match FBX's, funny coincidence, we just need to - # be sure that all unsupported types are made into a point light - #ePOINT, - #eDIRECTIONAL - #eSPOT - light_type_items = {'POINT': 0, 'SUN': 1, 'SPOT': 2, 'HEMI': 3, 'AREA': 4} - light_type = light_type_items[light.type] -# light_type = light.type - if light_type > 2: light_type = 1 # hemi and area lights become directional - -# mode = light.mode - if light.shadow_method == 'RAY_SHADOW' or light.shadow_method == 'BUFFER_SHADOW': -# if mode & Blender.Lamp.Modes.RayShadow or mode & Blender.Lamp.Modes.Shadows: - do_shadow = 1 - else: - do_shadow = 0 - - if light.only_shadow or (not light.diffuse and not light.specular): -# if mode & Blender.Lamp.Modes.OnlyShadow or (mode & Blender.Lamp.Modes.NoDiffuse and mode & Blender.Lamp.Modes.NoSpecular): - do_light = 0 - else: - do_light = 1 - - scale = abs(GLOBAL_MATRIX.scalePart()[0]) # scale is always uniform in this case - - file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type) - file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",1') - file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1') - file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1') - file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0') - file.write('\n\t\t\tProperty: "GoboProperty", "object", ""') - file.write('\n\t\t\tProperty: "Color", "Color", "A+",1,1,1') - file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200 - if light.type == 'SPOT': - file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spot_size * scale)) -# file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale)) - file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50') - file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.color)) -# file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.col)) - file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200 -# - # duplication? 
see ^ (Arystan) -# file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale)) - file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50') - file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type) - file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",%i' % do_light) - file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1') - file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0') - file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1') - file.write('\n\t\t\tProperty: "GoboProperty", "object", ""') - file.write('\n\t\t\tProperty: "DecayType", "enum", "",0') - file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.distance) -# file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.dist) - file.write('\n\t\t\tProperty: "EnableNearAttenuation", "bool", "",0') - file.write('\n\t\t\tProperty: "NearAttenuationStart", "double", "",0') - file.write('\n\t\t\tProperty: "NearAttenuationEnd", "double", "",0') - file.write('\n\t\t\tProperty: "EnableFarAttenuation", "bool", "",0') - file.write('\n\t\t\tProperty: "FarAttenuationStart", "double", "",0') - file.write('\n\t\t\tProperty: "FarAttenuationEnd", "double", "",0') - file.write('\n\t\t\tProperty: "CastShadows", "bool", "",%i' % do_shadow) - file.write('\n\t\t\tProperty: "ShadowColor", "ColorRGBA", "",0,0,0,1') - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 0') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - file.write('\n\t\tTypeFlags: "Light"') - file.write('\n\t\tGeometryVersion: 124') - file.write('\n\t}') - - # matrixOnly is not used at the moment - def write_null(my_null = None, fbxName = None, matrixOnly = None): - # ob can be null - if not fbxName: fbxName = my_null.fbxName - - file.write('\n\tModel: "Model::%s", "Null" {' % fbxName) - file.write('\n\t\tVersion: 232') - - # only use this for the root matrix at the moment - if matrixOnly: - poseMatrix = write_object_props(None, None, matrixOnly)[3] - - else: # all other Null's - if my_null: poseMatrix = write_object_props(my_null.blenObject, None, my_null.parRelMatrix())[3] - else: poseMatrix = write_object_props()[3] - - pose_items.append((fbxName, poseMatrix)) - - file.write(''' - } - MultiLayer: 0 - MultiTake: 1 - Shading: Y - Culling: "CullingOff" - TypeFlags: "Null" - }''') - - # Material Settings - if world: world_amb = tuple(world.ambient_color) -# if world: world_amb = world.getAmb() - else: world_amb = (0,0,0) # Default value - - def write_material(matname, mat): - file.write('\n\tMaterial: "Material::%s", "" {' % matname) - - # Todo, add more material Properties. 
- if mat: - mat_cold = tuple(mat.diffuse_color) -# mat_cold = tuple(mat.rgbCol) - mat_cols = tuple(mat.specular_color) -# mat_cols = tuple(mat.specCol) - #mat_colm = tuple(mat.mirCol) # we wont use the mirror color - mat_colamb = world_amb -# mat_colamb = tuple([c for c in world_amb]) - - mat_dif = mat.diffuse_intensity -# mat_dif = mat.ref - mat_amb = mat.ambient -# mat_amb = mat.amb - mat_hard = (float(mat.specular_hardness)-1)/5.10 -# mat_hard = (float(mat.hard)-1)/5.10 - mat_spec = mat.specular_intensity/2.0 -# mat_spec = mat.spec/2.0 - mat_alpha = mat.alpha - mat_emit = mat.emit - mat_shadeless = mat.shadeless -# mat_shadeless = mat.mode & Blender.Material.Modes.SHADELESS - if mat_shadeless: - mat_shader = 'Lambert' - else: - if mat.diffuse_shader == 'LAMBERT': -# if mat.diffuseShader == Blender.Material.Shaders.DIFFUSE_LAMBERT: - mat_shader = 'Lambert' - else: - mat_shader = 'Phong' - else: - mat_cols = mat_cold = 0.8, 0.8, 0.8 - mat_colamb = 0.0,0.0,0.0 - # mat_colm - mat_dif = 1.0 - mat_amb = 0.5 - mat_hard = 20.0 - mat_spec = 0.2 - mat_alpha = 1.0 - mat_emit = 0.0 - mat_shadeless = False - mat_shader = 'Phong' - - file.write('\n\t\tVersion: 102') - file.write('\n\t\tShadingModel: "%s"' % mat_shader.lower()) - file.write('\n\t\tMultiLayer: 0') - - file.write('\n\t\tProperties60: {') - file.write('\n\t\t\tProperty: "ShadingModel", "KString", "", "%s"' % mat_shader) - file.write('\n\t\t\tProperty: "MultiLayer", "bool", "",0') - file.write('\n\t\t\tProperty: "EmissiveColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold) # emit and diffuse color are he same in blender - file.write('\n\t\t\tProperty: "EmissiveFactor", "double", "",%.4f' % mat_emit) - - file.write('\n\t\t\tProperty: "AmbientColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_colamb) - file.write('\n\t\t\tProperty: "AmbientFactor", "double", "",%.4f' % mat_amb) - file.write('\n\t\t\tProperty: "DiffuseColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold) - file.write('\n\t\t\tProperty: "DiffuseFactor", "double", "",%.4f' % mat_dif) - file.write('\n\t\t\tProperty: "Bump", "Vector3D", "",0,0,0') - file.write('\n\t\t\tProperty: "TransparentColor", "ColorRGB", "",1,1,1') - file.write('\n\t\t\tProperty: "TransparencyFactor", "double", "",%.4f' % (1.0 - mat_alpha)) - if not mat_shadeless: - file.write('\n\t\t\tProperty: "SpecularColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cols) - file.write('\n\t\t\tProperty: "SpecularFactor", "double", "",%.4f' % mat_spec) - file.write('\n\t\t\tProperty: "ShininessExponent", "double", "",80.0') - file.write('\n\t\t\tProperty: "ReflectionColor", "ColorRGB", "",0,0,0') - file.write('\n\t\t\tProperty: "ReflectionFactor", "double", "",1') - file.write('\n\t\t\tProperty: "Emissive", "ColorRGB", "",0,0,0') - file.write('\n\t\t\tProperty: "Ambient", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_colamb) - file.write('\n\t\t\tProperty: "Diffuse", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cold) - if not mat_shadeless: - file.write('\n\t\t\tProperty: "Specular", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cols) - file.write('\n\t\t\tProperty: "Shininess", "double", "",%.1f' % mat_hard) - file.write('\n\t\t\tProperty: "Opacity", "double", "",%.1f' % mat_alpha) - if not mat_shadeless: - file.write('\n\t\t\tProperty: "Reflectivity", "double", "",0') - - file.write('\n\t\t}') - file.write('\n\t}') - - def copy_image(image): - - rel = image.get_export_path(basepath, True) - base = os.path.basename(rel) - - if EXP_IMAGE_COPY: - absp = image.get_export_path(basepath, False) - if not os.path.exists(absp): - shutil.copy(image.get_abs_filename(), 
absp) - - return (rel, base) - - # tex is an Image (Arystan) - def write_video(texname, tex): - # Same as texture really! - file.write('\n\tVideo: "Video::%s", "Clip" {' % texname) - - file.write(''' - Type: "Clip" - Properties60: { - Property: "FrameRate", "double", "",0 - Property: "LastFrame", "int", "",0 - Property: "Width", "int", "",0 - Property: "Height", "int", "",0''') - if tex: - fname_rel, fname_strip = copy_image(tex) -# fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY) - else: - fname = fname_strip = fname_rel = '' - - file.write('\n\t\t\tProperty: "Path", "charptr", "", "%s"' % fname_strip) - - - file.write(''' - Property: "StartFrame", "int", "",0 - Property: "StopFrame", "int", "",0 - Property: "PlaySpeed", "double", "",1 - Property: "Offset", "KTime", "",0 - Property: "InterlaceMode", "enum", "",0 - Property: "FreeRunning", "bool", "",0 - Property: "Loop", "bool", "",0 - Property: "AccessMode", "enum", "",0 - } - UseMipMap: 0''') - - file.write('\n\t\tFilename: "%s"' % fname_strip) - if fname_strip: fname_strip = '/' + fname_strip - file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # make relative - file.write('\n\t}') - - - def write_texture(texname, tex, num): - # if tex == None then this is a dummy tex - file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {' % texname) - file.write('\n\t\tType: "TextureVideoClip"') - file.write('\n\t\tVersion: 202') - # TODO, rare case _empty_ exists as a name. - file.write('\n\t\tTextureName: "Texture::%s"' % texname) - - file.write(''' - Properties60: { - Property: "Translation", "Vector", "A+",0,0,0 - Property: "Rotation", "Vector", "A+",0,0,0 - Property: "Scaling", "Vector", "A+",1,1,1''') - file.write('\n\t\t\tProperty: "Texture alpha", "Number", "A+",%i' % num) - - - # WrapModeU/V 0==rep, 1==clamp, TODO add support - file.write(''' - Property: "TextureTypeUse", "enum", "",0 - Property: "CurrentTextureBlendMode", "enum", "",1 - Property: "UseMaterial", "bool", "",0 - Property: "UseMipMap", "bool", "",0 - Property: "CurrentMappingType", "enum", "",0 - Property: "UVSwap", "bool", "",0''') - - file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.clamp_x) -# file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.clampX) - file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.clamp_y) -# file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.clampY) - - file.write(''' - Property: "TextureRotationPivot", "Vector3D", "",0,0,0 - Property: "TextureScalingPivot", "Vector3D", "",0,0,0 - Property: "VideoProperty", "object", "" - }''') - - file.write('\n\t\tMedia: "Video::%s"' % texname) - - if tex: - fname_rel, fname_strip = copy_image(tex) -# fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY) - else: - fname = fname_strip = fname_rel = '' - - file.write('\n\t\tFileName: "%s"' % fname_strip) - file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # need some make relative command - - file.write(''' - ModelUVTranslation: 0,0 - ModelUVScaling: 1,1 - Texture_Alpha_Source: "None" - Cropping: 0,0,0,0 - }''') - - def write_deformer_skin(obname): - ''' - Each mesh has its own deformer - ''' - file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {' % obname) - file.write(''' - Version: 100 - MultiLayer: 0 - Type: "Skin" - Properties60: { - } - Link_DeformAcuracy: 50 - }''') - - # in the example was 'Bip01 L Thigh_2' - def write_sub_deformer_skin(my_mesh, my_bone, weights): - - ''' - Each subdeformer is spesific to a mesh, but the 
bone it links to can be used by many sub-deformers - So the SubDeformer needs the mesh-object name as a prefix to make it unique - - Its possible that there is no matching vgroup in this mesh, in that case no verts are in the subdeformer, - a but silly but dosnt really matter - ''' - file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {' % (my_mesh.fbxName, my_bone.fbxName)) - - file.write(''' - Version: 100 - MultiLayer: 0 - Type: "Cluster" - Properties60: { - Property: "SrcModel", "object", "" - Property: "SrcModelReference", "object", "" - } - UserData: "", ""''') - - # Support for bone parents - if my_mesh.fbxBoneParent: - if my_mesh.fbxBoneParent == my_bone: - # TODO - this is a bit lazy, we could have a simple write loop - # for this case because all weights are 1.0 but for now this is ok - # Parent Bones arent used all that much anyway. - vgroup_data = [(j, 1.0) for j in range(len(my_mesh.blenData.verts))] - else: - # This bone is not a parent of this mesh object, no weights - vgroup_data = [] - - else: - # Normal weight painted mesh - if my_bone.blenName in weights[0]: - # Before we used normalized wright list - #vgroup_data = me.getVertsFromGroup(bone.name, 1) - group_index = weights[0].index(my_bone.blenName) - vgroup_data = [(j, weight[group_index]) for j, weight in enumerate(weights[1]) if weight[group_index]] - else: - vgroup_data = [] - - file.write('\n\t\tIndexes: ') - - i = -1 - for vg in vgroup_data: - if i == -1: - file.write('%i' % vg[0]) - i=0 - else: - if i==23: - file.write('\n\t\t') - i=0 - file.write(',%i' % vg[0]) - i+=1 - - file.write('\n\t\tWeights: ') - i = -1 - for vg in vgroup_data: - if i == -1: - file.write('%.8f' % vg[1]) - i=0 - else: - if i==38: - file.write('\n\t\t') - i=0 - file.write(',%.8f' % vg[1]) - i+=1 - - if my_mesh.fbxParent: - # TODO FIXME, this case is broken in some cases. skinned meshes just shouldnt have parents where possible! - m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() ) - else: - # Yes! this is it... - but dosnt work when the mesh is a. 
- m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() ) - - #m = mtx4_z90 * my_bone.restMatrix - matstr = mat4x4str(m) - matstr_i = mat4x4str(m.invert()) - - file.write('\n\t\tTransform: %s' % matstr_i) # THIS IS __NOT__ THE GLOBAL MATRIX AS DOCUMENTED :/ - file.write('\n\t\tTransformLink: %s' % matstr) - file.write('\n\t}') - - def write_mesh(my_mesh): - - me = my_mesh.blenData - - # if there are non NULL materials on this mesh - if my_mesh.blenMaterials: do_materials = True - else: do_materials = False - - if my_mesh.blenTextures: do_textures = True - else: do_textures = False - - do_uvs = len(me.uv_textures) > 0 -# do_uvs = me.faceUV - - - file.write('\n\tModel: "Model::%s", "Mesh" {' % my_mesh.fbxName) - file.write('\n\t\tVersion: 232') # newline is added in write_object_props - - poseMatrix = write_object_props(my_mesh.blenObject, None, my_mesh.parRelMatrix())[3] - pose_items.append((my_mesh.fbxName, poseMatrix)) - - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 1') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - - - # Write the Real Mesh data here - file.write('\n\t\tVertices: ') - i=-1 - - for v in me.verts: - if i==-1: - file.write('%.6f,%.6f,%.6f' % tuple(v.co)); i=0 - else: - if i==7: - file.write('\n\t\t'); i=0 - file.write(',%.6f,%.6f,%.6f'% tuple(v.co)) - i+=1 - - file.write('\n\t\tPolygonVertexIndex: ') - i=-1 - for f in me.faces: - fi = f.verts - # fi = [v_index for j, v_index in enumerate(f.verts) if v_index != 0 or j != 3] -# fi = [v.index for v in f] - - # flip the last index, odd but it looks like - # this is how fbx tells one face from another - fi[-1] = -(fi[-1]+1) - fi = tuple(fi) - if i==-1: - if len(fi) == 3: file.write('%i,%i,%i' % fi ) -# if len(f) == 3: file.write('%i,%i,%i' % fi ) - else: file.write('%i,%i,%i,%i' % fi ) - i=0 - else: - if i==13: - file.write('\n\t\t') - i=0 - if len(fi) == 3: file.write(',%i,%i,%i' % fi ) -# if len(f) == 3: file.write(',%i,%i,%i' % fi ) - else: file.write(',%i,%i,%i,%i' % fi ) - i+=1 - - file.write('\n\t\tEdges: ') - i=-1 - for ed in me.edges: - if i==-1: - file.write('%i,%i' % (ed.verts[0], ed.verts[1])) -# file.write('%i,%i' % (ed.v1.index, ed.v2.index)) - i=0 - else: - if i==13: - file.write('\n\t\t') - i=0 - file.write(',%i,%i' % (ed.verts[0], ed.verts[1])) -# file.write(',%i,%i' % (ed.v1.index, ed.v2.index)) - i+=1 - - file.write('\n\t\tGeometryVersion: 124') - - file.write(''' - LayerElementNormal: 0 { - Version: 101 - Name: "" - MappingInformationType: "ByVertice" - ReferenceInformationType: "Direct" - Normals: ''') - - i=-1 - for v in me.verts: - if i==-1: - file.write('%.15f,%.15f,%.15f' % tuple(v.normal)); i=0 -# file.write('%.15f,%.15f,%.15f' % tuple(v.no)); i=0 - else: - if i==2: - file.write('\n '); i=0 - file.write(',%.15f,%.15f,%.15f' % tuple(v.normal)) -# file.write(',%.15f,%.15f,%.15f' % tuple(v.no)) - i+=1 - file.write('\n\t\t}') - - # Write Face Smoothing - file.write(''' - LayerElementSmoothing: 0 { - Version: 102 - Name: "" - MappingInformationType: "ByPolygon" - ReferenceInformationType: "Direct" - Smoothing: ''') - - i=-1 - for f in me.faces: - if i==-1: - file.write('%i' % f.smooth); i=0 - else: - if i==54: - file.write('\n '); i=0 - file.write(',%i' % f.smooth) - i+=1 - - file.write('\n\t\t}') - - # Write Edge Smoothing - file.write(''' - LayerElementSmoothing: 0 { - Version: 101 - Name: "" - MappingInformationType: "ByEdge" - ReferenceInformationType: "Direct" - Smoothing: 
''') - -# SHARP = Blender.Mesh.EdgeFlags.SHARP - i=-1 - for ed in me.edges: - if i==-1: - file.write('%i' % (ed.sharp)); i=0 -# file.write('%i' % ((ed.flag&SHARP)!=0)); i=0 - else: - if i==54: - file.write('\n '); i=0 - file.write(',%i' % (ed.sharp)) -# file.write(',%i' % ((ed.flag&SHARP)!=0)) - i+=1 - - file.write('\n\t\t}') -# del SHARP - - # small utility function - # returns a slice of data depending on number of face verts - # data is either a MeshTextureFace or MeshColor - def face_data(data, face): - totvert = len(f.verts) - - return data[:totvert] - - - # Write VertexColor Layers - # note, no programs seem to use this info :/ - collayers = [] - if len(me.vertex_colors): -# if me.vertexColors: - collayers = me.vertex_colors -# collayers = me.getColorLayerNames() - collayer_orig = me.active_vertex_color -# collayer_orig = me.activeColorLayer - for colindex, collayer in enumerate(collayers): -# me.activeColorLayer = collayer - file.write('\n\t\tLayerElementColor: %i {' % colindex) - file.write('\n\t\t\tVersion: 101') - file.write('\n\t\t\tName: "%s"' % collayer.name) -# file.write('\n\t\t\tName: "%s"' % collayer) - - file.write(''' - MappingInformationType: "ByPolygonVertex" - ReferenceInformationType: "IndexToDirect" - Colors: ''') - - i = -1 - ii = 0 # Count how many Colors we write - - for f, cf in zip(me.faces, collayer.data): - colors = [cf.color1, cf.color2, cf.color3, cf.color4] - - # determine number of verts - colors = face_data(colors, f) - - for col in colors: - if i==-1: - file.write('%.4f,%.4f,%.4f,1' % tuple(col)) - i=0 - else: - if i==7: - file.write('\n\t\t\t\t') - i=0 - file.write(',%.4f,%.4f,%.4f,1' % tuple(col)) - i+=1 - ii+=1 # One more Color - -# for f in me.faces: -# for col in f.col: -# if i==-1: -# file.write('%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0)) -# i=0 -# else: -# if i==7: -# file.write('\n\t\t\t\t') -# i=0 -# file.write(',%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0)) -# i+=1 -# ii+=1 # One more Color - - file.write('\n\t\t\tColorIndex: ') - i = -1 - for j in range(ii): - if i == -1: - file.write('%i' % j) - i=0 - else: - if i==55: - file.write('\n\t\t\t\t') - i=0 - file.write(',%i' % j) - i+=1 - - file.write('\n\t\t}') - - - - # Write UV and texture layers. 
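# A minimal sketch of the PolygonVertexIndex convention used by write_mesh
# above: the last vertex index of every polygon is written as -(index + 1),
# which is how the flat index array marks where one face ends and the next
# begins.  The helper names are illustrative only, not part of this exporter.
def encode_poly_indices(faces):
    flat = []
    for face in faces:                    # face: a sequence of vertex indices
        face = list(face)
        face[-1] = -(face[-1] + 1)        # flip the closing index
        flat.extend(face)
    return flat

def decode_poly_indices(flat):
    faces, face = [], []
    for v in flat:
        if v < 0:
            face.append(-v - 1)           # undo the -(index + 1) encoding
            faces.append(face)
            face = []
        else:
            face.append(v)
    return faces

# encode_poly_indices([(0, 1, 2), (2, 3, 4, 5)]) -> [0, 1, -3, 2, 3, 4, -6]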
- uvlayers = [] - if do_uvs: - uvlayers = me.uv_textures -# uvlayers = me.getUVLayerNames() - uvlayer_orig = me.active_uv_texture -# uvlayer_orig = me.activeUVLayer - for uvindex, uvlayer in enumerate(me.uv_textures): -# for uvindex, uvlayer in enumerate(uvlayers): -# me.activeUVLayer = uvlayer - file.write('\n\t\tLayerElementUV: %i {' % uvindex) - file.write('\n\t\t\tVersion: 101') - file.write('\n\t\t\tName: "%s"' % uvlayer.name) -# file.write('\n\t\t\tName: "%s"' % uvlayer) - - file.write(''' - MappingInformationType: "ByPolygonVertex" - ReferenceInformationType: "IndexToDirect" - UV: ''') - - i = -1 - ii = 0 # Count how many UVs we write - - for uf in uvlayer.data: -# for f in me.faces: - for uv in uf.uv: -# for uv in f.uv: - if i==-1: - file.write('%.6f,%.6f' % tuple(uv)) - i=0 - else: - if i==7: - file.write('\n ') - i=0 - file.write(',%.6f,%.6f' % tuple(uv)) - i+=1 - ii+=1 # One more UV - - file.write('\n\t\t\tUVIndex: ') - i = -1 - for j in range(ii): - if i == -1: - file.write('%i' % j) - i=0 - else: - if i==55: - file.write('\n\t\t\t\t') - i=0 - file.write(',%i' % j) - i+=1 - - file.write('\n\t\t}') - - if do_textures: - file.write('\n\t\tLayerElementTexture: %i {' % uvindex) - file.write('\n\t\t\tVersion: 101') - file.write('\n\t\t\tName: "%s"' % uvlayer.name) -# file.write('\n\t\t\tName: "%s"' % uvlayer) - - if len(my_mesh.blenTextures) == 1: - file.write('\n\t\t\tMappingInformationType: "AllSame"') - else: - file.write('\n\t\t\tMappingInformationType: "ByPolygon"') - - file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"') - file.write('\n\t\t\tBlendMode: "Translucent"') - file.write('\n\t\t\tTextureAlpha: 1') - file.write('\n\t\t\tTextureId: ') - - if len(my_mesh.blenTextures) == 1: - file.write('0') - else: - texture_mapping_local = {None:-1} - - i = 0 # 1 for dummy - for tex in my_mesh.blenTextures: - if tex: # None is set above - texture_mapping_local[tex] = i - i+=1 - - i=-1 - for f in uvlayer.data: -# for f in me.faces: - img_key = f.image - - if i==-1: - i=0 - file.write( '%s' % texture_mapping_local[img_key]) - else: - if i==55: - file.write('\n ') - i=0 - - file.write(',%s' % texture_mapping_local[img_key]) - i+=1 - - else: - file.write(''' - LayerElementTexture: 0 { - Version: 101 - Name: "" - MappingInformationType: "NoMappingInformation" - ReferenceInformationType: "IndexToDirect" - BlendMode: "Translucent" - TextureAlpha: 1 - TextureId: ''') - file.write('\n\t\t}') - -# me.activeUVLayer = uvlayer_orig - - # Done with UV/textures. - - if do_materials: - file.write('\n\t\tLayerElementMaterial: 0 {') - file.write('\n\t\t\tVersion: 101') - file.write('\n\t\t\tName: ""') - - if len(my_mesh.blenMaterials) == 1: - file.write('\n\t\t\tMappingInformationType: "AllSame"') - else: - file.write('\n\t\t\tMappingInformationType: "ByPolygon"') - - file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"') - file.write('\n\t\t\tMaterials: ') - - if len(my_mesh.blenMaterials) == 1: - file.write('0') - else: - # Build a material mapping for this - material_mapping_local = {} # local-mat & tex : global index. 
- - for j, mat_tex_pair in enumerate(my_mesh.blenMaterials): - material_mapping_local[mat_tex_pair] = j - - len_material_mapping_local = len(material_mapping_local) - - mats = my_mesh.blenMaterialList - - if me.active_uv_texture: - uv_faces = me.active_uv_texture.data - else: - uv_faces = [None] * len(me.faces) - - i=-1 - for f, uf in zip(me.faces, uv_faces): -# for f in me.faces: - try: mat = mats[f.material_index] -# try: mat = mats[f.mat] - except:mat = None - - if do_uvs: tex = uf.image # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/ -# if do_uvs: tex = f.image # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/ - else: tex = None - - if i==-1: - i=0 - file.write( '%s' % (material_mapping_local[mat, tex])) # None for mat or tex is ok - else: - if i==55: - file.write('\n\t\t\t\t') - i=0 - - file.write(',%s' % (material_mapping_local[mat, tex])) - i+=1 - - file.write('\n\t\t}') - - file.write(''' - Layer: 0 { - Version: 100 - LayerElement: { - Type: "LayerElementNormal" - TypedIndex: 0 - }''') - - if do_materials: - file.write(''' - LayerElement: { - Type: "LayerElementMaterial" - TypedIndex: 0 - }''') - - # Always write this - if do_textures: - file.write(''' - LayerElement: { - Type: "LayerElementTexture" - TypedIndex: 0 - }''') - - if me.vertex_colors: -# if me.vertexColors: - file.write(''' - LayerElement: { - Type: "LayerElementColor" - TypedIndex: 0 - }''') - - if do_uvs: # same as me.faceUV - file.write(''' - LayerElement: { - Type: "LayerElementUV" - TypedIndex: 0 - }''') - - - file.write('\n\t\t}') - - if len(uvlayers) > 1: - for i in range(1, len(uvlayers)): - - file.write('\n\t\tLayer: %i {' % i) - file.write('\n\t\t\tVersion: 100') - - file.write(''' - LayerElement: { - Type: "LayerElementUV"''') - - file.write('\n\t\t\t\tTypedIndex: %i' % i) - file.write('\n\t\t\t}') - - if do_textures: - - file.write(''' - LayerElement: { - Type: "LayerElementTexture"''') - - file.write('\n\t\t\t\tTypedIndex: %i' % i) - file.write('\n\t\t\t}') - - file.write('\n\t\t}') - - if len(collayers) > 1: - # Take into account any UV layers - layer_offset = 0 - if uvlayers: layer_offset = len(uvlayers)-1 - - for i in range(layer_offset, len(collayers)+layer_offset): - file.write('\n\t\tLayer: %i {' % i) - file.write('\n\t\t\tVersion: 100') - - file.write(''' - LayerElement: { - Type: "LayerElementColor"''') - - file.write('\n\t\t\t\tTypedIndex: %i' % i) - file.write('\n\t\t\t}') - file.write('\n\t\t}') - file.write('\n\t}') - - def write_group(name): - file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {' % name) - - file.write(''' - Properties60: { - Property: "MultiLayer", "bool", "",0 - Property: "Pickable", "bool", "",1 - Property: "Transformable", "bool", "",1 - Property: "Show", "bool", "",1 - } - MultiLayer: 0 - }''') - - - # add meshes here to clear because they are not used anywhere. 
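# The vertex, face, edge, normal, colour and UV loops in write_mesh above all
# repeat one wrapping idiom: an "i = -1" sentinel for the first value, a comma
# before every later value, and a line break every few values.  A condensed
# sketch of that idiom as a generic helper (hypothetical; the script inlines
# the pattern each time):
def write_wrapped(file, values, fmt, wrap, indent='\n\t\t'):
    i = -1
    for val in values:
        if i == -1:
            file.write(fmt % val)         # first value, no leading comma
            i = 0
        else:
            if i == wrap:                 # break the line every `wrap` values
                file.write(indent)
                i = 0
            file.write(',' + fmt % val)
        i += 1

# e.g. write_wrapped(file, ('%.6f,%.6f,%.6f' % tuple(v.co) for v in me.verts),
#      '%s', 7) mirrors the Vertices loop in write_mesh above.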
- meshes_to_clear = [] - - ob_meshes = [] - ob_lights = [] - ob_cameras = [] - # in fbx we export bones as children of the mesh - # armatures not a part of a mesh, will be added to ob_arms - ob_bones = [] - ob_arms = [] - ob_null = [] # emptys - - # List of types that have blender objects (not bones) - ob_all_typegroups = [ob_meshes, ob_lights, ob_cameras, ob_arms, ob_null] - - groups = [] # blender groups, only add ones that have objects in the selections - materials = {} # (mat, image) keys, should be a set() - textures = {} # should be a set() - - tmp_ob_type = ob_type = None # incase no objects are exported, so as not to raise an error - - # if EXP_OBS_SELECTED is false, use sceens objects - if not batch_objects: - if EXP_OBS_SELECTED: tmp_objects = context.selected_objects -# if EXP_OBS_SELECTED: tmp_objects = sce.objects.context - else: tmp_objects = sce.objects - else: - tmp_objects = batch_objects - - if EXP_ARMATURE: - # This is needed so applying modifiers dosnt apply the armature deformation, its also needed - # ...so mesh objects return their rest worldspace matrix when bone-parents are exported as weighted meshes. - # set every armature to its rest, backup the original values so we done mess up the scene - ob_arms_orig_rest = [arm.rest_position for arm in bpy.data.armatures] -# ob_arms_orig_rest = [arm.restPosition for arm in bpy.data.armatures] - - for arm in bpy.data.armatures: - arm.rest_position = True -# arm.restPosition = True - - if ob_arms_orig_rest: - for ob_base in bpy.data.objects: - #if ob_base.type == 'Armature': - ob_base.make_display_list() -# ob_base.makeDisplayList() - - # This causes the makeDisplayList command to effect the mesh - sce.set_frame(sce.current_frame) -# Blender.Set('curframe', Blender.Get('curframe')) - - - for ob_base in tmp_objects: - - # ignore dupli children - if ob_base.parent and ob_base.parent.dupli_type != 'NONE': - continue - - obs = [(ob_base, ob_base.matrix)] - if ob_base.dupli_type != 'NONE': - ob_base.create_dupli_list() - obs = [(dob.object, dob.matrix) for dob in ob_base.dupli_list] - - for ob, mtx in obs: -# for ob, mtx in BPyObject.getDerivedObjects(ob_base): - tmp_ob_type = ob.type - if tmp_ob_type == 'CAMERA': -# if tmp_ob_type == 'Camera': - if EXP_CAMERA: - ob_cameras.append(my_object_generic(ob, mtx)) - elif tmp_ob_type == 'LAMP': -# elif tmp_ob_type == 'Lamp': - if EXP_LAMP: - ob_lights.append(my_object_generic(ob, mtx)) - elif tmp_ob_type == 'ARMATURE': -# elif tmp_ob_type == 'Armature': - if EXP_ARMATURE: - # TODO - armatures dont work in dupligroups! - if ob not in ob_arms: ob_arms.append(ob) - # ob_arms.append(ob) # replace later. was "ob_arms.append(sane_obname(ob), ob)" - elif tmp_ob_type == 'EMPTY': -# elif tmp_ob_type == 'Empty': - if EXP_EMPTY: - ob_null.append(my_object_generic(ob, mtx)) - elif EXP_MESH: - origData = True - if tmp_ob_type != 'MESH': -# if tmp_ob_type != 'Mesh': -# me = bpy.data.meshes.new() - try: me = ob.create_mesh(True, 'PREVIEW') -# try: me.getFromObject(ob) - except: me = None - if me: - meshes_to_clear.append( me ) - mats = me.materials - origData = False - else: - # Mesh Type! 
- if EXP_MESH_APPLY_MOD: -# me = bpy.data.meshes.new() - me = ob.create_mesh(True, 'PREVIEW') -# me.getFromObject(ob) - - # so we keep the vert groups -# if EXP_ARMATURE: -# orig_mesh = ob.getData(mesh=1) -# if orig_mesh.getVertGroupNames(): -# ob.copy().link(me) -# # If new mesh has no vgroups we can try add if verts are teh same -# if not me.getVertGroupNames(): # vgroups were not kept by the modifier -# if len(me.verts) == len(orig_mesh.verts): -# groupNames, vWeightDict = BPyMesh.meshWeight2Dict(orig_mesh) -# BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict) - - # print ob, me, me.getVertGroupNames() - meshes_to_clear.append( me ) - origData = False - mats = me.materials - else: - me = ob.data -# me = ob.getData(mesh=1) - mats = me.materials - -# # Support object colors -# tmp_colbits = ob.colbits -# if tmp_colbits: -# tmp_ob_mats = ob.getMaterials(1) # 1 so we get None's too. -# for i in xrange(16): -# if tmp_colbits & (1< 0: -# if me.faceUV: - uvlayer_orig = me.active_uv_texture -# uvlayer_orig = me.activeUVLayer - for uvlayer in me.uv_textures: -# for uvlayer in me.getUVLayerNames(): -# me.activeUVLayer = uvlayer - for f, uf in zip(me.faces, uvlayer.data): -# for f in me.faces: - tex = uf.image -# tex = f.image - textures[tex] = texture_mapping_local[tex] = None - - try: mat = mats[f.material_index] -# try: mat = mats[f.mat] - except: mat = None - - materials[mat, tex] = material_mapping_local[mat, tex] = None # should use sets, wait for blender 2.5 - - -# me.activeUVLayer = uvlayer_orig - else: - for mat in mats: - # 2.44 use mat.lib too for uniqueness - materials[mat, None] = material_mapping_local[mat, None] = None - else: - materials[None, None] = None - - if EXP_ARMATURE: - armob = ob.find_armature() - blenParentBoneName = None - - # parent bone - special case - if (not armob) and ob.parent and ob.parent.type == 'ARMATURE' and \ - ob.parent_type == 'BONE': -# if (not armob) and ob.parent and ob.parent.type == 'Armature' and ob.parentType == Blender.Object.ParentTypes.BONE: - armob = ob.parent - blenParentBoneName = ob.parent_bone -# blenParentBoneName = ob.parentbonename - - - if armob and armob not in ob_arms: - ob_arms.append(armob) - - else: - blenParentBoneName = armob = None - - my_mesh = my_object_generic(ob, mtx) - my_mesh.blenData = me - my_mesh.origData = origData - my_mesh.blenMaterials = list(material_mapping_local.keys()) - my_mesh.blenMaterialList = mats - my_mesh.blenTextures = list(texture_mapping_local.keys()) - - # if only 1 null texture then empty the list - if len(my_mesh.blenTextures) == 1 and my_mesh.blenTextures[0] == None: - my_mesh.blenTextures = [] - - my_mesh.fbxArm = armob # replace with my_object_generic armature instance later - my_mesh.fbxBoneParent = blenParentBoneName # replace with my_bone instance later - - ob_meshes.append( my_mesh ) - - # not forgetting to free dupli_list - if ob_base.dupli_list: ob_base.free_dupli_list() - - - if EXP_ARMATURE: - # now we have the meshes, restore the rest arm position - for i, arm in enumerate(bpy.data.armatures): - arm.rest_position = ob_arms_orig_rest[i] -# arm.restPosition = ob_arms_orig_rest[i] - - if ob_arms_orig_rest: - for ob_base in bpy.data.objects: - if ob_base.type == 'ARMATURE': -# if ob_base.type == 'Armature': - ob_base.make_display_list() -# ob_base.makeDisplayList() - # This causes the makeDisplayList command to effect the mesh - sce.set_frame(sce.current_frame) -# Blender.Set('curframe', Blender.Get('curframe')) - - del tmp_ob_type, tmp_objects - - # now we have collected all armatures, 
add bones - for i, ob in enumerate(ob_arms): - - ob_arms[i] = my_arm = my_object_generic(ob) - - my_arm.fbxBones = [] - my_arm.blenData = ob.data - if ob.animation_data: - my_arm.blenAction = ob.animation_data.action - else: - my_arm.blenAction = None -# my_arm.blenAction = ob.action - my_arm.blenActionList = [] - - # fbxName, blenderObject, my_bones, blenderActions - #ob_arms[i] = fbxArmObName, ob, arm_my_bones, (ob.action, []) - - for bone in my_arm.blenData.bones: -# for bone in my_arm.blenData.bones.values(): - my_bone = my_bone_class(bone, my_arm) - my_arm.fbxBones.append( my_bone ) - ob_bones.append( my_bone ) - - # add the meshes to the bones and replace the meshes armature with own armature class - #for obname, ob, mtx, me, mats, arm, armname in ob_meshes: - for my_mesh in ob_meshes: - # Replace - # ...this could be sped up with dictionary mapping but its unlikely for - # it ever to be a bottleneck - (would need 100+ meshes using armatures) - if my_mesh.fbxArm: - for my_arm in ob_arms: - if my_arm.blenObject == my_mesh.fbxArm: - my_mesh.fbxArm = my_arm - break - - for my_bone in ob_bones: - - # The mesh uses this bones armature! - if my_bone.fbxArm == my_mesh.fbxArm: - my_bone.blenMeshes[my_mesh.fbxName] = me - - - # parent bone: replace bone names with our class instances - # my_mesh.fbxBoneParent is None or a blender bone name initialy, replacing if the names match. - if my_mesh.fbxBoneParent == my_bone.blenName: - my_mesh.fbxBoneParent = my_bone - - bone_deformer_count = 0 # count how many bones deform a mesh - my_bone_blenParent = None - for my_bone in ob_bones: - my_bone_blenParent = my_bone.blenBone.parent - if my_bone_blenParent: - for my_bone_parent in ob_bones: - # Note 2.45rc2 you can compare bones normally - if my_bone_blenParent.name == my_bone_parent.blenName and my_bone.fbxArm == my_bone_parent.fbxArm: - my_bone.parent = my_bone_parent - break - - # Not used at the moment - # my_bone.calcRestMatrixLocal() - bone_deformer_count += len(my_bone.blenMeshes) - - del my_bone_blenParent - - - # Build blenObject -> fbxObject mapping - # this is needed for groups as well as fbxParenting -# for ob in bpy.data.objects: ob.tag = False -# bpy.data.objects.tag = False - - # using a list of object names for tagging (Arystan) - tagged_objects = [] - - tmp_obmapping = {} - for ob_generic in ob_all_typegroups: - for ob_base in ob_generic: - tagged_objects.append(ob_base.blenObject.name) -# ob_base.blenObject.tag = True - tmp_obmapping[ob_base.blenObject] = ob_base - - # Build Groups from objects we export - for blenGroup in bpy.data.groups: - fbxGroupName = None - for ob in blenGroup.objects: - if ob.name in tagged_objects: -# if ob.tag: - if fbxGroupName == None: - fbxGroupName = sane_groupname(blenGroup) - groups.append((fbxGroupName, blenGroup)) - - tmp_obmapping[ob].fbxGroupNames.append(fbxGroupName) # also adds to the objects fbxGroupNames - - groups.sort() # not really needed - - # Assign parents using this mapping - for ob_generic in ob_all_typegroups: - for my_ob in ob_generic: - parent = my_ob.blenObject.parent - if parent and parent.name in tagged_objects: # does it exist and is it in the mapping -# if parent and parent.tag: # does it exist and is it in the mapping - my_ob.fbxParent = tmp_obmapping[parent] - - - del tmp_obmapping - # Finished finding groups we use - - - materials = [(sane_matname(mat_tex_pair), mat_tex_pair) for mat_tex_pair in materials.keys()] - textures = [(sane_texname(tex), tex) for tex in textures.keys() if tex] - materials.sort() # sort by name - 
textures.sort() - - camera_count = 8 - file.write(''' - -; Object definitions -;------------------------------------------------------------------ - -Definitions: { - Version: 100 - Count: %i''' % (\ - 1+1+camera_count+\ - len(ob_meshes)+\ - len(ob_lights)+\ - len(ob_cameras)+\ - len(ob_arms)+\ - len(ob_null)+\ - len(ob_bones)+\ - bone_deformer_count+\ - len(materials)+\ - (len(textures)*2))) # add 1 for the root model 1 for global settings - - del bone_deformer_count - - file.write(''' - ObjectType: "Model" { - Count: %i - }''' % (\ - 1+camera_count+\ - len(ob_meshes)+\ - len(ob_lights)+\ - len(ob_cameras)+\ - len(ob_arms)+\ - len(ob_null)+\ - len(ob_bones))) # add 1 for the root model - - file.write(''' - ObjectType: "Geometry" { - Count: %i - }''' % len(ob_meshes)) - - if materials: - file.write(''' - ObjectType: "Material" { - Count: %i - }''' % len(materials)) - - if textures: - file.write(''' - ObjectType: "Texture" { - Count: %i - }''' % len(textures)) # add 1 for an empty tex - file.write(''' - ObjectType: "Video" { - Count: %i - }''' % len(textures)) # add 1 for an empty tex - - tmp = 0 - # Add deformer nodes - for my_mesh in ob_meshes: - if my_mesh.fbxArm: - tmp+=1 - - # Add subdeformers - for my_bone in ob_bones: - tmp += len(my_bone.blenMeshes) - - if tmp: - file.write(''' - ObjectType: "Deformer" { - Count: %i - }''' % tmp) - del tmp - - # we could avoid writing this possibly but for now just write it - - file.write(''' - ObjectType: "Pose" { - Count: 1 - }''') - - if groups: - file.write(''' - ObjectType: "GroupSelection" { - Count: %i - }''' % len(groups)) - - file.write(''' - ObjectType: "GlobalSettings" { - Count: 1 - } -}''') - - file.write(''' - -; Object properties -;------------------------------------------------------------------ - -Objects: {''') - - # To comply with other FBX FILES - write_camera_switch() - - # Write the null object - write_null(None, 'blend_root')# , GLOBAL_MATRIX) - - for my_null in ob_null: - write_null(my_null) - - for my_arm in ob_arms: - write_null(my_arm) - - for my_cam in ob_cameras: - write_camera(my_cam) - - for my_light in ob_lights: - write_light(my_light) - - for my_mesh in ob_meshes: - write_mesh(my_mesh) - - #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - write_bone(my_bone) - - write_camera_default() - - for matname, (mat, tex) in materials: - write_material(matname, mat) # We only need to have a material per image pair, but no need to write any image info into the material (dumb fbx standard) - - # each texture uses a video, odd - for texname, tex in textures: - write_video(texname, tex) - i = 0 - for texname, tex in textures: - write_texture(texname, tex, i) - i+=1 - - for groupname, group in groups: - write_group(groupname) - - # NOTE - c4d and motionbuilder dont need normalized weights, but deep-exploration 5 does and (max?) do. - - # Write armature modifiers - # TODO - add another MODEL? - because of this skin definition. 
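# write_sub_deformer_skin above looks up weights[0] by vertex-group name and
# walks weights[1] per vertex, so meshNormalizedWeights (defined earlier in
# this script, not shown here) apparently returns a (group_names, rows) pair
# with each per-vertex row normalised to sum to 1.0.  A minimal sketch of that
# layout, built from a plain {group_name: {vert_index: weight}} dict instead
# of Blender data (the input shape is an assumption made for illustration):
def normalized_weight_table(group_weights, vert_count):
    group_names = list(group_weights.keys())
    rows = [[group_weights[name].get(i, 0.0) for name in group_names]
            for i in range(vert_count)]
    for row in rows:
        total = sum(row)
        if total:                         # leave unweighted verts as zeros
            for k in range(len(row)):
                row[k] /= total
    return group_names, rows

# normalized_weight_table({'Bone': {0: 2.0}, 'Bone.001': {0: 2.0}}, 2)
# -> (['Bone', 'Bone.001'], [[0.5, 0.5], [0.0, 0.0]])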
- for my_mesh in ob_meshes: - if my_mesh.fbxArm: - write_deformer_skin(my_mesh.fbxName) - - # Get normalized weights for temorary use - if my_mesh.fbxBoneParent: - weights = None - else: - weights = meshNormalizedWeights(my_mesh.blenObject) -# weights = meshNormalizedWeights(my_mesh.blenData) - - #for bonename, bone, obname, bone_mesh, armob in ob_bones: - for my_bone in ob_bones: - if me in iter(my_bone.blenMeshes.values()): - write_sub_deformer_skin(my_mesh, my_bone, weights) - - # Write pose's really weired, only needed when an armature and mesh are used together - # each by themselves dont need pose data. for now only pose meshes and bones - - file.write(''' - Pose: "Pose::BIND_POSES", "BindPose" { - Type: "BindPose" - Version: 100 - Properties60: { - } - NbPoseNodes: ''') - file.write(str(len(pose_items))) - - - for fbxName, matrix in pose_items: - file.write('\n\t\tPoseNode: {') - file.write('\n\t\t\tNode: "Model::%s"' % fbxName ) - if matrix: file.write('\n\t\t\tMatrix: %s' % mat4x4str(matrix)) - else: file.write('\n\t\t\tMatrix: %s' % mat4x4str(mtx4_identity)) - file.write('\n\t\t}') - - file.write('\n\t}') - - - # Finish Writing Objects - # Write global settings - file.write(''' - GlobalSettings: { - Version: 1000 - Properties60: { - Property: "UpAxis", "int", "",1 - Property: "UpAxisSign", "int", "",1 - Property: "FrontAxis", "int", "",2 - Property: "FrontAxisSign", "int", "",1 - Property: "CoordAxis", "int", "",0 - Property: "CoordAxisSign", "int", "",1 - Property: "UnitScaleFactor", "double", "",100 - } - } -''') - file.write('}') - - file.write(''' - -; Object relations -;------------------------------------------------------------------ - -Relations: {''') - - file.write('\n\tModel: "Model::blend_root", "Null" {\n\t}') - - for my_null in ob_null: - file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_null.fbxName) - - for my_arm in ob_arms: - file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_arm.fbxName) - - for my_mesh in ob_meshes: - file.write('\n\tModel: "Model::%s", "Mesh" {\n\t}' % my_mesh.fbxName) - - # TODO - limbs can have the same name for multiple armatures, should prefix. 
- #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - file.write('\n\tModel: "Model::%s", "Limb" {\n\t}' % my_bone.fbxName) - - for my_cam in ob_cameras: - file.write('\n\tModel: "Model::%s", "Camera" {\n\t}' % my_cam.fbxName) - - for my_light in ob_lights: - file.write('\n\tModel: "Model::%s", "Light" {\n\t}' % my_light.fbxName) - - file.write(''' - Model: "Model::Producer Perspective", "Camera" { - } - Model: "Model::Producer Top", "Camera" { - } - Model: "Model::Producer Bottom", "Camera" { - } - Model: "Model::Producer Front", "Camera" { - } - Model: "Model::Producer Back", "Camera" { - } - Model: "Model::Producer Right", "Camera" { - } - Model: "Model::Producer Left", "Camera" { - } - Model: "Model::Camera Switcher", "CameraSwitcher" { - }''') - - for matname, (mat, tex) in materials: - file.write('\n\tMaterial: "Material::%s", "" {\n\t}' % matname) - - if textures: - for texname, tex in textures: - file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {\n\t}' % texname) - for texname, tex in textures: - file.write('\n\tVideo: "Video::%s", "Clip" {\n\t}' % texname) - - # deformers - modifiers - for my_mesh in ob_meshes: - if my_mesh.fbxArm: - file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {\n\t}' % my_mesh.fbxName) - - #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - for fbxMeshObName in my_bone.blenMeshes: # .keys() - fbxMeshObName - # is this bone effecting a mesh? - file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {\n\t}' % (fbxMeshObName, my_bone.fbxName)) - - # This should be at the end - # file.write('\n\tPose: "Pose::BIND_POSES", "BindPose" {\n\t}') - - for groupname, group in groups: - file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {\n\t}' % groupname) - - file.write('\n}') - file.write(''' - -; Object connections -;------------------------------------------------------------------ - -Connections: {''') - - # NOTE - The FBX SDK dosnt care about the order but some importers DO! - # for instance, defining the material->mesh connection - # before the mesh->blend_root crashes cinema4d - - - # write the fake root node - file.write('\n\tConnect: "OO", "Model::blend_root", "Model::Scene"') - - for ob_generic in ob_all_typegroups: # all blender 'Object's we support - for my_ob in ob_generic: - if my_ob.fbxParent: - file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_ob.fbxName, my_ob.fbxParent.fbxName)) - else: - file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_ob.fbxName) - - if materials: - for my_mesh in ob_meshes: - # Connect all materials to all objects, not good form but ok for now. 
- for mat, tex in my_mesh.blenMaterials: - if mat: mat_name = mat.name - else: mat_name = None - - if tex: tex_name = tex.name - else: tex_name = None - - file.write('\n\tConnect: "OO", "Material::%s", "Model::%s"' % (sane_name_mapping_mat[mat_name, tex_name], my_mesh.fbxName)) - - if textures: - for my_mesh in ob_meshes: - if my_mesh.blenTextures: - # file.write('\n\tConnect: "OO", "Texture::_empty_", "Model::%s"' % my_mesh.fbxName) - for tex in my_mesh.blenTextures: - if tex: - file.write('\n\tConnect: "OO", "Texture::%s", "Model::%s"' % (sane_name_mapping_tex[tex.name], my_mesh.fbxName)) - - for texname, tex in textures: - file.write('\n\tConnect: "OO", "Video::%s", "Texture::%s"' % (texname, texname)) - - for my_mesh in ob_meshes: - if my_mesh.fbxArm: - file.write('\n\tConnect: "OO", "Deformer::Skin %s", "Model::%s"' % (my_mesh.fbxName, my_mesh.fbxName)) - - #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - for fbxMeshObName in my_bone.blenMeshes: # .keys() - file.write('\n\tConnect: "OO", "SubDeformer::Cluster %s %s", "Deformer::Skin %s"' % (fbxMeshObName, my_bone.fbxName, fbxMeshObName)) - - # limbs -> deformers - # for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - for fbxMeshObName in my_bone.blenMeshes: # .keys() - file.write('\n\tConnect: "OO", "Model::%s", "SubDeformer::Cluster %s %s"' % (my_bone.fbxName, fbxMeshObName, my_bone.fbxName)) - - - #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - # Always parent to armature now - if my_bone.parent: - file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.parent.fbxName) ) - else: - # the armature object is written as an empty and all root level bones connect to it - file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.fbxArm.fbxName) ) - - # groups - if groups: - for ob_generic in ob_all_typegroups: - for ob_base in ob_generic: - for fbxGroupName in ob_base.fbxGroupNames: - file.write('\n\tConnect: "OO", "Model::%s", "GroupSelection::%s"' % (ob_base.fbxName, fbxGroupName)) - - for my_arm in ob_arms: - file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_arm.fbxName) - - file.write('\n}') - - - # Needed for scene footer as well as animation - render = sce.render_data -# render = sce.render - - # from the FBX sdk - #define KTIME_ONE_SECOND KTime (K_LONGLONG(46186158000)) - def fbx_time(t): - # 0.5 + val is the same as rounding. - return int(0.5 + ((t/fps) * 46186158000)) - - fps = float(render.fps) - start = sce.start_frame -# start = render.sFrame - end = sce.end_frame -# end = render.eFrame - if end < start: start, end = end, start - if start==end: ANIM_ENABLE = False - - # animations for these object types - ob_anim_lists = ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms - - if ANIM_ENABLE and [tmp for tmp in ob_anim_lists if tmp]: - - frame_orig = sce.current_frame -# frame_orig = Blender.Get('curframe') - - if ANIM_OPTIMIZE: - ANIM_OPTIMIZE_PRECISSION_FLOAT = 0.1 ** ANIM_OPTIMIZE_PRECISSION - - # default action, when no actions are avaioable - tmp_actions = [None] # None is the default action - blenActionDefault = None - action_lastcompat = None - - # instead of tagging - tagged_actions = [] - - if ANIM_ACTION_ALL: -# bpy.data.actions.tag = False - tmp_actions = list(bpy.data.actions) - - - # find which actions are compatible with the armatures - # blenActions is not yet initialized so do it now. 
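# fbx_time above maps a frame number to FBX KTime ticks, where one second is
# 46186158000 ticks (KTIME_ONE_SECOND in the FBX SDK), i.e.
# ktime = round(frame / fps * 46186158000); the exporter always passes
# frame - 1 so Blender frame 1 lands on KTime 0.  A quick worked check at an
# example frame rate of 25 fps (the rate itself is just an assumption here):
KTIME_ONE_SECOND = 46186158000

def fbx_time_at(frame, fps):
    return int(0.5 + (frame / fps) * KTIME_ONE_SECOND)

assert fbx_time_at(25, 25.0) == KTIME_ONE_SECOND   # frame 25 == one second
assert fbx_time_at(1, 25.0) == 1847446320          # a single frame at 25 fps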
- tmp_act_count = 0 - for my_arm in ob_arms: - - # get the default name - if not blenActionDefault: - blenActionDefault = my_arm.blenAction - - arm_bone_names = set([my_bone.blenName for my_bone in my_arm.fbxBones]) - - for action in tmp_actions: - - action_chan_names = arm_bone_names.intersection( set([g.name for g in action.groups]) ) -# action_chan_names = arm_bone_names.intersection( set(action.getChannelNames()) ) - - if action_chan_names: # at least one channel matches. - my_arm.blenActionList.append(action) - tagged_actions.append(action.name) -# action.tag = True - tmp_act_count += 1 - - # incase there is no actions applied to armatures - action_lastcompat = action - - if tmp_act_count: - # unlikely to ever happen but if no actions applied to armatures, just use the last compatible armature. - if not blenActionDefault: - blenActionDefault = action_lastcompat - - del action_lastcompat - - file.write(''' -;Takes and animation section -;---------------------------------------------------- - -Takes: {''') - - if blenActionDefault: - file.write('\n\tCurrent: "%s"' % sane_takename(blenActionDefault)) - else: - file.write('\n\tCurrent: "Default Take"') - - for blenAction in tmp_actions: - # we have tagged all actious that are used be selected armatures - if blenAction: - if blenAction.name in tagged_actions: -# if blenAction.tag: - print('\taction: "%s" exporting...' % blenAction.name) - else: - print('\taction: "%s" has no armature using it, skipping' % blenAction.name) - continue - - if blenAction == None: - # Warning, this only accounts for tmp_actions being [None] - file.write('\n\tTake: "Default Take" {') - act_start = start - act_end = end - else: - # use existing name - if blenAction == blenActionDefault: # have we alredy got the name - file.write('\n\tTake: "%s" {' % sane_name_mapping_take[blenAction.name]) - else: - file.write('\n\tTake: "%s" {' % sane_takename(blenAction)) - - act_start, act_end = blenAction.get_frame_range() -# tmp = blenAction.getFrameNumbers() -# if tmp: -# act_start = min(tmp) -# act_end = max(tmp) -# del tmp -# else: -# # Fallback on this, theres not much else we can do? :/ -# # when an action has no length -# act_start = start -# act_end = end - - # Set the action active - for my_bone in ob_arms: - if blenAction in my_bone.blenActionList: - ob.action = blenAction - # print '\t\tSetting Action!', blenAction - # sce.update(1) - - file.write('\n\t\tFileName: "Default_Take.tak"') # ??? - not sure why this is needed - file.write('\n\t\tLocalTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed - file.write('\n\t\tReferenceTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed - - file.write(''' - - ;Models animation - ;----------------------------------------------------''') - - - # set pose data for all bones - # do this here incase the action changes - ''' - for my_bone in ob_bones: - my_bone.flushAnimData() - ''' - i = act_start - while i <= act_end: - sce.set_frame(i) -# Blender.Set('curframe', i) - for ob_generic in ob_anim_lists: - for my_ob in ob_generic: - #Blender.Window.RedrawAll() - if ob_generic == ob_meshes and my_ob.fbxArm: - # We cant animate armature meshes! 
- pass - else: - my_ob.setPoseFrame(i) - - i+=1 - - - #for bonename, bone, obname, me, armob in ob_bones: - for ob_generic in (ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms): - - for my_ob in ob_generic: - - if ob_generic == ob_meshes and my_ob.fbxArm: - # do nothing, - pass - else: - - file.write('\n\t\tModel: "Model::%s" {' % my_ob.fbxName) # ??? - not sure why this is needed - file.write('\n\t\t\tVersion: 1.1') - file.write('\n\t\t\tChannel: "Transform" {') - - context_bone_anim_mats = [ (my_ob.getAnimParRelMatrix(frame), my_ob.getAnimParRelMatrixRot(frame)) for frame in range(act_start, act_end+1) ] - - # ---------------- - # ---------------- - for TX_LAYER, TX_CHAN in enumerate('TRS'): # transform, rotate, scale - - if TX_CHAN=='T': context_bone_anim_vecs = [mtx[0].translationPart() for mtx in context_bone_anim_mats] - elif TX_CHAN=='S': context_bone_anim_vecs = [mtx[0].scalePart() for mtx in context_bone_anim_mats] - elif TX_CHAN=='R': - # Was.... - # elif TX_CHAN=='R': context_bone_anim_vecs = [mtx[1].toEuler() for mtx in context_bone_anim_mats] - # - # ...but we need to use the previous euler for compatible conversion. - context_bone_anim_vecs = [] - prev_eul = None - for mtx in context_bone_anim_mats: - if prev_eul: prev_eul = mtx[1].toEuler(prev_eul) - else: prev_eul = mtx[1].toEuler() - context_bone_anim_vecs.append(eulerRadToDeg(prev_eul)) -# context_bone_anim_vecs.append(prev_eul) - - file.write('\n\t\t\t\tChannel: "%s" {' % TX_CHAN) # translation - - for i in range(3): - # Loop on each axis of the bone - file.write('\n\t\t\t\t\tChannel: "%s" {'% ('XYZ'[i])) # translation - file.write('\n\t\t\t\t\t\tDefault: %.15f' % context_bone_anim_vecs[0][i] ) - file.write('\n\t\t\t\t\t\tKeyVer: 4005') - - if not ANIM_OPTIMIZE: - # Just write all frames, simple but in-eficient - file.write('\n\t\t\t\t\t\tKeyCount: %i' % (1 + act_end - act_start)) - file.write('\n\t\t\t\t\t\tKey: ') - frame = act_start - while frame <= act_end: - if frame!=act_start: - file.write(',') - - # Curve types are 'C,n' for constant, 'L' for linear - # C,n is for bezier? - linear is best for now so we can do simple keyframe removal - file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame-1), context_bone_anim_vecs[frame-act_start][i] )) - frame+=1 - else: - # remove unneeded keys, j is the frame, needed when some frames are removed. - context_bone_anim_keys = [ (vec[i], j) for j, vec in enumerate(context_bone_anim_vecs) ] - - # last frame to fisrt frame, missing 1 frame on either side. - # removeing in a backwards loop is faster - #for j in xrange( (act_end-act_start)-1, 0, -1 ): - # j = (act_end-act_start)-1 - j = len(context_bone_anim_keys)-2 - while j > 0 and len(context_bone_anim_keys) > 2: - # print j, len(context_bone_anim_keys) - # Is this key the same as the ones next to it? - - # co-linear horizontal... 
- if abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j-1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT and\ - abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j+1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT: - - del context_bone_anim_keys[j] - - else: - frame_range = float(context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j-1][1]) - frame_range_fac1 = (context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j][1]) / frame_range - frame_range_fac2 = 1.0 - frame_range_fac1 - - if abs(((context_bone_anim_keys[j-1][0]*frame_range_fac1 + context_bone_anim_keys[j+1][0]*frame_range_fac2)) - context_bone_anim_keys[j][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT: - del context_bone_anim_keys[j] - else: - j-=1 - - # keep the index below the list length - if j > len(context_bone_anim_keys)-2: - j = len(context_bone_anim_keys)-2 - - if len(context_bone_anim_keys) == 2 and context_bone_anim_keys[0][0] == context_bone_anim_keys[1][0]: - # This axis has no moton, its okay to skip KeyCount and Keys in this case - pass - else: - # We only need to write these if there is at least one - file.write('\n\t\t\t\t\t\tKeyCount: %i' % len(context_bone_anim_keys)) - file.write('\n\t\t\t\t\t\tKey: ') - for val, frame in context_bone_anim_keys: - if frame != context_bone_anim_keys[0][1]: # not the first - file.write(',') - # frame is alredy one less then blenders frame - file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame), val )) - - if i==0: file.write('\n\t\t\t\t\t\tColor: 1,0,0') - elif i==1: file.write('\n\t\t\t\t\t\tColor: 0,1,0') - elif i==2: file.write('\n\t\t\t\t\t\tColor: 0,0,1') - - file.write('\n\t\t\t\t\t}') - file.write('\n\t\t\t\t\tLayerType: %i' % (TX_LAYER+1) ) - file.write('\n\t\t\t\t}') - - # --------------- - - file.write('\n\t\t\t}') - file.write('\n\t\t}') - - # end the take - file.write('\n\t}') - - # end action loop. set original actions - # do this after every loop incase actions effect eachother. 
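# The ANIM_OPTIMIZE branch above drops any key whose value sits within
# ANIM_OPTIMIZE_PRECISSION_FLOAT of the straight line through its two
# neighbours, starting from the second-to-last key and working toward the
# start while more than two keys remain (the exporter also short-circuits the
# flat case with a separate test).  A condensed standalone sketch of that pass
# (thin_keys is a hypothetical name; the exporter inlines the logic per axis
# and stores (value, frame) pairs rather than (frame, value)):
def thin_keys(keys, eps):
    keys = list(keys)                     # keys: (frame, value) sorted by frame
    j = len(keys) - 2
    while j > 0 and len(keys) > 2:
        f0, v0 = keys[j - 1]
        f1, v1 = keys[j]
        f2, v2 = keys[j + 1]
        fac = (f2 - f1) / float(f2 - f0)  # weight of the left neighbour
        predicted = v0 * fac + v2 * (1.0 - fac)
        if abs(v1 - predicted) < eps:
            del keys[j]                   # key is redundant, drop it
        else:
            j -= 1
        if j > len(keys) - 2:             # keep the index inside the list
            j = len(keys) - 2
    return keys

# thin_keys([(0, 1.0), (1, 1.0), (2, 1.0), (3, 2.0)], 0.1)
# -> [(0, 1.0), (2, 1.0), (3, 2.0)]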
- for my_bone in ob_arms: - my_bone.blenObject.action = my_bone.blenAction - - file.write('\n}') - - sce.set_frame(frame_orig) -# Blender.Set('curframe', frame_orig) - - else: - # no animation - file.write('\n;Takes and animation section') - file.write('\n;----------------------------------------------------') - file.write('\n') - file.write('\nTakes: {') - file.write('\n\tCurrent: ""') - file.write('\n}') - - - # write meshes animation - #for obname, ob, mtx, me, mats, arm, armname in ob_meshes: - - - # Clear mesh data Only when writing with modifiers applied - for me in meshes_to_clear: - bpy.data.remove_mesh(me) -# me.verts = None - - # --------------------------- Footer - if world: - m = world.mist - has_mist = m.enabled -# has_mist = world.mode & 1 - mist_intense = m.intensity - mist_start = m.start - mist_end = m.depth - mist_height = m.height -# mist_intense, mist_start, mist_end, mist_height = world.mist - world_hor = world.horizon_color -# world_hor = world.hor - else: - has_mist = mist_intense = mist_start = mist_end = mist_height = 0 - world_hor = 0,0,0 - - file.write('\n;Version 5 settings') - file.write('\n;------------------------------------------------------------------') - file.write('\n') - file.write('\nVersion5: {') - file.write('\n\tAmbientRenderSettings: {') - file.write('\n\t\tVersion: 101') - file.write('\n\t\tAmbientLightColor: %.1f,%.1f,%.1f,0' % tuple(world_amb)) - file.write('\n\t}') - file.write('\n\tFogOptions: {') - file.write('\n\t\tFlogEnable: %i' % has_mist) - file.write('\n\t\tFogMode: 0') - file.write('\n\t\tFogDensity: %.3f' % mist_intense) - file.write('\n\t\tFogStart: %.3f' % mist_start) - file.write('\n\t\tFogEnd: %.3f' % mist_end) - file.write('\n\t\tFogColor: %.1f,%.1f,%.1f,1' % tuple(world_hor)) - file.write('\n\t}') - file.write('\n\tSettings: {') - file.write('\n\t\tFrameRate: "%i"' % int(fps)) - file.write('\n\t\tTimeFormat: 1') - file.write('\n\t\tSnapOnFrames: 0') - file.write('\n\t\tReferenceTimeIndex: -1') - file.write('\n\t\tTimeLineStartTime: %i' % fbx_time(start-1)) - file.write('\n\t\tTimeLineStopTime: %i' % fbx_time(end-1)) - file.write('\n\t}') - file.write('\n\tRendererSetting: {') - file.write('\n\t\tDefaultCamera: "Producer Perspective"') - file.write('\n\t\tDefaultViewingMode: 0') - file.write('\n\t}') - file.write('\n}') - file.write('\n') - - # Incase sombody imports this, clean up by clearing global dicts - sane_name_mapping_ob.clear() - sane_name_mapping_mat.clear() - sane_name_mapping_tex.clear() - - ob_arms[:] = [] - ob_bones[:] = [] - ob_cameras[:] = [] - ob_lights[:] = [] - ob_meshes[:] = [] - ob_null[:] = [] - - - # copy images if enabled -# if EXP_IMAGE_COPY: -# # copy_images( basepath, [ tex[1] for tex in textures if tex[1] != None ]) -# bpy.util.copy_images( [ tex[1] for tex in textures if tex[1] != None ], basepath) - - print('export finished in %.4f sec.' % (time.clock() - start_time)) -# print 'export finished in %.4f sec.' % (Blender.sys.time() - start_time) - return True - - -# -------------------------------------------- -# UI Function - not a part of the exporter. -# this is to seperate the user interface from the rest of the exporter. 
-# from Blender import Draw, Window -EVENT_NONE = 0 -EVENT_EXIT = 1 -EVENT_REDRAW = 2 -EVENT_FILESEL = 3 - -GLOBALS = {} - -# export opts - -def do_redraw(e,v): GLOBALS['EVENT'] = e - -# toggle between these 2, only allow one on at once -def do_obs_sel(e,v): - GLOBALS['EVENT'] = e - GLOBALS['EXP_OBS_SCENE'].val = 0 - GLOBALS['EXP_OBS_SELECTED'].val = 1 - -def do_obs_sce(e,v): - GLOBALS['EVENT'] = e - GLOBALS['EXP_OBS_SCENE'].val = 1 - GLOBALS['EXP_OBS_SELECTED'].val = 0 - -def do_batch_type_grp(e,v): - GLOBALS['EVENT'] = e - GLOBALS['BATCH_GROUP'].val = 1 - GLOBALS['BATCH_SCENE'].val = 0 - -def do_batch_type_sce(e,v): - GLOBALS['EVENT'] = e - GLOBALS['BATCH_GROUP'].val = 0 - GLOBALS['BATCH_SCENE'].val = 1 - -def do_anim_act_all(e,v): - GLOBALS['EVENT'] = e - GLOBALS['ANIM_ACTION_ALL'][0].val = 1 - GLOBALS['ANIM_ACTION_ALL'][1].val = 0 - -def do_anim_act_cur(e,v): - if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val: - Draw.PupMenu('Warning%t|Cant use this with batch export group option') - else: - GLOBALS['EVENT'] = e - GLOBALS['ANIM_ACTION_ALL'][0].val = 0 - GLOBALS['ANIM_ACTION_ALL'][1].val = 1 - -def fbx_ui_exit(e,v): - GLOBALS['EVENT'] = e - -def do_help(e,v): - url = 'http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx' - print('Trying to open web browser with documentation at this address...') - print('\t' + url) - - try: - import webbrowser - webbrowser.open(url) - except: - Blender.Draw.PupMenu("Error%t|Opening a webbrowser requires a full python installation") - print('...could not open a browser window.') - - - -# run when export is pressed -#def fbx_ui_write(e,v): -def fbx_ui_write(filename, context): - - # Dont allow overwriting files when saving normally - if not GLOBALS['BATCH_ENABLE'].val: - if not BPyMessages.Warning_SaveOver(filename): - return - - GLOBALS['EVENT'] = EVENT_EXIT - - # Keep the order the same as above for simplicity - # the [] is a dummy arg used for objects - - Blender.Window.WaitCursor(1) - - # Make the matrix - GLOBAL_MATRIX = mtx4_identity - GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = GLOBALS['_SCALE'].val - if GLOBALS['_XROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_x90n - if GLOBALS['_YROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_y90n - if GLOBALS['_ZROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_z90n - - ret = write(\ - filename, None,\ - context, - GLOBALS['EXP_OBS_SELECTED'].val,\ - GLOBALS['EXP_MESH'].val,\ - GLOBALS['EXP_MESH_APPLY_MOD'].val,\ - GLOBALS['EXP_MESH_HQ_NORMALS'].val,\ - GLOBALS['EXP_ARMATURE'].val,\ - GLOBALS['EXP_LAMP'].val,\ - GLOBALS['EXP_CAMERA'].val,\ - GLOBALS['EXP_EMPTY'].val,\ - GLOBALS['EXP_IMAGE_COPY'].val,\ - GLOBAL_MATRIX,\ - GLOBALS['ANIM_ENABLE'].val,\ - GLOBALS['ANIM_OPTIMIZE'].val,\ - GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val,\ - GLOBALS['ANIM_ACTION_ALL'][0].val,\ - GLOBALS['BATCH_ENABLE'].val,\ - GLOBALS['BATCH_GROUP'].val,\ - GLOBALS['BATCH_SCENE'].val,\ - GLOBALS['BATCH_FILE_PREFIX'].val,\ - GLOBALS['BATCH_OWN_DIR'].val,\ - ) - - Blender.Window.WaitCursor(0) - GLOBALS.clear() - - if ret == False: - Draw.PupMenu('Error%t|Path cannot be written to!') - - -def fbx_ui(): - # Only to center the UI - x,y = GLOBALS['MOUSE'] - x-=180; y-=0 # offset... 
just to get it centered - - Draw.Label('Export Objects...', x+20,y+165, 200, 20) - - if not GLOBALS['BATCH_ENABLE'].val: - Draw.BeginAlign() - GLOBALS['EXP_OBS_SELECTED'] = Draw.Toggle('Selected Objects', EVENT_REDRAW, x+20, y+145, 160, 20, GLOBALS['EXP_OBS_SELECTED'].val, 'Export selected objects on visible layers', do_obs_sel) - GLOBALS['EXP_OBS_SCENE'] = Draw.Toggle('Scene Objects', EVENT_REDRAW, x+180, y+145, 160, 20, GLOBALS['EXP_OBS_SCENE'].val, 'Export all objects in this scene', do_obs_sce) - Draw.EndAlign() - - Draw.BeginAlign() - GLOBALS['_SCALE'] = Draw.Number('Scale:', EVENT_NONE, x+20, y+120, 140, 20, GLOBALS['_SCALE'].val, 0.01, 1000.0, 'Scale all data, (Note! some imports dont support scaled armatures)') - GLOBALS['_XROT90'] = Draw.Toggle('Rot X90', EVENT_NONE, x+160, y+120, 60, 20, GLOBALS['_XROT90'].val, 'Rotate all objects 90 degrese about the X axis') - GLOBALS['_YROT90'] = Draw.Toggle('Rot Y90', EVENT_NONE, x+220, y+120, 60, 20, GLOBALS['_YROT90'].val, 'Rotate all objects 90 degrese about the Y axis') - GLOBALS['_ZROT90'] = Draw.Toggle('Rot Z90', EVENT_NONE, x+280, y+120, 60, 20, GLOBALS['_ZROT90'].val, 'Rotate all objects 90 degrese about the Z axis') - Draw.EndAlign() - - y -= 35 - - Draw.BeginAlign() - GLOBALS['EXP_EMPTY'] = Draw.Toggle('Empty', EVENT_NONE, x+20, y+120, 60, 20, GLOBALS['EXP_EMPTY'].val, 'Export empty objects') - GLOBALS['EXP_CAMERA'] = Draw.Toggle('Camera', EVENT_NONE, x+80, y+120, 60, 20, GLOBALS['EXP_CAMERA'].val, 'Export camera objects') - GLOBALS['EXP_LAMP'] = Draw.Toggle('Lamp', EVENT_NONE, x+140, y+120, 60, 20, GLOBALS['EXP_LAMP'].val, 'Export lamp objects') - GLOBALS['EXP_ARMATURE'] = Draw.Toggle('Armature', EVENT_NONE, x+200, y+120, 60, 20, GLOBALS['EXP_ARMATURE'].val, 'Export armature objects') - GLOBALS['EXP_MESH'] = Draw.Toggle('Mesh', EVENT_REDRAW, x+260, y+120, 80, 20, GLOBALS['EXP_MESH'].val, 'Export mesh objects', do_redraw) #, do_axis_z) - Draw.EndAlign() - - if GLOBALS['EXP_MESH'].val: - # below mesh but - Draw.BeginAlign() - GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Toggle('Modifiers', EVENT_NONE, x+260, y+100, 80, 20, GLOBALS['EXP_MESH_APPLY_MOD'].val, 'Apply modifiers to mesh objects') #, do_axis_z) - GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Toggle('HQ Normals', EVENT_NONE, x+260, y+80, 80, 20, GLOBALS['EXP_MESH_HQ_NORMALS'].val, 'Generate high quality normals') #, do_axis_z) - Draw.EndAlign() - - GLOBALS['EXP_IMAGE_COPY'] = Draw.Toggle('Copy Image Files', EVENT_NONE, x+20, y+80, 160, 20, GLOBALS['EXP_IMAGE_COPY'].val, 'Copy image files to the destination path') #, do_axis_z) - - - Draw.Label('Export Armature Animation...', x+20,y+45, 300, 20) - - GLOBALS['ANIM_ENABLE'] = Draw.Toggle('Enable Animation', EVENT_REDRAW, x+20, y+25, 160, 20, GLOBALS['ANIM_ENABLE'].val, 'Export keyframe animation', do_redraw) - if GLOBALS['ANIM_ENABLE'].val: - Draw.BeginAlign() - GLOBALS['ANIM_OPTIMIZE'] = Draw.Toggle('Optimize Keyframes', EVENT_REDRAW, x+20, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE'].val, 'Remove double keyframes', do_redraw) - if GLOBALS['ANIM_OPTIMIZE'].val: - GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Number('Precission: ', EVENT_NONE, x+180, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val, 1, 16, 'Tolerence for comparing double keyframes (higher for greater accuracy)') - Draw.EndAlign() - - Draw.BeginAlign() - GLOBALS['ANIM_ACTION_ALL'][1] = Draw.Toggle('Current Action', EVENT_REDRAW, x+20, y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][1].val, 'Use actions currently applied to the armatures (use scene start/end frame)', do_anim_act_cur) - 
GLOBALS['ANIM_ACTION_ALL'][0] = Draw.Toggle('All Actions', EVENT_REDRAW, x+180,y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][0].val, 'Use all actions for armatures', do_anim_act_all) - Draw.EndAlign() - - - Draw.Label('Export Batch...', x+20,y-60, 300, 20) - GLOBALS['BATCH_ENABLE'] = Draw.Toggle('Enable Batch', EVENT_REDRAW, x+20, y-80, 160, 20, GLOBALS['BATCH_ENABLE'].val, 'Automate exporting multiple scenes or groups to files', do_redraw) - - if GLOBALS['BATCH_ENABLE'].val: - Draw.BeginAlign() - GLOBALS['BATCH_GROUP'] = Draw.Toggle('Group > File', EVENT_REDRAW, x+20, y-105, 160, 20, GLOBALS['BATCH_GROUP'].val, 'Export each group as an FBX file', do_batch_type_grp) - GLOBALS['BATCH_SCENE'] = Draw.Toggle('Scene > File', EVENT_REDRAW, x+180, y-105, 160, 20, GLOBALS['BATCH_SCENE'].val, 'Export each scene as an FBX file', do_batch_type_sce) - - # Own dir requires OS module - if os: - GLOBALS['BATCH_OWN_DIR'] = Draw.Toggle('Own Dir', EVENT_NONE, x+20, y-125, 80, 20, GLOBALS['BATCH_OWN_DIR'].val, 'Create a dir for each exported file') - GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+100, y-125, 240, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ') - else: - GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+20, y-125, 320, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ') - - - Draw.EndAlign() - - #y+=80 - - ''' - Draw.BeginAlign() - GLOBALS['FILENAME'] = Draw.String('path: ', EVENT_NONE, x+20, y-170, 300, 20, GLOBALS['FILENAME'].val, 64, 'Prefix each file with this name ') - Draw.PushButton('..', EVENT_FILESEL, x+320, y-170, 20, 20, 'Select the path', do_redraw) - ''' - # Until batch is added - # - - - #Draw.BeginAlign() - Draw.PushButton('Online Help', EVENT_REDRAW, x+20, y-160, 100, 20, 'Open online help in a browser window', do_help) - Draw.PushButton('Cancel', EVENT_EXIT, x+130, y-160, 100, 20, 'Exit the exporter', fbx_ui_exit) - Draw.PushButton('Export', EVENT_FILESEL, x+240, y-160, 100, 20, 'Export the fbx file', do_redraw) - - #Draw.PushButton('Export', EVENT_EXIT, x+180, y-160, 160, 20, 'Export the fbx file', fbx_ui_write) - #Draw.EndAlign() - - # exit when mouse out of the view? 
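Both fbx_ui_write() above and EXPORT_OT_fbx.execute() further below assemble GLOBAL_MATRIX the same way: a uniform scale on the diagonal of mtx4_identity, then optional multiplications by the prebuilt mtx4_x90n/mtx4_y90n/mtx4_z90n rotation matrices. A minimal standalone sketch of that construction, assuming the modern mathutils API (the helper name build_global_matrix is illustrative, and current mathutils uses @ where this 2.5-era script used *):

    import math
    from mathutils import Matrix  # Blender's math module, also available on PyPI

    def build_global_matrix(scale=1.0, rot_x90=True, rot_y90=False, rot_z90=False):
        # Uniform scale in a 4x4 matrix, mirroring
        # GLOBAL_MATRIX[0][0] = [1][1] = [2][2] = scale in the script.
        mtx = Matrix.Scale(scale, 4)
        # Optional -90 degree rotation per enabled axis toggle
        # (matching the mtx4_*90n naming; the sign is an assumption).
        if rot_x90:
            mtx = mtx @ Matrix.Rotation(-math.pi / 2.0, 4, 'X')
        if rot_y90:
            mtx = mtx @ Matrix.Rotation(-math.pi / 2.0, 4, 'Y')
        if rot_z90:
            mtx = mtx @ Matrix.Rotation(-math.pi / 2.0, 4, 'Z')
        return mtx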
- # GLOBALS['EVENT'] = EVENT_EXIT - -#def write_ui(filename): -def write_ui(): - - # globals - GLOBALS['EVENT'] = EVENT_REDRAW - #GLOBALS['MOUSE'] = Window.GetMouseCoords() - GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()] - GLOBALS['FILENAME'] = '' - ''' - # IF called from the fileselector - if filename == None: - GLOBALS['FILENAME'] = filename # Draw.Create(Blender.sys.makename(ext='.fbx')) - else: - GLOBALS['FILENAME'].val = filename - ''' - GLOBALS['EXP_OBS_SELECTED'] = Draw.Create(1) # dont need 2 variables but just do this for clarity - GLOBALS['EXP_OBS_SCENE'] = Draw.Create(0) - - GLOBALS['EXP_MESH'] = Draw.Create(1) - GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Create(1) - GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Create(0) - GLOBALS['EXP_ARMATURE'] = Draw.Create(1) - GLOBALS['EXP_LAMP'] = Draw.Create(1) - GLOBALS['EXP_CAMERA'] = Draw.Create(1) - GLOBALS['EXP_EMPTY'] = Draw.Create(1) - GLOBALS['EXP_IMAGE_COPY'] = Draw.Create(0) - # animation opts - GLOBALS['ANIM_ENABLE'] = Draw.Create(1) - GLOBALS['ANIM_OPTIMIZE'] = Draw.Create(1) - GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Create(4) # decimal places - GLOBALS['ANIM_ACTION_ALL'] = [Draw.Create(0), Draw.Create(1)] # not just the current action - - # batch export options - GLOBALS['BATCH_ENABLE'] = Draw.Create(0) - GLOBALS['BATCH_GROUP'] = Draw.Create(1) # cant have both of these enabled at once. - GLOBALS['BATCH_SCENE'] = Draw.Create(0) # see above - GLOBALS['BATCH_FILE_PREFIX'] = Draw.Create(Blender.sys.makename(ext='_').split('\\')[-1].split('/')[-1]) - GLOBALS['BATCH_OWN_DIR'] = Draw.Create(0) - # done setting globals - - # Used by the user interface - GLOBALS['_SCALE'] = Draw.Create(1.0) - GLOBALS['_XROT90'] = Draw.Create(True) - GLOBALS['_YROT90'] = Draw.Create(False) - GLOBALS['_ZROT90'] = Draw.Create(False) - - # best not do move the cursor - # Window.SetMouseCoords(*[i/2 for i in Window.GetScreenSize()]) - - # hack so the toggle buttons redraw. this is not nice at all - while GLOBALS['EVENT'] != EVENT_EXIT: - - if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val and GLOBALS['ANIM_ACTION_ALL'][1].val: - #Draw.PupMenu("Warning%t|Cant batch export groups with 'Current Action' ") - GLOBALS['ANIM_ACTION_ALL'][0].val = 1 - GLOBALS['ANIM_ACTION_ALL'][1].val = 0 - - if GLOBALS['EVENT'] == EVENT_FILESEL: - if GLOBALS['BATCH_ENABLE'].val: - txt = 'Batch FBX Dir' - name = Blender.sys.expandpath('//') - else: - txt = 'Export FBX' - name = Blender.sys.makename(ext='.fbx') - - Blender.Window.FileSelector(fbx_ui_write, txt, name) - #fbx_ui_write('/test.fbx') - break - - Draw.UIBlock(fbx_ui, 0) - - - # GLOBALS.clear() - -class EXPORT_OT_fbx(bpy.types.Operator): - ''' - Operator documentation text, will be used for the operator tooltip and python docs. - ''' - __idname__ = "export.fbx" - __label__ = "Export FBX" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [ - bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the FBX file", maxlen= 1024, default= ""), - - bpy.props.BoolProperty(attr="EXP_OBS_SELECTED", name="Selected Objects", description="Export selected objects on visible layers", default=True), -# bpy.props.BoolProperty(attr="EXP_OBS_SCENE", name="Scene Objects", description="Export all objects in this scene", default=True), - bpy.props.FloatProperty(attr="_SCALE", name="Scale", description="Scale all data, (Note! 
some imports dont support scaled armatures)", min=0.01, max=1000.0, soft_min=0.01, soft_max=1000.0, default=1.0), - bpy.props.BoolProperty(attr="_XROT90", name="Rot X90", description="Rotate all objects 90 degrese about the X axis", default=True), - bpy.props.BoolProperty(attr="_YROT90", name="Rot Y90", description="Rotate all objects 90 degrese about the Y axis", default=False), - bpy.props.BoolProperty(attr="_ZROT90", name="Rot Z90", description="Rotate all objects 90 degrese about the Z axis", default=False), - bpy.props.BoolProperty(attr="EXP_EMPTY", name="Empties", description="Export empty objects", default=True), - bpy.props.BoolProperty(attr="EXP_CAMERA", name="Cameras", description="Export camera objects", default=True), - bpy.props.BoolProperty(attr="EXP_LAMP", name="Lamps", description="Export lamp objects", default=True), - bpy.props.BoolProperty(attr="EXP_ARMATURE", name="Armatures", description="Export armature objects", default=True), - bpy.props.BoolProperty(attr="EXP_MESH", name="Meshes", description="Export mesh objects", default=True), - bpy.props.BoolProperty(attr="EXP_MESH_APPLY_MOD", name="Modifiers", description="Apply modifiers to mesh objects", default=True), - bpy.props.BoolProperty(attr="EXP_MESH_HQ_NORMALS", name="HQ Normals", description="Generate high quality normals", default=True), - bpy.props.BoolProperty(attr="EXP_IMAGE_COPY", name="Copy Image Files", description="Copy image files to the destination path", default=False), - # armature animation - bpy.props.BoolProperty(attr="ANIM_ENABLE", name="Enable Animation", description="Export keyframe animation", default=True), - bpy.props.BoolProperty(attr="ANIM_OPTIMIZE", name="Optimize Keyframes", description="Remove double keyframes", default=True), - bpy.props.FloatProperty(attr="ANIM_OPTIMIZE_PRECISSION", name="Precision", description="Tolerence for comparing double keyframes (higher for greater accuracy)", min=1, max=16, soft_min=1, soft_max=16, default=6.0), -# bpy.props.BoolProperty(attr="ANIM_ACTION_ALL", name="Current Action", description="Use actions currently applied to the armatures (use scene start/end frame)", default=True), - bpy.props.BoolProperty(attr="ANIM_ACTION_ALL", name="All Actions", description="Use all actions for armatures, if false, use current action", default=False), - # batch - bpy.props.BoolProperty(attr="BATCH_ENABLE", name="Enable Batch", description="Automate exporting multiple scenes or groups to files", default=False), - bpy.props.BoolProperty(attr="BATCH_GROUP", name="Group > File", description="Export each group as an FBX file, if false, export each scene as an FBX file", default=False), - bpy.props.BoolProperty(attr="BATCH_OWN_DIR", name="Own Dir", description="Create a dir for each exported file", default=True), - bpy.props.StringProperty(attr="BATCH_FILE_PREFIX", name="Prefix", description="Prefix each file with this name", maxlen= 1024, default=""), - ] - - def poll(self, context): - print("Poll") - return context.active_object != None - - def execute(self, context): - if not self.path: - raise Exception("path not set") - - GLOBAL_MATRIX = mtx4_identity - GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = self._SCALE - if self._XROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_x90n - if self._YROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_y90n - if self._ZROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_z90n - - write(self.path, - None, # XXX - context, - self.EXP_OBS_SELECTED, - self.EXP_MESH, - self.EXP_MESH_APPLY_MOD, -# self.EXP_MESH_HQ_NORMALS, - self.EXP_ARMATURE, 
- self.EXP_LAMP, - self.EXP_CAMERA, - self.EXP_EMPTY, - self.EXP_IMAGE_COPY, - GLOBAL_MATRIX, - self.ANIM_ENABLE, - self.ANIM_OPTIMIZE, - self.ANIM_OPTIMIZE_PRECISSION, - self.ANIM_ACTION_ALL, - self.BATCH_ENABLE, - self.BATCH_GROUP, - self.BATCH_FILE_PREFIX, - self.BATCH_OWN_DIR) - - return ('FINISHED',) - - def invoke(self, context, event): - wm = context.manager - wm.add_fileselect(self.__operator__) - return ('RUNNING_MODAL',) - - -bpy.ops.add(EXPORT_OT_fbx) - -# if __name__ == "__main__": -# bpy.ops.EXPORT_OT_ply(filename="/tmp/test.ply") - - -# NOTES (all line numbers correspond to original export_fbx.py (under release/scripts) -# - Draw.PupMenu alternative in 2.5?, temporarily replaced PupMenu with print -# - get rid of cleanName somehow -# + fixed: isinstance(inst, bpy.types.*) doesn't work on RNA objects: line 565 -# + get rid of BPyObject_getObjectArmature, move it in RNA? -# - BATCH_ENABLE and BATCH_GROUP options: line 327 -# - implement all BPyMesh_* used here with RNA -# - getDerivedObjects is not fully replicated with .dupli* funcs -# - talk to Campbell, this code won't work? lines 1867-1875 -# - don't know what those colbits are, do we need them? they're said to be deprecated in DNA_object_types.h: 1886-1893 -# - no hq normals: 1900-1901 - -# TODO - -# - bpy.data.remove_scene: line 366 -# - bpy.sys.time move to bpy.sys.util? -# - new scene creation, activation: lines 327-342, 368 -# - uses bpy.sys.expandpath, *.relpath - replace at least relpath - -# SMALL or COSMETICAL -# - find a way to get blender version, and put it in bpy.util?, old was Blender.Get('version') diff --git a/release/scripts/io/export_obj.py b/release/scripts/io/export_obj.py deleted file mode 100644 index 83b400816e3..00000000000 --- a/release/scripts/io/export_obj.py +++ /dev/null @@ -1,996 +0,0 @@ -#!BPY - -""" -Name: 'Wavefront (.obj)...' -Blender: 248 -Group: 'Export' -Tooltip: 'Save a Wavefront OBJ File' -""" - -__author__ = "Campbell Barton, Jiri Hnidek, Paolo Ciccone" -__url__ = ['http://wiki.blender.org/index.php/Scripts/Manual/Export/wavefront_obj', 'www.blender.org', 'blenderartists.org'] -__version__ = "1.21" - -__bpydoc__ = """\ -This script is an exporter to OBJ file format. - -Usage: - -Select the objects you wish to export and run this script from "File->Export" menu. -Selecting the default options from the popup box will be good in most cases. -All objects that can be represented as a mesh (mesh, curve, metaball, surface, text3d) -will be exported as mesh data. -""" - - -# -------------------------------------------------------------------------- -# OBJ Export v1.1 by Campbell Barton (AKA Ideasman) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -# import math and other in functions that use them for the sake of fast Blender startup -# import math -import os -import time - -import bpy -import Mathutils - - -# Returns a tuple - path,extension. -# 'hello.obj' > ('hello', '.obj') -def splitExt(path): - dotidx = path.rfind('.') - if dotidx == -1: - return path, '' - else: - return path[:dotidx], path[dotidx:] - -def fixName(name): - if name == None: - return 'None' - else: - return name.replace(' ', '_') - - -# this used to be in BPySys module -# frankly, I don't understand how it works -def BPySys_cleanName(name): - - v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,58,59,60,61,62,63,64,91,92,93,94,96,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254] - - invalid = ''.join([chr(i) for i in v]) - - for ch in invalid: - name = name.replace(ch, '_') - return name - -# A Dict of Materials -# (material.name, image.name):matname_imagename # matname_imagename has gaps removed. -MTL_DICT = {} - -def write_mtl(scene, filename, copy_images): - - world = scene.world - worldAmb = world.ambient_color - - dest_dir = os.path.dirname(filename) - - def copy_image(image): - rel = image.get_export_path(dest_dir, True) - - if copy_images: - abspath = image.get_export_path(dest_dir, False) - if not os.path.exists(abs_path): - shutil.copy(image.get_abs_filename(), abs_path) - - return rel - - - file = open(filename, "w") - # XXX -# file.write('# Blender3D MTL File: %s\n' % Blender.Get('filename').split('\\')[-1].split('/')[-1]) - file.write('# Material Count: %i\n' % len(MTL_DICT)) - # Write material/image combinations we have used. - for key, (mtl_mat_name, mat, img) in MTL_DICT.items(): - - # Get the Blender data for the material and the image. - # Having an image named None will make a bug, dont do it :) - - file.write('newmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname - - if mat: - file.write('Ns %.6f\n' % ((mat.specular_hardness-1) * 1.9607843137254901) ) # Hardness, convert blenders 1-511 to MTL's - file.write('Ka %.6f %.6f %.6f\n' % tuple([c*mat.ambient for c in worldAmb]) ) # Ambient, uses mirror colour, - file.write('Kd %.6f %.6f %.6f\n' % tuple([c*mat.diffuse_intensity for c in mat.diffuse_color]) ) # Diffuse - file.write('Ks %.6f %.6f %.6f\n' % tuple([c*mat.specular_intensity for c in mat.specular_color]) ) # Specular - if hasattr(mat, "ior"): - file.write('Ni %.6f\n' % mat.ior) # Refraction index - else: - file.write('Ni %.6f\n' % 1.0) - file.write('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve) - - # 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting. - if mat.shadeless: - file.write('illum 0\n') # ignore lighting - elif mat.specular_intensity == 0: - file.write('illum 1\n') # no specular. - else: - file.write('illum 2\n') # light normaly - - else: - #write a dummy material here? 
- file.write('Ns 0\n') - file.write('Ka %.6f %.6f %.6f\n' % tuple([c for c in worldAmb]) ) # Ambient, uses mirror colour, - file.write('Kd 0.8 0.8 0.8\n') - file.write('Ks 0.8 0.8 0.8\n') - file.write('d 1\n') # No alpha - file.write('illum 2\n') # light normaly - - # Write images! - if img: # We have an image on the face! - # write relative image path - rel = copy_image(img) - file.write('map_Kd %s\n' % rel) # Diffuse mapping image -# file.write('map_Kd %s\n' % img.filename.split('\\')[-1].split('/')[-1]) # Diffuse mapping image - - elif mat: # No face image. if we havea material search for MTex image. - for mtex in mat.textures: - if mtex and mtex.texture.type == 'IMAGE': - try: - filename = copy_image(mtex.texture.image) -# filename = mtex.texture.image.filename.split('\\')[-1].split('/')[-1] - file.write('map_Kd %s\n' % filename) # Diffuse mapping image - break - except: - # Texture has no image though its an image type, best ignore. - pass - - file.write('\n\n') - - file.close() - -# XXX not used -def copy_file(source, dest): - file = open(source, 'rb') - data = file.read() - file.close() - - file = open(dest, 'wb') - file.write(data) - file.close() - - -# XXX not used -def copy_images(dest_dir): - if dest_dir[-1] != os.sep: - dest_dir += os.sep -# if dest_dir[-1] != sys.sep: -# dest_dir += sys.sep - - # Get unique image names - uniqueImages = {} - for matname, mat, image in MTL_DICT.values(): # Only use image name - # Get Texface images - if image: - uniqueImages[image] = image # Should use sets here. wait until Python 2.4 is default. - - # Get MTex images - if mat: - for mtex in mat.textures: - if mtex and mtex.texture.type == 'IMAGE': - image_tex = mtex.texture.image - if image_tex: - try: - uniqueImages[image_tex] = image_tex - except: - pass - - # Now copy images - copyCount = 0 - -# for bImage in uniqueImages.values(): -# image_path = bpy.sys.expandpath(bImage.filename) -# if bpy.sys.exists(image_path): -# # Make a name for the target path. 
-# dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1] -# if not bpy.sys.exists(dest_image_path): # Image isnt alredy there -# print('\tCopying "%s" > "%s"' % (image_path, dest_image_path)) -# copy_file(image_path, dest_image_path) -# copyCount+=1 - -# paths= bpy.util.copy_images(uniqueImages.values(), dest_dir) - - print('\tCopied %d images' % copyCount) -# print('\tCopied %d images' % copyCount) - -# XXX not converted -def test_nurbs_compat(ob): - if ob.type != 'Curve': - return False - - for nu in ob.data: - if (not nu.knotsV) and nu.type != 1: # not a surface and not bezier - return True - - return False - - -# XXX not converted -def write_nurb(file, ob, ob_mat): - tot_verts = 0 - cu = ob.data - - # use negative indices - Vector = Blender.Mathutils.Vector - for nu in cu: - - if nu.type==0: DEG_ORDER_U = 1 - else: DEG_ORDER_U = nu.orderU-1 # Tested to be correct - - if nu.type==1: - print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported") - continue - - if nu.knotsV: - print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported") - continue - - if len(nu) <= DEG_ORDER_U: - print("\tWarning, orderU is lower then vert count, skipping:", ob.name) - continue - - pt_num = 0 - do_closed = (nu.flagU & 1) - do_endpoints = (do_closed==0) and (nu.flagU & 2) - - for pt in nu: - pt = Vector(pt[0], pt[1], pt[2]) * ob_mat - file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2])) - pt_num += 1 - tot_verts += pt_num - - file.write('g %s\n' % (fixName(ob.name))) # fixName(ob.getData(1)) could use the data name too - file.write('cstype bspline\n') # not ideal, hard coded - file.write('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still - - curve_ls = [-(i+1) for i in range(pt_num)] - - # 'curv' keyword - if do_closed: - if DEG_ORDER_U == 1: - pt_num += 1 - curve_ls.append(-1) - else: - pt_num += DEG_ORDER_U - curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U] - - file.write('curv 0.0 1.0 %s\n' % (' '.join( [str(i) for i in curve_ls] ))) # Blender has no U and V values for the curve - - # 'parm' keyword - tot_parm = (DEG_ORDER_U + 1) + pt_num - tot_parm_div = float(tot_parm-1) - parm_ls = [(i/tot_parm_div) for i in range(tot_parm)] - - if do_endpoints: # end points, force param - for i in range(DEG_ORDER_U+1): - parm_ls[i] = 0.0 - parm_ls[-(1+i)] = 1.0 - - file.write('parm u %s\n' % ' '.join( [str(i) for i in parm_ls] )) - - file.write('end\n') - - return tot_verts - -def write(filename, objects, scene, - EXPORT_TRI=False, - EXPORT_EDGES=False, - EXPORT_NORMALS=False, - EXPORT_NORMALS_HQ=False, - EXPORT_UV=True, - EXPORT_MTL=True, - EXPORT_COPY_IMAGES=False, - EXPORT_APPLY_MODIFIERS=True, - EXPORT_ROTX90=True, - EXPORT_BLEN_OBS=True, - EXPORT_GROUP_BY_OB=False, - EXPORT_GROUP_BY_MAT=False, - EXPORT_KEEP_VERT_ORDER=False, - EXPORT_POLYGROUPS=False, - EXPORT_CURVE_AS_NURBS=True): - ''' - Basic write function. The context and options must be alredy set - This can be accessed externaly - eg. - write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options. - ''' - - # XXX - import math - - def veckey3d(v): - return round(v.x, 6), round(v.y, 6), round(v.z, 6) - - def veckey2d(v): - return round(v[0], 6), round(v[1], 6) - # return round(v.x, 6), round(v.y, 6) - - def findVertexGroupName(face, vWeightMap): - """ - Searches the vertexDict to see what groups is assigned to a given face. 
- We use a frequency system in order to sort out the name because a given vetex can - belong to two or more groups at the same time. To find the right name for the face - we list all the possible vertex group names with their frequency and then sort by - frequency in descend order. The top element is the one shared by the highest number - of vertices is the face's group - """ - weightDict = {} - for vert_index in face.verts: -# for vert in face: - vWeights = vWeightMap[vert_index] -# vWeights = vWeightMap[vert] - for vGroupName, weight in vWeights: - weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight - - if weightDict: - alist = [(weight,vGroupName) for vGroupName, weight in weightDict.items()] # sort least to greatest amount of weight - alist.sort() - return(alist[-1][1]) # highest value last - else: - return '(null)' - - # TODO: implement this in C? dunno how it should be called... - def getVertsFromGroup(me, group_index): - ret = [] - - for i, v in enumerate(me.verts): - for g in v.groups: - if g.group == group_index: - ret.append((i, g.weight)) - - return ret - - - print('OBJ Export path: "%s"' % filename) - temp_mesh_name = '~tmp-mesh' - - time1 = time.clock() -# time1 = sys.time() -# scn = Scene.GetCurrent() - - file = open(filename, "w") - - # Write Header - version = "2.5" - file.write('# Blender3D v%s OBJ File: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1] )) - file.write('# www.blender3d.org\n') - - # Tell the obj file what material file to use. - if EXPORT_MTL: - mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1]) - file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] )) - - if EXPORT_ROTX90: - mat_xrot90= Mathutils.RotationMatrix(-math.pi/2, 4, 'x') - - # Initialize totals, these are updated each object - totverts = totuvco = totno = 1 - - face_vert_index = 1 - - globalNormals = {} - - # Get all meshes - for ob_main in objects: - - # ignore dupli children - if ob_main.parent and ob_main.parent.dupli_type != 'NONE': - # XXX - print(ob_main.name, 'is a dupli child - ignoring') - continue - - obs = [] - if ob_main.dupli_type != 'NONE': - # XXX - print('creating dupli_list on', ob_main.name) - ob_main.create_dupli_list() - - obs = [(dob.object, dob.matrix) for dob in ob_main.dupli_list] - - # XXX debug print - print(ob_main.name, 'has', len(obs), 'dupli children') - else: - obs = [(ob_main, ob_main.matrix)] - - for ob, ob_mat in obs: - - # XXX postponed -# # Nurbs curve support -# if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob): -# if EXPORT_ROTX90: -# ob_mat = ob_mat * mat_xrot90 - -# totverts += write_nurb(file, ob, ob_mat) - -# continue -# end nurbs - - if ob.type != 'MESH': - continue - - me = ob.create_mesh(EXPORT_APPLY_MODIFIERS, 'PREVIEW') - - if EXPORT_ROTX90: - me.transform(ob_mat * mat_xrot90) - else: - me.transform(ob_mat) - -# # Will work for non meshes now! :) -# me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn) -# if not me: -# continue - - if EXPORT_UV: - faceuv = len(me.uv_textures) > 0 - else: - faceuv = False - - # XXX - todo, find a better way to do triangulation - # ...removed convert_to_triface because it relies on editmesh - ''' - # We have a valid mesh - if EXPORT_TRI and me.faces: - # Add a dummy object to it. 
- has_quads = False - for f in me.faces: - if f.verts[3] != 0: - has_quads = True - break - - if has_quads: - newob = bpy.data.add_object('MESH', 'temp_object') - newob.data = me - # if we forget to set Object.data - crash - scene.add_object(newob) - newob.convert_to_triface(scene) - # mesh will still be there - scene.remove_object(newob) - ''' - - # Make our own list so it can be sorted to reduce context switching - face_index_pairs = [ (face, index) for index, face in enumerate(me.faces)] - # faces = [ f for f in me.faces ] - - if EXPORT_EDGES: - edges = me.edges - else: - edges = [] - - if not (len(face_index_pairs)+len(edges)+len(me.verts)): # Make sure there is somthing to write - - # clean up - bpy.data.remove_mesh(me) - - continue # dont bother with this mesh. - - # XXX - # High Quality Normals - if EXPORT_NORMALS and face_index_pairs: - me.calc_normals() -# if EXPORT_NORMALS_HQ: -# BPyMesh.meshCalcNormals(me) -# else: -# # transforming normals is incorrect -# # when the matrix is scaled, -# # better to recalculate them -# me.calcNormals() - - materials = me.materials - - materialNames = [] - materialItems = [m for m in materials] - if materials: - for mat in materials: - if mat: # !=None - materialNames.append(mat.name) - else: - materialNames.append(None) - # Cant use LC because some materials are None. - # materialNames = map(lambda mat: mat.name, materials) # Bug Blender, dosent account for null materials, still broken. - - # Possible there null materials, will mess up indicies - # but at least it will export, wait until Blender gets fixed. - materialNames.extend((16-len(materialNames)) * [None]) - materialItems.extend((16-len(materialItems)) * [None]) - - # Sort by Material, then images - # so we dont over context switch in the obj file. - if EXPORT_KEEP_VERT_ORDER: - pass - elif faceuv: - # XXX update - tface = me.active_uv_texture.data - - # exception only raised if Python 2.3 or lower... - try: - face_index_pairs.sort(key = lambda a: (a[0].material_index, tface[a[1]].image, a[0].smooth)) - except: - face_index_pairs.sort(lambda a,b: cmp((a[0].material_index, tface[a[1]].image, a[0].smooth), - (b[0].material_index, tface[b[1]].image, b[0].smooth))) - elif len(materials) > 1: - try: - face_index_pairs.sort(key = lambda a: (a[0].material_index, a[0].smooth)) - except: - face_index_pairs.sort(lambda a,b: cmp((a[0].material_index, a[0].smooth), - (b[0].material_index, b[0].smooth))) - else: - # no materials - try: - face_index_pairs.sort(key = lambda a: a[0].smooth) - except: - face_index_pairs.sort(lambda a,b: cmp(a[0].smooth, b[0].smooth)) -# if EXPORT_KEEP_VERT_ORDER: -# pass -# elif faceuv: -# try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth)) -# except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth))) -# elif len(materials) > 1: -# try: faces.sort(key = lambda a: (a.mat, a.smooth)) -# except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth))) -# else: -# # no materials -# try: faces.sort(key = lambda a: a.smooth) -# except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth)) - - faces = [pair[0] for pair in face_index_pairs] - - # Set the default mat to no material and no image. - contextMat = (0, 0) # Can never be this, so we will label a new material teh first chance we get. - contextSmooth = None # Will either be true or false, set bad to force initialization switch. 
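The sort just performed orders faces by (material index, image, smooth flag) so that the face loop further below only emits usemtl / s lines when the state actually changes. A small standalone sketch of that idea with made-up face tuples (Face, the sample data and the mat_%d naming are illustrative, not Blender data):

    from collections import namedtuple

    Face = namedtuple("Face", "material_index image smooth")

    faces = [
        Face(1, "brick.png", True),
        Face(0, None, False),
        Face(1, "brick.png", False),
    ]
    # Sort by (material, image, smooth); None images sort first via the "" fallback.
    faces.sort(key=lambda f: (f.material_index, f.image or "", f.smooth))

    context_mat = None
    context_smooth = None
    for f in faces:
        key = (f.material_index, f.image)
        if key != context_mat:          # material/image context switch
            print("usemtl mat_%d" % f.material_index)
            context_mat = key
        if f.smooth != context_smooth:  # smoothing context switch
            print("s 1" if f.smooth else "s off")
            context_smooth = f.smooth
        # ... face index records would be written here ...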
- - if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB: - name1 = ob.name - name2 = ob.data.name - if name1 == name2: - obnamestring = fixName(name1) - else: - obnamestring = '%s_%s' % (fixName(name1), fixName(name2)) - - if EXPORT_BLEN_OBS: - file.write('o %s\n' % obnamestring) # Write Object name - else: # if EXPORT_GROUP_BY_OB: - file.write('g %s\n' % obnamestring) - - - # Vert - for v in me.verts: - file.write('v %.6f %.6f %.6f\n' % tuple(v.co)) - - # UV - if faceuv: - uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/ - - uv_dict = {} # could use a set() here - uv_layer = me.active_uv_texture - for f, f_index in face_index_pairs: - - tface = uv_layer.data[f_index] - - uvs = tface.uv - # uvs = [tface.uv1, tface.uv2, tface.uv3] - - # # add another UV if it's a quad - # if len(f.verts) == 4: - # uvs.append(tface.uv4) - - for uv_index, uv in enumerate(uvs): - uvkey = veckey2d(uv) - try: - uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] - except: - uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict) - file.write('vt %.6f %.6f\n' % tuple(uv)) - -# uv_dict = {} # could use a set() here -# for f_index, f in enumerate(faces): - -# for uv_index, uv in enumerate(f.uv): -# uvkey = veckey2d(uv) -# try: -# uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] -# except: -# uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict) -# file.write('vt %.6f %.6f\n' % tuple(uv)) - - uv_unique_count = len(uv_dict) -# del uv, uvkey, uv_dict, f_index, uv_index - # Only need uv_unique_count and uv_face_mapping - - # NORMAL, Smooth/Non smoothed. - if EXPORT_NORMALS: - for f in faces: - if f.smooth: - for v in f: - noKey = veckey3d(v.normal) - if noKey not in globalNormals: - globalNormals[noKey] = totno - totno +=1 - file.write('vn %.6f %.6f %.6f\n' % noKey) - else: - # Hard, 1 normal from the face. - noKey = veckey3d(f.normal) - if noKey not in globalNormals: - globalNormals[noKey] = totno - totno +=1 - file.write('vn %.6f %.6f %.6f\n' % noKey) - - if not faceuv: - f_image = None - - # XXX - if EXPORT_POLYGROUPS: - # Retrieve the list of vertex groups -# vertGroupNames = me.getVertGroupNames() - - currentVGroup = '' - # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to - vgroupsMap = [[] for _i in range(len(me.verts))] -# vgroupsMap = [[] for _i in xrange(len(me.verts))] - for g in ob.vertex_groups: -# for vertexGroupName in vertGroupNames: - for vIdx, vWeight in getVertsFromGroup(me, g.index): -# for vIdx, vWeight in me.getVertsFromGroup(vertexGroupName, 1): - vgroupsMap[vIdx].append((g.name, vWeight)) - - for f_index, f in enumerate(faces): - f_v = [{"index": index, "vertex": me.verts[index]} for index in f.verts] - - # if f.verts[3] == 0: - # f_v.pop() - -# f_v= f.v - f_smooth= f.smooth - f_mat = min(f.material_index, len(materialNames)-1) -# f_mat = min(f.mat, len(materialNames)-1) - if faceuv: - - tface = me.active_uv_texture.data[face_index_pairs[f_index][1]] - - f_image = tface.image - f_uv = tface.uv - # f_uv= [tface.uv1, tface.uv2, tface.uv3] - # if len(f.verts) == 4: - # f_uv.append(tface.uv4) -# f_image = f.image -# f_uv= f.uv - - # MAKE KEY - if faceuv and f_image: # Object is always true. - key = materialNames[f_mat], f_image.name - else: - key = materialNames[f_mat], None # No image, use None instead. 
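For orientation, the face-writing branches below emit one of four OBJ index layouts depending on whether UVs and normals are exported; all indices are global across the whole file and 1-based. A tiny illustration with hypothetical index values:

    # Hypothetical indices, only to show the record layouts written below.
    vert_idx, uv_idx, normal_idx = 12, 5, 3

    print('f %d/%d/%d' % (vert_idx, uv_idx, normal_idx))  # vert/uv/normal
    print('f %d/%d' % (vert_idx, uv_idx))                 # vert/uv (no normals)
    print('f %d//%d' % (vert_idx, normal_idx))            # vert//normal (no UVs)
    print('f %d' % vert_idx)                              # vert only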
- - # Write the vertex group - if EXPORT_POLYGROUPS: - if len(ob.vertex_groups): - # find what vertext group the face belongs to - theVGroup = findVertexGroupName(f,vgroupsMap) - if theVGroup != currentVGroup: - currentVGroup = theVGroup - file.write('g %s\n' % theVGroup) -# # Write the vertex group -# if EXPORT_POLYGROUPS: -# if vertGroupNames: -# # find what vertext group the face belongs to -# theVGroup = findVertexGroupName(f,vgroupsMap) -# if theVGroup != currentVGroup: -# currentVGroup = theVGroup -# file.write('g %s\n' % theVGroup) - - # CHECK FOR CONTEXT SWITCH - if key == contextMat: - pass # Context alredy switched, dont do anything - else: - if key[0] == None and key[1] == None: - # Write a null material, since we know the context has changed. - if EXPORT_GROUP_BY_MAT: - # can be mat_image or (null) - file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.data.name)) ) # can be mat_image or (null) - file.write('usemtl (null)\n') # mat, image - - else: - mat_data= MTL_DICT.get(key) - if not mat_data: - # First add to global dict so we can export to mtl - # Then write mtl - - # Make a new names from the mat and image name, - # converting any spaces to underscores with fixName. - - # If none image dont bother adding it to the name - if key[1] == None: - mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image - else: - mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image - - if EXPORT_GROUP_BY_MAT: - file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.data.name), mat_data[0]) ) # can be mat_image or (null) - - file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null) - - contextMat = key - if f_smooth != contextSmooth: - if f_smooth: # on now off - file.write('s 1\n') - contextSmooth = f_smooth - else: # was off now on - file.write('s off\n') - contextSmooth = f_smooth - - file.write('f') - if faceuv: - if EXPORT_NORMALS: - if f_smooth: # Smoothed, use vertex normals - for vi, v in enumerate(f_v): - file.write( ' %d/%d/%d' % \ - (v["index"] + totverts, - totuvco + uv_face_mapping[f_index][vi], - globalNormals[ veckey3d(v["vertex"].normal) ]) ) # vert, uv, normal - - else: # No smoothing, face normals - no = globalNormals[ veckey3d(f.normal) ] - for vi, v in enumerate(f_v): - file.write( ' %d/%d/%d' % \ - (v["index"] + totverts, - totuvco + uv_face_mapping[f_index][vi], - no) ) # vert, uv, normal - else: # No Normals - for vi, v in enumerate(f_v): - file.write( ' %d/%d' % (\ - v["index"] + totverts,\ - totuvco + uv_face_mapping[f_index][vi])) # vert, uv - - face_vert_index += len(f_v) - - else: # No UV's - if EXPORT_NORMALS: - if f_smooth: # Smoothed, use vertex normals - for v in f_v: - file.write( ' %d//%d' % - (v["index"] + totverts, globalNormals[ veckey3d(v["vertex"].normal) ]) ) - else: # No smoothing, face normals - no = globalNormals[ veckey3d(f.normal) ] - for v in f_v: - file.write( ' %d//%d' % (v["index"] + totverts, no) ) - else: # No Normals - for v in f_v: - file.write( ' %d' % (v["index"] + totverts) ) - - file.write('\n') - - # Write edges. 
- if EXPORT_EDGES: - for ed in edges: - if ed.loose: - file.write('f %d %d\n' % (ed.verts[0] + totverts, ed.verts[1] + totverts)) - - # Make the indicies global rather then per mesh - totverts += len(me.verts) - if faceuv: - totuvco += uv_unique_count - - # clean up - bpy.data.remove_mesh(me) - - if ob_main.dupli_type != 'NONE': - ob_main.free_dupli_list() - - file.close() - - - # Now we have all our materials, save them - if EXPORT_MTL: - write_mtl(scene, mtlfilename, EXPORT_COPY_IMAGES) -# if EXPORT_COPY_IMAGES: -# dest_dir = os.path.basename(filename) -# # dest_dir = filename -# # # Remove chars until we are just the path. -# # while dest_dir and dest_dir[-1] not in '\\/': -# # dest_dir = dest_dir[:-1] -# if dest_dir: -# copy_images(dest_dir) -# else: -# print('\tError: "%s" could not be used as a base for an image path.' % filename) - - print("OBJ Export time: %.2f" % (time.clock() - time1)) -# print "OBJ Export time: %.2f" % (sys.time() - time1) - -def do_export(filename, context, - EXPORT_APPLY_MODIFIERS = True, # not used - EXPORT_ROTX90 = True, # wrong - EXPORT_TRI = False, # ok - EXPORT_EDGES = False, - EXPORT_NORMALS = False, # not yet - EXPORT_NORMALS_HQ = False, # not yet - EXPORT_UV = True, # ok - EXPORT_MTL = True, - EXPORT_SEL_ONLY = True, # ok - EXPORT_ALL_SCENES = False, # XXX not working atm - EXPORT_ANIMATION = False, - EXPORT_COPY_IMAGES = False, - EXPORT_BLEN_OBS = True, - EXPORT_GROUP_BY_OB = False, - EXPORT_GROUP_BY_MAT = False, - EXPORT_KEEP_VERT_ORDER = False, - EXPORT_POLYGROUPS = False, - EXPORT_CURVE_AS_NURBS = True): - # Window.EditMode(0) - # Window.WaitCursor(1) - - base_name, ext = splitExt(filename) - context_name = [base_name, '', '', ext] # Base name, scene name, frame number, extension - - orig_scene = context.scene - -# if EXPORT_ALL_SCENES: -# export_scenes = bpy.data.scenes -# else: -# export_scenes = [orig_scene] - - # XXX only exporting one scene atm since changing - # current scene is not possible. - # Brecht says that ideally in 2.5 we won't need such a function, - # allowing multiple scenes open at once. - export_scenes = [orig_scene] - - # Export all scenes. - for scn in export_scenes: - # scn.makeCurrent() # If already current, this is not slow. - # context = scn.getRenderingContext() - orig_frame = scn.current_frame - - if EXPORT_ALL_SCENES: # Add scene name into the context_name - context_name[1] = '_%s' % BPySys_cleanName(scn.name) # WARNING, its possible that this could cause a collision. we could fix if were feeling parranoied. - - # Export an animation? - if EXPORT_ANIMATION: - scene_frames = range(scn.start_frame, context.end_frame+1) # Up to and including the end frame. - else: - scene_frames = [orig_frame] # Dont export an animation. - - # Loop through all frames in the scene and export. - for frame in scene_frames: - if EXPORT_ANIMATION: # Add frame to the filename. - context_name[2] = '_%.6d' % frame - - scn.current_frame = frame - if EXPORT_SEL_ONLY: - export_objects = context.selected_objects - else: - export_objects = scn.objects - - full_path= ''.join(context_name) - - # erm... bit of a problem here, this can overwrite files when exporting frames. not too bad. - # EXPORT THE FILE. 
- write(full_path, export_objects, scn, - EXPORT_TRI, EXPORT_EDGES, EXPORT_NORMALS, - EXPORT_NORMALS_HQ, EXPORT_UV, EXPORT_MTL, - EXPORT_COPY_IMAGES, EXPORT_APPLY_MODIFIERS, - EXPORT_ROTX90, EXPORT_BLEN_OBS, - EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER, - EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS) - - - scn.current_frame = orig_frame - - # Restore old active scene. -# orig_scene.makeCurrent() -# Window.WaitCursor(0) - - -class EXPORT_OT_obj(bpy.types.Operator): - ''' - Currently the exporter lacks these features: - * nurbs - * multiple scene export (only active scene is written) - * particles - ''' - __idname__ = "export.obj" - __label__ = 'Export OBJ' - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [ - bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the OBJ file", maxlen= 1024, default= ""), - - # context group - bpy.props.BoolProperty(attr="use_selection", name="Selection Only", description="", default= False), - bpy.props.BoolProperty(attr="use_all_scenes", name="All Scenes", description="", default= False), - bpy.props.BoolProperty(attr="use_animation", name="All Animation", description="", default= False), - - # object group - bpy.props.BoolProperty(attr="use_modifiers", name="Apply Modifiers", description="", default= True), - bpy.props.BoolProperty(attr="use_rotate90", name="Rotate X90", description="", default= True), - - # extra data group - bpy.props.BoolProperty(attr="use_edges", name="Edges", description="", default= True), - bpy.props.BoolProperty(attr="use_normals", name="Normals", description="", default= False), - bpy.props.BoolProperty(attr="use_hq_normals", name="High Quality Normals", description="", default= True), - bpy.props.BoolProperty(attr="use_uvs", name="UVs", description="", default= True), - bpy.props.BoolProperty(attr="use_materials", name="Materials", description="", default= True), - bpy.props.BoolProperty(attr="copy_images", name="Copy Images", description="", default= False), - bpy.props.BoolProperty(attr="use_triangles", name="Triangulate", description="", default= False), - bpy.props.BoolProperty(attr="use_vertex_groups", name="Polygroups", description="", default= False), - bpy.props.BoolProperty(attr="use_nurbs", name="Nurbs", description="", default= False), - - # grouping group - bpy.props.BoolProperty(attr="use_blen_objects", name="Objects as OBJ Objects", description="", default= True), - bpy.props.BoolProperty(attr="group_by_object", name="Objects as OBJ Groups ", description="", default= False), - bpy.props.BoolProperty(attr="group_by_material", name="Material Groups", description="", default= False), - bpy.props.BoolProperty(attr="keep_vertex_order", name="Keep Vertex Order", description="", default= False) - ] - - def execute(self, context): - - do_export(self.path, context, - EXPORT_TRI=self.use_triangles, - EXPORT_EDGES=self.use_edges, - EXPORT_NORMALS=self.use_normals, - EXPORT_NORMALS_HQ=self.use_hq_normals, - EXPORT_UV=self.use_uvs, - EXPORT_MTL=self.use_materials, - EXPORT_COPY_IMAGES=self.copy_images, - EXPORT_APPLY_MODIFIERS=self.use_modifiers, - EXPORT_ROTX90=self.use_rotate90, - EXPORT_BLEN_OBS=self.use_blen_objects, - EXPORT_GROUP_BY_OB=self.group_by_object, - EXPORT_GROUP_BY_MAT=self.group_by_material, - EXPORT_KEEP_VERT_ORDER=self.keep_vertex_order, - EXPORT_POLYGROUPS=self.use_vertex_groups, - EXPORT_CURVE_AS_NURBS=self.use_nurbs, - 
EXPORT_SEL_ONLY=self.use_selection, - EXPORT_ALL_SCENES=self.use_all_scenes) - - return ('FINISHED',) - - def invoke(self, context, event): - wm = context.manager - wm.add_fileselect(self.__operator__) - return ('RUNNING_MODAL',) - - def poll(self, context): # Poll isnt working yet - print("Poll") - return context.active_object != None - -bpy.ops.add(EXPORT_OT_obj) - -if __name__ == "__main__": - bpy.ops.EXPORT_OT_obj(filename="/tmp/test.obj") - -# CONVERSION ISSUES -# - matrix problem -# - duplis - only tested dupliverts -# - NURBS - needs API additions -# - all scenes export -# + normals calculation -# - get rid of cleanName somehow diff --git a/release/scripts/io/export_ply.py b/release/scripts/io/export_ply.py deleted file mode 100644 index 8e79c3741bb..00000000000 --- a/release/scripts/io/export_ply.py +++ /dev/null @@ -1,279 +0,0 @@ -import bpy - -__author__ = "Bruce Merry" -__version__ = "0.93" -__bpydoc__ = """\ -This script exports Stanford PLY files from Blender. It supports normals, -colours, and texture coordinates per face or per vertex. -Only one mesh can be exported at a time. -""" - -# Copyright (C) 2004, 2005: Bruce Merry, bmerry@cs.uct.ac.za -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# Vector rounding se we can use as keys -# -# Updated on Aug 11, 2008 by Campbell Barton -# - added 'comment' prefix to comments - Needed to comply with the PLY spec. -# -# Updated on Jan 1, 2007 by Gabe Ghearing -# - fixed normals so they are correctly smooth/flat -# - fixed crash when the model doesn't have uv coords or vertex colors -# - fixed crash when the model has vertex colors but doesn't have uv coords -# - changed float32 to float and uint8 to uchar for compatibility -# Errata/Notes as of Jan 1, 2007 -# - script exports texture coords if they exist even if TexFace isn't selected (not a big deal to me) -# - ST(R) should probably be renamed UV(T) like in most PLY files (importer needs to be updated to take either) -# -# Updated on Jan 3, 2007 by Gabe Ghearing -# - fixed "sticky" vertex UV exporting -# - added pupmenu to enable/disable exporting normals, uv coords, and colors -# Errata/Notes as of Jan 3, 2007 -# - ST(R) coords should probably be renamed UV(T) like in most PLY files (importer needs to be updated to take either) -# - edges should be exported since PLY files support them -# - code is getting spaghettish, it should be refactored... 
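Before the rounding helpers and write() below, a minimal standalone sketch of the ASCII PLY layout that function produces, reduced to positions and faces only (no normals, UVs or colours; the output path and the single triangle are made up):

    # Write a tiny ASCII PLY file: header, vertex list, then face list.
    verts = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
    faces = [(0, 1, 2)]

    with open("/tmp/minimal.ply", "w") as f:
        f.write("ply\nformat ascii 1.0\n")
        f.write("element vertex %d\n" % len(verts))
        f.write("property float x\nproperty float y\nproperty float z\n")
        f.write("element face %d\n" % len(faces))
        f.write("property list uchar uint vertex_indices\n")
        f.write("end_header\n")
        for v in verts:
            f.write("%.6f %.6f %.6f\n" % v)
        for face in faces:
            f.write("%d %s\n" % (len(face), " ".join(str(i) for i in face)))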
-# - - -def rvec3d(v): return round(v[0], 6), round(v[1], 6), round(v[2], 6) -def rvec2d(v): return round(v[0], 6), round(v[1], 6) - -def write(filename, scene, ob, \ - EXPORT_APPLY_MODIFIERS= True,\ - EXPORT_NORMALS= True,\ - EXPORT_UV= True,\ - EXPORT_COLORS= True\ - ): - - if not filename.lower().endswith('.ply'): - filename += '.ply' - - if not ob: - raise Exception("Error, Select 1 active object") - return - - file = open(filename, 'w') - - - #EXPORT_EDGES = Draw.Create(0) - """ - is_editmode = Blender.Window.EditMode() - if is_editmode: - Blender.Window.EditMode(0, '', 0) - - Window.WaitCursor(1) - """ - - #mesh = BPyMesh.getMeshFromObject(ob, None, EXPORT_APPLY_MODIFIERS, False, scn) # XXX - if EXPORT_APPLY_MODIFIERS: - mesh = ob.create_mesh(True, 'PREVIEW') - else: - mesh = ob.data - - if not mesh: - raise ("Error, could not get mesh data from active object") - return - - # mesh.transform(ob.matrixWorld) # XXX - - faceUV = len(mesh.uv_textures) > 0 - vertexUV = len(mesh.sticky) > 0 - vertexColors = len(mesh.vertex_colors) > 0 - - if (not faceUV) and (not vertexUV): EXPORT_UV = False - if not vertexColors: EXPORT_COLORS = False - - if not EXPORT_UV: faceUV = vertexUV = False - if not EXPORT_COLORS: vertexColors = False - - if faceUV: - active_uv_layer = None - for lay in mesh.uv_textures: - if lay.active: - active_uv_layer= lay.data - break - if not active_uv_layer: - EXPORT_UV = False - faceUV = None - - if vertexColors: - active_col_layer = None - for lay in mesh.vertex_colors: - if lay.active: - active_col_layer= lay.data - if not active_col_layer: - EXPORT_COLORS = False - vertexColors = None - - # incase - color = uvcoord = uvcoord_key = normal = normal_key = None - - mesh_verts = mesh.verts # save a lookup - ply_verts = [] # list of dictionaries - # vdict = {} # (index, normal, uv) -> new index - vdict = [{} for i in range(len(mesh_verts))] - ply_faces = [[] for f in range(len(mesh.faces))] - vert_count = 0 - for i, f in enumerate(mesh.faces): - - - smooth = f.smooth - if not smooth: - normal = tuple(f.normal) - normal_key = rvec3d(normal) - - if faceUV: - uv = active_uv_layer[i] - uv = uv.uv1, uv.uv2, uv.uv3, uv.uv4 # XXX - crufty :/ - if vertexColors: - col = active_col_layer[i] - col = col.color1, col.color2, col.color3, col.color4 - - f_verts= f.verts - - pf= ply_faces[i] - for j, vidx in enumerate(f_verts): - v = mesh_verts[vidx] - - if smooth: - normal= tuple(v.normal) - normal_key = rvec3d(normal) - - if faceUV: - uvcoord= uv[j][0], 1.0-uv[j][1] - uvcoord_key = rvec2d(uvcoord) - elif vertexUV: - uvcoord= v.uvco[0], 1.0-v.uvco[1] - uvcoord_key = rvec2d(uvcoord) - - if vertexColors: - color= col[j] - color= int(color[0]*255.0), int(color[1]*255.0), int(color[2]*255.0) - - - key = normal_key, uvcoord_key, color - - vdict_local = vdict[vidx] - pf_vidx = vdict_local.get(key) # Will be None initially - - if pf_vidx == None: # same as vdict_local.has_key(key) - pf_vidx = vdict_local[key] = vert_count; - ply_verts.append((vidx, normal, uvcoord, color)) - vert_count += 1 - - pf.append(pf_vidx) - - file.write('ply\n') - file.write('format ascii 1.0\n') - version = "2.5" # Blender.Get('version') - file.write('comment Created by Blender3D %s - www.blender.org, source file: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1] )) - - file.write('element vertex %d\n' % len(ply_verts)) - - file.write('property float x\n') - file.write('property float y\n') - file.write('property float z\n') - - # XXX - """ - if EXPORT_NORMALS: - file.write('property float nx\n') - 
file.write('property float ny\n') - file.write('property float nz\n') - """ - if EXPORT_UV: - file.write('property float s\n') - file.write('property float t\n') - if EXPORT_COLORS: - file.write('property uchar red\n') - file.write('property uchar green\n') - file.write('property uchar blue\n') - - file.write('element face %d\n' % len(mesh.faces)) - file.write('property list uchar uint vertex_indices\n') - file.write('end_header\n') - - for i, v in enumerate(ply_verts): - file.write('%.6f %.6f %.6f ' % tuple(mesh_verts[v[0]].co)) # co - """ - if EXPORT_NORMALS: - file.write('%.6f %.6f %.6f ' % v[1]) # no - """ - if EXPORT_UV: file.write('%.6f %.6f ' % v[2]) # uv - if EXPORT_COLORS: file.write('%u %u %u' % v[3]) # col - file.write('\n') - - for pf in ply_faces: - if len(pf)==3: file.write('3 %d %d %d\n' % tuple(pf)) - else: file.write('4 %d %d %d %d\n' % tuple(pf)) - - file.close() - print("writing", filename, "done") - - if EXPORT_APPLY_MODIFIERS: - bpy.data.remove_mesh(mesh) - - # XXX - """ - if is_editmode: - Blender.Window.EditMode(1, '', 0) - """ - -class EXPORT_OT_ply(bpy.types.Operator): - '''Export a single object as a stanford PLY with normals, colours and texture coordinates.''' - __idname__ = "export.ply" - __label__ = "Export PLY" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [ - bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the PLY file", maxlen= 1024, default= ""), - bpy.props.BoolProperty(attr="use_modifiers", name="Apply Modifiers", description="Apply Modifiers to the exported mesh", default= True), - bpy.props.BoolProperty(attr="use_normals", name="Export Normals", description="Export Normals for smooth and hard shaded faces", default= True), - bpy.props.BoolProperty(attr="use_uvs", name="Export UVs", description="Exort the active UV layer", default= True), - bpy.props.BoolProperty(attr="use_colors", name="Export Vertex Colors", description="Exort the active vertex color layer", default= True) - ] - - def poll(self, context): - return context.active_object != None - - def execute(self, context): - # print("Selected: " + context.active_object.name) - - if not self.path: - raise Exception("filename not set") - - write(self.path, context.scene, context.active_object,\ - EXPORT_APPLY_MODIFIERS = self.use_modifiers, - EXPORT_NORMALS = self.use_normals, - EXPORT_UV = self.use_uvs, - EXPORT_COLORS = self.use_colors, - ) - - return ('FINISHED',) - - def invoke(self, context, event): - wm = context.manager - wm.add_fileselect(self.__operator__) - return ('RUNNING_MODAL',) - - -bpy.ops.add(EXPORT_OT_ply) - -if __name__ == "__main__": - bpy.ops.EXPORT_OT_ply(path="/tmp/test.ply") - - diff --git a/release/scripts/io/export_x3d.py b/release/scripts/io/export_x3d.py deleted file mode 100644 index db29afc7d6d..00000000000 --- a/release/scripts/io/export_x3d.py +++ /dev/null @@ -1,1240 +0,0 @@ -#!BPY -""" Registration info for Blender menus: -Name: 'X3D Extensible 3D (.x3d)...' -Blender: 245 -Group: 'Export' -Tooltip: 'Export selection to Extensible 3D file (.x3d)' -""" - -__author__ = ("Bart", "Campbell Barton") -__email__ = ["Bart, bart:neeneenee*de"] -__url__ = ["Author's (Bart) homepage, http://www.neeneenee.de/vrml"] -__version__ = "2006/01/17" -__bpydoc__ = """\ -This script exports to X3D format. - -Usage: - -Run this script from "File->Export" menu. 
A pop-up will ask whether you -want to export only selected or all relevant objects. - -Known issues:
- Doesn't handle multiple materials (don't use material indices);
- Doesn't handle multiple UV textures on a single mesh (create a mesh for each texture);
- Can't get the texture array associated with material * not the UV ones; -""" - - -# $Id$ -# -#------------------------------------------------------------------------ -# X3D exporter for blender 2.36 or above -# -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# - -#################################### -# Library dependancies -#################################### - -import math -import os - -import bpy -import Mathutils - -from export_3ds import create_derived_objects, free_derived_objects - -# import Blender -# from Blender import Object, Lamp, Draw, Image, Text, sys, Mesh -# from Blender.Scene import Render -# import BPyObject -# import BPyMesh - -# -DEG2RAD=0.017453292519943295 -MATWORLD= Mathutils.RotationMatrix(-90, 4, 'x') - -#################################### -# Global Variables -#################################### - -filename = "" -# filename = Blender.Get('filename') -_safeOverwrite = True - -extension = '' - -########################################################## -# Functions for writing output file -########################################################## - -class x3d_class: - - def __init__(self, filename): - #--- public you can change these --- - self.writingcolor = 0 - self.writingtexture = 0 - self.writingcoords = 0 - self.proto = 1 - self.matonly = 0 - self.share = 0 - self.billnode = 0 - self.halonode = 0 - self.collnode = 0 - self.tilenode = 0 - self.verbose=2 # level of verbosity in console 0-none, 1-some, 2-most - self.cp=3 # decimals for material color values 0.000 - 1.000 - self.vp=3 # decimals for vertex coordinate values 0.000 - n.000 - self.tp=3 # decimals for texture coordinate values 0.000 - 1.000 - self.it=3 - - #--- class private don't touch --- - self.texNames={} # dictionary of textureNames - self.matNames={} # dictionary of materiaNames - self.meshNames={} # dictionary of meshNames - self.indentLevel=0 # keeps track of current indenting - self.filename=filename - self.file = None - if filename.lower().endswith('.x3dz'): - try: - import gzip - self.file = gzip.open(filename, "w") - except: - print("failed to import compression modules, exporting uncompressed") - self.filename = filename[:-1] # remove trailing z - - if self.file == None: - self.file = open(self.filename, "w") - - self.bNav=0 - self.nodeID=0 - self.namesReserved=[ "Anchor","Appearance","Arc2D","ArcClose2D","AudioClip","Background","Billboard", - "BooleanFilter","BooleanSequencer","BooleanToggle","BooleanTrigger","Box","Circle2D", - "Collision","Color","ColorInterpolator","ColorRGBA","component","Cone","connect", - "Contour2D","ContourPolyline2D","Coordinate","CoordinateDouble","CoordinateInterpolator", - "CoordinateInterpolator2D","Cylinder","CylinderSensor","DirectionalLight","Disk2D", - 
"ElevationGrid","EspduTransform","EXPORT","ExternProtoDeclare","Extrusion","field", - "fieldValue","FillProperties","Fog","FontStyle","GeoCoordinate","GeoElevationGrid", - "GeoLocationLocation","GeoLOD","GeoMetadata","GeoOrigin","GeoPositionInterpolator", - "GeoTouchSensor","GeoViewpoint","Group","HAnimDisplacer","HAnimHumanoid","HAnimJoint", - "HAnimSegment","HAnimSite","head","ImageTexture","IMPORT","IndexedFaceSet", - "IndexedLineSet","IndexedTriangleFanSet","IndexedTriangleSet","IndexedTriangleStripSet", - "Inline","IntegerSequencer","IntegerTrigger","IS","KeySensor","LineProperties","LineSet", - "LoadSensor","LOD","Material","meta","MetadataDouble","MetadataFloat","MetadataInteger", - "MetadataSet","MetadataString","MovieTexture","MultiTexture","MultiTextureCoordinate", - "MultiTextureTransform","NavigationInfo","Normal","NormalInterpolator","NurbsCurve", - "NurbsCurve2D","NurbsOrientationInterpolator","NurbsPatchSurface", - "NurbsPositionInterpolator","NurbsSet","NurbsSurfaceInterpolator","NurbsSweptSurface", - "NurbsSwungSurface","NurbsTextureCoordinate","NurbsTrimmedSurface","OrientationInterpolator", - "PixelTexture","PlaneSensor","PointLight","PointSet","Polyline2D","Polypoint2D", - "PositionInterpolator","PositionInterpolator2D","ProtoBody","ProtoDeclare","ProtoInstance", - "ProtoInterface","ProximitySensor","ReceiverPdu","Rectangle2D","ROUTE","ScalarInterpolator", - "Scene","Script","Shape","SignalPdu","Sound","Sphere","SphereSensor","SpotLight","StaticGroup", - "StringSensor","Switch","Text","TextureBackground","TextureCoordinate","TextureCoordinateGenerator", - "TextureTransform","TimeSensor","TimeTrigger","TouchSensor","Transform","TransmitterPdu", - "TriangleFanSet","TriangleSet","TriangleSet2D","TriangleStripSet","Viewpoint","VisibilitySensor", - "WorldInfo","X3D","XvlShell","VertexShader","FragmentShader","MultiShaderAppearance","ShaderAppearance" ] - self.namesStandard=[ "Empty","Empty.000","Empty.001","Empty.002","Empty.003","Empty.004","Empty.005", - "Empty.006","Empty.007","Empty.008","Empty.009","Empty.010","Empty.011","Empty.012", - "Scene.001","Scene.002","Scene.003","Scene.004","Scene.005","Scene.06","Scene.013", - "Scene.006","Scene.007","Scene.008","Scene.009","Scene.010","Scene.011","Scene.012", - "World","World.000","World.001","World.002","World.003","World.004","World.005" ] - self.namesFog=[ "","LINEAR","EXPONENTIAL","" ] - -########################################################## -# Writing nodes routines -########################################################## - - def writeHeader(self): - #bfile = sys.expandpath( Blender.Get('filename') ).replace('<', '<').replace('>', '>') - bfile = self.filename.replace('<', '<').replace('>', '>') # use outfile name - self.file.write("\n") - self.file.write("\n") - self.file.write("\n") - self.file.write("\n") - self.file.write("\t\n" % os.path.basename(bfile)) - # self.file.write("\t\n" % sys.basename(bfile)) - self.file.write("\t\n" % '2.5') - # self.file.write("\t\n" % Blender.Get('version')) - self.file.write("\t\n") - self.file.write("\n") - self.file.write("\n") - - # This functionality is poorly defined, disabling for now - campbell - ''' - def writeInline(self): - inlines = Blender.Scene.Get() - allinlines = len(inlines) - if scene != inlines[0]: - return - else: - for i in xrange(allinlines): - nameinline=inlines[i].name - if (nameinline not in self.namesStandard) and (i > 0): - self.file.write("" % nameinline) - self.file.write("\n\n") - - - def writeScript(self): - textEditor = Blender.Text.Get() - 
alltext = len(textEditor) - for i in xrange(alltext): - nametext = textEditor[i].name - nlines = textEditor[i].getNLines() - if (self.proto == 1): - if (nametext == "proto" or nametext == "proto.js" or nametext == "proto.txt") and (nlines != None): - nalllines = len(textEditor[i].asLines()) - alllines = textEditor[i].asLines() - for j in xrange(nalllines): - self.writeIndented(alllines[j] + "\n") - elif (self.proto == 0): - if (nametext == "route" or nametext == "route.js" or nametext == "route.txt") and (nlines != None): - nalllines = len(textEditor[i].asLines()) - alllines = textEditor[i].asLines() - for j in xrange(nalllines): - self.writeIndented(alllines[j] + "\n") - self.writeIndented("\n") - ''' - - def writeViewpoint(self, ob, mat, scene): - context = scene.render_data - # context = scene.render - ratio = float(context.resolution_x)/float(context.resolution_y) - # ratio = float(context.imageSizeY())/float(context.imageSizeX()) - lens = (360* (math.atan(ratio *16 / ob.data.lens) / math.pi))*(math.pi/180) - # lens = (360* (math.atan(ratio *16 / ob.data.getLens()) / math.pi))*(math.pi/180) - lens = min(lens, math.pi) - - # get the camera location, subtract 90 degress from X to orient like X3D does - # mat = ob.matrixWorld - mat is now passed! - - loc = self.rotatePointForVRML(mat.translationPart()) - rot = mat.toEuler() - rot = (((rot[0]-90)), rot[1], rot[2]) - # rot = (((rot[0]-90)*DEG2RAD), rot[1]*DEG2RAD, rot[2]*DEG2RAD) - nRot = self.rotatePointForVRML( rot ) - # convert to Quaternion and to Angle Axis - Q = self.eulerToQuaternions(nRot[0], nRot[1], nRot[2]) - Q1 = self.multiplyQuaternions(Q[0], Q[1]) - Qf = self.multiplyQuaternions(Q1, Q[2]) - angleAxis = self.quaternionToAngleAxis(Qf) - self.file.write("\n\n" % (lens)) - - def writeFog(self, world): - if world: - mtype = world.mist.falloff - # mtype = world.getMistype() - mparam = world.mist - # mparam = world.getMist() - grd = world.horizon_color - # grd = world.getHor() - grd0, grd1, grd2 = grd[0], grd[1], grd[2] - else: - return - if (mtype == 'LINEAR' or mtype == 'INVERSE_QUADRATIC'): - mtype = 1 if mtype == 'LINEAR' else 2 - # if (mtype == 1 or mtype == 2): - self.file.write("\n\n" % round(mparam[2],self.cp)) - else: - return - - def writeNavigationInfo(self, scene): - self.file.write('\n') - - def writeSpotLight(self, ob, mtx, lamp, world): - safeName = self.cleanStr(ob.name) - if world: - ambi = world.ambient_color - # ambi = world.amb - ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5 - else: - ambi = 0 - ambientIntensity = 0 - - # compute cutoff and beamwidth - intensity=min(lamp.energy/1.75,1.0) - beamWidth=((lamp.spot_size*math.pi)/180.0)*.37; - # beamWidth=((lamp.spotSize*math.pi)/180.0)*.37; - cutOffAngle=beamWidth*1.3 - - dx,dy,dz=self.computeDirection(mtx) - # note -dx seems to equal om[3][0] - # note -dz seems to equal om[3][1] - # note dy seems to equal om[3][2] - - #location=(ob.matrixWorld*MATWORLD).translationPart() # now passed - location=(mtx*MATWORLD).translationPart() - - radius = lamp.distance*math.cos(beamWidth) - # radius = lamp.dist*math.cos(beamWidth) - self.file.write("\n\n" % (round(location[0],3), round(location[1],3), round(location[2],3))) - - - def writeDirectionalLight(self, ob, mtx, lamp, world): - safeName = self.cleanStr(ob.name) - if world: - ambi = world.ambient_color - # ambi = world.amb - ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5 - else: - ambi = 0 - ambientIntensity = 0 - - intensity=min(lamp.energy/1.75,1.0) - (dx,dy,dz)=self.computeDirection(mtx) 
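# For reference: the lamp writers above (writeSpotLight, writeDirectionalLight,
# writePointLight) all derive their X3D fields with the same arithmetic. Below is
# a minimal standalone sketch of that mapping; the helper name and the sample
# values are illustrative only, and nothing beyond the stdlib math module is assumed.
import math

def x3d_light_params(energy, world_ambient_rgb, spot_size_deg=None):
    # clamp Blender lamp energy into X3D's 0..1 intensity range
    intensity = min(energy / 1.75, 1.0)
    # average the world ambient colour, then damp it, as the writers above do
    ambient_intensity = (sum(world_ambient_rgb) / 3.0) / 2.5
    params = {'intensity': intensity, 'ambientIntensity': ambient_intensity}
    if spot_size_deg is not None:  # spot lamps also get a cone
        beam_width = (spot_size_deg * math.pi / 180.0) * 0.37
        params['beamWidth'] = beam_width
        params['cutOffAngle'] = beam_width * 1.3
    return params

# e.g. x3d_light_params(2.0, (0.05, 0.05, 0.05), spot_size_deg=45)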
- self.file.write("\n\n" % (round(dx,4),round(dy,4),round(dz,4))) - - def writePointLight(self, ob, mtx, lamp, world): - safeName = self.cleanStr(ob.name) - if world: - ambi = world.ambient_color - # ambi = world.amb - ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5 - else: - ambi = 0 - ambientIntensity = 0 - - # location=(ob.matrixWorld*MATWORLD).translationPart() # now passed - location= (mtx*MATWORLD).translationPart() - - self.file.write("\n\n" % (round(location[0],3), round(location[1],3), round(location[2],3))) - ''' - def writeNode(self, ob, mtx): - obname=str(ob.name) - if obname in self.namesStandard: - return - else: - dx,dy,dz = self.computeDirection(mtx) - # location=(ob.matrixWorld*MATWORLD).translationPart() - location=(mtx*MATWORLD).translationPart() - self.writeIndented("<%s\n" % obname,1) - self.writeIndented("direction=\"%s %s %s\"\n" % (round(dx,3),round(dy,3),round(dz,3))) - self.writeIndented("location=\"%s %s %s\"\n" % (round(location[0],3), round(location[1],3), round(location[2],3))) - self.writeIndented("/>\n",-1) - self.writeIndented("\n") - ''' - def secureName(self, name): - name = name + str(self.nodeID) - self.nodeID=self.nodeID+1 - if len(name) <= 3: - newname = "_" + str(self.nodeID) - return "%s" % (newname) - else: - for bad in ['"','#',"'",',','.','[','\\',']','{','}']: - name=name.replace(bad,'_') - if name in self.namesReserved: - newname = name[0:3] + "_" + str(self.nodeID) - return "%s" % (newname) - elif name[0].isdigit(): - newname = "_" + name + str(self.nodeID) - return "%s" % (newname) - else: - newname = name - return "%s" % (newname) - - def writeIndexedFaceSet(self, ob, mesh, mtx, world, EXPORT_TRI = False): - imageMap={} # set of used images - sided={} # 'one':cnt , 'two':cnt - vColors={} # 'multi':1 - meshName = self.cleanStr(ob.name) - - meshME = self.cleanStr(ob.data.name) # We dont care if its the mesh name or not - # meshME = self.cleanStr(ob.getData(mesh=1).name) # We dont care if its the mesh name or not - if len(mesh.faces) == 0: return - mode = [] - # mode = 0 - if mesh.active_uv_texture: - # if mesh.faceUV: - for face in mesh.active_uv_texture.data: - # for face in mesh.faces: - if face.halo and 'HALO' not in mode: - mode += ['HALO'] - if face.billboard and 'BILLBOARD' not in mode: - mode += ['BILLBOARD'] - if face.object_color and 'OBJECT_COLOR' not in mode: - mode += ['OBJECT_COLOR'] - if face.collision and 'COLLISION' not in mode: - mode += ['COLLISION'] - # mode |= face.mode - - if 'HALO' in mode and self.halonode == 0: - # if mode & Mesh.FaceModes.HALO and self.halonode == 0: - self.writeIndented("\n",1) - self.halonode = 1 - elif 'BILLBOARD' in mode and self.billnode == 0: - # elif mode & Mesh.FaceModes.BILLBOARD and self.billnode == 0: - self.writeIndented("\n",1) - self.billnode = 1 - elif 'OBJECT_COLOR' in mode and self.matonly == 0: - # elif mode & Mesh.FaceModes.OBCOL and self.matonly == 0: - self.matonly = 1 - # TF_TILES is marked as deprecated in DNA_meshdata_types.h - # elif mode & Mesh.FaceModes.TILES and self.tilenode == 0: - # self.tilenode = 1 - elif 'COLLISION' not in mode and self.collnode == 0: - # elif not mode & Mesh.FaceModes.DYNAMIC and self.collnode == 0: - self.writeIndented("\n",1) - self.collnode = 1 - - nIFSCnt=self.countIFSSetsNeeded(mesh, imageMap, sided, vColors) - - if nIFSCnt > 1: - self.writeIndented("\n" % ("G_", meshName),1) - - if 'two' in sided and sided['two'] > 0: - bTwoSided=1 - else: - bTwoSided=0 - - # mtx = ob.matrixWorld * MATWORLD # mtx is now passed - mtx = mtx * 
MATWORLD - - loc= mtx.translationPart() - sca= mtx.scalePart() - quat = mtx.toQuat() - rot= quat.axis - - self.writeIndented('\n' % \ - (meshName, loc[0], loc[1], loc[2], sca[0], sca[1], sca[2], rot[0], rot[1], rot[2], quat.angle) ) - # self.writeIndented('\n' % \ - # (meshName, loc[0], loc[1], loc[2], sca[0], sca[1], sca[2], rot[0], rot[1], rot[2], quat.angle*DEG2RAD) ) - - self.writeIndented("\n",1) - maters=mesh.materials - hasImageTexture=0 - issmooth=0 - - if len(maters) > 0 or mesh.active_uv_texture: - # if len(maters) > 0 or mesh.faceUV: - self.writeIndented("\n", 1) - # right now this script can only handle a single material per mesh. - if len(maters) >= 1: - mat=maters[0] - # matFlags = mat.getMode() - if not mat.face_texture: - # if not matFlags & Blender.Material.Modes['TEXFACE']: - self.writeMaterial(mat, self.cleanStr(mat.name,''), world) - # self.writeMaterial(mat, self.cleanStr(maters[0].name,''), world) - if len(maters) > 1: - print("Warning: mesh named %s has multiple materials" % meshName) - print("Warning: only one material per object handled") - - #-- textures - face = None - if mesh.active_uv_texture: - # if mesh.faceUV: - for face in mesh.active_uv_texture.data: - # for face in mesh.faces: - if face.image: - # if (hasImageTexture == 0) and (face.image): - self.writeImageTexture(face.image) - # hasImageTexture=1 # keep track of face texture - break - if self.tilenode == 1 and face and face.image: - # if self.tilenode == 1: - self.writeIndented("\n" % (face.image.xrep, face.image.yrep)) - self.tilenode = 0 - self.writeIndented("\n", -1) - - #-- IndexedFaceSet or IndexedLineSet - - # user selected BOUNDS=1, SOLID=3, SHARED=4, or TEXTURE=5 - ifStyle="IndexedFaceSet" - # look up mesh name, use it if available - if meshME in self.meshNames: - self.writeIndented("<%s USE=\"ME_%s\">" % (ifStyle, meshME), 1) - self.meshNames[meshME]+=1 - else: - if int(mesh.users) > 1: - self.writeIndented("<%s DEF=\"ME_%s\" " % (ifStyle, meshME), 1) - self.meshNames[meshME]=1 - else: - self.writeIndented("<%s " % ifStyle, 1) - - if bTwoSided == 1: - self.file.write("solid=\"false\" ") - else: - self.file.write("solid=\"true\" ") - - for face in mesh.faces: - if face.smooth: - issmooth=1 - break - if issmooth==1: - creaseAngle=(mesh.autosmooth_angle)*(math.pi/180.0) - # creaseAngle=(mesh.degr)*(math.pi/180.0) - self.file.write("creaseAngle=\"%s\" " % (round(creaseAngle,self.cp))) - - #--- output textureCoordinates if UV texture used - if mesh.active_uv_texture: - # if mesh.faceUV: - if self.matonly == 1 and self.share == 1: - self.writeFaceColors(mesh) - elif hasImageTexture == 1: - self.writeTextureCoordinates(mesh) - #--- output coordinates - self.writeCoordinates(ob, mesh, meshName, EXPORT_TRI) - - self.writingcoords = 1 - self.writingtexture = 1 - self.writingcolor = 1 - self.writeCoordinates(ob, mesh, meshName, EXPORT_TRI) - - #--- output textureCoordinates if UV texture used - if mesh.active_uv_texture: - # if mesh.faceUV: - if hasImageTexture == 1: - self.writeTextureCoordinates(mesh) - elif self.matonly == 1 and self.share == 1: - self.writeFaceColors(mesh) - #--- output vertexColors - self.matonly = 0 - self.share = 0 - - self.writingcoords = 0 - self.writingtexture = 0 - self.writingcolor = 0 - #--- output closing braces - self.writeIndented("\n" % ifStyle, -1) - self.writeIndented("\n", -1) - self.writeIndented("\n", -1) - - if self.halonode == 1: - self.writeIndented("\n", -1) - self.halonode = 0 - - if self.billnode == 1: - self.writeIndented("\n", -1) - self.billnode = 0 - - if 
self.collnode == 1: - self.writeIndented("\n", -1) - self.collnode = 0 - - if nIFSCnt > 1: - self.writeIndented("\n", -1) - - self.file.write("\n") - - def writeCoordinates(self, ob, mesh, meshName, EXPORT_TRI = False): - # create vertex list and pre rotate -90 degrees X for VRML - - if self.writingcoords == 0: - self.file.write('coordIndex="') - for face in mesh.faces: - fv = face.verts - # fv = face.v - - if len(fv)==3: - # if len(face)==3: - self.file.write("%i %i %i -1, " % (fv[0], fv[1], fv[2])) - # self.file.write("%i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index)) - else: - if EXPORT_TRI: - self.file.write("%i %i %i -1, " % (fv[0], fv[1], fv[2])) - # self.file.write("%i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index)) - self.file.write("%i %i %i -1, " % (fv[0], fv[2], fv[3])) - # self.file.write("%i %i %i -1, " % (fv[0].index, fv[2].index, fv[3].index)) - else: - self.file.write("%i %i %i %i -1, " % (fv[0], fv[1], fv[2], fv[3])) - # self.file.write("%i %i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index, fv[3].index)) - - self.file.write("\">\n") - else: - #-- vertices - # mesh.transform(ob.matrixWorld) - self.writeIndented("") - self.writeIndented("\n", -1) - - def writeTextureCoordinates(self, mesh): - texCoordList=[] - texIndexList=[] - j=0 - - for face in mesh.active_uv_texture.data: - # for face in mesh.faces: - uvs = face.uv - # uvs = [face.uv1, face.uv2, face.uv3, face.uv4] if face.verts[3] else [face.uv1, face.uv2, face.uv3] - - for uv in uvs: - # for uv in face.uv: - texIndexList.append(j) - texCoordList.append(uv) - j=j+1 - texIndexList.append(-1) - if self.writingtexture == 0: - self.file.write("\n\t\t\ttexCoordIndex=\"") - texIndxStr="" - for i in range(len(texIndexList)): - texIndxStr = texIndxStr + "%d, " % texIndexList[i] - if texIndexList[i]==-1: - self.file.write(texIndxStr) - texIndxStr="" - self.file.write("\"\n\t\t\t") - else: - self.writeIndented("") - self.writeIndented("\n", -1) - - def writeFaceColors(self, mesh): - if self.writingcolor == 0: - self.file.write("colorPerVertex=\"false\" ") - elif mesh.active_vertex_color: - # else: - self.writeIndented(" 2: - print("Debug: face.col r=%d g=%d b=%d" % (c[0], c[1], c[2])) - # print("Debug: face.col r=%d g=%d b=%d" % (c.r, c.g, c.b)) - aColor = self.rgbToFS(c) - self.file.write("%s, " % aColor) - - # for face in mesh.faces: - # if face.col: - # c=face.col[0] - # if self.verbose > 2: - # print("Debug: face.col r=%d g=%d b=%d" % (c.r, c.g, c.b)) - # aColor = self.rgbToFS(c) - # self.file.write("%s, " % aColor) - self.file.write("\" />") - self.writeIndented("\n",-1) - - def writeMaterial(self, mat, matName, world): - # look up material name, use it if available - if matName in self.matNames: - self.writeIndented("\n" % matName) - self.matNames[matName]+=1 - return; - - self.matNames[matName]=1 - - ambient = mat.ambient/3 - # ambient = mat.amb/3 - diffuseR, diffuseG, diffuseB = tuple(mat.diffuse_color) - # diffuseR, diffuseG, diffuseB = mat.rgbCol[0], mat.rgbCol[1],mat.rgbCol[2] - if world: - ambi = world.ambient_color - # ambi = world.getAmb() - ambi0, ambi1, ambi2 = (ambi[0]*mat.ambient)*2, (ambi[1]*mat.ambient)*2, (ambi[2]*mat.ambient)*2 - # ambi0, ambi1, ambi2 = (ambi[0]*mat.amb)*2, (ambi[1]*mat.amb)*2, (ambi[2]*mat.amb)*2 - else: - ambi0, ambi1, ambi2 = 0, 0, 0 - emisR, emisG, emisB = (diffuseR*mat.emit+ambi0)/2, (diffuseG*mat.emit+ambi1)/2, (diffuseB*mat.emit+ambi2)/2 - - shininess = mat.specular_hardness/512.0 - # shininess = mat.hard/512.0 - specR = 
(mat.specular_color[0]+0.001)/(1.25/(mat.specular_intensity+0.001)) - # specR = (mat.specCol[0]+0.001)/(1.25/(mat.spec+0.001)) - specG = (mat.specular_color[1]+0.001)/(1.25/(mat.specular_intensity+0.001)) - # specG = (mat.specCol[1]+0.001)/(1.25/(mat.spec+0.001)) - specB = (mat.specular_color[2]+0.001)/(1.25/(mat.specular_intensity+0.001)) - # specB = (mat.specCol[2]+0.001)/(1.25/(mat.spec+0.001)) - transp = 1-mat.alpha - # matFlags = mat.getMode() - if mat.shadeless: - # if matFlags & Blender.Material.Modes['SHADELESS']: - ambient = 1 - shine = 1 - specR = emitR = diffuseR - specG = emitG = diffuseG - specB = emitB = diffuseB - self.writeIndented("" % (round(transp,self.cp))) - self.writeIndented("\n",-1) - - def writeImageTexture(self, image): - name = image.name - filename = image.filename.split('/')[-1].split('\\')[-1] - if name in self.texNames: - self.writeIndented("\n" % self.cleanStr(name)) - self.texNames[name] += 1 - return - else: - self.writeIndented("" % name) - self.writeIndented("\n",-1) - self.texNames[name] = 1 - - def writeBackground(self, world, alltextures): - if world: worldname = world.name - else: return - blending = (world.blend_sky, world.paper_sky, world.real_sky) - # blending = world.getSkytype() - grd = world.horizon_color - # grd = world.getHor() - grd0, grd1, grd2 = grd[0], grd[1], grd[2] - sky = world.zenith_color - # sky = world.getZen() - sky0, sky1, sky2 = sky[0], sky[1], sky[2] - mix0, mix1, mix2 = grd[0]+sky[0], grd[1]+sky[1], grd[2]+sky[2] - mix0, mix1, mix2 = mix0/2, mix1/2, mix2/2 - self.file.write("\n\n") - -########################################################## -# export routine -########################################################## - - def export(self, scene, world, alltextures,\ - EXPORT_APPLY_MODIFIERS = False,\ - EXPORT_TRI= False,\ - ): - - print("Info: starting X3D export to " + self.filename + "...") - self.writeHeader() - # self.writeScript() - self.writeNavigationInfo(scene) - self.writeBackground(world, alltextures) - self.writeFog(world) - self.proto = 0 - - - # # COPIED FROM OBJ EXPORTER - # if EXPORT_APPLY_MODIFIERS: - # temp_mesh_name = '~tmp-mesh' - - # # Get the container mesh. - used for applying modifiers and non mesh objects. 
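# For reference: writeMaterial above maps Blender material settings onto X3D
# Material fields (diffuseColor straight from diffuse_color, shininess as
# hardness/512, transparency as 1 - alpha, specular and emissive colours rescaled
# from the Blender intensities). A condensed sketch of that mapping; the function
# name and example values are illustrative, and the per-channel world-ambient
# term is collapsed into a single ambient_term argument.
def x3d_material_fields(diffuse, spec_color, spec_intensity, hardness, alpha, emit, ambient_term=0.0):
    specular = tuple((c + 0.001) / (1.25 / (spec_intensity + 0.001)) for c in spec_color)
    emissive = tuple((c * emit + ambient_term) / 2.0 for c in diffuse)
    return {'diffuseColor': diffuse,
            'specularColor': specular,
            'emissiveColor': emissive,
            'shininess': hardness / 512.0,
            'transparency': 1.0 - alpha}

# e.g. x3d_material_fields((0.8, 0.8, 0.8), (1.0, 1.0, 1.0), 0.5, 50, 1.0, 0.0)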
- # containerMesh = meshName = tempMesh = None - # for meshName in Blender.NMesh.GetNames(): - # if meshName.startswith(temp_mesh_name): - # tempMesh = Mesh.Get(meshName) - # if not tempMesh.users: - # containerMesh = tempMesh - # if not containerMesh: - # containerMesh = Mesh.New(temp_mesh_name) - # -------------------------- - - - for ob_main in [o for o in scene.objects if o.is_visible()]: - # for ob_main in scene.objects.context: - - free, derived = create_derived_objects(ob_main) - - if derived == None: continue - - for ob, ob_mat in derived: - # for ob, ob_mat in BPyObject.getDerivedObjects(ob_main): - objType=ob.type - objName=ob.name - self.matonly = 0 - if objType == "CAMERA": - # if objType == "Camera": - self.writeViewpoint(ob, ob_mat, scene) - elif objType in ("MESH", "CURVE", "SURF", "TEXT") : - # elif objType in ("Mesh", "Curve", "Surf", "Text") : - if EXPORT_APPLY_MODIFIERS or objType != 'MESH': - # if EXPORT_APPLY_MODIFIERS or objType != 'Mesh': - me = ob.create_mesh(EXPORT_APPLY_MODIFIERS, 'PREVIEW') - # me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scene) - else: - me = ob.data - # me = ob.getData(mesh=1) - - self.writeIndexedFaceSet(ob, me, ob_mat, world, EXPORT_TRI = EXPORT_TRI) - - # free mesh created with create_mesh() - if me != ob.data: - bpy.data.remove_mesh(me) - - elif objType == "LAMP": - # elif objType == "Lamp": - data= ob.data - datatype=data.type - if datatype == 'POINT': - # if datatype == Lamp.Types.Lamp: - self.writePointLight(ob, ob_mat, data, world) - elif datatype == 'SPOT': - # elif datatype == Lamp.Types.Spot: - self.writeSpotLight(ob, ob_mat, data, world) - elif datatype == 'SUN': - # elif datatype == Lamp.Types.Sun: - self.writeDirectionalLight(ob, ob_mat, data, world) - else: - self.writeDirectionalLight(ob, ob_mat, data, world) - # do you think x3d could document what to do with dummy objects? - #elif objType == "Empty" and objName != "Empty": - # self.writeNode(ob, ob_mat) - else: - #print "Info: Ignoring [%s], object type [%s] not handle yet" % (object.name,object.getType) - pass - - if free: - free_derived_objects(ob_main) - - self.file.write("\n\n") - - # if EXPORT_APPLY_MODIFIERS: - # if containerMesh: - # containerMesh.verts = None - - self.cleanup() - -########################################################## -# Utility methods -########################################################## - - def cleanup(self): - self.file.close() - self.texNames={} - self.matNames={} - self.indentLevel=0 - print("Info: finished X3D export to %s\n" % self.filename) - - def cleanStr(self, name, prefix='rsvd_'): - """cleanStr(name,prefix) - try to create a valid VRML DEF name from object name""" - - newName=name[:] - if len(newName) == 0: - self.nNodeID+=1 - return "%s%d" % (prefix, self.nNodeID) - - if newName in self.namesReserved: - newName='%s%s' % (prefix,newName) - - if newName[0].isdigit(): - newName='%s%s' % ('_',newName) - - for bad in [' ','"','#',"'",',','.','[','\\',']','{','}']: - newName=newName.replace(bad,'_') - return newName - - def countIFSSetsNeeded(self, mesh, imageMap, sided, vColors): - """ - countIFFSetsNeeded() - should look at a blender mesh to determine - how many VRML IndexFaceSets or IndexLineSets are needed. 
A - new mesh created under the following conditions: - - o - split by UV Textures / one per mesh - o - split by face, one sided and two sided - o - split by smooth and flat faces - o - split when faces only have 2 vertices * needs to be an IndexLineSet - """ - - imageNameMap={} - faceMap={} - nFaceIndx=0 - - if mesh.active_uv_texture: - # if mesh.faceUV: - for face in mesh.active_uv_texture.data: - # for face in mesh.faces: - sidename=''; - if face.twoside: - # if face.mode & Mesh.FaceModes.TWOSIDE: - sidename='two' - else: - sidename='one' - - if sidename in sided: - sided[sidename]+=1 - else: - sided[sidename]=1 - - image = face.image - if image: - faceName="%s_%s" % (face.image.name, sidename); - try: - imageMap[faceName].append(face) - except: - imageMap[faceName]=[face.image.name,sidename,face] - - if self.verbose > 2: - for faceName in imageMap.keys(): - ifs=imageMap[faceName] - print("Debug: faceName=%s image=%s, solid=%s facecnt=%d" % \ - (faceName, ifs[0], ifs[1], len(ifs)-2)) - - return len(imageMap) - - def faceToString(self,face): - - print("Debug: face.flag=0x%x (bitflags)" % face.flag) - if face.sel: - print("Debug: face.sel=true") - - print("Debug: face.mode=0x%x (bitflags)" % face.mode) - if face.mode & Mesh.FaceModes.TWOSIDE: - print("Debug: face.mode twosided") - - print("Debug: face.transp=0x%x (enum)" % face.transp) - if face.transp == Mesh.FaceTranspModes.SOLID: - print("Debug: face.transp.SOLID") - - if face.image: - print("Debug: face.image=%s" % face.image.name) - print("Debug: face.materialIndex=%d" % face.materialIndex) - - # XXX not used - # def getVertexColorByIndx(self, mesh, indx): - # c = None - # for face in mesh.faces: - # j=0 - # for vertex in face.v: - # if vertex.index == indx: - # c=face.col[j] - # break - # j=j+1 - # if c: break - # return c - - def meshToString(self,mesh): - # print("Debug: mesh.hasVertexUV=%d" % mesh.vertexColors) - print("Debug: mesh.faceUV=%d" % (len(mesh.uv_textures) > 0)) - # print("Debug: mesh.faceUV=%d" % mesh.faceUV) - print("Debug: mesh.hasVertexColours=%d" % (len(mesh.vertex_colors) > 0)) - # print("Debug: mesh.hasVertexColours=%d" % mesh.hasVertexColours()) - print("Debug: mesh.verts=%d" % len(mesh.verts)) - print("Debug: mesh.faces=%d" % len(mesh.faces)) - print("Debug: mesh.materials=%d" % len(mesh.materials)) - - def rgbToFS(self, c): - s="%s %s %s" % (round(c[0]/255.0,self.cp), - round(c[1]/255.0,self.cp), - round(c[2]/255.0,self.cp)) - - # s="%s %s %s" % ( - # round(c.r/255.0,self.cp), - # round(c.g/255.0,self.cp), - # round(c.b/255.0,self.cp)) - return s - - def computeDirection(self, mtx): - x,y,z=(0,-1.0,0) # point down - - ax,ay,az = (mtx*MATWORLD).toEuler() - - # ax *= DEG2RAD - # ay *= DEG2RAD - # az *= DEG2RAD - - # rot X - x1=x - y1=y*math.cos(ax)-z*math.sin(ax) - z1=y*math.sin(ax)+z*math.cos(ax) - - # rot Y - x2=x1*math.cos(ay)+z1*math.sin(ay) - y2=y1 - z2=z1*math.cos(ay)-x1*math.sin(ay) - - # rot Z - x3=x2*math.cos(az)-y2*math.sin(az) - y3=x2*math.sin(az)+y2*math.cos(az) - z3=z2 - - return [x3,y3,z3] - - - # swap Y and Z to handle axis difference between Blender and VRML - #------------------------------------------------------------------------ - def rotatePointForVRML(self, v): - x = v[0] - y = v[2] - z = -v[1] - - vrmlPoint=[x, y, z] - return vrmlPoint - - # For writing well formed VRML code - #------------------------------------------------------------------------ - def writeIndented(self, s, inc=0): - if inc < 1: - self.indentLevel = self.indentLevel + inc - - spaces="" - for x in 
range(self.indentLevel): - spaces = spaces + "\t" - self.file.write(spaces + s) - - if inc > 0: - self.indentLevel = self.indentLevel + inc - - # Converts a Euler to three new Quaternions - # Angles of Euler are passed in as radians - #------------------------------------------------------------------------ - def eulerToQuaternions(self, x, y, z): - Qx = [math.cos(x/2), math.sin(x/2), 0, 0] - Qy = [math.cos(y/2), 0, math.sin(y/2), 0] - Qz = [math.cos(z/2), 0, 0, math.sin(z/2)] - - quaternionVec=[Qx,Qy,Qz] - return quaternionVec - - # Multiply two Quaternions together to get a new Quaternion - #------------------------------------------------------------------------ - def multiplyQuaternions(self, Q1, Q2): - result = [((Q1[0] * Q2[0]) - (Q1[1] * Q2[1]) - (Q1[2] * Q2[2]) - (Q1[3] * Q2[3])), - ((Q1[0] * Q2[1]) + (Q1[1] * Q2[0]) + (Q1[2] * Q2[3]) - (Q1[3] * Q2[2])), - ((Q1[0] * Q2[2]) + (Q1[2] * Q2[0]) + (Q1[3] * Q2[1]) - (Q1[1] * Q2[3])), - ((Q1[0] * Q2[3]) + (Q1[3] * Q2[0]) + (Q1[1] * Q2[2]) - (Q1[2] * Q2[1]))] - - return result - - # Convert a Quaternion to an Angle Axis (ax, ay, az, angle) - # angle is in radians - #------------------------------------------------------------------------ - def quaternionToAngleAxis(self, Qf): - scale = math.pow(Qf[1],2) + math.pow(Qf[2],2) + math.pow(Qf[3],2) - ax = Qf[1] - ay = Qf[2] - az = Qf[3] - - if scale > .0001: - ax/=scale - ay/=scale - az/=scale - - angle = 2 * math.acos(Qf[0]) - - result = [ax, ay, az, angle] - return result - -########################################################## -# Callbacks, needed before Main -########################################################## - -def x3d_export(filename, - context, - EXPORT_APPLY_MODIFIERS=False, - EXPORT_TRI=False, - EXPORT_GZIP=False): - - if EXPORT_GZIP: - if not filename.lower().endswith('.x3dz'): - filename = '.'.join(filename.split('.')[:-1]) + '.x3dz' - else: - if not filename.lower().endswith('.x3d'): - filename = '.'.join(filename.split('.')[:-1]) + '.x3d' - - - scene = context.scene - # scene = Blender.Scene.GetCurrent() - world = scene.world - - # XXX these are global textures while .Get() returned only scene's? 
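# For reference: writeViewpoint composes the helpers above (eulerToQuaternions,
# multiplyQuaternions, quaternionToAngleAxis) to turn Euler angles into the
# axis-angle rotation X3D expects. A compact standalone equivalent; the function
# name is illustrative, angles are in radians, quaternions are w-first, and the
# axis is normalised with a square root here (the class method divides by the
# squared length, which leaves the direction unchanged).
import math

def euler_to_axis_angle(x, y, z):
    # one quaternion per axis: q = (cos(a/2), sin(a/2) * axis)
    qx = (math.cos(x / 2), math.sin(x / 2), 0.0, 0.0)
    qy = (math.cos(y / 2), 0.0, math.sin(y / 2), 0.0)
    qz = (math.cos(z / 2), 0.0, 0.0, math.sin(z / 2))

    def qmul(a, b):  # Hamilton product, same term ordering as multiplyQuaternions
        return (a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3],
                a[0]*b[1] + a[1]*b[0] + a[2]*b[3] - a[3]*b[2],
                a[0]*b[2] + a[2]*b[0] + a[3]*b[1] - a[1]*b[3],
                a[0]*b[3] + a[3]*b[0] + a[1]*b[2] - a[2]*b[1])

    w, i, j, k = qmul(qmul(qx, qy), qz)
    length = math.sqrt(i*i + j*j + k*k)
    if length < 1e-6:
        return (0.0, 0.0, 1.0, 0.0)  # no rotation; any axis will do
    w = max(-1.0, min(1.0, w))       # guard acos against rounding drift
    return (i / length, j / length, k / length, 2.0 * math.acos(w))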
- alltextures = bpy.data.textures - # alltextures = Blender.Texture.Get() - - wrlexport=x3d_class(filename) - wrlexport.export(\ - scene,\ - world,\ - alltextures,\ - \ - EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS,\ - EXPORT_TRI = EXPORT_TRI,\ - ) - - -def x3d_export_ui(filename): - if not filename.endswith(extension): - filename += extension - #if _safeOverwrite and sys.exists(filename): - # result = Draw.PupMenu("File Already Exists, Overwrite?%t|Yes%x1|No%x0") - #if(result != 1): - # return - - # Get user options - EXPORT_APPLY_MODIFIERS = Draw.Create(1) - EXPORT_TRI = Draw.Create(0) - EXPORT_GZIP = Draw.Create( filename.lower().endswith('.x3dz') ) - - # Get USER Options - pup_block = [\ - ('Apply Modifiers', EXPORT_APPLY_MODIFIERS, 'Use transformed mesh data from each object.'),\ - ('Triangulate', EXPORT_TRI, 'Triangulate quads.'),\ - ('Compress', EXPORT_GZIP, 'GZip the resulting file, requires a full python install'),\ - ] - - if not Draw.PupBlock('Export...', pup_block): - return - - Blender.Window.EditMode(0) - Blender.Window.WaitCursor(1) - - x3d_export(filename,\ - EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val,\ - EXPORT_TRI = EXPORT_TRI.val,\ - EXPORT_GZIP = EXPORT_GZIP.val\ - ) - - Blender.Window.WaitCursor(0) - - - -######################################################### -# main routine -######################################################### - - -# if __name__ == '__main__': -# Blender.Window.FileSelector(x3d_export_ui,"Export X3D", Blender.Get('filename').replace('.blend', '.x3d')) - -class EXPORT_OT_x3d(bpy.types.Operator): - ''' - X3D Exporter - ''' - __idname__ = "export.x3d" - __label__ = 'Export X3D' - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [ - bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the X3D file", maxlen= 1024, default= ""), - - bpy.props.BoolProperty(attr="apply_modifiers", name="Apply Modifiers", description="Use transformed mesh data from each object.", default=True), - bpy.props.BoolProperty(attr="triangulate", name="Triangulate", description="Triangulate quads.", default=False), - bpy.props.BoolProperty(attr="compress", name="Compress", description="GZip the resulting file, requires a full python install.", default=False), - ] - - def execute(self, context): - x3d_export(self.path, context, self.apply_modifiers, self.triangulate, self.compress) - return ('FINISHED',) - - def invoke(self, context, event): - wm = context.manager - wm.add_fileselect(self.__operator__) - return ('RUNNING_MODAL',) - - def poll(self, context): # Poll isnt working yet - print("Poll") - return context.active_object != None - -bpy.ops.add(EXPORT_OT_x3d) - -# NOTES -# - blender version is hardcoded diff --git a/release/scripts/io/import_3ds.py b/release/scripts/io/import_3ds.py deleted file mode 100644 index 339fac839ea..00000000000 --- a/release/scripts/io/import_3ds.py +++ /dev/null @@ -1,1167 +0,0 @@ -#!BPY -""" -Name: '3D Studio (.3ds)...' -Blender: 244 -Group: 'Import' -Tooltip: 'Import from 3DS file format (.3ds)' -""" - -__author__= ['Bob Holcomb', 'Richard L?rk?ng', 'Damien McGinnes', 'Campbell Barton', 'Mario Lapin'] -__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/") -__version__= '0.996' -__bpydoc__= '''\ - -3ds Importer - -This script imports a 3ds file and the materials into Blender for editing. 
- -Loader is based on 3ds loader from www.gametutorials.com (Thanks DigiBen). - -0.996 by Mario Lapin (mario.lapin@gmail.com) 13/04/200
- - Implemented workaround to correct the association between name, geometry and materials of - imported meshes. - - Without this patch, version 0.995 of this importer would associate with each mesh object the - geometry and the materials of the previously parsed mesh object. As a result, the name of the - first mesh object would be thrown away, and the name of the last mesh object would - automatically have '.001' appended. No object would disappear, but objects' - names and materials would be completely jumbled. - -0.995 by Campbell Barton
-- workaround for buggy mesh vert delete -- minor tweaks - -0.99 by Bob Holcomb
-- added support for floating point color values that previously broke on import. - -0.98 by Campbell Barton
-- import faces and verts to lists instead of a mesh, convert to a mesh later -- use new index mapping feature of mesh to re-map faces that were not added. - -0.97 by Campbell Barton
-- Strip material names of spaces -- Added import as instance to import the 3ds into its own - scene and add a group instance to the current scene -- New option to scale down imported objects so they are within a limited bounding area. - -0.96 by Campbell Barton
-- Added workaround for a bug in setting UVs for zero vert index UV faces. -- Removed the unique name function; let Blender make the names unique. - -0.95 by Campbell Barton
-- Removed workarounds for Blender 2.41 -- Mesh objects split by material - many 3ds objects used more than 16 materials per mesh. -- Removed a lot of unneeded variable creation. - -0.94 by Campbell Barton
-- Face import tested at roughly a 16x overall speedup over 0.93. -- Material importing speedup. -- Tested with more models. -- Support for some corrupt models. - -0.93 by Campbell Barton
-- Tested with 400 3ds files from turbosquid and samples. -- Tactfully ignore faces that used the same verts twice. -- Rolled back to the sloppy, un-reorganized 0.83 code; the reorganization had broken UV coord loading. -- Converted from NMesh to Mesh. -- Faster and cleaner new names. -- Use external comprehensive image loader. -- Re-integrated 0.92 and 0.9 changes -- Fixes for 2.41 compat. -- Non-textured faces do not use a texture flag. - -0.92
-- Added support for diffuse, alpha, spec, bump maps in a single material - -0.9
-- Reorganized code into object/material block functions
-- Use of Matrix() to copy matrix data
-- added support for material transparency
- -0.83 2005-08-07: Campell Barton -- Aggressive image finding and case insensitivy for posisx systems. - -0.82a 2005-07-22 -- image texture loading (both for face uv and renderer) - -0.82 - image texture loading (for face uv) - -0.81a (fork- not 0.9) Campbell Barton 2005-06-08 -- Simplified import code -- Never overwrite data -- Faster list handling -- Leaves import selected - -0.81 Damien McGinnes 2005-01-09 -- handle missing images better - -0.8 Damien McGinnes 2005-01-08 -- copies sticky UV coords to face ones -- handles images better -- Recommend that you run 'RemoveDoubles' on each imported mesh after using this script - -''' - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Bob Holcomb -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -# Importing modules - -import os -import time -import struct - -from import_obj import unpack_face_list, load_image - -import bpy -import Mathutils - -# import Blender -# from Blender import Mesh, Object, Material, Image, Texture, Lamp, Mathutils -# from Blender.Mathutils import Vector -# import BPyImage - -# import BPyMessages - -# try: -# from struct import calcsize, unpack -# except: -# calcsize= unpack= None - - - -# # If python version is less than 2.4, try to get set stuff from module -# try: -# set -# except: -# from sets import Set as set - -BOUNDS_3DS = [] - - -#this script imports uvcoords as sticky vertex coords -#this parameter enables copying these to face uv coords -#which shold be more useful. - -def createBlenderTexture(material, name, image): - texture = bpy.data.textures.new(name) - texture.setType('Image') - texture.image = image - material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL) - - - -###################################################### -# Data Structures -###################################################### - -#Some of the chunks that we will see -#----- Primary Chunk, at the beginning of each file -PRIMARY = int('0x4D4D',16) - -#------ Main Chunks -OBJECTINFO = int('0x3D3D',16); #This gives the version of the mesh and is found right before the material and object information -VERSION = int('0x0002',16); #This gives the version of the .3ds file -EDITKEYFRAME= int('0xB000',16); #This is the header for all of the key frame info - -#------ sub defines of OBJECTINFO -MATERIAL = 45055 #0xAFFF // This stored the texture info -OBJECT = 16384 #0x4000 // This stores the faces, vertices, etc... 
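# For reference: a .3ds file is a tree of chunks, each starting with a 6-byte
# header made of one of the 2-byte little-endian IDs defined here plus a 4-byte
# length that includes the header itself. A minimal reader/skipper sketch; the
# names are illustrative and only the stdlib struct module is assumed.
import struct

CHUNK_HEADER = struct.Struct('<HI')  # unsigned short ID + unsigned int length

def read_chunk_header(f):
    """Return (chunk_id, chunk_length) or None at end of file."""
    data = f.read(CHUNK_HEADER.size)
    if len(data) < CHUNK_HEADER.size:
        return None
    return CHUNK_HEADER.unpack(data)

def skip_chunk(f, chunk_length):
    """Seek past the rest of a chunk whose header has just been read."""
    f.seek(chunk_length - CHUNK_HEADER.size, 1)  # 1 = relative to current position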
- -#>------ sub defines of MATERIAL -#------ sub defines of MATERIAL_BLOCK -MAT_NAME = int('0xA000',16) # This holds the material name -MAT_AMBIENT = int('0xA010',16) # Ambient color of the object/material -MAT_DIFFUSE = int('0xA020',16) # This holds the color of the object/material -MAT_SPECULAR = int('0xA030',16) # SPecular color of the object/material -MAT_SHINESS = int('0xA040',16) # ?? -MAT_TRANSPARENCY= int('0xA050',16) # Transparency value of material -MAT_SELF_ILLUM = int('0xA080',16) # Self Illumination value of material -MAT_WIRE = int('0xA085',16) # Only render's wireframe - -MAT_TEXTURE_MAP = int('0xA200',16) # This is a header for a new texture map -MAT_SPECULAR_MAP= int('0xA204',16) # This is a header for a new specular map -MAT_OPACITY_MAP = int('0xA210',16) # This is a header for a new opacity map -MAT_REFLECTION_MAP= int('0xA220',16) # This is a header for a new reflection map -MAT_BUMP_MAP = int('0xA230',16) # This is a header for a new bump map -MAT_MAP_FILENAME = int('0xA300',16) # This holds the file name of the texture - -MAT_FLOAT_COLOR = int ('0x0010', 16) #color defined as 3 floats -MAT_24BIT_COLOR = int ('0x0011', 16) #color defined as 3 bytes - -#>------ sub defines of OBJECT -OBJECT_MESH = int('0x4100',16); # This lets us know that we are reading a new object -OBJECT_LAMP = int('0x4600',16); # This lets un know we are reading a light object -OBJECT_LAMP_SPOT = int('0x4610',16); # The light is a spotloght. -OBJECT_LAMP_OFF = int('0x4620',16); # The light off. -OBJECT_LAMP_ATTENUATE = int('0x4625',16); -OBJECT_LAMP_RAYSHADE = int('0x4627',16); -OBJECT_LAMP_SHADOWED = int('0x4630',16); -OBJECT_LAMP_LOCAL_SHADOW = int('0x4640',16); -OBJECT_LAMP_LOCAL_SHADOW2 = int('0x4641',16); -OBJECT_LAMP_SEE_CONE = int('0x4650',16); -OBJECT_LAMP_SPOT_RECTANGULAR = int('0x4651',16); -OBJECT_LAMP_SPOT_OVERSHOOT = int('0x4652',16); -OBJECT_LAMP_SPOT_PROJECTOR = int('0x4653',16); -OBJECT_LAMP_EXCLUDE = int('0x4654',16); -OBJECT_LAMP_RANGE = int('0x4655',16); -OBJECT_LAMP_ROLL = int('0x4656',16); -OBJECT_LAMP_SPOT_ASPECT = int('0x4657',16); -OBJECT_LAMP_RAY_BIAS = int('0x4658',16); -OBJECT_LAMP_INNER_RANGE = int('0x4659',16); -OBJECT_LAMP_OUTER_RANGE = int('0x465A',16); -OBJECT_LAMP_MULTIPLIER = int('0x465B',16); -OBJECT_LAMP_AMBIENT_LIGHT = int('0x4680',16); - - - -OBJECT_CAMERA= int('0x4700',16); # This lets un know we are reading a camera object - -#>------ sub defines of CAMERA -OBJECT_CAM_RANGES= int('0x4720',16); # The camera range values - -#>------ sub defines of OBJECT_MESH -OBJECT_VERTICES = int('0x4110',16); # The objects vertices -OBJECT_FACES = int('0x4120',16); # The objects faces -OBJECT_MATERIAL = int('0x4130',16); # This is found if the object has a material, either texture map or color -OBJECT_UV = int('0x4140',16); # The UV texture coordinates -OBJECT_TRANS_MATRIX = int('0x4160',16); # The Object Matrix - -global scn -scn = None - -#the chunk class -class chunk: - ID = 0 - length = 0 - bytes_read = 0 - - #we don't read in the bytes_read, we compute that - binary_format=' 3): - print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version) - - #is it an object info chunk? - elif (new_chunk.ID == OBJECTINFO): - #print 'elif (new_chunk.ID == OBJECTINFO):' - # print 'found an OBJECTINFO chunk' - process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH) - - #keep track of how much we read in the main chunk - new_chunk.bytes_read += temp_chunk.bytes_read - - #is it an object chunk? 
- elif (new_chunk.ID == OBJECT): - - if CreateBlenderObject: - putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials) - contextMesh_vertls = []; contextMesh_facels = [] - - ## preparando para receber o proximo objeto - contextMeshMaterials = {} # matname:[face_idxs] - contextMeshUV = None - #contextMesh.vertexUV = 1 # Make sticky coords. - # Reset matrix - contextMatrix_rot = None - #contextMatrix_tx = None - - CreateBlenderObject = True - tempName = read_string(file) - contextObName = tempName - new_chunk.bytes_read += len(tempName)+1 - - #is it a material chunk? - elif (new_chunk.ID == MATERIAL): - -# print("read material") - - #print 'elif (new_chunk.ID == MATERIAL):' - contextMaterial = bpy.data.add_material('Material') -# contextMaterial = bpy.data.materials.new('Material') - - elif (new_chunk.ID == MAT_NAME): - #print 'elif (new_chunk.ID == MAT_NAME):' - material_name = read_string(file) - -# print("material name", material_name) - - #plus one for the null character that ended the string - new_chunk.bytes_read += len(material_name)+1 - - contextMaterial.name = material_name.rstrip() # remove trailing whitespace - MATDICT[material_name]= (contextMaterial.name, contextMaterial) - - elif (new_chunk.ID == MAT_AMBIENT): - #print 'elif (new_chunk.ID == MAT_AMBIENT):' - read_chunk(file, temp_chunk) - if (temp_chunk.ID == MAT_FLOAT_COLOR): - contextMaterial.mirror_color = read_float_color(temp_chunk) -# temp_data = file.read(struct.calcsize('3f')) -# temp_chunk.bytes_read += 12 -# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)] - elif (temp_chunk.ID == MAT_24BIT_COLOR): - contextMaterial.mirror_color = read_byte_color(temp_chunk) -# temp_data = file.read(struct.calcsize('3B')) -# temp_chunk.bytes_read += 3 -# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb - else: - skip_to_end(file, temp_chunk) - new_chunk.bytes_read += temp_chunk.bytes_read - - elif (new_chunk.ID == MAT_DIFFUSE): - #print 'elif (new_chunk.ID == MAT_DIFFUSE):' - read_chunk(file, temp_chunk) - if (temp_chunk.ID == MAT_FLOAT_COLOR): - contextMaterial.diffuse_color = read_float_color(temp_chunk) -# temp_data = file.read(struct.calcsize('3f')) -# temp_chunk.bytes_read += 12 -# contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)] - elif (temp_chunk.ID == MAT_24BIT_COLOR): - contextMaterial.diffuse_color = read_byte_color(temp_chunk) -# temp_data = file.read(struct.calcsize('3B')) -# temp_chunk.bytes_read += 3 -# contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb - else: - skip_to_end(file, temp_chunk) - -# print("read material diffuse color", contextMaterial.diffuse_color) - - new_chunk.bytes_read += temp_chunk.bytes_read - - elif (new_chunk.ID == MAT_SPECULAR): - #print 'elif (new_chunk.ID == MAT_SPECULAR):' - read_chunk(file, temp_chunk) - if (temp_chunk.ID == MAT_FLOAT_COLOR): - contextMaterial.specular_color = read_float_color(temp_chunk) -# temp_data = file.read(struct.calcsize('3f')) -# temp_chunk.bytes_read += 12 -# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)] - elif (temp_chunk.ID == MAT_24BIT_COLOR): - contextMaterial.specular_color = read_byte_color(temp_chunk) -# temp_data = file.read(struct.calcsize('3B')) -# temp_chunk.bytes_read += 3 -# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb - else: - skip_to_end(file, temp_chunk) - 
new_chunk.bytes_read += temp_chunk.bytes_read - - elif (new_chunk.ID == MAT_TEXTURE_MAP): - read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR") -# #print 'elif (new_chunk.ID==MAT_TEXTURE_MAP):' -# new_texture= bpy.data.textures.new('Diffuse') -# new_texture.setType('Image') -# img = None -# while (new_chunk.bytes_read BOUNDS_3DS[i + 3]: - BOUNDS_3DS[i + 3]= v[i] # min - - # Get the max axis x/y/z - max_axis = max(BOUNDS_3DS[3]-BOUNDS_3DS[0], BOUNDS_3DS[4]-BOUNDS_3DS[1], BOUNDS_3DS[5]-BOUNDS_3DS[2]) - # print max_axis - if max_axis < 1 << 30: # Should never be false but just make sure. - - # Get a new scale factor if set as an option - SCALE = 1.0 - while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS: - SCALE/=10 - - # SCALE Matrix - SCALE_MAT = Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1]) -# SCALE_MAT = Blender.Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1]) - - for ob in importedObjects: - ob.setMatrix(ob.matrixWorld * SCALE_MAT) - - # Done constraining to bounds. - - # Select all new objects. - print('finished importing: "%s" in %.4f sec.' % (filename, (time.clock()-time1))) -# print('finished importing: "%s" in %.4f sec.' % (filename, (Blender.sys.time()-time1))) - file.close() -# Blender.Window.WaitCursor(0) - - -DEBUG = False -# if __name__=='__main__' and not DEBUG: -# if calcsize == None: -# Blender.Draw.PupMenu('Error%t|a full python installation not found') -# else: -# Blender.Window.FileSelector(load_3ds, 'Import 3DS', '*.3ds') - -# For testing compatibility -#load_3ds('/metavr/convert/vehicle/truck_002/TruckTanker1.3DS', False) -#load_3ds('/metavr/archive/convert/old/arranged_3ds_to_hpx-2/only-need-engine-trains/Engine2.3DS', False) -''' - -else: - import os - # DEBUG ONLY - TIME = Blender.sys.time() - import os - print 'Searching for files' - os.system('find /metavr/ -iname "*.3ds" > /tmp/temp3ds_list') - # os.system('find /storage/ -iname "*.3ds" > /tmp/temp3ds_list') - print '...Done' - file = open('/tmp/temp3ds_list', 'r') - lines = file.readlines() - file.close() - # sort by filesize for faster testing - lines_size = [(os.path.getsize(f[:-1]), f[:-1]) for f in lines] - lines_size.sort() - lines = [f[1] for f in lines_size] - - - def between(v,a,b): - if v <= max(a,b) and v >= min(a,b): - return True - return False - - for i, _3ds in enumerate(lines): - if between(i, 650,800): - #_3ds= _3ds[:-1] - print 'Importing', _3ds, '\nNUMBER', i, 'of', len(lines) - _3ds_file= _3ds.split('/')[-1].split('\\')[-1] - newScn = Blender.Scene.New(_3ds_file) - newScn.makeCurrent() - load_3ds(_3ds, False) - - print 'TOTAL TIME: %.6f' % (Blender.sys.time() - TIME) - -''' - -class IMPORT_OT_3ds(bpy.types.Operator): - ''' - 3DS Importer - ''' - __idname__ = "import.3ds" - __label__ = 'Import 3DS' - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [ - bpy.props.StringProperty(attr="path", name="File Path", description="File path used for importing the 3DS file", maxlen= 1024, default= ""), - -# bpy.props.FloatProperty(attr="size_constraint", name="Size Constraint", description="Scale the model by 10 until it reacehs the size constraint. 
Zero Disables.", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0), -# bpy.props.BoolProperty(attr="search_images", name="Image Search", description="Search subdirectories for any assosiated images (Warning, may be slow)", default=True), -# bpy.props.BoolProperty(attr="apply_matrix", name="Transform Fix", description="Workaround for object transformations importing incorrectly", default=False), - ] - - def execute(self, context): - load_3ds(self.path, context, 0.0, False, False) - return ('FINISHED',) - - def invoke(self, context, event): - wm = context.manager - wm.add_fileselect(self.__operator__) - return ('RUNNING_MODAL',) - ''' - def poll(self, context): - print("Poll") - return context.active_object != None''' - -bpy.ops.add(IMPORT_OT_3ds) - -# NOTES: -# why add 1 extra vertex? and remove it when done? -# disabled scaling to size, this requires exposing bb (easy) and understanding how it works (needs some time) diff --git a/release/scripts/io/import_obj.py b/release/scripts/io/import_obj.py deleted file mode 100644 index a762005ae7d..00000000000 --- a/release/scripts/io/import_obj.py +++ /dev/null @@ -1,1638 +0,0 @@ -#!BPY - -""" -Name: 'Wavefront (.obj)...' -Blender: 249 -Group: 'Import' -Tooltip: 'Load a Wavefront OBJ File, Shift: batch import all dir.' -""" - -__author__= "Campbell Barton", "Jiri Hnidek", "Paolo Ciccone" -__url__= ['http://wiki.blender.org/index.php/Scripts/Manual/Import/wavefront_obj', 'blender.org', 'blenderartists.org'] -__version__= "2.11" - -__bpydoc__= """\ -This script imports a Wavefront OBJ files to Blender. - -Usage: -Run this script from "File->Import" menu and then load the desired OBJ file. -Note, This loads mesh objects and materials only, nurbs and curves are not supported. -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton 2007 -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import os -import time -import bpy -import Mathutils -import Geometry - -# from Blender import Mesh, Draw, Window, Texture, Material, sys -# # import BPyMesh -# import BPyImage -# import BPyMessages - -# try: import os -# except: os= False - -# Generic path functions -def stripFile(path): - '''Return directory, where the file is''' - lastSlash= max(path.rfind('\\'), path.rfind('/')) - if lastSlash != -1: - path= path[:lastSlash] - return '%s%s' % (path, os.sep) -# return '%s%s' % (path, sys.sep) - -def stripPath(path): - '''Strips the slashes from the back of a string''' - return path.split('/')[-1].split('\\')[-1] - -def stripExt(name): # name is a string - '''Strips the prefix off the name before writing''' - index= name.rfind('.') - if index != -1: - return name[ : index ] - else: - return name -# end path funcs - -def unpack_list(list_of_tuples): - l = [] - for t in list_of_tuples: - l.extend(t) - return l - -# same as above except that it adds 0 for triangle faces -def unpack_face_list(list_of_tuples): - l = [] - for t in list_of_tuples: - face = [i for i in t] - - if len(face) != 3 and len(face) != 4: - raise RuntimeError("{0} vertices in face.".format(len(face))) - - # rotate indices if the 4th is 0 - if len(face) == 4 and face[3] == 0: - face = [face[3], face[0], face[1], face[2]] - - if len(face) == 3: - face.append(0) - - l.extend(face) - - return l - -def BPyMesh_ngon(from_data, indices, PREF_FIX_LOOPS= True): - ''' - Takes a polyline of indices (fgon) - and returns a list of face indicie lists. - Designed to be used for importers that need indices for an fgon to create from existing verts. - - from_data: either a mesh, or a list/tuple of vectors. - indices: a list of indicies to use this list is the ordered closed polyline to fill, and can be a subset of the data given. - PREF_FIX_LOOPS: If this is enabled polylines that use loops to make multiple polylines are delt with correctly. - ''' - - if not set: # Need sets for this, otherwise do a normal fill. 
- PREF_FIX_LOOPS= False - - Vector= Mathutils.Vector - if not indices: - return [] - - # return [] - def rvec(co): return round(co.x, 6), round(co.y, 6), round(co.z, 6) - def mlen(co): return abs(co[0])+abs(co[1])+abs(co[2]) # manhatten length of a vector, faster then length - - def vert_treplet(v, i): - return v, rvec(v), i, mlen(v) - - def ed_key_mlen(v1, v2): - if v1[3] > v2[3]: - return v2[1], v1[1] - else: - return v1[1], v2[1] - - - if not PREF_FIX_LOOPS: - ''' - Normal single concave loop filling - ''' - if type(from_data) in (tuple, list): - verts= [Vector(from_data[i]) for ii, i in enumerate(indices)] - else: - verts= [from_data.verts[i].co for ii, i in enumerate(indices)] - - for i in range(len(verts)-1, 0, -1): # same as reversed(xrange(1, len(verts))): - if verts[i][1]==verts[i-1][0]: - verts.pop(i-1) - - fill= Geometry.PolyFill([verts]) - - else: - ''' - Seperate this loop into multiple loops be finding edges that are used twice - This is used by lightwave LWO files a lot - ''' - - if type(from_data) in (tuple, list): - verts= [vert_treplet(Vector(from_data[i]), ii) for ii, i in enumerate(indices)] - else: - verts= [vert_treplet(from_data.verts[i].co, ii) for ii, i in enumerate(indices)] - - edges= [(i, i-1) for i in range(len(verts))] - if edges: - edges[0]= (0,len(verts)-1) - - if not verts: - return [] - - - edges_used= set() - edges_doubles= set() - # We need to check if any edges are used twice location based. - for ed in edges: - edkey= ed_key_mlen(verts[ed[0]], verts[ed[1]]) - if edkey in edges_used: - edges_doubles.add(edkey) - else: - edges_used.add(edkey) - - # Store a list of unconnected loop segments split by double edges. - # will join later - loop_segments= [] - - v_prev= verts[0] - context_loop= [v_prev] - loop_segments= [context_loop] - - for v in verts: - if v!=v_prev: - # Are we crossing an edge we removed? - if ed_key_mlen(v, v_prev) in edges_doubles: - context_loop= [v] - loop_segments.append(context_loop) - else: - if context_loop and context_loop[-1][1]==v[1]: - #raise "as" - pass - else: - context_loop.append(v) - - v_prev= v - # Now join loop segments - - def join_seg(s1,s2): - if s2[-1][1]==s1[0][1]: # - s1,s2= s2,s1 - elif s1[-1][1]==s2[0][1]: - pass - else: - return False - - # If were stuill here s1 and s2 are 2 segments in the same polyline - s1.pop() # remove the last vert from s1 - s1.extend(s2) # add segment 2 to segment 1 - - if s1[0][1]==s1[-1][1]: # remove endpoints double - s1.pop() - - s2[:]= [] # Empty this segment s2 so we dont use it again. 
- return True - - joining_segments= True - while joining_segments: - joining_segments= False - segcount= len(loop_segments) - - for j in range(segcount-1, -1, -1): #reversed(range(segcount)): - seg_j= loop_segments[j] - if seg_j: - for k in range(j-1, -1, -1): # reversed(range(j)): - if not seg_j: - break - seg_k= loop_segments[k] - - if seg_k and join_seg(seg_j, seg_k): - joining_segments= True - - loop_list= loop_segments - - for verts in loop_list: - while verts and verts[0][1]==verts[-1][1]: - verts.pop() - - loop_list= [verts for verts in loop_list if len(verts)>2] - # DONE DEALING WITH LOOP FIXING - - - # vert mapping - vert_map= [None]*len(indices) - ii=0 - for verts in loop_list: - if len(verts)>2: - for i, vert in enumerate(verts): - vert_map[i+ii]= vert[2] - ii+=len(verts) - - fill= Geometry.PolyFill([ [v[0] for v in loop] for loop in loop_list ]) - #draw_loops(loop_list) - #raise 'done loop' - # map to original indicies - fill= [[vert_map[i] for i in reversed(f)] for f in fill] - - - if not fill: - print('Warning Cannot scanfill, fallback on a triangle fan.') - fill= [ [0, i-1, i] for i in range(2, len(indices)) ] - else: - # Use real scanfill. - # See if its flipped the wrong way. - flip= None - for fi in fill: - if flip != None: - break - for i, vi in enumerate(fi): - if vi==0 and fi[i-1]==1: - flip= False - break - elif vi==1 and fi[i-1]==0: - flip= True - break - - if not flip: - for i, fi in enumerate(fill): - fill[i]= tuple([ii for ii in reversed(fi)]) - - return fill - -def line_value(line_split): - ''' - Returns 1 string represneting the value for this line - None will be returned if theres only 1 word - ''' - length= len(line_split) - if length == 1: - return None - - elif length == 2: - return line_split[1] - - elif length > 2: - return ' '.join( line_split[1:] ) - -# limited replacement for BPyImage.comprehensiveImageLoad -def load_image(imagepath, dirname): - - if os.path.exists(imagepath): - return bpy.data.add_image(imagepath) - - variants = [os.path.join(dirname, imagepath), os.path.join(dirname, os.path.basename(imagepath))] - - for path in variants: - if os.path.exists(path): - return bpy.data.add_image(path) - else: - print(path, "doesn't exist") - - # TODO comprehensiveImageLoad also searched in bpy.config.textureDir - return None - -def obj_image_load(imagepath, DIR, IMAGE_SEARCH): - - if '_' in imagepath: - image= load_image(imagepath.replace('_', ' '), DIR) - if image: return image - - return load_image(imagepath, DIR) - -# def obj_image_load(imagepath, DIR, IMAGE_SEARCH): -# ''' -# Mainly uses comprehensiveImageLoad -# but tries to replace '_' with ' ' for Max's exporter replaces spaces with underscores. -# ''' - -# if '_' in imagepath: -# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) -# if image: return image -# # Did the exporter rename the image? 
-# image= BPyImage.comprehensiveImageLoad(imagepath.replace('_', ' '), DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) -# if image: return image - -# # Return an image, placeholder if it dosnt exist -# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= True, RECURSIVE= IMAGE_SEARCH) -# return image - - -def create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH): - ''' - Create all the used materials in this obj, - assign colors and images to the materials from all referenced material libs - ''' - DIR= stripFile(filepath) - - #==================================================================================# - # This function sets textures defined in .mtl file # - #==================================================================================# - def load_material_image(blender_material, context_material_name, imagepath, type): - - texture= bpy.data.add_texture(type) - texture.type= 'IMAGE' -# texture= bpy.data.textures.new(type) -# texture.setType('Image') - - # Absolute path - c:\.. etc would work here - image= obj_image_load(imagepath, DIR, IMAGE_SEARCH) - has_data = image.has_data if image else False - - if image: - texture.image = image - - # Adds textures for materials (rendering) - if type == 'Kd': - if has_data and image.depth == 32: - # Image has alpha - - # XXX bitmask won't work? - blender_material.add_texture(texture, "UV", ("COLOR", "ALPHA")) - texture.mipmap = True - texture.interpolation = True - texture.use_alpha = True - blender_material.z_transparency = True - blender_material.alpha = 0.0 - -# blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL | Texture.MapTo.ALPHA) -# texture.setImageFlags('MipMap', 'InterPol', 'UseAlpha') -# blender_material.mode |= Material.Modes.ZTRANSP -# blender_material.alpha = 0.0 - else: - blender_material.add_texture(texture, "UV", "COLOR") -# blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL) - - # adds textures to faces (Textured/Alt-Z mode) - # Only apply the diffuse texture to the face if the image has not been set with the inline usemat func. - unique_material_images[context_material_name]= image, has_data # set the texface image - - elif type == 'Ka': - blender_material.add_texture(texture, "UV", "AMBIENT") -# blender_material.setTexture(1, texture, Texture.TexCo.UV, Texture.MapTo.CMIR) # TODO- Add AMB to BPY API - - elif type == 'Ks': - blender_material.add_texture(texture, "UV", "SPECULARITY") -# blender_material.setTexture(2, texture, Texture.TexCo.UV, Texture.MapTo.SPEC) - - elif type == 'Bump': - blender_material.add_texture(texture, "UV", "NORMAL") -# blender_material.setTexture(3, texture, Texture.TexCo.UV, Texture.MapTo.NOR) - elif type == 'D': - blender_material.add_texture(texture, "UV", "ALPHA") - blender_material.z_transparency = True - blender_material.alpha = 0.0 -# blender_material.setTexture(4, texture, Texture.TexCo.UV, Texture.MapTo.ALPHA) -# blender_material.mode |= Material.Modes.ZTRANSP -# blender_material.alpha = 0.0 - # Todo, unset deffuse material alpha if it has an alpha channel - - elif type == 'refl': - blender_material.add_texture(texture, "UV", "REFLECTION") -# blender_material.setTexture(5, texture, Texture.TexCo.UV, Texture.MapTo.REF) - - - # Add an MTL with the same name as the obj if no MTLs are spesified. 
- temp_mtl= stripExt(stripPath(filepath))+ '.mtl' - - if os.path.exists(DIR + temp_mtl) and temp_mtl not in material_libs: -# if sys.exists(DIR + temp_mtl) and temp_mtl not in material_libs: - material_libs.append( temp_mtl ) - del temp_mtl - - #Create new materials - for name in unique_materials: # .keys() - if name != None: - unique_materials[name]= bpy.data.add_material(name) -# unique_materials[name]= bpy.data.materials.new(name) - unique_material_images[name]= None, False # assign None to all material images to start with, add to later. - - unique_materials[None]= None - unique_material_images[None]= None, False - - for libname in material_libs: - mtlpath= DIR + libname - if not os.path.exists(mtlpath): -# if not sys.exists(mtlpath): - #print '\tError Missing MTL: "%s"' % mtlpath - pass - else: - #print '\t\tloading mtl: "%s"' % mtlpath - context_material= None - mtl= open(mtlpath, 'rU') - for line in mtl: #.xreadlines(): - if line.startswith('newmtl'): - context_material_name= line_value(line.split()) - if context_material_name in unique_materials: - context_material = unique_materials[ context_material_name ] - else: - context_material = None - - elif context_material: - # we need to make a material to assign properties to it. - line_split= line.split() - line_lower= line.lower().lstrip() - if line_lower.startswith('ka'): - context_material.mirror_color = (float(line_split[1]), float(line_split[2]), float(line_split[3])) -# context_material.setMirCol((float(line_split[1]), float(line_split[2]), float(line_split[3]))) - elif line_lower.startswith('kd'): - context_material.diffuse_color = (float(line_split[1]), float(line_split[2]), float(line_split[3])) -# context_material.setRGBCol((float(line_split[1]), float(line_split[2]), float(line_split[3]))) - elif line_lower.startswith('ks'): - context_material.specular_color = (float(line_split[1]), float(line_split[2]), float(line_split[3])) -# context_material.setSpecCol((float(line_split[1]), float(line_split[2]), float(line_split[3]))) - elif line_lower.startswith('ns'): - context_material.specular_hardness = int((float(line_split[1])*0.51)) -# context_material.setHardness( int((float(line_split[1])*0.51)) ) - elif line_lower.startswith('ni'): # Refraction index - context_material.ior = max(1, min(float(line_split[1]), 3)) -# context_material.setIOR( max(1, min(float(line_split[1]), 3))) # Between 1 and 3 - elif line_lower.startswith('d') or line_lower.startswith('tr'): - context_material.alpha = float(line_split[1]) -# context_material.setAlpha(float(line_split[1])) - elif line_lower.startswith('map_ka'): - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'Ka') - elif line_lower.startswith('map_ks'): - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'Ks') - elif line_lower.startswith('map_kd'): - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'Kd') - elif line_lower.startswith('map_bump'): - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'Bump') - elif line_lower.startswith('map_d') or line_lower.startswith('map_tr'): # Alpha map - Dissolve - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'D') - - elif 
line_lower.startswith('refl'): # Reflectionmap - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'refl') - mtl.close() - - - - -def split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS): - ''' - Takes vert_loc and faces, and seperates into multiple sets of - (verts_loc, faces, unique_materials, dataname) - This is done so objects do not overload the 16 material limit. - ''' - - filename = stripExt(stripPath(filepath)) - - if not SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS: - # use the filename for the object name since we arnt chopping up the mesh. - return [(verts_loc, faces, unique_materials, filename)] - - - def key_to_name(key): - # if the key is a tuple, join it to make a string - if type(key) == tuple: - return '%s_%s' % key - elif not key: - return filename # assume its a string. make sure this is true if the splitting code is changed - else: - return key - - # Return a key that makes the faces unique. - if SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS: - def face_key(face): - return face[4] # object - - elif not SPLIT_OB_OR_GROUP and SPLIT_MATERIALS: - def face_key(face): - return face[2] # material - - else: # Both - def face_key(face): - return face[4], face[2] # object,material - - - face_split_dict= {} - - oldkey= -1 # initialize to a value that will never match the key - - for face in faces: - - key= face_key(face) - - if oldkey != key: - # Check the key has changed. - try: - verts_split, faces_split, unique_materials_split, vert_remap= face_split_dict[key] - except KeyError: - faces_split= [] - verts_split= [] - unique_materials_split= {} - vert_remap= [-1]*len(verts_loc) - - face_split_dict[key]= (verts_split, faces_split, unique_materials_split, vert_remap) - - oldkey= key - - face_vert_loc_indicies= face[0] - - # Remap verts to new vert list and add where needed - for enum, i in enumerate(face_vert_loc_indicies): - if vert_remap[i] == -1: - new_index= len(verts_split) - vert_remap[i]= new_index # set the new remapped index so we only add once and can reference next time. 
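# The remap step here follows a common pattern: a table the size of the global
# vertex list, filled lazily so each vertex is copied into the split-off mesh
# only once, while the face is rewritten to the new local indices.  A small
# standalone sketch of the same idea (names are illustrative, not the importer's):
def remap_face(face_indices, verts_loc, verts_split, vert_remap):
    # vert_remap[i] stays -1 until global vertex i has been copied to verts_split
    for n, i in enumerate(face_indices):
        if vert_remap[i] == -1:
            vert_remap[i] = len(verts_split)   # remember the local index
            verts_split.append(verts_loc[i])   # copy the vertex exactly once
        face_indices[n] = vert_remap[i]        # rewrite the face in-place

# usage: a quad's vertex list, with one face that only touches three of them
verts_loc = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0), (0.0, 1.0, 0.0)]
verts_split, vert_remap = [], [-1] * len(verts_loc)
face = [0, 2, 3]
remap_face(face, verts_loc, verts_split, vert_remap)
# face is now [0, 1, 2] and verts_split holds just the three vertices it uses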
- face_vert_loc_indicies[enum] = new_index # remap to the local index - verts_split.append( verts_loc[i] ) # add the vert to the local verts - - else: - face_vert_loc_indicies[enum] = vert_remap[i] # remap to the local index - - matname= face[2] - if matname and matname not in unique_materials_split: - unique_materials_split[matname] = unique_materials[matname] - - faces_split.append(face) - - - # remove one of the itemas and reorder - return [(value[0], value[1], value[2], key_to_name(key)) for key, value in list(face_split_dict.items())] - - -def create_mesh(scn, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc, verts_tex, faces, unique_materials, unique_material_images, unique_smooth_groups, vertex_groups, dataname): - ''' - Takes all the data gathered and generates a mesh, adding the new object to new_objects - deals with fgons, sharp edges and assigning materials - ''' - if not has_ngons: - CREATE_FGONS= False - - if unique_smooth_groups: - sharp_edges= {} - smooth_group_users= dict([ (context_smooth_group, {}) for context_smooth_group in list(unique_smooth_groups.keys()) ]) - context_smooth_group_old= -1 - - # Split fgons into tri's - fgon_edges= {} # Used for storing fgon keys - if CREATE_EDGES: - edges= [] - - context_object= None - - # reverse loop through face indicies - for f_idx in range(len(faces)-1, -1, -1): - - face_vert_loc_indicies,\ - face_vert_tex_indicies,\ - context_material,\ - context_smooth_group,\ - context_object= faces[f_idx] - - len_face_vert_loc_indicies = len(face_vert_loc_indicies) - - if len_face_vert_loc_indicies==1: - faces.pop(f_idx)# cant add single vert faces - - elif not face_vert_tex_indicies or len_face_vert_loc_indicies == 2: # faces that have no texture coords are lines - if CREATE_EDGES: - # generators are better in python 2.4+ but can't be used in 2.3 - # edges.extend( (face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1) ) - edges.extend( [(face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in range(len_face_vert_loc_indicies-1)] ) - - faces.pop(f_idx) - else: - - # Smooth Group - if unique_smooth_groups and context_smooth_group: - # Is a part of of a smooth group and is a face - if context_smooth_group_old is not context_smooth_group: - edge_dict= smooth_group_users[context_smooth_group] - context_smooth_group_old= context_smooth_group - - for i in range(len_face_vert_loc_indicies): - i1= face_vert_loc_indicies[i] - i2= face_vert_loc_indicies[i-1] - if i1>i2: i1,i2= i2,i1 - - try: - edge_dict[i1,i2]+= 1 - except KeyError: - edge_dict[i1,i2]= 1 - - # FGons into triangles - if has_ngons and len_face_vert_loc_indicies > 4: - - ngon_face_indices= BPyMesh_ngon(verts_loc, face_vert_loc_indicies) - faces.extend(\ - [(\ - [face_vert_loc_indicies[ngon[0]], face_vert_loc_indicies[ngon[1]], face_vert_loc_indicies[ngon[2]] ],\ - [face_vert_tex_indicies[ngon[0]], face_vert_tex_indicies[ngon[1]], face_vert_tex_indicies[ngon[2]] ],\ - context_material,\ - context_smooth_group,\ - context_object)\ - for ngon in ngon_face_indices]\ - ) - - # edges to make fgons - if CREATE_FGONS: - edge_users= {} - for ngon in ngon_face_indices: - for i in (0,1,2): - i1= face_vert_loc_indicies[ngon[i ]] - i2= face_vert_loc_indicies[ngon[i-1]] - if i1>i2: i1,i2= i2,i1 - - try: - edge_users[i1,i2]+=1 - except KeyError: - edge_users[i1,i2]= 1 - - for key, users in edge_users.items(): - if users>1: - fgon_edges[key]= None - - # remove all after 3, means we dont have to pop this one. 
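# Both the fgon bookkeeping just above and the sharp-edge pass that follows use
# the same trick: store every edge under a direction-independent key (the sorted
# index pair) and count how many triangles reference it.  Interior edges of a
# triangulated ngon show up more than once; edges seen exactly once inside a
# smooth group lie on its boundary.  A sketch of that counting on its own
# (count_edge_users() is illustrative, not part of the importer):
def count_edge_users(triangles):
    edge_users = {}
    for tri in triangles:
        for a, b in ((tri[0], tri[1]), (tri[1], tri[2]), (tri[2], tri[0])):
            key = (a, b) if a < b else (b, a)          # canonical edge key
            edge_users[key] = edge_users.get(key, 0) + 1
    return edge_users

users = count_edge_users([(0, 1, 2), (0, 2, 3)])       # a quad split in two
interior = [key for key, n in users.items() if n > 1]  # -> [(0, 2)], the diagonal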
- faces.pop(f_idx) - - - # Build sharp edges - if unique_smooth_groups: - for edge_dict in list(smooth_group_users.values()): - for key, users in list(edge_dict.items()): - if users==1: # This edge is on the boundry of a group - sharp_edges[key]= None - - - # map the material names to an index - material_mapping= dict([(name, i) for i, name in enumerate(unique_materials)]) # enumerate over unique_materials keys() - - materials= [None] * len(unique_materials) - - for name, index in list(material_mapping.items()): - materials[index]= unique_materials[name] - - me= bpy.data.add_mesh(dataname) -# me= bpy.data.meshes.new(dataname) - - # make sure the list isnt too big - for material in materials[0:16]: - me.add_material(material) -# me.materials= materials[0:16] # make sure the list isnt too big. - #me.verts.extend([(0,0,0)]) # dummy vert - - me.add_geometry(len(verts_loc), 0, len(faces)) - - # verts_loc is a list of (x, y, z) tuples - me.verts.foreach_set("co", unpack_list(verts_loc)) -# me.verts.extend(verts_loc) - - # faces is a list of (vert_indices, texco_indices, ...) tuples - # XXX faces should contain either 3 or 4 verts - # XXX no check for valid face indices - me.faces.foreach_set("verts_raw", unpack_face_list([f[0] for f in faces])) -# face_mapping= me.faces.extend([f[0] for f in faces], indexList=True) - - if verts_tex and me.faces: - me.add_uv_texture() -# me.faceUV= 1 - # TEXMODE= Mesh.FaceModes['TEX'] - - context_material_old= -1 # avoid a dict lookup - mat= 0 # rare case it may be un-initialized. - me_faces= me.faces -# ALPHA= Mesh.FaceTranspModes.ALPHA - - for i, face in enumerate(faces): - if len(face[0]) < 2: - pass #raise "bad face" - elif len(face[0])==2: - if CREATE_EDGES: - edges.append(face[0]) - else: -# face_index_map= face_mapping[i] - - # since we use foreach_set to add faces, all of them are added - if 1: -# if face_index_map!=None: # None means the face wasnt added - - blender_face = me.faces[i] -# blender_face= me_faces[face_index_map] - - face_vert_loc_indicies,\ - face_vert_tex_indicies,\ - context_material,\ - context_smooth_group,\ - context_object= face - - - - if context_smooth_group: - blender_face.smooth= True - - if context_material: - if context_material_old is not context_material: - mat= material_mapping[context_material] - if mat>15: - mat= 15 - context_material_old= context_material - - blender_face.material_index= mat -# blender_face.mat= mat - - - if verts_tex: - - blender_tface= me.uv_textures[0].data[i] - - if context_material: - image, has_data= unique_material_images[context_material] - if image: # Can be none if the material dosnt have an image. - blender_tface.image= image -# blender_face.image= image - if has_data: -# if has_data and image.depth == 32: - blender_tface.transp = 'ALPHA' -# blender_face.transp |= ALPHA - - # BUG - Evil eekadoodle problem where faces that have vert index 0 location at 3 or 4 are shuffled. 
- if len(face_vert_loc_indicies)==4: - if face_vert_loc_indicies[2]==0 or face_vert_loc_indicies[3]==0: - face_vert_tex_indicies= face_vert_tex_indicies[2], face_vert_tex_indicies[3], face_vert_tex_indicies[0], face_vert_tex_indicies[1] - else: # length of 3 - if face_vert_loc_indicies[2]==0: - face_vert_tex_indicies= face_vert_tex_indicies[1], face_vert_tex_indicies[2], face_vert_tex_indicies[0] - # END EEEKADOODLE FIX - - # assign material, uv's and image - blender_tface.uv1= verts_tex[face_vert_tex_indicies[0]] - blender_tface.uv2= verts_tex[face_vert_tex_indicies[1]] - blender_tface.uv3= verts_tex[face_vert_tex_indicies[2]] - - if blender_face.verts[3] != 0: - blender_tface.uv4= verts_tex[face_vert_tex_indicies[3]] - -# for ii, uv in enumerate(blender_face.uv): -# uv.x, uv.y= verts_tex[face_vert_tex_indicies[ii]] - del me_faces -# del ALPHA - - if CREATE_EDGES: - - me.add_geometry(0, len(edges), 0) - - # edges should be a list of (a, b) tuples - me.edges.foreach_set("verts", unpack_list(edges)) -# me_edges.extend( edges ) - -# del me_edges - - # Add edge faces. -# me_edges= me.edges - - def edges_match(e1, e2): - return (e1[0] == e2[0] and e1[1] == e2[1]) or (e1[0] == e2[1] and e1[1] == e2[0]) - - # XXX slow -# if CREATE_FGONS and fgon_edges: -# for fgon_edge in fgon_edges.keys(): -# for ed in me.edges: -# if edges_match(fgon_edge, ed.verts): -# ed.fgon = True - -# if CREATE_FGONS and fgon_edges: -# FGON= Mesh.EdgeFlags.FGON -# for ed in me.findEdges( fgon_edges.keys() ): -# if ed!=None: -# me_edges[ed].flag |= FGON -# del FGON - - # XXX slow -# if unique_smooth_groups and sharp_edges: -# for sharp_edge in sharp_edges.keys(): -# for ed in me.edges: -# if edges_match(sharp_edge, ed.verts): -# ed.sharp = True - -# if unique_smooth_groups and sharp_edges: -# SHARP= Mesh.EdgeFlags.SHARP -# for ed in me.findEdges( sharp_edges.keys() ): -# if ed!=None: -# me_edges[ed].flag |= SHARP -# del SHARP - - me.update() -# me.calcNormals() - - ob= bpy.data.add_object("MESH", "Mesh") - ob.data= me - scn.add_object(ob) -# ob= scn.objects.new(me) - new_objects.append(ob) - - # Create the vertex groups. No need to have the flag passed here since we test for the - # content of the vertex_groups. 
If the user selects to NOT have vertex groups saved then - # the following test will never run - for group_name, group_indicies in vertex_groups.items(): - group= ob.add_vertex_group(group_name) -# me.addVertGroup(group_name) - for vertex_index in group_indicies: - ob.add_vertex_to_group(vertex_index, group, 1.0, 'REPLACE') -# me.assignVertsToGroup(group_name, group_indicies, 1.00, Mesh.AssignModes.REPLACE) - - -def create_nurbs(scn, context_nurbs, vert_loc, new_objects): - ''' - Add nurbs object to blender, only support one type at the moment - ''' - deg = context_nurbs.get('deg', (3,)) - curv_range = context_nurbs.get('curv_range', None) - curv_idx = context_nurbs.get('curv_idx', []) - parm_u = context_nurbs.get('parm_u', []) - parm_v = context_nurbs.get('parm_v', []) - name = context_nurbs.get('name', 'ObjNurb') - cstype = context_nurbs.get('cstype', None) - - if cstype == None: - print('\tWarning, cstype not found') - return - if cstype != 'bspline': - print('\tWarning, cstype is not supported (only bspline)') - return - if not curv_idx: - print('\tWarning, curv argument empty or not set') - return - if len(deg) > 1 or parm_v: - print('\tWarning, surfaces not supported') - return - - cu = bpy.data.curves.new(name, 'Curve') - cu.flag |= 1 # 3D curve - - nu = None - for pt in curv_idx: - - pt = vert_loc[pt] - pt = (pt[0], pt[1], pt[2], 1.0) - - if nu == None: - nu = cu.appendNurb(pt) - else: - nu.append(pt) - - nu.orderU = deg[0]+1 - - # get for endpoint flag from the weighting - if curv_range and len(parm_u) > deg[0]+1: - do_endpoints = True - for i in range(deg[0]+1): - - if abs(parm_u[i]-curv_range[0]) > 0.0001: - do_endpoints = False - break - - if abs(parm_u[-(i+1)]-curv_range[1]) > 0.0001: - do_endpoints = False - break - - else: - do_endpoints = False - - if do_endpoints: - nu.flagU |= 2 - - - # close - ''' - do_closed = False - if len(parm_u) > deg[0]+1: - for i in xrange(deg[0]+1): - #print curv_idx[i], curv_idx[-(i+1)] - - if curv_idx[i]==curv_idx[-(i+1)]: - do_closed = True - break - - if do_closed: - nu.flagU |= 1 - ''' - - ob = scn.objects.new(cu) - new_objects.append(ob) - - -def strip_slash(line_split): - if line_split[-1][-1]== '\\': - if len(line_split[-1])==1: - line_split.pop() # remove the \ item - else: - line_split[-1]= line_split[-1][:-1] # remove the \ from the end last number - return True - return False - - - -def get_float_func(filepath): - ''' - find the float function for this obj file - - weather to replace commas or not - ''' - file= open(filepath, 'rU') - for line in file: #.xreadlines(): - line = line.lstrip() - if line.startswith('v'): # vn vt v - if ',' in line: - return lambda f: float(f.replace(',', '.')) - elif '.' in line: - return float - - # incase all vert values were ints - return float - -def load_obj(filepath, - context, - CLAMP_SIZE= 0.0, - CREATE_FGONS= True, - CREATE_SMOOTH_GROUPS= True, - CREATE_EDGES= True, - SPLIT_OBJECTS= True, - SPLIT_GROUPS= True, - SPLIT_MATERIALS= True, - ROTATE_X90= True, - IMAGE_SEARCH=True, - POLYGROUPS=False): - ''' - Called by the user interface or another script. - load_obj(path) - should give acceptable results. 
- This function passes the file and sends the data off - to be split into objects and then converted into mesh objects - ''' - print('\nimporting obj "%s"' % filepath) - - if SPLIT_OBJECTS or SPLIT_GROUPS or SPLIT_MATERIALS: - POLYGROUPS = False - - time_main= time.time() -# time_main= sys.time() - - verts_loc= [] - verts_tex= [] - faces= [] # tuples of the faces - material_libs= [] # filanems to material libs this uses - vertex_groups = {} # when POLYGROUPS is true - - # Get the string to float conversion func for this file- is 'float' for almost all files. - float_func= get_float_func(filepath) - - # Context variables - context_material= None - context_smooth_group= None - context_object= None - context_vgroup = None - - # Nurbs - context_nurbs = {} - nurbs = [] - context_parm = '' # used by nurbs too but could be used elsewhere - - has_ngons= False - # has_smoothgroups= False - is explicit with len(unique_smooth_groups) being > 0 - - # Until we can use sets - unique_materials= {} - unique_material_images= {} - unique_smooth_groups= {} - # unique_obects= {} - no use for this variable since the objects are stored in the face. - - # when there are faces that end with \ - # it means they are multiline- - # since we use xreadline we cant skip to the next line - # so we need to know weather - context_multi_line= '' - - print('\tparsing obj file "%s"...' % filepath) - time_sub= time.time() -# time_sub= sys.time() - - file= open(filepath, 'rU') - for line in file: #.xreadlines(): - line = line.lstrip() # rare cases there is white space at the start of the line - - if line.startswith('v '): - line_split= line.split() - # rotate X90: (x,-z,y) - verts_loc.append( (float_func(line_split[1]), -float_func(line_split[3]), float_func(line_split[2])) ) - - elif line.startswith('vn '): - pass - - elif line.startswith('vt '): - line_split= line.split() - verts_tex.append( (float_func(line_split[1]), float_func(line_split[2])) ) - - # Handel faces lines (as faces) and the second+ lines of fa multiline face here - # use 'f' not 'f ' because some objs (very rare have 'fo ' for faces) - elif line.startswith('f') or context_multi_line == 'f': - - if context_multi_line: - # use face_vert_loc_indicies and face_vert_tex_indicies previously defined and used the obj_face - line_split= line.split() - - else: - line_split= line[2:].split() - face_vert_loc_indicies= [] - face_vert_tex_indicies= [] - - # Instance a face - faces.append((\ - face_vert_loc_indicies,\ - face_vert_tex_indicies,\ - context_material,\ - context_smooth_group,\ - context_object\ - )) - - if strip_slash(line_split): - context_multi_line = 'f' - else: - context_multi_line = '' - - for v in line_split: - obj_vert= v.split('/') - - vert_loc_index= int(obj_vert[0])-1 - # Add the vertex to the current group - # *warning*, this wont work for files that have groups defined around verts - if POLYGROUPS and context_vgroup: - vertex_groups[context_vgroup].append(vert_loc_index) - - # Make relative negative vert indicies absolute - if vert_loc_index < 0: - vert_loc_index= len(verts_loc) + vert_loc_index + 1 - - face_vert_loc_indicies.append(vert_loc_index) - - if len(obj_vert)>1 and obj_vert[1]: - # formatting for faces with normals and textures us - # loc_index/tex_index/nor_index - - vert_tex_index= int(obj_vert[1])-1 - # Make relative negative vert indicies absolute - if vert_tex_index < 0: - vert_tex_index= len(verts_tex) + vert_tex_index + 1 - - face_vert_tex_indicies.append(vert_tex_index) - else: - # dummy - face_vert_tex_indicies.append(0) - - if 
len(face_vert_loc_indicies) > 4: - has_ngons= True - - elif CREATE_EDGES and (line.startswith('l ') or context_multi_line == 'l'): - # very similar to the face load function above with some parts removed - - if context_multi_line: - # use face_vert_loc_indicies and face_vert_tex_indicies previously defined and used the obj_face - line_split= line.split() - - else: - line_split= line[2:].split() - face_vert_loc_indicies= [] - face_vert_tex_indicies= [] - - # Instance a face - faces.append((\ - face_vert_loc_indicies,\ - face_vert_tex_indicies,\ - context_material,\ - context_smooth_group,\ - context_object\ - )) - - if strip_slash(line_split): - context_multi_line = 'l' - else: - context_multi_line = '' - - isline= line.startswith('l') - - for v in line_split: - vert_loc_index= int(v)-1 - - # Make relative negative vert indicies absolute - if vert_loc_index < 0: - vert_loc_index= len(verts_loc) + vert_loc_index + 1 - - face_vert_loc_indicies.append(vert_loc_index) - - elif line.startswith('s'): - if CREATE_SMOOTH_GROUPS: - context_smooth_group= line_value(line.split()) - if context_smooth_group=='off': - context_smooth_group= None - elif context_smooth_group: # is not None - unique_smooth_groups[context_smooth_group]= None - - elif line.startswith('o'): - if SPLIT_OBJECTS: - context_object= line_value(line.split()) - # unique_obects[context_object]= None - - elif line.startswith('g'): - if SPLIT_GROUPS: - context_object= line_value(line.split()) - # print 'context_object', context_object - # unique_obects[context_object]= None - elif POLYGROUPS: - context_vgroup = line_value(line.split()) - if context_vgroup and context_vgroup != '(null)': - vertex_groups.setdefault(context_vgroup, []) - else: - context_vgroup = None # dont assign a vgroup - - elif line.startswith('usemtl'): - context_material= line_value(line.split()) - unique_materials[context_material]= None - elif line.startswith('mtllib'): # usemap or usemat - material_libs.extend( line.split()[1:] ) # can have multiple mtllib filenames per line - - - # Nurbs support - elif line.startswith('cstype '): - context_nurbs['cstype']= line_value(line.split()) # 'rat bspline' / 'bspline' - elif line.startswith('curv ') or context_multi_line == 'curv': - line_split= line.split() - - curv_idx = context_nurbs['curv_idx'] = context_nurbs.get('curv_idx', []) # incase were multiline - - if not context_multi_line: - context_nurbs['curv_range'] = float_func(line_split[1]), float_func(line_split[2]) - line_split[0:3] = [] # remove first 3 items - - if strip_slash(line_split): - context_multi_line = 'curv' - else: - context_multi_line = '' - - - for i in line_split: - vert_loc_index = int(i)-1 - - if vert_loc_index < 0: - vert_loc_index= len(verts_loc) + vert_loc_index + 1 - - curv_idx.append(vert_loc_index) - - elif line.startswith('parm') or context_multi_line == 'parm': - line_split= line.split() - - if context_multi_line: - context_multi_line = '' - else: - context_parm = line_split[1] - line_split[0:2] = [] # remove first 2 - - if strip_slash(line_split): - context_multi_line = 'parm' - else: - context_multi_line = '' - - if context_parm.lower() == 'u': - context_nurbs.setdefault('parm_u', []).extend( [float_func(f) for f in line_split] ) - elif context_parm.lower() == 'v': # surfaces not suported yet - context_nurbs.setdefault('parm_v', []).extend( [float_func(f) for f in line_split] ) - # else: # may want to support other parm's ? 
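# The context_multi_line / strip_slash() pairing used throughout this loop is
# just a way of stitching backslash-continued OBJ statements back together
# before they are parsed.  The same idea as a standalone generator
# (join_continued_lines() is illustrative, not part of the importer):
def join_continued_lines(lines):
    buffered = ''
    for raw in lines:
        line = buffered + raw.rstrip('\n')
        if line.endswith('\\'):
            buffered = line[:-1] + ' '   # drop the backslash, keep accumulating
            continue
        buffered = ''
        if line.strip():
            yield line

list(join_continued_lines(['f 1 2 3 \\\n', '  4 5\n']))
# -> a single logical 'f' statement covering all five indices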
- - elif line.startswith('deg '): - context_nurbs['deg']= [int(i) for i in line.split()[1:]] - elif line.startswith('end'): - # Add the nurbs curve - if context_object: - context_nurbs['name'] = context_object - nurbs.append(context_nurbs) - context_nurbs = {} - context_parm = '' - - ''' # How to use usemap? depricated? - elif line.startswith('usema'): # usemap or usemat - context_image= line_value(line.split()) - ''' - - file.close() - time_new= time.time() -# time_new= sys.time() - print('%.4f sec' % (time_new-time_sub)) - time_sub= time_new - - - print('\tloading materials and images...') - create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH) - - time_new= time.time() -# time_new= sys.time() - print('%.4f sec' % (time_new-time_sub)) - time_sub= time_new - - if not ROTATE_X90: - verts_loc[:] = [(v[0], v[2], -v[1]) for v in verts_loc] - - # deselect all -# if context.selected_objects: -# bpy.ops.OBJECT_OT_select_all_toggle() - - scene = context.scene -# scn = bpy.data.scenes.active -# scn.objects.selected = [] - new_objects= [] # put new objects here - - print('\tbuilding geometry...\n\tverts:%i faces:%i materials: %i smoothgroups:%i ...' % ( len(verts_loc), len(faces), len(unique_materials), len(unique_smooth_groups) )) - # Split the mesh by objects/materials, may - if SPLIT_OBJECTS or SPLIT_GROUPS: SPLIT_OB_OR_GROUP = True - else: SPLIT_OB_OR_GROUP = False - - for verts_loc_split, faces_split, unique_materials_split, dataname in split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS): - # Create meshes from the data, warning 'vertex_groups' wont support splitting - create_mesh(scene, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc_split, verts_tex, faces_split, unique_materials_split, unique_material_images, unique_smooth_groups, vertex_groups, dataname) - - # nurbs support -# for context_nurbs in nurbs: -# create_nurbs(scn, context_nurbs, verts_loc, new_objects) - - - axis_min= [ 1000000000]*3 - axis_max= [-1000000000]*3 - -# if CLAMP_SIZE: -# # Get all object bounds -# for ob in new_objects: -# for v in ob.getBoundBox(): -# for axis, value in enumerate(v): -# if axis_min[axis] > value: axis_min[axis]= value -# if axis_max[axis] < value: axis_max[axis]= value - -# # Scale objects -# max_axis= max(axis_max[0]-axis_min[0], axis_max[1]-axis_min[1], axis_max[2]-axis_min[2]) -# scale= 1.0 - -# while CLAMP_SIZE < max_axis * scale: -# scale= scale/10.0 - -# for ob in new_objects: -# ob.setSize(scale, scale, scale) - - # Better rotate the vert locations - #if not ROTATE_X90: - # for ob in new_objects: - # ob.RotX = -1.570796326794896558 - - time_new= time.time() -# time_new= sys.time() - - print('%.4f sec' % (time_new-time_sub)) - print('finished importing: "%s" in %.4f sec.' 
% (filepath, (time_new-time_main))) - - -DEBUG= True - - -def load_obj_ui(filepath, BATCH_LOAD= False): - if BPyMessages.Error_NoFile(filepath): - return - - global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90 - - CREATE_SMOOTH_GROUPS= Draw.Create(0) - CREATE_FGONS= Draw.Create(1) - CREATE_EDGES= Draw.Create(1) - SPLIT_OBJECTS= Draw.Create(0) - SPLIT_GROUPS= Draw.Create(0) - SPLIT_MATERIALS= Draw.Create(0) - CLAMP_SIZE= Draw.Create(10.0) - IMAGE_SEARCH= Draw.Create(1) - POLYGROUPS= Draw.Create(0) - KEEP_VERT_ORDER= Draw.Create(1) - ROTATE_X90= Draw.Create(1) - - - # Get USER Options - # Note, Works but not pretty, instead use a more complicated GUI - ''' - pup_block= [\ - 'Import...',\ - ('Smooth Groups', CREATE_SMOOTH_GROUPS, 'Surround smooth groups by sharp edges'),\ - ('Create FGons', CREATE_FGONS, 'Import faces with more then 4 verts as fgons.'),\ - ('Lines', CREATE_EDGES, 'Import lines and faces with 2 verts as edges'),\ - 'Separate objects from obj...',\ - ('Object', SPLIT_OBJECTS, 'Import OBJ Objects into Blender Objects'),\ - ('Group', SPLIT_GROUPS, 'Import OBJ Groups into Blender Objects'),\ - ('Material', SPLIT_MATERIALS, 'Import each material into a seperate mesh (Avoids > 16 per mesh error)'),\ - 'Options...',\ - ('Keep Vert Order', KEEP_VERT_ORDER, 'Keep vert and face order, disables some other options.'),\ - ('Clamp Scale:', CLAMP_SIZE, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)'),\ - ('Image Search', IMAGE_SEARCH, 'Search subdirs for any assosiated images (Warning, may be slow)'),\ - ] - - if not Draw.PupBlock('Import OBJ...', pup_block): - return - - if KEEP_VERT_ORDER.val: - SPLIT_OBJECTS.val = False - SPLIT_GROUPS.val = False - SPLIT_MATERIALS.val = False - ''' - - - - # BEGIN ALTERNATIVE UI ******************* - if True: - - EVENT_NONE = 0 - EVENT_EXIT = 1 - EVENT_REDRAW = 2 - EVENT_IMPORT = 3 - - GLOBALS = {} - GLOBALS['EVENT'] = EVENT_REDRAW - #GLOBALS['MOUSE'] = Window.GetMouseCoords() - GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()] - - def obj_ui_set_event(e,v): - GLOBALS['EVENT'] = e - - def do_split(e,v): - global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS - if SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val: - KEEP_VERT_ORDER.val = 0 - POLYGROUPS.val = 0 - else: - KEEP_VERT_ORDER.val = 1 - - def do_vertorder(e,v): - global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER - if KEEP_VERT_ORDER.val: - SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0 - else: - if not (SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val): - KEEP_VERT_ORDER.val = 1 - - def do_polygroups(e,v): - global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS - if POLYGROUPS.val: - SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0 - - def do_help(e,v): - url = __url__[0] - print('Trying to open web browser with documentation at this address...') - print('\t' + url) - - try: - import webbrowser - webbrowser.open(url) - except: - print('...could not open a browser window.') - - def obj_ui(): - ui_x, ui_y = GLOBALS['MOUSE'] - - # Center based on overall pup size - ui_x -= 165 - ui_y -= 90 - - global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90 - - Draw.Label('Import...', ui_x+9, ui_y+159, 220, 21) - Draw.BeginAlign() - 
CREATE_SMOOTH_GROUPS = Draw.Toggle('Smooth Groups', EVENT_NONE, ui_x+9, ui_y+139, 110, 20, CREATE_SMOOTH_GROUPS.val, 'Surround smooth groups by sharp edges') - CREATE_FGONS = Draw.Toggle('NGons as FGons', EVENT_NONE, ui_x+119, ui_y+139, 110, 20, CREATE_FGONS.val, 'Import faces with more then 4 verts as fgons') - CREATE_EDGES = Draw.Toggle('Lines as Edges', EVENT_NONE, ui_x+229, ui_y+139, 110, 20, CREATE_EDGES.val, 'Import lines and faces with 2 verts as edges') - Draw.EndAlign() - - Draw.Label('Separate objects by OBJ...', ui_x+9, ui_y+110, 220, 20) - Draw.BeginAlign() - SPLIT_OBJECTS = Draw.Toggle('Object', EVENT_REDRAW, ui_x+9, ui_y+89, 55, 21, SPLIT_OBJECTS.val, 'Import OBJ Objects into Blender Objects', do_split) - SPLIT_GROUPS = Draw.Toggle('Group', EVENT_REDRAW, ui_x+64, ui_y+89, 55, 21, SPLIT_GROUPS.val, 'Import OBJ Groups into Blender Objects', do_split) - SPLIT_MATERIALS = Draw.Toggle('Material', EVENT_REDRAW, ui_x+119, ui_y+89, 60, 21, SPLIT_MATERIALS.val, 'Import each material into a seperate mesh (Avoids > 16 per mesh error)', do_split) - Draw.EndAlign() - - # Only used for user feedback - KEEP_VERT_ORDER = Draw.Toggle('Keep Vert Order', EVENT_REDRAW, ui_x+184, ui_y+89, 113, 21, KEEP_VERT_ORDER.val, 'Keep vert and face order, disables split options, enable for morph targets', do_vertorder) - - ROTATE_X90 = Draw.Toggle('-X90', EVENT_REDRAW, ui_x+302, ui_y+89, 38, 21, ROTATE_X90.val, 'Rotate X 90.') - - Draw.Label('Options...', ui_x+9, ui_y+60, 211, 20) - CLAMP_SIZE = Draw.Number('Clamp Scale: ', EVENT_NONE, ui_x+9, ui_y+39, 130, 21, CLAMP_SIZE.val, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)') - POLYGROUPS = Draw.Toggle('Poly Groups', EVENT_REDRAW, ui_x+144, ui_y+39, 90, 21, POLYGROUPS.val, 'Import OBJ groups as vertex groups.', do_polygroups) - IMAGE_SEARCH = Draw.Toggle('Image Search', EVENT_NONE, ui_x+239, ui_y+39, 100, 21, IMAGE_SEARCH.val, 'Search subdirs for any assosiated images (Warning, may be slow)') - Draw.BeginAlign() - Draw.PushButton('Online Help', EVENT_REDRAW, ui_x+9, ui_y+9, 110, 21, 'Load the wiki page for this script', do_help) - Draw.PushButton('Cancel', EVENT_EXIT, ui_x+119, ui_y+9, 110, 21, '', obj_ui_set_event) - Draw.PushButton('Import', EVENT_IMPORT, ui_x+229, ui_y+9, 110, 21, 'Import with these settings', obj_ui_set_event) - Draw.EndAlign() - - - # hack so the toggle buttons redraw. 
this is not nice at all - while GLOBALS['EVENT'] not in (EVENT_EXIT, EVENT_IMPORT): - Draw.UIBlock(obj_ui, 0) - - if GLOBALS['EVENT'] != EVENT_IMPORT: - return - - # END ALTERNATIVE UI ********************* - - - - - - - - Window.WaitCursor(1) - - if BATCH_LOAD: # load the dir - try: - files= [ f for f in os.listdir(filepath) if f.lower().endswith('.obj') ] - except: - Window.WaitCursor(0) - Draw.PupMenu('Error%t|Could not open path ' + filepath) - return - - if not files: - Window.WaitCursor(0) - Draw.PupMenu('Error%t|No files at path ' + filepath) - return - - for f in files: - scn= bpy.data.scenes.new( stripExt(f) ) - scn.makeCurrent() - - load_obj(sys.join(filepath, f),\ - CLAMP_SIZE.val,\ - CREATE_FGONS.val,\ - CREATE_SMOOTH_GROUPS.val,\ - CREATE_EDGES.val,\ - SPLIT_OBJECTS.val,\ - SPLIT_GROUPS.val,\ - SPLIT_MATERIALS.val,\ - ROTATE_X90.val,\ - IMAGE_SEARCH.val,\ - POLYGROUPS.val - ) - - else: # Normal load - load_obj(filepath,\ - CLAMP_SIZE.val,\ - CREATE_FGONS.val,\ - CREATE_SMOOTH_GROUPS.val,\ - CREATE_EDGES.val,\ - SPLIT_OBJECTS.val,\ - SPLIT_GROUPS.val,\ - SPLIT_MATERIALS.val,\ - ROTATE_X90.val,\ - IMAGE_SEARCH.val,\ - POLYGROUPS.val - ) - - Window.WaitCursor(0) - - -def load_obj_ui_batch(file): - load_obj_ui(file, True) - -DEBUG= False - -# if __name__=='__main__' and not DEBUG: -# if os and Window.GetKeyQualifiers() & Window.Qual.SHIFT: -# Window.FileSelector(load_obj_ui_batch, 'Import OBJ Dir', '') -# else: -# Window.FileSelector(load_obj_ui, 'Import a Wavefront OBJ', '*.obj') - - # For testing compatibility -''' -else: - # DEBUG ONLY - TIME= sys.time() - DIR = '/fe/obj' - import os - print 'Searching for files' - def fileList(path): - for dirpath, dirnames, filenames in os.walk(path): - for filename in filenames: - yield os.path.join(dirpath, filename) - - files = [f for f in fileList(DIR) if f.lower().endswith('.obj')] - files.sort() - - for i, obj_file in enumerate(files): - if 0 < i < 20: - print 'Importing', obj_file, '\nNUMBER', i, 'of', len(files) - newScn= bpy.data.scenes.new(os.path.basename(obj_file)) - newScn.makeCurrent() - load_obj(obj_file, False, IMAGE_SEARCH=0) - - print 'TOTAL TIME: %.6f' % (sys.time() - TIME) -''' -#load_obj('/test.obj') -#load_obj('/fe/obj/mba1.obj') - - - -class IMPORT_OT_obj(bpy.types.Operator): - ''' - Operator documentation text, will be used for the operator tooltip and python docs. - ''' - __idname__ = "import.obj" - __label__ = "Import OBJ" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. 
- - __props__ = [ - bpy.props.StringProperty(attr="path", name="File Path", description="File path used for importing the OBJ file", maxlen= 1024, default= ""), - - bpy.props.BoolProperty(attr="CREATE_SMOOTH_GROUPS", name="Smooth Groups", description="Surround smooth groups by sharp edges", default= True), - bpy.props.BoolProperty(attr="CREATE_FGONS", name="NGons as FGons", description="Import faces with more then 4 verts as fgons", default= True), - bpy.props.BoolProperty(attr="CREATE_EDGES", name="Lines as Edges", description="Import lines and faces with 2 verts as edge", default= True), - bpy.props.BoolProperty(attr="SPLIT_OBJECTS", name="Object", description="Import OBJ Objects into Blender Objects", default= True), - bpy.props.BoolProperty(attr="SPLIT_GROUPS", name="Group", description="Import OBJ Groups into Blender Objects", default= True), - bpy.props.BoolProperty(attr="SPLIT_MATERIALS", name="Material", description="Import each material into a seperate mesh (Avoids > 16 per mesh error)", default= True), - # old comment: only used for user feedback - # disabled this option because in old code a handler for it disabled SPLIT* params, it's not passed to load_obj - # bpy.props.BoolProperty(attr="KEEP_VERT_ORDER", name="Keep Vert Order", description="Keep vert and face order, disables split options, enable for morph targets", default= True), - bpy.props.BoolProperty(attr="ROTATE_X90", name="-X90", description="Rotate X 90.", default= True), - bpy.props.FloatProperty(attr="CLAMP_SIZE", name="Clamp Scale", description="Clamp the size to this maximum (Zero to Disable)", min=0.01, max=1000.0, soft_min=0.0, soft_max=1000.0, default=0.0), - bpy.props.BoolProperty(attr="POLYGROUPS", name="Poly Groups", description="Import OBJ groups as vertex groups.", default= True), - bpy.props.BoolProperty(attr="IMAGE_SEARCH", name="Image Search", description="Search subdirs for any assosiated images (Warning, may be slow)", default= True), - ] - - ''' - def poll(self, context): - return True ''' - - def execute(self, context): - # print("Selected: " + context.active_object.name) - - load_obj(self.path, - context, - self.CLAMP_SIZE, - self.CREATE_FGONS, - self.CREATE_SMOOTH_GROUPS, - self.CREATE_EDGES, - self.SPLIT_OBJECTS, - self.SPLIT_GROUPS, - self.SPLIT_MATERIALS, - self.ROTATE_X90, - self.IMAGE_SEARCH, - self.POLYGROUPS) - - return ('FINISHED',) - - def invoke(self, context, event): - wm = context.manager - wm.add_fileselect(self.__operator__) - return ('RUNNING_MODAL',) - - -bpy.ops.add(IMPORT_OT_obj) - - -# NOTES (all line numbers refer to 2.4x import_obj.py, not this file) -# check later: line 489 -# can convert now: edge flags, edges: lines 508-528 -# ngon (uses python module BPyMesh): 384-414 -# nurbs: 947- -# NEXT clamp size: get bound box with RNA -# get back to l 140 (here) -# search image in bpy.config.textureDir - load_image -# replaced BPyImage.comprehensiveImageLoad with a simplified version that only checks additional directory specified, but doesn't search dirs recursively (obj_image_load) -# bitmask won't work? - 132 -# uses operator bpy.ops.OBJECT_OT_select_all_toggle() to deselect all (not necessary?) -# uses bpy.sys.time()
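# A detail worth pulling out of the face-parsing loop above: OBJ indices are
# 1-based and may be negative, counting back from the most recently defined
# vertex.  The conversion the importer applies is equivalent to this small
# helper (resolve_index() is illustrative, not part of the script):
def resolve_index(obj_index, current_count):
    # '1' is the first vertex defined so far, '-1' the most recent one
    if obj_index < 0:
        return current_count + obj_index   # -1 with 8 verts -> index 7
    return obj_index - 1                   # 1-based -> 0-based

resolve_index(1, 8)    # -> 0
resolve_index(-1, 8)   # -> 7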