Remove non-stable gpuCache loader for models (for stable release)

Roy Nieterau 2017-09-28 14:40:39 +02:00
parent 19cc3af18e
commit abe2dcc253


@@ -31,46 +31,4 @@ class ModelLoader(api.Loader):
            groupReference=True,
            groupName="{}:{}".format(namespace, name))

        self[:] = nodes


class ModelGPUCacheLoader(api.Loader):
    """Import a GPU Cache"""

    families = ["colorbleed.model"]
    representations = ["abc"]

    label = "Import GPU Cache"
    order = -1
    icon = "download"

    def process(self, name, namespace, context, data):

        import maya.cmds as cmds
        # from avalon import maya

        # todo: This will likely not be entirely safe with "containerize"
        #       also this cannot work in the manager because it only works
        #       on references at the moment!
        #       especially in cases of duplicating the gpu cache node this will
        #       mess up the "containered" workflow in the avalon core for maya
        print("WARNING: Importing gpuCaches isn't fully tested yet")

        path = self.fname

        cmds.loadPlugin("gpuCache", quiet=True)

        # Create transform with shape
        node_name = "{0}Shape".format(name)
        transform = cmds.createNode("transform", name=name)
        cache = cmds.createNode("gpuCache", parent=transform, name=node_name)

        # Set the cache filepath
        cmds.setAttr('{}.cacheFileName'.format(cache), path, type="string")
        cmds.setAttr('{}.cacheGeomPath'.format(cache), "|", type="string")  # root

        # Select the transform
        cmds.select(transform, r=1)

        # Store the created nodes
        self[:] = [transform, cache]
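Note: outside the loader framework, the removed import boils down to a few plain maya.cmds calls. A minimal standalone sketch of the same node setup (the function and argument names here are illustrative, not part of this repository):

    import maya.cmds as cmds

    def import_gpu_cache(name, abc_path):
        """Create a transform with a gpuCache shape reading the given Alembic file."""
        cmds.loadPlugin("gpuCache", quiet=True)
        transform = cmds.createNode("transform", name=name)
        shape = cmds.createNode("gpuCache", parent=transform,
                                name="{0}Shape".format(name))
        # Point the shape at the cache file and read from the Alembic root ("|")
        cmds.setAttr("{}.cacheFileName".format(shape), abc_path, type="string")
        cmds.setAttr("{}.cacheGeomPath".format(shape), "|", type="string")
        return transform, shape

Because the result is a plain DAG node rather than a reference, the scene manager (which at this point only handles references) cannot reliably track, update, or remove it, which is the instability the commit message and the removed todo comment refer to.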