Massive cleanup removing untested/unstable families or plug-ins
This commit is contained in:
parent df287f411c
commit 36166f5332

42 changed files with 3 additions and 2351 deletions

@@ -1,9 +0,0 @@
import avalon.maya


class CreateGroom(avalon.maya.Creator):
    """Hair / fur definition for an asset"""

    name = "groomDefault"
    label = "Groom"
    family = "colorbleed.groom"

@@ -1,9 +0,0 @@
import avalon.maya


class CreateInstance(avalon.maya.Creator):
    """Maya instancer using cached particles"""

    name = "instanceDefault"
    label = "Instance"
    family = "colorbleed.instance"

@@ -1,37 +0,0 @@
from collections import OrderedDict
import avalon.maya
from colorbleed.maya import lib


class CreateLayout(avalon.maya.Creator):
    """The layout of an episode / sequence / shot"""

    name = "layoutDefault"
    label = "Layout"
    family = "colorbleed.layout"

    def __init__(self, *args, **kwargs):
        super(CreateLayout, self).__init__(*args, **kwargs)

        # create an ordered dict with the existing data first
        data = OrderedDict(**self.data)

        # get basic animation data : start / end / handles / steps
        for key, value in lib.collect_animation_data().items():
            data[key] = value

        # Write vertex colors with the geometry.
        data["writeColorSets"] = False

        # Write GPU cache as placeholder cube instead of full data
        data["placeholder"] = False

        # Include only renderable visible shapes.
        # Skips locators and empty transforms
        data["renderableOnly"] = False

        # Include only nodes that are visible at least once during the
        # frame range.
        data["visibleOnly"] = False

        self.data = data

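The creator above merges pipeline-wide animation defaults into the instance data while preserving key order. A minimal standalone sketch of that pattern, with hypothetical frame-range values standing in for `lib.collect_animation_data()`:

from collections import OrderedDict

# Hypothetical stand-in for colorbleed.maya.lib.collect_animation_data()
def collect_animation_data():
    return OrderedDict([("startFrame", 1001), ("endFrame", 1100),
                        ("handles", 10), ("step", 1.0)])

data = OrderedDict(subset="layoutDefault")  # existing creator data first
for key, value in collect_animation_data().items():
    data[key] = value
data["writeColorSets"] = False

print(list(data.keys()))
# ['subset', 'startFrame', 'endFrame', 'handles', 'step', 'writeColorSets']
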
@@ -1,9 +0,0 @@
import avalon.maya


class CreateTexture(avalon.maya.Creator):
    """Textures for an asset"""

    name = "texturesDefault"
    label = "Textures"
    family = "colorbleed.texture"

@@ -1,22 +0,0 @@
from collections import OrderedDict
import avalon.maya
from colorbleed.maya import lib


class CreateYetiFur(avalon.maya.Creator):
    """Cached yeti fur extraction"""

    name = "yetiFur"
    label = "Yeti Fur"
    family = "colorbleed.yetifur"

    def __init__(self, *args, **kwargs):
        super(CreateYetiFur, self).__init__(*args, **kwargs)

        # get scene values as defaults
        data = OrderedDict(**self.data)
        animation_data = lib.collect_animation_data()
        for key, value in animation_data.items():
            data[key] = value

        self.data = data

@@ -1,9 +0,0 @@
import avalon.maya


class CreateSetdress(avalon.maya.Creator):
    """Set dressing of assets for an environment"""

    name = "setDress"
    label = "Setdress"
    family = "colorbleed.setdress"

@@ -1,28 +0,0 @@
from maya import cmds
from avalon import api


class HistoryLookLoader(api.Loader):
    """Specific loader for lookdev"""

    families = ["colorbleed.historyLookdev"]
    representations = ["ma"]

    label = "Reference look history"
    order = -10
    icon = "code-fork"
    color = "orange"

    def process(self, name, namespace, context, data):
        from avalon import maya
        with maya.maintained_selection():
            nodes = cmds.file(
                self.fname,
                namespace=namespace,
                reference=True,
                returnNewNodes=True,
                groupReference=True,
                groupName=namespace + ":" + name
            )

        self[:] = nodes

@@ -1,24 +0,0 @@
import pyblish.api


class CollectAlembicCBAttrs(pyblish.api.InstancePlugin):
    """Collects settings for the Alembic extractor"""

    order = pyblish.api.CollectorOrder + 0.499
    families = ['colorbleed.model', 'colorbleed.pointcache']
    label = "Alembic Colorbleed Attrs"

    def process(self, instance):

        attrPrefix = instance.data.get("attrPrefix", [])
        attrPrefix.append("cb")
        instance.data['attrPrefix'] = attrPrefix

        # Ensure visibility keys are written
        instance.data['writeVisibility'] = True

        # Write creases
        instance.data['writeCreases'] = True

        # Ensure UVs are written
        instance.data['uvWrite'] = True

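The keys this collector stores mirror flags of Maya's `AbcExport` job syntax, which the Alembic extractor (not shown in this commit) presumably assembles downstream. A hypothetical sketch of that mapping:

def to_abc_job_args(data):
    """Hypothetical mapping of collected instance data to AbcExport flags."""
    args = []
    for prefix in data.get("attrPrefix", []):
        args.append("-attrPrefix {0}".format(prefix))
    # Boolean toggles become bare flags when enabled
    for key, flag in (("writeVisibility", "-writeVisibility"),
                      ("writeCreases", "-writeCreases"),
                      ("uvWrite", "-uvWrite")):
        if data.get(key):
            args.append(flag)
    return " ".join(args)

print(to_abc_job_args({"attrPrefix": ["cb"], "writeVisibility": True,
                       "writeCreases": True, "uvWrite": True}))
# -attrPrefix cb -writeVisibility -writeCreases -uvWrite
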
@@ -1,15 +0,0 @@
import pyblish.api


class DebugPlugin(pyblish.api.InstancePlugin):

    label = "Debug"
    order = pyblish.api.IntegratorOrder - 0.4

    def process(self, instance):

        import pprint

        self.log.info("\n\n----------------------")
        self.log.info("Instance")
        # `self.log` is a logger, not a callable; log the pretty-printed data
        self.log.info(pprint.pformat(instance.data))

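For context, a plug-in like this runs once pyblish discovers it; a minimal sketch of registering it manually and publishing, assuming a host is already set up:

import pyblish.api
import pyblish.util

pyblish.api.register_plugin(DebugPlugin)  # make the plug-in discoverable
context = pyblish.util.publish()          # run collect/validate/extract/integrate
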
@@ -1,51 +0,0 @@
import os

from maya import cmds

import avalon.maya
import colorbleed.api


class ExtractCameraRaw(colorbleed.api.Extractor):
    """Extract as Maya Ascii

    Includes constraints and channels

    """

    label = "Camera Raw (Maya Ascii)"
    hosts = ["maya"]
    families = ["colorbleed.camera"]

    def process(self, instance):

        # Define extract output file path
        dir_path = self.staging_dir(instance)
        filename = "{0}.raw.ma".format(instance.name)
        path = os.path.join(dir_path, filename)

        # get cameras
        cameras = cmds.ls(instance.data['setMembers'], leaf=True,
                          shapes=True, dag=True, type='camera')

        # Perform extraction
        self.log.info("Performing extraction..")
        with avalon.maya.maintained_selection():
            cmds.select(cameras, noExpand=True)
            cmds.file(path,
                      force=True,
                      typ="mayaAscii",
                      exportSelected=True,
                      preserveReferences=False,
                      constructionHistory=False,
                      channels=True,  # allow animation
                      constraints=True,
                      shader=False,
                      expressions=False)

        if "files" not in instance.data:
            instance.data["files"] = list()

        instance.data["files"].append(filename)

        self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

@@ -1,43 +0,0 @@
import os

from maya import cmds

import avalon.maya
import colorbleed.api


class ExtractLayoutMayaAscii(colorbleed.api.Extractor):
    """Extract as Maya Ascii"""

    label = "Layout (Maya ASCII)"
    hosts = ["maya"]
    families = ["colorbleed.layout"]

    def process(self, instance):

        # Define extract output file path
        dir_path = self.staging_dir(instance)
        filename = "{0}.ma".format(instance.name)
        path = os.path.join(dir_path, filename)

        # Perform extraction
        self.log.info("Performing extraction..")
        with avalon.maya.maintained_selection():
            cmds.select(instance, noExpand=True)
            cmds.file(path,
                      force=True,
                      typ="mayaAscii",
                      exportSelected=True,
                      preserveReferences=True,
                      channels=True,
                      constraints=True,
                      expressions=True,
                      constructionHistory=True)

        if "files" not in instance.data:
            instance.data["files"] = list()

        instance.data["files"].append(filename)

        self.log.info("Extracted instance '{0}' to: {1}".format(
            instance.name, path))

@@ -1,21 +0,0 @@
import os
import json
import colorbleed.api


class ExtractMetadata(colorbleed.api.Extractor):
    """Extract origin metadata from scene"""

    label = "Metadata"

    def process(self, instance):

        temp_dir = self.staging_dir(instance)
        temp_file = os.path.join(temp_dir, "metadata.meta")

        metadata = instance.data("metadata")
        self.log.info("Extracting %s" % metadata)
        with open(temp_file, "w") as f:
            json.dump(metadata, f, indent=2, sort_keys=True)

        self.log.info("Written to %s" % temp_file)

@@ -1,47 +0,0 @@
import os
import shutil
import pyblish.api

import colorbleed.api


class IntegrateFiles(colorbleed.api.Integrator):
    """Integrate Files

    Copies the transfer queue to the destinations.

    """

    order = pyblish.api.IntegratorOrder + 0.1
    label = "Transfer Files"

    def process(self, instance):
        """Copy files from srcPath to destPath

        The transfers should be stored in the "transfers" data on the
        instance as a list of (srcPath, destPath) pairs.

        - srcPath: Source path (must be absolute!)
        - destPath: Destination path (can be relative)

        """
        super(IntegrateFiles, self).process(instance)

        # Get unique texture transfers
        # (since different nodes might load the same texture)
        transfers = instance.data.get("transfers", [])

        for src, dest in transfers:

            self.log.info("Copying: {0} -> {1}".format(src, dest))

            # Source is destination
            if os.path.normpath(dest) == os.path.normpath(src):
                self.log.info("Skip copy of resource file: {0}".format(src))
                continue

            # Ensure folder exists
            folder = os.path.dirname(dest)
            if not os.path.exists(folder):
                os.makedirs(folder)

            shutil.copyfile(src, dest)

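An upstream plug-in queues work for this integrator by filling the "transfers" data; a minimal sketch with hypothetical paths:

instance_data = {}  # stands in for pyblish's instance.data

# Each item is a (srcPath, destPath) pair consumed by IntegrateFiles
instance_data["transfers"] = [
    ("/projects/show/work/textures/diffuse_TEX.png",
     "/projects/show/publish/textures/diffuse_TEX.png"),
    ("/projects/show/work/textures/spec_TEX.png",
     "/projects/show/publish/textures/spec_TEX.png"),
]
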
@@ -1,46 +0,0 @@
import re
import os

import maya.cmds as cmds

import pyblish.api
import colorbleed.api


class ValidateFileNameConvention(pyblish.api.InstancePlugin):

    label = "File Name Convention"
    families = ["colorbleed.look"]
    hosts = ["maya"]
    optional = True

    order = pyblish.api.ValidatorOrder
    actions = [colorbleed.api.SelectInvalidAction]

    @staticmethod
    def get_invalid(instance):

        invalid = []
        # todo: change pattern to company standard
        pattern = re.compile("[a-zA-Z]+_[A-Z]{3}")

        nodes = cmds.ls(instance, type="file")
        for node in nodes:
            # get texture path
            texture = cmds.getAttr("{}.fileTextureName".format(node))
            if not texture:
                # A file node without any texture path set is invalid
                invalid.append(node)
                continue
            filename = os.path.splitext(os.path.basename(texture))[0]
            match = pattern.match(filename)
            if not match:
                invalid.append(node)

        return invalid

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            self.log.error("Found invalid naming convention. Failed nodes:\n"
                           "%s" % invalid)

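The placeholder pattern accepts a base name, an underscore, and a three-letter uppercase suffix; a quick self-contained check:

import re

pattern = re.compile("[a-zA-Z]+_[A-Z]{3}")

for name in ("diffuse_TEX", "bump_NRM", "noise", "spec_map"):
    print(name, bool(pattern.match(name)))
# diffuse_TEX True, bump_NRM True, noise False, spec_map False
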
@@ -1,87 +0,0 @@
import pyblish.api
import maya.cmds as cmds
import colorbleed.api
import avalon.maya

import cb.utils.maya.dag as dag
import cbra.utils.maya.layout as layout


class ValidateLayoutContent(pyblish.api.InstancePlugin):
    """Validates that a layout contains at least a gpuCache or mesh shape node

    Also validates that (at the current frame this is tested at) at least
    a single shape is visible.

    Without any shape nodes the layout would simply cache 'nothing' visually
    and would seem redundant.

    Note: Theoretically this validation does disable the possibility to just
        cache some "transforms" to be used elsewhere. As such, currently the
        'layout' family is only intended to be used for visual shapes.

    """

    order = colorbleed.api.ValidateContentsOrder
    label = 'Layout Content'
    families = ['colorbleed.layout']

    def process(self, instance):

        placeholder = instance.data.get("placeholder", False)

        # Ensure any meshes or gpuCaches in instance
        if not cmds.ls(instance, type=("mesh", "gpuCache", "nurbsCurve"),
                       long=True):
            raise RuntimeError(
                "Layout has no mesh, gpuCache or nurbsCurve children: "
                "{0}".format(instance))

        # Ensure at least any extract nodes readily available after filtering
        with avalon.maya.maintained_selection():

            nodes = instance.data['setMembers']
            cmds.select(nodes, r=1, hierarchy=True)
            hierarchy = cmds.ls(sl=True, long=True)
            extract_nodes = layout.filter_nodes(hierarchy)

        if not extract_nodes:
            self.log.info("Set members: {0}".format(nodes))
            self.log.info("Hierarchy: {0}".format(hierarchy))
            raise RuntimeError("No nodes to extract after "
                               "filtering: {0}".format(extract_nodes))

        # If there are no meshes in the layout the gpuCache command will
        # crash; as such we consider this invalid, unless "placeholder" is
        # set to True
        meshes = cmds.ls(cmds.ls(extract_nodes,
                                 dag=True,
                                 leaf=True,
                                 shapes=True,
                                 noIntermediate=True,
                                 long=True),
                         type=("mesh", "gpuCache"),
                         long=True)
        if not meshes and not placeholder:
            raise RuntimeError("No meshes in layout. "
                               "Set placeholder to True on instance to allow "
                               "extraction without meshes")

        # Ensure at least one MESH shape is visible
        extract_shapes = cmds.ls(extract_nodes,
                                 shapes=True,
                                 long=True)

        if not placeholder:
            # We validate that at least one shape is visible to avoid
            # erroneous extractions of invisible-only content.
            for shape in extract_shapes:
                if dag.is_visible(shape,
                                  displayLayer=False,
                                  intermediateObject=True,
                                  visibility=True,
                                  parentHidden=True):
                    break
            else:
                raise RuntimeError("No extract shape is visible. "
                                   "Layout requires at least one "
                                   "shape to be visible.")

@@ -1,43 +0,0 @@
import pyblish.api
import colorbleed.api
import colorbleed.maya.lib as lib


class ValidateLayoutNodeIds(pyblish.api.InstancePlugin):
    """Validate nodes have colorbleed id attributes

    All non-referenced transform nodes in the hierarchy should have unique IDs

    """

    order = colorbleed.api.ValidatePipelineOrder
    families = ['colorbleed.layout']
    hosts = ['maya']
    label = 'Layout Transform Ids'
    actions = [colorbleed.api.SelectInvalidAction,
               colorbleed.api.GenerateUUIDsOnInvalidAction]

    @staticmethod
    def get_invalid(instance):

        from maya import cmds

        nodes = cmds.ls(instance, type='transform', long=True)
        referenced = cmds.ls(nodes, referencedNodes=True, long=True)
        non_referenced = set(nodes) - set(referenced)

        invalid = []
        for node in non_referenced:
            if not lib.get_id(node):
                invalid.append(node)

        return invalid

    def process(self, instance):
        """Process all meshes"""

        invalid = self.get_invalid(instance)

        if invalid:
            raise RuntimeError("Transforms (non-referenced) found in layout "
                               "without asset IDs: {0}".format(invalid))

@@ -1,71 +0,0 @@
import maya.cmds as cmds

import pyblish.api
import colorbleed.api

from cb.utils.maya.core import getHighestInHierarchy, iterParents

_IDENTITY = [1.0, 0.0, 0.0, 0.0,
             0.0, 1.0, 0.0, 0.0,
             0.0, 0.0, 1.0, 0.0,
             0.0, 0.0, 0.0, 1.0]

_ATTRS = ['tx', 'ty', 'tz',
          'rx', 'ry', 'rz',
          'sx', 'sy', 'sz',
          'shearXY', 'shearXZ', 'shearYZ']


def is_identity(node, tolerance=1e-30):
    mat = cmds.xform(node, query=True, matrix=True, objectSpace=True)
    if not all(abs(x - y) < tolerance for x, y in zip(_IDENTITY, mat)):
        return False
    return True


def is_animated(node):
    return any(cmds.listConnections("{}.{}".format(node, attr), source=True,
                                    destination=False) for attr in _ATTRS)


class ValidateLayoutParentNoTransforms(pyblish.api.InstancePlugin):
    """Validate layout parents have no transformations.

    The parent nodes above the extracted layout contents MUST have zero
    transformation (no offsets in translate, rotate, scale) for this to
    pass validation.

    This is required to ensure no offsets are lacking from extracted caches.

    """

    order = colorbleed.api.ValidatePipelineOrder
    families = ['colorbleed.layout']
    hosts = ['maya']
    label = 'Layout No Parent Transforms'
    actions = [colorbleed.api.SelectInvalidAction]

    @staticmethod
    def get_invalid(instance):

        invalid = []

        # Get highest in hierarchy
        nodes = instance.data["setMembers"]
        highest = getHighestInHierarchy(nodes)

        for node in highest:
            for parent in iterParents(node):
                if not is_identity(parent) or is_animated(parent):
                    invalid.append(parent)

        return invalid

    def process(self, instance):
        """Process all meshes"""

        invalid = self.get_invalid(instance)

        if invalid:
            raise RuntimeError("Layout parents found with transformations "
                               "or animation: {0}".format(invalid))

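The identity test compares Maya's flattened 4x4 object-space matrix element-wise against a tolerance; the same logic in isolation, on plain lists:

_IDENTITY = [1.0, 0.0, 0.0, 0.0,
             0.0, 1.0, 0.0, 0.0,
             0.0, 0.0, 1.0, 0.0,
             0.0, 0.0, 0.0, 1.0]

def is_identity_matrix(mat, tolerance=1e-30):
    return all(abs(x - y) < tolerance for x, y in zip(_IDENTITY, mat))

translated = _IDENTITY[:12] + [5.0, 0.0, 0.0, 1.0]  # tx = 5
print(is_identity_matrix(_IDENTITY))   # True
print(is_identity_matrix(translated))  # False
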
@@ -1,149 +0,0 @@
from maya import cmds

import pyblish.api
import colorbleed.api
import colorbleed.maya.lib as lib


def get_id_from_history(node):
    """Return the ID from the first node in the history of the same type

    If the node itself has an ID that will be returned. If no ID is found,
    None is returned.

    Returns:
        str: The id on the first node in history

    """

    node_type = cmds.nodeType(node)
    history = cmds.listHistory(node, leaf=False) or []
    similar = cmds.ls(history, exactType=node_type, long=True)

    for history_node in similar:
        node_id = lib.get_id(history_node)
        if node_id:
            return node_id


class CopyUUIDsFromHistoryAction(pyblish.api.Action):
    """Copy UUIDs from the history of a node.

    This allows a deformed Shape to take its UUID from the original shape.

    """

    label = "Copy UUIDs from History"
    on = "failed"    # This action is only available on a failed plug-in
    icon = "wrench"  # Icon from Font Awesome

    def process(self, context, plugin):

        self.log.info("Finding bad nodes..")

        # Get the errored instances
        errored_instances = []
        for result in context.data["results"]:
            if result["error"] is not None and result["instance"] is not None:
                errored_instances.append(result["instance"])

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)

        # Get the nodes from all instances that ran through this plug-in
        invalid = []
        for instance in instances:
            invalid_nodes = plugin.get_invalid(instance)
            invalid.extend(invalid_nodes)

        # Ensure unique
        invalid = list(set(invalid))

        if not invalid:
            self.log.info("No invalid nodes found.")
            return

        # Generate a mapping of UUIDs using history
        mapping = dict()
        for shape in invalid:
            node_id = get_id_from_history(shape)
            if not node_id:
                self.log.info("No ID found in history of: {0}".format(shape))
                continue
            mapping[shape] = node_id

        # Add the ids to the nodes
        # id_utils.add_ids(mapping)
        self.log.info("Generated ids on nodes: {0}".format(mapping.values()))


class ValidateLayoutShapeNodeIds(pyblish.api.InstancePlugin):
    """Validate shape nodes have colorbleed id attributes

    All non-referenced transforms in the hierarchy should have unique IDs.
    This does not check for unique shape ids to allow the same non-referenced
    shape in the output (e.g. when multiple of the same characters are in
    the scene with a deformer on them).

    How?
        This usually happens when a node was created locally and did not come
        from a correctly published asset.

        In the case you're entirely sure you still want to publish the shapes
        you can forcefully generate ids for them. USE WITH CARE! Select the
        nodes (shapes!) and run:
            > scripts > pyblish > utilities > regenerate_uuids

    Why?
        The pipeline needs the ids to be able to identify "what" an object is.
        When it knows that, it's able to correctly assign its shaders or do
        all kinds of other magic with it!

    """

    order = colorbleed.api.ValidatePipelineOrder
    families = ['colorbleed.layout']
    hosts = ['maya']
    label = 'Layout Shape Ids'
    actions = [colorbleed.api.SelectInvalidAction,
               CopyUUIDsFromHistoryAction]

    @staticmethod
    def get_invalid(instance):

        nodes = cmds.ls(instance, shapes=True, long=True)
        referenced = cmds.ls(nodes, referencedNodes=True, long=True)
        non_referenced = set(nodes) - set(referenced)

        # Ignore specific node types
        # `deformFunc` = deformer shapes
        IGNORED = ("gpuCache",
                   "constraint",
                   "lattice",
                   "baseLattice",
                   "geometryFilter",
                   "deformFunc",
                   "locator")

        ignored_nodes = cmds.ls(list(non_referenced), type=IGNORED, long=True)
        if ignored_nodes:
            non_referenced -= set(ignored_nodes)

        invalid = []
        for node in non_referenced:
            if not lib.get_id(node):
                invalid.append(node)

        return invalid

    def process(self, instance):
        """Process all meshes"""

        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Shapes (non-referenced) found in layout "
                               "without asset IDs: {0}".format(invalid))

@@ -1,63 +0,0 @@
import pyblish.api
import colorbleed.api
import colorbleed.maya.lib as lib


class ValidateLayoutUniqueNodeIds(pyblish.api.InstancePlugin):
    """Validate nodes have unique colorbleed id attributes"""

    order = colorbleed.api.ValidatePipelineOrder
    families = ['colorbleed.layout']
    hosts = ['maya']
    label = 'Layout Transform Unique Ids'
    actions = [colorbleed.api.SelectInvalidAction,
               colorbleed.api.GenerateUUIDsOnInvalidAction]

    @staticmethod
    def get_invalid_dict(instance):
        """Return a dictionary mapping of id key to list of member nodes"""
        from maya import cmds

        nodes = cmds.ls(instance, type='transform', long=True)
        referenced = cmds.ls(nodes, referencedNodes=True, long=True)
        non_referenced = set(nodes) - set(referenced)
        members = non_referenced

        # Collect each id with their members
        from collections import defaultdict
        ids = defaultdict(list)
        for member in members:
            member_id = lib.get_id(member)
            ids[member_id].append(member)

        # Skip those without IDs (if everything should have an ID that should
        # be another validation)
        ids.pop(None, None)

        # Take only the ids with more than one member
        invalid = dict((node_id, id_members) for node_id, id_members
                       in ids.items() if len(id_members) > 1)
        return invalid

    @classmethod
    def get_invalid(cls, instance):
        """Return the member nodes that are invalid"""

        invalid_dict = cls.get_invalid_dict(instance)

        # Take only the ids with more than one member
        invalid = list()
        for members in invalid_dict.values():
            invalid.extend(members)

        return invalid

    def process(self, instance):
        """Process all meshes"""

        # Ensure all nodes have a unique cbId
        invalid = self.get_invalid_dict(instance)

        if invalid:
            raise RuntimeError("Transforms found with non-unique "
                               "asset IDs: {0}".format(invalid))

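The duplicate detection boils down to grouping by id and keeping groups with more than one member; verified standalone with hypothetical node names:

from collections import defaultdict

ids = defaultdict(list)
for node, node_id in [("|a", "id1"), ("|b", "id2"),
                      ("|c", "id1"), ("|d", None)]:
    ids[node_id].append(node)

ids.pop(None, None)  # missing ids are another validator's job
duplicates = dict((k, v) for k, v in ids.items() if len(v) > 1)
print(duplicates)  # {'id1': ['|a', '|c']}
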
@@ -1,39 +0,0 @@
import pyblish.api
import colorbleed.api


class ValidateLookMembers(pyblish.api.InstancePlugin):
    """Validate look members have colorbleed id attributes

    Looks up all relationship members, checks if all the members have the
    cbId (colorbleed id) and returns all the nodes which fail the test.

    """

    order = colorbleed.api.ValidatePipelineOrder
    families = ['colorbleed.look']
    hosts = ['maya']
    label = 'Look Members (ID)'
    actions = [colorbleed.api.SelectInvalidAction,
               colorbleed.api.GenerateUUIDsOnInvalidAction]

    def process(self, instance):
        """Process all meshes"""

        invalid_ids = self.get_invalid(instance)
        if invalid_ids:
            raise RuntimeError("Found invalid nodes.\nNo ID : "
                               "{}".format(invalid_ids))

    @classmethod
    def get_invalid(cls, instance):

        relationships = instance.data["lookData"]["relationships"]
        members = []
        for relationship in relationships.values():
            members.extend(relationship["members"])

        # get the name of the node when there is no UUID
        invalid = [m["name"] for m in members if not m["uuid"]]

        return invalid

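For reference, the `lookData` relationships this validator walks have roughly the following shape (hypothetical values, inferred from the member access above):

look_data = {
    "relationships": {
        "lambert1SG": {
            "members": [
                {"name": "|asset|body|bodyShape", "uuid": "58ae0d"},
                {"name": "|asset|eyes|eyesShape", "uuid": None},  # invalid
            ],
        },
    },
}

members = []
for relationship in look_data["relationships"].values():
    members.extend(relationship["members"])
print([m["name"] for m in members if not m["uuid"]])
# ['|asset|eyes|eyesShape']
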
@@ -1,62 +0,0 @@
from maya import cmds

import pyblish.api
import colorbleed.api


class ValidateRigNodeIds(pyblish.api.InstancePlugin):
    """Validate nodes in instance have colorbleed id attributes

    To fix this use the action to select the invalid nodes. Identify whether
    these are nodes created locally in the rig; if they are not, they
    should've gotten their ID elsewhere! This is important, because then you
    should NOT fix it in your scene but earlier in the pipeline. If these
    invalid nodes are local to your rig then you should generate ids for them.

    For Dummies:
        For the pipeline it's important in further stages to identify exactly
        "what node is what node". Basically it's saying: Hey! It's me! To
        accompany that, each node stores an ID, like its own passport. This
        validator will tell you if there are nodes that have no such
        passport (ID).

    Warning:
        This does NOT validate the IDs are unique in the instance.

    """

    order = colorbleed.api.ValidatePipelineOrder
    families = ['colorbleed.rig',
                'colorbleed.rigcontrols',
                "colorbleed.rigpointcache"]
    hosts = ['maya']
    label = 'Rig Id Attributes'
    actions = [colorbleed.api.SelectInvalidAction]

    # includes: yeti grooms and v-ray fur, etc.
    TYPES = ("transform", "mesh", "nurbsCurve", "geometryShape")

    @staticmethod
    def get_invalid(instance):

        # filter to nodes of specific types
        dag = cmds.ls(instance, noIntermediate=True,
                      long=True, type=ValidateRigNodeIds.TYPES)

        # Ensure all nodes have a cbId
        invalid = list()
        for node in dag:
            # todo: refactor `mbId` when attribute is updated
            has_id = cmds.attributeQuery("mbId", node=node, exists=True)
            if not has_id:
                invalid.append(node)

        return invalid

    def process(self, instance):
        """Process all meshes"""

        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Nodes found without "
                               "asset IDs: {0}".format(invalid))

@@ -1,85 +0,0 @@
from collections import defaultdict

import pyblish.api
import colorbleed.api


class ValidateRigPointcacheNodeIds(pyblish.api.InstancePlugin):
    """Validate rig out_SET nodes have ids

    The nodes in a rig's out_SET must all have node IDs
    that are all unique.

    Geometry in a rig should be using a published model's geometry.
    As such, when this validation doesn't pass it means you're using
    locally created nodes that are not coming from a published
    model file. Ensure you update the ids from the model.

    """

    order = colorbleed.api.ValidateContentsOrder
    families = ['colorbleed.rig', "colorbleed.rigpointcache"]
    hosts = ['maya']
    label = 'Rig Pointcache Node Ids'
    actions = [colorbleed.api.SelectInvalidAction]

    ignore_types = ("constraint",)

    @classmethod
    def get_invalid(cls, instance):
        from maya import cmds

        # Get out_SET
        sets = cmds.ls(instance, type='objectSet')
        pointcache_sets = [x for x in sets if x == 'out_SET']

        nodes = list()
        for s in pointcache_sets:
            members = cmds.sets(s, query=True)
            members = cmds.ls(members, long=True)  # ensure long names
            descendents = cmds.listRelatives(members,
                                             allDescendents=True,
                                             fullPath=True) or []
            descendents = cmds.ls(descendents, noIntermediate=True, long=True)
            hierarchy = members + descendents
            nodes.extend(hierarchy)

        # ignore certain node types (e.g. constraints)
        ignore = cmds.ls(nodes, type=cls.ignore_types, long=True)
        if ignore:
            ignore = set(ignore)
            nodes = [node for node in nodes if node not in ignore]

        # Missing ids
        missing = list()
        ids = defaultdict(list)
        for node in nodes:
            has_id = cmds.attributeQuery("mbId", node=node, exists=True)
            if not has_id:
                missing.append(node)
                continue

            uuid = cmds.getAttr("{}.mbId".format(node))
            ids[uuid].append(node)

        non_uniques = list()
        for uuid, id_nodes in ids.items():
            if len(id_nodes) > 1:
                non_uniques.extend(id_nodes)

        if missing:
            cls.log.warning("Missing node ids: {0}".format(missing))

        if non_uniques:
            cls.log.warning("Non-unique node ids: {0}".format(non_uniques))

        invalid = missing + non_uniques
        return invalid

    def process(self, instance):
        """Process all meshes"""

        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Missing or non-unique node IDs: "
                               "{0}".format(invalid))

@@ -1,86 +0,0 @@
from maya import cmds

import pyblish.api
import colorbleed.api


class ValidateRigPointcacheShapeRenderStats(pyblish.api.Validator):
    """Ensure all render stats are set to the default values."""

    order = colorbleed.api.ValidateMeshOrder
    families = ['colorbleed.model']
    hosts = ['maya']
    category = 'model'
    optional = False
    version = (0, 1, 0)
    label = 'Rig Pointcache Shape Default Render Stats'
    actions = [colorbleed.api.SelectInvalidAction]

    defaults = {'castsShadows': 1,
                'receiveShadows': 1,
                'motionBlur': 1,
                'primaryVisibility': 1,
                'smoothShading': 1,
                'visibleInReflections': 1,
                'visibleInRefractions': 1,
                'doubleSided': 1,
                'opposite': 0}

    ignore_types = ("constraint",)

    @classmethod
    def get_pointcache_nodes(cls, instance):

        # Get out_SET
        sets = cmds.ls(instance, type='objectSet')
        pointcache_sets = [x for x in sets if x == 'out_SET']

        nodes = list()
        for s in pointcache_sets:
            members = cmds.sets(s, query=True)
            members = cmds.ls(members, long=True)  # ensure long names
            descendents = cmds.listRelatives(members,
                                             allDescendents=True,
                                             fullPath=True) or []
            descendents = cmds.ls(descendents,
                                  noIntermediate=True,
                                  long=True)
            hierarchy = members + descendents
            nodes.extend(hierarchy)

        # ignore certain node types (e.g. constraints)
        ignore = cmds.ls(nodes, type=cls.ignore_types, long=True)
        if ignore:
            ignore = set(ignore)
            nodes = [node for node in nodes if node not in ignore]

        return nodes

    @classmethod
    def get_invalid(cls, instance):
        # It seems the "surfaceShape" and those derived from it have
        # `renderStat` attributes.

        nodes = cls.get_pointcache_nodes(instance)

        shapes = cmds.ls(nodes, long=True, type='surfaceShape')
        invalid = []
        for shape in shapes:
            for attr, required_value in cls.defaults.items():

                if cmds.attributeQuery(attr, node=shape, exists=True):
                    value = cmds.getAttr('{node}.{attr}'.format(node=shape,
                                                                attr=attr))
                    if value != required_value:
                        invalid.append(shape)

        return invalid

    def process(self, instance):

        invalid = self.get_invalid(instance)

        if invalid:
            raise ValueError("Shapes with non-standard renderStats "
                             "found: {0}".format(invalid))

@@ -1,56 +0,0 @@
import maya.cmds as cmds

import pyblish.api


class CollectInstancerCaches(pyblish.api.InstancePlugin):
    """Collect the cache files used by an instancer's cacheFile nodes.

    Each cacheFile node's files are registered as a resource.

    """

    order = pyblish.api.CollectorOrder + 0.495
    families = ['colorbleed.instancer']
    label = "Instancer Cache Files"

    def process(self, instance):

        members = instance.data.get("exactExportMembers", None)
        assert members, "Instancer must have members"

        resources = instance.data.get("resources", [])
        caches = cmds.ls(members, type="cacheFile")

        errors = False
        for cache in caches:

            self.log.debug("Collecting cache files for: {0}".format(cache))

            files = cmds.cacheFile(cache, query=True, fileName=True)

            # Ensure there are any files and the cacheFile is linked
            # correctly.
            if not files:
                errors = True
                self.log.error("Cache has no files: %s" % cache)
                continue

            source = files[0]  # The first file is the .xml file

            # TODO: Filter the files to only contain the required frame range.

            resource = {"tags": ["maya", "node", "cacheFile"],
                        "node": cache,
                        "source": source,
                        "files": files,
                        "subfolder": "caches"}

            resources.append(resource)

        # Store on the instance
        instance.data['resources'] = resources

        if errors:
            raise RuntimeError("Errors during collecting caches. "
                               "Are the caches linked correctly?")

@@ -1,63 +0,0 @@
import pyblish.api
import maya.cmds as cmds


class CollectInstancerHistory(pyblish.api.InstancePlugin):
    """For an Instancer collect the history.

    This would collect its particles with nucleus and cacheFile

    """

    order = pyblish.api.CollectorOrder + 0.49
    families = ['colorbleed.instancer']
    label = "Instancer History"

    def process(self, instance):

        members = instance.data["setMembers"]

        # Include history of the instancer
        instancers = cmds.ls(members, type="instancer")
        if not instancers:
            self.log.info("No instancers found")
            return

        export = instancers[:]

        # Get the required inputs of the particles from history
        history = cmds.listHistory(instancers) or []
        particles = cmds.ls(history, type="nParticle")
        export.extend(particles)
        if particles:
            self.log.info("Particles: {0}".format(particles))

        particles_history = cmds.listHistory(particles) or []
        self.log.debug("Particle history: {0}".format(particles_history))

        nucleus = cmds.ls(particles_history, long=True, type="nucleus")
        self.log.info("Collected nucleus: {0}".format(nucleus))
        export.extend(nucleus)

        caches = cmds.ls(particles_history, long=True, type="cacheFile")
        self.log.info("Collected caches: {0}".format(caches))
        export.extend(caches)

        # Collect input shapes for the instancer
        for instancer in cmds.ls(instancers, exactType="instancer", long=True):
            attr = "{}.inputHierarchy".format(instancer)
            inputs = cmds.listConnections(attr, source=True,
                                          destination=False) or []
            export.extend(inputs)

        # Add it to the instance
        data = instance[:]
        data.extend(export)
        # Ensure unique objects only
        data = list(set(data))
        self.log.info("Setting members to {0}".format(data))
        instance[:] = data

        # Store the recommended export selection so the export can do it
        # accordingly
        instance.data["exactExportMembers"] = export

@@ -1,47 +0,0 @@
import maya.cmds as cmds

import pyblish.api


class CollectParticlesHistory(pyblish.api.InstancePlugin):
    """For a Particle system collect the history.

    This would collect its nucleus and cache files.

    """

    order = pyblish.api.CollectorOrder + 0.499
    families = ['colorbleed.particles']
    label = "Particles History"

    def process(self, instance):

        # Include history of the particles
        particles = cmds.ls(instance, dag=True, shapes=True,
                            leaf=True, long=True)
        particles = cmds.ls(particles, type="nParticle", long=True)
        if not particles:
            self.log.info("No particles found")
            return

        # Copy so extending `export` does not mutate the `particles` list
        export = particles[:]

        # Get the required inputs of the particles from its history
        particles_history = cmds.listHistory(particles) or []
        if particles_history:
            nucleus = cmds.ls(particles_history, type="nucleus")
            export.extend(nucleus)
            caches = cmds.ls(particles_history, type="cacheFile")
            export.extend(caches)

        # Add it to the instance
        data = instance[:]
        data.extend(export)
        # Ensure unique objects only
        data = list(set(data))
        self.log.info("Setting members to {0}".format(data))
        instance[:] = data

        # Store the recommended export selection so the export can do it
        # accordingly
        instance.data["exactExportMembers"] = export

@@ -1,13 +0,0 @@
from maya import cmds

import pyblish.api


class CollectSetdress(pyblish.api.InstancePlugin):

    order = pyblish.api.CollectorOrder + 0.499
    label = 'Collect Model Data'
    families = ["colorbleed.setdress"]

    def process(self, instance):
        pass

@@ -1,284 +0,0 @@
from maya import cmds

import pyblish.api
import cb.utils.maya.shaders as shaders

TAGS = ["maya", "attribute", "look"]
TAGS_LOOKUP = set(TAGS)


class SelectTextureNodesAction(pyblish.api.Action):
    """Select the nodes related to the collected file textures"""

    label = "Select texture nodes"
    on = "succeeded"  # This action is only available on a succeeded plug-in
    icon = "search"   # Icon from Font Awesome

    def process(self, context, plugin):

        self.log.info("Finding textures..")

        # Get the processed instances
        instances = []
        for result in context.data["results"]:
            instance = result["instance"]
            if instance is None:
                continue

            instances.append(instance)

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(instances, plugin)

        # Get the texture nodes from the instances
        nodes = []
        for instance in instances:
            for resource in instance.data.get("resources", []):
                if self.is_texture_resource(resource):
                    node = resource['node']
                    nodes.append(node)

        # Ensure unique
        nodes = list(set(nodes))

        if nodes:
            self.log.info("Selecting texture nodes: %s" % ", ".join(nodes))
            cmds.select(nodes, r=True, noExpand=True)
        else:
            self.log.info("No texture nodes found.")
            cmds.select(deselect=True)

    def is_texture_resource(self, resource):
        """Return whether the resource is a texture"""

        tags = resource.get("tags", [])
        if not TAGS_LOOKUP.issubset(tags):
            return False

        if resource.get("subfolder", None) != "textures":
            return False

        if "node" not in resource:
            return False

        return True


class CollectLookTextures(pyblish.api.InstancePlugin):
    """Collect look textures

    Includes the link from source to destination.

    """

    order = pyblish.api.CollectorOrder + 0.35
    label = 'Collect Look Textures'
    families = ["colorbleed.texture"]
    actions = [SelectTextureNodesAction]

    IGNORE = ["out_SET", "controls_SET", "_INST"]

    def process(self, instance):

        verbose = instance.data.get("verbose", False)

        # Get all texture nodes from the shader networks
        sets = self.gather_sets(instance)
        instance_members = {str(i) for i in cmds.ls(instance, long=True,
                                                    absoluteName=True)}

        self.log.info("Gathering set relations..")
        for objset in sets:
            self.log.debug("From %s.." % objset)
            content = cmds.sets(objset, query=True)
            objset_members = sets[objset]["members"]
            for member in cmds.ls(content, long=True, absoluteName=True):
                member_data = self.collect_member_data(member,
                                                       objset_members,
                                                       instance_members,
                                                       verbose)
                if not member_data:
                    continue
                # Register the member on its set
                objset_members.append(member_data)

        # Get the file nodes
        history = cmds.listHistory(sets.keys()) or []
        files = cmds.ls(history, type="file")
        files = list(set(files))

        resources = instance.data.get("resources", [])
        for node in files:
            resource = self.collect_resources(node, verbose)
            if not resource:
                continue
            resources.append(resource)

        instance.data['resources'] = resources

    def gather_sets(self, instance):
        """Gather all objectSets which are of importance for publishing

        It checks if all nodes in the instance are related to any objectSet
        which needs to be included for publishing.

        Args:
            instance (list): all nodes to be published

        Returns:
            dict
        """

        # Get view sets (so we can ignore those sets later)
        sets = dict()
        view_sets = set()
        for panel in cmds.getPanel(type="modelPanel"):
            view_set = cmds.modelEditor(panel, query=True,
                                        viewObjects=True)
            if view_set:
                view_sets.add(view_set)

        for node in instance:
            related_sets = self.get_related_sets(node, view_sets)
            if not related_sets:
                continue

            for objset in related_sets:
                if objset in sets:
                    continue
                unique_id = cmds.getAttr("%s.cbId" % objset)
                sets[objset] = {"name": objset,
                                "uuid": unique_id,
                                "members": list()}
        return sets

    def collect_resources(self, node, verbose=False):
        """Collect the link to the file(s) used (resource)

        Args:
            node (str): name of the node
            verbose (bool): enable debug information

        Returns:
            dict
        """

        # assure node includes full path
        node = cmds.ls(node, long=True)[0]
        attribute = "{}.fileTextureName".format(node)
        source = cmds.getAttr(attribute)

        # Get the computed file path (e.g. the one with the <UDIM> pattern
        # in it) so we can reassign this computed file path whenever
        # we need to.
        computed_attribute = "{}.computedFileTextureNamePattern".format(node)
        computed_source = cmds.getAttr(computed_attribute)
        if source != computed_source:
            if verbose:
                self.log.debug("File node computed pattern differs from "
                               "original pattern: {0} "
                               "({1} -> {2})".format(node,
                                                     source,
                                                     computed_source))

            # We replace backslashes with forward slashes because V-Ray
            # can't handle the UDIM files with the backslashes in the
            # paths as the computed patterns
            source = computed_source.replace("\\", "/")

        files = shaders.get_file_node_files(node)
        if not files:
            self.log.error("File node does not have a texture set: "
                           "{0}".format(node))
            return

        # Define the resource
        # todo: find a way to generate the destination for the publisher
        resource = {"tags": TAGS[:],
                    "node": node,
                    "attribute": attribute,
                    "source": source,  # required for resources
                    "files": files}    # required for resources

        return resource

    def collect_member_data(self, member, objset_members, instance_members,
                            verbose=False):
        """Get all information of the node

        Args:
            member (str): the name of the node to check
            objset_members (list): the objectSet members
            instance_members (set): the collected instance members
            verbose (bool): get debug information

        Returns:
            dict

        """

        node, components = (member.rsplit(".", 1) + [None])[:2]

        # Only include valid members of the instance
        if node not in instance_members:
            if verbose:
                self.log.info("Skipping member %s" % member)
            return

        if member in [m["name"] for m in objset_members]:
            return

        if verbose:
            self.log.debug("Such as %s.." % member)

        member_data = {"name": node,
                       "uuid": cmds.getAttr("{}.cbId".format(node))}

        # Include components information when components are assigned
        if components:
            member_data["components"] = components

        return member_data

    def get_related_sets(self, node, view_sets):
        """Get the sets which do not belong to any specific group

        Filters out based on:
        - id attribute is NOT `pyblish.avalon.container`
        - shapes and deformer shapes (alembic creates meshShapeDeformed)
        - set name ends with any from a predefined list
        - set is not in a viewport set (isolate selected, for example)

        Args:
            node (str): name of the current node to check

        """

        ignored = ["pyblish.avalon.instance", "pyblish.avalon.container"]

        related_sets = cmds.listSets(object=node, extendToShape=False)
        if not related_sets:
            return []

        # Ignore containers
        sets = [s for s in related_sets if
                not cmds.attributeQuery("id", node=s, exists=True) or
                not cmds.getAttr("%s.id" % s) in ignored]

        # Exclude deformer sets
        # Autodesk documentation on the listSets command:
        #   type(uint): Returns all sets in the scene of the given type:
        #     1 - all rendering sets
        #     2 - all deformer sets
        deformer_sets = cmds.listSets(object=node, extendToShape=False,
                                      type=2) or []
        deformer_sets = set(deformer_sets)  # optimize lookup
        sets = [s for s in sets if s not in deformer_sets]

        # Ignore specifically named sets
        sets = [s for s in sets if not any(s.endswith(x) for x in self.IGNORE)]

        # Ignore viewport filter view sets (from isolate select and
        # viewports)
        sets = [s for s in sets if s not in view_sets]

        self.log.info("Found sets %s for %s" % (related_sets, node))

        return sets

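The member parsing in `collect_member_data` splits an optional component assignment (e.g. face sets like `.f[0:4]`) off the node name; the one-liner in isolation:

def split_member(member):
    # Split "node.components" into (node, components); components may be None
    return (member.rsplit(".", 1) + [None])[:2]

print(split_member("|grp|pSphereShape1"))    # ['|grp|pSphereShape1', None]
print(split_member("pSphereShape1.f[0:4]"))  # ['pSphereShape1', 'f[0:4]']
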
@@ -1,157 +0,0 @@
import os
import contextlib

import maya.cmds as cmds

import avalon.maya
import colorbleed.api

import cb.utils.maya.context as context


def _set_cache_file_path(node, path):
    """Forces a cacheFile.cachePath attribute to be set to path.

    When the given path does not exist Maya will raise an error
    when using `maya.cmds.setAttr` to set the "cachePath" attribute.

    Arguments:
        node (str): Name of cacheFile node.
        path (str): Path value to set.

    """

    path = str(path)

    # Temporary unique attribute name
    attr = "__tmp_path"
    while cmds.attributeQuery(attr, node=node, exists=True):
        attr += "_"

    # Create the temporary attribute, set its value and connect
    # it to the `.cachePath` attribute to force the value to be
    # set and applied without errors.
    cmds.addAttr(node, longName=attr, dataType="string")
    plug = "{0}.{1}".format(node, attr)
    try:
        cmds.setAttr(plug, path, type="string")
        cmds.connectAttr(plug,
                         "{0}.cachePath".format(node),
                         force=True)
    finally:
        # Ensure the temporary attribute is deleted
        cmds.deleteAttr(plug)


@contextlib.contextmanager
def cache_file_paths(mapping):
    """Set the cacheFile paths during the context.

    This is a workaround context manager that allows
    setting the .cachePath attribute to a folder that
    doesn't actually exist, since using regular
    `maya.cmds.setAttr` results in an error.

    Arguments:
        mapping (dict): node -> path mapping

    """

    # Store the original values
    original = dict()
    for node in mapping:
        original[node] = cmds.getAttr("{}.cachePath".format(node))

    try:
        for node, path in mapping.items():
            _set_cache_file_path(node, path)
        yield
    finally:
        for node, path in original.items():
            _set_cache_file_path(node, path)


def is_cache_resource(resource):
    """Return whether resource is a cacheFile resource"""
    start_tags = ["maya", "node", "cacheFile"]
    required = set(start_tags)
    tags = resource.get("tags", [])
    return required.issubset(tags)


class ExtractInstancerMayaAscii(colorbleed.api.Extractor):
    """Extract as Maya Ascii"""

    label = "Instancer (Maya Ascii)"
    hosts = ["maya"]
    families = ["colorbleed.instancer"]

    # TODO: Find other solution than expanding vars to fix lack of support
    # TODO: of cacheFile

    def process(self, instance):

        export = instance.data("exactExportMembers")

        # Set up cacheFile path remapping.
        resources = instance.data.get("resources", [])
        attr_remap, cache_remap = self.process_resources(resources)

        # Define extract output file path
        dir_path = self.staging_dir(instance)
        filename = "{0}.ma".format(instance.name)
        path = os.path.join(dir_path, filename)

        # Perform extraction
        self.log.info("Performing extraction..")
        with avalon.maya.maintained_selection():
            with cache_file_paths(cache_remap):
                with context.attribute_values(attr_remap):
                    cmds.select(export, noExpand=True)
                    cmds.file(path,
                              force=True,
                              typ="mayaAscii",
                              exportSelected=True,
                              preserveReferences=False,
                              constructionHistory=False,
                              channels=True,  # allow animation
                              constraints=False,
                              shader=False,
                              expressions=False)

        if "files" not in instance.data:
            instance.data["files"] = list()

        instance.data["files"].append(filename)

        self.log.info("Extracted instance '{0}' to: {1}".format(
            instance.name, path))

    def process_resources(self, resources):

        attr_remap = dict()
        cache_remap = dict()
        for resource in resources:
            if not is_cache_resource(resource):
                continue

            node = resource['node']
            destination = resource['destination']

            folder = os.path.dirname(destination)
            fname = os.path.basename(destination)
            if fname.endswith(".xml"):
                fname = fname[:-4]

            # Ensure the folder path ends with a slash
            if not folder.endswith("\\") and not folder.endswith("/"):
                folder += "/"

            # Set path and name
            attr_remap["{0}.cacheName".format(node)] = os.path.expandvars(
                fname)
            cache_remap[node] = os.path.expandvars(folder)

            self.log.info("Mapping {0} to {1}".format(node, destination))

        return attr_remap, cache_remap

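The two remap dictionaries compose inside the extractor as nested contexts; a minimal usage sketch with a hypothetical cacheFile node:

# Temporarily point a cacheFile node at the publish location while
# exporting; the original cachePath is restored when the block exits.
remap = {"particleCache1": "/projects/show/publish/caches/"}
with cache_file_paths(remap):
    pass  # the .ma export would happen here
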
@@ -1,48 +0,0 @@
import os

from maya import cmds

import avalon.maya
import colorbleed.api


class ExtractParticlesMayaAscii(colorbleed.api.Extractor):
    """Extract as Maya Ascii"""

    label = "Particles (Maya Ascii)"
    hosts = ["maya"]
    families = ["colorbleed.particles"]

    def process(self, instance):

        # Define extract output file path
        dir_path = self.staging_dir(instance)
        filename = "{0}.ma".format(instance.name)
        path = os.path.join(dir_path, filename)

        export = instance.data("exactExportMembers")

        # TODO: Transfer cache files and relink temporarily on the particles

        # Perform extraction
        self.log.info("Performing extraction..")
        with avalon.maya.maintained_selection():
            cmds.select(export, noExpand=True)
            cmds.file(path,
                      force=True,
                      typ="mayaAscii",
                      exportSelected=True,
                      preserveReferences=False,
                      constructionHistory=False,
                      channels=True,  # allow animation
                      constraints=False,
                      shader=False,
                      expressions=False)

        if "files" not in instance.data:
            instance.data["files"] = list()

        instance.data["files"].append(filename)

        self.log.info("Extracted instance '{0}' to: {1}".format(
            instance.name, path))

@@ -1,41 +0,0 @@
import json
import os
import shutil

import pyblish.api
import colorbleed.api
import colorbleed.maya.lib as lib


class ExtractTextures(colorbleed.api.Extractor):

    label = "Extract Textures"
    hosts = ["maya"]
    families = ["colorbleed.texture"]
    order = pyblish.api.ExtractorOrder + 0.1

    def process(self, instance):

        self.log.info("Extracting textures ...")

        dir_path = self.staging_dir(instance)
        resources = instance.data["resources"]
        for resource in resources:
            self.copy_files(dir_path, resource["files"])

        self.log.info("Storing cross instance information ...")
        self.store_data(resources)

    def store_data(self, data):
        tmp_dir = lib.maya_temp_folder()
        tmp_file = os.path.join(tmp_dir, "resources.json")
        with open(tmp_file, "w") as f:
            json.dump(data, fp=f,
                      separators=(",", ":"),
                      ensure_ascii=False)

    def copy_files(self, dest, files):
        for f in files:
            fname = os.path.basename(f)
            dest_file = os.path.join(dest, fname)
            shutil.copy(f, dest_file)

@@ -1,65 +0,0 @@
import os

from maya import cmds

import avalon.maya
import colorbleed.api

import cb.utils.maya.context as context


class ExtractFurYeti(colorbleed.api.Extractor):
    """Extract as Yeti nodes"""

    label = "Yeti Nodes"
    hosts = ["maya"]
    families = ["colorbleed.groom"]

    def process(self, instance):

        # Define extract output file path
        dir_path = self.staging_dir(instance)
        filename = "{0}.ma".format(instance.name)
        path = os.path.join(dir_path, filename)

        self.log.info("Performing extraction..")

        # Get only the shape contents we need in such a way that we avoid
        # taking along intermediateObjects
        members = instance.data("setMembers")
        members = cmds.ls(members,
                          dag=True,
                          shapes=True,
                          type="pgYetiMaya",
                          noIntermediate=True,
                          long=True)

        # Remap cache file names and ensure fileMode is set to load from
        # the cache
        resource_remap = dict()
        # Required tags for a resource to count as a Yeti resource
        required_tags = ["maya", "yeti", "attribute"]
        resources = instance.data.get("resources", [])
        for resource in resources:
            resource_tags = resource.get("tags", [])
            if all(tag in resource_tags for tag in required_tags):
                attribute = resource['attribute']
                destination = resource['destination']
                resource_remap[attribute] = destination

        # Perform extraction
        with avalon.maya.maintained_selection():
            with context.attribute_values(resource_remap):
                cmds.select(members, r=True, noExpand=True)
                cmds.file(path,
                          force=True,
                          typ="mayaAscii",
                          exportSelected=True,
                          preserveReferences=False,
                          constructionHistory=False,
                          shader=False)

        instance.data["files"] = [filename]

        self.log.info("Extracted instance '{0}' to: {1}".format(
            instance.name, path))

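`context.attribute_values` comes from the in-house cb.utils.maya.context
module, which is not part of this diff. Conceptually it applies a mapping of
{attribute: value} for the duration of the block and restores the original
values afterwards, so the exported file references the remapped cache paths
while the scene itself is left untouched. A rough sketch of that concept
(illustrative only, assuming simple string attributes such as cache paths):

from contextlib import contextmanager
from maya import cmds


@contextmanager
def attribute_values(attr_values):
    # Remember the current values so they can be restored afterwards.
    original = dict((attr, cmds.getAttr(attr)) for attr in attr_values)
    try:
        for attr, value in attr_values.items():
            cmds.setAttr(attr, value, type="string")  # assumes string attrs
        yield
    finally:
        for attr, value in original.items():
            cmds.setAttr(attr, value, type="string")
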
@@ -1,87 +0,0 @@
import re
from collections import defaultdict

import maya.cmds as cmds

import pyblish.api
import colorbleed.api


def get_gpu_cache_subnodes(cache):
    """Return the number of subnodes in the cache.

    This uses `maya.cmds.gpuCache(showStats=True)` and parses
    the resulting stats for the number of internal sub nodes.

    Args:
        cache (str): gpuCache node name.

    Returns:
        int: Number of subnodes in the loaded gpuCache.

    Raises:
        TypeError: when `cache` is not a gpuCache object type.
        RuntimeError: when `maya.cmds.gpuCache(showStats=True)`
            does not return stats from which we can parse the
            number of subnodes.

    """

    # Ensure gpuCache
    if not cmds.objectType(cache, isType="gpuCache"):
        raise TypeError("Node is not a gpuCache: {0}".format(cache))

    stats = cmds.gpuCache(cache, query=True, showStats=True)
    for line in stats.splitlines():
        match = re.search('nb of internal sub nodes: ([0-9]+)$', line)
        if match:
            return int(match.group(1))

    raise RuntimeError("Couldn't parse number of subnodes "
                       "in cache stats: {0}".format(cache))


def get_empty_gpu_caches(caches):
    empty = list()

    # Group caches per path (optimization) so
    # we check each file only once
    caches_per_path = defaultdict(list)
    for cache in caches:
        path = cmds.getAttr(cache + ".cacheFileName")
        caches_per_path[path].append(cache)

    # We consider the cache empty if its stats
    # result in 0 subnodes
    for path, path_caches in caches_per_path.items():

        cache = path_caches[0]
        num = get_gpu_cache_subnodes(cache)
        if num == 0:
            empty.extend(path_caches)

    return empty


class ValidateGPUCacheNotEmpty(pyblish.api.InstancePlugin):
    """Validates that gpuCaches have at least one visible shape in them.

    This is tested using the `maya.cmds.gpuCache(cache, showStats=True)`
    command.
    """

    order = colorbleed.api.ValidateContentsOrder
    label = 'GpuCache has subnodes'
    families = ['colorbleed.layout']
    actions = [colorbleed.api.SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        caches = cmds.ls(instance, type="gpuCache", long=True)
        invalid = get_empty_gpu_caches(caches)

        return invalid

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Invalid nodes found: {0}".format(invalid))

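The validator relies on the textual stats that `gpuCache -q -showStats`
prints. The exact wording of that output is inferred here from the regex
above rather than from the Maya documentation, so treat this worked example
as an assumption:

import re

# Hypothetical stats line, shaped the way the regex above expects it:
sample = "nb of internal sub nodes: 12"

match = re.search(r'nb of internal sub nodes: ([0-9]+)$', sample)
assert match is not None
assert int(match.group(1)) == 12  # a cache reporting 0 here is flagged empty
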
@@ -18,7 +18,7 @@ class ValidateMeshNoNegativeScale(pyblish.api.Validator):

     order = colorbleed.api.ValidateMeshOrder
     hosts = ['maya']
-    families = ['colorbleed.proxy', 'colorbleed.model']
+    families = ['colorbleed.model']
     label = 'Mesh No Negative Scale'
     actions = [colorbleed.api.SelectInvalidAction]

@@ -17,7 +17,7 @@ class ValidateNoUnknownNodes(pyblish.api.InstancePlugin):

     order = colorbleed.api.ValidateContentsOrder
     hosts = ['maya']
-    families = ['colorbleed.model', 'colorbleed.layout', 'colorbleed.rig']
+    families = ['colorbleed.model', 'colorbleed.rig']
     optional = True
     label = "Unknown Nodes"
     actions = [colorbleed.api.SelectInvalidAction]

@@ -1,43 +0,0 @@
import pyblish.api
import colorbleed.api

from maya import cmds
import cb.utils.maya.dag as dag


class ValidateNodesVisible(pyblish.api.InstancePlugin):
    """Validate all shape nodes are currently visible."""

    order = colorbleed.api.ValidateContentsOrder
    families = ['colorbleed.furYeti']
    hosts = ['maya']
    label = "Nodes Visible"
    actions = [colorbleed.api.SelectInvalidAction]

    @staticmethod
    def get_invalid(instance):

        members = instance.data["setMembers"]
        members = cmds.ls(members,
                          dag=True,
                          shapes=True,
                          long=True,
                          noIntermediate=True)

        invalid = []
        for node in members:
            if not dag.is_visible(node, displayLayer=False):
                invalid.append(node)

        return invalid

    def process(self, instance):
        """Process all the nodes in the instance 'objectSet'"""

        invalid = self.get_invalid(instance)

        if invalid:
            raise ValueError("Instance contains invisible shapes: "
                             "{0}".format(invalid))

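`dag.is_visible` is another in-house helper (cb.utils.maya.dag) that this
diff does not include. Judging from the call site, it answers "is this shape
actually visible", optionally ignoring display layers. A simplified sketch of
what such a check typically does (an assumption, not the studio's code):

from maya import cmds


def is_visible(node, displayLayer=True):
    # A DAG node is visible only if it and all of its ancestors are.
    current = node
    while current:
        if not cmds.getAttr(current + ".visibility"):
            return False
        if displayLayer and cmds.getAttr(current + ".overrideEnabled"):
            # Display layers toggle visibility through draw overrides.
            if not cmds.getAttr(current + ".overrideVisibility"):
                return False
        parents = cmds.listRelatives(current, parent=True, fullPath=True)
        current = parents[0] if parents else None
    return True
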
@@ -1,112 +0,0 @@
from maya import cmds

import pyblish.api
import colorbleed.api


class RepairFailedEditsAction(pyblish.api.Action):
    label = "Remove failed edits"
    on = "failed"  # This action is only available on a failed plug-in
    icon = "wrench"  # Icon from Font Awesome

    def process(self, context, plugin):
        self.log.info("Finding bad nodes..")

        # Get the errored instances
        errored_instances = []
        for result in context.data["results"]:
            if result["error"] is not None and result["instance"] is not None:
                if result["error"]:
                    instance = result["instance"]
                    errored_instances.append(instance)

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)

        # Get the nodes from all instances that ran through this plug-in
        invalid = []
        for instance in instances:
            invalid_nodes = plugin.get_invalid(instance)
            invalid.extend(invalid_nodes)

        if not invalid:
            self.log.info("No invalid nodes found.")
            return

        for ref in invalid:
            self.log.info("Remove failed edits for: {0}".format(ref))
            cmds.referenceEdit(ref,
                               removeEdits=True,
                               failedEdits=True,
                               successfulEdits=False)
        self.log.info("Removed failed edits")


class ValidateReferencesNoFailedEdits(pyblish.api.InstancePlugin):
    """Validate that referenced nodes' reference nodes have no failed
    reference edits.

    Failed reference edits can happen if you apply a change to a referenced
    object in the scene and then change the source of the reference (the
    referenced file) so the object no longer exists. The reference edit can't
    be applied to the node because it is missing, hence a "failed edit". This
    can unnecessarily bloat file sizes and degrade load/save speed.

    To investigate reference edits you can "List Edits..." on a reference
    and look for the edits that appear as failed. Usually failed edits
    are near the bottom of the list.

    """

    order = colorbleed.api.ValidateContentsOrder
    hosts = ['maya']
    families = ['colorbleed.layout']
    category = 'layout'
    optional = True
    version = (0, 1, 0)
    label = 'References Failed Edits'
    actions = [colorbleed.api.SelectInvalidAction,
               RepairFailedEditsAction]

    @staticmethod
    def get_invalid(instance):
        """Return invalid reference nodes in the instance.

        Terminology:
            reference node: The node that is the actual reference containing
                the nodes (type: reference)
            referenced nodes: The nodes contained within the reference
                (type: any type of node)

        """
        referenced_nodes = cmds.ls(instance, referencedNodes=True, long=True)
        if not referenced_nodes:
            return list()

        # Get reference nodes from referenced nodes
        # (note that reference_nodes != referenced_nodes)
        reference_nodes = set()
        for node in referenced_nodes:
            reference_node = cmds.referenceQuery(node, referenceNode=True)
            if reference_node:
                reference_nodes.add(reference_node)

        # Check for failed edits on each reference node.
        invalid = []
        for reference_node in reference_nodes:
            failed_edits = cmds.referenceQuery(reference_node,
                                               editNodes=True,
                                               failedEdits=True,
                                               successfulEdits=False)
            if failed_edits:
                invalid.append(reference_node)

        return invalid

    def process(self, instance):
        """Process all the nodes in the instance"""

        invalid = self.get_invalid(instance)
        if invalid:
            raise ValueError("Reference nodes found with failed "
                             "reference edits: {0}".format(invalid))

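To see what the repair action will strip, the failed edits can first be
listed as readable strings. This snippet only uses documented
`referenceQuery` flags; the reference node name is a placeholder:

from maya import cmds

reference_node = "someRigRN"  # hypothetical reference node name

failed = cmds.referenceQuery(reference_node,
                             editStrings=True,
                             failedEdits=True,
                             successfulEdits=False)
for edit in failed or []:
    print(edit)
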
@@ -12,7 +12,7 @@ class ValidateSingleAssembly(pyblish.api.InstancePlugin):

     order = colorbleed.api.ValidateContentsOrder
     hosts = ['maya']
-    families = ['colorbleed.rig', 'colorbleed.layout', 'colorbleed.animation']
+    families = ['colorbleed.rig', 'colorbleed.animation']
     label = 'Single Assembly'

     def process(self, instance):

@@ -1,90 +0,0 @@
import os

import pyblish.api
import colorbleed.api


class ValidateYetiCacheFrames(pyblish.api.InstancePlugin):
    """Validates Yeti nodes have existing cache frames"""

    order = colorbleed.api.ValidateContentsOrder
    label = 'Yeti Cache Frames'
    families = ['colorbleed.furYeti']
    actions = [colorbleed.api.SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):

        # Check that all cache frames exist for the given node.
        start_frame = instance.data.get("startFrame")
        end_frame = instance.data.get("endFrame")
        required = range(int(start_frame), int(end_frame) + 1)

        yeti_caches = instance.data.get('yetiCaches', {})
        invalid = []

        for node, data in yeti_caches.items():
            cls.log.info("Validating node: {0}".format(node))

            source = data.get("source", None)
            sequences = data.get("sequences", [])

            if not source:
                invalid.append(node)
                cls.log.warning("Node has no cache file name set: "
                                "{0}".format(node))
                continue

            folder = os.path.dirname(source)
            if not folder or not os.path.exists(folder):
                invalid.append(node)
                cls.log.warning("Cache folder does not exist: "
                                "{0} {1}".format(node, folder))
                continue

            if not sequences:
                invalid.append(node)
                cls.log.warning("Sequence does not exist: "
                                "{0} {1}".format(node, source))
                continue

            if len(sequences) != 1:
                invalid.append(node)
                cls.log.warning("More than one sequence found: "
                                "{0} {1}".format(node, source))
                cls.log.warning("Found caches: {0}".format(sequences))
                continue

            sequence = sequences[0]

            start = sequence.start()
            end = sequence.end()
            if start > start_frame or end < end_frame:
                invalid.append(node)
                cls.log.warning("Sequence does not have enough "
                                "frames: {0}-{1} (requires: {2}-{3})"
                                "".format(start, end,
                                          start_frame,
                                          end_frame))
                continue

            # Ensure all frames are present
            missing = set(sequence.missing())
            required_missing = [x for x in required if x in missing]
            if required_missing:
                invalid.append(node)
                cls.log.warning("Sequence is missing required frames: "
                                "{0}".format(required_missing))
                continue

        return invalid

    def process(self, instance):

        invalid = self.get_invalid(instance)

        if invalid:
            self.log.error("Invalid nodes: {0}".format(invalid))
            raise RuntimeError("Invalid yeti nodes in instance. "
                               "See logs for details.")

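The `sequence` objects here expose `start()`, `end()` and `missing()`, which
matches the pyseq library that the next validator's helper also mentions
(assuming pyseq is indeed the collector's backend). A standalone sketch of
the same coverage check:

import os

import pyseq

folder = "/path/to/yeti/caches"  # hypothetical cache folder
start_frame, end_frame = 1001, 1100

for seq in pyseq.get_sequences(os.listdir(folder)):
    covers = seq.start() <= start_frame and seq.end() >= end_frame
    gaps = [f for f in seq.missing() if start_frame <= f <= end_frame]
    if not covers or gaps:
        print("Incomplete sequence: {0}".format(seq))
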
@@ -1,43 +0,0 @@
import pyblish.api
import colorbleed.api


class ValidateYetiCacheNonPublish(pyblish.api.InstancePlugin):
    """Validates Yeti caches are not published FROM published caches"""

    order = colorbleed.api.ValidateContentsOrder
    label = 'Yeti Cache Non Publish'
    families = ['colorbleed.furYeti']
    actions = [colorbleed.api.SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):

        import cbra.lib

        invalid = list()
        for node, data in instance.data['yetiCaches'].items():

            source = data['source']

            # Published folder has at least "publish" in its path
            if "publish" not in source.lower():
                continue

            try:
                context = cbra.lib.parse_context(source)
            except RuntimeError:
                continue

            if "family" in context or "subset" in context:
                invalid.append(node)

        return invalid

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            self.log.error("Invalid nodes: {0}".format(invalid))
            raise RuntimeError("Invalid yeti nodes in instance. "
                               "See logs for details.")

@@ -1,81 +0,0 @@
import os
import re
from collections import defaultdict

import pyblish.api
import colorbleed.api

import maya.cmds as cmds


class ValidateYetiCacheUniqueFilenames(pyblish.api.InstancePlugin):
    """Validates Yeti nodes in instance have unique filename patterns.

    This is to ensure Yeti caches in a single instance don't overwrite each
    other's files when published to a single flat folder structure.

    For example:
        cache1: path/to/arm.%04d.fur
        cache2: other/path/to/arm.%04d.fur

    Both caches point to unique files, yet they share the same filename
    pattern. When copied to a single folder they would overwrite each other,
    and as such are considered invalid. To fix this, rename the cache
    filenames to be unique, like `left_arm.%04d.fur` and `right_arm.%04d.fur`.

    """

    order = colorbleed.api.ValidateContentsOrder
    label = 'Yeti Cache Unique Filenames'
    families = ['colorbleed.furYeti']
    actions = [colorbleed.api.SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):

        members = instance.data["setMembers"]
        shapes = cmds.ls(members, dag=True, leaf=True, shapes=True, long=True)
        yeti_nodes = cmds.ls(shapes, type="pgYetiMaya", long=True)

        if not yeti_nodes:
            raise RuntimeError("No pgYetiMaya nodes in instance.")

        def _to_pattern(path):
            """Path to pattern that pyseq.get_sequences can use"""
            return re.sub(r"([0-9]+|%[0-9]+d)(.fur)$", r"[0-9]*\2", path)

        invalid = list()

        # Collect cache patterns
        cache_patterns = defaultdict(list)
        for node in yeti_nodes:

            path = cmds.getAttr(node + ".cacheFileName")
            if not path:
                invalid.append(node)
                cls.log.warning("Node has no cache file name set: "
                                "{0}".format(node))
                continue

            filename = os.path.basename(path)
            pattern = _to_pattern(filename)

            cache_patterns[pattern].append(node)

        # Identify non-unique cache patterns
        for pattern, nodes in cache_patterns.items():
            if len(nodes) > 1:
                cls.log.warning("Nodes have same filename pattern ({0}): "
                                "{1}".format(pattern, nodes))
                invalid.extend(nodes)

        return invalid

    def process(self, instance):

        invalid = self.get_invalid(instance)

        if invalid:
            self.log.error("Invalid nodes: {0}".format(invalid))
            raise RuntimeError("Invalid yeti nodes in instance. "
                               "See logs for details.")

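The `_to_pattern` helper is what makes the clash detection work: it collapses
both an explicit frame number and a printf-style token into one comparable
pattern. A worked example of its behaviour:

import re


def _to_pattern(path):
    """Same helper as in the validator above."""
    return re.sub(r"([0-9]+|%[0-9]+d)(.fur)$", r"[0-9]*\2", path)


# Explicit frame numbers and %04d tokens collapse to the same pattern,
# so two caches named like this would be flagged as clashing:
assert _to_pattern("arm.0001.fur") == "arm.[0-9]*.fur"
assert _to_pattern("arm.%04d.fur") == "arm.[0-9]*.fur"
# Unique basenames yield unique patterns and pass the check:
assert _to_pattern("left_arm.%04d.fur") != _to_pattern("right_arm.%04d.fur")
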
@@ -1,63 +0,0 @@
import pyblish.api


class ValidateMindbenderDeadlineDone(pyblish.api.InstancePlugin):
    """Ensure render is finished before publishing the resulting images"""

    label = "Rendered Successfully"
    order = pyblish.api.ValidatorOrder
    hosts = ["shell"]
    families = ["mindbender.imagesequence"]
    optional = True

    def process(self, instance):
        from avalon import api
        from avalon.vendor import requests

        # From the Deadline documentation:
        # https://docs.thinkboxsoftware.com/products/deadline/8.0/
        # 1_User%20Manual/manual/rest-jobs.html#job-property-values
        states = {
            0: "Unknown",
            1: "Active",
            2: "Suspended",
            3: "Completed",
            4: "Failed",
            6: "Pending",
        }

        assert "AVALON_DEADLINE" in api.Session, ("Environment variable "
                                                  "missing: 'AVALON_DEADLINE'")
        AVALON_DEADLINE = api.Session["AVALON_DEADLINE"]
        url = "{}/api/jobs?JobID=%s".format(AVALON_DEADLINE)

        for job in instance.data["metadata"]["jobs"]:
            response = requests.get(url % job["_id"])

            if response.ok:
                data = response.json()
                assert data, ("Can't find information about this Deadline "
                              "job: {}".format(job["_id"]))

                state = states.get(data[0]["Stat"])
                if state in (None, "Unknown"):
                    raise Exception("State of this render is unknown")

                elif state == "Active":
                    raise Exception("This render is still currently active")

                elif state == "Suspended":
                    raise Exception("This render is suspended")

                elif state == "Failed":
                    raise Exception("This render was not successful")

                elif state == "Pending":
                    raise Exception("This render is pending")
                else:
                    self.log.info("%s was rendered successfully" % instance)

            else:
                raise Exception("Could not determine the current status "
                                "of this render")