mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)
commit b8bb1dd04f: 82 changed files with 2111 additions and 556 deletions

@ -87,6 +87,6 @@ class RepairContextAction(pyblish.api.Action):

        # Apply pyblish.logic to get the instances for the plug-in

        if plugin in errored_plugins:
            self.log.info("Attempting fix ...")
            plugin.repair()
            plugin.repair(context)

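For reference, a minimal sketch (not part of the commit) of a pyblish repair action built around the new repair(context) signature; the errored-plugin filtering follows common pyblish usage and the class name is illustrative:

    import pyblish.api

    class RepairContextAction(pyblish.api.Action):
        """Attempt automatic repair on all plug-ins that errored."""
        label = "Repair"
        on = "failed"  # only show for failed publishes

        def process(self, context, plugins):
            # pyblish stores per-plugin results on the context
            errored_plugins = [
                result["plugin"] for result in context.data["results"]
                if result["error"]
            ]
            for plugin in plugins:
                if plugin in errored_plugins and hasattr(plugin, "repair"):
                    self.log.info("Attempting fix ...")
                    # After this commit, repair() receives the context
                    plugin.repair(context)
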
@ -35,10 +35,11 @@ def install():

    log.info("Installing callbacks ... ")
    avalon.on("init", on_init)
    avalon.before("save", before_save)
    avalon.on("save", on_save)
    avalon.on("open", on_open)

    log.info("Overriding existing event 'taskChanged'")
    pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)

    log.info("Setting default family states for loader..")
    avalon.data["familiesStateToggled"] = ["colorbleed.imagesequence"]

@ -48,6 +49,10 @@ def on_init(*args):
    houdini.on_houdini_initialize()


def before_save(*args):
    return lib.validate_fps()


def on_save(*args):

    avalon.logger.info("Running callback on save..")

@ -72,7 +77,6 @@ def on_open(*args):

    # Get main window
    parent = hou.ui.mainQtWindow()

    if parent is None:
        log.info("Skipping outdated content pop-up "
                 "because Maya window can't be found.")

@ -89,3 +93,20 @@ def on_open(*args):
                      "your Maya scene.")
    dialog.on_show.connect(_on_show_inventory)
    dialog.show()


def on_pyblish_instance_toggled(instance, new_value, old_value):
    """Toggle saver tool passthrough states on instance toggles."""

    nodes = instance[:]
    if not nodes:
        return

    # Assume instance node is first node
    instance_node = nodes[0]

    if instance_node.isBypassed() != (not old_value):
        print("%s old bypass state didn't match old instance state, "
              "updating anyway.." % instance_node.path())

    instance_node.bypass(not new_value)

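A small sketch of how the "instanceToggled" callback registered above is driven; pyblish UIs emit the signal with keyword arguments, so the handler's parameter order is not load-bearing (standalone example, not part of the commit):

    import pyblish.api

    def on_instance_toggled(instance, new_value, old_value):
        # React to an instance being (de)activated in a publish UI
        print("%s: %s -> %s" % (instance, old_value, new_value))

    pyblish.api.register_callback("instanceToggled", on_instance_toggled)

    # A pyblish UI emits the signal roughly like this when a checkbox flips:
    context = pyblish.api.Context()
    instance = context.create_instance("example")
    pyblish.api.emit("instanceToggled", instance=instance,
                     new_value=False, old_value=True)
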
@ -4,15 +4,17 @@ from contextlib import contextmanager

import hou

from colorbleed import lib

from avalon import api, io
from avalon.houdini import lib
from avalon.houdini import lib as houdini


def set_id(node, unique_id, overwrite=False):

    exists = node.parm("id")
    if not exists:
        lib.imprint(node, {"id": unique_id})
        houdini.imprint(node, {"id": unique_id})

    if not exists and overwrite:
        node.setParm("id", unique_id)

@ -188,3 +190,45 @@ def attribute_values(node, data):
        pass
    finally:
        node.setParms(previous_attrs)


def set_scene_fps(fps):
    hou.setFps(fps)


# Valid FPS
def validate_fps():
    """Validate current scene FPS and show pop-up when it is incorrect

    Returns:
        bool

    """

    fps = lib.get_asset_fps()
    current_fps = hou.fps()  # returns float

    if current_fps != fps:

        from ..widgets import popup

        # Find main window
        parent = hou.ui.mainQtWindow()
        if parent is None:
            pass
        else:
            dialog = popup.Popup2(parent=parent)
            dialog.setModal(True)
            dialog.setWindowTitle("Maya scene not in line with project")
            dialog.setMessage("The FPS is out of sync, please fix")

            # Set new text for button (add optional argument for the popup?)
            toggle = dialog.widgets["toggle"]
            toggle.setEnabled(False)
            dialog.on_show.connect(lambda: set_scene_fps(fps))

            dialog.show()

            return False

    return True

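Since hou.fps() returns a float, the exact != comparison above can misfire for fractional rates such as 23.976; a tolerance-based check is one defensive alternative (a sketch, not what the commit does):

    import math

    def fps_matches(current_fps, asset_fps, tolerance=0.01):
        # Compare floating-point frame rates with a small tolerance
        # instead of exact equality.
        return math.isclose(current_fps, asset_fps, abs_tol=tolerance)

    print(fps_matches(23.976023976, 23.976))  # True
    print(fps_matches(24.0, 25.0))            # False
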
@ -99,12 +99,23 @@ def on_init(_):
    except Exception as exc:
        print(exc)

    # Force load Alembic so referenced alembics
    # work correctly on scene open
    cmds.loadPlugin("AbcImport", quiet=True)
    cmds.loadPlugin("AbcExport", quiet=True)

    from .customize import override_component_mask_commands
    # Force load objExport plug-in (requested by artists)
    cmds.loadPlugin("objExport", quiet=True)

    from .customize import (
        override_component_mask_commands,
        override_toolbox_ui
    )
    safe_deferred(override_component_mask_commands)

    if not IS_HEADLESS:
        safe_deferred(override_toolbox_ui)


def on_before_save(return_code, _):
    """Run validation for scene's FPS prior to saving"""

@ -3,6 +3,7 @@

import maya.cmds as mc
import maya.mel as mel
from functools import partial
import os
import logging

@ -17,7 +18,7 @@ def override_component_mask_commands():
    This implements special behavior for Maya's component
    mask menu items where a ctrl+click will instantly make
    it an isolated behavior disabling all others.

    Tested in Maya 2016 and 2018

    """

@ -64,3 +65,93 @@ def override_component_mask_commands():
        original = COMPONENT_MASK_ORIGINAL[btn]
        new_fn = partial(on_changed_callback, original)
        mc.iconTextCheckBox(btn, edit=True, cc=new_fn)


def override_toolbox_ui():
    """Add custom buttons in Toolbox as replacement for Maya web help icon."""

    import colorbleed
    res = os.path.join(os.path.dirname(os.path.dirname(colorbleed.__file__)),
                       "res")
    icons = os.path.join(res, "icons")

    import avalon.tools.cbsceneinventory as inventory
    import avalon.tools.cbloader as loader
    from avalon.maya.pipeline import launch_workfiles_app

    # Ensure the maya web icon on toolbox exists
    web_button = "ToolBox|MainToolboxLayout|mayaWebButton"
    if not mc.iconTextButton(web_button, query=True, exists=True):
        return

    mc.iconTextButton(web_button, edit=True, visible=False)

    # real = 32, but 36 with padding - according to toolbox mel script
    icon_size = 36
    parent = web_button.rsplit("|", 1)[0]

    # Ensure the parent is a formLayout
    if not mc.objectTypeUI(parent) == "formLayout":
        return

    # Create our controls
    background_color = (0.267, 0.267, 0.267)
    controls = []

    control = mc.iconTextButton(
        "colorbleed_toolbox_workfiles",
        annotation="Work Files",
        label="Work Files",
        image=os.path.join(icons, "workfiles.png"),
        command=lambda: launch_workfiles_app(),
        bgc=background_color,
        width=icon_size,
        height=icon_size,
        parent=parent)
    controls.append(control)

    control = mc.iconTextButton(
        "colorbleed_toolbox_loader",
        annotation="Loader",
        label="Loader",
        image=os.path.join(icons, "loader.png"),
        command=lambda: loader.show(use_context=True),
        bgc=background_color,
        width=icon_size,
        height=icon_size,
        parent=parent)
    controls.append(control)

    control = mc.iconTextButton(
        "colorbleed_toolbox_manager",
        annotation="Inventory",
        label="Inventory",
        image=os.path.join(icons, "inventory.png"),
        command=lambda: inventory.show(),
        bgc=background_color,
        width=icon_size,
        height=icon_size,
        parent=parent)
    controls.append(control)

    control = mc.iconTextButton(
        "colorbleed_toolbox",
        annotation="Colorbleed",
        label="Colorbleed",
        image=os.path.join(icons, "colorbleed_logo_36x36.png"),
        bgc=background_color,
        width=icon_size,
        height=icon_size,
        parent=parent)
    controls.append(control)

    # Add the buttons on the bottom and stack
    # them above each other with side padding
    controls.reverse()
    for i, control in enumerate(controls):
        previous = controls[i - 1] if i > 0 else web_button

        mc.formLayout(parent, edit=True,
                      attachControl=[control, "bottom", 0, previous],
                      attachForm=([control, "left", 1],
                                  [control, "right", 1]))

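The stacking in the loop above works by attaching each button's bottom edge to the previously placed control, with the hidden web button as the initial anchor. A standalone sketch of the same formLayout attach pattern, with made-up controls:

    from maya import cmds

    layout = cmds.formLayout()
    buttons = [cmds.button(label="A"), cmds.button(label="B"),
               cmds.button(label="C")]

    anchor = None
    for btn in buttons:
        if anchor is None:
            # First control pins to the bottom edge of the form
            cmds.formLayout(layout, edit=True,
                            attachForm=[(btn, "bottom", 0),
                                        (btn, "left", 1),
                                        (btn, "right", 1)])
        else:
            # Subsequent controls stack on top of the previous one
            cmds.formLayout(layout, edit=True,
                            attachControl=[(btn, "bottom", 0, anchor)],
                            attachForm=[(btn, "left", 1),
                                        (btn, "right", 1)])
        anchor = btn
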
@ -371,26 +371,6 @@ def evaluation(mode="off"):
        cmds.evaluationManager(mode=original)


@contextlib.contextmanager
def no_refresh():
    """Temporarily disables Maya's UI updates

    Note:
        This only disabled the main pane and will sometimes still
        trigger updates in torn off panels.

    """

    pane = _get_mel_global('gMainPane')
    state = cmds.paneLayout(pane, query=True, manage=True)
    cmds.paneLayout(pane, edit=True, manage=False)

    try:
        yield
    finally:
        cmds.paneLayout(pane, edit=True, manage=state)


@contextlib.contextmanager
def empty_sets(sets, force=False):
    """Remove all members of the sets during the context"""

@ -519,12 +499,15 @@ def no_undo(flush=False):
        cmds.undoInfo(**{keyword: original})


def get_shader_assignments_from_shapes(shapes):
def get_shader_assignments_from_shapes(shapes, components=True):
    """Return the shape assignment per related shading engines.

    Returns a dictionary where the keys are shadingGroups and the values are
    lists of assigned shapes or shape-components.

    Since `maya.cmds.sets` returns shader members on the shapes as components
    on the transform we correct that in this method too.

    For the 'shapes' this will return a dictionary like:
        {
            "shadingEngineX": ["nodeX", "nodeY"],

@ -533,6 +516,7 @@ def get_shader_assignments_from_shapes(shapes):

    Args:
        shapes (list): The shapes to collect the assignments for.
        components (bool): Whether to include the component assignments.

    Returns:
        dict: The {shadingEngine: shapes} relationships

@ -541,7 +525,6 @@ def get_shader_assignments_from_shapes(shapes):

    shapes = cmds.ls(shapes,
                     long=True,
                     selection=True,
                     shapes=True,
                     objectsOnly=True)
    if not shapes:

@ -560,7 +543,37 @@ def get_shader_assignments_from_shapes(shapes):
                                        type="shadingEngine") or []
        shading_groups = list(set(shading_groups))
        for shading_group in shading_groups:
            assignments[shading_group].add(shape)
            assignments[shading_group].append(shape)

    if components:
        # Note: Components returned from maya.cmds.sets are "listed" as if
        #       being assigned to the transform like: pCube1.f[0] as opposed
        #       to pCubeShape1.f[0] so we correct that here too.

        # Build a mapping from parent to shapes to include in lookup.
        transforms = {shape.rsplit("|", 1)[0]: shape for shape in shapes}
        lookup = set(shapes + transforms.keys())

        component_assignments = defaultdict(list)
        for shading_group in assignments.keys():
            members = cmds.ls(cmds.sets(shading_group, query=True), long=True)
            for member in members:

                node = member.split(".", 1)[0]
                if node not in lookup:
                    continue

                # Component
                if "." in member:

                    # Fix transform to shape as shaders are assigned to shapes
                    if node in transforms:
                        shape = transforms[node]
                        component = member.split(".", 1)[1]
                        member = "{0}.{1}".format(shape, component)

                component_assignments[shading_group].append(member)
        assignments = component_assignments

    return dict(assignments)

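A usage sketch for the reworked function (scene names and shading groups are hypothetical), showing both return shapes and how a snapshot is restored, which is the same restore pattern the shader() context manager below relies on:

    from maya import cmds
    from colorbleed.maya import lib

    shapes = cmds.ls("pCubeShape1", long=True)  # hypothetical scene shape

    # Shape-level assignments, e.g. {"blinn1SG": ["|pCube1|pCubeShape1"]}
    by_shape = lib.get_shader_assignments_from_shapes(shapes,
                                                      components=False)

    # Component-level, e.g. {"blinn1SG": ["|pCube1|pCubeShape1.f[0:5]"]}
    by_component = lib.get_shader_assignments_from_shapes(shapes)

    # Restoring a snapshot of assignments
    for shading_group, members in by_component.items():
        if members:
            cmds.sets(members, edit=True, forceElement=shading_group)
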
@ -569,7 +582,7 @@ def get_shader_assignments_from_shapes(shapes):

def shader(nodes, shadingEngine="initialShadingGroup"):
    """Assign a shader to nodes during the context"""

    shapes = cmds.ls(nodes, dag=1, o=1, shapes=1, long=1)
    shapes = cmds.ls(nodes, dag=1, objectsOnly=1, shapes=1, long=1)
    original = get_shader_assignments_from_shapes(shapes)

    try:

@ -582,7 +595,7 @@ def shader(nodes, shadingEngine="initialShadingGroup"):
        # Assign original shaders
        for sg, members in original.items():
            if members:
                cmds.sets(shapes, edit=True, forceElement=shadingEngine)
                cmds.sets(members, edit=True, forceElement=sg)


@contextlib.contextmanager

@ -927,6 +940,18 @@ def extract_alembic(file,
            raise TypeError("Alembic option unsupported type: "
                            "{0} (expected {1})".format(value, valid_types))

        # Ignore empty values, like an empty string, since they mess up how
        # job arguments are built
        if isinstance(value, (list, tuple)):
            value = [x for x in value if x.strip()]

            # Ignore option completely if no values remaining
            if not value:
                options.pop(key)
                continue

            options[key] = value

    # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+
    maya_version = int(cmds.about(version=True))
    if maya_version >= 2018:

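A worked illustration of the empty-value filtering added above, on a hypothetical options dict:

    options = {"attr": ["", "cbId"], "attrPrefix": [""], "step": 1.0}

    for key, value in list(options.items()):
        if isinstance(value, (list, tuple)):
            # Drop empty entries that would break the job-argument string
            value = [x for x in value if x.strip()]
            if not value:
                options.pop(key)
                continue
            options[key] = value

    print(options)  # {'attr': ['cbId'], 'step': 1.0}
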
@ -993,9 +1018,14 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None):
        nodes (set): list of filtered nodes
    """

    lookup = None
    if nodes is None:
        # Consider all nodes
        nodes = cmds.ls()
    else:
        # Build a lookup for the only allowed nodes in output based
        # on `nodes` input of the function (+ ensure long names)
        lookup = set(cmds.ls(nodes, long=True))

    def _node_type_exists(node_type):
        try:

@ -1004,8 +1034,8 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None):
        except RuntimeError:
            return False

    # `readOnly` flag is obsolete as of Maya 2016 therefor we explicitly remove
    # default nodes and reference nodes
    # `readOnly` flag is obsolete as of Maya 2016 therefore we explicitly
    # remove default nodes and reference nodes
    camera_shapes = ["frontShape", "sideShape", "topShape", "perspShape"]

    ignore = set()

@ -1029,8 +1059,7 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None):
    if cmds.pluginInfo("pgYetiMaya", query=True, loaded=True):
        types.append("pgYetiMaya")

    # We *always* ignore intermediate shapes, so we filter them out
    # directly
    # We *always* ignore intermediate shapes, so we filter them out directly
    nodes = cmds.ls(nodes, type=types, long=True, noIntermediate=True)

    # The items which need to pass the id to their parent

@ -1047,6 +1076,12 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None):
    if not nodes:
        return nodes

    # Ensure only nodes from the input `nodes` are returned when a
    # filter was applied on function call because we also iterated
    # to parents and alike
    if lookup is not None:
        nodes &= lookup

    # Avoid locked nodes
    nodes_list = list(nodes)
    locked = cmds.lockNode(nodes_list, query=True, lock=True)

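The lookup filter is a plain set intersection over long node names; a tiny illustration with hypothetical paths:

    nodes = {"|group1", "|group1|pCube1", "|group1|pCube1|pCubeShape1"}
    lookup = {"|group1|pCube1|pCubeShape1"}

    # Keep only the nodes the caller explicitly asked about
    nodes &= lookup
    print(nodes)  # set(['|group1|pCube1|pCubeShape1'])
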
@ -2032,3 +2067,90 @@ def bake_to_world_space(nodes,
                          shape=shape)

    return world_space_nodes


def get_attr_in_layer(attr, layer):
    """Return attribute value in specified renderlayer.

    Same as cmds.getAttr but this gets the attribute's value in a
    given render layer without having to switch to it.

    Warning for parent attribute overrides:
        Attributes that have render layer overrides to their parent attribute
        are not captured correctly since they do not have a direct connection.
        For example, an override to sphere.rotate when querying sphere.rotateX
        will not return correctly!

    Note: This is much faster for Maya's renderLayer system, yet the code
          does no optimized query for render setup.

    Args:
        attr (str): attribute name, ex. "node.attribute"
        layer (str): layer name

    Returns:
        The return value from `maya.cmds.getAttr`

    """

    if cmds.mayaHasRenderSetup():
        log.debug("lib.get_attr_in_layer is not optimized for render setup")
        with renderlayer(layer):
            return cmds.getAttr(attr)

    # Ignore complex query if we're in the layer anyway
    current_layer = cmds.editRenderLayerGlobals(query=True,
                                                currentRenderLayer=True)
    if layer == current_layer:
        return cmds.getAttr(attr)

    connections = cmds.listConnections(attr,
                                       plugs=True,
                                       source=False,
                                       destination=True,
                                       type="renderLayer") or []
    connections = filter(lambda x: x.endswith(".plug"), connections)
    if not connections:
        return cmds.getAttr(attr)

    # Some value types perform a conversion when assigning
    # TODO: See if there's a maya method to allow this conversion
    # instead of computing it ourselves.
    attr_type = cmds.getAttr(attr, type=True)
    conversion = None
    if attr_type == "time":
        conversion = mel.eval('currentTimeUnitToFPS()')  # returns float
    elif attr_type == "doubleAngle":
        # Radians to Degrees: 180 / pi
        # TODO: This will likely only be correct when Maya units are set
        #       to degrees
        conversion = 57.2957795131
    elif attr_type == "doubleLinear":
        raise NotImplementedError("doubleLinear conversion not implemented.")

    for connection in connections:
        if connection.startswith(layer + "."):
            attr_split = connection.split(".")
            if attr_split[0] == layer:
                attr = ".".join(attr_split[0:-1])
                value = cmds.getAttr("%s.value" % attr)
                if conversion:
                    value *= conversion
                return value

    else:
        # When connections are present, but none
        # to the specific renderlayer then the layer
        # should have the "defaultRenderLayer"'s value
        layer = "defaultRenderLayer"
        for connection in connections:
            if connection.startswith(layer):
                attr_split = connection.split(".")
                if attr_split[0] == "defaultRenderLayer":
                    attr = ".".join(attr_split[0:-1])
                    value = cmds.getAttr("%s.value" % attr)
                    if conversion:
                        value *= conversion
                    return value

    return cmds.getAttr(attr)

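A usage sketch for get_attr_in_layer (layer and attribute names are made up):

    from maya import cmds
    from colorbleed.maya import lib

    # Hypothetical legacy render layer with an override on the start frame
    value = lib.get_attr_in_layer("defaultRenderGlobals.startFrame",
                                  layer="renderLayer1")
    print("startFrame in renderLayer1: %s" % value)

    # With no overrides (or when already in the layer) this reduces to getAttr
    current = cmds.editRenderLayerGlobals(query=True,
                                          currentRenderLayer=True)
    assert (lib.get_attr_in_layer("defaultResolution.width", layer=current)
            == cmds.getAttr("defaultResolution.width"))
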
@ -1443,6 +1443,15 @@
            "title": "Remove Unknown Nodes",
            "tooltip": "Remove all unknown nodes"
        },
        {
            "type": "action",
            "command": "$COLORBLEED_SCRIPTS\\cleanup\\removeUnknownPlugins.py",
            "sourcetype": "file",
            "tags": ["cleanup",
                     "removeUnknownPlugins"],
            "title": "Remove Unknown Plugins UI",
            "tooltip": "Remove unknown plugins UI"
        },
        {
            "type": "action",
            "command": "$COLORBLEED_SCRIPTS\\cleanup\\removeUnloadedReferences.py",

@ -131,7 +131,8 @@ class ReferenceLoader(api.Loader):
        file_type = {
            "ma": "mayaAscii",
            "mb": "mayaBinary",
            "abc": "Alembic"
            "abc": "Alembic",
            "fbx": "FBX"
        }.get(representation["name"])

        assert file_type, "Unsupported representation: %s" % representation

@ -32,3 +32,38 @@ class Extractor(pyblish.api.InstancePlugin):
        instance.data['stagingDir'] = staging_dir

        return staging_dir


def contextplugin_should_run(plugin, context):
    """Return whether the ContextPlugin should run on the given context.

    This is a helper function to work around a bug pyblish-base#250
    Whenever a ContextPlugin sets specific families it will still trigger even
    when no instances are present that have those families.

    This actually checks it correctly and returns whether it should run.

    """
    required = set(plugin.families)

    # When no filter always run
    if "*" in required:
        return True

    for instance in context:

        # Ignore inactive instances
        if (not instance.data.get("publish", True) or
                not instance.data.get("active", True)):
            continue

        families = instance.data.get("families", [])
        if any(f in required for f in families):
            return True

        family = instance.data.get("family")
        if family and family in required:
            return True

    return False

@ -2,6 +2,7 @@ import os
import subprocess

import pyblish.api
from colorbleed.plugin import contextplugin_should_run

CREATE_NO_WINDOW = 0x08000000

@ -40,6 +41,10 @@ class CollectDeadlineUser(pyblish.api.ContextPlugin):
    def process(self, context):
        """Inject the current working file"""

        # Workaround bug pyblish-base#250
        if not contextplugin_should_run(self, context):
            return

        user = deadline_command("GetCurrentUserName").strip()

        if not user:

colorbleed/plugins/global/publish/collect_machine_name.py (new file)

@ -0,0 +1,14 @@
import pyblish.api


class CollectMachineName(pyblish.api.ContextPlugin):

    label = "Local Machine Name"
    order = pyblish.api.CollectorOrder
    hosts = ["*"]

    def process(self, context):
        import socket

        machine_name = socket.gethostname()
        self.log.info("Machine name: %s" % machine_name)
        context.data["machine"] = machine_name

@ -25,6 +25,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
    order = pyblish.api.IntegratorOrder
    families = ["colorbleed.animation",
                "colorbleed.camera",
                "colorbleed.fbx",
                "colorbleed.imagesequence",
                "colorbleed.look",
                "colorbleed.mayaAscii",

@ -339,7 +340,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
                        "time": context.data["time"],
                        "author": context.data["user"],
                        "source": source,
                        "comment": context.data.get("comment")}
                        "comment": context.data.get("comment"),
                        "machine": context.data.get("machine"),
                        "fps": context.data.get("fps")}

        # Include optional data if present in
        optionals = ["startFrame", "endFrame", "step", "handles"]

@ -123,7 +123,9 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
    label = "Submit image sequence jobs to Deadline"
    order = pyblish.api.IntegratorOrder + 0.1
    hosts = ["fusion", "maya"]
    families = ["colorbleed.saver.deadline", "colorbleed.renderlayer"]
    families = ["colorbleed.saver.deadline",
                "colorbleed.renderlayer",
                "colorbleed.vrayscene"]

    def process(self, instance):

@ -145,15 +147,18 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
            subset=subset
        )

        # Add in start/end frame
        # Get start/end frame from instance, if not available get from context
        context = instance.context
        start = instance.data.get("startFrame", context.data["startFrame"])
        end = instance.data.get("endFrame", context.data["endFrame"])
        resources = []
        start = instance.data.get("startFrame")
        if start is None:
            start = context.data["startFrame"]
        end = instance.data.get("endFrame")
        if end is None:
            end = context.data["endFrame"]

        # Add in regex for sequence filename
        # This assumes the output files start with subset name and ends with
        # a file extension.
        # a file extension. The "ext" key includes the dot with the extension.
        if "ext" in instance.data:
            ext = re.escape(instance.data["ext"])
        else:

@ -162,8 +167,10 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
        regex = "^{subset}.*\d+{ext}$".format(subset=re.escape(subset),
                                              ext=ext)

        # Remove deadline submission job, not needed in metadata
        data.pop("deadlineSubmissionJob")

        # Write metadata for publish job
        render_job = data.pop("deadlineSubmissionJob")
        metadata = {
            "regex": regex,
            "startFrame": start,

@ -183,13 +190,14 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)

        resources = []
        if data.get("extendFrames", False):

            family = "colorbleed.imagesequence"
            override = data["overrideExistingFrame"]

            # override = data.get("overrideExistingFrame", False)
            out_file = render_job.get("OutFile")
            out_file = job.get("OutFile")
            if not out_file:
                raise RuntimeError("OutFile not found in render job!")

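A worked example of the sequence-filename regex built above (hypothetical subset and extension):

    import re

    subset = "renderBeauty"
    ext = ".exr"  # the "ext" key includes the dot
    regex = "^{subset}.*\\d+{ext}$".format(subset=re.escape(subset),
                                           ext=re.escape(ext))

    print(bool(re.match(regex, "renderBeauty.0001.exr")))      # True
    print(bool(re.match(regex, "renderBeauty_v02.0042.exr")))  # True
    print(bool(re.match(regex, "otherSubset.0001.exr")))       # False
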
@ -1,9 +1,8 @@
from collections import OrderedDict

from avalon import houdini


class CreateAlembicCamera(houdini.Creator):
    """Single baked camera from Alembic ROP"""

    name = "camera"
    label = "Camera (Abc)"

@ -22,13 +21,25 @@ class CreateAlembicCamera(houdini.Creator):
    def process(self):
        instance = super(CreateAlembicCamera, self).process()

        parms = {"use_sop_path": True,
                 "build_from_path": True,
                 "path_attrib": "path",
                 "filename": "$HIP/pyblish/%s.abc" % self.name}
        parms = {
            "filename": "$HIP/pyblish/%s.abc" % self.name,
            "use_sop_path": False
        }

        if self.nodes:
            node = self.nodes[0]
            parms.update({"sop_path": node.path()})
            path = node.path()

            # Split the node path into the first root and the remainder
            # So we can set the root and objects parameters correctly
            _, root, remainder = path.split("/", 2)
            parms.update({
                "root": "/" + root,
                "objects": remainder
            })

        instance.setParms(parms)

        # Lock the Use Sop Path setting so the
        # user doesn't accidentally enable it.
        instance.parm("use_sop_path").lock(True)

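The root/objects split works because Houdini node paths are absolute; with a hypothetical camera path:

    path = "/obj/cameras/renderCam"

    # The leading slash yields an empty first element, then the root,
    # then the remainder of the path
    _, root, remainder = path.split("/", 2)

    print("/" + root)  # "/obj", the Alembic ROP "root" parameter
    print(remainder)   # "cameras/renderCam", the "objects" parameter
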
@ -2,7 +2,7 @@ from avalon import houdini


class CreatePointCache(houdini.Creator):
    """Alembic pointcache for animated data"""
    """Alembic ROP to pointcache"""

    name = "pointcache"
    label = "Point Cache"

@ -22,7 +22,7 @@ class CreatePointCache(houdini.Creator):

        parms = {"use_sop_path": True,      # Export single node from SOP Path
                 "build_from_path": True,   # Direct path of primitive in output
                 "path_attrib": "path",     # Pass path attribute for output\
                 "path_attrib": "path",     # Pass path attribute for output
                 "prim_to_detail_pattern": "cbId",
                 "format": 2,               # Set format to Ogawa
                 "filename": "$HIP/pyblish/%s.abc" % self.name}

@ -2,7 +2,7 @@ from avalon import houdini


class CreateVDBCache(houdini.Creator):
    """Alembic pointcache for animated data"""
    """OpenVDB from Geometry ROP"""

    name = "vbdcache"
    label = "VDB Cache"

@ -15,10 +15,8 @@ class CreateVDBCache(houdini.Creator):
        # Remove the active, we are checking the bypass flag of the nodes
        self.data.pop("active", None)

        self.data.update({
            "node_type": "geometry",   # Set node type to create for output
            "executeBackground": True  # Render node in background
        })
        # Set node type to create for output
        self.data["node_type"] = "geometry"

    def process(self):
        instance = super(CreateVDBCache, self).process()

@ -28,6 +26,6 @@ class CreateVDBCache(houdini.Creator):

        if self.nodes:
            node = self.nodes[0]
            parms.update({"sop_path": node.path()})
            parms.update({"soppath": node.path()})

        instance.setParms(parms)

@ -1,3 +1,4 @@
import os
import hou

import pyblish.api

@ -12,4 +13,24 @@ class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin):

    def process(self, context):
        """Inject the current working file"""
        context.data['currentFile'] = hou.hipFile.path()

        filepath = hou.hipFile.path()
        if not os.path.exists(filepath):
            # By default Houdini will even point a new scene to a path.
            # However if the file is not saved at all and does not exist,
            # we assume the user never set it.
            filepath = ""

        elif os.path.basename(filepath) == "untitled.hip":
            # Due to even a new file being called 'untitled.hip' we are unable
            # to confirm the current scene was ever saved because the file
            # could have existed already. We will allow it if the file exists,
            # but show a warning for this edge case to clarify the potential
            # false positive.
            self.log.warning("Current file is 'untitled.hip' and we are "
                             "unable to detect whether the current scene is "
                             "saved correctly.")

        context.data['currentFile'] = filepath

@ -15,8 +15,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
        id (str): "pyblish.avalon.instance

    Specific node:
        The specific node is important because it dictates in which way the subset
        is being exported.
        The specific node is important because it dictates in which way the
        subset is being exported.

        alembic: will export Alembic file which supports cascading attributes
                 like 'cbId' and 'path'

@ -30,8 +30,6 @@ class CollectInstances(pyblish.api.ContextPlugin):

    def process(self, context):

        instances = []

        nodes = hou.node("/out").children()
        for node in nodes:

@ -55,11 +53,9 @@ class CollectInstances(pyblish.api.ContextPlugin):

            data.update(self.get_frame_data(node))

            # Create nice name
            # All nodes in the Outputs graph have the 'Valid Frame Range'
            # attribute, we check here if any frames are set
            # Create nice name if the instance has a frame range.
            label = data.get("name", node.name())
            if "startFrame" in data:
            if "startFrame" in data and "endFrame" in data:
                frames = "[{startFrame} - {endFrame}]".format(**data)
                label = "{} {}".format(label, frames)

@ -68,8 +64,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
            instance[:] = [node]
            instance.data.update(data)

            instances.append(instance)

        def sort_by_family(instance):
            """Sort by family"""
            return instance.data.get("families", instance.data.get("family"))

@ -1,13 +1,14 @@
import pyblish.api


class CollectOutputNode(pyblish.api.InstancePlugin):
    """Collect the out node which of the instance"""
class CollectOutputSOPPath(pyblish.api.InstancePlugin):
    """Collect the out node's SOP Path value."""

    order = pyblish.api.CollectorOrder
    families = ["*"]
    families = ["colorbleed.pointcache",
                "colorbleed.vdbcache"]
    hosts = ["houdini"]
    label = "Collect Output Node"
    label = "Collect Output SOP Path"

    def process(self, instance):

colorbleed/plugins/houdini/publish/collect_workscene_fps.py (new file)

@ -0,0 +1,15 @@
import pyblish.api
import hou


class CollectWorksceneFPS(pyblish.api.ContextPlugin):
    """Get the FPS of the work scene"""

    label = "Workscene FPS"
    order = pyblish.api.CollectorOrder
    hosts = ["houdini"]

    def process(self, context):
        fps = hou.fps()
        self.log.info("Workscene FPS: %s" % fps)
        context.data.update({"fps": fps})

@ -13,6 +13,8 @@ class ExtractAlembic(colorbleed.api.Extractor):

    def process(self, instance):

        import hou

        ropnode = instance[0]

        # Get the filename from the filename parameter

@ -23,8 +25,17 @@ class ExtractAlembic(colorbleed.api.Extractor):
        file_name = os.path.basename(output)

        # We run the render
        self.log.info("Writing alembic '%s' to '%s'" % (file_name, staging_dir))
        ropnode.render()
        self.log.info("Writing alembic '%s' to '%s'" % (file_name,
                                                        staging_dir))
        try:
            ropnode.render()
        except hou.Error as exc:
            # The hou.Error is not inherited from a Python Exception class,
            # so we explicitly capture the houdini error, otherwise pyblish
            # will remain hanging.
            import traceback
            traceback.print_exc()
            raise RuntimeError("Render failed: {0}".format(exc))

        if "files" not in instance.data:
            instance.data["files"] = []

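As the added comment notes, hou.Error does not derive from Python's Exception, so a generic except Exception handler never sees it. The same wrap-and-reraise pattern as a standalone helper (runnable only inside a Houdini session):

    import hou

    def render_rop(ropnode):
        """Render a ROP and surface Houdini errors as regular exceptions."""
        try:
            ropnode.render()
        except hou.Error as exc:
            # Convert to a plain Exception subclass so generic handlers
            # (like pyblish's) can catch the failure.
            raise RuntimeError("Render failed: {0}".format(exc))
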
@ -13,6 +13,8 @@ class ExtractVDBCache(colorbleed.api.Extractor):

    def process(self, instance):

        import hou

        ropnode = instance[0]

        # Get the filename from the filename parameter

@ -20,13 +22,18 @@ class ExtractVDBCache(colorbleed.api.Extractor):
        sop_output = ropnode.evalParm("sopoutput")
        staging_dir = os.path.normpath(os.path.dirname(sop_output))
        instance.data["stagingDir"] = staging_dir
        file_name = os.path.basename(sop_output)

        if instance.data.get("executeBackground", True):
            self.log.info("Creating background task..")
            ropnode.parm("executebackground").pressButton()
            self.log.info("Finished")
        else:
        self.log.info("Writing VDB '%s' to '%s'" % (file_name, staging_dir))
        try:
            ropnode.render()
        except hou.Error as exc:
            # The hou.Error is not inherited from a Python Exception class,
            # so we explicitly capture the houdini error, otherwise pyblish
            # will remain hanging.
            import traceback
            traceback.print_exc()
            raise RuntimeError("Render failed: {0}".format(exc))

        if "files" not in instance.data:
            instance.data["files"] = []

@ -7,7 +7,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin):

    The connected node cannot be of the following types for Alembic:
        - VDB
        - Volumne
        - Volume

    """

colorbleed/plugins/houdini/publish/validate_bypass.py (new file)

@ -0,0 +1,34 @@
import pyblish.api
import colorbleed.api


class ValidateBypassed(pyblish.api.InstancePlugin):
    """Validate all primitives build hierarchy from attribute when enabled.

    The name of the attribute must exist on the prims and have the same name
    as Build Hierarchy from Attribute's `Path Attribute` value on the Alembic
    ROP node whenever Build Hierarchy from Attribute is enabled.

    """

    order = colorbleed.api.ValidateContentsOrder - 0.1
    families = ["*"]
    hosts = ["houdini"]
    label = "Validate ROP Bypass"

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            rop = invalid[0]
            raise RuntimeError(
                "ROP node %s is set to bypass, publishing cannot continue.." %
                rop.path()
            )

    @classmethod
    def get_invalid(cls, instance):

        rop = instance[0]
        if rop.isBypassed():
            return [rop]

colorbleed/plugins/houdini/publish/validate_camera_rop.py (new file)

@ -0,0 +1,41 @@
import pyblish.api
import colorbleed.api


class ValidateCameraROP(pyblish.api.InstancePlugin):
    """Validate Camera ROP settings."""

    order = colorbleed.api.ValidateContentsOrder
    families = ['colorbleed.camera']
    hosts = ['houdini']
    label = 'Camera ROP'

    def process(self, instance):

        import hou

        node = instance[0]
        if node.parm("use_sop_path").eval():
            raise RuntimeError("Alembic ROP for Camera export should not be "
                               "set to 'Use Sop Path'. Please disable.")

        # Get the root and objects parameter of the Alembic ROP node
        root = node.parm("root").eval()
        objects = node.parm("objects").eval()
        assert root, "Root parameter must be set on Alembic ROP"
        assert root.startswith("/"), "Root parameter must start with slash /"
        assert objects, "Objects parameter must be set on Alembic ROP"
        assert len(objects.split(" ")) == 1, "Must have only a single object."

        # Check if the object exists and is a camera
        path = root + "/" + objects
        camera = hou.node(path)

        if not camera:
            raise ValueError("Camera path does not exist: %s" % path)

        if not camera.type().name() == "cam":
            raise ValueError("Object set in Alembic ROP is not a camera: "
                             "%s (type: %s)" % (camera, camera.type().name()))

@ -3,16 +3,12 @@ import colorbleed.api


class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin):
    """Validate if node attribute Create intermediate Directories is turned on

    Rules:
        * The node must have Create intermediate Directories turned on to
          ensure the output file will be created

    """
    """Validate Create Intermediate Directories is enabled on ROP node."""

    order = colorbleed.api.ValidateContentsOrder
    families = ['colorbleed.pointcache']
    families = ['colorbleed.pointcache',
                'colorbleed.camera',
                'colorbleed.vdbcache']
    hosts = ['houdini']
    label = 'Create Intermediate Directories Checked'

@ -20,8 +16,8 @@ class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin):

        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Found ROP nodes with Create Intermediate "
                               "Directories turned off")
            raise RuntimeError("Found ROP node with Create Intermediate "
                               "Directories turned off: %s" % invalid)

    @classmethod
    def get_invalid(cls, instance):

@ -1,50 +0,0 @@
import pyblish.api
import colorbleed.api


class ValidatOutputNodeExists(pyblish.api.InstancePlugin):
    """Validate if node attribute Create intermediate Directories is turned on

    Rules:
        * The node must have Create intermediate Directories turned on to
          ensure the output file will be created

    """

    order = colorbleed.api.ValidateContentsOrder
    families = ["*"]
    hosts = ['houdini']
    label = "Output Node Exists"

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Could not find output node(s)!")

    @classmethod
    def get_invalid(cls, instance):

        import hou

        result = set()

        node = instance[0]
        if node.type().name() == "alembic":
            soppath_parm = "sop_path"
        else:
            # Fall back to geometry node
            soppath_parm = "soppath"

        sop_path = node.parm(soppath_parm).eval()
        output_node = hou.node(sop_path)

        if output_node is None:
            cls.log.error("Node at '%s' does not exist" % sop_path)
            result.add(node.path())

        # Added cam as this is a legit output type (cameras can't
        if output_node.type().name() not in ["output", "cam"]:
            cls.log.error("SOP Path does not end path at output node")
            result.add(node.path())

        return result

@ -2,13 +2,20 @@ import pyblish.api


class ValidateOutputNode(pyblish.api.InstancePlugin):
    """Validate if output node:
    - exists
    - is of type 'output'
    - has an input"""
    """Validate the instance SOP Output Node.

    This will ensure:
        - The SOP Path is set.
        - The SOP Path refers to an existing object.
        - The SOP Path node is a SOP node.
        - The SOP Path node has at least one input connection (has an input)
        - The SOP Path has geometry data.

    """

    order = pyblish.api.ValidatorOrder
    families = ["*"]
    families = ["colorbleed.pointcache",
                "colorbleed.vdbcache"]
    hosts = ["houdini"]
    label = "Validate Output Node"

@ -16,30 +23,51 @@ class ValidateOutputNode(pyblish.api.InstancePlugin):

        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Output node(s) `%s` are incorrect" % invalid)
            raise RuntimeError("Output node(s) `%s` are incorrect. "
                               "See plug-in log for details." % invalid)

    @classmethod
    def get_invalid(cls, instance):

        import hou

        output_node = instance.data["output_node"]

        if output_node is None:
            node = instance[0]
            cls.log.error("Output node at '%s' does not exist, see source" %
                          node.path())
            cls.log.error("SOP Output node in '%s' does not exist. "
                          "Ensure a valid SOP output path is set."
                          % node.path())

            return node.path()
            return [node.path()]

        # Check if type is correct
        type_name = output_node.type().name()
        if type_name not in ["output", "cam"]:
            cls.log.error("Output node `%s` is not an accepted type `output` "
                          "or `camera`" %
                          output_node.path())
        # Output node must be a Sop node.
        if not isinstance(output_node, hou.SopNode):
            cls.log.error("Output node %s is not a SOP node. "
                          "SOP Path must point to a SOP node, "
                          "instead found category type: %s" % (
                              output_node.path(),
                              output_node.type().category().name()
                          )
                          )
            return [output_node.path()]

        # For the sake of completeness also assert the category type
        # is Sop to avoid potential edge case scenarios even though
        # the isinstance check above should be stricter than this category
        assert output_node.type().category().name() == "Sop", (
            "Output node %s is not of category Sop. This is a bug.." %
            output_node.path()
        )

        # Check if output node has incoming connections
        if type_name == "output" and not output_node.inputConnections():
        if not output_node.inputConnections():
            cls.log.error("Output node `%s` has no incoming connections"
                          % output_node.path())
            return [output_node.path()]

        # Ensure the output node has at least Geometry data
        if not output_node.geometry():
            cls.log.error("Output node `%s` has no geometry data."
                          % output_node.path())
            return [output_node.path()]

@ -0,0 +1,75 @@
import pyblish.api
import colorbleed.api


class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin):
    """Validate all primitives build hierarchy from attribute when enabled.

    The name of the attribute must exist on the prims and have the same name
    as Build Hierarchy from Attribute's `Path Attribute` value on the Alembic
    ROP node whenever Build Hierarchy from Attribute is enabled.

    """

    order = colorbleed.api.ValidateContentsOrder + 0.1
    families = ["colorbleed.pointcache"]
    hosts = ["houdini"]
    label = "Validate Prims Hierarchy Path"

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("See log for details. "
                               "Invalid nodes: {0}".format(invalid))

    @classmethod
    def get_invalid(cls, instance):

        import hou

        output = instance.data["output_node"]
        prims = output.geometry().prims()

        rop = instance[0]
        build_from_path = rop.parm("build_from_path").eval()
        if not build_from_path:
            cls.log.debug("Alembic ROP has 'Build from Path' disabled. "
                          "Validation is ignored..")
            return

        path_attr = rop.parm("path_attrib").eval()
        if not path_attr:
            cls.log.error("The Alembic ROP node has no Path Attribute "
                          "value set, but 'Build Hierarchy from Attribute' "
                          "is enabled.")
            return [rop.path()]

        cls.log.debug("Checking for attribute: %s" % path_attr)

        missing_attr = []
        invalid_attr = []
        for prim in prims:

            try:
                path = prim.stringAttribValue(path_attr)
            except hou.OperationFailed:
                # Attribute does not exist.
                missing_attr.append(prim)
                continue

            if not path:
                # Empty path value is invalid.
                invalid_attr.append(prim)
                continue

        if missing_attr:
            cls.log.info("Prims are missing attribute `%s`" % path_attr)

        if invalid_attr:
            cls.log.info("Prims have no value for attribute `%s` "
                         "(%s of %s prims)" % (path_attr,
                                               len(invalid_attr),
                                               len(prims)))

        if missing_attr or invalid_attr:
            return [output.path()]

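For orientation, the `path` attribute checked here is typically authored upstream in SOPs; a hedged sketch of creating it with an Attribute Wrangle (node names and the path scheme are assumptions):

    import hou

    geo = hou.node("/obj/geo1")
    wrangle = geo.createNode("attribwrangle", "set_path")
    wrangle.parm("class").set(1)  # run over primitives
    wrangle.parm("snippet").set(
        's@path = "geo/" + itoa(@primnum);'  # per-prim hierarchy path
    )
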
@ -1,5 +1,3 @@
from collections import OrderedDict

import avalon.maya
from colorbleed.maya import lib

@ -16,21 +14,24 @@ class CreateAnimation(avalon.maya.Creator):
        super(CreateAnimation, self).__init__(*args, **kwargs)

        # create an ordered dict with the existing data first
        data = OrderedDict(**self.data)

        # get basic animation data : start / end / handles / steps
        for key, value in lib.collect_animation_data().items():
            data[key] = value
            self.data[key] = value

        # Write vertex colors with the geometry.
        data["writeColorSets"] = False
        self.data["writeColorSets"] = False

        # Include only renderable visible shapes.
        # Skips locators and empty transforms
        data["renderableOnly"] = False
        self.data["renderableOnly"] = False

        # Include only nodes that are visible at least once during the
        # frame range.
        data["visibleOnly"] = False
        self.data["visibleOnly"] = False

        self.data = data
        # Include the groups above the out_SET content
        self.data["includeParentHierarchy"] = False  # Include parent groups

        # Default to exporting world-space
        self.data["worldSpace"] = True

@ -1,4 +1,3 @@
from collections import OrderedDict
import avalon.maya
from colorbleed.maya import lib

@ -15,13 +14,11 @@ class CreateCamera(avalon.maya.Creator):
        super(CreateCamera, self).__init__(*args, **kwargs)

        # get basic animation data : start / end / handles / steps
        data = OrderedDict(**self.data)
        animation_data = lib.collect_animation_data()
        for key, value in animation_data.items():
            data[key] = value
            self.data[key] = value

        # Bake to world space by default, when this is False it will also
        # include the parent hierarchy in the baked results
        data['bakeToWorldSpace'] = True
        self.data['bakeToWorldSpace'] = True

        self.data = data

colorbleed/plugins/maya/create/colorbleed_fbx.py (new file)

@ -0,0 +1,18 @@
import avalon.maya
from colorbleed.maya import lib


class CreateFBX(avalon.maya.Creator):
    """FBX Export"""

    name = "fbxDefault"
    label = "FBX"
    family = "colorbleed.fbx"
    icon = "plug"

    def __init__(self, *args, **kwargs):
        super(CreateFBX, self).__init__(*args, **kwargs)

        # get basic animation data : start / end / handles / steps
        for key, value in lib.collect_animation_data().items():
            self.data[key] = value

@ -1,4 +1,3 @@
from collections import OrderedDict
import avalon.maya
from colorbleed.maya import lib

@ -14,7 +13,4 @@ class CreateLook(avalon.maya.Creator):
    def __init__(self, *args, **kwargs):
        super(CreateLook, self).__init__(*args, **kwargs)

        data = OrderedDict(**self.data)
        data["renderlayer"] = lib.get_current_renderlayer()

        self.data = data
        self.data["renderlayer"] = lib.get_current_renderlayer()

@ -1,5 +1,3 @@
from collections import OrderedDict

import avalon.maya

@ -14,10 +12,12 @@ class CreateModel(avalon.maya.Creator):
    def __init__(self, *args, **kwargs):
        super(CreateModel, self).__init__(*args, **kwargs)

        # create an ordered dict with the existing data first
        data = OrderedDict(**self.data)
        # Vertex colors with the geometry
        self.data["writeColorSets"] = False

        # Write vertex colors with the geometry.
        data["writeColorSets"] = True
        # Include attributes by attribute name or prefix
        self.data["attr"] = ""
        self.data["attrPrefix"] = ""

        self.data = data
        # Whether to include parent hierarchy of nodes in the instance
        self.data["includeParentHierarchy"] = False

@ -1,5 +1,3 @@
from collections import OrderedDict

import avalon.maya
from colorbleed.maya import lib

@ -15,22 +13,15 @@ class CreatePointCache(avalon.maya.Creator):
    def __init__(self, *args, **kwargs):
        super(CreatePointCache, self).__init__(*args, **kwargs)

        # create an ordered dict with the existing data first
        data = OrderedDict(**self.data)
        # Add animation data
        self.data.update(lib.collect_animation_data())

        # get basic animation data : start / end / handles / steps
        for key, value in lib.collect_animation_data().items():
            data[key] = value
        self.data["writeColorSets"] = False  # Vertex colors with the geometry.
        self.data["renderableOnly"] = False  # Only renderable visible shapes
        self.data["visibleOnly"] = False  # only nodes that are visible
        self.data["includeParentHierarchy"] = False  # Include parent groups
        self.data["worldSpace"] = True  # Default to exporting world-space

        # Write vertex colors with the geometry.
        data["writeColorSets"] = False

        # Include only renderable visible shapes.
        # Skips locators and empty transforms
        data["renderableOnly"] = False

        # Include only nodes that are visible at least once during the
        # frame range.
        data["visibleOnly"] = False

        self.data = data
        # Add options for custom attributes
        self.data["attr"] = ""
        self.data["attrPrefix"] = ""

@ -1,7 +1,7 @@
from collections import OrderedDict

from maya import cmds

import colorbleed.maya.lib as lib

from avalon.vendor import requests
import avalon.maya
from avalon import api

@ -19,7 +19,7 @@ class CreateRenderGlobals(avalon.maya.Creator):
        # We won't be publishing this one
        self.data["id"] = "avalon.renderglobals"

        # get pools
        # Get available Deadline pools
        AVALON_DEADLINE = api.Session["AVALON_DEADLINE"]
        argument = "{}/api/pools?NamesOnly=true".format(AVALON_DEADLINE)
        response = requests.get(argument)

@ -34,33 +34,31 @@ class CreateRenderGlobals(avalon.maya.Creator):
        self.data.pop("asset", None)
        self.data.pop("active", None)

        data = OrderedDict(**self.data)

        data["suspendPublishJob"] = False
        data["extendFrames"] = False
        data["overrideExistingFrame"] = True
        data["useLegacyRenderLayers"] = True
        data["priority"] = 50
        data["framesPerTask"] = 1
        data["whitelist"] = False
        data["machineList"] = ""
        data["useMayaBatch"] = True
        data["primaryPool"] = pools
        self.data["suspendPublishJob"] = False
        self.data["extendFrames"] = False
        self.data["overrideExistingFrame"] = True
        self.data["useLegacyRenderLayers"] = True
        self.data["priority"] = 50
        self.data["framesPerTask"] = 1
        self.data["whitelist"] = False
        self.data["machineList"] = ""
        self.data["useMayaBatch"] = True
        self.data["primaryPool"] = pools
        # We add a string "-" to allow the user to not set any secondary pools
        data["secondaryPool"] = ["-"] + pools
        self.data["secondaryPool"] = ["-"] + pools

        self.data = data
        self.options = {"useSelection": False}  # Force no content

    def process(self):

        exists = cmds.ls(self.name)
        assert len(exists) <= 1, (
            "More than one renderglobal exists, this is a bug")
            "More than one renderglobal exists, this is a bug"
        )

        if exists:
            return cmds.warning("%s already exists." % exists[0])

        super(CreateRenderGlobals, self).process()

        cmds.setAttr("{}.machineList".format(self.name), lock=True)
        with lib.undo_chunk():
            super(CreateRenderGlobals, self).process()
            cmds.setAttr("{}.machineList".format(self.name), lock=True)

@ -1,5 +1,6 @@
from maya import cmds

import colorbleed.maya.lib as lib
import avalon.maya

@ -12,10 +13,11 @@ class CreateRig(avalon.maya.Creator):
    icon = "wheelchair"

    def process(self):
        instance = super(CreateRig, self).process()

        self.log.info("Creating Rig instance set up ...")
        with lib.undo_chunk():
            instance = super(CreateRig, self).process()

        controls = cmds.sets(name="controls_SET", empty=True)
        pointcache = cmds.sets(name="out_SET", empty=True)
        cmds.sets([controls, pointcache], forceElement=instance)
            self.log.info("Creating Rig instance set up ...")
            controls = cmds.sets(name="controls_SET", empty=True)
            pointcache = cmds.sets(name="out_SET", empty=True)
            cmds.sets([controls, pointcache], forceElement=instance)

@ -1,5 +1,3 @@
from collections import OrderedDict

import avalon.maya

@ -14,13 +12,10 @@ class CreateVrayProxy(avalon.maya.Creator):
    def __init__(self, *args, **kwargs):
        super(CreateVrayProxy, self).__init__(*args, **kwargs)

        data = OrderedDict(**self.data)

        data["animation"] = False
        data["startFrame"] = 1
        data["endFrame"] = 1
        self.data["animation"] = False
        self.data["startFrame"] = 1
        self.data["endFrame"] = 1

        # Write vertex colors
        data["vertexColors"] = False
        self.data["vertexColors"] = False

        self.data.update(data)

colorbleed/plugins/maya/create/colorbleed_vrayscene.py (new file)

@ -0,0 +1,27 @@
import avalon.maya


class CreateVRayScene(avalon.maya.Creator):

    label = "VRay Scene"
    family = "colorbleed.vrayscene"
    icon = "cubes"

    def __init__(self, *args, **kwargs):
        super(CreateVRayScene, self).__init__(*args, **kwargs)

        # We don't need subset or asset attributes
        self.data.pop("subset", None)
        self.data.pop("asset", None)
        self.data.pop("active", None)

        self.data.update({
            "id": "avalon.vrayscene",  # We won't be publishing this one
            "suspendRenderJob": False,
            "suspendPublishJob": False,
            "extendFrames": False,
            "pools": "",
            "framesPerTask": 1
        })

        self.options = {"useSelection": False}  # Force no content

@@ -15,12 +15,13 @@ class CreateYetiCache(avalon.maya.Creator):
     def __init__(self, *args, **kwargs):
         super(CreateYetiCache, self).__init__(*args, **kwargs)

-        data = OrderedDict(**self.data)
-        data["peroll"] = 0
+        self.data["preroll"] = 0

         # Add animation data without step and handles
         anim_data = lib.collect_animation_data()
-        data.update({"startFrame": anim_data["startFrame"],
-                     "endFrame": anim_data["endFrame"],
-                     "samples": 3})
-
-        self.data = data
+        anim_data.pop("step")
+        anim_data.pop("handles")
+        self.data.update(anim_data)
+
+        # Add samples
+        self.data["samples"] = 3
@@ -1,5 +1,6 @@
 from maya import cmds

+import colorbleed.maya.lib as lib
 import avalon.maya


@@ -12,9 +13,9 @@ class CreateYetiRig(avalon.maya.Creator):

     def process(self):
-        instance = super(CreateYetiRig, self).process()
-
-        self.log.info("Creating Rig instance set up ...")
-        input_meshes = cmds.sets(name="input_SET", empty=True)
-        cmds.sets(input_meshes, forceElement=instance)
+        with lib.undo_chunk():
+            instance = super(CreateYetiRig, self).process()
+
+            self.log.info("Creating Rig instance set up ...")
+            input_meshes = cmds.sets(name="input_SET", empty=True)
+            cmds.sets(input_meshes, forceElement=instance)
(deleted file)
@@ -1,48 +0,0 @@
import colorbleed.maya.plugin


class AbcLoader(colorbleed.maya.plugin.ReferenceLoader):
    """Specific loader of Alembic for the avalon.animation family"""

    families = ["colorbleed.animation",
                "colorbleed.camera",
                "colorbleed.pointcache"]
    representations = ["abc"]

    label = "Reference animation"
    order = -10
    icon = "code-fork"
    color = "orange"

    def process_reference(self, context, name, namespace, data):

        import maya.cmds as cmds
        from avalon import maya

        cmds.loadPlugin("AbcImport.mll", quiet=True)
        # Prevent identical alembic nodes from being shared
        # Create unique namespace for the cameras

        # Get name from asset being loaded
        # Assuming name is subset name from the animation, we split the number
        # suffix from the name to ensure the namespace is unique
        name = name.split("_")[0]
        namespace = maya.unique_namespace("{}_".format(name),
                                          format="%03d",
                                          suffix="_abc")

        # hero_001 (abc)
        # asset_counter{optional}

        nodes = cmds.file(self.fname,
                          namespace=namespace,
                          sharedReferenceFile=False,
                          groupReference=True,
                          groupName="{}:{}".format(namespace, name),
                          reference=True,
                          returnNewNodes=True)

        # load colorbleed ID attribute
        self[:] = nodes

        return nodes
colorbleed/plugins/maya/load/load_fbx.py (new file)
@@ -0,0 +1,36 @@
import colorbleed.maya.plugin


class FBXLoader(colorbleed.maya.plugin.ReferenceLoader):
    """Load the FBX"""

    families = ["colorbleed.fbx"]
    representations = ["fbx"]

    label = "Reference FBX"
    order = -10
    icon = "code-fork"
    color = "orange"

    def process_reference(self, context, name, namespace, data):

        import maya.cmds as cmds
        from avalon import maya

        # Ensure FBX plug-in is loaded
        cmds.loadPlugin("fbxmaya", quiet=True)

        with maya.maintained_selection():
            nodes = cmds.file(self.fname,
                              namespace=namespace,
                              reference=True,
                              returnNewNodes=True,
                              groupReference=True,
                              groupName="{}:{}".format(namespace, name))

        self[:] = nodes

        return nodes

    def switch(self, container, representation):
        self.update(container, representation)
@@ -12,29 +12,14 @@ class CollectInstances(pyblish.api.ContextPlugin):
     Identifier:
         id (str): "pyblish.avalon.instance"

-    Supported Families:
-        avalon.model: Geometric representation of artwork
-        avalon.rig: An articulated model for animators.
-            A rig may contain a series of sets in which to identify
-            its contents.
-
-            - cache_SEL: Should contain cachable polygonal meshes
-            - controls_SEL: Should contain animatable controllers for animators
-            - resources_SEL: Should contain nodes that reference external files
-
-            Limitations:
-                - Only Maya is supported
-                - One (1) rig per scene file
-                - Unmanaged history, it is up to the TD to ensure
-                  history is up to par.
-        avalon.animation: Pointcache of `avalon.rig`
-
     Limitations:
         - Does not take into account nodes connected to those
           within an objectSet. Extractors are assumed to export
           with history preserved, but this limits what they will
           be able to achieve and the amount of data available
-          to validators.
+          to validators. An additional collector could also
+          append this input data into the instance, as we do
+          for `colorbleed.rig` with collect_history.

     """

@@ -99,7 +84,11 @@ class CollectInstances(pyblish.api.ContextPlugin):
                                         fullPath=True) or []
            children = cmds.ls(children, noIntermediate=True, long=True)

-           parents = self.get_all_parents(members)
+           parents = []
+           if data.get("includeParentHierarchy", True):
+               # If `includeParentHierarchy` then include the parents
+               # so they will also be picked up in the instance by validators
+               parents = self.get_all_parents(members)
            members_hierarchy = list(set(members + children + parents))

            # Create the instance
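Note: `get_all_parents` sits outside the hunk above. Judging by its use on long (full path) node names, a plausible sketch (hypothetical, not the repository's exact code) is:

    def get_all_parents(nodes):
        """Return all parent paths for the given long node names."""
        parents = set()
        for node in nodes:
            # "|grp|child|shape" -> "|grp|child" and "|grp"
            parts = node.split("|")
            for i in range(2, len(parts)):
                parents.add("|".join(parts[:i]))
        return list(parents)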
@@ -6,10 +6,9 @@ import colorbleed.maya.lib as lib


 class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
-    """Validate all render layer's AOVs / Render Elements are registered in
-    the database
+    """Collect all render layer's AOVs / Render Elements that will render.

-    This validator is important to be able to Extend Frames
+    This collector is important to be able to Extend Frames.

     Technical information:
         Each renderer uses different logic to work with render passes.

@@ -37,8 +36,7 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
             return

         # Get renderer
-        renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer")
-
+        renderer = instance.data["renderer"]
         self.log.info("Renderer found: {}".format(renderer))

         rp_node_types = {"vray": ["VRayRenderElement", "VRayRenderElementSet"],

@@ -53,21 +51,20 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):

         # Collect all AOVs / Render Elements
         layer = instance.data["setMembers"]
-        with lib.renderlayer(layer):
-            node_type = rp_node_types[renderer]
-            render_elements = cmds.ls(type=node_type)
-
-            # Check if AOVs / Render Elements are enabled
-            for element in render_elements:
-                enabled = cmds.getAttr("{}.enabled".format(element))
-                if not enabled:
-                    continue
-
-                pass_name = self.get_pass_name(renderer, element)
-                render_pass = "%s.%s" % (instance.data["subset"], pass_name)
-
-                result.append(render_pass)
+        node_type = rp_node_types[renderer]
+        render_elements = cmds.ls(type=node_type)
+
+        # Check if AOVs / Render Elements are enabled
+        for element in render_elements:
+            enabled = lib.get_attr_in_layer("{}.enabled".format(element),
+                                            layer=layer)
+            if not enabled:
+                continue
+
+            pass_name = self.get_pass_name(renderer, element)
+            render_pass = "%s.%s" % (instance.data["subset"], pass_name)
+
+            result.append(render_pass)

         self.log.info("Found {} render elements / AOVs for "
                       "'{}'".format(len(result), instance.data["subset"]))
colorbleed/plugins/maya/publish/collect_renderable_camera.py (new file)
@@ -0,0 +1,26 @@
import pyblish.api

from maya import cmds

from colorbleed.maya import lib


class CollectRenderableCamera(pyblish.api.InstancePlugin):
    """Collect the renderable camera(s) for the render layer"""

    order = pyblish.api.CollectorOrder + 0.01
    label = "Collect Renderable Camera(s)"
    hosts = ["maya"]
    families = ["colorbleed.vrayscene",
                "colorbleed.renderlayer"]

    def process(self, instance):
        layer = instance.data["setMembers"]

        cameras = cmds.ls(type="camera", long=True)
        renderable = [c for c in cameras if
                      lib.get_attr_in_layer("%s.renderable" % c, layer=layer)]

        self.log.info("Found cameras %s: %s" % (len(renderable), renderable))

        instance.data["cameras"] = renderable
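Note: `lib.get_attr_in_layer`, used here and throughout this commit in place of the old `lib.renderlayer` layer switches, evaluates an attribute as it would be in the given render layer, overrides included, without actually switching the scene to that layer. A hypothetical usage sketch (node and layer names are made up):

    # Would the persp camera render in the "rs_beauty" layer?
    value = lib.get_attr_in_layer("|persp|perspShape.renderable",
                                  layer="rs_beauty")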
@@ -22,16 +22,10 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
         try:
             render_globals = cmds.ls("renderglobalsDefault")[0]
         except IndexError:
-            self.log.error("Cannot collect renderlayers without "
-                           "renderGlobals node")
+            self.log.info("Skipping renderlayer collection, no "
+                          "renderGlobalsDefault found..")
             return

-        # Get start and end frame
-        start_frame = self.get_render_attribute("startFrame")
-        end_frame = self.get_render_attribute("endFrame")
-        context.data["startFrame"] = start_frame
-        context.data["endFrame"] = end_frame
-
         # Get all valid renderlayers
         # This is how Maya populates the renderlayer display
         rlm_attribute = "renderLayerManager.renderLayerId"

@@ -59,30 +53,34 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
             if layer.endswith("defaultRenderLayer"):
                 layername = "masterLayer"
             else:
                 # Remove Maya render setup prefix `rs_`
                 layername = layer.split("rs_", 1)[-1]

-            # Get layer specific settings, might be overrides
-            with lib.renderlayer(layer):
-                data = {
-                    "subset": layername,
-                    "setMembers": layer,
-                    "publish": True,
-                    "startFrame": self.get_render_attribute("startFrame"),
-                    "endFrame": self.get_render_attribute("endFrame"),
-                    "byFrameStep": self.get_render_attribute("byFrameStep"),
-                    "renderer": self.get_render_attribute("currentRenderer"),
-
-                    # instance subset
-                    "family": "Render Layers",
-                    "families": ["colorbleed.renderlayer"],
-                    "asset": asset,
-                    "time": api.time(),
-                    "author": context.data["user"],
-
-                    # Add source to allow tracing back to the scene from
-                    # which was submitted originally
-                    "source": filepath
-                }
+            data = {
+                "subset": layername,
+                "setMembers": layer,
+                "publish": True,
+                "startFrame": self.get_render_attribute("startFrame",
+                                                        layer=layer),
+                "endFrame": self.get_render_attribute("endFrame",
+                                                      layer=layer),
+                "byFrameStep": self.get_render_attribute("byFrameStep",
+                                                         layer=layer),
+                "renderer": self.get_render_attribute("currentRenderer",
+                                                      layer=layer),
+
+                # instance subset
+                "family": "Render Layers",
+                "families": ["colorbleed.renderlayer"],
+                "asset": asset,
+                "time": api.time(),
+                "author": context.data["user"],
+
+                # Add source to allow tracing back to the scene from
+                # which was submitted originally
+                "source": filepath
+            }

             # Apply each user defined attribute as data
             for attr in cmds.listAttr(layer, userDefined=True) or list():

@@ -112,8 +110,9 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
             instance.data["label"] = label
             instance.data.update(data)

-    def get_render_attribute(self, attr):
-        return cmds.getAttr("defaultRenderGlobals.{}".format(attr))
+    def get_render_attribute(self, attr, layer):
+        return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
+                                     layer=layer)

     def parse_options(self, render_globals):
         """Get all overrides with a value, skip those without
colorbleed/plugins/maya/publish/collect_vray_scene.py (new file)
@@ -0,0 +1,110 @@
import os

import pyblish.api

from maya import cmds

from avalon import api


class CollectVRayScene(pyblish.api.ContextPlugin):
    """Collect all information prior for exporting vrscenes
    """

    order = pyblish.api.CollectorOrder
    label = "Collect VRay Scene"
    hosts = ["maya"]

    def process(self, context):

        # Sort by displayOrder
        def sort_by_display_order(layer):
            return cmds.getAttr("%s.displayOrder" % layer)

        host = api.registered_host()

        asset = api.Session["AVALON_ASSET"]
        work_dir = context.data["workspaceDir"]

        # Get VRay Scene instance
        vray_scenes = host.lsattr("family", "colorbleed.vrayscene")
        if not vray_scenes:
            self.log.info("Skipping vrayScene collection, no "
                          "colorbleed.vrayscene instance found..")
            return

        assert len(vray_scenes) == 1, "Multiple vrayscene instances found!"
        vray_scene = vray_scenes[0]

        vrscene_data = host.read(vray_scene)

        assert cmds.ls("vraySettings", type="VRaySettingsNode"), (
            "VRay Settings node does not exists. "
            "Please ensure V-Ray is the current renderer."
        )

        # Output data
        start_frame = int(cmds.getAttr("defaultRenderGlobals.startFrame"))
        end_frame = int(cmds.getAttr("defaultRenderGlobals.endFrame"))

        # Create output file path with template
        file_name = context.data["currentFile"].replace("\\", "/")
        vrscene = ("vrayscene", "<Scene>", "<Scene>_<Layer>", "<Layer>")
        vrscene_output = os.path.join(work_dir, *vrscene)

        # Check and create render output template for render job
        # outputDir is required for submit_publish_job
        if not vrscene_data.get("suspendRenderJob", False):
            renders = ("renders", "<Scene>", "<Scene>_<Layer>", "<Layer>")
            output_renderpath = os.path.join(work_dir, *renders)
            vrscene_data["outputDir"] = output_renderpath

        # Get resolution
        resolution = (cmds.getAttr("defaultResolution.width"),
                      cmds.getAttr("defaultResolution.height"))

        # Get format extension
        extension = cmds.getAttr("vraySettings.imageFormatStr")

        # Get render layers
        render_layers = [i for i in cmds.ls(type="renderLayer") if
                         cmds.getAttr("{}.renderable".format(i)) and not
                         cmds.referenceQuery(i, isNodeReferenced=True)]

        render_layers = sorted(render_layers, key=sort_by_display_order)
        for layer in render_layers:

            subset = layer
            if subset == "defaultRenderLayer":
                subset = "masterLayer"

            data = {
                "subset": subset,
                "setMembers": layer,

                "startFrame": start_frame,
                "endFrame": end_frame,
                "renderer": "vray",
                "resolution": resolution,
                "ext": ".{}".format(extension),

                # instance subset
                "family": "VRay Scene",
                "families": ["colorbleed.vrayscene"],
                "asset": asset,
                "time": api.time(),
                "author": context.data["user"],

                # Add source to allow tracing back to the scene from
                # which was submitted originally
                "source": file_name,

                # Store VRay Scene additional data
                "vrsceneOutput": vrscene_output
            }

            data.update(vrscene_data)

            instance = context.create_instance(subset)
            self.log.info("Created: %s" % instance.name)
            instance.data.update(data)
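Note: the `<Scene>` and `<Layer>` tokens in the templates above are left unresolved here; `submit_vray_deadline.py` (further down in this commit) substitutes them per render layer. With assumed values the expansion looks roughly like:

    # work_dir = "P:/projects/demo/work/shot010"          (assumed)
    # scene = "shot010_v006", layer = "CHARS"             (assumed)
    # vrscene_output template ->
    #   "P:/projects/demo/work/shot010/vrayscene/<Scene>/<Scene>_<Layer>/<Layer>"
    # resolved per layer by the submitter ->
    #   ".../vrayscene/shot010_v006/shot010_v006_CHARS/CHARS_0001.vrscene"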
colorbleed/plugins/maya/publish/collect_workscene_fps.py (new file)
@@ -0,0 +1,15 @@
import pyblish.api
from maya import mel


class CollectWorksceneFPS(pyblish.api.ContextPlugin):
    """Get the FPS of the work scene"""

    label = "Workscene FPS"
    order = pyblish.api.CollectorOrder
    hosts = ["maya"]

    def process(self, context):
        fps = mel.eval('currentTimeUnitToFPS()')
        self.log.info("Workscene FPS: %s" % fps)
        context.data.update({"fps": fps})
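Note: the MEL call above returns the frame rate as a float for the current time unit. An equivalent sketch without MEL, using the standard Maya unit names (a partial map, for illustration only):

    from maya import cmds

    FPS_MAP = {"game": 15.0, "film": 24.0, "pal": 25.0, "ntsc": 30.0,
               "show": 48.0, "palf": 50.0, "ntscf": 60.0}
    fps = FPS_MAP.get(cmds.currentUnit(query=True, time=True))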
@@ -6,6 +6,7 @@ from maya import cmds
 import pyblish.api

 from colorbleed.maya import lib
+from colorbleed.lib import pairwise


 SETTINGS = {"renderDensity",
@@ -29,6 +30,27 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
         assert "input_SET" in instance.data["setMembers"], (
             "Yeti Rig must have an input_SET")

+        input_connections = self.collect_input_connections(instance)
+
+        # Collect any textures if used
+        yeti_resources = []
+        yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya", long=True)
+        for node in yeti_nodes:
+            # Get Yeti resources (textures)
+            resources = self.get_yeti_resources(node)
+            yeti_resources.extend(resources)
+
+        instance.data["rigsettings"] = {"inputs": input_connections}
+
+        instance.data["resources"] = yeti_resources
+
+        # Force frame range for export
+        instance.data["startFrame"] = 1
+        instance.data["endFrame"] = 1
+
+    def collect_input_connections(self, instance):
+        """Collect the inputs for all nodes in the input_SET"""
+
         # Get the input meshes information
         input_content = cmds.ls(cmds.sets("input_SET", query=True), long=True)
@@ -39,44 +61,38 @@ class CollectYetiRig(pyblish.api.InstancePlugin):

         # Ignore intermediate objects
         input_content = cmds.ls(input_content, long=True, noIntermediate=True)
+        if not input_content:
+            return []

         # Store all connections
         connections = cmds.listConnections(input_content,
                                            source=True,
                                            destination=False,
                                            connections=True,
+                                           # Only allow inputs from dagNodes
+                                           # (avoid display layers, etc.)
+                                           type="dagNode",
                                            plugs=True) or []
-
-        # Group per source, destination pair. We need to reverse the connection
-        # list as it comes in with the shape used to query first while that
-        # shape is the destination of the connection
-        grouped = [(connections[i+1], item) for i, item in
-                   enumerate(connections) if i % 2 == 0]
+        connections = cmds.ls(connections, long=True)  # Ensure long names

         inputs = []
-        for src, dest in grouped:
+        for dest, src in pairwise(connections):
             source_node, source_attr = src.split(".", 1)
             dest_node, dest_attr = dest.split(".", 1)

+            # Ensure the source of the connection is not included in the
+            # current instance's hierarchy. If so, we ignore that connection
+            # as we will want to preserve it even over a publish.
+            if source_node in instance:
+                self.log.debug("Ignoring input connection between nodes "
+                               "inside the instance: %s -> %s" % (src, dest))
+                continue
+
             inputs.append({"connections": [source_attr, dest_attr],
                            "sourceID": lib.get_id(source_node),
                            "destinationID": lib.get_id(dest_node)})

-        # Collect any textures if used
-        yeti_resources = []
-        yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya", long=True)
-        for node in yeti_nodes:
-            # Get Yeti resources (textures)
-            resources = self.get_yeti_resources(node)
-            yeti_resources.extend(resources)
-
-        instance.data["rigsettings"] = {"inputs": inputs}
-
-        instance.data["resources"] = yeti_resources
-
-        # Force frame range for export
-        instance.data["startFrame"] = 1
-        instance.data["endFrame"] = 1
+        return inputs

     def get_yeti_resources(self, node):
         """Get all resource file paths
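Note: `pairwise` comes from `colorbleed.lib` and walks the flat (destination, source) connection list two items at a time. Its implementation is not part of this diff; the idiomatic recipe it presumably follows is:

    from itertools import izip  # Python 2; use plain zip on Python 3

    def pairwise(iterable):
        """s -> (s0, s1), (s2, s3), (s4, s5), ..."""
        a = iter(iterable)
        return izip(a, a)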
@@ -96,7 +112,13 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
             list
         """
         resources = []
-        image_search_path = cmds.getAttr("{}.imageSearchPath".format(node))
+
+        image_search_paths = cmds.getAttr("{}.imageSearchPath".format(node))
+
+        # TODO: Somehow this uses OS environment path separator, `:` vs `;`
+        # Later on check whether this is pipeline OS cross-compatible.
+        image_search_paths = [p for p in
+                              image_search_paths.split(os.path.pathsep) if p]

         # List all related textures
         texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
@@ -108,36 +130,51 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
                                        type="reference")
         self.log.info("Found %i reference node(s)" % len(reference_nodes))

-        if texture_filenames and not image_search_path:
+        if texture_filenames and not image_search_paths:
             raise ValueError("pgYetiMaya node '%s' is missing the path to the "
                              "files in the 'imageSearchPath "
                              "atttribute'" % node)

         # Collect all texture files
         for texture in texture_filenames:
-            item = {"files": [], "source": texture, "node": node}
-            texture_filepath = os.path.join(image_search_path, texture)
-            if len(texture.split(".")) > 2:
-
-                # For UDIM based textures (tiles)
-                if "<UDIM>" in texture:
-                    sequences = self.get_sequence(texture_filepath,
-                                                  pattern="<UDIM>")
-                    item["files"].extend(sequences)
-
-                # Based textures (animated masks f.e)
-                elif "%04d" in texture:
-                    sequences = self.get_sequence(texture_filepath,
-                                                  pattern="%04d")
-                    item["files"].extend(sequences)
-                # Assuming it is a fixed name
-                else:
-                    item["files"].append(texture_filepath)
-            else:
-                item["files"].append(texture_filepath)
+
+            files = []
+            if os.path.isabs(texture):
+                self.log.debug("Texture is absolute path, ignoring "
+                               "image search paths for: %s" % texture)
+                files = self.search_textures(texture)
+            else:
+                for root in image_search_paths:
+                    filepath = os.path.join(root, texture)
+                    files = self.search_textures(filepath)
+                    if files:
+                        # Break out on first match in search paths..
+                        break
+
+            if not files:
+                self.log.warning(
+                    "No texture found for: %s "
+                    "(searched: %s)" % (texture, image_search_paths))
+
+            item = {
+                "files": files,
+                "source": texture,
+                "node": node
+            }

             resources.append(item)

+        # For now validate that every texture has at least a single file
+        # resolved. Since a 'resource' does not have the requirement of having
+        # a `files` explicitly mapped it's not explicitly validated.
+        # TODO: Validate this as a validator
+        invalid_resources = []
+        for resource in resources:
+            if not resource['files']:
+                invalid_resources.append(resource)
+        if invalid_resources:
+            raise RuntimeError("Invalid resources")
+
         # Collect all referenced files
         for reference_node in reference_nodes:
             ref_file = cmds.pgYetiGraph(node,
@@ -145,35 +182,83 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
                                         param="reference_file",
                                         getParamValue=True)

-            if not os.path.isfile(ref_file):
-                raise RuntimeError("Reference file must be a full file path!")
-
             # Create resource dict
-            item = {"files": [],
-                    "source": ref_file,
-                    "node": node,
-                    "graphnode": reference_node,
-                    "param": "reference_file"}
+            item = {
+                "source": ref_file,
+                "node": node,
+                "graphnode": reference_node,
+                "param": "reference_file",
+                "files": []
+            }

             ref_file_name = os.path.basename(ref_file)
             if "%04d" in ref_file_name:
-                ref_files = self.get_sequence(ref_file)
-                item["files"].extend(ref_files)
+                item["files"] = self.get_sequence(ref_file)
             else:
-                item["files"].append(ref_file)
+                if os.path.exists(ref_file) and os.path.isfile(ref_file):
+                    item["files"] = [ref_file]
+
+            if not item["files"]:
+                self.log.warning("Reference node '%s' has no valid file "
+                                 "path set: %s" % (reference_node, ref_file))
+                # TODO: This should allow to pass and fail in Validator instead
+                raise RuntimeError("Reference node must be a full file path!")

             resources.append(item)

         return resources

-    def get_sequence(self, filename, pattern="%04d"):
-        """Get sequence from filename
+    def search_textures(self, filepath):
+        """Search all texture files on disk.
+
+        This also parses to full sequences for those with dynamic patterns
+        like <UDIM> and %04d in the filename.
+
+        Args:
+            filepath (str): The full path to the file, including any
+                dynamic patterns like <UDIM> or %04d
+
+        Returns:
+            list: The files found on disk
+
+        """
+        filename = os.path.basename(filepath)
+
+        # Collect full sequence if it matches a sequence pattern
+        if len(filename.split(".")) > 2:
+
+            # For UDIM based textures (tiles)
+            if "<UDIM>" in filename:
+                sequences = self.get_sequence(filepath,
+                                              pattern="<UDIM>")
+                if sequences:
+                    return sequences
+
+            # Frame/time - Based textures (animated masks f.e)
+            elif "%04d" in filename:
+                sequences = self.get_sequence(filepath,
+                                              pattern="%04d")
+                if sequences:
+                    return sequences
+
+        # Assuming it is a fixed name (single file)
+        if os.path.exists(filepath):
+            return [filepath]
+
+        return []
+
+    def get_sequence(self, filepath, pattern="%04d"):
+        """Get sequence from filename.
+
+        This will only return files if they exist on disk as it tries
+        to collect the sequence using the filename pattern and searching
+        for them on disk.

         Supports negative frame ranges like -001, 0000, 0001 and -0001,
         0000, 0001.

         Arguments:
-            filename (str): The full path to filename containing the given
+            filepath (str): The full path to filename containing the given
             pattern.
             pattern (str): The pattern to swap with the variable frame number.
@@ -183,10 +268,10 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
         """
         from avalon.vendor import clique

-        escaped = re.escape(filename)
+        escaped = re.escape(filepath)
         re_pattern = escaped.replace(pattern, "-?[0-9]+")

-        source_dir = os.path.dirname(filename)
+        source_dir = os.path.dirname(filepath)
         files = [f for f in os.listdir(source_dir)
                  if re.match(re_pattern, f)]
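Note: the tail of `get_sequence` falls outside the hunk above. Given the `clique` import, a plausible completion (hypothetical, relying on the module's existing `os` import) would assemble the matched names into a single collection:

    # files: names in source_dir matching the frame pattern (see above)
    collections, remainder = clique.assemble(files, minimum_items=1)
    assert len(collections) == 1, "Must match exactly one sequence"
    return [os.path.join(source_dir, f) for f in list(collections[0])]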
@@ -27,12 +27,12 @@ class ExtractColorbleedAnimation(colorbleed.api.Extractor):
             raise RuntimeError("Couldn't find exactly one out_SET: "
                                "{0}".format(out_sets))
         out_set = out_sets[0]
-        nodes = cmds.sets(out_set, query=True)
+        roots = cmds.sets(out_set, query=True)

         # Include all descendants
-        nodes += cmds.listRelatives(nodes,
-                                    allDescendents=True,
-                                    fullPath=True) or []
+        nodes = roots + cmds.listRelatives(roots,
+                                           allDescendents=True,
+                                           fullPath=True) or []

         # Collect the start and end including handles
         start = instance.data["startFrame"]

@@ -55,9 +55,16 @@ class ExtractColorbleedAnimation(colorbleed.api.Extractor):
             "writeVisibility": True,
             "writeCreases": True,
             "uvWrite": True,
-            "selection": True
+            "selection": True,
+            "worldSpace": instance.data.get("worldSpace", True)
         }

+        if not instance.data.get("includeParentHierarchy", True):
+            # Set the root nodes if we don't want to include parents
+            # The roots are to be considered the ones that are the actual
+            # direct members of the set
+            options["root"] = roots
+
         if int(cmds.about(version=True)) >= 2017:
             # Since Maya 2017 alembic supports multiple uv sets - write them.
             options["writeUVSets"] = True
@@ -67,7 +67,7 @@ class ExtractCameraAlembic(colorbleed.api.Extractor):
         job_str += ' -file "{0}"'.format(path)

         with lib.evaluation("off"):
-            with lib.no_refresh():
+            with avalon.maya.suspended_refresh():
                 cmds.AbcExport(j=job_str, verbose=False)

         if "files" not in instance.data:
@@ -127,7 +127,7 @@ class ExtractCameraMayaAscii(colorbleed.api.Extractor):
         self.log.info("Performing camera bakes for: {0}".format(transform))
         with avalon.maya.maintained_selection():
             with lib.evaluation("off"):
-                with lib.no_refresh():
+                with avalon.maya.suspended_refresh():
                     baked = lib.bake_to_world_space(
                         transform,
                         frame_range=range_with_handles,
colorbleed/plugins/maya/publish/extract_fbx.py (new file)
@@ -0,0 +1,216 @@
import os

from maya import cmds
import maya.mel as mel

import pyblish.api
import avalon.maya

import colorbleed.api


class ExtractFBX(colorbleed.api.Extractor):
    """Extract FBX from Maya.

    This extracts reproducible FBX exports ignoring any of the settings set
    on the local machine in the FBX export options window.

    All export settings are applied with the `FBXExport*` commands prior
    to the `FBXExport` call itself. The options can be overridden with their
    nice names as seen in the "options" property on this class.

    For more information on FBX exports see:
    - https://knowledge.autodesk.com/support/maya/learn-explore/caas
    /CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4
    -9CB19C28F4E0-htm.html
    - http://forums.cgsociety.org/archive/index.php?t-1032853.html
    - https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE
    /LKs9hakE28kJ

    """

    order = pyblish.api.ExtractorOrder
    label = "Extract FBX"
    families = ["colorbleed.fbx"]

    @property
    def options(self):
        """Overridable options for FBX Export

        Given in the following format
            - {NAME: EXPECTED TYPE}

        If the overridden option's type does not match,
        the option is not included and a warning is logged.

        """

        return {
            "cameras": bool,
            "smoothingGroups": bool,
            "hardEdges": bool,
            "tangents": bool,
            "smoothMesh": bool,
            "instances": bool,
            # "referencedContainersContent": bool, # deprecated in Maya 2016+
            "bakeComplexAnimation": int,
            "bakeComplexStart": int,
            "bakeComplexEnd": int,
            "bakeComplexStep": int,
            "bakeResampleAnimation": bool,
            "animationOnly": bool,
            "useSceneName": bool,
            "quaternion": str,  # "euler"
            "shapes": bool,
            "skins": bool,
            "constraints": bool,
            "lights": bool,
            "embeddedTextures": bool,
            "inputConnections": bool,
            "upAxis": str,  # x, y or z,
            "triangulate": bool
        }

    @property
    def default_options(self):
        """The default options for FBX extraction.

        This includes shapes, skins, constraints, lights and incoming
        connections and exports with the Y-axis as up-axis.

        By default this uses the time sliders start and end time.

        """

        start_frame = int(cmds.playbackOptions(query=True,
                                               animationStartTime=True))
        end_frame = int(cmds.playbackOptions(query=True,
                                             animationEndTime=True))

        return {
            "cameras": False,
            "smoothingGroups": False,
            "hardEdges": False,
            "tangents": False,
            "smoothMesh": False,
            "instances": False,
            "bakeComplexAnimation": True,
            "bakeComplexStart": start_frame,
            "bakeComplexEnd": end_frame,
            "bakeComplexStep": 1,
            "bakeResampleAnimation": True,
            "animationOnly": False,
            "useSceneName": False,
            "quaternion": "euler",
            "shapes": True,
            "skins": True,
            "constraints": False,
            "lights": True,
            "embeddedTextures": True,
            "inputConnections": True,
            "upAxis": "y",
            "triangulate": False
        }

    def parse_overrides(self, instance, options):
        """Inspect data of instance to determine overridden options

        An instance may supply any of the overridable options
        as data, the option is then added to the extraction.

        """

        for key in instance.data:
            if key not in self.options:
                continue

            # Ensure the data is of correct type
            value = instance.data[key]
            if not isinstance(value, self.options[key]):
                self.log.warning(
                    "Overridden attribute {key} was of "
                    "the wrong type: {invalid_type} "
                    "- should have been {valid_type}".format(
                        key=key,
                        invalid_type=type(value).__name__,
                        valid_type=self.options[key].__name__))
                continue

            options[key] = value

        return options

    def process(self, instance):

        # Ensure FBX plug-in is loaded
        cmds.loadPlugin("fbxmaya", quiet=True)

        # Define output path
        directory = self.staging_dir(instance)
        filename = "{0}.fbx".format(instance.name)
        path = os.path.join(directory, filename)

        # The export requires forward slashes because we need
        # to format it into a string in a mel expression
        path = path.replace('\\', '/')

        self.log.info("Extracting FBX to: {0}".format(path))

        members = instance.data["setMembers"]
        self.log.info("Members: {0}".format(members))
        self.log.info("Instance: {0}".format(instance[:]))

        # Parse export options
        options = self.default_options
        options = self.parse_overrides(instance, options)
        self.log.info("Export options: {0}".format(options))

        # Collect the start and end including handles
        start = instance.data["startFrame"]
        end = instance.data["endFrame"]
        handles = instance.data.get("handles", 0)
        if handles:
            start -= handles
            end += handles

        options['bakeComplexStart'] = start
        options['bakeComplexEnd'] = end

        # First apply the default export settings to be fully consistent
        # each time for successive publishes
        mel.eval("FBXResetExport")

        # Apply the FBX overrides through MEL since the commands
        # only work correctly in MEL according to online
        # available discussions on the topic
        for option, value in options.iteritems():
            key = option[0].upper() + option[1:]  # uppercase first letter

            # Boolean must be passed as lower-case strings
            # as to MEL standards
            if isinstance(value, bool):
                value = str(value).lower()

            template = "FBXExport{0} -v {1}"
            if key == "UpAxis":
                template = "FBXExport{0} {1}"

            cmd = template.format(key, value)
            self.log.info(cmd)
            mel.eval(cmd)

        # Never show the UI or generate a log
        mel.eval("FBXExportShowUI -v false")
        mel.eval("FBXExportGenerateLog -v false")

        # Export
        with avalon.maya.maintained_selection():
            cmds.select(members, r=1, noExpand=True)
            mel.eval('FBXExport -f "{}" -s'.format(path))

        if "files" not in instance.data:
            instance.data["files"] = list()

        instance.data["files"].append(filename)

        self.log.info("Extract FBX successful to: {0}".format(path))
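Note: the `FBXExport*` loop above turns each option into a MEL call. For example, with illustrative values:

    options = {"bakeComplexAnimation": True, "upAxis": "y"}
    # The loop would then run, via mel.eval:
    #   FBXExportBakeComplexAnimation -v true
    #   FBXExportUpAxis y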
@@ -1,5 +1,7 @@
 import os
 import json
+import tempfile
+import contextlib
 from collections import OrderedDict

 from maya import cmds

@@ -11,6 +13,38 @@ import colorbleed.api
 import colorbleed.maya.lib as lib


+@contextlib.contextmanager
+def no_workspace_dir():
+    """Force maya to a fake temporary workspace directory.
+
+    Note: This is not maya.cmds.workspace 'rootDirectory' but the 'directory'
+
+    This helps to avoid Maya automatically remapping image paths to files
+    relative to the currently set directory.
+
+    """
+
+    # Store current workspace
+    original = cmds.workspace(query=True, directory=True)
+
+    # Set a fake workspace
+    fake_workspace_dir = tempfile.mkdtemp()
+    cmds.workspace(directory=fake_workspace_dir)
+
+    try:
+        yield
+    finally:
+        try:
+            cmds.workspace(directory=original)
+        except RuntimeError:
+            # If the original workspace directory didn't exist either
+            # ignore the fact that it fails to reset it to the old path
+            pass
+
+        # Remove the temporary directory
+        os.rmdir(fake_workspace_dir)
+
+
 class ExtractLook(colorbleed.api.Extractor):
     """Extract Look (Maya Ascii + JSON)

@@ -65,18 +99,23 @@ class ExtractLook(colorbleed.api.Extractor):
         with lib.renderlayer(layer):
             # TODO: Ensure membership edits don't become renderlayer overrides
             with lib.empty_sets(sets, force=True):
-                with lib.attribute_values(remap):
-                    with avalon.maya.maintained_selection():
-                        cmds.select(sets, noExpand=True)
-                        cmds.file(maya_path,
-                                  force=True,
-                                  typ="mayaAscii",
-                                  exportSelected=True,
-                                  preserveReferences=False,
-                                  channels=True,
-                                  constraints=True,
-                                  expressions=True,
-                                  constructionHistory=True)
+                # To avoid Maya trying to automatically remap the file
+                # textures relative to the `workspace -directory` we force
+                # it to a fake temporary workspace. This fixes textures
+                # getting incorrectly remapped. (LKD-17, PLN-101)
+                with no_workspace_dir():
+                    with lib.attribute_values(remap):
+                        with avalon.maya.maintained_selection():
+                            cmds.select(sets, noExpand=True)
+                            cmds.file(maya_path,
+                                      force=True,
+                                      typ="mayaAscii",
+                                      exportSelected=True,
+                                      preserveReferences=False,
+                                      channels=True,
+                                      constraints=True,
+                                      expressions=True,
+                                      constructionHistory=True)

         # Write the JSON data
         self.log.info("Extract json..")
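Note: `no_workspace_dir` is general purpose; any file export inside the block is shielded from workspace-relative texture remapping. A minimal usage sketch (the path is made up):

    from maya import cmds

    with no_workspace_dir():
        # Textures referenced by the exported file keep their original
        # paths instead of being remapped against the real project root.
        cmds.file("/tmp/look_export.ma", force=True, typ="mayaAscii",
                  exportSelected=True)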
@@ -32,6 +32,13 @@ class ExtractColorbleedAlembic(colorbleed.api.Extractor):
             start -= handles
             end += handles

+        attrs = instance.data.get("attr", "").split(";")
+        attrs = [value for value in attrs if value.strip()]
+        attrs += ["cbId"]
+
+        attr_prefixes = instance.data.get("attrPrefix", "").split(";")
+        attr_prefixes = [value for value in attr_prefixes if value.strip()]
+
         # Get extra export arguments
         writeColorSets = instance.data.get("writeColorSets", False)

@@ -44,14 +51,22 @@ class ExtractColorbleedAlembic(colorbleed.api.Extractor):

         options = {
             "step": instance.data.get("step", 1.0),
-            "attr": ["cbId"],
+            "attr": attrs,
+            "attrPrefix": attr_prefixes,
             "writeVisibility": True,
             "writeCreases": True,
             "writeColorSets": writeColorSets,
             "uvWrite": True,
-            "selection": True
+            "selection": True,
+            "worldSpace": instance.data.get("worldSpace", True)
         }

+        if not instance.data.get("includeParentHierarchy", True):
+            # Set the root nodes if we don't want to include parents
+            # The roots are to be considered the ones that are the actual
+            # direct members of the set
+            options["root"] = instance.data.get("setMembers")
+
         if int(cmds.about(version=True)) >= 2017:
             # Since Maya 2017 alembic supports multiple uv sets - write them.
             options["writeUVSets"] = True
@@ -11,7 +11,8 @@ class IncrementCurrentFileDeadline(pyblish.api.ContextPlugin):
     label = "Increment current file"
     order = pyblish.api.IntegratorOrder + 9.0
     hosts = ["maya"]
-    families = ["colorbleed.renderlayer"]
+    families = ["colorbleed.renderlayer",
+                "colorbleed.vrayscene"]
     optional = True

     def process(self, context):
@@ -207,9 +207,9 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
         # todo: This is a temporary fix for yeti variables
         "PEREGRINEL_LICENSE",
         "REDSHIFT_MAYAEXTENSIONSPATH",
-        "REDSHIFT_DISABLEOUTPUTLOCKFILES"
-        "VRAY_FOR_MAYA2018_PLUGINS_X64",
-        "VRAY_PLUGINS_X64",
+        "REDSHIFT_DISABLEOUTPUTLOCKFILES",
+        "VRAY_FOR_MAYA2018_PLUGINS",
+        "VRAY_PLUGINS",
         "VRAY_USE_THREAD_AFFINITY",
         "MAYA_MODULE_PATH"
     ]
colorbleed/plugins/maya/publish/submit_vray_deadline.py (new file)
@@ -0,0 +1,274 @@
import getpass
import json
import os
from copy import deepcopy

import pyblish.api

from avalon import api
from avalon.vendor import requests

from maya import cmds


class VraySubmitDeadline(pyblish.api.InstancePlugin):
    """Export the scene to `.vrscene` files per frame per render layer

    vrscene files will be written out based on the following template:
    <project>/vrayscene/<Scene>/<Scene>_<Layer>/<Layer>

    A dependency job will be added for each layer to render the frames
    through VRay Standalone

    """
    label = "Submit to Deadline ( vrscene )"
    order = pyblish.api.IntegratorOrder
    hosts = ["maya"]
    families = ["colorbleed.vrayscene"]

    def process(self, instance):

        AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
                                          "http://localhost:8082")
        assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"

        context = instance.context

        deadline_url = "{}/api/jobs".format(AVALON_DEADLINE)
        deadline_user = context.data.get("deadlineUser", getpass.getuser())

        filepath = context.data["currentFile"]
        filename = os.path.basename(filepath)
        task_name = "{} - {}".format(filename, instance.name)

        batch_name = "{} - (vrscene)".format(filename)

        # Get the output template for vrscenes
        vrscene_output = instance.data["vrsceneOutput"]

        # This is also the input file for the render job
        first_file = self.format_output_filename(instance,
                                                 filename,
                                                 vrscene_output)

        start_frame = int(instance.data["startFrame"])
        end_frame = int(instance.data["endFrame"])

        # Primary job
        self.log.info("Submitting export job ..")

        payload = {
            "JobInfo": {
                # Top-level group name
                "BatchName": batch_name,

                # Job name, as seen in Monitor
                "Name": "Export {} [{}-{}]".format(task_name,
                                                   start_frame,
                                                   end_frame),

                # Arbitrary username, for visualisation in Monitor
                "UserName": deadline_user,

                "Plugin": "MayaBatch",
                "Frames": "{}-{}".format(start_frame, end_frame),
                "FramesPerTask": instance.data.get("framesPerTask", 1),

                "Comment": context.data.get("comment", ""),

                "OutputFilename0": os.path.dirname(first_file),
            },
            "PluginInfo": {

                # Renderer
                "Renderer": "vray",

                # Mandatory for Deadline
                "Version": cmds.about(version=True),

                # Input
                "SceneFile": filepath,

                "SkipExistingFrames": True,

                "UsingRenderLayers": True,

                "UseLegacyRenderLayers": True
            },

            # Mandatory for Deadline, may be empty
            "AuxFiles": []
        }

        environment = dict(AVALON_TOOLS="global;python36;maya2018")
        environment.update(api.Session.copy())

        jobinfo_environment = self.build_jobinfo_environment(environment)

        payload["JobInfo"].update(jobinfo_environment)

        self.log.info("Job Data:\n{}".format(json.dumps(payload)))

        response = requests.post(url=deadline_url, json=payload)
        if not response.ok:
            raise RuntimeError(response.text)

        # Secondary job
        # Store job to create dependency chain
        dependency = response.json()

        if instance.data["suspendRenderJob"]:
            self.log.info("Skipping render job and publish job")
            return

        self.log.info("Submitting render job ..")

        start_frame = int(instance.data["startFrame"])
        end_frame = int(instance.data["endFrame"])
        ext = instance.data.get("ext", "exr")

        # Create output directory for renders
        render_ouput = self.format_output_filename(instance,
                                                   filename,
                                                   instance.data["outputDir"],
                                                   dir=True)

        self.log.info("Render output: %s" % render_ouput)

        # Update output dir
        instance.data["outputDir"] = render_ouput

        # Format output file name
        sequence_filename = ".".join([instance.name, ext])
        output_filename = os.path.join(render_ouput, sequence_filename)

        # Ensure folder exists:
        if not os.path.exists(render_ouput):
            os.makedirs(render_ouput)

        payload_b = {
            "JobInfo": {

                "JobDependency0": dependency["_id"],
                "BatchName": batch_name,
                "Name": "Render {} [{}-{}]".format(task_name,
                                                   start_frame,
                                                   end_frame),
                "UserName": deadline_user,

                "Frames": "{}-{}".format(start_frame, end_frame),

                "Plugin": "Vray",
                "OverrideTaskExtraInfoNames": False,

                "OutputFilename0": render_ouput,
            },
            "PluginInfo": {

                "InputFilename": first_file,
                "OutputFilename": output_filename,
                "SeparateFilesPerFrame": True,
                "VRayEngine": "V-Ray",

                "Width": instance.data["resolution"][0],
                "Height": instance.data["resolution"][1],

            },
            "AuxFiles": [],
        }

        # Add vray renderslave to environment
        tools = environment["AVALON_TOOLS"] + ";vrayrenderslave"
        environment_b = deepcopy(environment)
        environment_b["AVALON_TOOLS"] = tools

        jobinfo_environment_b = self.build_jobinfo_environment(environment_b)
        payload_b["JobInfo"].update(jobinfo_environment_b)

        self.log.info(json.dumps(payload_b))

        # Post job to deadline
        response_b = requests.post(url=deadline_url, json=payload_b)
        if not response_b.ok:
            raise RuntimeError(response_b.text)

        # Add job for publish job
        if not instance.data.get("suspendPublishJob", False):
            instance.data["deadlineSubmissionJob"] = response_b.json()

    def build_command(self, instance):
        """Create command for Render.exe to export vray scene

        Args:
            instance

        Returns:
            str

        """

        cmd = ('-r vray -proj {project} -cam {cam} -noRender -s {startFrame} '
               '-e {endFrame} -rl {layer} -exportFramesSeparate')

        # Get the camera
        cammera = instance.data["cameras"][0]

        return cmd.format(project=instance.context.data["workspaceDir"],
                          cam=cammera,
                          startFrame=instance.data["startFrame"],
                          endFrame=instance.data["endFrame"],
                          layer=instance.name)

    def build_jobinfo_environment(self, env):
        """Format environment keys and values to match Deadline requirements

        Args:
            env(dict): environment dictionary

        Returns:
            dict

        """
        return {"EnvironmentKeyValue%d" % index: "%s=%s" % (k, env[k])
                for index, k in enumerate(env)}

    def format_output_filename(self, instance, filename, template, dir=False):
        """Format the expected output file of the Export job

        Example:
            <Scene>/<Scene>_<Layer>/<Layer>
            "shot010_v006/shot010_v006_CHARS/CHARS"

        Args:
            instance:
            filename(str):
            dir(bool):

        Returns:
            str

        """

        def smart_replace(string, key_values):
            new_string = string
            for key, value in key_values.items():
                new_string = new_string.replace(key, value)
            return new_string

        # Ensure filename has no extension
        file_name, _ = os.path.splitext(filename)

        # Reformat without tokens
        output_path = smart_replace(template,
                                    {"<Scene>": file_name,
                                     "<Layer>": instance.name})

        if dir:
            return output_path.replace("\\", "/")

        start_frame = int(instance.data["startFrame"])
        filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame)

        result = filename_zero.replace("\\", "/")

        return result
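Note: `build_jobinfo_environment` flattens the environment into Deadline's numbered JobInfo keys. With an assumed two-variable environment:

    env = {"AVALON_TOOLS": "global;python36;maya2018",
           "AVALON_PROJECT": "demo"}  # assumed values
    # build_jobinfo_environment(env) would yield (key order may vary
    # on Python 2 dicts):
    # {"EnvironmentKeyValue0": "AVALON_TOOLS=global;python36;maya2018",
    #  "EnvironmentKeyValue1": "AVALON_PROJECT=demo"}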
@@ -1,6 +1,7 @@
 import pyblish.api

 from maya import cmds
+from colorbleed.plugin import contextplugin_should_run


 class ValidateCurrentRenderLayerIsRenderable(pyblish.api.ContextPlugin):

@@ -20,7 +21,12 @@ class ValidateCurrentRenderLayerIsRenderable(pyblish.api.ContextPlugin):
     hosts = ["maya"]
     families = ["colorbleed.renderlayer"]

-    def process(self, instance):
+    def process(self, context):
+
+        # Workaround bug pyblish-base#250
+        if not contextplugin_should_run(self, context):
+            return
+
         layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
         cameras = cmds.ls(type="camera", long=True)
         renderable = any(c for c in cameras if cmds.getAttr(c + ".renderable"))
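Note: `contextplugin_should_run` guards against pyblish-base issue #250, where a ContextPlugin with `families` set runs even when no instance in the context matches those families. A sketch of what such a helper presumably checks (not necessarily the repository's exact code):

    def contextplugin_should_run(plugin, context):
        """Return whether any instance in the context matches the plug-in."""
        required = set(plugin.families)
        for instance in context:
            families = set(instance.data.get("families", []))
            families.add(instance.data.get("family"))
            if families & required:
                return True
        return False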
@@ -2,6 +2,7 @@ import pyblish.api

 import avalon.api as api
 from avalon.vendor import requests
+from colorbleed.plugin import contextplugin_should_run


 class ValidateDeadlineConnection(pyblish.api.ContextPlugin):

@@ -12,7 +13,11 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
     hosts = ["maya"]
     families = ["colorbleed.renderlayer"]

-    def process(self, instance):
+    def process(self, context):
+
+        # Workaround bug pyblish-base#250
+        if not contextplugin_should_run(self, context):
+            return

         AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
                                           "http://localhost:8082")

@@ -24,4 +29,4 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
         assert response.ok, "Response must be ok"
         assert response.text.startswith("Deadline Web Service "), (
             "Web service did not respond with 'Deadline Web Service'"
-            )
+        )
@@ -6,11 +6,11 @@ import colorbleed.api


 class ValidateLookSets(pyblish.api.InstancePlugin):
-    """Validate if any sets are missing from the instance and look data
+    """Validate if any sets relationships are not being collected.

     A shader can be assigned to a node that is missing a Colorbleed ID.
     Because it is missing the ID it has not been collected in the instance.
-    This validator ensures no relationships and thus considers it invalid
+    This validator ensures those relationships and thus considers it invalid
     if a relationship was not collected.

     When the relationship needs to be maintained the artist might need to

@@ -25,8 +25,10 @@ class ValidateLookSets(pyblish.api.InstancePlugin):

         - Displacement objectSets (like V-Ray):

-            It is best practice to add the transform group of the shape to the
-            displacement objectSet.
+            It is best practice to add the transform of the shape to the
+            displacement objectSet. Any parent groups will not work as groups
+            do not receive a Colorbleed Id. As such the assignments need to be
+            made to the shapes and their transform.

     Example content:
         [asset_GRP|geometry_GRP|body_GES,
@@ -36,7 +36,7 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
         assert fps and fps == asset_fps, "Scene must be %s FPS" % asset_fps

     @classmethod
-    def repair(cls):
+    def repair(cls, context):
         """Fix the current FPS setting of the scene, set to PAL(25.0 fps)"""

         cls.log.info("Setting angular unit to 'degrees'")
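Note: only the changed `repair` signature is shown; its body sits outside the hunk. Going by the docstring and the log line, a sketch of what it presumably does (not the repository's exact body):

    from maya import cmds

    def repair(cls, context):
        """Reset the scene units; a sketch under stated assumptions."""
        cls.log.info("Setting angular unit to 'degrees'")
        cmds.currentUnit(angle="degree")
        cmds.currentUnit(time="pal")  # PAL = 25.0 fps, per the docstring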
@@ -24,7 +24,7 @@ def get_invalid_sets(shape):
     """

     invalid = []
-    sets = cmds.listSets(object=shape, t=1, extendToShape=False)
+    sets = cmds.listSets(object=shape, t=1, extendToShape=False) or []
     for s in sets:
         members = cmds.sets(s, query=True, nodesOnly=True)
         if not members:

@@ -93,7 +93,9 @@ class ValidateMeshShaderConnections(pyblish.api.InstancePlugin):
     def get_invalid(instance):

         shapes = cmds.ls(instance[:], dag=1, leaf=1, shapes=1, long=True)
-        shapes = cmds.ls(shapes, shapes=True, noIntermediate=True, long=True)
+
+        # todo: allow to check anything that can have a shader
+        shapes = cmds.ls(shapes, noIntermediate=True, long=True, type="mesh")

         invalid = []
         for shape in shapes:
@@ -63,7 +63,8 @@ class ValidateModelContent(pyblish.api.InstancePlugin):
             cls.log.error("Must have exactly one top group")
         if len(assemblies) == 0:
             cls.log.warning("No top group found. "
-                            "(Are there objects in the instance?)")
+                            "(Are there objects in the instance?"
+                            " Or is it parented in another group?)")
             return assemblies or True

     def _is_visible(node):
@@ -23,7 +23,8 @@ class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin):
     hosts = ['maya']
     families = ["*"]

-    actions = [colorbleed.maya.action.SelectInvalidAction]
+    actions = [colorbleed.maya.action.SelectInvalidAction,
+               colorbleed.maya.action.GenerateUUIDsOnInvalidAction]

     def process(self, instance):
         invalid = self.get_invalid(instance)
@@ -3,7 +3,6 @@ from maya import cmds

 import pyblish.api
 import colorbleed.api
 import colorbleed.maya.action
-import colorbleed.maya.lib as lib


 class ValidateRenderNoDefaultCameras(pyblish.api.InstancePlugin):

@@ -18,20 +17,14 @@ class ValidateRenderNoDefaultCameras(pyblish.api.InstancePlugin):
     @staticmethod
     def get_invalid(instance):

-        layer = instance.data["setMembers"]
+        renderable = set(instance.data["cameras"])

         # Collect default cameras
         cameras = cmds.ls(type='camera', long=True)
-        defaults = [cam for cam in cameras if
-                    cmds.camera(cam, query=True, startupCamera=True)]
+        defaults = set(cam for cam in cameras if
+                       cmds.camera(cam, query=True, startupCamera=True))

-        invalid = []
-        with lib.renderlayer(layer):
-            for cam in defaults:
-                if cmds.getAttr(cam + ".renderable"):
-                    invalid.append(cam)
-
-        return invalid
+        return [cam for cam in renderable if cam in defaults]

     def process(self, instance):
         """Process all the cameras in the instance"""
@@ -1,9 +1,6 @@
-from maya import cmds
-
 import pyblish.api
 import colorbleed.api
 import colorbleed.maya.action
-import colorbleed.maya.lib as lib


 class ValidateRenderSingleCamera(pyblish.api.InstancePlugin):

@@ -18,32 +15,30 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin):
     """

     order = colorbleed.api.ValidateContentsOrder
-    hosts = ['maya']
-    families = ['colorbleed.renderlayer']
     label = "Render Single Camera"
+    hosts = ['maya']
+    families = ["colorbleed.renderlayer",
+                "colorbleed.vrayscene"]
     actions = [colorbleed.maya.action.SelectInvalidAction]

-    @staticmethod
-    def get_invalid(instance):
-
-        layer = instance.data["setMembers"]
-
-        cameras = cmds.ls(type='camera', long=True)
-
-        with lib.renderlayer(layer):
-            renderable = [cam for cam in cameras if
-                          cmds.getAttr(cam + ".renderable")]
-
-            if len(renderable) == 0:
-                raise RuntimeError("No renderable cameras found.")
-            elif len(renderable) > 1:
-                return renderable
-            else:
-                return []
-
     def process(self, instance):
         """Process all the cameras in the instance"""
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Multiple renderable cameras"
-                               "found: {0}".format(invalid))
+            raise RuntimeError("Invalid cameras for render.")
+
+    @classmethod
+    def get_invalid(cls, instance):
+
+        cameras = instance.data.get("cameras", [])
+
+        if len(cameras) > 1:
+            cls.log.error("Multiple renderable cameras found for %s: %s " %
+                          (instance.data["setMembers"], cameras))
+            return [instance.data["setMembers"]] + cameras
+
+        elif len(cameras) < 1:
+            cls.log.error("No renderable cameras found for %s " %
+                          instance.data["setMembers"])
+            return [instance.data["setMembers"]]
@@ -50,37 +50,33 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):

        invalid = False

        renderer = instance.data['renderer']
        layer_node = instance.data['setMembers']
        layer = instance.data['setMembers']

        # Collect the filename prefix in the render layer
        with lib.renderlayer(layer_node):
            # Get the node attributes for current renderer
        attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS['default'])
        prefix = lib.get_attr_in_layer("{node}.{prefix}".format(**attrs),
                                       layer=layer)
        padding = lib.get_attr_in_layer("{node}.{padding}".format(**attrs),
                                        layer=layer)

            render_attrs = lib.RENDER_ATTRS.get(renderer,
                                                lib.RENDER_ATTRS['default'])
            node = render_attrs["node"]
            padding_attr = render_attrs["padding"]
            prefix_attr = render_attrs["prefix"]
        anim_override = lib.get_attr_in_layer("defaultRenderGlobals.animation",
                                              layer=layer)
        if not anim_override:
            invalid = True
            cls.log.error("Animation needs to be enabled. Use the same "
                          "frame for start and end to render single frame")

            prefix = cmds.getAttr("{}.{}".format(node, prefix_attr))
            padding = cmds.getAttr("{}.{}".format(node, padding_attr))
        fname_prefix = cls.RENDERER_PREFIX.get(renderer,
                                               cls.DEFAULT_PREFIX)
        if prefix != fname_prefix:
            invalid = True
            cls.log.error("Wrong file name prefix: %s (expected: %s)"
                          % (prefix, fname_prefix))

            anim_override = cmds.getAttr("defaultRenderGlobals.animation")
            if not anim_override:
                invalid = True
                cls.log.error("Animation needs to be enabled. Use the same "
                              "frame for start and end to render single frame")

            fname_prefix = cls.RENDERER_PREFIX.get(renderer,
                                                   cls.DEFAULT_PREFIX)
            if prefix != fname_prefix:
                invalid = True
                cls.log.error("Wrong file name prefix, expecting %s"
                              % fname_prefix)

            if padding != cls.DEFAULT_PADDING:
                invalid = True
                cls.log.error("Expecting padding of {} ( {} )".format(
                    cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING))
        if padding != cls.DEFAULT_PADDING:
            invalid = True
            cls.log.error("Expecting padding of {} ( {} )".format(
                cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING))

        return invalid

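Both the prefix and the padding are now read with lib.get_attr_in_layer instead of switching the active render layer first. A hedged usage sketch of that pattern (the layer name is made up; this assumes the helper returns an attribute's value as overridden in the given render layer):

    import colorbleed.maya.lib as lib

    # Query a render-layer override without making the layer active.
    prefix = lib.get_attr_in_layer("vraySettings.fileNamePrefix",
                                   layer="rs_beauty")
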
@@ -30,7 +30,6 @@ class ValidateSceneSetWorkspace(pyblish.api.ContextPlugin):

    order = colorbleed.api.ValidatePipelineOrder
    hosts = ['maya']
    families = ['colorbleed.model']
    category = 'scene'
    version = (0, 1, 0)
    label = 'Maya Workspace Set'

@@ -0,0 +1,72 @@
from maya import cmds

import pyblish.api
import colorbleed.api
import colorbleed.maya.action


class ValidateSkinclusterDeformerSet(pyblish.api.InstancePlugin):
    """Validate skinClusters on meshes have valid member relationships.

    In rare cases it can happen that a mesh has a skinCluster in its history
    but it is *not* included in the deformer relationship history. If this is
    the case then FBX will not export the skinning.

    """

    order = colorbleed.api.ValidateContentsOrder
    hosts = ['maya']
    families = ['colorbleed.fbx']
    label = "Skincluster Deformer Relationships"
    actions = [colorbleed.maya.action.SelectInvalidAction]

    def process(self, instance):
        """Process all the transform nodes in the instance"""
        invalid = self.get_invalid(instance)

        if invalid:
            raise ValueError("Invalid skinCluster relationships "
                             "found on meshes: {0}".format(invalid))

    @classmethod
    def get_invalid(cls, instance):

        meshes = cmds.ls(instance, type="mesh", noIntermediate=True, long=True)
        invalid = list()

        for mesh in meshes:
            history = cmds.listHistory(mesh) or []
            skins = cmds.ls(history, type="skinCluster")

            # Ensure at most one skinCluster
            assert len(skins) <= 1, "Cannot have more than one skinCluster"

            if skins:
                skin = skins[0]

                # Ensure the mesh is also in the skinCluster set
                # otherwise the skin will not be exported correctly
                # by the FBX Exporter.
                deformer_sets = cmds.listSets(object=mesh, type=2)
                for deformer_set in deformer_sets:
                    used_by = cmds.listConnections(deformer_set + ".usedBy",
                                                   source=True,
                                                   destination=False)

                    # Ignore those that don't seem to have a usedBy connection
                    if not used_by:
                        continue

                    # We have a matching deformer set relationship
                    if skin in set(used_by):
                        break

                else:
                    invalid.append(mesh)
                    cls.log.warning(
                        "Mesh has skinCluster in history but is not included "
                        "in its deformer relationship set: "
                        "{0} (skinCluster: {1})".format(mesh, skin)
                    )

        return invalid

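The membership check above leans on Python's for/else: the else branch runs only when the loop over deformer sets finishes without a break, i.e. no set is connected to the skinCluster via usedBy. A standalone illustration of the construct:

    def is_member(containers, target):
        for container in containers:
            if target in container:
                break  # found a matching container
        else:
            return False  # loop ended without break
        return True

    print(is_member([{"a"}, {"b"}], "b"))  # True
    print(is_member([{"a"}, {"b"}], "z"))  # False
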
@@ -0,0 +1,55 @@
import pyblish.api
import colorbleed.api
import colorbleed.maya.lib as lib

from maya import cmds


class ValidateVRayDistributedRendering(pyblish.api.InstancePlugin):
    """Validate V-Ray Distributed Rendering is ignored in batch mode.

    Whenever Distributed Rendering is enabled for V-Ray in the render settings
    ensure that the "Ignore in batch mode" is enabled so the submitted job
    won't try to render each frame with all machines resulting in faulty
    errors.

    """

    order = colorbleed.api.ValidateContentsOrder
    label = "VRay Distributed Rendering"
    families = ["colorbleed.renderlayer"]
    actions = [colorbleed.api.RepairAction]

    # V-Ray attribute names
    enabled_attr = "vraySettings.sys_distributed_rendering_on"
    ignored_attr = "vraySettings.sys_distributed_rendering_ignore_batch"

    def process(self, instance):

        if instance.data.get("renderer") != "vray":
            # If not V-Ray ignore..
            return

        vray_settings = cmds.ls("vraySettings", type="VRaySettingsNode")
        assert vray_settings, "Please ensure a VRay Settings Node is present"

        renderlayer = instance.data['setMembers']

        if not lib.get_attr_in_layer(self.enabled_attr, layer=renderlayer):
            # If not distributed rendering enabled, ignore..
            return

        # If distributed rendering is enabled but it is *not* set to ignore
        # during batch mode we invalidate the instance
        if not lib.get_attr_in_layer(self.ignored_attr, layer=renderlayer):
            raise RuntimeError("Renderlayer has distributed rendering enabled "
                               "but is not set to ignore in batch mode.")

    @classmethod
    def repair(cls, instance):

        renderlayer = instance.data.get("setMembers")
        with lib.renderlayer(renderlayer):
            cls.log.info("Enabling Distributed Rendering "
                         "ignore in batch mode..")
            cmds.setAttr(cls.ignored_attr, True)

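The repair relies on the lib.renderlayer context manager used throughout these plug-ins. A hedged sketch of the pattern (the layer name is invented; this assumes the manager activates the layer on enter and restores the previous one on exit):

    import colorbleed.maya.lib as lib
    from maya import cmds

    with lib.renderlayer("rs_beauty"):
        # setAttr now records the value in that layer's override context
        cmds.setAttr("vraySettings.sys_distributed_rendering_ignore_batch",
                     True)
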
@@ -0,0 +1,69 @@
import pyblish.api
import colorbleed.api
from colorbleed.plugin import contextplugin_should_run

from maya import cmds


class ValidateVRayTranslatorEnabled(pyblish.api.ContextPlugin):

    order = colorbleed.api.ValidateContentsOrder
    label = "VRay Translator Settings"
    families = ["colorbleed.vrayscene"]
    actions = [colorbleed.api.RepairContextAction]

    def process(self, context):

        # Workaround bug pyblish-base#250
        if not contextplugin_should_run(self, context):
            return

        invalid = self.get_invalid(context)
        if invalid:
            raise RuntimeError("Found invalid VRay Translator settings!")

    @classmethod
    def get_invalid(cls, context):

        invalid = False

        # Get vraySettings node
        vray_settings = cmds.ls(type="VRaySettingsNode")
        assert vray_settings, "Please ensure a VRay Settings Node is present"

        node = vray_settings[0]

        if cmds.getAttr("{}.vrscene_render_on".format(node)):
            cls.log.error("Render is enabled, this should be disabled")
            invalid = True

        if not cmds.getAttr("{}.vrscene_on".format(node)):
            cls.log.error("Export vrscene not enabled")
            invalid = True

        if not cmds.getAttr("{}.misc_eachFrameInFile".format(node)):
            cls.log.error("Each Frame in File not enabled")
            invalid = True

        vrscene_filename = cmds.getAttr("{}.vrscene_filename".format(node))
        if vrscene_filename != "vrayscene/<Scene>/<Scene>_<Layer>/<Layer>":
            cls.log.error("Template for file name is wrong")
            invalid = True

        return invalid

    @classmethod
    def repair(cls, context):

        vray_settings = cmds.ls(type="VRaySettingsNode")
        if not vray_settings:
            node = cmds.createNode("VRaySettingsNode")
        else:
            node = vray_settings[0]

        cmds.setAttr("{}.vrscene_render_on".format(node), False)
        cmds.setAttr("{}.vrscene_on".format(node), True)
        cmds.setAttr("{}.misc_eachFrameInFile".format(node), True)
        cmds.setAttr("{}.vrscene_filename".format(node),
                     "vrayscene/<Scene>/<Scene>_<Layer>/<Layer>",
                     type="string")

@@ -25,6 +25,17 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin):

    hosts = ["maya"]
    families = ["colorbleed.renderlayer"]

    # Settings per renderer
    callbacks = {
        "vray": {
            "pre": "catch(`pgYetiVRayPreRender`)",
            "post": "catch(`pgYetiVRayPostRender`)"
        },
        "arnold": {
            "pre": "pgYetiPreRender"
        }
    }

    def process(self, instance):

        invalid = self.get_invalid(instance)

@@ -35,14 +46,6 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin):

    @classmethod
    def get_invalid(cls, instance):

        # lookup per render
        render_scripts = {"vray":
                          {"pre": "catch(`pgYetiVRayPreRender`)",
                           "post": "catch(`pgYetiVRayPostRender`)"},
                          "arnold":
                          {"pre": "pgYetiPreRender"}
                          }

        yeti_loaded = cmds.pluginInfo("pgYetiMaya", query=True, loaded=True)

        renderer = instance.data["renderer"]

@@ -50,22 +53,29 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin):

            cls.log.info("Redshift ignores any pre and post render callbacks")
            return False

        callback_lookup = render_scripts.get(renderer, {})
        callback_lookup = cls.callbacks.get(renderer, {})
        if not callback_lookup:
            cls.log.warning("Renderer '%s' is not supported in this plugin"
                            % renderer)
            return False

        pre_render_callback = cmds.getAttr("defaultRenderGlobals.preMel")
        post_render_callback = cmds.getAttr("defaultRenderGlobals.postMel")
        pre_mel = cmds.getAttr("defaultRenderGlobals.preMel") or ""
        post_mel = cmds.getAttr("defaultRenderGlobals.postMel") or ""

        pre_callbacks = pre_render_callback.split(";")
        post_callbacks = post_render_callback.split(";")
        if pre_mel.strip():
            cls.log.debug("Found pre mel: `%s`" % pre_mel)

        if post_mel.strip():
            cls.log.debug("Found post mel: `%s`" % post_mel)

        # Strip callbacks and turn into a set for quick lookup
        pre_callbacks = {cmd.strip() for cmd in pre_mel.split(";")}
        post_callbacks = {cmd.strip() for cmd in post_mel.split(";")}

        pre_script = callback_lookup.get("pre", "")
        post_script = callback_lookup.get("post", "")

        # If not loaded
        # If Yeti is not loaded
        invalid = False
        if not yeti_loaded:
            if pre_script and pre_script in pre_callbacks:

@@ -77,18 +87,19 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin):

                cls.log.error("Found post render callback '%s' which is "
                              "not used!" % post_script)
                invalid = True
        else:
            if pre_script:
                if pre_script not in pre_callbacks:
                    cls.log.error(
                        "Could not find required pre render callback "
                        "`%s`" % pre_script)
                    invalid = True

            if post_script:
                if post_script not in post_callbacks:
                    cls.log.error("Could not find required post render callback"
                                  " `%s`" % post_script)
                    invalid = True
        # If Yeti is loaded
        else:
            if pre_script and pre_script not in pre_callbacks:
                cls.log.error(
                    "Could not find required pre render callback "
                    "`%s`" % pre_script)
                invalid = True

            if post_script and post_script not in post_callbacks:
                cls.log.error(
                    "Could not find required post render callback"
                    " `%s`" % post_script)
                invalid = True

        return invalid

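The rewritten lookup normalizes the preMel/postMel strings into sets of stripped commands before testing membership, so surrounding whitespace no longer causes false negatives. A standalone illustration:

    pre_mel = " catch(`pgYetiVRayPreRender`) ; userSetup() "
    pre_callbacks = {cmd.strip() for cmd in pre_mel.split(";")}

    print("catch(`pgYetiVRayPreRender`)" in pre_callbacks)  # True
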
@@ -2,32 +2,46 @@ import pyblish.api


class ValidateYetiRigSettings(pyblish.api.InstancePlugin):
    """Validate Yeti Rig Settings have collected input connections.

    The input connections are collected for the nodes in the `input_SET`.
    When no input connections are found a warning is logged but it is allowed
    to pass validation.

    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Yeti Rig Settings"
    label = "Yeti Rig Settings"
    families = ["colorbleed.yetiRig"]

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Detected invalid Yeti Rig data. "
            raise RuntimeError("Detected invalid Yeti Rig data. (See log) "
                               "Tip: Save the scene")

    @classmethod
    def get_invalid(cls, instance):

        rigsettings = instance.data.get("rigsettings", {})
        if not rigsettings:
        rigsettings = instance.data.get("rigsettings", None)
        if rigsettings is None:
            cls.log.error("MAJOR ERROR: No rig settings found!")
            return True

        # Get inputs
        inputs = rigsettings.get("inputs", [])
        if not inputs:
            # Empty rig settings dictionary
            cls.log.warning("No rig inputs found. This can happen when "
                            "the rig has no inputs from outside the rig.")
            return False

        for input in inputs:
            source_id = input["sourceID"]
            if source_id is None:
                cls.log.error("Discovered source with 'None' as ID, please "
                              "check if the input shape has an cbId")
                              "check if the input shape has a cbId")
                return True

            destination_id = input["destinationID"]

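The collection change draws a line between "missing" and "empty" rig settings: a missing key now fails hard, while an empty inputs list only warns. A small illustration of why .get(key, None) matters here:

    data = {"rigsettings": {}}

    old_check = not data.get("rigsettings", {})        # True  -> treated as error
    new_check = data.get("rigsettings", None) is None  # False -> only warns later

    print(old_check, new_check)  # True False
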
colorbleed/vendor/pather/core.py (vendored, 4 changes)

@@ -9,8 +9,8 @@ import glob

from .error import ParseError

# Regex pattern that matches valid file
# TODO: Implement complete pattern if required
RE_FILENAME = '[-\w.,; \[\]]'
# A filename may not contain \/:*?"<>|
RE_FILENAME = r"[^\\/:\"*?<>|]"


def format(pattern, data, allow_partial=True):

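The new pattern flips from a whitelist of allowed characters to a blacklist of the characters Windows forbids in filenames. A quick check of the behavior:

    import re

    RE_FILENAME = r"[^\\/:\"*?<>|]"

    print(bool(re.fullmatch(RE_FILENAME + "+", "shot_010 v001[final].ma")))  # True
    print(bool(re.fullmatch(RE_FILENAME + "+", "render:beauty")))            # False
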
colorbleed/vendor/pather/version.py (vendored, 2 changes)

@@ -1,7 +1,7 @@

VERSION_MAJOR = 0
VERSION_MINOR = 1
VERSION_PATCH = 0
VERSION_PATCH = 1

version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
version = '%i.%i.%i' % version_info

BIN res/icons/colorbleed_logo_36x36.png (new file, 20 KiB; binary file not shown)
BIN res/icons/inventory.png (new file, 15 KiB; binary file not shown)
BIN res/icons/loader.png (new file, 408 B; binary file not shown)
BIN res/icons/workfiles.png (new file, 205 B; binary file not shown)