cleanup conflicts with CB code

This commit is contained in:
Milan Kolar 2019-01-28 22:59:57 +01:00
commit 77f02101f6
126 changed files with 2842 additions and 566 deletions

View file

@ -87,6 +87,4 @@ class RepairContextAction(pyblish.api.Action):
# Apply pyblish.logic to get the instances for the plug-in
if plugin in errored_plugins:
self.log.info("Attempting fix ...")
plugin.repair()
plugin.repair(context)

View file

@ -35,10 +35,11 @@ def install():
log.info("Installing callbacks ... ")
avalon.on("init", on_init)
avalon.before("save", before_save)
avalon.on("save", on_save)
avalon.on("open", on_open)
log.info("Overriding existing event 'taskChanged'")
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
log.info("Setting default family states for loader..")
avalon.data["familiesStateToggled"] = ["imagesequence"]
@ -48,6 +49,10 @@ def on_init(*args):
houdini.on_houdini_initialize()
def before_save(*args):
return lib.validate_fps()
def on_save(*args):
avalon.logger.info("Running callback on save..")
@ -72,7 +77,6 @@ def on_open(*args):
# Get main window
parent = hou.ui.mainQtWindow()
if parent is None:
log.info("Skipping outdated content pop-up "
"because Maya window can't be found.")
@ -89,3 +93,20 @@ def on_open(*args):
"your Maya scene.")
dialog.on_show.connect(_on_show_inventory)
dialog.show()
def on_pyblish_instance_toggled(instance, new_value, old_value):
"""Toggle saver tool passthrough states on instance toggles."""
nodes = instance[:]
if not nodes:
return
# Assume instance node is first node
instance_node = nodes[0]
if instance_node.isBypassed() != (not old_value):
print("%s old bypass state didn't match old instance state, "
"updating anyway.." % instance_node.path())
instance_node.bypass(not new_value)

View file

@ -4,15 +4,17 @@ from contextlib import contextmanager
import hou
from pype import lib
from avalon import api, io
from avalon.houdini import lib
from avalon.houdini import lib as houdini
def set_id(node, unique_id, overwrite=False):
exists = node.parm("id")
if not exists:
lib.imprint(node, {"id": unique_id})
houdini.imprint(node, {"id": unique_id})
if not exists and overwrite:
node.setParm("id", unique_id)
@ -188,3 +190,45 @@ def attribute_values(node, data):
pass
finally:
node.setParms(previous_attrs)
def set_scene_fps(fps):
hou.setFps(fps)
# Valid FPS
def validate_fps():
"""Validate current scene FPS and show pop-up when it is incorrect
Returns:
bool
"""
fps = lib.get_asset_fps()
current_fps = hou.fps() # returns float
if current_fps != fps:
from ..widgets import popup
# Find main window
parent = hou.ui.mainQtWindow()
if parent is None:
pass
else:
dialog = popup.Popup2(parent=parent)
dialog.setModal(True)
dialog.setWindowTitle("Maya scene not in line with project")
dialog.setMessage("The FPS is out of sync, please fix")
# Set new text for button (add optional argument for the popup?)
toggle = dialog.widgets["toggle"]
toggle.setEnabled(False)
dialog.on_show.connect(lambda: set_scene_fps(fps))
dialog.show()
return False
return True

View file

@ -99,12 +99,23 @@ def on_init(_):
except Exception as exc:
print(exc)
# Force load Alembic so referenced alembics
# work correctly on scene open
cmds.loadPlugin("AbcImport", quiet=True)
cmds.loadPlugin("AbcExport", quiet=True)
from .customize import override_component_mask_commands
# Force load objExport plug-in (requested by artists)
cmds.loadPlugin("objExport", quiet=True)
from .customize import (
override_component_mask_commands,
override_toolbox_ui
)
safe_deferred(override_component_mask_commands)
if not IS_HEADLESS:
safe_deferred(override_toolbox_ui)
def on_before_save(return_code, _):
"""Run validation for scene's FPS prior to saving"""
@ -120,8 +131,8 @@ def on_save(_):
avalon.logger.info("Running callback on save..")
# # Update current task for the current scene
# update_task_from_path(cmds.file(query=True, sceneName=True))
# Update current task for the current scene
update_task_from_path(cmds.file(query=True, sceneName=True))
# Generate ids of the current context on nodes in the scene
nodes = lib.get_id_required_nodes(referenced_nodes=False)

View file

@ -3,6 +3,7 @@
import maya.cmds as mc
import maya.mel as mel
from functools import partial
import os
import logging
@ -17,7 +18,7 @@ def override_component_mask_commands():
This implements special behavior for Maya's component
mask menu items where a ctrl+click will instantly make
it an isolated behavior disabling all others.
Tested in Maya 2016 and 2018
"""
@ -64,3 +65,93 @@ def override_component_mask_commands():
original = COMPONENT_MASK_ORIGINAL[btn]
new_fn = partial(on_changed_callback, original)
mc.iconTextCheckBox(btn, edit=True, cc=new_fn)
def override_toolbox_ui():
"""Add custom buttons in Toolbox as replacement for Maya web help icon."""
import pype
res = os.path.join(os.path.dirname(os.path.dirname(pype.__file__)),
"res")
icons = os.path.join(res, "icons")
import avalon.tools.cbsceneinventory as inventory
import avalon.tools.cbloader as loader
from avalon.maya.pipeline import launch_workfiles_app
# Ensure the maya web icon on toolbox exists
web_button = "ToolBox|MainToolboxLayout|mayaWebButton"
if not mc.iconTextButton(web_button, query=True, exists=True):
return
mc.iconTextButton(web_button, edit=True, visible=False)
# real = 32, but 36 with padding - according to toolbox mel script
icon_size = 36
parent = web_button.rsplit("|", 1)[0]
# Ensure the parent is a formLayout
if not mc.objectTypeUI(parent) == "formLayout":
return
# Create our controls
background_color = (0.267, 0.267, 0.267)
controls = []
control = mc.iconTextButton(
"pype_toolbox_workfiles",
annotation="Work Files",
label="Work Files",
image=os.path.join(icons, "workfiles.png"),
command=lambda: launch_workfiles_app(),
bgc=background_color,
width=icon_size,
height=icon_size,
parent=parent)
controls.append(control)
control = mc.iconTextButton(
"pype_toolbox_loader",
annotation="Loader",
label="Loader",
image=os.path.join(icons, "loader.png"),
command=lambda: loader.show(use_context=True),
bgc=background_color,
width=icon_size,
height=icon_size,
parent=parent)
controls.append(control)
control = mc.iconTextButton(
"pype_toolbox_manager",
annotation="Inventory",
label="Inventory",
image=os.path.join(icons, "inventory.png"),
command=lambda: inventory.show(),
bgc=background_color,
width=icon_size,
height=icon_size,
parent=parent)
controls.append(control)
control = mc.iconTextButton(
"pype_toolbox",
annotation="Colorbleed",
label="Colorbleed",
image=os.path.join(icons, "pype_logo_36x36.png"),
bgc=background_color,
width=icon_size,
height=icon_size,
parent=parent)
controls.append(control)
# Add the buttons on the bottom and stack
# them above each other with side padding
controls.reverse()
for i, control in enumerate(controls):
previous = controls[i - 1] if i > 0 else web_button
mc.formLayout(parent, edit=True,
attachControl=[control, "bottom", 0, previous],
attachForm=([control, "left", 1],
[control, "right", 1]))

View file

@ -521,12 +521,15 @@ def no_undo(flush=False):
cmds.undoInfo(**{keyword: original})
def get_shader_assignments_from_shapes(shapes):
def get_shader_assignments_from_shapes(shapes, components=True):
"""Return the shape assignment per related shading engines.
Returns a dictionary where the keys are shadingGroups and the values are
lists of assigned shapes or shape-components.
Since `maya.cmds.sets` returns shader members on the shapes as components
on the transform we correct that in this method too.
For the 'shapes' this will return a dictionary like:
{
"shadingEngineX": ["nodeX", "nodeY"],
@ -535,6 +538,7 @@ def get_shader_assignments_from_shapes(shapes):
Args:
shapes (list): The shapes to collect the assignments for.
components (bool): Whether to include the component assignments.
Returns:
dict: The {shadingEngine: shapes} relationships
@ -543,7 +547,6 @@ def get_shader_assignments_from_shapes(shapes):
shapes = cmds.ls(shapes,
long=True,
selection=True,
shapes=True,
objectsOnly=True)
if not shapes:
@ -562,7 +565,37 @@ def get_shader_assignments_from_shapes(shapes):
type="shadingEngine") or []
shading_groups = list(set(shading_groups))
for shading_group in shading_groups:
assignments[shading_group].add(shape)
assignments[shading_group].append(shape)
if components:
# Note: Components returned from maya.cmds.sets are "listed" as if
# being assigned to the transform like: pCube1.f[0] as opposed
# to pCubeShape1.f[0] so we correct that here too.
# Build a mapping from parent to shapes to include in lookup.
transforms = {shape.rsplit("|", 1)[0]: shape for shape in shapes}
lookup = set(shapes + transforms.keys())
component_assignments = defaultdict(list)
for shading_group in assignments.keys():
members = cmds.ls(cmds.sets(shading_group, query=True), long=True)
for member in members:
node = member.split(".", 1)[0]
if node not in lookup:
continue
# Component
if "." in member:
# Fix transform to shape as shaders are assigned to shapes
if node in transforms:
shape = transforms[node]
component = member.split(".", 1)[1]
member = "{0}.{1}".format(shape, component)
component_assignments[shading_group].append(member)
assignments = component_assignments
return dict(assignments)
@ -571,7 +604,7 @@ def get_shader_assignments_from_shapes(shapes):
def shader(nodes, shadingEngine="initialShadingGroup"):
"""Assign a shader to nodes during the context"""
shapes = cmds.ls(nodes, dag=1, o=1, shapes=1, long=1)
shapes = cmds.ls(nodes, dag=1, objectsOnly=1, shapes=1, long=1)
original = get_shader_assignments_from_shapes(shapes)
try:
@ -584,7 +617,7 @@ def shader(nodes, shadingEngine="initialShadingGroup"):
# Assign original shaders
for sg, members in original.items():
if members:
cmds.sets(shapes, edit=True, forceElement=shadingEngine)
cmds.sets(members, edit=True, forceElement=sg)
@contextlib.contextmanager
@ -929,6 +962,18 @@ def extract_alembic(file,
raise TypeError("Alembic option unsupported type: "
"{0} (expected {1})".format(value, valid_types))
# Ignore empty values, like an empty string, since they mess up how
# job arguments are built
if isinstance(value, (list, tuple)):
value = [x for x in value if x.strip()]
# Ignore option completely if no values remaining
if not value:
options.pop(key)
continue
options[key] = value
# The `writeCreases` argument was changed to `autoSubd` in Maya 2018+
maya_version = int(cmds.about(version=True))
if maya_version >= 2018:
@ -995,9 +1040,14 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None):
nodes (set): list of filtered nodes
"""
lookup = None
if nodes is None:
# Consider all nodes
nodes = cmds.ls()
else:
# Build a lookup for the only allowed nodes in output based
# on `nodes` input of the function (+ ensure long names)
lookup = set(cmds.ls(nodes, long=True))
def _node_type_exists(node_type):
try:
@ -1006,8 +1056,8 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None):
except RuntimeError:
return False
# `readOnly` flag is obsolete as of Maya 2016 therefor we explicitly remove
# default nodes and reference nodes
# `readOnly` flag is obsolete as of Maya 2016 therefore we explicitly
# remove default nodes and reference nodes
camera_shapes = ["frontShape", "sideShape", "topShape", "perspShape"]
ignore = set()
@ -1031,8 +1081,7 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None):
if cmds.pluginInfo("pgYetiMaya", query=True, loaded=True):
types.append("pgYetiMaya")
# We *always* ignore intermediate shapes, so we filter them out
# directly
# We *always* ignore intermediate shapes, so we filter them out directly
nodes = cmds.ls(nodes, type=types, long=True, noIntermediate=True)
# The items which need to pass the id to their parent
@ -1049,6 +1098,12 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None):
if not nodes:
return nodes
# Ensure only nodes from the input `nodes` are returned when a
# filter was applied on function call because we also iterated
# to parents and alike
if lookup is not None:
nodes &= lookup
# Avoid locked nodes
nodes_list = list(nodes)
locked = cmds.lockNode(nodes_list, query=True, lock=True)
@ -2051,7 +2106,6 @@ def bake_to_world_space(nodes,
return world_space_nodes
def load_capture_preset(path):
import capture_gui
import capture
@ -2150,3 +2204,89 @@ def load_capture_preset(path):
# options['display_options'] = temp_options
return options
def get_attr_in_layer(attr, layer):
"""Return attribute value in specified renderlayer.
Same as cmds.getAttr but this gets the attribute's value in a
given render layer without having to switch to it.
Warning for parent attribute overrides:
Attributes that have render layer overrides to their parent attribute
are not captured correctly since they do not have a direct connection.
For example, an override to sphere.rotate when querying sphere.rotateX
will not return correctly!
Note: This is much faster for Maya's renderLayer system, yet the code
does no optimized query for render setup.
Args:
attr (str): attribute name, ex. "node.attribute"
layer (str): layer name
Returns:
The return value from `maya.cmds.getAttr`
"""
if cmds.mayaHasRenderSetup():
log.debug("lib.get_attr_in_layer is not optimized for render setup")
with renderlayer(layer):
return cmds.getAttr(attr)
# Ignore complex query if we're in the layer anyway
current_layer = cmds.editRenderLayerGlobals(query=True,
currentRenderLayer=True)
if layer == current_layer:
return cmds.getAttr(attr)
connections = cmds.listConnections(attr,
plugs=True,
source=False,
destination=True,
type="renderLayer") or []
connections = filter(lambda x: x.endswith(".plug"), connections)
if not connections:
return cmds.getAttr(attr)
# Some value types perform a conversion when assigning
# TODO: See if there's a maya method to allow this conversion
# instead of computing it ourselves.
attr_type = cmds.getAttr(attr, type=True)
conversion = None
if attr_type == "time":
conversion = mel.eval('currentTimeUnitToFPS()') # returns float
elif attr_type == "doubleAngle":
# Radians to Degrees: 180 / pi
# TODO: This will likely only be correct when Maya units are set
# to degrees
conversion = 57.2957795131
elif attr_type == "doubleLinear":
raise NotImplementedError("doubleLinear conversion not implemented.")
for connection in connections:
if connection.startswith(layer + "."):
attr_split = connection.split(".")
if attr_split[0] == layer:
attr = ".".join(attr_split[0:-1])
value = cmds.getAttr("%s.value" % attr)
if conversion:
value *= conversion
return value
else:
# When connections are present, but none
# to the specific renderlayer than the layer
# should have the "defaultRenderLayer"'s value
layer = "defaultRenderLayer"
for connection in connections:
if connection.startswith(layer):
attr_split = connection.split(".")
if attr_split[0] == "defaultRenderLayer":
attr = ".".join(attr_split[0:-1])
value = cmds.getAttr("%s.value" % attr)
if conversion:
value *= conversion
return value
return cmds.getAttr(attr)

View file

@ -32,3 +32,37 @@ class Extractor(pyblish.api.InstancePlugin):
instance.data['stagingDir'] = staging_dir
return staging_dir
def contextplugin_should_run(plugin, context):
"""Return whether the ContextPlugin should run on the given context.
This is a helper function to work around a bug pyblish-base#250
Whenever a ContextPlugin sets specific families it will still trigger even
when no instances are present that have those families.
This actually checks it correctly and returns whether it should run.
"""
required = set(plugin.families)
# When no filter always run
if "*" in required:
return True
for instance in context:
# Ignore inactive instances
if (not instance.data.get("publish", True) or
not instance.data.get("active", True)):
continue
families = instance.data.get("families", [])
if any(f in required for f in families):
return True
family = instance.data.get("family")
if family and family in required:
return True
return False

View file

@ -1,10 +1,10 @@
from avalon import api, style
from avalon.vendor.Qt import QtGui, QtWidgets
import avalon.nuke
import avalon.fusion
class NukeSetToolColor(api.InventoryAction):
class FusionSetToolColor(api.InventoryAction):
"""Update the color of the selected tools"""
label = "Set Tool Color"
@ -16,20 +16,15 @@ class NukeSetToolColor(api.InventoryAction):
"""Color all selected tools the selected colors"""
result = []
comp = avalon.fusion.get_current_comp()
# Get tool color
first = containers[0]
node = first["_tool"]
color = node["tile_color"].value()
hex = '%08x' % color
rgba = [
float(int(hex[0:2], 16)) / 255.0,
float(int(hex[2:4], 16)) / 255.0,
float(int(hex[4:6], 16)) / 255.0
]
tool = first["_tool"]
color = tool.TileColor
if color is not None:
qcolor = QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])
qcolor = QtGui.QColor().fromRgbF(color["R"], color["G"], color["B"])
else:
qcolor = self._fallback_color
@ -38,21 +33,15 @@ class NukeSetToolColor(api.InventoryAction):
if not picked_color:
return
with avalon.nuke.viewer_update_and_undo_stop():
with avalon.fusion.comp_lock_and_undo_chunk(comp):
for container in containers:
# Convert color to RGB 0-1 floats
rgb_f = picked_color.getRgbF()
hexColour = int(
'%02x%02x%02x%02x' % (
rgb_f[0]*255,
rgb_f[1]*255,
rgb_f[2]*255,
1),
16
)
rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]}
# Update tool
node = container["_tool"]
node['tile_color'].value(hexColour)
tool = container["_tool"]
tool.TileColor = rgb_f_table
result.append(container)

View file

@ -13,7 +13,7 @@ class CollectFusionRenderMode(pyblish.api.InstancePlugin):
available tool does not visualize which render mode is set for the
current comp, please run the following line in the console (Py2)
comp.GetData("rendermode")
comp.GetData("pype.rendermode")
This will return the name of the current render mode as seen above under
Options.
@ -34,7 +34,7 @@ class CollectFusionRenderMode(pyblish.api.InstancePlugin):
raise RuntimeError("No comp previously collected, unable to "
"retrieve Fusion version.")
rendermode = comp.GetData("rendermode") or "renderlocal"
rendermode = comp.GetData("pype.rendermode") or "renderlocal"
assert rendermode in options, "Must be supported render mode"
self.log.info("Render mode: {0}".format(rendermode))

View file

@ -14,7 +14,7 @@ def _get_script():
# todo: use a more elegant way to get the python script
try:
from pype.fusion.scripts import publish_filesequence
from pype.scripts import publish_filesequence
except Exception:
raise RuntimeError("Expected module 'publish_imagesequence'"
"to be available")

View file

@ -0,0 +1,57 @@
import os
import subprocess
import pyblish.api
from pype.plugin import contextplugin_should_run
CREATE_NO_WINDOW = 0x08000000
def deadline_command(cmd):
# Find Deadline
path = os.environ.get("DEADLINE_PATH", None)
assert path is not None, "Variable 'DEADLINE_PATH' must be set"
executable = os.path.join(path, "deadlinecommand")
if os.name == "nt":
executable += ".exe"
assert os.path.exists(
executable), "Deadline executable not found at %s" % executable
assert cmd, "Must have a command"
query = (executable, cmd)
process = subprocess.Popen(query, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
creationflags=CREATE_NO_WINDOW)
out, err = process.communicate()
return out
class CollectDeadlineUser(pyblish.api.ContextPlugin):
"""Retrieve the local active Deadline user"""
order = pyblish.api.CollectorOrder + 0.499
label = "Deadline User"
hosts = ['maya', 'fusion']
families = ["renderlayer", "saver.deadline"]
def process(self, context):
"""Inject the current working file"""
# Workaround bug pyblish-base#250
if not contextplugin_should_run(self, context):
return
user = deadline_command("GetCurrentUserName").strip()
if not user:
self.log.warning("No Deadline user found. "
"Do you have Deadline installed?")
return
self.log.info("Found Deadline user: {}".format(user))
context.data['deadlineUser'] = user

View file

@ -0,0 +1,14 @@
import pyblish.api
class CollectMachineName(pyblish.api.ContextPlugin):
label = "Local Machine Name"
order = pyblish.api.CollectorOrder
hosts = ["*"]
def process(self, context):
import socket
machine_name = socket.gethostname()
self.log.info("Machine name: %s" % machine_name)
context.data["machine"] = machine_name

View file

@ -303,7 +303,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "pype:subset-2.0",
"schema": "avalon-core:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
@ -329,7 +329,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-2.0",
return {"schema": "avalon-core:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
@ -370,7 +370,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment")}
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get("fps")}
# Include optional data if present in
optionals = ["startFrame", "endFrame", "step", "handles"]

View file

@ -1,5 +1,6 @@
import os
import json
import pprint
import re
from avalon import api, io
@ -11,7 +12,7 @@ import pyblish.api
def _get_script():
"""Get path to the image sequence script"""
try:
from pype.fusion.scripts import publish_filesequence
from pype.scripts import publish_filesequence
except Exception as e:
raise RuntimeError("Expected module 'publish_imagesequence'"
"to be available")
@ -156,15 +157,18 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin):
subset=subset
)
# Add in start/end frame
# Get start/end frame from instance, if not available get from context
context = instance.context
start = instance.data.get("startFrame", context.data["startFrame"])
end = instance.data.get("endFrame", context.data["endFrame"])
resources = []
start = instance.data.get("startFrame")
if start is None:
start = context.data["startFrame"]
end = instance.data.get("endFrame")
if end is None:
end = context.data["endFrame"]
# Add in regex for sequence filename
# This assumes the output files start with subset name and ends with
# a file extension.
# a file extension. The "ext" key includes the dot with the extension.
if "ext" in instance.data:
ext = re.escape(instance.data["ext"])
else:

View file

@ -1,9 +1,8 @@
from collections import OrderedDict
from avalon import houdini
class CreateAlembicCamera(houdini.Creator):
"""Single baked camera from Alembic ROP"""
name = "camera"
label = "Camera (Abc)"
@ -22,13 +21,25 @@ class CreateAlembicCamera(houdini.Creator):
def process(self):
instance = super(CreateAlembicCamera, self).process()
parms = {"use_sop_path": True,
"build_from_path": True,
"path_attrib": "path",
"filename": "$HIP/pyblish/%s.abc" % self.name}
parms = {
"filename": "$HIP/pyblish/%s.abc" % self.name,
"use_sop_path": False
}
if self.nodes:
node = self.nodes[0]
parms.update({"sop_path": node.path()})
path = node.path()
# Split the node path into the first root and the remainder
# So we can set the root and objects parameters correctly
_, root, remainder = path.split("/", 2)
parms.update({
"root": "/" + root,
"objects": remainder
})
instance.setParms(parms)
# Lock the Use Sop Path setting so the
# user doesn't accidentally enable it.
instance.parm("use_sop_path").lock(True)

View file

@ -2,7 +2,7 @@ from avalon import houdini
class CreatePointCache(houdini.Creator):
"""Alembic pointcache for animated data"""
"""Alembic ROP to pointcache"""
name = "pointcache"
label = "Point Cache"
@ -22,7 +22,7 @@ class CreatePointCache(houdini.Creator):
parms = {"use_sop_path": True, # Export single node from SOP Path
"build_from_path": True, # Direct path of primitive in output
"path_attrib": "path", # Pass path attribute for output\
"path_attrib": "path", # Pass path attribute for output
"prim_to_detail_pattern": "cbId",
"format": 2, # Set format to Ogawa
"filename": "$HIP/pyblish/%s.abc" % self.name}

View file

@ -2,7 +2,7 @@ from avalon import houdini
class CreateVDBCache(houdini.Creator):
"""Alembic pointcache for animated data"""
"""OpenVDB from Geometry ROP"""
name = "vbdcache"
label = "VDB Cache"
@ -15,10 +15,8 @@ class CreateVDBCache(houdini.Creator):
# Remove the active, we are checking the bypass flag of the nodes
self.data.pop("active", None)
self.data.update({
"node_type": "geometry", # Set node type to create for output
"executeBackground": True # Render node in background
})
# Set node type to create for output
self.data["node_type"] = "geometry"
def process(self):
instance = super(CreateVDBCache, self).process()
@ -28,6 +26,6 @@ class CreateVDBCache(houdini.Creator):
if self.nodes:
node = self.nodes[0]
parms.update({"sop_path": node.path()})
parms.update({"soppath": node.path()})
instance.setParms(parms)

View file

@ -1,3 +1,4 @@
import os
import hou
import pyblish.api
@ -12,4 +13,22 @@ class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin):
def process(self, context):
"""Inject the current working file"""
context.data['currentFile'] = hou.hipFile.path()
filepath = hou.hipFile.path()
if not os.path.exists(filepath):
# By default Houdini will even point a new scene to a path.
# However if the file is not saved at all and does not exist,
# we assume the user never set it.
filepath = ""
elif os.path.basename(filepath) == "untitled.hip":
# Due to even a new file being called 'untitled.hip' we are unable
# to confirm the current scene was ever saved because the file
# could have existed already. We will allow it if the file exists,
# but show a warning for this edge case to clarify the potential
# false positive.
self.log.warning("Current file is 'untitled.hip' and we are "
"unable to detect whether the current scene is "
"saved correctly.")
context.data['currentFile'] = filepath

View file

@ -15,8 +15,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
id (str): "pyblish.avalon.instance
Specific node:
The specific node is important because it dictates in which way the subset
is being exported.
The specific node is important because it dictates in which way the
subset is being exported.
alembic: will export Alembic file which supports cascading attributes
like 'cbId' and 'path'
@ -30,8 +30,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
def process(self, context):
instances = []
nodes = hou.node("/out").children()
for node in nodes:
@ -55,11 +53,9 @@ class CollectInstances(pyblish.api.ContextPlugin):
data.update(self.get_frame_data(node))
# Create nice name
# All nodes in the Outputs graph have the 'Valid Frame Range'
# attribute, we check here if any frames are set
# Create nice name if the instance has a frame range.
label = data.get("name", node.name())
if "startFrame" in data:
if "startFrame" in data and "endFrame" in data:
frames = "[{startFrame} - {endFrame}]".format(**data)
label = "{} {}".format(label, frames)
@ -68,8 +64,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
instance[:] = [node]
instance.data.update(data)
instances.append(instance)
def sort_by_family(instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))

View file

@ -1,13 +1,14 @@
import pyblish.api
class CollectOutputNode(pyblish.api.InstancePlugin):
"""Collect the out node which of the instance"""
class CollectOutputSOPPath(pyblish.api.InstancePlugin):
"""Collect the out node's SOP Path value."""
order = pyblish.api.CollectorOrder
families = ["*"]
families = ["pointcache",
"vdbcache"]
hosts = ["houdini"]
label = "Collect Output Node"
label = "Collect Output SOP Path"
def process(self, instance):

View file

@ -0,0 +1,15 @@
import pyblish.api
import hou
class CollectWorksceneFPS(pyblish.api.ContextPlugin):
"""Get the FPS of the work scene"""
label = "Workscene FPS"
order = pyblish.api.CollectorOrder
hosts = ["houdini"]
def process(self, context):
fps = hou.fps()
self.log.info("Workscene FPS: %s" % fps)
context.data.update({"fps": fps})

View file

@ -13,6 +13,8 @@ class ExtractAlembic(pype.api.Extractor):
def process(self, instance):
import hou
ropnode = instance[0]
# Get the filename from the filename parameter
@ -23,8 +25,17 @@ class ExtractAlembic(pype.api.Extractor):
file_name = os.path.basename(output)
# We run the render
self.log.info("Writing alembic '%s' to '%s'" % (file_name, staging_dir))
ropnode.render()
self.log.info("Writing alembic '%s' to '%s'" % (file_name,
staging_dir))
try:
ropnode.render()
except hou.Error as exc:
# The hou.Error is not inherited from a Python Exception class,
# so we explicitly capture the houdini error, otherwise pyblish
# will remain hanging.
import traceback
traceback.print_exc()
raise RuntimeError("Render failed: {0}".format(exc))
if "files" not in instance.data:
instance.data["files"] = []

View file

@ -13,6 +13,8 @@ class ExtractVDBCache(pype.api.Extractor):
def process(self, instance):
import hou
ropnode = instance[0]
# Get the filename from the filename parameter
@ -20,13 +22,18 @@ class ExtractVDBCache(pype.api.Extractor):
sop_output = ropnode.evalParm("sopoutput")
staging_dir = os.path.normpath(os.path.dirname(sop_output))
instance.data["stagingDir"] = staging_dir
file_name = os.path.basename(sop_output)
if instance.data.get("executeBackground", True):
self.log.info("Creating background task..")
ropnode.parm("executebackground").pressButton()
self.log.info("Finished")
else:
self.log.info("Writing VDB '%s' to '%s'" % (file_name, staging_dir))
try:
ropnode.render()
except hou.Error as exc:
# The hou.Error is not inherited from a Python Exception class,
# so we explicitly capture the houdini error, otherwise pyblish
# will remain hanging.
import traceback
traceback.print_exc()
raise RuntimeError("Render failed: {0}".format(exc))
if "files" not in instance.data:
instance.data["files"] = []

View file

@ -7,7 +7,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
The connected node cannot be of the following types for Alembic:
- VDB
- Volumne
- Volume
"""

View file

@ -0,0 +1,34 @@
import pyblish.api
import pype.api
class ValidateBypassed(pyblish.api.InstancePlugin):
"""Validate all primitives build hierarchy from attribute when enabled.
The name of the attribute must exist on the prims and have the same name
as Build Hierarchy from Attribute's `Path Attribute` value on the Alembic
ROP node whenever Build Hierarchy from Attribute is enabled.
"""
order = pype.api.ValidateContentsOrder - 0.1
families = ["*"]
hosts = ["houdini"]
label = "Validate ROP Bypass"
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
rop = invalid[0]
raise RuntimeError(
"ROP node %s is set to bypass, publishing cannot continue.." %
rop.path()
)
@classmethod
def get_invalid(cls, instance):
rop = instance[0]
if rop.isBypassed():
return [rop]

View file

@ -0,0 +1,41 @@
import pyblish.api
import pype.api
class ValidateCameraROP(pyblish.api.InstancePlugin):
"""Validate Camera ROP settings."""
order = pype.api.ValidateContentsOrder
families = ['camera']
hosts = ['houdini']
label = 'Camera ROP'
def process(self, instance):
import hou
node = instance[0]
if node.parm("use_sop_path").eval():
raise RuntimeError("Alembic ROP for Camera export should not be "
"set to 'Use Sop Path'. Please disable.")
# Get the root and objects parameter of the Alembic ROP node
root = node.parm("root").eval()
objects = node.parm("objects").eval()
assert root, "Root parameter must be set on Alembic ROP"
assert root.startswith("/"), "Root parameter must start with slash /"
assert objects, "Objects parameter must be set on Alembic ROP"
assert len(objects.split(" ")) == 1, "Must have only a single object."
# Check if the object exists and is a camera
path = root + "/" + objects
camera = hou.node(path)
if not camera:
raise ValueError("Camera path does not exist: %s" % path)
if not camera.type().name() == "cam":
raise ValueError("Object set in Alembic ROP is not a camera: "
"%s (type: %s)" % (camera, camera.type().name()))

View file

@ -3,16 +3,12 @@ import pype.api
class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin):
"""Validate if node attribute Create intermediate Directories is turned on
Rules:
* The node must have Create intermediate Directories turned on to
ensure the output file will be created
"""
"""Validate Create Intermediate Directories is enabled on ROP node."""
order = pype.api.ValidateContentsOrder
families = ["pointcache']
families = ['pointcache',
'camera',
'vdbcache']
hosts = ['houdini']
label = 'Create Intermediate Directories Checked'
@ -20,8 +16,8 @@ class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found ROP nodes with Create Intermediate "
"Directories turned off")
raise RuntimeError("Found ROP node with Create Intermediate "
"Directories turned off: %s" % invalid)
@classmethod
def get_invalid(cls, instance):
@ -34,5 +30,3 @@ class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin):
result.append(node.path())
return result

View file

@ -2,13 +2,20 @@ import pyblish.api
class ValidateOutputNode(pyblish.api.InstancePlugin):
"""Validate if output node:
- exists
- is of type 'output'
- has an input"""
"""Validate the instance SOP Output Node.
This will ensure:
- The SOP Path is set.
- The SOP Path refers to an existing object.
- The SOP Path node is a SOP node.
- The SOP Path node has at least one input connection (has an input)
- The SOP Path has geometry data.
"""
order = pyblish.api.ValidatorOrder
families = ["*"]
families = ["pointcache",
"vdbcache"]
hosts = ["houdini"]
label = "Validate Output Node"
@ -16,30 +23,51 @@ class ValidateOutputNode(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Output node(s) `%s` are incorrect" % invalid)
raise RuntimeError("Output node(s) `%s` are incorrect. "
"See plug-in log for details." % invalid)
@classmethod
def get_invalid(cls, instance):
import hou
output_node = instance.data["output_node"]
if output_node is None:
node = instance[0]
cls.log.error("Output node at '%s' does not exist, see source" %
node.path())
cls.log.error("SOP Output node in '%s' does not exist. "
"Ensure a valid SOP output path is set."
% node.path())
return node.path()
return [node.path()]
# Check if type is correct
type_name = output_node.type().name()
if type_name not in ["output", "cam"]:
cls.log.error("Output node `%s` is not an accepted type `output` "
"or `camera`" %
output_node.path())
# Output node must be a Sop node.
if not isinstance(output_node, hou.SopNode):
cls.log.error("Output node %s is not a SOP node. "
"SOP Path must point to a SOP node, "
"instead found category type: %s" % (
output_node.path(),
output_node.type().category().name()
)
)
return [output_node.path()]
# For the sake of completeness also assert the category type
# is Sop to avoid potential edge case scenarios even though
# the isinstance check above should be stricter than this category
assert output_node.type().category().name() == "Sop", (
"Output node %s is not of category Sop. This is a bug.." %
output_node.path()
)
# Check if output node has incoming connections
if type_name == "output" and not output_node.inputConnections():
if not output_node.inputConnections():
cls.log.error("Output node `%s` has no incoming connections"
% output_node.path())
return [output_node.path()]
# Ensure the output node has at least Geometry data
if not output_node.geometry():
cls.log.error("Output node `%s` has no geometry data."
% output_node.path())
return [output_node.path()]

View file

@ -0,0 +1,75 @@
import pyblish.api
import pype.api
class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin):
"""Validate all primitives build hierarchy from attribute when enabled.
The name of the attribute must exist on the prims and have the same name
as Build Hierarchy from Attribute's `Path Attribute` value on the Alembic
ROP node whenever Build Hierarchy from Attribute is enabled.
"""
order = pype.api.ValidateContentsOrder + 0.1
families = ["pointcache"]
hosts = ["houdini"]
label = "Validate Prims Hierarchy Path"
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("See log for details. "
"Invalid nodes: {0}".format(invalid))
@classmethod
def get_invalid(cls, instance):
import hou
output = instance.data["output_node"]
prims = output.geometry().prims()
rop = instance[0]
build_from_path = rop.parm("build_from_path").eval()
if not build_from_path:
cls.log.debug("Alembic ROP has 'Build from Path' disabled. "
"Validation is ignored..")
return
path_attr = rop.parm("path_attrib").eval()
if not path_attr:
cls.log.error("The Alembic ROP node has no Path Attribute"
"value set, but 'Build Hierarchy from Attribute'"
"is enabled.")
return [rop.path()]
cls.log.debug("Checking for attribute: %s" % path_attr)
missing_attr = []
invalid_attr = []
for prim in prims:
try:
path = prim.stringAttribValue(path_attr)
except hou.OperationFailed:
# Attribute does not exist.
missing_attr.append(prim)
continue
if not path:
# Empty path value is invalid.
invalid_attr.append(prim)
continue
if missing_attr:
cls.log.info("Prims are missing attribute `%s`" % path_attr)
if invalid_attr:
cls.log.info("Prims have no value for attribute `%s` "
"(%s of %s prims)" % (path_attr,
len(invalid_attr),
len(prims)))
if missing_attr or invalid_attr:
return [output.path()]

View file

@ -0,0 +1,46 @@
import pyblish.api
import pype.api
class ValidateVDBInputNode(pyblish.api.InstancePlugin):
"""Validate that the node connected to the output node is of type VDB
Regardless of the amount of VDBs create the output will need to have an
equal amount of VDBs, points, primitives and vertices
A VDB is an inherited type of Prim, holds the following data:
- Primitives: 1
- Points: 1
- Vertices: 1
- VDBs: 1
"""
order = pype.api.ValidateContentsOrder + 0.1
families = ["vdbcache"]
hosts = ["houdini"]
label = "Validate Input Node (VDB)"
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Node connected to the output node is not"
"of type VDB!")
@classmethod
def get_invalid(cls, instance):
node = instance.data["output_node"]
prims = node.geometry().prims()
nr_of_prims = len(prims)
nr_of_points = len(node.geometry().points())
if nr_of_points != nr_of_prims:
cls.log.error("The number of primitives and points do not match")
return [instance]
for prim in prims:
if prim.numVertices() != 1:
cls.log.error("Found primitive with more than 1 vertex!")
return [instance]

View file

@ -1,5 +1,3 @@
from collections import OrderedDict
import avalon.maya
from pype.maya import lib
@ -16,21 +14,24 @@ class CreateAnimation(avalon.maya.Creator):
super(CreateAnimation, self).__init__(*args, **kwargs)
# create an ordered dict with the existing data first
data = OrderedDict(**self.data)
# get basic animation data : start / end / handles / steps
for key, value in lib.collect_animation_data().items():
data[key] = value
self.data[key] = value
# Write vertex colors with the geometry.
data["writeColorSets"] = False
self.data["writeColorSets"] = False
# Include only renderable visible shapes.
# Skips locators and empty transforms
data["renderableOnly"] = False
self.data["renderableOnly"] = False
# Include only nodes that are visible at least once during the
# frame range.
data["visibleOnly"] = False
self.data["visibleOnly"] = False
self.data = data
# Include the groups above the out_SET content
self.data["includeParentHierarchy"] = False # Include parent groups
# Default to exporting world-space
self.data["worldSpace"] = True

View file

@ -1,4 +1,3 @@
from collections import OrderedDict
import avalon.maya
from pype.maya import lib
@ -15,13 +14,10 @@ class CreateCamera(avalon.maya.Creator):
super(CreateCamera, self).__init__(*args, **kwargs)
# get basic animation data : start / end / handles / steps
data = OrderedDict(**self.data)
animation_data = lib.collect_animation_data()
for key, value in animation_data.items():
data[key] = value
self.data[key] = value
# Bake to world space by default, when this is False it will also
# include the parent hierarchy in the baked results
data['bakeToWorldSpace'] = True
self.data = data
self.data['bakeToWorldSpace'] = True

View file

@ -1,4 +1,3 @@
from collections import OrderedDict
import avalon.maya
from pype.maya import lib
@ -14,7 +13,4 @@ class CreateLook(avalon.maya.Creator):
def __init__(self, *args, **kwargs):
super(CreateLook, self).__init__(*args, **kwargs)
data = OrderedDict(**self.data)
data["renderlayer"] = lib.get_current_renderlayer()
self.data = data
self.data["renderlayer"] = lib.get_current_renderlayer()

View file

@ -1,5 +1,3 @@
from collections import OrderedDict
import avalon.maya
@ -14,10 +12,12 @@ class CreateModel(avalon.maya.Creator):
def __init__(self, *args, **kwargs):
super(CreateModel, self).__init__(*args, **kwargs)
# create an ordered dict with the existing data first
data = OrderedDict(**self.data)
# Vertex colors with the geometry
self.data["writeColorSets"] = False
# Write vertex colors with the geometry.
data["writeColorSets"] = True
# Include attributes by attribute name or prefix
self.data["attr"] = ""
self.data["attrPrefix"] = ""
self.data = data
# Whether to include parent hierarchy of nodes in the instance
self.data["includeParentHierarchy"] = False

View file

@ -1,5 +1,3 @@
from collections import OrderedDict
import avalon.maya
from pype.maya import lib
@ -15,22 +13,15 @@ class CreatePointCache(avalon.maya.Creator):
def __init__(self, *args, **kwargs):
super(CreatePointCache, self).__init__(*args, **kwargs)
# create an ordered dict with the existing data first
data = OrderedDict(**self.data)
# Add animation data
self.data.update(lib.collect_animation_data())
# get basic animation data : start / end / handles / steps
for key, value in lib.collect_animation_data().items():
data[key] = value
self.data["writeColorSets"] = False # Vertex colors with the geometry.
self.data["renderableOnly"] = False # Only renderable visible shapes
self.data["visibleOnly"] = False # only nodes that are visible
self.data["includeParentHierarchy"] = False # Include parent groups
self.data["worldSpace"] = True # Default to exporting world-space
# Write vertex colors with the geometry.
data["writeColorSets"] = False
# Include only renderable visible shapes.
# Skips locators and empty transforms
data["renderableOnly"] = False
# Include only nodes that are visible at least once during the
# frame range.
data["visibleOnly"] = False
self.data = data
# Add options for custom attributes
self.data["attr"] = ""
self.data["attrPrefix"] = ""

View file

@ -1,10 +1,10 @@
from collections import OrderedDict
from maya import cmds
import pype.maya.lib as lib
from avalon.vendor import requests
import avalon.maya
import os
from avalon import api
class CreateRenderGlobals(avalon.maya.Creator):
@ -19,13 +19,13 @@ class CreateRenderGlobals(avalon.maya.Creator):
# We won't be publishing this one
self.data["id"] = "avalon.renderglobals"
# get pools
# Get available Deadline pools
try:
deadline_url = os.environ["DEADLINE_REST_URL"]
except KeyError:
self.log.error("Deadline REST API url not found.")
argument = "{}/api/pools?NamesOnly=true".format(deadline_url)
argument = "{}/api/pools?NamesOnly=true".format(AVALON_DEADLINE)
response = requests.get(argument)
if not response.ok:
self.log.warning("No pools retrieved")
@ -38,33 +38,31 @@ class CreateRenderGlobals(avalon.maya.Creator):
self.data.pop("asset", None)
self.data.pop("active", None)
data = OrderedDict(**self.data)
data["suspendPublishJob"] = False
data["extendFrames"] = False
data["overrideExistingFrame"] = True
data["useLegacyRenderLayers"] = True
data["priority"] = 50
data["framesPerTask"] = 1
data["whitelist"] = False
data["machineList"] = ""
data["useMayaBatch"] = True
data["primaryPool"] = pools
self.data["suspendPublishJob"] = False
self.data["extendFrames"] = False
self.data["overrideExistingFrame"] = True
self.data["useLegacyRenderLayers"] = True
self.data["priority"] = 50
self.data["framesPerTask"] = 1
self.data["whitelist"] = False
self.data["machineList"] = ""
self.data["useMayaBatch"] = True
self.data["primaryPool"] = pools
# We add a string "-" to allow the user to not set any secondary pools
data["secondaryPool"] = ["-"] + pools
self.data["secondaryPool"] = ["-"] + pools
self.data = data
self.options = {"useSelection": False} # Force no content
def process(self):
exists = cmds.ls(self.name)
assert len(exists) <= 1, (
"More than one renderglobal exists, this is a bug")
"More than one renderglobal exists, this is a bug"
)
if exists:
return cmds.warning("%s already exists." % exists[0])
super(CreateRenderGlobals, self).process()
cmds.setAttr("{}.machineList".format(self.name), lock=True)
with lib.undo_chunk():
super(CreateRenderGlobals, self).process()
cmds.setAttr("{}.machineList".format(self.name), lock=True)

View file

@ -1,5 +1,6 @@
from maya import cmds
import pype.maya.lib as lib
import avalon.maya
@ -12,10 +13,11 @@ class CreateRig(avalon.maya.Creator):
icon = "wheelchair"
def process(self):
instance = super(CreateRig, self).process()
self.log.info("Creating Rig instance set up ...")
with lib.undo_chunk():
instance = super(CreateRig, self).process()
controls = cmds.sets(name="controls_SET", empty=True)
pointcache = cmds.sets(name="out_SET", empty=True)
cmds.sets([controls, pointcache], forceElement=instance)
self.log.info("Creating Rig instance set up ...")
controls = cmds.sets(name="controls_SET", empty=True)
pointcache = cmds.sets(name="out_SET", empty=True)
cmds.sets([controls, pointcache], forceElement=instance)

View file

@ -1,5 +1,3 @@
from collections import OrderedDict
import avalon.maya
@ -14,13 +12,9 @@ class CreateVrayProxy(avalon.maya.Creator):
def __init__(self, *args, **kwargs):
super(CreateVrayProxy, self).__init__(*args, **kwargs)
data = OrderedDict(**self.data)
data["animation"] = False
data["startFrame"] = 1
data["endFrame"] = 1
self.data["animation"] = False
self.data["startFrame"] = 1
self.data["endFrame"] = 1
# Write vertex colors
data["vertexColors"] = False
self.data.update(data)
self.data["vertexColors"] = False

View file

@ -0,0 +1,27 @@
import avalon.maya
class CreateVRayScene(avalon.maya.Creator):
label = "VRay Scene"
family = "vrayscene"
icon = "cubes"
def __init__(self, *args, **kwargs):
super(CreateVRayScene, self).__init__(*args, **kwargs)
# We don't need subset or asset attributes
self.data.pop("subset", None)
self.data.pop("asset", None)
self.data.pop("active", None)
self.data.update({
"id": "avalon.vrayscene", # We won't be publishing this one
"suspendRenderJob": False,
"suspendPublishJob": False,
"extendFrames": False,
"pools": "",
"framesPerTask": 1
})
self.options = {"useSelection": False} # Force no content

View file

@ -15,12 +15,13 @@ class CreateYetiCache(avalon.maya.Creator):
def __init__(self, *args, **kwargs):
super(CreateYetiCache, self).__init__(*args, **kwargs)
data = OrderedDict(**self.data)
data["peroll"] = 0
self.data["preroll"] = 0
# Add animation data without step and handles
anim_data = lib.collect_animation_data()
data.update({"startFrame": anim_data["startFrame"],
"endFrame": anim_data["endFrame"],
"samples": 3})
anim_data.pop("step")
anim_data.pop("handles")
self.data.update(anim_data)
self.data = data
# Add samples
self.data["samples"] = 3

View file

@ -1,5 +1,6 @@
from maya import cmds
import pype.maya.lib as lib
import avalon.maya
@ -12,9 +13,9 @@ class CreateYetiRig(avalon.maya.Creator):
def process(self):
instance = super(CreateYetiRig, self).process()
with lib.undo_chunk():
instance = super(CreateYetiRig, self).process()
self.log.info("Creating Rig instance set up ...")
input_meshes = cmds.sets(name="input_SET", empty=True)
cmds.sets(input_meshes, forceElement=instance)
self.log.info("Creating Rig instance set up ...")
input_meshes = cmds.sets(name="input_SET", empty=True)
cmds.sets(input_meshes, forceElement=instance)

View file

@ -2,7 +2,7 @@ import pype.maya.plugin
class AbcLoader(pype.maya.plugin.ReferenceLoader):
"""Specific loader of Alembic for the studio.animation family"""
"""Specific loader of Alembic for the pype.animation family"""
families = ["animation",
"pointcache"]

View file

@ -2,7 +2,7 @@ import pype.maya.plugin
class CameraLoader(pype.maya.plugin.ReferenceLoader):
"""Specific loader of Alembic for the studio.camera family"""
"""Specific loader of Alembic for the pype.camera family"""
families = ["camera"]
label = "Reference camera"

View file

@ -0,0 +1,36 @@
import pype.maya.plugin
class FBXLoader(pype.maya.plugin.ReferenceLoader):
"""Load the FBX"""
families = ["fbx"]
representations = ["fbx"]
label = "Reference FBX"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, data):
import maya.cmds as cmds
from avalon import maya
# Ensure FBX plug-in is loaded
cmds.loadPlugin("fbxmaya", quiet=True)
with maya.maintained_selection():
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)

View file

@ -15,7 +15,7 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):
"""
order = pyblish.api.CollectorOrder + 0.2
order = pyblish.api.CollectorOrder + 0.4
families = ["animation"]
label = "Collect Animation Output Geometry"
hosts = ["maya"]
@ -43,11 +43,9 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):
descendants = cmds.ls(descendants, noIntermediate=True, long=True)
# Add members and descendants together for a complete overview
# hierarchy = members + descendants
hierarchy = members
self.log.info(members)
self.log.info(hierarchy)
hierarchy = members + descendants
# Ignore certain node types (e.g. constraints)
ignore = cmds.ls(hierarchy, type=self.ignore_type, long=True)

View file

@ -1,8 +1,6 @@
from maya import cmds
import pyblish.api
import os
from pype.maya import lib
class CollectMayaCurrentFile(pyblish.api.ContextPlugin):

View file

@ -12,29 +12,14 @@ class CollectInstances(pyblish.api.ContextPlugin):
Identifier:
id (str): "pyblish.avalon.instance"
Supported Families:
avalon.model: Geometric representation of artwork
avalon.rig: An articulated model for animators.
A rig may contain a series of sets in which to identify
its contents.
- cache_SEL: Should contain cachable polygonal meshes
- controls_SEL: Should contain animatable controllers for animators
- resources_SEL: Should contain nodes that reference external files
Limitations:
- Only Maya is supported
- One (1) rig per scene file
- Unmanaged history, it is up to the TD to ensure
history is up to par.
avalon.animation: Pointcache of `avalon.rig`
Limitations:
- Does not take into account nodes connected to those
within an objectSet. Extractors are assumed to export
with history preserved, but this limits what they will
be able to achieve and the amount of data available
to validators.
to validators. An additional collector could also
append this input data into the instance, as we do
for `pype.rig` with collect_history.
"""
@ -101,7 +86,11 @@ class CollectInstances(pyblish.api.ContextPlugin):
fullPath=True) or []
children = cmds.ls(children, noIntermediate=True, long=True)
parents = self.get_all_parents(members)
parents = []
if data.get("includeParentHierarchy", True):
# If `includeParentHierarchy` then include the parents
# so they will also be picked up in the instance by validators
parents = self.get_all_parents(members)
members_hierarchy = list(set(members + children + parents))
# Create the instance

View file

@ -45,10 +45,8 @@ def get_look_attrs(node):
if cmds.objectType(node, isAType="shape"):
attrs = cmds.listAttr(node, changedSinceFileOpen=True) or []
for attr in attrs:
result.append(attr)
# if attr in SHAPE_ATTRS:
# result.append(attr)
if attr in SHAPE_ATTRS:
result.append(attr)
return result
@ -109,7 +107,6 @@ def seq_to_glob(path):
"<f>": "<f>"
}
lower = path.lower()
has_pattern = False
for pattern, regex_pattern in patterns.items():
@ -205,7 +202,7 @@ class CollectLook(pyblish.api.InstancePlugin):
"""
order = pyblish.api.CollectorOrder + 0.2
order = pyblish.api.CollectorOrder + 0.4
families = ["look"]
label = "Collect Look"
hosts = ["maya"]
@ -364,8 +361,6 @@ class CollectLook(pyblish.api.InstancePlugin):
# Collect changes to "custom" attributes
node_attrs = get_look_attrs(node)
self.log.info('attr: {}'.format(node_attrs))
# Only include if there are any properties we care about
if not node_attrs:
continue

View file

@ -9,14 +9,13 @@ class CollectModelData(pyblish.api.InstancePlugin):
Ensures always only a single frame is extracted (current frame).
Note:
This is a workaround so that the `studio.model` family can use the
This is a workaround so that the `pype.model` family can use the
same pointcache extractor implementation as animation and pointcaches.
This always enforces the "current" frame to be published.
"""
order = pyblish.api.CollectorOrder + 0.2
order = pyblish.api.CollectorOrder + 0.499
label = 'Collect Model Data'
families = ["model"]

View file

@ -6,10 +6,9 @@ import pype.maya.lib as lib
class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
"""Validate all render layer's AOVs / Render Elements are registered in
the database
"""Collect all render layer's AOVs / Render Elements that will render.
This validator is important to be able to Extend Frames
This collector is important to be able to Extend Frames.
Technical information:
Each renderer uses different logic to work with render passes.
@ -37,8 +36,7 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
return
# Get renderer
renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer")
renderer = instance.data["renderer"]
self.log.info("Renderer found: {}".format(renderer))
rp_node_types = {"vray": ["VRayRenderElement", "VRayRenderElementSet"],
@ -53,21 +51,20 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin):
# Collect all AOVs / Render Elements
layer = instance.data["setMembers"]
with lib.renderlayer(layer):
node_type = rp_node_types[renderer]
render_elements = cmds.ls(type=node_type)
node_type = rp_node_types[renderer]
render_elements = cmds.ls(type=node_type)
# Check if AOVs / Render Elements are enabled
for element in render_elements:
enabled = lib.get_attr_in_layer("{}.enabled".format(element),
layer=layer)
if not enabled:
continue
# Check if AOVs / Render Elements are enabled
for element in render_elements:
enabled = cmds.getAttr("{}.enabled".format(element))
if not enabled:
continue
pass_name = self.get_pass_name(renderer, element)
render_pass = "%s.%s" % (instance.data["subset"], pass_name)
pass_name = self.get_pass_name(renderer, element)
render_pass = "%s.%s" % (instance.data["subset"], pass_name)
result.append(render_pass)
result.append(render_pass)
self.log.info("Found {} render elements / AOVs for "
"'{}'".format(len(result), instance.data["subset"]))

View file

@ -0,0 +1,26 @@
import pyblish.api
from maya import cmds
from pype.maya import lib
class CollectRenderableCamera(pyblish.api.InstancePlugin):
"""Collect the renderable camera(s) for the render layer"""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Renderable Camera(s)"
hosts = ["maya"]
families = ["vrayscene",
"renderlayer"]
def process(self, instance):
layer = instance.data["setMembers"]
cameras = cmds.ls(type="camera", long=True)
renderable = [c for c in cameras if
lib.get_attr_in_layer("%s.renderable" % c, layer=layer)]
self.log.info("Found cameras %s: %s" % (len(renderable), renderable))
instance.data["cameras"] = renderable

View file

@ -22,16 +22,10 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
try:
render_globals = cmds.ls("renderglobalsDefault")[0]
except IndexError:
self.log.error("Cannot collect renderlayers without "
"renderGlobals node")
self.log.info("Skipping renderlayer collection, no "
"renderGlobalsDefault found..")
return
# Get start and end frame
start_frame = self.get_render_attribute("startFrame")
end_frame = self.get_render_attribute("endFrame")
context.data["startFrame"] = start_frame
context.data["endFrame"] = end_frame
# Get all valid renderlayers
# This is how Maya populates the renderlayer display
rlm_attribute = "renderLayerManager.renderLayerId"
@ -59,30 +53,34 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
if layer.endswith("defaultRenderLayer"):
layername = "masterLayer"
else:
# Remove Maya render setup prefix `rs_`
layername = layer.split("rs_", 1)[-1]
# Get layer specific settings, might be overrides
with lib.renderlayer(layer):
data = {
"subset": layername,
"setMembers": layer,
"publish": True,
"startFrame": self.get_render_attribute("startFrame"),
"endFrame": self.get_render_attribute("endFrame"),
"byFrameStep": self.get_render_attribute("byFrameStep"),
"renderer": self.get_render_attribute("currentRenderer"),
data = {
"subset": layername,
"setMembers": layer,
"publish": True,
"startFrame": self.get_render_attribute("startFrame",
layer=layer),
"endFrame": self.get_render_attribute("endFrame",
layer=layer),
"byFrameStep": self.get_render_attribute("byFrameStep",
layer=layer),
"renderer": self.get_render_attribute("currentRenderer",
layer=layer),
# instance subset
"family": "Render Layers",
"families": ["renderlayer"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# instance subset
"family": "Render Layers",
"families": ["renderlayer"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath
}
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath
}
# Apply each user defined attribute as data
for attr in cmds.listAttr(layer, userDefined=True) or list():
@ -112,8 +110,9 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
instance.data["label"] = label
instance.data.update(data)
def get_render_attribute(self, attr):
return cmds.getAttr("defaultRenderGlobals.{}".format(attr))
def get_render_attribute(self, attr, layer):
return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
layer=layer)
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without

View file

@ -23,7 +23,7 @@ class CollectSetDress(pyblish.api.InstancePlugin):
"""
order = pyblish.api.CollectorOrder + 0.2
order = pyblish.api.CollectorOrder + 0.49
label = "Set Dress"
families = ["setdress"]
@ -32,7 +32,6 @@ class CollectSetDress(pyblish.api.InstancePlugin):
# Find containers
containers = avalon.ls()
# Get all content from the instance
instance_lookup = set(cmds.ls(instance, type="transform", long=True))
data = defaultdict(list)
@ -41,7 +40,6 @@ class CollectSetDress(pyblish.api.InstancePlugin):
for container in containers:
root = lib.get_container_transforms(container, root=True)
self.log.debug(root)
if not root or root not in instance_lookup:
continue

View file

@ -0,0 +1,110 @@
import os
import pyblish.api
from maya import cmds
from avalon import api
class CollectVRayScene(pyblish.api.ContextPlugin):
"""Collect all information prior for exporting vrscenes
"""
order = pyblish.api.CollectorOrder
label = "Collect VRay Scene"
hosts = ["maya"]
def process(self, context):
# Sort by displayOrder
def sort_by_display_order(layer):
return cmds.getAttr("%s.displayOrder" % layer)
host = api.registered_host()
asset = api.Session["AVALON_ASSET"]
work_dir = context.data["workspaceDir"]
# Get VRay Scene instance
vray_scenes = host.lsattr("family", "vrayscene")
if not vray_scenes:
self.log.info("Skipping vrayScene collection, no "
"vrayscene instance found..")
return
assert len(vray_scenes) == 1, "Multiple vrayscene instances found!"
vray_scene = vray_scenes[0]
vrscene_data = host.read(vray_scene)
assert cmds.ls("vraySettings", type="VRaySettingsNode"), (
"VRay Settings node does not exists. "
"Please ensure V-Ray is the current renderer."
)
# Output data
start_frame = int(cmds.getAttr("defaultRenderGlobals.startFrame"))
end_frame = int(cmds.getAttr("defaultRenderGlobals.endFrame"))
# Create output file path with template
file_name = context.data["currentFile"].replace("\\", "/")
vrscene = ("vrayscene", "<Scene>", "<Scene>_<Layer>", "<Layer>")
vrscene_output = os.path.join(work_dir, *vrscene)
# Check and create render output template for render job
# outputDir is required for submit_publish_job
if not vrscene_data.get("suspendRenderJob", False):
renders = ("renders", "<Scene>", "<Scene>_<Layer>", "<Layer>")
output_renderpath = os.path.join(work_dir, *renders)
vrscene_data["outputDir"] = output_renderpath
# Get resolution
resolution = (cmds.getAttr("defaultResolution.width"),
cmds.getAttr("defaultResolution.height"))
# Get format extension
extension = cmds.getAttr("vraySettings.imageFormatStr")
# Get render layers
render_layers = [i for i in cmds.ls(type="renderLayer") if
cmds.getAttr("{}.renderable".format(i)) and not
cmds.referenceQuery(i, isNodeReferenced=True)]
render_layers = sorted(render_layers, key=sort_by_display_order)
for layer in render_layers:
subset = layer
if subset == "defaultRenderLayer":
subset = "masterLayer"
data = {
"subset": subset,
"setMembers": layer,
"startFrame": start_frame,
"endFrame": end_frame,
"renderer": "vray",
"resolution": resolution,
"ext": ".{}".format(extension),
# instance subset
"family": "VRay Scene",
"families": ["vrayscene"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": file_name,
# Store VRay Scene additional data
"vrsceneOutput": vrscene_output
}
data.update(vrscene_data)
instance = context.create_instance(subset)
self.log.info("Created: %s" % instance.name)
instance.data.update(data)

View file

@ -0,0 +1,15 @@
import pyblish.api
from maya import mel
class CollectWorksceneFPS(pyblish.api.ContextPlugin):
"""Get the FPS of the work scene"""
label = "Workscene FPS"
order = pyblish.api.CollectorOrder
hosts = ["maya"]
def process(self, context):
fps = mel.eval('currentTimeUnitToFPS()')
self.log.info("Workscene FPS: %s" % fps)
context.data.update({"fps": fps})

View file

@ -26,7 +26,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
Other information is the name of the transform and it's Colorbleed ID
"""
order = pyblish.api.CollectorOrder + 0.2
order = pyblish.api.CollectorOrder + 0.45
label = "Collect Yeti Cache"
families = ["yetiRig", "yeticache"]
hosts = ["maya"]

View file

@ -6,6 +6,7 @@ from maya import cmds
import pyblish.api
from pype.maya import lib
from pype.lib import pairwise
SETTINGS = {"renderDensity",
@ -19,7 +20,7 @@ SETTINGS = {"renderDensity",
class CollectYetiRig(pyblish.api.InstancePlugin):
"""Collect all information of the Yeti Rig"""
order = pyblish.api.CollectorOrder + 0.2
order = pyblish.api.CollectorOrder + 0.4
label = "Collect Yeti Rig"
families = ["yetiRig"]
hosts = ["maya"]
@ -29,6 +30,27 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
assert "input_SET" in instance.data["setMembers"], (
"Yeti Rig must have an input_SET")
input_connections = self.collect_input_connections(instance)
# Collect any textures if used
yeti_resources = []
yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya", long=True)
for node in yeti_nodes:
# Get Yeti resources (textures)
resources = self.get_yeti_resources(node)
yeti_resources.extend(resources)
instance.data["rigsettings"] = {"inputs": input_connections}
instance.data["resources"] = yeti_resources
# Force frame range for export
instance.data["startFrame"] = 1
instance.data["endFrame"] = 1
def collect_input_connections(self, instance):
"""Collect the inputs for all nodes in the input_SET"""
# Get the input meshes information
input_content = cmds.ls(cmds.sets("input_SET", query=True), long=True)
@ -39,44 +61,38 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
# Ignore intermediate objects
input_content = cmds.ls(input_content, long=True, noIntermediate=True)
if not input_content:
return []
# Store all connections
connections = cmds.listConnections(input_content,
source=True,
destination=False,
connections=True,
# Only allow inputs from dagNodes
# (avoid display layers, etc.)
type="dagNode",
plugs=True) or []
# Group per source, destination pair. We need to reverse the connection
# list as it comes in with the shape used to query first while that
# shape is the destination of the connection
grouped = [(connections[i+1], item) for i, item in
enumerate(connections) if i % 2 == 0]
connections = cmds.ls(connections, long=True) # Ensure long names
inputs = []
for src, dest in grouped:
for dest, src in pairwise(connections):
source_node, source_attr = src.split(".", 1)
dest_node, dest_attr = dest.split(".", 1)
# Ensure the source of the connection is not included in the
# current instance's hierarchy. If so, we ignore that connection
# as we will want to preserve it even over a publish.
if source_node in instance:
self.log.debug("Ignoring input connection between nodes "
"inside the instance: %s -> %s" % (src, dest))
continue
inputs.append({"connections": [source_attr, dest_attr],
"sourceID": lib.get_id(source_node),
"destinationID": lib.get_id(dest_node)})
# Collect any textures if used
yeti_resources = []
yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya", long=True)
for node in yeti_nodes:
# Get Yeti resources (textures)
resources = self.get_yeti_resources(node)
yeti_resources.extend(resources)
instance.data["rigsettings"] = {"inputs": inputs}
instance.data["resources"] = yeti_resources
# Force frame range for export
instance.data["startFrame"] = 1
instance.data["endFrame"] = 1
return inputs
def get_yeti_resources(self, node):
"""Get all resource file paths
@ -96,7 +112,13 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
list
"""
resources = []
image_search_path = cmds.getAttr("{}.imageSearchPath".format(node))
image_search_paths = cmds.getAttr("{}.imageSearchPath".format(node))
# TODO: Somehow this uses OS environment path separator, `:` vs `;`
# Later on check whether this is pipeline OS cross-compatible.
image_search_paths = [p for p in
image_search_paths.split(os.path.pathsep) if p]
# List all related textures
texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
@ -108,36 +130,51 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
type="reference")
self.log.info("Found %i reference node(s)" % len(reference_nodes))
if texture_filenames and not image_search_path:
if texture_filenames and not image_search_paths:
raise ValueError("pgYetiMaya node '%s' is missing the path to the "
"files in the 'imageSearchPath "
"atttribute'" % node)
# Collect all texture files
for texture in texture_filenames:
item = {"files": [], "source": texture, "node": node}
texture_filepath = os.path.join(image_search_path, texture)
if len(texture.split(".")) > 2:
# For UDIM based textures (tiles)
if "<UDIM>" in texture:
sequences = self.get_sequence(texture_filepath,
pattern="<UDIM>")
item["files"].extend(sequences)
# Based textures (animated masks f.e)
elif "%04d" in texture:
sequences = self.get_sequence(texture_filepath,
pattern="%04d")
item["files"].extend(sequences)
# Assuming it is a fixed name
else:
item["files"].append(texture_filepath)
files = []
if os.path.isabs(texture):
self.log.debug("Texture is absolute path, ignoring "
"image search paths for: %s" % texture)
files = self.search_textures(texture)
else:
item["files"].append(texture_filepath)
for root in image_search_paths:
filepath = os.path.join(root, texture)
files = self.search_textures(filepath)
if files:
# Break out on first match in search paths..
break
if not files:
self.log.warning(
"No texture found for: %s "
"(searched: %s)" % (texture, image_search_paths))
item = {
"files": files,
"source": texture,
"node": node
}
resources.append(item)
# For now validate that every texture has at least a single file
# resolved. Since a 'resource' does not have the requirement of having
# a `files` explicitly mapped it's not explicitly validated.
# TODO: Validate this as a validator
invalid_resources = []
for resource in resources:
if not resource['files']:
invalid_resources.append(resource)
if invalid_resources:
raise RuntimeError("Invalid resources")
# Collect all referenced files
for reference_node in reference_nodes:
ref_file = cmds.pgYetiGraph(node,
@ -145,35 +182,83 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
param="reference_file",
getParamValue=True)
if not os.path.isfile(ref_file):
raise RuntimeError("Reference file must be a full file path!")
# Create resource dict
item = {"files": [],
"source": ref_file,
"node": node,
"graphnode": reference_node,
"param": "reference_file"}
item = {
"source": ref_file,
"node": node,
"graphnode": reference_node,
"param": "reference_file",
"files": []
}
ref_file_name = os.path.basename(ref_file)
if "%04d" in ref_file_name:
ref_files = self.get_sequence(ref_file)
item["files"].extend(ref_files)
item["files"] = self.get_sequence(ref_file)
else:
item["files"].append(ref_file)
if os.path.exists(ref_file) and os.path.isfile(ref_file):
item["files"] = [ref_file]
if not item["files"]:
self.log.warning("Reference node '%s' has no valid file "
"path set: %s" % (reference_node, ref_file))
# TODO: This should allow to pass and fail in Validator instead
raise RuntimeError("Reference node must be a full file path!")
resources.append(item)
return resources
def get_sequence(self, filename, pattern="%04d"):
"""Get sequence from filename
def search_textures(self, filepath):
"""Search all texture files on disk.
This also parses to full sequences for those with dynamic patterns
like <UDIM> and %04d in the filename.
Args:
filepath (str): The full path to the file, including any
dynamic patterns like <UDIM> or %04d
Returns:
list: The files found on disk
"""
filename = os.path.basename(filepath)
# Collect full sequence if it matches a sequence pattern
if len(filename.split(".")) > 2:
# For UDIM based textures (tiles)
if "<UDIM>" in filename:
sequences = self.get_sequence(filepath,
pattern="<UDIM>")
if sequences:
return sequences
# Frame/time - Based textures (animated masks f.e)
elif "%04d" in filename:
sequences = self.get_sequence(filepath,
pattern="%04d")
if sequences:
return sequences
# Assuming it is a fixed name (single file)
if os.path.exists(filepath):
return [filepath]
return []
def get_sequence(self, filepath, pattern="%04d"):
"""Get sequence from filename.
This will only return files if they exist on disk as it tries
to collect the sequence using the filename pattern and searching
for them on disk.
Supports negative frame ranges like -001, 0000, 0001 and -0001,
0000, 0001.
Arguments:
filename (str): The full path to filename containing the given
filepath (str): The full path to filename containing the given
pattern.
pattern (str): The pattern to swap with the variable frame number.
@ -183,10 +268,10 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
"""
from avalon.vendor import clique
escaped = re.escape(filename)
escaped = re.escape(filepath)
re_pattern = escaped.replace(pattern, "-?[0-9]+")
source_dir = os.path.dirname(filename)
source_dir = os.path.dirname(filepath)
files = [f for f in os.listdir(source_dir)
if re.match(re_pattern, f)]

View file

@ -27,14 +27,12 @@ class ExtractAnimation(pype.api.Extractor):
raise RuntimeError("Couldn't find exactly one out_SET: "
"{0}".format(out_sets))
out_set = out_sets[0]
nodes = cmds.sets(out_set, query=True)
self.log.info('nodes to export: {}'.format(str(nodes)))
roots = cmds.sets(out_set, query=True)
# Include all descendants
# nodes += cmds.listRelatives(nodes,
# allDescendents=True,
# fullPath=True) or []
nodes = roots + cmds.listRelatives(roots,
allDescendents=True,
fullPath=True) or []
# Collect the start and end including handles
start = instance.data["startFrame"]
@ -57,21 +55,27 @@ class ExtractAnimation(pype.api.Extractor):
"writeVisibility": True,
"writeCreases": True,
"uvWrite": True,
"selection": False,
"root": nodes
"selection": True,
"worldSpace": instance.data.get("worldSpace", True)
}
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = roots
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
with avalon.maya.suspended_refresh():
# with avalon.maya.maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(file=path,
startFrame=start,
endFrame=end,
**options)
with avalon.maya.maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(file=path,
startFrame=start,
endFrame=end,
**options)
if "files" not in instance.data:
instance.data["files"] = list()

View file

@ -67,7 +67,7 @@ class ExtractCameraAlembic(pype.api.Extractor):
job_str += ' -file "{0}"'.format(path)
with lib.evaluation("off"):
with lib.no_refresh():
with avalon.maya.suspended_refresh():
cmds.AbcExport(j=job_str, verbose=False)
if "files" not in instance.data:

View file

@ -127,7 +127,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
self.log.info("Performing camera bakes for: {0}".format(transform))
with avalon.maya.maintained_selection():
with lib.evaluation("off"):
with lib.no_refresh():
with avalon.maya.suspended_refresh():
baked = lib.bake_to_world_space(
transform,
frame_range=range_with_handles,

View file

@ -0,0 +1,216 @@
import os
from maya import cmds
import maya.mel as mel
import pyblish.api
import avalon.maya
import pype.api
class ExtractFBX(pype.api.Extractor):
    """Extract FBX from Maya.

    This extracts reproducible FBX exports ignoring any of the settings set
    on the local machine in the FBX export options window.

    All export settings are applied with the `FBXExport*` commands prior
    to the `FBXExport` call itself. The options can be overridden with their
    nice names as seen in the "options" property on this class.

    For more information on FBX exports see:
    - https://knowledge.autodesk.com/support/maya/learn-explore/caas
    /CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4
    -9CB19C28F4E0-htm.html
    - http://forums.cgsociety.org/archive/index.php?t-1032853.html
    - https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE
    /LKs9hakE28kJ

    """

    order = pyblish.api.ExtractorOrder
    label = "Extract FBX"
    families = ["fbx"]

    @property
    def options(self):
        """Overridable options for FBX Export

        Given in the following format
            - {NAME: EXPECTED TYPE}

        If the overridden option's type does not match,
        the option is not included and a warning is logged.

        """

        return {
            "cameras": bool,
            "smoothingGroups": bool,
            "hardEdges": bool,
            "tangents": bool,
            "smoothMesh": bool,
            "instances": bool,
            # "referencedContainersContent": bool, # deprecated in Maya 2016+
            "bakeComplexAnimation": int,
            "bakeComplexStart": int,
            "bakeComplexEnd": int,
            "bakeComplexStep": int,
            "bakeResampleAnimation": bool,
            "animationOnly": bool,
            "useSceneName": bool,
            "quaternion": str,  # "euler"
            "shapes": bool,
            "skins": bool,
            "constraints": bool,
            "lights": bool,
            "embeddedTextures": bool,
            "inputConnections": bool,
            "upAxis": str,  # x, y or z,
            "triangulate": bool
        }

    @property
    def default_options(self):
        """The default options for FBX extraction.

        This includes shapes, skins, constraints, lights and incoming
        connections and exports with the Y-axis as up-axis.

        By default this uses the time sliders start and end time.

        """

        start_frame = int(cmds.playbackOptions(query=True,
                                               animationStartTime=True))
        end_frame = int(cmds.playbackOptions(query=True,
                                             animationEndTime=True))

        return {
            "cameras": False,
            "smoothingGroups": False,
            "hardEdges": False,
            "tangents": False,
            "smoothMesh": False,
            "instances": False,
            "bakeComplexAnimation": True,
            "bakeComplexStart": start_frame,
            "bakeComplexEnd": end_frame,
            "bakeComplexStep": 1,
            "bakeResampleAnimation": True,
            "animationOnly": False,
            "useSceneName": False,
            "quaternion": "euler",
            "shapes": True,
            "skins": True,
            "constraints": False,
            "lights": True,
            "embeddedTextures": True,
            "inputConnections": True,
            "upAxis": "y",
            "triangulate": False
        }

    def parse_overrides(self, instance, options):
        """Inspect data of instance to determine overridden options

        An instance may supply any of the overridable options
        as data, the option is then added to the extraction.

        Args:
            instance: The pyblish instance whose ``data`` may override
                entries listed in the ``options`` property.
            options (dict): The option dict to update in place.

        Returns:
            dict: The (mutated) options dict.

        """

        for key in instance.data:
            if key not in self.options:
                continue

            # Ensure the data is of correct type
            value = instance.data[key]
            if not isinstance(value, self.options[key]):
                self.log.warning(
                    "Overridden attribute {key} was of "
                    "the wrong type: {invalid_type} "
                    "- should have been {valid_type}".format(
                        key=key,
                        invalid_type=type(value).__name__,
                        valid_type=self.options[key].__name__))
                continue

            options[key] = value

        return options

    def process(self, instance):
        """Export the instance's set members to a single FBX file."""

        # Ensure FBX plug-in is loaded
        cmds.loadPlugin("fbxmaya", quiet=True)

        # Define output path
        directory = self.staging_dir(instance)
        filename = "{0}.fbx".format(instance.name)
        path = os.path.join(directory, filename)

        # The export requires forward slashes because we need
        # to format it into a string in a mel expression
        path = path.replace('\\', '/')

        self.log.info("Extracting FBX to: {0}".format(path))

        members = instance.data["setMembers"]
        self.log.info("Members: {0}".format(members))
        self.log.info("Instance: {0}".format(instance[:]))

        # Parse export options
        options = self.default_options
        options = self.parse_overrides(instance, options)
        self.log.info("Export options: {0}".format(options))

        # Collect the start and end including handles
        start = instance.data["startFrame"]
        end = instance.data["endFrame"]
        handles = instance.data.get("handles", 0)
        if handles:
            start -= handles
            end += handles

        options['bakeComplexStart'] = start
        options['bakeComplexEnd'] = end

        # First apply the default export settings to be fully consistent
        # each time for successive publishes
        mel.eval("FBXResetExport")

        # Apply the FBX overrides through MEL since the commands
        # only work correctly in MEL according to online
        # available discussions on the topic
        # FIX: use `items()` instead of the Python 2-only `iteritems()` so
        # this also runs under Python 3 (behaves identically on Python 2).
        for option, value in options.items():
            key = option[0].upper() + option[1:]  # uppercase first letter

            # Boolean must be passed as lower-case strings
            # as to MEL standards
            if isinstance(value, bool):
                value = str(value).lower()

            template = "FBXExport{0} -v {1}"
            if key == "UpAxis":
                template = "FBXExport{0} {1}"

            cmd = template.format(key, value)
            self.log.info(cmd)
            mel.eval(cmd)

        # Never show the UI or generate a log
        mel.eval("FBXExportShowUI -v false")
        mel.eval("FBXExportGenerateLog -v false")

        # Export
        with avalon.maya.maintained_selection():
            cmds.select(members, r=1, noExpand=True)
            mel.eval('FBXExport -f "{}" -s'.format(path))

        if "files" not in instance.data:
            instance.data["files"] = list()

        instance.data["files"].append(filename)

        self.log.info("Extract FBX successful to: {0}".format(path))

View file

@ -1,5 +1,7 @@
import os
import json
import tempfile
import contextlib
from collections import OrderedDict
from maya import cmds
@ -11,6 +13,38 @@ import pype.api
import pype.maya.lib as lib
@contextlib.contextmanager
def no_workspace_dir():
    """Force maya to a fake temporary workspace directory.

    Note: This is not maya.cmds.workspace 'rootDirectory' but the 'directory'

    This helps to avoid Maya automatically remapping image paths to files
    relative to the currently set directory.

    Yields:
        None

    """

    # Store current workspace
    original = cmds.workspace(query=True, directory=True)

    # Set a fake workspace
    fake_workspace_dir = tempfile.mkdtemp()
    cmds.workspace(directory=fake_workspace_dir)

    try:
        yield
    finally:
        try:
            cmds.workspace(directory=original)
        except RuntimeError:
            # If the original workspace directory didn't exist either
            # ignore the fact that it fails to reset it to the old path
            pass

        # Remove the temporary directory. FIX: Maya (or the wrapped export)
        # may have written files into the fake workspace, in which case
        # os.rmdir raises OSError and would mask an otherwise successful
        # export - treat cleanup as best-effort instead.
        try:
            os.rmdir(fake_workspace_dir)
        except OSError:
            pass
class ExtractLook(pype.api.Extractor):
"""Extract Look (Maya Ascii + JSON)
@ -47,10 +81,6 @@ class ExtractLook(pype.api.Extractor):
resources = instance.data["resources"]
frame = cmds.currentTime(query=True)
instance.data['startFrame'] = frame
instance.data['endFrame'] = frame
remap = OrderedDict() # needs to be ordered, see color space values
for resource in resources:
attr = resource['attribute']
@ -69,18 +99,23 @@ class ExtractLook(pype.api.Extractor):
with lib.renderlayer(layer):
# TODO: Ensure membership edits don't become renderlayer overrides
with lib.empty_sets(sets, force=True):
with lib.attribute_values(remap):
with avalon.maya.maintained_selection():
cmds.select(sets, noExpand=True)
cmds.file(maya_path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=False,
channels=True,
constraints=True,
expressions=True,
constructionHistory=True)
# To avoid Maya trying to automatically remap the file
# textures relative to the `workspace -directory` we force
# it to a fake temporary workspace. This fixes textures
# getting incorrectly remapped. (LKD-17, PLN-101)
with no_workspace_dir():
with lib.attribute_values(remap):
with avalon.maya.maintained_selection():
cmds.select(sets, noExpand=True)
cmds.file(maya_path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=False,
channels=True,
constraints=True,
expressions=True,
constructionHistory=True)
# Write the JSON data
self.log.info("Extract json..")

View file

@ -32,6 +32,13 @@ class ExtractAlembic(pype.api.Extractor):
start -= handles
end += handles
attrs = instance.data.get("attr", "").split(";")
attrs = [value for value in attrs if value.strip()]
attrs += ["cbId"]
attr_prefixes = instance.data.get("attrPrefix", "").split(";")
attr_prefixes = [value for value in attr_prefixes if value.strip()]
# Get extra export arguments
writeColorSets = instance.data.get("writeColorSets", False)
@ -44,14 +51,22 @@ class ExtractAlembic(pype.api.Extractor):
options = {
"step": instance.data.get("step", 1.0),
"attr": ["cbId"],
"attr": attrs,
"attrPrefix": attr_prefixes,
"writeVisibility": True,
"writeCreases": True,
"writeColorSets": writeColorSets,
"uvWrite": True,
"selection": True
"selection": True,
"worldSpace": instance.data.get("worldSpace", True)
}
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = instance.data.get("setMembers")
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True

View file

@ -33,7 +33,7 @@ class ExtractSetDress(pype.api.Extractor):
json.dump(instance.data["scenedata"], filepath, ensure_ascii=False)
self.log.info("Extracting point cache ..")
cmds.select(instance.data["\\"])
cmds.select(instance.data["hierarchy"])
# Run basic alembic exporter
extract_alembic(file=hierarchy_path,

View file

@ -11,7 +11,8 @@ class IncrementCurrentFileDeadline(pyblish.api.ContextPlugin):
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["maya"]
families = ["renderlayer"]
families = ["renderlayer",
"vrayscene"]
optional = True
def process(self, context):

View file

@ -0,0 +1,264 @@
import os
import json
import getpass
from maya import cmds
from avalon import api
from avalon.vendor import requests
import pyblish.api
import pype.maya.lib as lib
def get_renderer_variables(renderlayer=None):
    """Retrieve the extension which has been set in the VRay settings

    Will return None if the current renderer is not VRay
    For Maya 2016.5 and up the renderSetup creates renderSetupLayer node which
    start with `rs`. Use the actual node name, do NOT use the `nice name`

    Args:
        renderlayer (str): the node name of the renderlayer.

    Returns:
        dict

    """
    layer = renderlayer or lib.get_current_renderlayer()
    renderer = lib.get_renderer(layer)
    attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS["default"])

    # Frame padding is stored on the renderer's settings node
    padding_plug = "{}.{}".format(attrs["node"], attrs["padding"])
    padding = cmds.getAttr(padding_plug)

    first_image = cmds.renderSettings(fullPath=True, firstImageName=True)[0]

    if renderer != "vray":
        # Get the extension, getAttr defaultRenderGlobals.imageFormat
        # returns an index number.
        base = os.path.basename(first_image)
        extension = os.path.splitext(base)[-1].strip(".")
        prefix = "<Scene>/<Scene>_<RenderLayer>/<RenderLayer>"
    else:
        # Maya's renderSettings function does not return V-Ray file extension
        # so we get the extension from vraySettings
        extension = cmds.getAttr("vraySettings.imageFormatStr")

        # When V-Ray image format has not been switched once from default .png
        # the getAttr command above returns None. As such we explicitly set
        # it to `.png`
        if extension is None:
            extension = "png"

        prefix = "<Scene>/<Scene>_<Layer>/<Layer>"

    return {"ext": extension,
            "filename_prefix": prefix,
            "padding": padding,
            "filename_0": first_image}
def preview_fname(folder, scene, layer, padding, ext):
    """Return output file path with #### for padding.

    Deadline requires the path to be formatted with # in place of numbers.
    For example `/path/to/render.####.png`

    Args:
        folder (str): The root output folder (image path)
        scene (str): The scene name
        layer (str): The layer name to be rendered
        padding (int): The padding length
        ext(str): The output file extension

    Returns:
        str

    """

    # Following hardcoded "<Scene>/<Scene>_<Layer>/<Layer>"
    hashes = "#" * padding
    relative = "{scene}/{scene}_{layer}/{layer}.{number}.{ext}".format(
        scene=scene,
        layer=layer,
        number=hashes,
        ext=ext)
    return os.path.join(folder, relative)
class MayaSubmitDeadline(pyblish.api.InstancePlugin):
    """Submit available render layers to Deadline

    Renders are submitted to a Deadline Web Service as
    supplied via the environment variable AVALON_DEADLINE

    """

    label = "Submit to Deadline"
    order = pyblish.api.IntegratorOrder
    hosts = ["maya"]
    families = ["renderlayer"]

    def process(self, instance):
        """Build a Deadline job payload for the render layer and POST it.

        Stores the expected output directory and the Deadline response on
        the instance for downstream (publish) plug-ins.
        """

        AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
                                          "http://localhost:8082")
        assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"

        context = instance.context
        workspace = context.data["workspaceDir"]
        filepath = context.data["currentFile"]
        filename = os.path.basename(filepath)
        comment = context.data.get("comment", "")
        scene = os.path.splitext(filename)[0]
        dirname = os.path.join(workspace, "renders")
        renderlayer = instance.data['setMembers']          # rs_beauty
        renderlayer_name = instance.data['subset']         # beauty
        renderlayer_globals = instance.data["renderGlobals"]
        legacy_layers = renderlayer_globals["UseLegacyRenderLayers"]
        deadline_user = context.data.get("deadlineUser", getpass.getuser())
        jobname = "%s - %s" % (filename, instance.name)

        # Get the variables depending on the renderer
        render_variables = get_renderer_variables(renderlayer)
        # First expected frame path, with '#' padding as Deadline requires
        output_filename_0 = preview_fname(folder=dirname,
                                          scene=scene,
                                          layer=renderlayer_name,
                                          padding=render_variables["padding"],
                                          ext=render_variables["ext"])

        try:
            # Ensure render folder exists
            os.makedirs(dirname)
        except OSError:
            # Already exists (or not creatable) - submission proceeds anyway
            pass

        # Documentation for keys available at:
        # https://docs.thinkboxsoftware.com
        #    /products/deadline/8.0/1_User%20Manual/manual
        #    /manual-submission.html#job-info-file-options
        payload = {
            "JobInfo": {
                # Top-level group name
                "BatchName": filename,

                # Job name, as seen in Monitor
                "Name": jobname,

                # Arbitrary username, for visualisation in Monitor
                "UserName": deadline_user,

                "Plugin": instance.data.get("mayaRenderPlugin", "MayaBatch"),
                "Frames": "{start}-{end}x{step}".format(
                    start=int(instance.data["startFrame"]),
                    end=int(instance.data["endFrame"]),
                    step=int(instance.data["byFrameStep"]),
                ),

                "Comment": comment,

                # Optional, enable double-click to preview rendered
                # frames from Deadline Monitor
                "OutputFilename0": output_filename_0.replace("\\", "/"),
            },
            "PluginInfo": {
                # Input
                "SceneFile": filepath,

                # Output directory and filename
                "OutputFilePath": dirname.replace("\\", "/"),
                "OutputFilePrefix": render_variables["filename_prefix"],

                # Mandatory for Deadline
                "Version": cmds.about(version=True),

                # Only render layers are considered renderable in this pipeline
                "UsingRenderLayers": True,

                # Use legacy Render Layer system
                "UseLegacyRenderLayers": legacy_layers,

                # Render only this layer
                "RenderLayer": renderlayer,

                # Determine which renderer to use from the file itself
                "Renderer": instance.data["renderer"],

                # Resolve relative references
                "ProjectPath": workspace,
            },

            # Mandatory for Deadline, may be empty
            "AuxFiles": []
        }

        # Include critical environment variables with submission
        keys = [
            # This will trigger `userSetup.py` on the slave
            # such that proper initialisation happens the same
            # way as it does on a local machine.
            # TODO(marcus): This won't work if the slaves don't
            # have accesss to these paths, such as if slaves are
            # running Linux and the submitter is on Windows.
            "PYTHONPATH",

            # todo: This is a temporary fix for yeti variables
            "PEREGRINEL_LICENSE",

            "REDSHIFT_MAYAEXTENSIONSPATH",
            "REDSHIFT_DISABLEOUTPUTLOCKFILES",

            "VRAY_FOR_MAYA2018_PLUGINS",
            "VRAY_PLUGINS",
            "VRAY_USE_THREAD_AFFINITY",
            "MAYA_MODULE_PATH"
        ]
        environment = dict({key: os.environ[key] for key in keys
                            if key in os.environ}, **api.Session)

        # NOTE(review): hard-coded to ";" (Windows PATH separator) and a
        # "P:" drive prefix - this looks studio-specific; confirm before
        # reusing on other platforms.
        PATHS = os.environ["PATH"].split(";")
        environment["PATH"] = ";".join([p for p in PATHS
                                        if p.startswith("P:")])

        payload["JobInfo"].update({
            "EnvironmentKeyValue%d" % index: "{key}={value}".format(
                key=key,
                value=environment[key]
            ) for index, key in enumerate(environment)
        })

        # Include optional render globals
        render_globals = instance.data.get("renderGlobals", {})
        payload["JobInfo"].update(render_globals)

        plugin = payload["JobInfo"]["Plugin"]
        self.log.info("using render plugin : {}".format(plugin))

        self.preflight_check(instance)

        self.log.info("Submitting..")
        self.log.info(json.dumps(payload, indent=4, sort_keys=True))

        # E.g. http://192.168.0.1:8082/api/jobs
        url = "{}/api/jobs".format(AVALON_DEADLINE)
        response = requests.post(url, json=payload)
        if not response.ok:
            raise Exception(response.text)

        # Store output dir for unified publisher (filesequence)
        instance.data["outputDir"] = os.path.dirname(output_filename_0)
        instance.data["deadlineSubmissionJob"] = response.json()

    def preflight_check(self, instance):
        """Ensure the startFrame, endFrame and byFrameStep are integers"""

        for key in ("startFrame", "endFrame", "byFrameStep"):
            value = instance.data[key]

            if int(value) == value:
                continue

            # Non-integer values are only warned about here; the actual
            # truncation happens when the Frames string is formatted above.
            self.log.warning(
                "%f=%d was rounded off to nearest integer"
                % (value, int(value))
            )

View file

@ -0,0 +1,274 @@
import getpass
import json
import os
from copy import deepcopy
import pyblish.api
from avalon import api
from avalon.vendor import requests
from maya import cmds
class VraySubmitDeadline(pyblish.api.InstancePlugin):
    """Export the scene to `.vrscene` files per frame per render layer

    vrscene files will be written out based on the following template:
        <project>/vrayscene/<Scene>/<Scene>_<Layer>/<Layer>

    A dependency job will be added for each layer to render the frames
    through VRay Standalone

    """

    label = "Submit to Deadline ( vrscene )"
    order = pyblish.api.IntegratorOrder
    hosts = ["maya"]
    families = ["vrayscene"]

    def process(self, instance):
        """Submit an export job and a dependent V-Ray Standalone render job.

        The first (MayaBatch) job exports per-frame .vrscene files; the
        second (Vray) job renders them and only runs after the first
        completes, via JobDependency0.
        """

        AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
                                          "http://localhost:8082")
        assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"

        context = instance.context

        deadline_url = "{}/api/jobs".format(AVALON_DEADLINE)
        deadline_user = context.data.get("deadlineUser", getpass.getuser())

        filepath = context.data["currentFile"]
        filename = os.path.basename(filepath)
        task_name = "{} - {}".format(filename, instance.name)
        batch_name = "{} - (vrscene)".format(filename)

        # Get the output template for vrscenes
        vrscene_output = instance.data["vrsceneOutput"]

        # This is also the input file for the render job
        first_file = self.format_output_filename(instance,
                                                 filename,
                                                 vrscene_output)

        start_frame = int(instance.data["startFrame"])
        end_frame = int(instance.data["endFrame"])

        # Primary job
        self.log.info("Submitting export job ..")

        payload = {
            "JobInfo": {
                # Top-level group name
                "BatchName": batch_name,

                # Job name, as seen in Monitor
                "Name": "Export {} [{}-{}]".format(task_name,
                                                   start_frame,
                                                   end_frame),

                # Arbitrary username, for visualisation in Monitor
                "UserName": deadline_user,

                "Plugin": "MayaBatch",
                "Frames": "{}-{}".format(start_frame, end_frame),
                "FramesPerTask": instance.data.get("framesPerTask", 1),

                "Comment": context.data.get("comment", ""),
                "OutputFilename0": os.path.dirname(first_file),
            },
            "PluginInfo": {
                # Renderer
                "Renderer": "vray",

                # Mandatory for Deadline
                "Version": cmds.about(version=True),

                # Input
                "SceneFile": filepath,

                "SkipExistingFrames": True,

                "UsingRenderLayers": True,

                "UseLegacyRenderLayers": True
            },

            # Mandatory for Deadline, may be empty
            "AuxFiles": []
        }

        # Toolchain the slave needs to launch Maya for the export
        environment = dict(AVALON_TOOLS="global;python36;maya2018")
        environment.update(api.Session.copy())

        jobinfo_environment = self.build_jobinfo_environment(environment)
        payload["JobInfo"].update(jobinfo_environment)

        self.log.info("Job Data:\n{}".format(json.dumps(payload)))

        response = requests.post(url=deadline_url, json=payload)
        if not response.ok:
            raise RuntimeError(response.text)

        # Secondary job
        # Store job to create dependency chain
        dependency = response.json()

        if instance.data["suspendRenderJob"]:
            self.log.info("Skipping render job and publish job")
            return

        self.log.info("Submitting render job ..")

        start_frame = int(instance.data["startFrame"])
        end_frame = int(instance.data["endFrame"])
        ext = instance.data.get("ext", "exr")

        # Create output directory for renders
        render_ouput = self.format_output_filename(instance,
                                                   filename,
                                                   instance.data["outputDir"],
                                                   dir=True)

        self.log.info("Render output: %s" % render_ouput)

        # Update output dir
        instance.data["outputDir"] = render_ouput

        # Format output file name
        sequence_filename = ".".join([instance.name, ext])
        output_filename = os.path.join(render_ouput, sequence_filename)

        # Ensure folder exists:
        if not os.path.exists(render_ouput):
            os.makedirs(render_ouput)

        payload_b = {
            "JobInfo": {
                # Only start rendering once the export job has finished
                "JobDependency0": dependency["_id"],
                "BatchName": batch_name,
                "Name": "Render {} [{}-{}]".format(task_name,
                                                   start_frame,
                                                   end_frame),
                "UserName": deadline_user,
                "Frames": "{}-{}".format(start_frame, end_frame),
                "Plugin": "Vray",
                "OverrideTaskExtraInfoNames": False,
                "OutputFilename0": render_ouput,
            },
            "PluginInfo": {
                "InputFilename": first_file,
                "OutputFilename": output_filename,
                "SeparateFilesPerFrame": True,
                "VRayEngine": "V-Ray",

                "Width": instance.data["resolution"][0],
                "Height": instance.data["resolution"][1],
            },
            "AuxFiles": [],
        }

        # Add vray renderslave to environment
        tools = environment["AVALON_TOOLS"] + ";vrayrenderslave"
        environment_b = deepcopy(environment)
        environment_b["AVALON_TOOLS"] = tools

        jobinfo_environment_b = self.build_jobinfo_environment(environment_b)
        payload_b["JobInfo"].update(jobinfo_environment_b)

        self.log.info(json.dumps(payload_b))

        # Post job to deadline
        response_b = requests.post(url=deadline_url, json=payload_b)
        if not response_b.ok:
            raise RuntimeError(response_b.text)

        # Add job for publish job
        if not instance.data.get("suspendPublishJob", False):
            instance.data["deadlineSubmissionJob"] = response_b.json()

    def build_command(self, instance):
        """Create command for Render.exe to export vray scene

        NOTE(review): not called anywhere within this plugin as shown -
        presumably consumed elsewhere (or dead code); verify before removal.

        Args:
            instance: The pyblish instance providing workspace, frame range
                and camera data.

        Returns:
            str

        """
        cmd = ('-r vray -proj {project} -cam {cam} -noRender -s {startFrame} '
               '-e {endFrame} -rl {layer} -exportFramesSeparate')

        # Get the camera
        cammera = instance.data["cameras"][0]

        return cmd.format(project=instance.context.data["workspaceDir"],
                          cam=cammera,
                          startFrame=instance.data["startFrame"],
                          endFrame=instance.data["endFrame"],
                          layer=instance.name)

    def build_jobinfo_environment(self, env):
        """Format environment keys and values to match Deadline requirements

        Args:
            env(dict): environment dictionary

        Returns:
            dict: ``{"EnvironmentKeyValue<i>": "KEY=VALUE", ...}`` entries
                ready to merge into a Deadline JobInfo payload.

        """
        return {"EnvironmentKeyValue%d" % index: "%s=%s" % (k, env[k])
                for index, k in enumerate(env)}

    def format_output_filename(self, instance, filename, template, dir=False):
        """Format the expected output file of the Export job

        Example:
            <Scene>/<Scene>_<Layer>/<Layer>
            "shot010_v006/shot010_v006_CHARS/CHARS"

        Args:
            instance: The pyblish instance (provides the layer name and
                start frame).
            filename(str): The scene file name; its extension is stripped.
            template(str): Path template containing <Scene>/<Layer> tokens.
            dir(bool): When True return the resolved path as a directory
                instead of the first frame's .vrscene file path.

        Returns:
            str

        """

        def smart_replace(string, key_values):
            # Replace each token with its value, sequentially
            new_string = string
            for key, value in key_values.items():
                new_string = new_string.replace(key, value)
            return new_string

        # Ensure filename has no extension
        file_name, _ = os.path.splitext(filename)

        # Reformat without tokens
        output_path = smart_replace(template,
                                    {"<Scene>": file_name,
                                     "<Layer>": instance.name})

        if dir:
            return output_path.replace("\\", "/")

        # First frame's file, zero-padded to 4 digits (e.g. "_0001.vrscene")
        start_frame = int(instance.data["startFrame"])
        filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame)

        result = filename_zero.replace("\\", "/")

        return result

View file

@ -17,7 +17,7 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder
families = ["animation", "pointcache"]
families = ['animation', "pointcache"]
hosts = ['maya']
label = 'Animation Out Set Related Node Ids'
actions = [pype.maya.action.SelectInvalidAction, pype.api.RepairAction]

View file

@ -15,7 +15,7 @@ class ValidateCameraAttributes(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder
families = ["camera"]
families = ['camera']
hosts = ['maya']
label = 'Camera Attributes'
actions = [pype.maya.action.SelectInvalidAction]

View file

@ -16,7 +16,7 @@ class ValidateCameraContents(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder
families = ["camera"]
families = ['camera']
hosts = ['maya']
label = 'Camera Contents'
actions = [pype.maya.action.SelectInvalidAction]

View file

@ -1,6 +1,7 @@
import pyblish.api
from maya import cmds
from pype.plugin import contextplugin_should_run
class ValidateCurrentRenderLayerIsRenderable(pyblish.api.ContextPlugin):
@ -20,7 +21,12 @@ class ValidateCurrentRenderLayerIsRenderable(pyblish.api.ContextPlugin):
hosts = ["maya"]
families = ["renderlayer"]
def process(self, instance):
def process(self, context):
# Workaround bug pyblish-base#250
if not contextplugin_should_run(self, context):
return
layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
cameras = cmds.ls(type="camera", long=True)
renderable = any(c for c in cameras if cmds.getAttr(c + ".renderable"))

View file

@ -1,8 +1,8 @@
import pyblish.api
import os
import avalon.api as api
from avalon.vendor import requests
from pype.plugin import contextplugin_should_run
class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
@ -13,12 +13,11 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
hosts = ["maya"]
families = ["renderlayer"]
def process(self, instance):
def process(self, context):
# AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
# "http://localhost:8082")
#
# assert AVALON_DEADLINE is not None, "Requires AVALON_DEADLINE"
# Workaround bug pyblish-base#250
if not contextplugin_should_run(self, context):
return
try:
deadline_url = os.environ["DEADLINE_REST_URL"]
@ -26,7 +25,7 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
self.log.error("Deadline REST API url not found.")
# Check response
response = requests.get(deadline_url)
response = requests.get(AVALON_DEADLINE)
assert response.ok, "Response must be ok"
assert response.text.startswith("Deadline Web Service "), (
"Web service did not respond with 'Deadline Web Service'"

View file

@ -12,7 +12,7 @@ class ValidateInstancerContent(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder
label = 'Instancer Content'
families = ["instancer"]
families = ['instancer']
def process(self, instance):

View file

@ -44,7 +44,7 @@ class ValidateInstancerFrameRanges(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder
label = 'Instancer Cache Frame Ranges'
families = ["instancer"]
families = ['instancer']
@classmethod
def get_invalid(cls, instance):

View file

@ -19,7 +19,7 @@ class ValidateJointsHidden(pyblish.api.InstancePlugin):
order = pype.api.ValidateContentsOrder
hosts = ['maya']
families = ["rig"]
families = ['rig']
category = 'rig'
version = (0, 1, 0)
label = "Joints Hidden"

View file

@ -18,7 +18,7 @@ class ValidateLookContents(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder
families = ["look"]
families = ['look']
hosts = ['maya']
label = 'Look Data Contents'
actions = [pype.maya.action.SelectInvalidAction]

View file

@ -17,7 +17,7 @@ class ValidateLookDefaultShadersConnections(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder
families = ["look"]
families = ['look']
hosts = ['maya']
label = 'Look Default Shader Connections'

View file

@ -17,7 +17,7 @@ class ValidateLookIdReferenceEdits(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder
families = ["look"]
families = ['look']
hosts = ['maya']
label = 'Look Id Reference Edits'
actions = [pype.maya.action.SelectInvalidAction,

View file

@ -23,7 +23,7 @@ class ValidateUniqueRelationshipMembers(pyblish.api.InstancePlugin):
order = pype.api.ValidatePipelineOrder
label = 'Look members unique'
hosts = ['maya']
families = ["look"]
families = ['look']
actions = [pype.maya.action.SelectInvalidAction,
pype.maya.action.GenerateUUIDsOnInvalidAction]

View file

@ -24,13 +24,13 @@ class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder + 0.01
families = ["look"]
families = ['look']
hosts = ['maya']
label = 'Look No Default Shaders'
actions = [pype.maya.action.SelectInvalidAction]
DEFAULT_SHADERS = {"lambert1", "initialShadingGroup",
"initialParticleSE", "particleCloud1"}
"initialParticleSE", "particleCloud1"}
def process(self, instance):
"""Process all the nodes in the instance"""

View file

@ -6,11 +6,11 @@ import pype.api
class ValidateLookSets(pyblish.api.InstancePlugin):
"""Validate if any sets are missing from the instance and look data
"""Validate if any sets relationships are not being collected.
A shader can be assigned to a node that is missing a Colorbleed ID.
Because it is missing the ID it has not been collected in the instance.
This validator ensures no relationships and thus considers it invalid
This validator ensures those relationships and thus considers it invalid
if a relationship was not collected.
When the relationship needs to be maintained the artist might need to
@ -25,8 +25,10 @@ class ValidateLookSets(pyblish.api.InstancePlugin):
- Displacement objectSets (like V-Ray):
It is best practice to add the transform group of the shape to the
displacement objectSet.
It is best practice to add the transform of the shape to the
displacement objectSet. Any parent groups will not work as groups
do not receive a Colorbleed Id. As such the assignments need to be
made to the shapes and their transform.
Example content:
[asset_GRP|geometry_GRP|body_GES,
@ -37,7 +39,7 @@ class ValidateLookSets(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder
families = ["look"]
families = ['look']
hosts = ['maya']
label = 'Look Sets'
actions = [pype.maya.action.SelectInvalidAction]
@ -70,13 +72,6 @@ class ValidateLookSets(pyblish.api.InstancePlugin):
# check if any objectSets are not present ion the relationships
missing_sets = [s for s in sets if s not in relationships]
for set in missing_sets:
if set.endswith("_SET"):
missing_sets.remove(set)
cls.log.info("Missing Sets "
"'{}'".format(missing_sets))
if missing_sets:
# A set of this node is not coming along, this is wrong!
cls.log.error("Missing sets '{}' for node "

View file

@ -13,7 +13,7 @@ class ValidateSingleShader(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder
families = ["look"]
families = ['look']
hosts = ['maya']
label = 'Look Single Shader Per Shape'
actions = [pype.maya.action.SelectInvalidAction]

View file

@ -36,7 +36,7 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
assert fps and fps == asset_fps, "Scene must be %s FPS" % asset_fps
@classmethod
def repair(cls):
def repair(cls, context):
"""Fix the current FPS setting of the scene, set to PAL(25.0 fps)"""
cls.log.info("Setting angular unit to 'degrees'")

View file

@ -47,7 +47,7 @@ class ValidateMeshHasUVs(pyblish.api.InstancePlugin):
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ["model"]
families = ['model']
category = 'geometry'
label = 'Mesh Has UVs'
actions = [pype.maya.action.SelectInvalidAction]

View file

@ -14,7 +14,7 @@ class ValidateMeshLaminaFaces(pyblish.api.InstancePlugin):
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ["model"]
families = ['model']
category = 'geometry'
version = (0, 1, 0)
label = 'Mesh Lamina Faces'

View file

@ -19,7 +19,7 @@ class ValidateMeshNoNegativeScale(pyblish.api.Validator):
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ["model"]
families = ['model']
label = 'Mesh No Negative Scale'
actions = [pype.maya.action.SelectInvalidAction]

View file

@ -15,7 +15,7 @@ class ValidateMeshNonManifold(pyblish.api.Validator):
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ["model"]
families = ['model']
label = 'Mesh Non-Manifold Vertices/Edges'
actions = [pype.maya.action.SelectInvalidAction]

View file

@ -17,7 +17,7 @@ class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateMeshOrder
families = ["model"]
families = ['model']
hosts = ['maya']
category = 'geometry'
version = (0, 1, 0)

View file

@ -15,7 +15,7 @@ class ValidateMeshNormalsUnlocked(pyblish.api.Validator):
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ["model"]
families = ['model']
category = 'geometry'
version = (0, 1, 0)
label = 'Mesh Normals Unlocked'

View file

@ -24,7 +24,7 @@ def get_invalid_sets(shape):
"""
invalid = []
sets = cmds.listSets(object=shape, t=1, extendToShape=False)
sets = cmds.listSets(object=shape, t=1, extendToShape=False) or []
for s in sets:
members = cmds.sets(s, query=True, nodesOnly=True)
if not members:
@ -75,7 +75,7 @@ class ValidateMeshShaderConnections(pyblish.api.InstancePlugin):
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ["model"]
families = ['model']
label = "Mesh Shader Connections"
actions = [pype.maya.action.SelectInvalidAction,
pype.api.RepairAction]
@ -93,7 +93,9 @@ class ValidateMeshShaderConnections(pyblish.api.InstancePlugin):
def get_invalid(instance):
shapes = cmds.ls(instance[:], dag=1, leaf=1, shapes=1, long=True)
shapes = cmds.ls(shapes, shapes=True, noIntermediate=True, long=True)
# todo: allow to check anything that can have a shader
shapes = cmds.ls(shapes, noIntermediate=True, long=True, type="mesh")
invalid = []
for shape in shapes:

View file

@ -17,7 +17,7 @@ class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin):
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ["model", 'studio.pointcache']
families = ['model', 'pointcache']
category = 'uv'
optional = True
version = (0, 1, 0)

View file

@ -17,7 +17,7 @@ class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin):
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ["model"]
families = ['model']
optional = True
label = "Mesh has map1 UV Set"
actions = [pype.maya.action.SelectInvalidAction,

View file

@ -59,7 +59,7 @@ class ValidateMeshVerticesHaveEdges(pyblish.api.InstancePlugin):
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ["model"]
families = ['model']
category = 'geometry'
label = 'Mesh Vertices Have Edges'
actions = [pype.maya.action.SelectInvalidAction,

View file

@ -63,7 +63,8 @@ class ValidateModelContent(pyblish.api.InstancePlugin):
cls.log.error("Must have exactly one top group")
if len(assemblies) == 0:
cls.log.warning("No top group found. "
"(Are there objects in the instance?)")
"(Are there objects in the instance?"
" Or is it parented in another group?)")
return assemblies or True
def _is_visible(node):

View file

@ -15,7 +15,7 @@ class ValidateNoDefaultCameras(pyblish.api.InstancePlugin):
order = pype.api.ValidateContentsOrder
hosts = ['maya']
families = ["camera"]
families = ['camera']
version = (0, 1, 0)
label = "No Default Cameras"
actions = [pype.maya.action.SelectInvalidAction]

View file

@ -18,7 +18,7 @@ class ValidateNoNamespace(pyblish.api.InstancePlugin):
order = pype.api.ValidateContentsOrder
hosts = ['maya']
families = ["model"]
families = ['model']
category = 'cleanup'
version = (0, 1, 0)
label = 'No Namespaces'

View file

@ -39,7 +39,7 @@ class ValidateNoNullTransforms(pyblish.api.InstancePlugin):
order = pype.api.ValidateContentsOrder
hosts = ['maya']
families = ["model"]
families = ['model']
category = 'cleanup'
version = (0, 1, 0)
label = 'No Empty/Null Transforms'

View file

@ -18,7 +18,7 @@ class ValidateNoUnknownNodes(pyblish.api.InstancePlugin):
order = pype.api.ValidateContentsOrder
hosts = ['maya']
families = ["model", 'studio.rig']
families = ['model', 'rig']
optional = True
label = "Unknown Nodes"
actions = [pype.maya.action.SelectInvalidAction]

View file

@ -17,7 +17,7 @@ class ValidateNodeIdsDeformedShape(pyblish.api.InstancePlugin):
"""
order = pype.api.ValidateContentsOrder
families = ["look"]
families = ['look']
hosts = ['maya']
label = 'Deformed shape ids'
actions = [pype.maya.action.SelectInvalidAction, pype.api.RepairAction]

View file

@ -23,7 +23,8 @@ class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin):
hosts = ['maya']
families = ["*"]
actions = [pype.maya.action.SelectInvalidAction]
actions = [pype.maya.action.SelectInvalidAction,
pype.maya.action.GenerateUUIDsOnInvalidAction]
def process(self, instance):
invalid = self.get_invalid(instance)

Some files were not shown because too many files have changed in this diff Show more