diff --git a/pype/action.py b/pype/action.py index ea1f5e13f4..3fc6dd1a8f 100644 --- a/pype/action.py +++ b/pype/action.py @@ -87,6 +87,4 @@ class RepairContextAction(pyblish.api.Action): # Apply pyblish.logic to get the instances for the plug-in if plugin in errored_plugins: self.log.info("Attempting fix ...") - plugin.repair() - - + plugin.repair(context) diff --git a/pype/houdini/__init__.py b/pype/houdini/__init__.py index 02309bde7f..f432a4a5e5 100644 --- a/pype/houdini/__init__.py +++ b/pype/houdini/__init__.py @@ -35,10 +35,11 @@ def install(): log.info("Installing callbacks ... ") avalon.on("init", on_init) + avalon.before("save", before_save) avalon.on("save", on_save) avalon.on("open", on_open) - log.info("Overriding existing event 'taskChanged'") + pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) log.info("Setting default family states for loader..") avalon.data["familiesStateToggled"] = ["imagesequence"] @@ -48,6 +49,10 @@ def on_init(*args): houdini.on_houdini_initialize() +def before_save(*args): + return lib.validate_fps() + + def on_save(*args): avalon.logger.info("Running callback on save..") @@ -72,7 +77,6 @@ def on_open(*args): # Get main window parent = hou.ui.mainQtWindow() - if parent is None: log.info("Skipping outdated content pop-up " "because Maya window can't be found.") @@ -89,3 +93,20 @@ def on_open(*args): "your Maya scene.") dialog.on_show.connect(_on_show_inventory) dialog.show() + + +def on_pyblish_instance_toggled(instance, new_value, old_value): + """Toggle saver tool passthrough states on instance toggles.""" + + nodes = instance[:] + if not nodes: + return + + # Assume instance node is first node + instance_node = nodes[0] + + if instance_node.isBypassed() != (not old_value): + print("%s old bypass state didn't match old instance state, " + "updating anyway.." 
% instance_node.path()) + + instance_node.bypass(not new_value) diff --git a/pype/houdini/lib.py b/pype/houdini/lib.py index 78adbc5790..e1e95912ee 100644 --- a/pype/houdini/lib.py +++ b/pype/houdini/lib.py @@ -4,15 +4,17 @@ from contextlib import contextmanager import hou +from pype import lib + from avalon import api, io -from avalon.houdini import lib +from avalon.houdini import lib as houdini def set_id(node, unique_id, overwrite=False): exists = node.parm("id") if not exists: - lib.imprint(node, {"id": unique_id}) + houdini.imprint(node, {"id": unique_id}) if not exists and overwrite: node.setParm("id", unique_id) @@ -188,3 +190,45 @@ def attribute_values(node, data): pass finally: node.setParms(previous_attrs) + + +def set_scene_fps(fps): + hou.setFps(fps) + + +# Valid FPS +def validate_fps(): + """Validate current scene FPS and show pop-up when it is incorrect + + Returns: + bool + + """ + + fps = lib.get_asset_fps() + current_fps = hou.fps() # returns float + + if current_fps != fps: + + from ..widgets import popup + + # Find main window + parent = hou.ui.mainQtWindow() + if parent is None: + pass + else: + dialog = popup.Popup2(parent=parent) + dialog.setModal(True) + dialog.setWindowTitle("Maya scene not in line with project") + dialog.setMessage("The FPS is out of sync, please fix") + + # Set new text for button (add optional argument for the popup?) 
+ toggle = dialog.widgets["toggle"] + toggle.setEnabled(False) + dialog.on_show.connect(lambda: set_scene_fps(fps)) + + dialog.show() + + return False + + return True diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index 288cdc8150..68af530bcd 100644 --- a/pype/maya/__init__.py +++ b/pype/maya/__init__.py @@ -99,12 +99,23 @@ def on_init(_): except Exception as exc: print(exc) + # Force load Alembic so referenced alembics + # work correctly on scene open cmds.loadPlugin("AbcImport", quiet=True) cmds.loadPlugin("AbcExport", quiet=True) - from .customize import override_component_mask_commands + # Force load objExport plug-in (requested by artists) + cmds.loadPlugin("objExport", quiet=True) + + from .customize import ( + override_component_mask_commands, + override_toolbox_ui + ) safe_deferred(override_component_mask_commands) + if not IS_HEADLESS: + safe_deferred(override_toolbox_ui) + def on_before_save(return_code, _): """Run validation for scene's FPS prior to saving""" @@ -120,8 +131,8 @@ def on_save(_): avalon.logger.info("Running callback on save..") - # # Update current task for the current scene - # update_task_from_path(cmds.file(query=True, sceneName=True)) + # Update current task for the current scene + update_task_from_path(cmds.file(query=True, sceneName=True)) # Generate ids of the current context on nodes in the scene nodes = lib.get_id_required_nodes(referenced_nodes=False) diff --git a/pype/maya/customize.py b/pype/maya/customize.py index 64f33d5aae..46c9ceb652 100644 --- a/pype/maya/customize.py +++ b/pype/maya/customize.py @@ -3,6 +3,7 @@ import maya.cmds as mc import maya.mel as mel from functools import partial +import os import logging @@ -17,7 +18,7 @@ def override_component_mask_commands(): This implements special behavior for Maya's component mask menu items where a ctrl+click will instantly make it an isolated behavior disabling all others. 
- + Tested in Maya 2016 and 2018 """ @@ -64,3 +65,93 @@ def override_component_mask_commands(): original = COMPONENT_MASK_ORIGINAL[btn] new_fn = partial(on_changed_callback, original) mc.iconTextCheckBox(btn, edit=True, cc=new_fn) + + +def override_toolbox_ui(): + """Add custom buttons in Toolbox as replacement for Maya web help icon.""" + + import pype + res = os.path.join(os.path.dirname(os.path.dirname(pype.__file__)), + "res") + icons = os.path.join(res, "icons") + + import avalon.tools.cbsceneinventory as inventory + import avalon.tools.cbloader as loader + from avalon.maya.pipeline import launch_workfiles_app + + # Ensure the maya web icon on toolbox exists + web_button = "ToolBox|MainToolboxLayout|mayaWebButton" + if not mc.iconTextButton(web_button, query=True, exists=True): + return + + mc.iconTextButton(web_button, edit=True, visible=False) + + # real = 32, but 36 with padding - according to toolbox mel script + icon_size = 36 + parent = web_button.rsplit("|", 1)[0] + + # Ensure the parent is a formLayout + if not mc.objectTypeUI(parent) == "formLayout": + return + + # Create our controls + background_color = (0.267, 0.267, 0.267) + controls = [] + + control = mc.iconTextButton( + "pype_toolbox_workfiles", + annotation="Work Files", + label="Work Files", + image=os.path.join(icons, "workfiles.png"), + command=lambda: launch_workfiles_app(), + bgc=background_color, + width=icon_size, + height=icon_size, + parent=parent) + controls.append(control) + + control = mc.iconTextButton( + "pype_toolbox_loader", + annotation="Loader", + label="Loader", + image=os.path.join(icons, "loader.png"), + command=lambda: loader.show(use_context=True), + bgc=background_color, + width=icon_size, + height=icon_size, + parent=parent) + controls.append(control) + + control = mc.iconTextButton( + "pype_toolbox_manager", + annotation="Inventory", + label="Inventory", + image=os.path.join(icons, "inventory.png"), + command=lambda: inventory.show(), + bgc=background_color, + 
width=icon_size, + height=icon_size, + parent=parent) + controls.append(control) + + control = mc.iconTextButton( + "pype_toolbox", + annotation="Colorbleed", + label="Colorbleed", + image=os.path.join(icons, "pype_logo_36x36.png"), + bgc=background_color, + width=icon_size, + height=icon_size, + parent=parent) + controls.append(control) + + # Add the buttons on the bottom and stack + # them above each other with side padding + controls.reverse() + for i, control in enumerate(controls): + previous = controls[i - 1] if i > 0 else web_button + + mc.formLayout(parent, edit=True, + attachControl=[control, "bottom", 0, previous], + attachForm=([control, "left", 1], + [control, "right", 1])) diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 8957fd3de2..13aae92ec9 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -521,12 +521,15 @@ def no_undo(flush=False): cmds.undoInfo(**{keyword: original}) -def get_shader_assignments_from_shapes(shapes): +def get_shader_assignments_from_shapes(shapes, components=True): """Return the shape assignment per related shading engines. Returns a dictionary where the keys are shadingGroups and the values are lists of assigned shapes or shape-components. + Since `maya.cmds.sets` returns shader members on the shapes as components + on the transform we correct that in this method too. + For the 'shapes' this will return a dictionary like: { "shadingEngineX": ["nodeX", "nodeY"], @@ -535,6 +538,7 @@ def get_shader_assignments_from_shapes(shapes): Args: shapes (list): The shapes to collect the assignments for. + components (bool): Whether to include the component assignments. 
Returns: dict: The {shadingEngine: shapes} relationships @@ -543,7 +547,6 @@ def get_shader_assignments_from_shapes(shapes): shapes = cmds.ls(shapes, long=True, - selection=True, shapes=True, objectsOnly=True) if not shapes: @@ -562,7 +565,37 @@ def get_shader_assignments_from_shapes(shapes): type="shadingEngine") or [] shading_groups = list(set(shading_groups)) for shading_group in shading_groups: - assignments[shading_group].add(shape) + assignments[shading_group].append(shape) + + if components: + # Note: Components returned from maya.cmds.sets are "listed" as if + # being assigned to the transform like: pCube1.f[0] as opposed + # to pCubeShape1.f[0] so we correct that here too. + + # Build a mapping from parent to shapes to include in lookup. + transforms = {shape.rsplit("|", 1)[0]: shape for shape in shapes} + lookup = set(shapes + transforms.keys()) + + component_assignments = defaultdict(list) + for shading_group in assignments.keys(): + members = cmds.ls(cmds.sets(shading_group, query=True), long=True) + for member in members: + + node = member.split(".", 1)[0] + if node not in lookup: + continue + + # Component + if "." 
in member: + + # Fix transform to shape as shaders are assigned to shapes + if node in transforms: + shape = transforms[node] + component = member.split(".", 1)[1] + member = "{0}.{1}".format(shape, component) + + component_assignments[shading_group].append(member) + assignments = component_assignments return dict(assignments) @@ -571,7 +604,7 @@ def get_shader_assignments_from_shapes(shapes): def shader(nodes, shadingEngine="initialShadingGroup"): """Assign a shader to nodes during the context""" - shapes = cmds.ls(nodes, dag=1, o=1, shapes=1, long=1) + shapes = cmds.ls(nodes, dag=1, objectsOnly=1, shapes=1, long=1) original = get_shader_assignments_from_shapes(shapes) try: @@ -584,7 +617,7 @@ def shader(nodes, shadingEngine="initialShadingGroup"): # Assign original shaders for sg, members in original.items(): if members: - cmds.sets(shapes, edit=True, forceElement=shadingEngine) + cmds.sets(members, edit=True, forceElement=sg) @contextlib.contextmanager @@ -929,6 +962,18 @@ def extract_alembic(file, raise TypeError("Alembic option unsupported type: " "{0} (expected {1})".format(value, valid_types)) + # Ignore empty values, like an empty string, since they mess up how + # job arguments are built + if isinstance(value, (list, tuple)): + value = [x for x in value if x.strip()] + + # Ignore option completely if no values remaining + if not value: + options.pop(key) + continue + + options[key] = value + # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+ maya_version = int(cmds.about(version=True)) if maya_version >= 2018: @@ -995,9 +1040,14 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None): nodes (set): list of filtered nodes """ + lookup = None if nodes is None: # Consider all nodes nodes = cmds.ls() + else: + # Build a lookup for the only allowed nodes in output based + # on `nodes` input of the function (+ ensure long names) + lookup = set(cmds.ls(nodes, long=True)) def _node_type_exists(node_type): try: @@ -1006,8 +1056,8 @@ def 
get_id_required_nodes(referenced_nodes=False, nodes=None): except RuntimeError: return False - # `readOnly` flag is obsolete as of Maya 2016 therefor we explicitly remove - # default nodes and reference nodes + # `readOnly` flag is obsolete as of Maya 2016 therefore we explicitly + # remove default nodes and reference nodes camera_shapes = ["frontShape", "sideShape", "topShape", "perspShape"] ignore = set() @@ -1031,8 +1081,7 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None): if cmds.pluginInfo("pgYetiMaya", query=True, loaded=True): types.append("pgYetiMaya") - # We *always* ignore intermediate shapes, so we filter them out - # directly + # We *always* ignore intermediate shapes, so we filter them out directly nodes = cmds.ls(nodes, type=types, long=True, noIntermediate=True) # The items which need to pass the id to their parent @@ -1049,6 +1098,12 @@ def get_id_required_nodes(referenced_nodes=False, nodes=None): if not nodes: return nodes + # Ensure only nodes from the input `nodes` are returned when a + # filter was applied on function call because we also iterated + # to parents and alike + if lookup is not None: + nodes &= lookup + # Avoid locked nodes nodes_list = list(nodes) locked = cmds.lockNode(nodes_list, query=True, lock=True) @@ -2051,7 +2106,6 @@ def bake_to_world_space(nodes, return world_space_nodes - def load_capture_preset(path): import capture_gui import capture @@ -2150,3 +2204,89 @@ def load_capture_preset(path): # options['display_options'] = temp_options return options + +def get_attr_in_layer(attr, layer): + """Return attribute value in specified renderlayer. + + Same as cmds.getAttr but this gets the attribute's value in a + given render layer without having to switch to it. + + Warning for parent attribute overrides: + Attributes that have render layer overrides to their parent attribute + are not captured correctly since they do not have a direct connection. 
+ For example, an override to sphere.rotate when querying sphere.rotateX + will not return correctly! + + Note: This is much faster for Maya's renderLayer system, yet the code + does no optimized query for render setup. + + Args: + attr (str): attribute name, ex. "node.attribute" + layer (str): layer name + + Returns: + The return value from `maya.cmds.getAttr` + + """ + + if cmds.mayaHasRenderSetup(): + log.debug("lib.get_attr_in_layer is not optimized for render setup") + with renderlayer(layer): + return cmds.getAttr(attr) + + # Ignore complex query if we're in the layer anyway + current_layer = cmds.editRenderLayerGlobals(query=True, + currentRenderLayer=True) + if layer == current_layer: + return cmds.getAttr(attr) + + connections = cmds.listConnections(attr, + plugs=True, + source=False, + destination=True, + type="renderLayer") or [] + connections = filter(lambda x: x.endswith(".plug"), connections) + if not connections: + return cmds.getAttr(attr) + + # Some value types perform a conversion when assigning + # TODO: See if there's a maya method to allow this conversion + # instead of computing it ourselves. 
+ attr_type = cmds.getAttr(attr, type=True) + conversion = None + if attr_type == "time": + conversion = mel.eval('currentTimeUnitToFPS()') # returns float + elif attr_type == "doubleAngle": + # Radians to Degrees: 180 / pi + # TODO: This will likely only be correct when Maya units are set + # to degrees + conversion = 57.2957795131 + elif attr_type == "doubleLinear": + raise NotImplementedError("doubleLinear conversion not implemented.") + + for connection in connections: + if connection.startswith(layer + "."): + attr_split = connection.split(".") + if attr_split[0] == layer: + attr = ".".join(attr_split[0:-1]) + value = cmds.getAttr("%s.value" % attr) + if conversion: + value *= conversion + return value + + else: + # When connections are present, but none + # to the specific renderlayer than the layer + # should have the "defaultRenderLayer"'s value + layer = "defaultRenderLayer" + for connection in connections: + if connection.startswith(layer): + attr_split = connection.split(".") + if attr_split[0] == "defaultRenderLayer": + attr = ".".join(attr_split[0:-1]) + value = cmds.getAttr("%s.value" % attr) + if conversion: + value *= conversion + return value + + return cmds.getAttr(attr) diff --git a/pype/plugin.py b/pype/plugin.py index 0ba1fe5ded..f88cd6e34b 100644 --- a/pype/plugin.py +++ b/pype/plugin.py @@ -32,3 +32,37 @@ class Extractor(pyblish.api.InstancePlugin): instance.data['stagingDir'] = staging_dir return staging_dir + + +def contextplugin_should_run(plugin, context): + """Return whether the ContextPlugin should run on the given context. + + This is a helper function to work around a bug pyblish-base#250 + Whenever a ContextPlugin sets specific families it will still trigger even + when no instances are present that have those families. + + This actually checks it correctly and returns whether it should run. 
+ + """ + required = set(plugin.families) + + # When no filter always run + if "*" in required: + return True + + for instance in context: + + # Ignore inactive instances + if (not instance.data.get("publish", True) or + not instance.data.get("active", True)): + continue + + families = instance.data.get("families", []) + if any(f in required for f in families): + return True + + family = instance.data.get("family") + if family and family in required: + return True + + return False diff --git a/pype/plugins/fusion/inventory/set_tool_color.py b/pype/plugins/fusion/inventory/set_tool_color.py index f2357c2b3e..940a0e9941 100644 --- a/pype/plugins/fusion/inventory/set_tool_color.py +++ b/pype/plugins/fusion/inventory/set_tool_color.py @@ -1,10 +1,10 @@ from avalon import api, style from avalon.vendor.Qt import QtGui, QtWidgets -import avalon.nuke +import avalon.fusion -class NukeSetToolColor(api.InventoryAction): +class FusionSetToolColor(api.InventoryAction): """Update the color of the selected tools""" label = "Set Tool Color" @@ -16,20 +16,15 @@ class NukeSetToolColor(api.InventoryAction): """Color all selected tools the selected colors""" result = [] + comp = avalon.fusion.get_current_comp() # Get tool color first = containers[0] - node = first["_tool"] - color = node["tile_color"].value() - hex = '%08x' % color - rgba = [ - float(int(hex[0:2], 16)) / 255.0, - float(int(hex[2:4], 16)) / 255.0, - float(int(hex[4:6], 16)) / 255.0 - ] + tool = first["_tool"] + color = tool.TileColor if color is not None: - qcolor = QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2]) + qcolor = QtGui.QColor().fromRgbF(color["R"], color["G"], color["B"]) else: qcolor = self._fallback_color @@ -38,21 +33,15 @@ class NukeSetToolColor(api.InventoryAction): if not picked_color: return - with avalon.nuke.viewer_update_and_undo_stop(): + with avalon.fusion.comp_lock_and_undo_chunk(comp): for container in containers: # Convert color to RGB 0-1 floats rgb_f = picked_color.getRgbF() - hexColour = 
int( - '%02x%02x%02x%02x' % ( - rgb_f[0]*255, - rgb_f[1]*255, - rgb_f[2]*255, - 1), - 16 - ) + rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]} + # Update tool - node = container["_tool"] - node['tile_color'].value(hexColour) + tool = container["_tool"] + tool.TileColor = rgb_f_table result.append(container) diff --git a/pype/plugins/fusion/publish/collect_render_target.py b/pype/plugins/fusion/publish/collect_render_target.py index df0a3bbe75..b6217f1ddf 100644 --- a/pype/plugins/fusion/publish/collect_render_target.py +++ b/pype/plugins/fusion/publish/collect_render_target.py @@ -13,7 +13,7 @@ class CollectFusionRenderMode(pyblish.api.InstancePlugin): available tool does not visualize which render mode is set for the current comp, please run the following line in the console (Py2) - comp.GetData("rendermode") + comp.GetData("pype.rendermode") This will return the name of the current render mode as seen above under Options. @@ -34,7 +34,7 @@ class CollectFusionRenderMode(pyblish.api.InstancePlugin): raise RuntimeError("No comp previously collected, unable to " "retrieve Fusion version.") - rendermode = comp.GetData("rendermode") or "renderlocal" + rendermode = comp.GetData("pype.rendermode") or "renderlocal" assert rendermode in options, "Must be supported render mode" self.log.info("Render mode: {0}".format(rendermode)) diff --git a/pype/plugins/fusion/publish/publish_image_sequences.py b/pype/plugins/fusion/publish/publish_image_sequences.py index e8b468e94a..26ae74676f 100644 --- a/pype/plugins/fusion/publish/publish_image_sequences.py +++ b/pype/plugins/fusion/publish/publish_image_sequences.py @@ -14,7 +14,7 @@ def _get_script(): # todo: use a more elegant way to get the python script try: - from pype.fusion.scripts import publish_filesequence + from pype.scripts import publish_filesequence except Exception: raise RuntimeError("Expected module 'publish_imagesequence'" "to be available") diff --git 
a/pype/plugins/global/publish/collect_deadline_user.py b/pype/plugins/global/publish/collect_deadline_user.py new file mode 100644 index 0000000000..624e455251 --- /dev/null +++ b/pype/plugins/global/publish/collect_deadline_user.py @@ -0,0 +1,57 @@ +import os +import subprocess + +import pyblish.api +from pype.plugin import contextplugin_should_run + +CREATE_NO_WINDOW = 0x08000000 + + +def deadline_command(cmd): + # Find Deadline + path = os.environ.get("DEADLINE_PATH", None) + assert path is not None, "Variable 'DEADLINE_PATH' must be set" + + executable = os.path.join(path, "deadlinecommand") + if os.name == "nt": + executable += ".exe" + assert os.path.exists( + executable), "Deadline executable not found at %s" % executable + assert cmd, "Must have a command" + + query = (executable, cmd) + + process = subprocess.Popen(query, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + creationflags=CREATE_NO_WINDOW) + out, err = process.communicate() + + return out + + +class CollectDeadlineUser(pyblish.api.ContextPlugin): + """Retrieve the local active Deadline user""" + + order = pyblish.api.CollectorOrder + 0.499 + label = "Deadline User" + hosts = ['maya', 'fusion'] + families = ["renderlayer", "saver.deadline"] + + def process(self, context): + """Inject the current working file""" + + # Workaround bug pyblish-base#250 + if not contextplugin_should_run(self, context): + return + + user = deadline_command("GetCurrentUserName").strip() + + if not user: + self.log.warning("No Deadline user found. 
" + "Do you have Deadline installed?") + return + + self.log.info("Found Deadline user: {}".format(user)) + context.data['deadlineUser'] = user + diff --git a/pype/plugins/global/publish/collect_machine_name.py b/pype/plugins/global/publish/collect_machine_name.py new file mode 100644 index 0000000000..02360cff04 --- /dev/null +++ b/pype/plugins/global/publish/collect_machine_name.py @@ -0,0 +1,14 @@ +import pyblish.api + + +class CollectMachineName(pyblish.api.ContextPlugin): + label = "Local Machine Name" + order = pyblish.api.CollectorOrder + hosts = ["*"] + + def process(self, context): + import socket + + machine_name = socket.gethostname() + self.log.info("Machine name: %s" % machine_name) + context.data["machine"] = machine_name diff --git a/pype/plugins/global/publish/integrate.py b/pype/plugins/global/publish/integrate.py index 781f388032..f7c14d990c 100644 --- a/pype/plugins/global/publish/integrate.py +++ b/pype/plugins/global/publish/integrate.py @@ -303,7 +303,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): self.log.info("Subset '%s' not found, creating.." 
% subset_name) _id = io.insert_one({ - "schema": "pype:subset-2.0", + "schema": "avalon-core:subset-2.0", "type": "subset", "name": subset_name, "data": {}, @@ -329,7 +329,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): version_locations = [location for location in locations if location is not None] - return {"schema": "pype:version-2.0", + return {"schema": "avalon-core:version-2.0", "type": "version", "parent": subset["_id"], "name": version_number, @@ -370,7 +370,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin): "time": context.data["time"], "author": context.data["user"], "source": source, - "comment": context.data.get("comment")} + "comment": context.data.get("comment"), + "machine": context.data.get("machine"), + "fps": context.data.get("fps")} # Include optional data if present in optionals = ["startFrame", "endFrame", "step", "handles"] diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index c7ec295ae5..e4a674dc65 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -1,5 +1,6 @@ import os import json +import pprint import re from avalon import api, io @@ -11,7 +12,7 @@ import pyblish.api def _get_script(): """Get path to the image sequence script""" try: - from pype.fusion.scripts import publish_filesequence + from pype.scripts import publish_filesequence except Exception as e: raise RuntimeError("Expected module 'publish_imagesequence'" "to be available") @@ -156,15 +157,18 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin): subset=subset ) - # Add in start/end frame + # Get start/end frame from instance, if not available get from context context = instance.context - start = instance.data.get("startFrame", context.data["startFrame"]) - end = instance.data.get("endFrame", context.data["endFrame"]) - resources = [] + start = instance.data.get("startFrame") + if start is None: + start = 
context.data["startFrame"] + end = instance.data.get("endFrame") + if end is None: + end = context.data["endFrame"] # Add in regex for sequence filename # This assumes the output files start with subset name and ends with - # a file extension. + # a file extension. The "ext" key includes the dot with the extension. if "ext" in instance.data: ext = re.escape(instance.data["ext"]) else: diff --git a/pype/plugins/houdini/create/create_alembic_camera.py b/pype/plugins/houdini/create/create_alembic_camera.py index fbd8ca8fe8..cf8ac41b62 100644 --- a/pype/plugins/houdini/create/create_alembic_camera.py +++ b/pype/plugins/houdini/create/create_alembic_camera.py @@ -1,9 +1,8 @@ -from collections import OrderedDict - from avalon import houdini class CreateAlembicCamera(houdini.Creator): + """Single baked camera from Alembic ROP""" name = "camera" label = "Camera (Abc)" @@ -22,13 +21,25 @@ class CreateAlembicCamera(houdini.Creator): def process(self): instance = super(CreateAlembicCamera, self).process() - parms = {"use_sop_path": True, - "build_from_path": True, - "path_attrib": "path", - "filename": "$HIP/pyblish/%s.abc" % self.name} + parms = { + "filename": "$HIP/pyblish/%s.abc" % self.name, + "use_sop_path": False + } if self.nodes: node = self.nodes[0] - parms.update({"sop_path": node.path()}) + path = node.path() + + # Split the node path into the first root and the remainder + # So we can set the root and objects parameters correctly + _, root, remainder = path.split("/", 2) + parms.update({ + "root": "/" + root, + "objects": remainder + }) instance.setParms(parms) + + # Lock the Use Sop Path setting so the + # user doesn't accidentally enable it. 
+ instance.parm("use_sop_path").lock(True) diff --git a/pype/plugins/houdini/create/create_pointcache.py b/pype/plugins/houdini/create/create_pointcache.py index fd470b3235..ae7e845083 100644 --- a/pype/plugins/houdini/create/create_pointcache.py +++ b/pype/plugins/houdini/create/create_pointcache.py @@ -2,7 +2,7 @@ from avalon import houdini class CreatePointCache(houdini.Creator): - """Alembic pointcache for animated data""" + """Alembic ROP to pointcache""" name = "pointcache" label = "Point Cache" @@ -22,7 +22,7 @@ class CreatePointCache(houdini.Creator): parms = {"use_sop_path": True, # Export single node from SOP Path "build_from_path": True, # Direct path of primitive in output - "path_attrib": "path", # Pass path attribute for output\ + "path_attrib": "path", # Pass path attribute for output "prim_to_detail_pattern": "cbId", "format": 2, # Set format to Ogawa "filename": "$HIP/pyblish/%s.abc" % self.name} diff --git a/pype/plugins/houdini/create/create_vbd_cache.py b/pype/plugins/houdini/create/create_vbd_cache.py index 2b82a52cf4..e862d5c96d 100644 --- a/pype/plugins/houdini/create/create_vbd_cache.py +++ b/pype/plugins/houdini/create/create_vbd_cache.py @@ -2,7 +2,7 @@ from avalon import houdini class CreateVDBCache(houdini.Creator): - """Alembic pointcache for animated data""" + """OpenVDB from Geometry ROP""" name = "vbdcache" label = "VDB Cache" @@ -15,10 +15,8 @@ class CreateVDBCache(houdini.Creator): # Remove the active, we are checking the bypass flag of the nodes self.data.pop("active", None) - self.data.update({ - "node_type": "geometry", # Set node type to create for output - "executeBackground": True # Render node in background - }) + # Set node type to create for output + self.data["node_type"] = "geometry" def process(self): instance = super(CreateVDBCache, self).process() @@ -28,6 +26,6 @@ class CreateVDBCache(houdini.Creator): if self.nodes: node = self.nodes[0] - parms.update({"sop_path": node.path()}) + parms.update({"soppath": 
node.path()}) instance.setParms(parms) diff --git a/pype/plugins/houdini/publish/collect_current_file.py b/pype/plugins/houdini/publish/collect_current_file.py index 7852943b34..b35a943833 100644 --- a/pype/plugins/houdini/publish/collect_current_file.py +++ b/pype/plugins/houdini/publish/collect_current_file.py @@ -1,3 +1,4 @@ +import os import hou import pyblish.api @@ -12,4 +13,22 @@ class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin): def process(self, context): """Inject the current working file""" - context.data['currentFile'] = hou.hipFile.path() + + filepath = hou.hipFile.path() + if not os.path.exists(filepath): + # By default Houdini will even point a new scene to a path. + # However if the file is not saved at all and does not exist, + # we assume the user never set it. + filepath = "" + + elif os.path.basename(filepath) == "untitled.hip": + # Due to even a new file being called 'untitled.hip' we are unable + # to confirm the current scene was ever saved because the file + # could have existed already. We will allow it if the file exists, + # but show a warning for this edge case to clarify the potential + # false positive. + self.log.warning("Current file is 'untitled.hip' and we are " + "unable to detect whether the current scene is " + "saved correctly.") + + context.data['currentFile'] = filepath diff --git a/pype/plugins/houdini/publish/collect_instances.py b/pype/plugins/houdini/publish/collect_instances.py index 61d4cdbe0b..5f9fc7d6c9 100644 --- a/pype/plugins/houdini/publish/collect_instances.py +++ b/pype/plugins/houdini/publish/collect_instances.py @@ -15,8 +15,8 @@ class CollectInstances(pyblish.api.ContextPlugin): id (str): "pyblish.avalon.instance Specific node: - The specific node is important because it dictates in which way the subset - is being exported. + The specific node is important because it dictates in which way the + subset is being exported. 
alembic: will export Alembic file which supports cascading attributes like 'cbId' and 'path' @@ -30,8 +30,6 @@ class CollectInstances(pyblish.api.ContextPlugin): def process(self, context): - instances = [] - nodes = hou.node("/out").children() for node in nodes: @@ -55,11 +53,9 @@ class CollectInstances(pyblish.api.ContextPlugin): data.update(self.get_frame_data(node)) - # Create nice name - # All nodes in the Outputs graph have the 'Valid Frame Range' - # attribute, we check here if any frames are set + # Create nice name if the instance has a frame range. label = data.get("name", node.name()) - if "startFrame" in data: + if "startFrame" in data and "endFrame" in data: frames = "[{startFrame} - {endFrame}]".format(**data) label = "{} {}".format(label, frames) @@ -68,8 +64,6 @@ class CollectInstances(pyblish.api.ContextPlugin): instance[:] = [node] instance.data.update(data) - instances.append(instance) - def sort_by_family(instance): """Sort by family""" return instance.data.get("families", instance.data.get("family")) diff --git a/pype/plugins/houdini/publish/collect_output_node.py b/pype/plugins/houdini/publish/collect_output_node.py index dbfe8a5890..c0587d5336 100644 --- a/pype/plugins/houdini/publish/collect_output_node.py +++ b/pype/plugins/houdini/publish/collect_output_node.py @@ -1,13 +1,14 @@ import pyblish.api -class CollectOutputNode(pyblish.api.InstancePlugin): - """Collect the out node which of the instance""" +class CollectOutputSOPPath(pyblish.api.InstancePlugin): + """Collect the out node's SOP Path value.""" order = pyblish.api.CollectorOrder - families = ["*"] + families = ["pointcache", + "vdbcache"] hosts = ["houdini"] - label = "Collect Output Node" + label = "Collect Output SOP Path" def process(self, instance): diff --git a/pype/plugins/houdini/publish/collect_workscene_fps.py b/pype/plugins/houdini/publish/collect_workscene_fps.py new file mode 100644 index 0000000000..c145eea519 --- /dev/null +++ 
b/pype/plugins/houdini/publish/collect_workscene_fps.py @@ -0,0 +1,15 @@ +import pyblish.api +import hou + + +class CollectWorksceneFPS(pyblish.api.ContextPlugin): + """Get the FPS of the work scene""" + + label = "Workscene FPS" + order = pyblish.api.CollectorOrder + hosts = ["houdini"] + + def process(self, context): + fps = hou.fps() + self.log.info("Workscene FPS: %s" % fps) + context.data.update({"fps": fps}) diff --git a/pype/plugins/houdini/publish/extract_alembic.py b/pype/plugins/houdini/publish/extract_alembic.py index cb5219f9f1..f40f3d2d0e 100644 --- a/pype/plugins/houdini/publish/extract_alembic.py +++ b/pype/plugins/houdini/publish/extract_alembic.py @@ -13,6 +13,8 @@ class ExtractAlembic(pype.api.Extractor): def process(self, instance): + import hou + ropnode = instance[0] # Get the filename from the filename parameter @@ -23,8 +25,17 @@ class ExtractAlembic(pype.api.Extractor): file_name = os.path.basename(output) # We run the render - self.log.info("Writing alembic '%s' to '%s'" % (file_name, staging_dir)) - ropnode.render() + self.log.info("Writing alembic '%s' to '%s'" % (file_name, + staging_dir)) + try: + ropnode.render() + except hou.Error as exc: + # The hou.Error is not inherited from a Python Exception class, + # so we explicitly capture the houdini error, otherwise pyblish + # will remain hanging. 
+ import traceback + traceback.print_exc() + raise RuntimeError("Render failed: {0}".format(exc)) if "files" not in instance.data: instance.data["files"] = [] diff --git a/pype/plugins/houdini/publish/extract_vdb_cache.py b/pype/plugins/houdini/publish/extract_vdb_cache.py index 34c09c7e81..cfd9104744 100644 --- a/pype/plugins/houdini/publish/extract_vdb_cache.py +++ b/pype/plugins/houdini/publish/extract_vdb_cache.py @@ -13,6 +13,8 @@ class ExtractVDBCache(pype.api.Extractor): def process(self, instance): + import hou + ropnode = instance[0] # Get the filename from the filename parameter @@ -20,13 +22,18 @@ class ExtractVDBCache(pype.api.Extractor): sop_output = ropnode.evalParm("sopoutput") staging_dir = os.path.normpath(os.path.dirname(sop_output)) instance.data["stagingDir"] = staging_dir + file_name = os.path.basename(sop_output) - if instance.data.get("executeBackground", True): - self.log.info("Creating background task..") - ropnode.parm("executebackground").pressButton() - self.log.info("Finished") - else: + self.log.info("Writing VDB '%s' to '%s'" % (file_name, staging_dir)) + try: ropnode.render() + except hou.Error as exc: + # The hou.Error is not inherited from a Python Exception class, + # so we explicitly capture the houdini error, otherwise pyblish + # will remain hanging. 
+ import traceback + traceback.print_exc() + raise RuntimeError("Render failed: {0}".format(exc)) if "files" not in instance.data: instance.data["files"] = [] diff --git a/pype/plugins/houdini/publish/validate_alembic_input_node.py b/pype/plugins/houdini/publish/validate_alembic_input_node.py index 8f9a59a420..c68e3cdf5c 100644 --- a/pype/plugins/houdini/publish/validate_alembic_input_node.py +++ b/pype/plugins/houdini/publish/validate_alembic_input_node.py @@ -7,7 +7,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin): The connected node cannot be of the following types for Alembic: - VDB - - Volumne + - Volume """ diff --git a/pype/plugins/houdini/publish/validate_bypass.py b/pype/plugins/houdini/publish/validate_bypass.py new file mode 100644 index 0000000000..70505f0731 --- /dev/null +++ b/pype/plugins/houdini/publish/validate_bypass.py @@ -0,0 +1,34 @@ +import pyblish.api +import pype.api + + +class ValidateBypassed(pyblish.api.InstancePlugin): + """Validate all primitives build hierarchy from attribute when enabled. + + The name of the attribute must exist on the prims and have the same name + as Build Hierarchy from Attribute's `Path Attribute` value on the Alembic + ROP node whenever Build Hierarchy from Attribute is enabled. + + """ + + order = pype.api.ValidateContentsOrder - 0.1 + families = ["*"] + hosts = ["houdini"] + label = "Validate ROP Bypass" + + def process(self, instance): + + invalid = self.get_invalid(instance) + if invalid: + rop = invalid[0] + raise RuntimeError( + "ROP node %s is set to bypass, publishing cannot continue.." 
% + rop.path() + ) + + @classmethod + def get_invalid(cls, instance): + + rop = instance[0] + if rop.isBypassed(): + return [rop] diff --git a/pype/plugins/houdini/publish/validate_camera_rop.py b/pype/plugins/houdini/publish/validate_camera_rop.py new file mode 100644 index 0000000000..48335cfb37 --- /dev/null +++ b/pype/plugins/houdini/publish/validate_camera_rop.py @@ -0,0 +1,41 @@ +import pyblish.api +import pype.api + + +class ValidateCameraROP(pyblish.api.InstancePlugin): + """Validate Camera ROP settings.""" + + order = pype.api.ValidateContentsOrder + families = ['camera'] + hosts = ['houdini'] + label = 'Camera ROP' + + def process(self, instance): + + import hou + + node = instance[0] + if node.parm("use_sop_path").eval(): + raise RuntimeError("Alembic ROP for Camera export should not be " + "set to 'Use Sop Path'. Please disable.") + + # Get the root and objects parameter of the Alembic ROP node + root = node.parm("root").eval() + objects = node.parm("objects").eval() + assert root, "Root parameter must be set on Alembic ROP" + assert root.startswith("/"), "Root parameter must start with slash /" + assert objects, "Objects parameter must be set on Alembic ROP" + assert len(objects.split(" ")) == 1, "Must have only a single object." 
+ + # Check if the object exists and is a camera + path = root + "/" + objects + camera = hou.node(path) + + if not camera: + raise ValueError("Camera path does not exist: %s" % path) + + if not camera.type().name() == "cam": + raise ValueError("Object set in Alembic ROP is not a camera: " + "%s (type: %s)" % (camera, camera.type().name())) + + diff --git a/pype/plugins/houdini/publish/validate_mkpaths_toggled.py b/pype/plugins/houdini/publish/validate_mkpaths_toggled.py index e6ee1cd25c..c2b72b4e43 100644 --- a/pype/plugins/houdini/publish/validate_mkpaths_toggled.py +++ b/pype/plugins/houdini/publish/validate_mkpaths_toggled.py @@ -3,16 +3,12 @@ import pype.api class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin): - """Validate if node attribute Create intermediate Directories is turned on - - Rules: - * The node must have Create intermediate Directories turned on to - ensure the output file will be created - - """ + """Validate Create Intermediate Directories is enabled on ROP node.""" order = pype.api.ValidateContentsOrder - families = ["pointcache'] + families = ['pointcache', + 'camera', + 'vdbcache'] hosts = ['houdini'] label = 'Create Intermediate Directories Checked' @@ -20,8 +16,8 @@ class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Found ROP nodes with Create Intermediate " - "Directories turned off") + raise RuntimeError("Found ROP node with Create Intermediate " + "Directories turned off: %s" % invalid) @classmethod def get_invalid(cls, instance): @@ -34,5 +30,3 @@ class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin): result.append(node.path()) return result - - diff --git a/pype/plugins/houdini/publish/validate_output_node.py b/pype/plugins/houdini/publish/validate_output_node.py index d3393e1c33..5e20ee40d6 100644 --- a/pype/plugins/houdini/publish/validate_output_node.py +++ 
b/pype/plugins/houdini/publish/validate_output_node.py @@ -2,13 +2,20 @@ import pyblish.api class ValidateOutputNode(pyblish.api.InstancePlugin): - """Validate if output node: - - exists - - is of type 'output' - - has an input""" + """Validate the instance SOP Output Node. + + This will ensure: + - The SOP Path is set. + - The SOP Path refers to an existing object. + - The SOP Path node is a SOP node. + - The SOP Path node has at least one input connection (has an input) + - The SOP Path has geometry data. + + """ order = pyblish.api.ValidatorOrder - families = ["*"] + families = ["pointcache", + "vdbcache"] hosts = ["houdini"] label = "Validate Output Node" @@ -16,30 +23,51 @@ class ValidateOutputNode(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Output node(s) `%s` are incorrect" % invalid) + raise RuntimeError("Output node(s) `%s` are incorrect. " + "See plug-in log for details." % invalid) @classmethod def get_invalid(cls, instance): + import hou + output_node = instance.data["output_node"] if output_node is None: node = instance[0] - cls.log.error("Output node at '%s' does not exist, see source" % - node.path()) + cls.log.error("SOP Output node in '%s' does not exist. " + "Ensure a valid SOP output path is set." + % node.path()) - return node.path() + return [node.path()] - # Check if type is correct - type_name = output_node.type().name() - if type_name not in ["output", "cam"]: - cls.log.error("Output node `%s` is not an accepted type `output` " - "or `camera`" % - output_node.path()) + # Output node must be a Sop node. + if not isinstance(output_node, hou.SopNode): + cls.log.error("Output node %s is not a SOP node. 
" + "SOP Path must point to a SOP node, " + "instead found category type: %s" % ( + output_node.path(), + output_node.type().category().name() + ) + ) return [output_node.path()] + # For the sake of completeness also assert the category type + # is Sop to avoid potential edge case scenarios even though + # the isinstance check above should be stricter than this category + assert output_node.type().category().name() == "Sop", ( + "Output node %s is not of category Sop. This is a bug.." % + output_node.path() + ) + # Check if output node has incoming connections - if type_name == "output" and not output_node.inputConnections(): + if not output_node.inputConnections(): cls.log.error("Output node `%s` has no incoming connections" % output_node.path()) return [output_node.path()] + + # Ensure the output node has at least Geometry data + if not output_node.geometry(): + cls.log.error("Output node `%s` has no geometry data." + % output_node.path()) + return [output_node.path()] diff --git a/pype/plugins/houdini/publish/validate_primitive_hierarchy_paths.py b/pype/plugins/houdini/publish/validate_primitive_hierarchy_paths.py new file mode 100644 index 0000000000..70e7873d3b --- /dev/null +++ b/pype/plugins/houdini/publish/validate_primitive_hierarchy_paths.py @@ -0,0 +1,75 @@ +import pyblish.api +import pype.api + + +class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin): + """Validate all primitives build hierarchy from attribute when enabled. + + The name of the attribute must exist on the prims and have the same name + as Build Hierarchy from Attribute's `Path Attribute` value on the Alembic + ROP node whenever Build Hierarchy from Attribute is enabled. + + """ + + order = pype.api.ValidateContentsOrder + 0.1 + families = ["pointcache"] + hosts = ["houdini"] + label = "Validate Prims Hierarchy Path" + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError("See log for details. 
" + "Invalid nodes: {0}".format(invalid)) + + @classmethod + def get_invalid(cls, instance): + + import hou + + output = instance.data["output_node"] + prims = output.geometry().prims() + + rop = instance[0] + build_from_path = rop.parm("build_from_path").eval() + if not build_from_path: + cls.log.debug("Alembic ROP has 'Build from Path' disabled. " + "Validation is ignored..") + return + + path_attr = rop.parm("path_attrib").eval() + if not path_attr: + cls.log.error("The Alembic ROP node has no Path Attribute" + "value set, but 'Build Hierarchy from Attribute'" + "is enabled.") + return [rop.path()] + + cls.log.debug("Checking for attribute: %s" % path_attr) + + missing_attr = [] + invalid_attr = [] + for prim in prims: + + try: + path = prim.stringAttribValue(path_attr) + except hou.OperationFailed: + # Attribute does not exist. + missing_attr.append(prim) + continue + + if not path: + # Empty path value is invalid. + invalid_attr.append(prim) + continue + + if missing_attr: + cls.log.info("Prims are missing attribute `%s`" % path_attr) + + if invalid_attr: + cls.log.info("Prims have no value for attribute `%s` " + "(%s of %s prims)" % (path_attr, + len(invalid_attr), + len(prims))) + + if missing_attr or invalid_attr: + return [output.path()] diff --git a/pype/plugins/houdini/publish/validate_vdb_input_node.py b/pype/plugins/houdini/publish/validate_vdb_input_node.py new file mode 100644 index 0000000000..e5bc118306 --- /dev/null +++ b/pype/plugins/houdini/publish/validate_vdb_input_node.py @@ -0,0 +1,46 @@ +import pyblish.api +import pype.api + + +class ValidateVDBInputNode(pyblish.api.InstancePlugin): + """Validate that the node connected to the output node is of type VDB + + Regardless of the amount of VDBs create the output will need to have an + equal amount of VDBs, points, primitives and vertices + + A VDB is an inherited type of Prim, holds the following data: + - Primitives: 1 + - Points: 1 + - Vertices: 1 + - VDBs: 1 + + """ + + order = 
pype.api.ValidateContentsOrder + 0.1 + families = ["vdbcache"] + hosts = ["houdini"] + label = "Validate Input Node (VDB)" + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError("Node connected to the output node is not" + "of type VDB!") + + @classmethod + def get_invalid(cls, instance): + + node = instance.data["output_node"] + + prims = node.geometry().prims() + nr_of_prims = len(prims) + + nr_of_points = len(node.geometry().points()) + if nr_of_points != nr_of_prims: + cls.log.error("The number of primitives and points do not match") + return [instance] + + for prim in prims: + if prim.numVertices() != 1: + cls.log.error("Found primitive with more than 1 vertex!") + return [instance] diff --git a/pype/plugins/maya/create/create_animation.py b/pype/plugins/maya/create/create_animation.py index ea8e8ebb39..5eef2ac225 100644 --- a/pype/plugins/maya/create/create_animation.py +++ b/pype/plugins/maya/create/create_animation.py @@ -1,5 +1,3 @@ -from collections import OrderedDict - import avalon.maya from pype.maya import lib @@ -16,21 +14,24 @@ class CreateAnimation(avalon.maya.Creator): super(CreateAnimation, self).__init__(*args, **kwargs) # create an ordered dict with the existing data first - data = OrderedDict(**self.data) # get basic animation data : start / end / handles / steps for key, value in lib.collect_animation_data().items(): - data[key] = value + self.data[key] = value # Write vertex colors with the geometry. - data["writeColorSets"] = False + self.data["writeColorSets"] = False # Include only renderable visible shapes. # Skips locators and empty transforms - data["renderableOnly"] = False + self.data["renderableOnly"] = False # Include only nodes that are visible at least once during the # frame range. 
- data["visibleOnly"] = False + self.data["visibleOnly"] = False - self.data = data \ No newline at end of file + # Include the groups above the out_SET content + self.data["includeParentHierarchy"] = False # Include parent groups + + # Default to exporting world-space + self.data["worldSpace"] = True diff --git a/pype/plugins/maya/create/create_camera.py b/pype/plugins/maya/create/create_camera.py index 7f954c2461..16293deb57 100644 --- a/pype/plugins/maya/create/create_camera.py +++ b/pype/plugins/maya/create/create_camera.py @@ -1,4 +1,3 @@ -from collections import OrderedDict import avalon.maya from pype.maya import lib @@ -15,13 +14,10 @@ class CreateCamera(avalon.maya.Creator): super(CreateCamera, self).__init__(*args, **kwargs) # get basic animation data : start / end / handles / steps - data = OrderedDict(**self.data) animation_data = lib.collect_animation_data() for key, value in animation_data.items(): - data[key] = value + self.data[key] = value # Bake to world space by default, when this is False it will also # include the parent hierarchy in the baked results - data['bakeToWorldSpace'] = True - - self.data = data + self.data['bakeToWorldSpace'] = True diff --git a/pype/plugins/maya/create/create_look.py b/pype/plugins/maya/create/create_look.py index 17e8bcd7e3..23e4f034b2 100644 --- a/pype/plugins/maya/create/create_look.py +++ b/pype/plugins/maya/create/create_look.py @@ -1,4 +1,3 @@ -from collections import OrderedDict import avalon.maya from pype.maya import lib @@ -14,7 +13,4 @@ class CreateLook(avalon.maya.Creator): def __init__(self, *args, **kwargs): super(CreateLook, self).__init__(*args, **kwargs) - data = OrderedDict(**self.data) - data["renderlayer"] = lib.get_current_renderlayer() - - self.data = data + self.data["renderlayer"] = lib.get_current_renderlayer() diff --git a/pype/plugins/maya/create/create_model.py b/pype/plugins/maya/create/create_model.py index 7290d7bd71..449a5642be 100644 --- a/pype/plugins/maya/create/create_model.py +++ 
b/pype/plugins/maya/create/create_model.py @@ -1,5 +1,3 @@ -from collections import OrderedDict - import avalon.maya @@ -14,10 +12,12 @@ class CreateModel(avalon.maya.Creator): def __init__(self, *args, **kwargs): super(CreateModel, self).__init__(*args, **kwargs) - # create an ordered dict with the existing data first - data = OrderedDict(**self.data) + # Vertex colors with the geometry + self.data["writeColorSets"] = False - # Write vertex colors with the geometry. - data["writeColorSets"] = True + # Include attributes by attribute name or prefix + self.data["attr"] = "" + self.data["attrPrefix"] = "" - self.data = data + # Whether to include parent hierarchy of nodes in the instance + self.data["includeParentHierarchy"] = False diff --git a/pype/plugins/maya/create/create_pointcache.py b/pype/plugins/maya/create/create_pointcache.py index a660485e94..e5c82f7e10 100644 --- a/pype/plugins/maya/create/create_pointcache.py +++ b/pype/plugins/maya/create/create_pointcache.py @@ -1,5 +1,3 @@ -from collections import OrderedDict - import avalon.maya from pype.maya import lib @@ -15,22 +13,15 @@ class CreatePointCache(avalon.maya.Creator): def __init__(self, *args, **kwargs): super(CreatePointCache, self).__init__(*args, **kwargs) - # create an ordered dict with the existing data first - data = OrderedDict(**self.data) + # Add animation data + self.data.update(lib.collect_animation_data()) - # get basic animation data : start / end / handles / steps - for key, value in lib.collect_animation_data().items(): - data[key] = value + self.data["writeColorSets"] = False # Vertex colors with the geometry. + self.data["renderableOnly"] = False # Only renderable visible shapes + self.data["visibleOnly"] = False # only nodes that are visible + self.data["includeParentHierarchy"] = False # Include parent groups + self.data["worldSpace"] = True # Default to exporting world-space - # Write vertex colors with the geometry. 
- data["writeColorSets"] = False - - # Include only renderable visible shapes. - # Skips locators and empty transforms - data["renderableOnly"] = False - - # Include only nodes that are visible at least once during the - # frame range. - data["visibleOnly"] = False - - self.data = data \ No newline at end of file + # Add options for custom attributes + self.data["attr"] = "" + self.data["attrPrefix"] = "" diff --git a/pype/plugins/maya/create/create_renderglobals.py b/pype/plugins/maya/create/create_renderglobals.py index 61fd76f6ef..81850fe830 100644 --- a/pype/plugins/maya/create/create_renderglobals.py +++ b/pype/plugins/maya/create/create_renderglobals.py @@ -1,10 +1,10 @@ -from collections import OrderedDict - from maya import cmds +import pype.maya.lib as lib + from avalon.vendor import requests import avalon.maya -import os +from avalon import api class CreateRenderGlobals(avalon.maya.Creator): @@ -19,13 +19,13 @@ class CreateRenderGlobals(avalon.maya.Creator): # We won't be publishing this one self.data["id"] = "avalon.renderglobals" - # get pools + # Get available Deadline pools try: deadline_url = os.environ["DEADLINE_REST_URL"] except KeyError: self.log.error("Deadline REST API url not found.") - argument = "{}/api/pools?NamesOnly=true".format(deadline_url) + argument = "{}/api/pools?NamesOnly=true".format(AVALON_DEADLINE) response = requests.get(argument) if not response.ok: self.log.warning("No pools retrieved") @@ -38,33 +38,31 @@ class CreateRenderGlobals(avalon.maya.Creator): self.data.pop("asset", None) self.data.pop("active", None) - data = OrderedDict(**self.data) - - data["suspendPublishJob"] = False - data["extendFrames"] = False - data["overrideExistingFrame"] = True - data["useLegacyRenderLayers"] = True - data["priority"] = 50 - data["framesPerTask"] = 1 - data["whitelist"] = False - data["machineList"] = "" - data["useMayaBatch"] = True - data["primaryPool"] = pools + self.data["suspendPublishJob"] = False + self.data["extendFrames"] = 
False + self.data["overrideExistingFrame"] = True + self.data["useLegacyRenderLayers"] = True + self.data["priority"] = 50 + self.data["framesPerTask"] = 1 + self.data["whitelist"] = False + self.data["machineList"] = "" + self.data["useMayaBatch"] = True + self.data["primaryPool"] = pools # We add a string "-" to allow the user to not set any secondary pools - data["secondaryPool"] = ["-"] + pools + self.data["secondaryPool"] = ["-"] + pools - self.data = data self.options = {"useSelection": False} # Force no content def process(self): exists = cmds.ls(self.name) assert len(exists) <= 1, ( - "More than one renderglobal exists, this is a bug") + "More than one renderglobal exists, this is a bug" + ) if exists: return cmds.warning("%s already exists." % exists[0]) - super(CreateRenderGlobals, self).process() - - cmds.setAttr("{}.machineList".format(self.name), lock=True) + with lib.undo_chunk(): + super(CreateRenderGlobals, self).process() + cmds.setAttr("{}.machineList".format(self.name), lock=True) diff --git a/pype/plugins/maya/create/create_rig.py b/pype/plugins/maya/create/create_rig.py index e8bcd2baa2..2c8f3d78b9 100644 --- a/pype/plugins/maya/create/create_rig.py +++ b/pype/plugins/maya/create/create_rig.py @@ -1,5 +1,6 @@ from maya import cmds +import pype.maya.lib as lib import avalon.maya @@ -12,10 +13,11 @@ class CreateRig(avalon.maya.Creator): icon = "wheelchair" def process(self): - instance = super(CreateRig, self).process() - self.log.info("Creating Rig instance set up ...") + with lib.undo_chunk(): + instance = super(CreateRig, self).process() - controls = cmds.sets(name="controls_SET", empty=True) - pointcache = cmds.sets(name="out_SET", empty=True) - cmds.sets([controls, pointcache], forceElement=instance) + self.log.info("Creating Rig instance set up ...") + controls = cmds.sets(name="controls_SET", empty=True) + pointcache = cmds.sets(name="out_SET", empty=True) + cmds.sets([controls, pointcache], forceElement=instance) diff --git 
a/pype/plugins/maya/create/create_vrayproxy.py b/pype/plugins/maya/create/create_vrayproxy.py index 49b9850866..f3e6124e1f 100644 --- a/pype/plugins/maya/create/create_vrayproxy.py +++ b/pype/plugins/maya/create/create_vrayproxy.py @@ -1,5 +1,3 @@ -from collections import OrderedDict - import avalon.maya @@ -14,13 +12,9 @@ class CreateVrayProxy(avalon.maya.Creator): def __init__(self, *args, **kwargs): super(CreateVrayProxy, self).__init__(*args, **kwargs) - data = OrderedDict(**self.data) - - data["animation"] = False - data["startFrame"] = 1 - data["endFrame"] = 1 + self.data["animation"] = False + self.data["startFrame"] = 1 + self.data["endFrame"] = 1 # Write vertex colors - data["vertexColors"] = False - - self.data.update(data) + self.data["vertexColors"] = False diff --git a/pype/plugins/maya/create/create_vrayscene.py b/pype/plugins/maya/create/create_vrayscene.py new file mode 100644 index 0000000000..df1c232858 --- /dev/null +++ b/pype/plugins/maya/create/create_vrayscene.py @@ -0,0 +1,27 @@ +import avalon.maya + + +class CreateVRayScene(avalon.maya.Creator): + + label = "VRay Scene" + family = "vrayscene" + icon = "cubes" + + def __init__(self, *args, **kwargs): + super(CreateVRayScene, self).__init__(*args, **kwargs) + + # We don't need subset or asset attributes + self.data.pop("subset", None) + self.data.pop("asset", None) + self.data.pop("active", None) + + self.data.update({ + "id": "avalon.vrayscene", # We won't be publishing this one + "suspendRenderJob": False, + "suspendPublishJob": False, + "extendFrames": False, + "pools": "", + "framesPerTask": 1 + }) + + self.options = {"useSelection": False} # Force no content diff --git a/pype/plugins/maya/create/create_yeti_cache.py b/pype/plugins/maya/create/create_yeti_cache.py index 6b540461db..aaa94ce01d 100644 --- a/pype/plugins/maya/create/create_yeti_cache.py +++ b/pype/plugins/maya/create/create_yeti_cache.py @@ -15,12 +15,13 @@ class CreateYetiCache(avalon.maya.Creator): def __init__(self, *args, 
**kwargs): super(CreateYetiCache, self).__init__(*args, **kwargs) - data = OrderedDict(**self.data) - data["peroll"] = 0 + self.data["preroll"] = 0 + # Add animation data without step and handles anim_data = lib.collect_animation_data() - data.update({"startFrame": anim_data["startFrame"], - "endFrame": anim_data["endFrame"], - "samples": 3}) + anim_data.pop("step") + anim_data.pop("handles") + self.data.update(anim_data) - self.data = data + # Add samples + self.data["samples"] = 3 diff --git a/pype/plugins/maya/create/create_yeti_rig.py b/pype/plugins/maya/create/create_yeti_rig.py index ba44c7bf27..c061084dd7 100644 --- a/pype/plugins/maya/create/create_yeti_rig.py +++ b/pype/plugins/maya/create/create_yeti_rig.py @@ -1,5 +1,6 @@ from maya import cmds +import pype.maya.lib as lib import avalon.maya @@ -12,9 +13,9 @@ class CreateYetiRig(avalon.maya.Creator): def process(self): - instance = super(CreateYetiRig, self).process() + with lib.undo_chunk(): + instance = super(CreateYetiRig, self).process() - self.log.info("Creating Rig instance set up ...") - - input_meshes = cmds.sets(name="input_SET", empty=True) - cmds.sets(input_meshes, forceElement=instance) + self.log.info("Creating Rig instance set up ...") + input_meshes = cmds.sets(name="input_SET", empty=True) + cmds.sets(input_meshes, forceElement=instance) diff --git a/pype/plugins/maya/load/load_alembic.py b/pype/plugins/maya/load/load_alembic.py index 9c04a2e51d..9e08702521 100644 --- a/pype/plugins/maya/load/load_alembic.py +++ b/pype/plugins/maya/load/load_alembic.py @@ -2,7 +2,7 @@ import pype.maya.plugin class AbcLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader of Alembic for the studio.animation family""" + """Specific loader of Alembic for the pype.animation family""" families = ["animation", "pointcache"] diff --git a/pype/plugins/maya/load/load_camera.py b/pype/plugins/maya/load/load_camera.py index 7b2595f3ce..eb75c3a63d 100644 --- a/pype/plugins/maya/load/load_camera.py +++ 
b/pype/plugins/maya/load/load_camera.py @@ -2,7 +2,7 @@ import pype.maya.plugin class CameraLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader of Alembic for the studio.camera family""" + """Specific loader of Alembic for the pype.camera family""" families = ["camera"] label = "Reference camera" diff --git a/pype/plugins/maya/load/load_fbx.py b/pype/plugins/maya/load/load_fbx.py new file mode 100644 index 0000000000..2ee3e5fdbd --- /dev/null +++ b/pype/plugins/maya/load/load_fbx.py @@ -0,0 +1,36 @@ +import pype.maya.plugin + + +class FBXLoader(pype.maya.plugin.ReferenceLoader): + """Load the FBX""" + + families = ["fbx"] + representations = ["fbx"] + + label = "Reference FBX" + order = -10 + icon = "code-fork" + color = "orange" + + def process_reference(self, context, name, namespace, data): + + import maya.cmds as cmds + from avalon import maya + + # Ensure FBX plug-in is loaded + cmds.loadPlugin("fbxmaya", quiet=True) + + with maya.maintained_selection(): + nodes = cmds.file(self.fname, + namespace=namespace, + reference=True, + returnNewNodes=True, + groupReference=True, + groupName="{}:{}".format(namespace, name)) + + self[:] = nodes + + return nodes + + def switch(self, container, representation): + self.update(container, representation) diff --git a/pype/plugins/maya/publish/collect_animation.py b/pype/plugins/maya/publish/collect_animation.py index 90884fae67..9b1e38fd0a 100644 --- a/pype/plugins/maya/publish/collect_animation.py +++ b/pype/plugins/maya/publish/collect_animation.py @@ -15,7 +15,7 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin): """ - order = pyblish.api.CollectorOrder + 0.2 + order = pyblish.api.CollectorOrder + 0.4 families = ["animation"] label = "Collect Animation Output Geometry" hosts = ["maya"] @@ -43,11 +43,9 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin): descendants = cmds.ls(descendants, noIntermediate=True, long=True) # Add members and descendants together for a complete 
overview - # hierarchy = members + descendants - hierarchy = members - self.log.info(members) - self.log.info(hierarchy) + hierarchy = members + descendants + # Ignore certain node types (e.g. constraints) ignore = cmds.ls(hierarchy, type=self.ignore_type, long=True) diff --git a/pype/plugins/maya/publish/collect_current_file.py b/pype/plugins/maya/publish/collect_current_file.py index 2c955da29b..0b38ebcf3d 100644 --- a/pype/plugins/maya/publish/collect_current_file.py +++ b/pype/plugins/maya/publish/collect_current_file.py @@ -1,8 +1,6 @@ from maya import cmds import pyblish.api -import os -from pype.maya import lib class CollectMayaCurrentFile(pyblish.api.ContextPlugin): diff --git a/pype/plugins/maya/publish/collect_instances.py b/pype/plugins/maya/publish/collect_instances.py index e0f929f004..fd22085556 100644 --- a/pype/plugins/maya/publish/collect_instances.py +++ b/pype/plugins/maya/publish/collect_instances.py @@ -12,29 +12,14 @@ class CollectInstances(pyblish.api.ContextPlugin): Identifier: id (str): "pyblish.avalon.instance" - Supported Families: - avalon.model: Geometric representation of artwork - avalon.rig: An articulated model for animators. - A rig may contain a series of sets in which to identify - its contents. - - - cache_SEL: Should contain cachable polygonal meshes - - controls_SEL: Should contain animatable controllers for animators - - resources_SEL: Should contain nodes that reference external files - - Limitations: - - Only Maya is supported - - One (1) rig per scene file - - Unmanaged history, it is up to the TD to ensure - history is up to par. - avalon.animation: Pointcache of `avalon.rig` - Limitations: - Does not take into account nodes connected to those within an objectSet. Extractors are assumed to export with history preserved, but this limits what they will be able to achieve and the amount of data available - to validators. + to validators. 
An additional collector could also + append this input data into the instance, as we do + for `pype.rig` with collect_history. """ @@ -101,7 +86,11 @@ class CollectInstances(pyblish.api.ContextPlugin): fullPath=True) or [] children = cmds.ls(children, noIntermediate=True, long=True) - parents = self.get_all_parents(members) + parents = [] + if data.get("includeParentHierarchy", True): + # If `includeParentHierarchy` then include the parents + # so they will also be picked up in the instance by validators + parents = self.get_all_parents(members) members_hierarchy = list(set(members + children + parents)) # Create the instance diff --git a/pype/plugins/maya/publish/collect_look.py b/pype/plugins/maya/publish/collect_look.py index e38144814d..a0e0f53fea 100644 --- a/pype/plugins/maya/publish/collect_look.py +++ b/pype/plugins/maya/publish/collect_look.py @@ -45,10 +45,8 @@ def get_look_attrs(node): if cmds.objectType(node, isAType="shape"): attrs = cmds.listAttr(node, changedSinceFileOpen=True) or [] for attr in attrs: - result.append(attr) - # if attr in SHAPE_ATTRS: - # result.append(attr) - + if attr in SHAPE_ATTRS: + result.append(attr) return result @@ -109,7 +107,6 @@ def seq_to_glob(path): "": "" } - lower = path.lower() has_pattern = False for pattern, regex_pattern in patterns.items(): @@ -205,7 +202,7 @@ class CollectLook(pyblish.api.InstancePlugin): """ - order = pyblish.api.CollectorOrder + 0.2 + order = pyblish.api.CollectorOrder + 0.4 families = ["look"] label = "Collect Look" hosts = ["maya"] @@ -364,8 +361,6 @@ class CollectLook(pyblish.api.InstancePlugin): # Collect changes to "custom" attributes node_attrs = get_look_attrs(node) - self.log.info('attr: {}'.format(node_attrs)) - # Only include if there are any properties we care about if not node_attrs: continue diff --git a/pype/plugins/maya/publish/collect_model.py b/pype/plugins/maya/publish/collect_model.py index 4feea7e145..fa6a0eee1c 100644 --- a/pype/plugins/maya/publish/collect_model.py +++ 
b/pype/plugins/maya/publish/collect_model.py @@ -9,14 +9,13 @@ class CollectModelData(pyblish.api.InstancePlugin): Ensures always only a single frame is extracted (current frame). Note: - This is a workaround so that the `studio.model` family can use the + This is a workaround so that the `pype.model` family can use the same pointcache extractor implementation as animation and pointcaches. This always enforces the "current" frame to be published. """ - order = pyblish.api.CollectorOrder + 0.2 - + order = pyblish.api.CollectorOrder + 0.499 label = 'Collect Model Data' families = ["model"] diff --git a/pype/plugins/maya/publish/collect_render_layer_aovs.py b/pype/plugins/maya/publish/collect_render_layer_aovs.py index f570aa9158..d8d2995515 100644 --- a/pype/plugins/maya/publish/collect_render_layer_aovs.py +++ b/pype/plugins/maya/publish/collect_render_layer_aovs.py @@ -6,10 +6,9 @@ import pype.maya.lib as lib class CollectRenderLayerAOVS(pyblish.api.InstancePlugin): - """Validate all render layer's AOVs / Render Elements are registered in - the database + """Collect all render layer's AOVs / Render Elements that will render. - This validator is important to be able to Extend Frames + This collector is important to be able to Extend Frames. Technical information: Each renderer uses different logic to work with render passes. 
@@ -37,8 +36,7 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin): return # Get renderer - renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer") - + renderer = instance.data["renderer"] self.log.info("Renderer found: {}".format(renderer)) rp_node_types = {"vray": ["VRayRenderElement", "VRayRenderElementSet"], @@ -53,21 +51,20 @@ class CollectRenderLayerAOVS(pyblish.api.InstancePlugin): # Collect all AOVs / Render Elements layer = instance.data["setMembers"] - with lib.renderlayer(layer): + node_type = rp_node_types[renderer] + render_elements = cmds.ls(type=node_type) - node_type = rp_node_types[renderer] - render_elements = cmds.ls(type=node_type) + # Check if AOVs / Render Elements are enabled + for element in render_elements: + enabled = lib.get_attr_in_layer("{}.enabled".format(element), + layer=layer) + if not enabled: + continue - # Check if AOVs / Render Elements are enabled - for element in render_elements: - enabled = cmds.getAttr("{}.enabled".format(element)) - if not enabled: - continue + pass_name = self.get_pass_name(renderer, element) + render_pass = "%s.%s" % (instance.data["subset"], pass_name) - pass_name = self.get_pass_name(renderer, element) - render_pass = "%s.%s" % (instance.data["subset"], pass_name) - - result.append(render_pass) + result.append(render_pass) self.log.info("Found {} render elements / AOVs for " "'{}'".format(len(result), instance.data["subset"])) diff --git a/pype/plugins/maya/publish/collect_renderable_camera.py b/pype/plugins/maya/publish/collect_renderable_camera.py new file mode 100644 index 0000000000..9bfc010204 --- /dev/null +++ b/pype/plugins/maya/publish/collect_renderable_camera.py @@ -0,0 +1,26 @@ +import pyblish.api + +from maya import cmds + +from pype.maya import lib + + +class CollectRenderableCamera(pyblish.api.InstancePlugin): + """Collect the renderable camera(s) for the render layer""" + + order = pyblish.api.CollectorOrder + 0.01 + label = "Collect Renderable Camera(s)" + hosts = 
["maya"] + families = ["vrayscene", + "renderlayer"] + + def process(self, instance): + layer = instance.data["setMembers"] + + cameras = cmds.ls(type="camera", long=True) + renderable = [c for c in cameras if + lib.get_attr_in_layer("%s.renderable" % c, layer=layer)] + + self.log.info("Found cameras %s: %s" % (len(renderable), renderable)) + + instance.data["cameras"] = renderable diff --git a/pype/plugins/maya/publish/collect_renderlayers.py b/pype/plugins/maya/publish/collect_renderlayers.py index 0a02f8f49d..c926baf803 100644 --- a/pype/plugins/maya/publish/collect_renderlayers.py +++ b/pype/plugins/maya/publish/collect_renderlayers.py @@ -22,16 +22,10 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin): try: render_globals = cmds.ls("renderglobalsDefault")[0] except IndexError: - self.log.error("Cannot collect renderlayers without " - "renderGlobals node") + self.log.info("Skipping renderlayer collection, no " + "renderGlobalsDefault found..") return - # Get start and end frame - start_frame = self.get_render_attribute("startFrame") - end_frame = self.get_render_attribute("endFrame") - context.data["startFrame"] = start_frame - context.data["endFrame"] = end_frame - # Get all valid renderlayers # This is how Maya populates the renderlayer display rlm_attribute = "renderLayerManager.renderLayerId" @@ -59,30 +53,34 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin): if layer.endswith("defaultRenderLayer"): layername = "masterLayer" else: + # Remove Maya render setup prefix `rs_` layername = layer.split("rs_", 1)[-1] # Get layer specific settings, might be overrides - with lib.renderlayer(layer): - data = { - "subset": layername, - "setMembers": layer, - "publish": True, - "startFrame": self.get_render_attribute("startFrame"), - "endFrame": self.get_render_attribute("endFrame"), - "byFrameStep": self.get_render_attribute("byFrameStep"), - "renderer": self.get_render_attribute("currentRenderer"), + data = { + "subset": layername, + "setMembers": 
layer, + "publish": True, + "startFrame": self.get_render_attribute("startFrame", + layer=layer), + "endFrame": self.get_render_attribute("endFrame", + layer=layer), + "byFrameStep": self.get_render_attribute("byFrameStep", + layer=layer), + "renderer": self.get_render_attribute("currentRenderer", + layer=layer), - # instance subset - "family": "Render Layers", - "families": ["renderlayer"], - "asset": asset, - "time": api.time(), - "author": context.data["user"], + # instance subset + "family": "Render Layers", + "families": ["renderlayer"], + "asset": asset, + "time": api.time(), + "author": context.data["user"], - # Add source to allow tracing back to the scene from - # which was submitted originally - "source": filepath - } + # Add source to allow tracing back to the scene from + # which was submitted originally + "source": filepath + } # Apply each user defined attribute as data for attr in cmds.listAttr(layer, userDefined=True) or list(): @@ -112,8 +110,9 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin): instance.data["label"] = label instance.data.update(data) - def get_render_attribute(self, attr): - return cmds.getAttr("defaultRenderGlobals.{}".format(attr)) + def get_render_attribute(self, attr, layer): + return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), + layer=layer) def parse_options(self, render_globals): """Get all overrides with a value, skip those without diff --git a/pype/plugins/maya/publish/collect_setdress.py b/pype/plugins/maya/publish/collect_setdress.py index b17d6448da..bb56163293 100644 --- a/pype/plugins/maya/publish/collect_setdress.py +++ b/pype/plugins/maya/publish/collect_setdress.py @@ -23,7 +23,7 @@ class CollectSetDress(pyblish.api.InstancePlugin): """ - order = pyblish.api.CollectorOrder + 0.2 + order = pyblish.api.CollectorOrder + 0.49 label = "Set Dress" families = ["setdress"] @@ -32,7 +32,6 @@ class CollectSetDress(pyblish.api.InstancePlugin): # Find containers containers = avalon.ls() - # Get all 
content from the instance instance_lookup = set(cmds.ls(instance, type="transform", long=True)) data = defaultdict(list) @@ -41,7 +40,6 @@ class CollectSetDress(pyblish.api.InstancePlugin): for container in containers: root = lib.get_container_transforms(container, root=True) - self.log.debug(root) if not root or root not in instance_lookup: continue diff --git a/pype/plugins/maya/publish/collect_vray_scene.py b/pype/plugins/maya/publish/collect_vray_scene.py new file mode 100644 index 0000000000..89c0aa8670 --- /dev/null +++ b/pype/plugins/maya/publish/collect_vray_scene.py @@ -0,0 +1,110 @@ +import os + +import pyblish.api + +from maya import cmds + +from avalon import api + + +class CollectVRayScene(pyblish.api.ContextPlugin): + """Collect all information prior for exporting vrscenes + """ + + order = pyblish.api.CollectorOrder + label = "Collect VRay Scene" + hosts = ["maya"] + + def process(self, context): + + # Sort by displayOrder + def sort_by_display_order(layer): + return cmds.getAttr("%s.displayOrder" % layer) + + host = api.registered_host() + + asset = api.Session["AVALON_ASSET"] + work_dir = context.data["workspaceDir"] + + # Get VRay Scene instance + vray_scenes = host.lsattr("family", "vrayscene") + if not vray_scenes: + self.log.info("Skipping vrayScene collection, no " + "vrayscene instance found..") + return + + assert len(vray_scenes) == 1, "Multiple vrayscene instances found!" + vray_scene = vray_scenes[0] + + vrscene_data = host.read(vray_scene) + + assert cmds.ls("vraySettings", type="VRaySettingsNode"), ( + "VRay Settings node does not exists. " + "Please ensure V-Ray is the current renderer." 
+ ) + + # Output data + start_frame = int(cmds.getAttr("defaultRenderGlobals.startFrame")) + end_frame = int(cmds.getAttr("defaultRenderGlobals.endFrame")) + + # Create output file path with template + file_name = context.data["currentFile"].replace("\\", "/") + vrscene = ("vrayscene", "", "_", "") + vrscene_output = os.path.join(work_dir, *vrscene) + + # Check and create render output template for render job + # outputDir is required for submit_publish_job + if not vrscene_data.get("suspendRenderJob", False): + renders = ("renders", "", "_", "") + output_renderpath = os.path.join(work_dir, *renders) + vrscene_data["outputDir"] = output_renderpath + + # Get resolution + resolution = (cmds.getAttr("defaultResolution.width"), + cmds.getAttr("defaultResolution.height")) + + # Get format extension + extension = cmds.getAttr("vraySettings.imageFormatStr") + + # Get render layers + render_layers = [i for i in cmds.ls(type="renderLayer") if + cmds.getAttr("{}.renderable".format(i)) and not + cmds.referenceQuery(i, isNodeReferenced=True)] + + render_layers = sorted(render_layers, key=sort_by_display_order) + for layer in render_layers: + + subset = layer + if subset == "defaultRenderLayer": + subset = "masterLayer" + + data = { + "subset": subset, + "setMembers": layer, + + "startFrame": start_frame, + "endFrame": end_frame, + "renderer": "vray", + "resolution": resolution, + "ext": ".{}".format(extension), + + # instance subset + "family": "VRay Scene", + "families": ["vrayscene"], + "asset": asset, + "time": api.time(), + "author": context.data["user"], + + # Add source to allow tracing back to the scene from + # which was submitted originally + "source": file_name, + + # Store VRay Scene additional data + "vrsceneOutput": vrscene_output + } + + data.update(vrscene_data) + + instance = context.create_instance(subset) + self.log.info("Created: %s" % instance.name) + instance.data.update(data) diff --git a/pype/plugins/maya/publish/collect_workscene_fps.py 
b/pype/plugins/maya/publish/collect_workscene_fps.py new file mode 100644 index 0000000000..41d6ffea33 --- /dev/null +++ b/pype/plugins/maya/publish/collect_workscene_fps.py @@ -0,0 +1,15 @@ +import pyblish.api +from maya import mel + + +class CollectWorksceneFPS(pyblish.api.ContextPlugin): + """Get the FPS of the work scene""" + + label = "Workscene FPS" + order = pyblish.api.CollectorOrder + hosts = ["maya"] + + def process(self, context): + fps = mel.eval('currentTimeUnitToFPS()') + self.log.info("Workscene FPS: %s" % fps) + context.data.update({"fps": fps}) diff --git a/pype/plugins/maya/publish/collect_yeti_cache.py b/pype/plugins/maya/publish/collect_yeti_cache.py index fa2474dd6f..2365162c05 100644 --- a/pype/plugins/maya/publish/collect_yeti_cache.py +++ b/pype/plugins/maya/publish/collect_yeti_cache.py @@ -26,7 +26,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin): Other information is the name of the transform and it's Colorbleed ID """ - order = pyblish.api.CollectorOrder + 0.2 + order = pyblish.api.CollectorOrder + 0.45 label = "Collect Yeti Cache" families = ["yetiRig", "yeticache"] hosts = ["maya"] diff --git a/pype/plugins/maya/publish/collect_yeti_rig.py b/pype/plugins/maya/publish/collect_yeti_rig.py index 11e211bf5f..469651a891 100644 --- a/pype/plugins/maya/publish/collect_yeti_rig.py +++ b/pype/plugins/maya/publish/collect_yeti_rig.py @@ -6,6 +6,7 @@ from maya import cmds import pyblish.api from pype.maya import lib +from pype.lib import pairwise SETTINGS = {"renderDensity", @@ -19,7 +20,7 @@ SETTINGS = {"renderDensity", class CollectYetiRig(pyblish.api.InstancePlugin): """Collect all information of the Yeti Rig""" - order = pyblish.api.CollectorOrder + 0.2 + order = pyblish.api.CollectorOrder + 0.4 label = "Collect Yeti Rig" families = ["yetiRig"] hosts = ["maya"] @@ -29,6 +30,27 @@ class CollectYetiRig(pyblish.api.InstancePlugin): assert "input_SET" in instance.data["setMembers"], ( "Yeti Rig must have an input_SET") + input_connections 
= self.collect_input_connections(instance) + + # Collect any textures if used + yeti_resources = [] + yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya", long=True) + for node in yeti_nodes: + # Get Yeti resources (textures) + resources = self.get_yeti_resources(node) + yeti_resources.extend(resources) + + instance.data["rigsettings"] = {"inputs": input_connections} + + instance.data["resources"] = yeti_resources + + # Force frame range for export + instance.data["startFrame"] = 1 + instance.data["endFrame"] = 1 + + def collect_input_connections(self, instance): + """Collect the inputs for all nodes in the input_SET""" + # Get the input meshes information input_content = cmds.ls(cmds.sets("input_SET", query=True), long=True) @@ -39,44 +61,38 @@ class CollectYetiRig(pyblish.api.InstancePlugin): # Ignore intermediate objects input_content = cmds.ls(input_content, long=True, noIntermediate=True) + if not input_content: + return [] # Store all connections connections = cmds.listConnections(input_content, source=True, destination=False, connections=True, + # Only allow inputs from dagNodes + # (avoid display layers, etc.) + type="dagNode", plugs=True) or [] - - # Group per source, destination pair. We need to reverse the connection - # list as it comes in with the shape used to query first while that - # shape is the destination of the connection - grouped = [(connections[i+1], item) for i, item in - enumerate(connections) if i % 2 == 0] + connections = cmds.ls(connections, long=True) # Ensure long names inputs = [] - for src, dest in grouped: + for dest, src in pairwise(connections): source_node, source_attr = src.split(".", 1) dest_node, dest_attr = dest.split(".", 1) + # Ensure the source of the connection is not included in the + # current instance's hierarchy. If so, we ignore that connection + # as we will want to preserve it even over a publish. 
+ if source_node in instance: + self.log.debug("Ignoring input connection between nodes " + "inside the instance: %s -> %s" % (src, dest)) + continue + inputs.append({"connections": [source_attr, dest_attr], "sourceID": lib.get_id(source_node), "destinationID": lib.get_id(dest_node)}) - # Collect any textures if used - yeti_resources = [] - yeti_nodes = cmds.ls(instance[:], type="pgYetiMaya", long=True) - for node in yeti_nodes: - # Get Yeti resources (textures) - resources = self.get_yeti_resources(node) - yeti_resources.extend(resources) - - instance.data["rigsettings"] = {"inputs": inputs} - - instance.data["resources"] = yeti_resources - - # Force frame range for export - instance.data["startFrame"] = 1 - instance.data["endFrame"] = 1 + return inputs def get_yeti_resources(self, node): """Get all resource file paths @@ -96,7 +112,13 @@ class CollectYetiRig(pyblish.api.InstancePlugin): list """ resources = [] - image_search_path = cmds.getAttr("{}.imageSearchPath".format(node)) + + image_search_paths = cmds.getAttr("{}.imageSearchPath".format(node)) + + # TODO: Somehow this uses OS environment path separator, `:` vs `;` + # Later on check whether this is pipeline OS cross-compatible. 
+ image_search_paths = [p for p in + image_search_paths.split(os.path.pathsep) if p] # List all related textures texture_filenames = cmds.pgYetiCommand(node, listTextures=True) @@ -108,36 +130,51 @@ class CollectYetiRig(pyblish.api.InstancePlugin): type="reference") self.log.info("Found %i reference node(s)" % len(reference_nodes)) - if texture_filenames and not image_search_path: + if texture_filenames and not image_search_paths: raise ValueError("pgYetiMaya node '%s' is missing the path to the " "files in the 'imageSearchPath " "atttribute'" % node) # Collect all texture files for texture in texture_filenames: - item = {"files": [], "source": texture, "node": node} - texture_filepath = os.path.join(image_search_path, texture) - if len(texture.split(".")) > 2: - # For UDIM based textures (tiles) - if "" in texture: - sequences = self.get_sequence(texture_filepath, - pattern="") - item["files"].extend(sequences) - - # Based textures (animated masks f.e) - elif "%04d" in texture: - sequences = self.get_sequence(texture_filepath, - pattern="%04d") - item["files"].extend(sequences) - # Assuming it is a fixed name - else: - item["files"].append(texture_filepath) + files = [] + if os.path.isabs(texture): + self.log.debug("Texture is absolute path, ignoring " + "image search paths for: %s" % texture) + files = self.search_textures(texture) else: - item["files"].append(texture_filepath) + for root in image_search_paths: + filepath = os.path.join(root, texture) + files = self.search_textures(filepath) + if files: + # Break out on first match in search paths.. + break + + if not files: + self.log.warning( + "No texture found for: %s " + "(searched: %s)" % (texture, image_search_paths)) + + item = { + "files": files, + "source": texture, + "node": node + } resources.append(item) + # For now validate that every texture has at least a single file + # resolved. 
Since a 'resource' does not have the requirement of having + # a `files` explicitly mapped it's not explicitly validated. + # TODO: Validate this as a validator + invalid_resources = [] + for resource in resources: + if not resource['files']: + invalid_resources.append(resource) + if invalid_resources: + raise RuntimeError("Invalid resources") + # Collect all referenced files for reference_node in reference_nodes: ref_file = cmds.pgYetiGraph(node, @@ -145,35 +182,83 @@ class CollectYetiRig(pyblish.api.InstancePlugin): param="reference_file", getParamValue=True) - if not os.path.isfile(ref_file): - raise RuntimeError("Reference file must be a full file path!") - # Create resource dict - item = {"files": [], - "source": ref_file, - "node": node, - "graphnode": reference_node, - "param": "reference_file"} + item = { + "source": ref_file, + "node": node, + "graphnode": reference_node, + "param": "reference_file", + "files": [] + } ref_file_name = os.path.basename(ref_file) if "%04d" in ref_file_name: - ref_files = self.get_sequence(ref_file) - item["files"].extend(ref_files) + item["files"] = self.get_sequence(ref_file) else: - item["files"].append(ref_file) + if os.path.exists(ref_file) and os.path.isfile(ref_file): + item["files"] = [ref_file] + + if not item["files"]: + self.log.warning("Reference node '%s' has no valid file " + "path set: %s" % (reference_node, ref_file)) + # TODO: This should allow to pass and fail in Validator instead + raise RuntimeError("Reference node must be a full file path!") resources.append(item) return resources - def get_sequence(self, filename, pattern="%04d"): - """Get sequence from filename + def search_textures(self, filepath): + """Search all texture files on disk. + + This also parses to full sequences for those with dynamic patterns + like and %04d in the filename. 
+ + Args: + filepath (str): The full path to the file, including any + dynamic patterns like or %04d + + Returns: + list: The files found on disk + + """ + filename = os.path.basename(filepath) + + # Collect full sequence if it matches a sequence pattern + if len(filename.split(".")) > 2: + + # For UDIM based textures (tiles) + if "" in filename: + sequences = self.get_sequence(filepath, + pattern="") + if sequences: + return sequences + + # Frame/time - Based textures (animated masks f.e) + elif "%04d" in filename: + sequences = self.get_sequence(filepath, + pattern="%04d") + if sequences: + return sequences + + # Assuming it is a fixed name (single file) + if os.path.exists(filepath): + return [filepath] + + return [] + + def get_sequence(self, filepath, pattern="%04d"): + """Get sequence from filename. + + This will only return files if they exist on disk as it tries + to collect the sequence using the filename pattern and searching + for them on disk. Supports negative frame ranges like -001, 0000, 0001 and -0001, 0000, 0001. Arguments: - filename (str): The full path to filename containing the given + filepath (str): The full path to filename containing the given pattern. pattern (str): The pattern to swap with the variable frame number. 
@@ -183,10 +268,10 @@ class CollectYetiRig(pyblish.api.InstancePlugin): """ from avalon.vendor import clique - escaped = re.escape(filename) + escaped = re.escape(filepath) re_pattern = escaped.replace(pattern, "-?[0-9]+") - source_dir = os.path.dirname(filename) + source_dir = os.path.dirname(filepath) files = [f for f in os.listdir(source_dir) if re.match(re_pattern, f)] diff --git a/pype/plugins/maya/publish/extract_animation.py b/pype/plugins/maya/publish/extract_animation.py index 9bee6d65c0..24d06ac182 100644 --- a/pype/plugins/maya/publish/extract_animation.py +++ b/pype/plugins/maya/publish/extract_animation.py @@ -27,14 +27,12 @@ class ExtractAnimation(pype.api.Extractor): raise RuntimeError("Couldn't find exactly one out_SET: " "{0}".format(out_sets)) out_set = out_sets[0] - nodes = cmds.sets(out_set, query=True) - - self.log.info('nodes to export: {}'.format(str(nodes))) + roots = cmds.sets(out_set, query=True) # Include all descendants - # nodes += cmds.listRelatives(nodes, - # allDescendents=True, - # fullPath=True) or [] + nodes = roots + cmds.listRelatives(roots, + allDescendents=True, + fullPath=True) or [] # Collect the start and end including handles start = instance.data["startFrame"] @@ -57,21 +55,27 @@ class ExtractAnimation(pype.api.Extractor): "writeVisibility": True, "writeCreases": True, "uvWrite": True, - "selection": False, - "root": nodes + "selection": True, + "worldSpace": instance.data.get("worldSpace", True) } + if not instance.data.get("includeParentHierarchy", True): + # Set the root nodes if we don't want to include parents + # The roots are to be considered the ones that are the actual + # direct members of the set + options["root"] = roots + if int(cmds.about(version=True)) >= 2017: # Since Maya 2017 alembic supports multiple uv sets - write them. 
options["writeUVSets"] = True with avalon.maya.suspended_refresh(): - # with avalon.maya.maintained_selection(): - cmds.select(nodes, noExpand=True) - extract_alembic(file=path, - startFrame=start, - endFrame=end, - **options) + with avalon.maya.maintained_selection(): + cmds.select(nodes, noExpand=True) + extract_alembic(file=path, + startFrame=start, + endFrame=end, + **options) if "files" not in instance.data: instance.data["files"] = list() diff --git a/pype/plugins/maya/publish/extract_camera_alembic.py b/pype/plugins/maya/publish/extract_camera_alembic.py index 1c88a8c676..90f4954823 100644 --- a/pype/plugins/maya/publish/extract_camera_alembic.py +++ b/pype/plugins/maya/publish/extract_camera_alembic.py @@ -67,7 +67,7 @@ class ExtractCameraAlembic(pype.api.Extractor): job_str += ' -file "{0}"'.format(path) with lib.evaluation("off"): - with lib.no_refresh(): + with avalon.maya.suspended_refresh(): cmds.AbcExport(j=job_str, verbose=False) if "files" not in instance.data: diff --git a/pype/plugins/maya/publish/extract_camera_mayaAscii.py b/pype/plugins/maya/publish/extract_camera_mayaAscii.py index 0cb846c551..4a21e7bf58 100644 --- a/pype/plugins/maya/publish/extract_camera_mayaAscii.py +++ b/pype/plugins/maya/publish/extract_camera_mayaAscii.py @@ -127,7 +127,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor): self.log.info("Performing camera bakes for: {0}".format(transform)) with avalon.maya.maintained_selection(): with lib.evaluation("off"): - with lib.no_refresh(): + with avalon.maya.suspended_refresh(): baked = lib.bake_to_world_space( transform, frame_range=range_with_handles, diff --git a/pype/plugins/maya/publish/extract_fbx.py b/pype/plugins/maya/publish/extract_fbx.py new file mode 100644 index 0000000000..48dd5a135c --- /dev/null +++ b/pype/plugins/maya/publish/extract_fbx.py @@ -0,0 +1,216 @@ +import os + +from maya import cmds +import maya.mel as mel + +import pyblish.api +import avalon.maya + +import pype.api + + +class 
ExtractFBX(pype.api.Extractor): + """Extract FBX from Maya. + + This extracts reproducible FBX exports ignoring any of the settings set + on the local machine in the FBX export options window. + + All export settings are applied with the `FBXExport*` commands prior + to the `FBXExport` call itself. The options can be overridden with their + nice names as seen in the "options" property on this class. + + For more information on FBX exports see: + - https://knowledge.autodesk.com/support/maya/learn-explore/caas + /CloudHelp/cloudhelp/2016/ENU/Maya/files/GUID-6CCE943A-2ED4-4CEE-96D4 + -9CB19C28F4E0-htm.html + - http://forums.cgsociety.org/archive/index.php?t-1032853.html + - https://groups.google.com/forum/#!msg/python_inside_maya/cLkaSo361oE + /LKs9hakE28kJ + + """ + + order = pyblish.api.ExtractorOrder + label = "Extract FBX" + families = ["fbx"] + + @property + def options(self): + """Overridable options for FBX Export + + Given in the following format + - {NAME: EXPECTED TYPE} + + If the overridden option's type does not match, + the option is not included and a warning is logged. + + """ + + return { + "cameras": bool, + "smoothingGroups": bool, + "hardEdges": bool, + "tangents": bool, + "smoothMesh": bool, + "instances": bool, + # "referencedContainersContent": bool, # deprecated in Maya 2016+ + "bakeComplexAnimation": int, + "bakeComplexStart": int, + "bakeComplexEnd": int, + "bakeComplexStep": int, + "bakeResampleAnimation": bool, + "animationOnly": bool, + "useSceneName": bool, + "quaternion": str, # "euler" + "shapes": bool, + "skins": bool, + "constraints": bool, + "lights": bool, + "embeddedTextures": bool, + "inputConnections": bool, + "upAxis": str, # x, y or z, + "triangulate": bool + } + + @property + def default_options(self): + """The default options for FBX extraction. + + This includes shapes, skins, constraints, lights and incoming + connections and exports with the Y-axis as up-axis. + + By default this uses the time sliders start and end time. 
+ + """ + + start_frame = int(cmds.playbackOptions(query=True, + animationStartTime=True)) + end_frame = int(cmds.playbackOptions(query=True, + animationEndTime=True)) + + return { + "cameras": False, + "smoothingGroups": False, + "hardEdges": False, + "tangents": False, + "smoothMesh": False, + "instances": False, + "bakeComplexAnimation": True, + "bakeComplexStart": start_frame, + "bakeComplexEnd": end_frame, + "bakeComplexStep": 1, + "bakeResampleAnimation": True, + "animationOnly": False, + "useSceneName": False, + "quaternion": "euler", + "shapes": True, + "skins": True, + "constraints": False, + "lights": True, + "embeddedTextures": True, + "inputConnections": True, + "upAxis": "y", + "triangulate": False + } + + def parse_overrides(self, instance, options): + """Inspect data of instance to determine overridden options + + An instance may supply any of the overridable options + as data, the option is then added to the extraction. + + """ + + for key in instance.data: + if key not in self.options: + continue + + # Ensure the data is of correct type + value = instance.data[key] + if not isinstance(value, self.options[key]): + self.log.warning( + "Overridden attribute {key} was of " + "the wrong type: {invalid_type} " + "- should have been {valid_type}".format( + key=key, + invalid_type=type(value).__name__, + valid_type=self.options[key].__name__)) + continue + + options[key] = value + + return options + + def process(self, instance): + + # Ensure FBX plug-in is loaded + cmds.loadPlugin("fbxmaya", quiet=True) + + # Define output path + directory = self.staging_dir(instance) + filename = "{0}.fbx".format(instance.name) + path = os.path.join(directory, filename) + + # The export requires forward slashes because we need + # to format it into a string in a mel expression + path = path.replace('\\', '/') + + self.log.info("Extracting FBX to: {0}".format(path)) + + members = instance.data["setMembers"] + self.log.info("Members: {0}".format(members)) + 
self.log.info("Instance: {0}".format(instance[:])) + + # Parse export options + options = self.default_options + options = self.parse_overrides(instance, options) + self.log.info("Export options: {0}".format(options)) + + # Collect the start and end including handles + start = instance.data["startFrame"] + end = instance.data["endFrame"] + handles = instance.data.get("handles", 0) + if handles: + start -= handles + end += handles + + options['bakeComplexStart'] = start + options['bakeComplexEnd'] = end + + # First apply the default export settings to be fully consistent + # each time for successive publishes + mel.eval("FBXResetExport") + + # Apply the FBX overrides through MEL since the commands + # only work correctly in MEL according to online + # available discussions on the topic + for option, value in options.iteritems(): + key = option[0].upper() + option[1:] # uppercase first letter + + # Boolean must be passed as lower-case strings + # as to MEL standards + if isinstance(value, bool): + value = str(value).lower() + + template = "FBXExport{0} -v {1}" + if key == "UpAxis": + template = "FBXExport{0} {1}" + + cmd = template.format(key, value) + self.log.info(cmd) + mel.eval(cmd) + + # Never show the UI or generate a log + mel.eval("FBXExportShowUI -v false") + mel.eval("FBXExportGenerateLog -v false") + + # Export + with avalon.maya.maintained_selection(): + cmds.select(members, r=1, noExpand=True) + mel.eval('FBXExport -f "{}" -s'.format(path)) + + if "files" not in instance.data: + instance.data["files"] = list() + + instance.data["files"].append(filename) + + self.log.info("Extract FBX successful to: {0}".format(path)) diff --git a/pype/plugins/maya/publish/extract_look.py b/pype/plugins/maya/publish/extract_look.py index bf8dd92568..a30b1fe7d5 100644 --- a/pype/plugins/maya/publish/extract_look.py +++ b/pype/plugins/maya/publish/extract_look.py @@ -1,5 +1,7 @@ import os import json +import tempfile +import contextlib from collections import OrderedDict 
@contextlib.contextmanager
def no_workspace_dir():
    """Temporarily point Maya at a throwaway workspace directory.

    Note: this targets ``maya.cmds.workspace`` 'directory', NOT its
    'rootDirectory'.

    Maya automatically remaps image paths relative to the currently set
    workspace directory; pointing it at an empty temp folder while we
    export prevents that remapping.

    """
    # Remember where the workspace pointed before we touch it
    previous_dir = cmds.workspace(query=True, directory=True)

    # Redirect Maya to a fresh temporary folder
    scratch_dir = tempfile.mkdtemp()
    cmds.workspace(directory=scratch_dir)

    try:
        yield
    finally:
        try:
            cmds.workspace(directory=previous_dir)
        except RuntimeError:
            # The original workspace directory may no longer exist;
            # restoring it is best-effort only, so swallow the failure.
            pass

        # Clean up the scratch folder we created
        os.rmdir(scratch_dir)
def get_renderer_variables(renderlayer=None):
    """Retrieve the extension which has been set in the VRay settings.

    Will return "png" as extension fallback if the current renderer is VRay
    and the image format was never switched from its default.

    For Maya 2016.5 and up the renderSetup creates a renderSetupLayer node
    which starts with `rs`. Use the actual node name, do NOT use the
    `nice name`.

    Args:
        renderlayer (str): the node name of the renderlayer.

    Returns:
        dict: keys "ext", "filename_prefix", "padding", "filename_0".

    """

    renderer = lib.get_renderer(renderlayer or lib.get_current_renderlayer())
    render_attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS["default"])

    padding = cmds.getAttr("{}.{}".format(render_attrs["node"],
                                          render_attrs["padding"]))

    filename_0 = cmds.renderSettings(fullPath=True, firstImageName=True)[0]

    if renderer == "vray":
        # Maya's renderSettings function does not return V-Ray file extension
        # so we get the extension from vraySettings
        extension = cmds.getAttr("vraySettings.imageFormatStr")

        # When V-Ray image format has not been switched once from default
        # .png the getAttr command above returns None. As such we explicitly
        # set it to `.png`
        if extension is None:
            extension = "png"

        # NOTE(review): the token literal below was garbled in the paste
        # ("/_/"); restored as the conventional <Scene>/<Layer> template
        # used by format_output_filename — confirm against the repository.
        filename_prefix = "<Scene>/<Scene>_<Layer>/<Layer>"
    else:
        # Get the extension from the first image name; getAttr on
        # defaultRenderGlobals.imageFormat only returns an index number.
        filename_base = os.path.basename(filename_0)
        extension = os.path.splitext(filename_base)[-1].strip(".")
        filename_prefix = "<Scene>/<Scene>_<Layer>/<Layer>"

    return {"ext": extension,
            "filename_prefix": filename_prefix,
            "padding": padding,
            "filename_0": filename_0}


def preview_fname(folder, scene, layer, padding, ext):
    """Return output file path with #### for padding.

    Deadline requires the path to be formatted with # in place of numbers.
    For example `/path/to/render.####.png`

    Args:
        folder (str): The root output folder (image path)
        scene (str): The scene name
        layer (str): The layer name to be rendered
        padding (int): The padding length
        ext (str): The output file extension

    Returns:
        str

    """

    # Following the hardcoded <Scene>/<Scene>_<Layer>/<Layer> prefix
    output = "{scene}/{scene}_{layer}/{layer}.{number}.{ext}".format(
        scene=scene,
        layer=layer,
        number="#" * padding,
        ext=ext
    )

    return os.path.join(folder, output)
class MayaSubmitDeadline(pyblish.api.InstancePlugin):
    """Submit available render layers to Deadline

    Renders are submitted to a Deadline Web Service as
    supplied via the environment variable AVALON_DEADLINE

    """

    label = "Submit to Deadline"
    order = pyblish.api.IntegratorOrder
    hosts = ["maya"]
    families = ["renderlayer"]

    def process(self, instance):
        """Build a Deadline job payload for this render layer and submit it.

        Raises:
            Exception: when the Deadline Web Service rejects the submission.
        """

        AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
                                          "http://localhost:8082")
        assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"

        context = instance.context
        workspace = context.data["workspaceDir"]
        filepath = context.data["currentFile"]
        filename = os.path.basename(filepath)
        comment = context.data.get("comment", "")
        scene = os.path.splitext(filename)[0]
        dirname = os.path.join(workspace, "renders")
        renderlayer = instance.data['setMembers']          # rs_beauty
        renderlayer_name = instance.data['subset']         # beauty
        renderlayer_globals = instance.data["renderGlobals"]
        legacy_layers = renderlayer_globals["UseLegacyRenderLayers"]
        deadline_user = context.data.get("deadlineUser", getpass.getuser())
        jobname = "%s - %s" % (filename, instance.name)

        # Get the variables depending on the renderer
        render_variables = get_renderer_variables(renderlayer)
        output_filename_0 = preview_fname(folder=dirname,
                                          scene=scene,
                                          layer=renderlayer_name,
                                          padding=render_variables["padding"],
                                          ext=render_variables["ext"])

        try:
            # Ensure render folder exists
            os.makedirs(dirname)
        except OSError:
            # Already exists (or not creatable; submission will surface that)
            pass

        # Documentation for keys available at:
        # https://docs.thinkboxsoftware.com
        #    /products/deadline/8.0/1_User%20Manual/manual
        #    /manual-submission.html#job-info-file-options
        payload = {
            "JobInfo": {
                # Top-level group name
                "BatchName": filename,

                # Job name, as seen in Monitor
                "Name": jobname,

                # Arbitrary username, for visualisation in Monitor
                "UserName": deadline_user,

                "Plugin": instance.data.get("mayaRenderPlugin", "MayaBatch"),
                "Frames": "{start}-{end}x{step}".format(
                    start=int(instance.data["startFrame"]),
                    end=int(instance.data["endFrame"]),
                    step=int(instance.data["byFrameStep"]),
                ),

                "Comment": comment,

                # Optional, enable double-click to preview rendered
                # frames from Deadline Monitor
                "OutputFilename0": output_filename_0.replace("\\", "/"),
            },
            "PluginInfo": {
                # Input
                "SceneFile": filepath,

                # Output directory and filename
                "OutputFilePath": dirname.replace("\\", "/"),
                "OutputFilePrefix": render_variables["filename_prefix"],

                # Mandatory for Deadline
                "Version": cmds.about(version=True),

                # Only render layers are considered renderable in this
                # pipeline
                "UsingRenderLayers": True,

                # Use legacy Render Layer system
                "UseLegacyRenderLayers": legacy_layers,

                # Render only this layer
                "RenderLayer": renderlayer,

                # Determine which renderer to use from the file itself
                "Renderer": instance.data["renderer"],

                # Resolve relative references
                "ProjectPath": workspace,
            },

            # Mandatory for Deadline, may be empty
            "AuxFiles": []
        }

        # Include critical environment variables with submission
        keys = [
            # This will trigger `userSetup.py` on the slave
            # such that proper initialisation happens the same
            # way as it does on a local machine.
            # TODO(marcus): This won't work if the slaves don't
            # have access to these paths, such as if slaves are
            # running Linux and the submitter is on Windows.
            "PYTHONPATH",

            # todo: This is a temporary fix for yeti variables
            "PEREGRINEL_LICENSE",
            "REDSHIFT_MAYAEXTENSIONSPATH",
            "REDSHIFT_DISABLEOUTPUTLOCKFILES",
            "VRAY_FOR_MAYA2018_PLUGINS",
            "VRAY_PLUGINS",
            "VRAY_USE_THREAD_AFFINITY",
            "MAYA_MODULE_PATH"
        ]
        environment = dict({key: os.environ[key] for key in keys
                            if key in os.environ}, **api.Session)

        # NOTE(review): the ";" separator and the "P:" drive filter are
        # Windows/site-specific assumptions — confirm for other platforms.
        PATHS = os.environ["PATH"].split(";")
        environment["PATH"] = ";".join([p for p in PATHS
                                        if p.startswith("P:")])

        payload["JobInfo"].update({
            "EnvironmentKeyValue%d" % index: "{key}={value}".format(
                key=key,
                value=environment[key]
            ) for index, key in enumerate(environment)
        })

        # Include optional render globals
        render_globals = instance.data.get("renderGlobals", {})
        payload["JobInfo"].update(render_globals)

        plugin = payload["JobInfo"]["Plugin"]
        self.log.info("using render plugin : {}".format(plugin))

        self.preflight_check(instance)

        self.log.info("Submitting..")
        self.log.info(json.dumps(payload, indent=4, sort_keys=True))

        # E.g. http://192.168.0.1:8082/api/jobs
        url = "{}/api/jobs".format(AVALON_DEADLINE)
        response = requests.post(url, json=payload)
        if not response.ok:
            raise Exception(response.text)

        # Store output dir for unified publisher (filesequence)
        instance.data["outputDir"] = os.path.dirname(output_filename_0)
        instance.data["deadlineSubmissionJob"] = response.json()

    def preflight_check(self, instance):
        """Warn when startFrame, endFrame or byFrameStep are not integers.

        Values are only reported; the submission proceeds with the
        truncated integers used in the Frames string above.
        """

        for key in ("startFrame", "endFrame", "byFrameStep"):
            value = instance.data[key]

            if int(value) == value:
                continue

            self.log.warning(
                "%f=%d was rounded off to nearest integer"
                % (value, int(value))
            )


class VraySubmitDeadline(pyblish.api.InstancePlugin):
    """Export the scene to `.vrscene` files per frame per render layer.

    vrscene files are written out based on the template:
        vrayscene/<Scene>/<Scene>_<Layer>/<Layer>

    A dependency job is added for each layer to render the frames
    through V-Ray Standalone.

    """

    label = "Submit to Deadline ( vrscene )"
    order = pyblish.api.IntegratorOrder
    hosts = ["maya"]
    families = ["vrayscene"]

    def process(self, instance):
        """Submit the export job, then a dependent V-Ray render job.

        Raises:
            RuntimeError: when either Deadline submission fails.
        """

        AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
                                          "http://localhost:8082")
        assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"

        context = instance.context

        deadline_url = "{}/api/jobs".format(AVALON_DEADLINE)
        deadline_user = context.data.get("deadlineUser", getpass.getuser())

        filepath = context.data["currentFile"]
        filename = os.path.basename(filepath)
        task_name = "{} - {}".format(filename, instance.name)
        batch_name = "{} - (vrscene)".format(filename)

        # Get the output template for vrscenes
        vrscene_output = instance.data["vrsceneOutput"]

        # This is also the input file for the render job
        first_file = self.format_output_filename(instance,
                                                 filename,
                                                 vrscene_output)

        start_frame = int(instance.data["startFrame"])
        end_frame = int(instance.data["endFrame"])

        # Primary job: export .vrscene files through MayaBatch
        self.log.info("Submitting export job ..")

        payload = {
            "JobInfo": {
                # Top-level group name
                "BatchName": batch_name,

                # Job name, as seen in Monitor
                "Name": "Export {} [{}-{}]".format(task_name,
                                                   start_frame,
                                                   end_frame),

                # Arbitrary username, for visualisation in Monitor
                "UserName": deadline_user,

                "Plugin": "MayaBatch",
                "Frames": "{}-{}".format(start_frame, end_frame),
                "FramesPerTask": instance.data.get("framesPerTask", 1),

                "Comment": context.data.get("comment", ""),

                "OutputFilename0": os.path.dirname(first_file),
            },
            "PluginInfo": {

                # Renderer
                "Renderer": "vray",

                # Mandatory for Deadline
                "Version": cmds.about(version=True),

                # Input
                "SceneFile": filepath,

                "SkipExistingFrames": True,

                "UsingRenderLayers": True,

                "UseLegacyRenderLayers": True
            },

            # Mandatory for Deadline, may be empty
            "AuxFiles": []
        }

        environment = dict(AVALON_TOOLS="global;python36;maya2018")
        environment.update(api.Session.copy())

        jobinfo_environment = self.build_jobinfo_environment(environment)
        payload["JobInfo"].update(jobinfo_environment)

        self.log.info("Job Data:\n{}".format(json.dumps(payload)))

        response = requests.post(url=deadline_url, json=payload)
        if not response.ok:
            raise RuntimeError(response.text)

        # Store job to create the dependency chain
        dependency = response.json()

        if instance.data["suspendRenderJob"]:
            self.log.info("Skipping render job and publish job")
            return

        # Secondary job: render the exported vrscenes with V-Ray Standalone
        self.log.info("Submitting render job ..")

        ext = instance.data.get("ext", "exr")

        # Create output directory for renders
        # (fix: local name was previously misspelled "render_ouput")
        render_output = self.format_output_filename(
            instance,
            filename,
            instance.data["outputDir"],
            dir=True)

        self.log.info("Render output: %s" % render_output)

        # Update output dir
        instance.data["outputDir"] = render_output

        # Format output file name
        sequence_filename = ".".join([instance.name, ext])
        output_filename = os.path.join(render_output, sequence_filename)

        # Ensure folder exists
        if not os.path.exists(render_output):
            os.makedirs(render_output)

        payload_b = {
            "JobInfo": {

                "JobDependency0": dependency["_id"],
                "BatchName": batch_name,
                "Name": "Render {} [{}-{}]".format(task_name,
                                                   start_frame,
                                                   end_frame),
                "UserName": deadline_user,

                "Frames": "{}-{}".format(start_frame, end_frame),

                "Plugin": "Vray",
                "OverrideTaskExtraInfoNames": False,

                "OutputFilename0": render_output,
            },
            "PluginInfo": {

                "InputFilename": first_file,
                "OutputFilename": output_filename,
                "SeparateFilesPerFrame": True,
                "VRayEngine": "V-Ray",

                "Width": instance.data["resolution"][0],
                "Height": instance.data["resolution"][1],

            },
            "AuxFiles": [],
        }

        # Add the V-Ray render slave tool to the environment copy
        tools = environment["AVALON_TOOLS"] + ";vrayrenderslave"
        environment_b = deepcopy(environment)
        environment_b["AVALON_TOOLS"] = tools

        jobinfo_environment_b = self.build_jobinfo_environment(environment_b)
        payload_b["JobInfo"].update(jobinfo_environment_b)

        self.log.info(json.dumps(payload_b))

        # Post job to deadline
        response_b = requests.post(url=deadline_url, json=payload_b)
        if not response_b.ok:
            raise RuntimeError(response_b.text)

        # Add job for publish job
        if not instance.data.get("suspendPublishJob", False):
            instance.data["deadlineSubmissionJob"] = response_b.json()

    def build_command(self, instance):
        """Create command for Render.exe to export a V-Ray scene.

        Args:
            instance: the pyblish instance providing cameras and frame range.

        Returns:
            str

        """

        cmd = ('-r vray -proj {project} -cam {cam} -noRender -s {startFrame} '
               '-e {endFrame} -rl {layer} -exportFramesSeparate')

        # Get the camera (fix: local variable typo "cammera")
        camera = instance.data["cameras"][0]

        return cmd.format(project=instance.context.data["workspaceDir"],
                          cam=camera,
                          startFrame=instance.data["startFrame"],
                          endFrame=instance.data["endFrame"],
                          layer=instance.name)

    def build_jobinfo_environment(self, env):
        """Format environment keys and values to match Deadline requirements.

        Args:
            env (dict): environment dictionary

        Returns:
            dict: ``EnvironmentKeyValue<N>`` -> ``KEY=value`` entries.

        """
        return {"EnvironmentKeyValue%d" % index: "%s=%s" % (k, env[k])
                for index, k in enumerate(env)}

    def format_output_filename(self, instance, filename, template, dir=False):
        """Format the expected output file of the Export job.

        Example:
            <Scene>/<Scene>_<Layer>/<Layer>
            "shot010_v006/shot010_v006_CHARS/CHARS"

        Args:
            instance: the pyblish instance (provides name and startFrame).
            filename (str): the current scene file name (extension stripped).
            template (str): path template containing <Scene>/<Layer> tokens.
            dir (bool): when True return only the formatted directory path.

        Returns:
            str

        """

        def smart_replace(string, key_values):
            new_string = string
            for key, value in key_values.items():
                new_string = new_string.replace(key, value)
            return new_string

        # Ensure filename has no extension
        file_name, _ = os.path.splitext(filename)

        # Reformat without tokens.
        # Fix: the token keys were garbled to two identical empty strings
        # ("" -> replaced nothing); restored the <Scene>/<Layer> tokens.
        output_path = smart_replace(template,
                                    {"<Scene>": file_name,
                                     "<Layer>": instance.name})

        if dir:
            return output_path.replace("\\", "/")

        start_frame = int(instance.data["startFrame"])
        filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame)

        result = filename_zero.replace("\\", "/")

        return result
ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - families = ["animation", "pointcache"] + families = ['animation', "pointcache"] hosts = ['maya'] label = 'Animation Out Set Related Node Ids' actions = [pype.maya.action.SelectInvalidAction, pype.api.RepairAction] diff --git a/pype/plugins/maya/publish/validate_camera_attributes.py b/pype/plugins/maya/publish/validate_camera_attributes.py index 6e9b4b3060..8223d87c5d 100644 --- a/pype/plugins/maya/publish/validate_camera_attributes.py +++ b/pype/plugins/maya/publish/validate_camera_attributes.py @@ -15,7 +15,7 @@ class ValidateCameraAttributes(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - families = ["camera"] + families = ['camera'] hosts = ['maya'] label = 'Camera Attributes' actions = [pype.maya.action.SelectInvalidAction] diff --git a/pype/plugins/maya/publish/validate_camera_contents.py b/pype/plugins/maya/publish/validate_camera_contents.py index a8f8bcc2fd..f3e2a6c121 100644 --- a/pype/plugins/maya/publish/validate_camera_contents.py +++ b/pype/plugins/maya/publish/validate_camera_contents.py @@ -16,7 +16,7 @@ class ValidateCameraContents(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - families = ["camera"] + families = ['camera'] hosts = ['maya'] label = 'Camera Contents' actions = [pype.maya.action.SelectInvalidAction] diff --git a/pype/plugins/maya/publish/validate_current_renderlayer_renderable.py b/pype/plugins/maya/publish/validate_current_renderlayer_renderable.py index e927288548..aa6ffa555c 100644 --- a/pype/plugins/maya/publish/validate_current_renderlayer_renderable.py +++ b/pype/plugins/maya/publish/validate_current_renderlayer_renderable.py @@ -1,6 +1,7 @@ import pyblish.api from maya import cmds +from pype.plugin import contextplugin_should_run class ValidateCurrentRenderLayerIsRenderable(pyblish.api.ContextPlugin): @@ -20,7 +21,12 @@ class 
ValidateCurrentRenderLayerIsRenderable(pyblish.api.ContextPlugin): hosts = ["maya"] families = ["renderlayer"] - def process(self, instance): + def process(self, context): + + # Workaround bug pyblish-base#250 + if not contextplugin_should_run(self, context): + return + layer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) cameras = cmds.ls(type="camera", long=True) renderable = any(c for c in cameras if cmds.getAttr(c + ".renderable")) diff --git a/pype/plugins/maya/publish/validate_deadline_connection.py b/pype/plugins/maya/publish/validate_deadline_connection.py index d0f6e9e54d..9052088d58 100644 --- a/pype/plugins/maya/publish/validate_deadline_connection.py +++ b/pype/plugins/maya/publish/validate_deadline_connection.py @@ -1,8 +1,8 @@ import pyblish.api -import os import avalon.api as api from avalon.vendor import requests +from pype.plugin import contextplugin_should_run class ValidateDeadlineConnection(pyblish.api.ContextPlugin): @@ -13,12 +13,11 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin): hosts = ["maya"] families = ["renderlayer"] - def process(self, instance): + def process(self, context): - # AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE", - # "http://localhost:8082") - # - # assert AVALON_DEADLINE is not None, "Requires AVALON_DEADLINE" + # Workaround bug pyblish-base#250 + if not contextplugin_should_run(self, context): + return try: deadline_url = os.environ["DEADLINE_REST_URL"] @@ -26,7 +25,7 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin): self.log.error("Deadline REST API url not found.") # Check response - response = requests.get(deadline_url) + response = requests.get(AVALON_DEADLINE) assert response.ok, "Response must be ok" assert response.text.startswith("Deadline Web Service "), ( "Web service did not respond with 'Deadline Web Service'" diff --git a/pype/plugins/maya/publish/validate_instancer_content.py b/pype/plugins/maya/publish/validate_instancer_content.py index 
88566b7e1d..68ee17bca6 100644 --- a/pype/plugins/maya/publish/validate_instancer_content.py +++ b/pype/plugins/maya/publish/validate_instancer_content.py @@ -12,7 +12,7 @@ class ValidateInstancerContent(pyblish.api.InstancePlugin): """ order = pyblish.api.ValidatorOrder label = 'Instancer Content' - families = ["instancer"] + families = ['instancer'] def process(self, instance): diff --git a/pype/plugins/maya/publish/validate_instancer_frame_ranges.py b/pype/plugins/maya/publish/validate_instancer_frame_ranges.py index 6931c59e48..f31014e6e9 100644 --- a/pype/plugins/maya/publish/validate_instancer_frame_ranges.py +++ b/pype/plugins/maya/publish/validate_instancer_frame_ranges.py @@ -44,7 +44,7 @@ class ValidateInstancerFrameRanges(pyblish.api.InstancePlugin): """ order = pyblish.api.ValidatorOrder label = 'Instancer Cache Frame Ranges' - families = ["instancer"] + families = ['instancer'] @classmethod def get_invalid(cls, instance): diff --git a/pype/plugins/maya/publish/validate_joints_hidden.py b/pype/plugins/maya/publish/validate_joints_hidden.py index acc1dd07a7..4b20060602 100644 --- a/pype/plugins/maya/publish/validate_joints_hidden.py +++ b/pype/plugins/maya/publish/validate_joints_hidden.py @@ -19,7 +19,7 @@ class ValidateJointsHidden(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ["rig"] + families = ['rig'] category = 'rig' version = (0, 1, 0) label = "Joints Hidden" diff --git a/pype/plugins/maya/publish/validate_look_contents.py b/pype/plugins/maya/publish/validate_look_contents.py index eba6aef150..d9cd2c92b1 100644 --- a/pype/plugins/maya/publish/validate_look_contents.py +++ b/pype/plugins/maya/publish/validate_look_contents.py @@ -18,7 +18,7 @@ class ValidateLookContents(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - families = ["look"] + families = ['look'] hosts = ['maya'] label = 'Look Data Contents' actions = [pype.maya.action.SelectInvalidAction] diff --git 
a/pype/plugins/maya/publish/validate_look_default_shaders_connections.py b/pype/plugins/maya/publish/validate_look_default_shaders_connections.py index 6b8781eab0..af355e178b 100644 --- a/pype/plugins/maya/publish/validate_look_default_shaders_connections.py +++ b/pype/plugins/maya/publish/validate_look_default_shaders_connections.py @@ -17,7 +17,7 @@ class ValidateLookDefaultShadersConnections(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - families = ["look"] + families = ['look'] hosts = ['maya'] label = 'Look Default Shader Connections' diff --git a/pype/plugins/maya/publish/validate_look_id_reference_edits.py b/pype/plugins/maya/publish/validate_look_id_reference_edits.py index e1eac26577..6a28daa7dc 100644 --- a/pype/plugins/maya/publish/validate_look_id_reference_edits.py +++ b/pype/plugins/maya/publish/validate_look_id_reference_edits.py @@ -17,7 +17,7 @@ class ValidateLookIdReferenceEdits(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - families = ["look"] + families = ['look'] hosts = ['maya'] label = 'Look Id Reference Edits' actions = [pype.maya.action.SelectInvalidAction, diff --git a/pype/plugins/maya/publish/validate_look_members_unique.py b/pype/plugins/maya/publish/validate_look_members_unique.py index 722597edce..e9fb20d8d6 100644 --- a/pype/plugins/maya/publish/validate_look_members_unique.py +++ b/pype/plugins/maya/publish/validate_look_members_unique.py @@ -23,7 +23,7 @@ class ValidateUniqueRelationshipMembers(pyblish.api.InstancePlugin): order = pype.api.ValidatePipelineOrder label = 'Look members unique' hosts = ['maya'] - families = ["look"] + families = ['look'] actions = [pype.maya.action.SelectInvalidAction, pype.maya.action.GenerateUUIDsOnInvalidAction] diff --git a/pype/plugins/maya/publish/validate_look_no_default_shaders.py b/pype/plugins/maya/publish/validate_look_no_default_shaders.py index ca347925d9..dbef871a16 100644 --- 
a/pype/plugins/maya/publish/validate_look_no_default_shaders.py +++ b/pype/plugins/maya/publish/validate_look_no_default_shaders.py @@ -24,13 +24,13 @@ class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder + 0.01 - families = ["look"] + families = ['look'] hosts = ['maya'] label = 'Look No Default Shaders' actions = [pype.maya.action.SelectInvalidAction] DEFAULT_SHADERS = {"lambert1", "initialShadingGroup", - "initialParticleSE", "particleCloud1"} + "initialParticleSE", "particleCloud1"} def process(self, instance): """Process all the nodes in the instance""" diff --git a/pype/plugins/maya/publish/validate_look_sets.py b/pype/plugins/maya/publish/validate_look_sets.py index 1819602430..f3952b53a4 100644 --- a/pype/plugins/maya/publish/validate_look_sets.py +++ b/pype/plugins/maya/publish/validate_look_sets.py @@ -6,11 +6,11 @@ import pype.api class ValidateLookSets(pyblish.api.InstancePlugin): - """Validate if any sets are missing from the instance and look data + """Validate if any sets relationships are not being collected. A shader can be assigned to a node that is missing a Colorbleed ID. Because it is missing the ID it has not been collected in the instance. - This validator ensures no relationships and thus considers it invalid + This validator ensures those relationships and thus considers it invalid if a relationship was not collected. When the relationship needs to be maintained the artist might need to @@ -25,8 +25,10 @@ class ValidateLookSets(pyblish.api.InstancePlugin): - Displacement objectSets (like V-Ray): - It is best practice to add the transform group of the shape to the - displacement objectSet. + It is best practice to add the transform of the shape to the + displacement objectSet. Any parent groups will not work as groups + do not receive a Colorbleed Id. As such the assignments need to be + made to the shapes and their transform. 
Example content: [asset_GRP|geometry_GRP|body_GES, @@ -37,7 +39,7 @@ class ValidateLookSets(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - families = ["look"] + families = ['look'] hosts = ['maya'] label = 'Look Sets' actions = [pype.maya.action.SelectInvalidAction] @@ -70,13 +72,6 @@ class ValidateLookSets(pyblish.api.InstancePlugin): # check if any objectSets are not present ion the relationships missing_sets = [s for s in sets if s not in relationships] - - for set in missing_sets: - if set.endswith("_SET"): - missing_sets.remove(set) - cls.log.info("Missing Sets " - "'{}'".format(missing_sets)) - if missing_sets: # A set of this node is not coming along, this is wrong! cls.log.error("Missing sets '{}' for node " diff --git a/pype/plugins/maya/publish/validate_look_single_shader.py b/pype/plugins/maya/publish/validate_look_single_shader.py index 8af678eb6a..1b9ebffced 100644 --- a/pype/plugins/maya/publish/validate_look_single_shader.py +++ b/pype/plugins/maya/publish/validate_look_single_shader.py @@ -13,7 +13,7 @@ class ValidateSingleShader(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - families = ["look"] + families = ['look'] hosts = ['maya'] label = 'Look Single Shader Per Shape' actions = [pype.maya.action.SelectInvalidAction] diff --git a/pype/plugins/maya/publish/validate_maya_units.py b/pype/plugins/maya/publish/validate_maya_units.py index 2659444184..8610c4dd25 100644 --- a/pype/plugins/maya/publish/validate_maya_units.py +++ b/pype/plugins/maya/publish/validate_maya_units.py @@ -36,7 +36,7 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): assert fps and fps == asset_fps, "Scene must be %s FPS" % asset_fps @classmethod - def repair(cls): + def repair(cls, context): """Fix the current FPS setting of the scene, set to PAL(25.0 fps)""" cls.log.info("Setting angular unit to 'degrees'") diff --git a/pype/plugins/maya/publish/validate_mesh_has_uv.py 
b/pype/plugins/maya/publish/validate_mesh_has_uv.py index f64e414c35..0eb235db0f 100644 --- a/pype/plugins/maya/publish/validate_mesh_has_uv.py +++ b/pype/plugins/maya/publish/validate_mesh_has_uv.py @@ -47,7 +47,7 @@ class ValidateMeshHasUVs(pyblish.api.InstancePlugin): order = pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model"] + families = ['model'] category = 'geometry' label = 'Mesh Has UVs' actions = [pype.maya.action.SelectInvalidAction] diff --git a/pype/plugins/maya/publish/validate_mesh_lamina_faces.py b/pype/plugins/maya/publish/validate_mesh_lamina_faces.py index 46e8128819..9e6e0f40b7 100644 --- a/pype/plugins/maya/publish/validate_mesh_lamina_faces.py +++ b/pype/plugins/maya/publish/validate_mesh_lamina_faces.py @@ -14,7 +14,7 @@ class ValidateMeshLaminaFaces(pyblish.api.InstancePlugin): order = pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model"] + families = ['model'] category = 'geometry' version = (0, 1, 0) label = 'Mesh Lamina Faces' diff --git a/pype/plugins/maya/publish/validate_mesh_no_negative_scale.py b/pype/plugins/maya/publish/validate_mesh_no_negative_scale.py index 913c8c2787..ed2ded652b 100644 --- a/pype/plugins/maya/publish/validate_mesh_no_negative_scale.py +++ b/pype/plugins/maya/publish/validate_mesh_no_negative_scale.py @@ -19,7 +19,7 @@ class ValidateMeshNoNegativeScale(pyblish.api.Validator): order = pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model"] + families = ['model'] label = 'Mesh No Negative Scale' actions = [pype.maya.action.SelectInvalidAction] diff --git a/pype/plugins/maya/publish/validate_mesh_non_manifold.py b/pype/plugins/maya/publish/validate_mesh_non_manifold.py index dbc2d4ff59..961e6e42ef 100644 --- a/pype/plugins/maya/publish/validate_mesh_non_manifold.py +++ b/pype/plugins/maya/publish/validate_mesh_non_manifold.py @@ -15,7 +15,7 @@ class ValidateMeshNonManifold(pyblish.api.Validator): order = pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model"] + 
families = ['model'] label = 'Mesh Non-Manifold Vertices/Edges' actions = [pype.maya.action.SelectInvalidAction] diff --git a/pype/plugins/maya/publish/validate_mesh_non_zero_edge.py b/pype/plugins/maya/publish/validate_mesh_non_zero_edge.py index 8194b68da1..6ade81764b 100644 --- a/pype/plugins/maya/publish/validate_mesh_non_zero_edge.py +++ b/pype/plugins/maya/publish/validate_mesh_non_zero_edge.py @@ -17,7 +17,7 @@ class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin): """ order = pype.api.ValidateMeshOrder - families = ["model"] + families = ['model'] hosts = ['maya'] category = 'geometry' version = (0, 1, 0) diff --git a/pype/plugins/maya/publish/validate_mesh_normals_unlocked.py b/pype/plugins/maya/publish/validate_mesh_normals_unlocked.py index 22e3a2d6dd..faa1c8e248 100644 --- a/pype/plugins/maya/publish/validate_mesh_normals_unlocked.py +++ b/pype/plugins/maya/publish/validate_mesh_normals_unlocked.py @@ -15,7 +15,7 @@ class ValidateMeshNormalsUnlocked(pyblish.api.Validator): order = pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model"] + families = ['model'] category = 'geometry' version = (0, 1, 0) label = 'Mesh Normals Unlocked' diff --git a/pype/plugins/maya/publish/validate_mesh_shader_connections.py b/pype/plugins/maya/publish/validate_mesh_shader_connections.py index 9201bb6e8d..048b146886 100644 --- a/pype/plugins/maya/publish/validate_mesh_shader_connections.py +++ b/pype/plugins/maya/publish/validate_mesh_shader_connections.py @@ -24,7 +24,7 @@ def get_invalid_sets(shape): """ invalid = [] - sets = cmds.listSets(object=shape, t=1, extendToShape=False) + sets = cmds.listSets(object=shape, t=1, extendToShape=False) or [] for s in sets: members = cmds.sets(s, query=True, nodesOnly=True) if not members: @@ -75,7 +75,7 @@ class ValidateMeshShaderConnections(pyblish.api.InstancePlugin): order = pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model"] + families = ['model'] label = "Mesh Shader Connections" actions = 
[pype.maya.action.SelectInvalidAction, pype.api.RepairAction] @@ -93,7 +93,9 @@ class ValidateMeshShaderConnections(pyblish.api.InstancePlugin): def get_invalid(instance): shapes = cmds.ls(instance[:], dag=1, leaf=1, shapes=1, long=True) - shapes = cmds.ls(shapes, shapes=True, noIntermediate=True, long=True) + + # todo: allow to check anything that can have a shader + shapes = cmds.ls(shapes, noIntermediate=True, long=True, type="mesh") invalid = [] for shape in shapes: diff --git a/pype/plugins/maya/publish/validate_mesh_single_uv_set.py b/pype/plugins/maya/publish/validate_mesh_single_uv_set.py index e3e330a0cf..eeddeb3c9c 100644 --- a/pype/plugins/maya/publish/validate_mesh_single_uv_set.py +++ b/pype/plugins/maya/publish/validate_mesh_single_uv_set.py @@ -17,7 +17,7 @@ class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin): order = pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model", 'studio.pointcache'] + families = ['model', 'pointcache'] category = 'uv' optional = True version = (0, 1, 0) diff --git a/pype/plugins/maya/publish/validate_mesh_uv_set_map1.py b/pype/plugins/maya/publish/validate_mesh_uv_set_map1.py index e4733a1c0a..8794fa9de1 100644 --- a/pype/plugins/maya/publish/validate_mesh_uv_set_map1.py +++ b/pype/plugins/maya/publish/validate_mesh_uv_set_map1.py @@ -17,7 +17,7 @@ class ValidateMeshUVSetMap1(pyblish.api.InstancePlugin): order = pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model"] + families = ['model'] optional = True label = "Mesh has map1 UV Set" actions = [pype.maya.action.SelectInvalidAction, diff --git a/pype/plugins/maya/publish/validate_mesh_vertices_have_edges.py b/pype/plugins/maya/publish/validate_mesh_vertices_have_edges.py index b5efae440d..2aaf11f6c0 100644 --- a/pype/plugins/maya/publish/validate_mesh_vertices_have_edges.py +++ b/pype/plugins/maya/publish/validate_mesh_vertices_have_edges.py @@ -59,7 +59,7 @@ class ValidateMeshVerticesHaveEdges(pyblish.api.InstancePlugin): order = 
pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model"] + families = ['model'] category = 'geometry' label = 'Mesh Vertices Have Edges' actions = [pype.maya.action.SelectInvalidAction, diff --git a/pype/plugins/maya/publish/validate_model_content.py b/pype/plugins/maya/publish/validate_model_content.py index ed73381f40..e76e102672 100644 --- a/pype/plugins/maya/publish/validate_model_content.py +++ b/pype/plugins/maya/publish/validate_model_content.py @@ -63,7 +63,8 @@ class ValidateModelContent(pyblish.api.InstancePlugin): cls.log.error("Must have exactly one top group") if len(assemblies) == 0: cls.log.warning("No top group found. " - "(Are there objects in the instance?)") + "(Are there objects in the instance?" + " Or is it parented in another group?)") return assemblies or True def _is_visible(node): diff --git a/pype/plugins/maya/publish/validate_no_default_camera.py b/pype/plugins/maya/publish/validate_no_default_camera.py index a5714baaf5..3d523e8974 100644 --- a/pype/plugins/maya/publish/validate_no_default_camera.py +++ b/pype/plugins/maya/publish/validate_no_default_camera.py @@ -15,7 +15,7 @@ class ValidateNoDefaultCameras(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ["camera"] + families = ['camera'] version = (0, 1, 0) label = "No Default Cameras" actions = [pype.maya.action.SelectInvalidAction] diff --git a/pype/plugins/maya/publish/validate_no_namespace.py b/pype/plugins/maya/publish/validate_no_namespace.py index a4304b7982..626dbcd4b8 100644 --- a/pype/plugins/maya/publish/validate_no_namespace.py +++ b/pype/plugins/maya/publish/validate_no_namespace.py @@ -18,7 +18,7 @@ class ValidateNoNamespace(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ["model"] + families = ['model'] category = 'cleanup' version = (0, 1, 0) label = 'No Namespaces' diff --git a/pype/plugins/maya/publish/validate_no_null_transforms.py 
b/pype/plugins/maya/publish/validate_no_null_transforms.py index 98b9eb20c0..156e0528a1 100644 --- a/pype/plugins/maya/publish/validate_no_null_transforms.py +++ b/pype/plugins/maya/publish/validate_no_null_transforms.py @@ -39,7 +39,7 @@ class ValidateNoNullTransforms(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ["model"] + families = ['model'] category = 'cleanup' version = (0, 1, 0) label = 'No Empty/Null Transforms' diff --git a/pype/plugins/maya/publish/validate_no_unknown_nodes.py b/pype/plugins/maya/publish/validate_no_unknown_nodes.py index a14f2ba5cd..cc2b9fc009 100644 --- a/pype/plugins/maya/publish/validate_no_unknown_nodes.py +++ b/pype/plugins/maya/publish/validate_no_unknown_nodes.py @@ -18,7 +18,7 @@ class ValidateNoUnknownNodes(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ["model", 'studio.rig'] + families = ['model', 'rig'] optional = True label = "Unknown Nodes" actions = [pype.maya.action.SelectInvalidAction] diff --git a/pype/plugins/maya/publish/validate_node_ids_deformed_shapes.py b/pype/plugins/maya/publish/validate_node_ids_deformed_shapes.py index 5289c4dc70..d8880e5c85 100644 --- a/pype/plugins/maya/publish/validate_node_ids_deformed_shapes.py +++ b/pype/plugins/maya/publish/validate_node_ids_deformed_shapes.py @@ -17,7 +17,7 @@ class ValidateNodeIdsDeformedShape(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - families = ["look"] + families = ['look'] hosts = ['maya'] label = 'Deformed shape ids' actions = [pype.maya.action.SelectInvalidAction, pype.api.RepairAction] diff --git a/pype/plugins/maya/publish/validate_node_ids_in_database.py b/pype/plugins/maya/publish/validate_node_ids_in_database.py index bf0e9f7d43..7347ce2ab2 100644 --- a/pype/plugins/maya/publish/validate_node_ids_in_database.py +++ b/pype/plugins/maya/publish/validate_node_ids_in_database.py @@ -23,7 +23,8 @@ class 
ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin): hosts = ['maya'] families = ["*"] - actions = [pype.maya.action.SelectInvalidAction] + actions = [pype.maya.action.SelectInvalidAction, + pype.maya.action.GenerateUUIDsOnInvalidAction] def process(self, instance): invalid = self.get_invalid(instance) diff --git a/pype/plugins/maya/publish/validate_node_no_ghosting.py b/pype/plugins/maya/publish/validate_node_no_ghosting.py index 51448cf150..8f40628c61 100644 --- a/pype/plugins/maya/publish/validate_node_no_ghosting.py +++ b/pype/plugins/maya/publish/validate_node_no_ghosting.py @@ -19,7 +19,7 @@ class ValidateNodeNoGhosting(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ["model", 'studio.rig'] + families = ['model', 'rig'] label = "No Ghosting" actions = [pype.maya.action.SelectInvalidAction] diff --git a/pype/plugins/maya/publish/validate_render_image_rule.py b/pype/plugins/maya/publish/validate_render_image_rule.py index 377dbfeadc..9a718afc13 100644 --- a/pype/plugins/maya/publish/validate_render_image_rule.py +++ b/pype/plugins/maya/publish/validate_render_image_rule.py @@ -9,7 +9,7 @@ def get_file_rule(rule): return mel.eval('workspace -query -fileRuleEntry "{}"'.format(rule)) -class ValidateRenderImageRule(pyblish.api.InstancePlugin): +class ValidateRenderImageRule(pyblish.api.ContextPlugin): """Validates "images" file rule is set to "renders/" """ @@ -19,7 +19,7 @@ class ValidateRenderImageRule(pyblish.api.InstancePlugin): hosts = ["maya"] families = ["renderlayer"] - def process(self, instance): + def process(self, context): assert get_file_rule("images") == "renders", ( "Workspace's `images` file rule must be set to: renders" diff --git a/pype/plugins/maya/publish/validate_render_no_default_cameras.py b/pype/plugins/maya/publish/validate_render_no_default_cameras.py index d20026534b..439cfb69d3 100644 --- a/pype/plugins/maya/publish/validate_render_no_default_cameras.py +++ 
b/pype/plugins/maya/publish/validate_render_no_default_cameras.py @@ -3,7 +3,6 @@ from maya import cmds import pyblish.api import pype.api import pype.maya.action -import pype.maya.lib as lib class ValidateRenderNoDefaultCameras(pyblish.api.InstancePlugin): @@ -11,27 +10,21 @@ class ValidateRenderNoDefaultCameras(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ["renderlayer"] + families = ['renderlayer'] label = "No Default Cameras Renderable" actions = [pype.maya.action.SelectInvalidAction] @staticmethod def get_invalid(instance): - layer = instance.data["setMembers"] + renderable = set(instance.data["cameras"]) # Collect default cameras cameras = cmds.ls(type='camera', long=True) - defaults = [cam for cam in cameras if - cmds.camera(cam, query=True, startupCamera=True)] + defaults = set(cam for cam in cameras if + cmds.camera(cam, query=True, startupCamera=True)) - invalid = [] - with lib.renderlayer(layer): - for cam in defaults: - if cmds.getAttr(cam + ".renderable"): - invalid.append(cam) - - return invalid + return [cam for cam in renderable if cam in defaults] def process(self, instance): """Process all the cameras in the instance""" diff --git a/pype/plugins/maya/publish/validate_render_single_camera.py b/pype/plugins/maya/publish/validate_render_single_camera.py index da8aacd930..b8561a69c9 100644 --- a/pype/plugins/maya/publish/validate_render_single_camera.py +++ b/pype/plugins/maya/publish/validate_render_single_camera.py @@ -1,9 +1,6 @@ -from maya import cmds - import pyblish.api import pype.api import pype.maya.action -import pype.maya.lib as lib class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): @@ -18,32 +15,29 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): """ order = pype.api.ValidateContentsOrder - hosts = ['maya'] - families = ["renderlayer"] label = "Render Single Camera" + hosts = ['maya'] + families = ["renderlayer", + "vrayscene"] actions = 
[pype.maya.action.SelectInvalidAction] - @staticmethod - def get_invalid(instance): - - layer = instance.data["setMembers"] - - cameras = cmds.ls(type='camera', long=True) - - with lib.renderlayer(layer): - renderable = [cam for cam in cameras if - cmds.getAttr(cam + ".renderable")] - - if len(renderable) == 0: - raise RuntimeError("No renderable cameras found.") - elif len(renderable) > 1: - return renderable - else: - return [] - def process(self, instance): """Process all the cameras in the instance""" invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Multiple renderable cameras" - "found: {0}".format(invalid)) + raise RuntimeError("Invalid cameras for render.") + + @classmethod + def get_invalid(cls, instance): + + cameras = instance.data.get("cameras", []) + + if len(cameras) > 1: + cls.log.error("Multiple renderable cameras found for %s: %s " % + (instance.data["setMembers"], cameras)) + return [instance.data["setMembers"]] + cameras + + elif len(cameras) < 1: + cls.log.error("No renderable cameras found for %s " % + instance.data["setMembers"]) + return [instance.data["setMembers"]] diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py index 6dddf6790d..0450cb83b5 100644 --- a/pype/plugins/maya/publish/validate_rendersettings.py +++ b/pype/plugins/maya/publish/validate_rendersettings.py @@ -50,37 +50,33 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): invalid = False renderer = instance.data['renderer'] - layer_node = instance.data['setMembers'] + layer = instance.data['setMembers'] - # Collect the filename prefix in the render layer - with lib.renderlayer(layer_node): + # Get the node attributes for current renderer + attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS['default']) + prefix = lib.get_attr_in_layer("{node}.{prefix}".format(**attrs), + layer=layer) + padding = lib.get_attr_in_layer("{node}.{padding}".format(**attrs), + layer=layer) - 
render_attrs = lib.RENDER_ATTRS.get(renderer, - lib.RENDER_ATTRS['default']) - node = render_attrs["node"] - padding_attr = render_attrs["padding"] - prefix_attr = render_attrs["prefix"] + anim_override = lib.get_attr_in_layer("defaultRenderGlobals.animation", + layer=layer) + if not anim_override: + invalid = True + cls.log.error("Animation needs to be enabled. Use the same " + "frame for start and end to render single frame") - prefix = cmds.getAttr("{}.{}".format(node, prefix_attr)) - padding = cmds.getAttr("{}.{}".format(node, padding_attr)) + fname_prefix = cls.RENDERER_PREFIX.get(renderer, + cls.DEFAULT_PREFIX) + if prefix != fname_prefix: + invalid = True + cls.log.error("Wrong file name prefix: %s (expected: %s)" + % (prefix, fname_prefix)) - anim_override = cmds.getAttr("defaultRenderGlobals.animation") - if not anim_override: - invalid = True - cls.log.error("Animation needs to be enabled. Use the same " - "frame for start and end to render single frame") - - fname_prefix = cls.RENDERER_PREFIX.get(renderer, - cls.DEFAULT_PREFIX) - if prefix != fname_prefix: - invalid = True - cls.log.error("Wrong file name prefix, expecting %s" - % fname_prefix) - - if padding != cls.DEFAULT_PADDING: - invalid = True - cls.log.error("Expecting padding of {} ( {} )".format( - cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING)) + if padding != cls.DEFAULT_PADDING: + invalid = True + cls.log.error("Expecting padding of {} ( {} )".format( + cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING)) return invalid diff --git a/pype/plugins/maya/publish/validate_scene_set_workspace.py b/pype/plugins/maya/publish/validate_scene_set_workspace.py index e342e28d34..778c7eae86 100644 --- a/pype/plugins/maya/publish/validate_scene_set_workspace.py +++ b/pype/plugins/maya/publish/validate_scene_set_workspace.py @@ -30,7 +30,6 @@ class ValidateSceneSetWorkspace(pyblish.api.ContextPlugin): order = pype.api.ValidatePipelineOrder hosts = ['maya'] - families = ["model"] category = 'scene' version = (0, 
1, 0) label = 'Maya Workspace Set' diff --git a/pype/plugins/maya/publish/validate_shape_default_names.py b/pype/plugins/maya/publish/validate_shape_default_names.py index 2587562299..9beb77872a 100644 --- a/pype/plugins/maya/publish/validate_shape_default_names.py +++ b/pype/plugins/maya/publish/validate_shape_default_names.py @@ -33,7 +33,7 @@ class ValidateShapeDefaultNames(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ["model"] + families = ['model'] category = 'cleanup' optional = True version = (0, 1, 0) diff --git a/pype/plugins/maya/publish/validate_shape_render_stats.py b/pype/plugins/maya/publish/validate_shape_render_stats.py index 6bc440711e..b803cd366b 100644 --- a/pype/plugins/maya/publish/validate_shape_render_stats.py +++ b/pype/plugins/maya/publish/validate_shape_render_stats.py @@ -11,7 +11,7 @@ class ValidateShapeRenderStats(pyblish.api.Validator): order = pype.api.ValidateMeshOrder hosts = ['maya'] - families = ["model"] + families = ['model'] label = 'Shape Default Render Stats' actions = [pype.maya.action.SelectInvalidAction, pype.api.RepairAction] diff --git a/pype/plugins/maya/publish/validate_single_assembly.py b/pype/plugins/maya/publish/validate_single_assembly.py index 12aa2848f2..6f40dfbfd2 100644 --- a/pype/plugins/maya/publish/validate_single_assembly.py +++ b/pype/plugins/maya/publish/validate_single_assembly.py @@ -19,7 +19,7 @@ class ValidateSingleAssembly(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ['rig', 'studio.animation'] + families = ['rig', 'animation'] label = 'Single Assembly' def process(self, instance): diff --git a/pype/plugins/maya/publish/validate_skinCluster_deformer_set.py b/pype/plugins/maya/publish/validate_skinCluster_deformer_set.py new file mode 100644 index 0000000000..71f7eea31b --- /dev/null +++ b/pype/plugins/maya/publish/validate_skinCluster_deformer_set.py @@ -0,0 +1,72 @@ +from maya import cmds + 
+import pyblish.api +import pype.api +import pype.maya.action + + +class ValidateSkinclusterDeformerSet(pyblish.api.InstancePlugin): + """Validate skinClusters on meshes have valid member relationships. + + In rare cases it can happen that a mesh has a skinCluster in its history + but it is *not* included in the deformer relationship history. If this is + the case then FBX will not export the skinning. + + """ + + order = pype.api.ValidateContentsOrder + hosts = ['maya'] + families = ['fbx'] + label = "Skincluster Deformer Relationships" + actions = [pype.maya.action.SelectInvalidAction] + + def process(self, instance): + """Process all the transform nodes in the instance""" + invalid = self.get_invalid(instance) + + if invalid: + raise ValueError("Invalid skinCluster relationships " + "found on meshes: {0}".format(invalid)) + + @classmethod + def get_invalid(cls, instance): + + meshes = cmds.ls(instance, type="mesh", noIntermediate=True, long=True) + invalid = list() + + for mesh in meshes: + history = cmds.listHistory(mesh) or [] + skins = cmds.ls(history, type="skinCluster") + + # Ensure at most one skinCluster + assert len(skins) <= 1, "Cannot have more than one skinCluster" + + if skins: + skin = skins[0] + + # Ensure the mesh is also in the skinCluster set + # otherwise the skin will not be exported correctly + # by the FBX Exporter. 
+ deformer_sets = cmds.listSets(object=mesh, type=2) + for deformer_set in deformer_sets: + used_by = cmds.listConnections(deformer_set + ".usedBy", + source=True, + destination=False) + + # Ignore those that don't seem to have a usedBy connection + if not used_by: + continue + + # We have a matching deformer set relationship + if skin in set(used_by): + break + + else: + invalid.append(mesh) + cls.log.warning( + "Mesh has skinCluster in history but is not included " + "in its deformer relationship set: " + "{0} (skinCluster: {1})".format(mesh, skin) + ) + + return invalid diff --git a/pype/plugins/maya/publish/validate_step_size.py b/pype/plugins/maya/publish/validate_step_size.py index d33898bc2f..20207a050f 100644 --- a/pype/plugins/maya/publish/validate_step_size.py +++ b/pype/plugins/maya/publish/validate_step_size.py @@ -12,9 +12,9 @@ class ValidateStepSize(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder label = 'Step size' - families = ["camera", - 'studio.pointcache', - 'studio.animation'] + families = ['camera', + 'pointcache', + 'animation'] actions = [pype.maya.action.SelectInvalidAction] MIN = 0.01 diff --git a/pype/plugins/maya/publish/validate_transform_naming_suffix.py b/pype/plugins/maya/publish/validate_transform_naming_suffix.py index a5bd097132..0adba9b656 100644 --- a/pype/plugins/maya/publish/validate_transform_naming_suffix.py +++ b/pype/plugins/maya/publish/validate_transform_naming_suffix.py @@ -33,7 +33,7 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] - families = ["model"] + families = ['model'] category = 'cleanup' optional = True version = (0, 1, 0) diff --git a/pype/plugins/maya/publish/validate_vray_distributed_rendering.py b/pype/plugins/maya/publish/validate_vray_distributed_rendering.py new file mode 100644 index 0000000000..cccf966ffd --- /dev/null +++ b/pype/plugins/maya/publish/validate_vray_distributed_rendering.py @@ -0,0 +1,55 @@ 
+import pyblish.api +import pype.api +import pype.maya.lib as lib + +from maya import cmds + + +class ValidateVRayDistributedRendering(pyblish.api.InstancePlugin): + """Validate V-Ray Distributed Rendering is ignored in batch mode. + + Whenever Distributed Rendering is enabled for V-Ray in the render settings + ensure that the "Ignore in batch mode" is enabled so the submitted job + won't try to render each frame with all machines resulting in faulty + errors. + + """ + + order = pype.api.ValidateContentsOrder + label = "VRay Distributed Rendering" + families = ["renderlayer"] + actions = [pype.api.RepairAction] + + # V-Ray attribute names + enabled_attr = "vraySettings.sys_distributed_rendering_on" + ignored_attr = "vraySettings.sys_distributed_rendering_ignore_batch" + + def process(self, instance): + + if instance.data.get("renderer") != "vray": + # If not V-Ray ignore.. + return + + vray_settings = cmds.ls("vraySettings", type="VRaySettingsNode") + assert vray_settings, "Please ensure a VRay Settings Node is present" + + renderlayer = instance.data['setMembers'] + + if not lib.get_attr_in_layer(self.enabled_attr, layer=renderlayer): + # If not distributed rendering enabled, ignore.. 
+ return + + # If distributed rendering is enabled but it is *not* set to ignore + # during batch mode we invalidate the instance + if not lib.get_attr_in_layer(self.ignored_attr, layer=renderlayer): + raise RuntimeError("Renderlayer has distributed rendering enabled " + "but is not set to ignore in batch mode.") + + @classmethod + def repair(cls, instance): + + renderlayer = instance.data.get("setMembers") + with lib.renderlayer(renderlayer): + cls.log.info("Enabling Distributed Rendering " + "ignore in batch mode..") + cmds.setAttr(cls.ignored_attr, True) diff --git a/pype/plugins/maya/publish/validate_vray_translator_settings.py b/pype/plugins/maya/publish/validate_vray_translator_settings.py new file mode 100644 index 0000000000..493febf49a --- /dev/null +++ b/pype/plugins/maya/publish/validate_vray_translator_settings.py @@ -0,0 +1,69 @@ +import pyblish.api +import pype.api +from pype.plugin import contextplugin_should_run + +from maya import cmds + + +class ValidateVRayTranslatorEnabled(pyblish.api.ContextPlugin): + + order = pype.api.ValidateContentsOrder + label = "VRay Translator Settings" + families = ["vrayscene"] + actions = [pype.api.RepairContextAction] + + def process(self, context): + + # Workaround bug pyblish-base#250 + if not contextplugin_should_run(self, context): + return + + invalid = self.get_invalid(context) + if invalid: + raise RuntimeError("Found invalid VRay Translator settings!") + + @classmethod + def get_invalid(cls, context): + + invalid = False + + # Get vraySettings node + vray_settings = cmds.ls(type="VRaySettingsNode") + assert vray_settings, "Please ensure a VRay Settings Node is present" + + node = vray_settings[0] + + if cmds.setAttr("{}.vrscene_render_on".format(node)): + cls.log.error("Render is enabled, this should be disabled") + invalid = True + + if not cmds.getAttr("{}.vrscene_on".format(node)): + cls.log.error("Export vrscene not enabled") + invalid = True + + if not 
cmds.getAttr("{}.misc_eachFrameInFile".format(node)): + cls.log.error("Each Frame in File not enabled") + invalid = True + + vrscene_filename = cmds.getAttr("{}.vrscene_filename".format(node)) + if vrscene_filename != "vrayscene//_/": + cls.log.error("Template for file name is wrong") + invalid = True + + return invalid + + @classmethod + def repair(cls, context): + + vray_settings = cmds.ls(type="VRaySettingsNode") + if not vray_settings: + node = cmds.createNode("VRaySettingsNode") + else: + node = vray_settings[0] + + cmds.setAttr("{}.vrscene_render_on".format(node), False) + cmds.setAttr("{}.vrscene_on".format(node), True) + cmds.setAttr("{}.misc_eachFrameInFile".format(node), True) + cmds.setAttr("{}.vrscene_filename".format(node), + "vrayscene//_/", + type="string") diff --git a/pype/plugins/maya/publish/validate_vrayproxy_members.py b/pype/plugins/maya/publish/validate_vrayproxy_members.py index 9b76a4947f..6631af43d9 100644 --- a/pype/plugins/maya/publish/validate_vrayproxy_members.py +++ b/pype/plugins/maya/publish/validate_vrayproxy_members.py @@ -12,7 +12,7 @@ class ValidateVrayProxyMembers(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder label = 'VRay Proxy Members' hosts = ['maya'] - families = ["vrayproxy"] + families = ['vrayproxy'] actions = [pype.maya.action.SelectInvalidAction] def process(self, instance): diff --git a/pype/plugins/maya/publish/validate_yeti_renderscript_callbacks.py b/pype/plugins/maya/publish/validate_yeti_renderscript_callbacks.py index ba9b714f11..cd9b0754b3 100644 --- a/pype/plugins/maya/publish/validate_yeti_renderscript_callbacks.py +++ b/pype/plugins/maya/publish/validate_yeti_renderscript_callbacks.py @@ -25,6 +25,17 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin): hosts = ["maya"] families = ["renderlayer"] + # Settings per renderer + callbacks = { + "vray": { + "pre": "catch(`pgYetiVRayPreRender`)", + "post": "catch(`pgYetiVRayPostRender`)" + }, + "arnold": { + "pre": 
"pgYetiPreRender" + } + } + def process(self, instance): invalid = self.get_invalid(instance) @@ -35,14 +46,6 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin): @classmethod def get_invalid(cls, instance): - # lookup per render - render_scripts = {"vray": - {"pre": "catch(`pgYetiVRayPreRender`)", - "post": "catch(`pgYetiVRayPostRender`)"}, - "arnold": - {"pre": "pgYetiPreRender"} - } - yeti_loaded = cmds.pluginInfo("pgYetiMaya", query=True, loaded=True) renderer = instance.data["renderer"] @@ -50,26 +53,29 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin): cls.log.info("Redshift ignores any pre and post render callbacks") return False - callback_lookup = render_scripts.get(renderer, {}) + callback_lookup = cls.callbacks.get(renderer, {}) if not callback_lookup: cls.log.warning("Renderer '%s' is not supported in this plugin" % renderer) return False - pre_render_callback = cmds.getAttr("defaultRenderGlobals.preMel") - post_render_callback = cmds.getAttr("defaultRenderGlobals.postMel") + pre_mel = cmds.getAttr("defaultRenderGlobals.preMel") or "" + post_mel = cmds.getAttr("defaultRenderGlobals.postMel") or "" - if (pre_render_callback is None) or (post_render_callback is None): - # no callbacks returned - return False + if pre_mel.strip(): + cls.log.debug("Found pre mel: `%s`" % pre_mel) - pre_callbacks = pre_render_callback.split(";") - post_callbacks = post_render_callback.split(";") + if post_mel.strip(): + cls.log.debug("Found post mel: `%s`" % post_mel) + + # Strip callbacks and turn into a set for quick lookup + pre_callbacks = {cmd.strip() for cmd in pre_mel.split(";")} + post_callbacks = {cmd.strip() for cmd in post_mel.split(";")} pre_script = callback_lookup.get("pre", "") post_script = callback_lookup.get("post", "") - # If not loaded + # If Yeti is not loaded invalid = False if not yeti_loaded: if pre_script and pre_script in pre_callbacks: @@ -81,18 +87,19 @@ class 
ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin): cls.log.error("Found post render callback '%s which is " "not used!" % post_script) invalid = True - else: - if pre_script: - if pre_script not in pre_callbacks: - cls.log.error( - "Could not find required pre render callback " - "`%s`" % pre_script) - invalid = True - if post_script: - if post_script not in post_callbacks: - cls.log.error("Could not find required post render callback" - " `%s`" % post_script) - invalid = True + # If Yeti is loaded + else: + if pre_script and pre_script not in pre_callbacks: + cls.log.error( + "Could not find required pre render callback " + "`%s`" % pre_script) + invalid = True + + if post_script and post_script not in post_callbacks: + cls.log.error( + "Could not find required post render callback" + " `%s`" % post_script) + invalid = True return invalid diff --git a/pype/plugins/maya/publish/validate_yeti_rig_settings.py b/pype/plugins/maya/publish/validate_yeti_rig_settings.py index d6ff93a573..9914277721 100644 --- a/pype/plugins/maya/publish/validate_yeti_rig_settings.py +++ b/pype/plugins/maya/publish/validate_yeti_rig_settings.py @@ -2,32 +2,46 @@ import pyblish.api class ValidateYetiRigSettings(pyblish.api.InstancePlugin): + """Validate Yeti Rig Settings have collected input connections. + + The input connections are collected for the nodes in the `input_SET`. + When no input connections are found a warning is logged but it is allowed + to pass validation. + + """ + order = pyblish.api.ValidatorOrder - label = "Validate Yeti Rig Settings" + label = "Yeti Rig Settings" families = ["yetiRig"] def process(self, instance): invalid = self.get_invalid(instance) if invalid: - raise RuntimeError("Detected invalid Yeti Rig data. " + raise RuntimeError("Detected invalid Yeti Rig data. 
import os
import re
import sys
import logging

# Pipeline imports
from avalon import api, io, pipeline
import avalon.fusion

# Config imports
import pype.lib as pype
import pype.fusion.lib as fusion_lib

log = logging.getLogger("Update Slap Comp")

self = sys.modules[__name__]
self._project = None


def _format_version_folder(folder):
    """Return the next version folder name ("vNNN") for *folder*.

    Assumption here is made that, if the path does not exist, the folder
    will be "v001".

    Args:
        folder (str): file path to a folder

    Returns:
        str: new version folder name
    """

    new_version = 1
    if os.path.isdir(folder):
        # Raw string: "\d" inside a plain literal is an invalid escape
        re_version = re.compile(r"v\d+$")
        # BUGFIX: the original checked `os.path.isdir(i)` with the bare
        # entry name, which resolves against the current working directory;
        # join with `folder` so the right location is inspected.
        versions = [i for i in os.listdir(folder)
                    if os.path.isdir(os.path.join(folder, i))
                    and re_version.match(i)]
        if versions:
            # ensure the "v" is not included
            new_version = int(max(versions)[1:]) + 1

    version_folder = "v{:03d}".format(new_version)

    return version_folder


def _get_work_folder(session):
    """Convenience function to get the work folder path of the current asset"""

    # Get new filename, create path based on asset and work template
    template_work = self._project["config"]["template"]["work"]
    work_path = pipeline._format_work_template(template_work, session)

    return os.path.normpath(work_path)


def _get_fusion_instance():
    """Return the running Fusion application instance.

    Raises:
        RuntimeError: when no Fusion instance can be found
    """
    fusion = getattr(sys.modules["__main__"], "fusion", None)
    if fusion is None:
        try:
            # Support for FuScript.exe, BlackmagicFusion module for py2 only
            import BlackmagicFusion as bmf
            fusion = bmf.scriptapp("Fusion")
        except ImportError:
            raise RuntimeError("Could not find a Fusion instance")
    return fusion


def _format_filepath(session):
    """Build a unique slapcomp .comp output path for the session's asset.

    Args:
        session (dict): an Avalon session-like mapping with at least
            "AVALON_PROJECT" and "AVALON_ASSET" keys

    Returns:
        str: absolute, non-clashing file path for the new comp
    """

    project = session["AVALON_PROJECT"]
    asset = session["AVALON_ASSET"]

    # Save updated slap comp
    work_path = _get_work_folder(session)
    walk_to_dir = os.path.join(work_path, "scenes", "slapcomp")
    slapcomp_dir = os.path.abspath(walk_to_dir)

    # Ensure destination exists
    if not os.path.isdir(slapcomp_dir):
        log.warning("Folder did not exist, creating folder structure")
        os.makedirs(slapcomp_dir)

    # Compute output path
    new_filename = "{}_{}_slapcomp_v001.comp".format(project, asset)
    new_filepath = os.path.join(slapcomp_dir, new_filename)

    # Create new unique filepath
    if os.path.exists(new_filepath):
        new_filepath = pype.version_up(new_filepath)

    return new_filepath


def _update_savers(comp, session):
    """Update all savers of the current comp to ensure the output is correct

    Args:
        comp (object): current comp instance
        session (dict): the current Avalon session

    Returns:
        None
    """

    new_work = _get_work_folder(session)
    renders = os.path.join(new_work, "renders")
    version_folder = _format_version_folder(renders)
    renders_version = os.path.join(renders, version_folder)

    comp.Print("New renders to: %s\n" % renders)

    with avalon.fusion.comp_lock_and_undo_chunk(comp):
        savers = comp.GetToolList(False, "Saver").values()
        for saver in savers:
            filepath = saver.GetAttrs("TOOLST_Clip_Name")[1.0]
            filename = os.path.basename(filepath)
            new_path = os.path.join(renders_version, filename)
            saver["Clip"] = new_path


def update_frame_range(comp, representations):
    """Update the frame range of the comp and render length

    The start and end frame are based on the lowest start frame and the
    highest end frame.

    Args:
        comp (object): current focused comp
        representations (list): collection of representation dicts

    Returns:
        None

    """

    version_ids = [r["parent"] for r in representations]
    versions = io.find({"type": "version", "_id": {"$in": version_ids}})
    versions = list(versions)

    start = min(v["data"]["startFrame"] for v in versions)
    end = max(v["data"]["endFrame"] for v in versions)

    fusion_lib.update_frame_range(start, end, comp=comp)


def switch(asset_name, filepath=None, new=True):
    """Switch the current containers of the file to the other asset (shot)

    Args:
        asset_name (str): name of the asset (shot)
        filepath (str): file path of the comp file
        new (bool): Save updated comp under a different name

    Returns:
        comp path (str): new filepath of the updated comp

    """

    # If filepath provided, ensure it is valid absolute path
    if filepath is not None:
        if not os.path.isabs(filepath):
            filepath = os.path.abspath(filepath)

        assert os.path.exists(filepath), "%s must exist " % filepath

    # Assert asset name exists
    # It is better to do this here than to wait till switch_shot does it
    asset = io.find_one({"type": "asset", "name": asset_name})
    assert asset, "Could not find '%s' in the database" % asset_name

    # Get current project
    self._project = io.find_one({"type": "project",
                                 "name": api.Session["AVALON_PROJECT"]})

    # Go to comp
    if not filepath:
        current_comp = avalon.fusion.get_current_comp()
        assert current_comp is not None, "Could not find current comp"
    else:
        fusion = _get_fusion_instance()
        current_comp = fusion.LoadComp(filepath, quiet=True)
        assert current_comp is not None, \
            "Fusion could not load '%s'" % filepath

    host = api.registered_host()
    containers = list(host.ls())
    assert containers, "Nothing to update"

    representations = []
    for container in containers:
        try:
            representation = pype.switch_item(container,
                                              asset_name=asset_name)
            representations.append(representation)
        except Exception as e:
            # BUGFIX: `e.message` does not exist on Python 3 exceptions;
            # `str(e)` works on both Python 2 and 3.
            current_comp.Print("Error in switching! %s\n" % str(e))

    message = "Switched %i Loaders of the %i\n" % (len(representations),
                                                   len(containers))
    current_comp.Print(message)

    # Build the session to switch to
    switch_to_session = api.Session.copy()
    switch_to_session["AVALON_ASSET"] = asset['name']

    if new:
        comp_path = _format_filepath(switch_to_session)

        # Update savers output based on new session
        _update_savers(current_comp, switch_to_session)
    else:
        comp_path = pype.version_up(filepath)

    current_comp.Print(comp_path)

    current_comp.Print("\nUpdating frame range")
    update_frame_range(current_comp, representations)

    current_comp.Save(comp_path)

    return comp_path


if __name__ == '__main__':

    import argparse

    parser = argparse.ArgumentParser(description="Switch to a shot within an"
                                                 " existing comp file")

    # BUGFIX: the options previously used `default=True`, which fed the
    # boolean True into switch() when an option was omitted and crashed
    # later. The comp path is genuinely optional (switch() accepts None);
    # the asset name is mandatory.
    parser.add_argument("--file_path",
                        type=str,
                        default=None,
                        help="File path of the comp to use")

    parser.add_argument("--asset_name",
                        type=str,
                        required=True,
                        help="Name of the asset (shot) to switch")

    # BUGFIX: `parse_args()` returns a single Namespace; unpacking it into
    # two names raised a TypeError. `parse_known_args()` returns the
    # (namespace, remainder) pair the original unpacking expected.
    args, unknown = parser.parse_known_args()

    api.install(avalon.fusion)
    switch(args.asset_name, args.file_path)

    sys.exit(0)
"""This module is used for command line publishing of image sequences."""

import os
import sys
import logging

# NOTE: logging.basicConfig() returns None; the original bound that None
# to an unused `handler` name, which is dropped here.
logging.basicConfig()
log = logging.getLogger("Publish Image Sequences")
log.setLevel(logging.DEBUG)

error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"


def publish(paths, gui=False):
    """Publish rendered image sequences based on the job data

    Args:
        paths (list): a list of paths where to publish from
        gui (bool, Optional): Choose to show Pyblish GUI, default is False

    Raises:
        AssertionError: when *paths* is not a list/tuple or is empty
        SystemExit: exit code 1 when nothing was collected, exit code 2
            when one or more publish plug-ins errored

    Returns:
        None

    """

    assert isinstance(paths, (list, tuple)), "Must be list of paths"
    log.info(paths)
    assert any(paths), "No paths found in the list"

    # Set the paths to publish for the collector.
    # (The assert above guarantees `paths` is non-empty, so the original
    # `if paths:` guard was dead code and is removed.)
    os.environ["FILESEQUENCE"] = os.pathsep.join(paths)

    # Install Avalon with shell as current host
    from avalon import api, shell
    api.install(shell)

    # Register target and host
    import pyblish.api
    pyblish.api.register_target("filesequence")
    pyblish.api.register_host("shell")

    # Publish items
    if gui:
        import pyblish_qml
        pyblish_qml.show(modal=True)
    else:
        import pyblish.util
        context = pyblish.util.publish()

        if not context:
            log.warning("Nothing collected.")
            sys.exit(1)

        # Collect errors, {plugin name: error}
        error_results = [r for r in context.data["results"] if r["error"]]

        if error_results:
            log.error(" Errors occurred ...")
            for result in error_results:
                log.error(error_format.format(**result))
            sys.exit(2)


def __main__():
    """Command line entry point: parse arguments and run :func:`publish`."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--paths",
                        nargs="*",
                        default=[],
                        help="The filepaths to publish. This can be a "
                             "directory or a path to a .json publish "
                             "configuration.")
    parser.add_argument("--gui",
                        default=False,
                        action="store_true",
                        help="Whether to run Pyblish in GUI mode.")

    kwargs, args = parser.parse_known_args()

    print("Running publish imagesequence...")
    print("Paths: {}".format(kwargs.paths or [os.getcwd()]))
    publish(kwargs.paths, gui=kwargs.gui)


if __name__ == '__main__':
    __main__()