diff --git a/pype/__init__.py b/pype/__init__.py
index e5d1aee374..8bd31c060d 100644
--- a/pype/__init__.py
+++ b/pype/__init__.py
@@ -6,6 +6,15 @@ from avalon import api as avalon
 from .launcher_actions import register_launcher_actions
 from .lib import collect_container_metadata
 
+import logging
+log = logging.getLogger(__name__)
+
+# do not delete; these are mandatory
+Anatomy = None
+Dataflow = None
+Metadata = None
+Colorspace = None
+
 PACKAGE_DIR = os.path.dirname(__file__)
 PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
 
@@ -15,12 +24,13 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "global", "load")
 
 
 def install():
-    print("Registering global plug-ins..")
+    log.info("Registering global plug-ins..")
     pyblish.register_plugin_path(PUBLISH_PATH)
     avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
 
 
 def uninstall():
-    print("Deregistering global plug-ins..")
+    log.info("Deregistering global plug-ins..")
     pyblish.deregister_plugin_path(PUBLISH_PATH)
     avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
+    log.info("Global plug-ins unregistered")
diff --git a/pype/api.py b/pype/api.py
index e665d40535..36094feb7f 100644
--- a/pype/api.py
+++ b/pype/api.py
@@ -15,6 +15,26 @@ from .action import (
     RepairContextAction
 )
 
+from app.api import Logger
+
+from . import (
+    Anatomy,
+    Colorspace,
+    Metadata,
+    Dataflow
+)
+from .templates import (
+    load_data_from_templates,
+    reset_data_from_templates,
+    get_project_name,
+    get_project_code,
+    get_hiearchy,
+    get_asset,
+    get_task,
+    fill_avalon_workdir,
+    get_version_from_workfile
+)
+
 __all__ = [
     # plugin classes
     "Extractor",
@@ -25,5 +45,28 @@ __all__ = [
     "ValidateMeshOrder",
     # action
    "get_errored_instances_from_context",
-    "RepairAction"
+    "RepairAction",
+
+    "Logger",
+
+    # contextual templates
+    # get data to preloaded templates
+    "load_data_from_templates",
+    "reset_data_from_templates",
+
+    # get contextual data
+    "get_project_name",
+    "get_project_code",
+    "get_hiearchy",
+    "get_asset",
+    "get_task",
+    "fill_avalon_workdir",
+    "get_version_from_workfile",
+
+    # preloaded templates
+    "Anatomy",
+    "Colorspace",
+    "Metadata",
+    "Dataflow"
+
 ]
diff --git a/pype/ftrack/actions/action_Apps.py b/pype/ftrack/actions/action_Apps.py
index 9d1e182c19..3d1bf093de 100644
--- a/pype/ftrack/actions/action_Apps.py
+++ b/pype/ftrack/actions/action_Apps.py
@@ -14,7 +14,7 @@ def registerApp(app, session):
     try:
         variant = app['name'].split("_")[1]
     except Exception as e:
-        log.warning("'{0}' - App 'name' and 'variant' is not separated by '_' (variant is set to '')".format(app['name']))
+        log.warning("'{0}' - App 'name' and 'variant' is not separated by '_' (variant is not set)".format(app['name']))
         return
 
     abspath = lib.which_app(app['name'])
@@ -23,17 +23,16 @@ def registerApp(app, session):
         return
 
     apptoml = toml.load(abspath)
+
     executable = apptoml['executable']
     label = app['label']
+    if 'ftrack_label' in apptoml:
+        label = apptoml['ftrack_label']
+
     icon = None
-    # TODO get right icons
-    if 'nuke' in app['name']:
-        icon = "https://mbtskoudsalg.com/images/nuke-icon-png-2.png"
-        label = "Nuke"
-    elif 'maya' in app['name']:
-        icon = "http://icons.iconarchive.com/icons/froyoshark/enkel/256/Maya-icon.png"
-        label = "Autodesk Maya"
+    if 'icon' in apptoml:
+        icon = apptoml['icon']
 
     # register action
     AppAction(session, label, name, executable, variant, icon).register()
diff --git a/pype/ftrack/actions/action_syncToAvalon.py b/pype/ftrack/actions/action_syncToAvalon.py
index 556c57be2a..d89b0e6618 100644
--- a/pype/ftrack/actions/action_syncToAvalon.py
+++ b/pype/ftrack/actions/action_syncToAvalon.py
@@ -1,5 +1,3 @@
-# :coding: utf-8
-# :copyright: Copyright (c) 2017 ftrack
 import sys
 import argparse
 import logging
diff --git a/pype/ftrack/actions/ftrack_action_handler.py b/pype/ftrack/actions/ftrack_action_handler.py
index f16647be76..cf0244ab72 100644
--- a/pype/ftrack/actions/ftrack_action_handler.py
+++ b/pype/ftrack/actions/ftrack_action_handler.py
@@ -10,14 +10,12 @@ from avalon import io, lib, pipeline
 from avalon import session as sess
 import acre
 
-from app.api import (
-    Templates,
-    Logger
-)
 
-t = Templates(
-    type=["anatomy"]
-)
+from pype import api as pype
+
+log = pype.Logger.getLogger(__name__, "ftrack")
+
+log.debug("pype.Anatomy: {}".format(pype.Anatomy))
 
 
 class AppAction(object):
@@ -34,7 +32,7 @@ class AppAction(object):
     def __init__(self, session, label, name, executable, variant=None, icon=None, description=None):
         '''Expects a ftrack_api.Session instance'''
-        self.log = Logger.getLogger(self.__class__.__name__)
+        self.log = pype.Logger.getLogger(self.__class__.__name__)
 
         # self.logger = Logger.getLogger(__name__)
 
@@ -243,7 +241,9 @@ class AppAction(object):
         os.environ["AVALON_APP"] = self.identifier
         os.environ["AVALON_APP_NAME"] = self.identifier + "_" + self.variant
-        anatomy = t.anatomy
+        os.environ["FTRACK_TASKID"] = id
+
+        anatomy = pype.Anatomy
         io.install()
         hierarchy = io.find_one({"type": 'asset', "name": entity['parent']['name']})[
             'data']['parents']
@@ -257,9 +257,10 @@ class AppAction(object):
                 "task": entity['name'],
                 "asset": entity['parent']['name'],
                 "hierarchy": hierarchy}
-
-        anatomy = anatomy.format(data)
-
+        try:
+            anatomy = anatomy.format(data)
+        except Exception as e:
+            log.error("{0} Error in anatomy.format: {1}".format(__name__, e))
         os.environ["AVALON_WORKDIR"] = os.path.join(anatomy.work.root, anatomy.work.folder)
 
         # TODO Add paths to avalon setup from tomls
@@ -400,7 +401,7 @@ class BaseAction(object):
     def __init__(self, session):
         '''Expects a ftrack_api.Session instance'''
-        self.log = Logger.getLogger(self.__class__.__name__)
+        self.log = pype.Logger.getLogger(self.__class__.__name__)
 
         if self.label is None:
             raise ValueError(
@@ -437,7 +438,7 @@ class BaseAction(object):
             ),
             self._launch
         )
-
+        self.log.info("Action '{}' - Registered successfully".format(self.__class__.__name__))
 
     def _discover(self, event):
diff --git a/pype/ftrack/ftrackRun.py b/pype/ftrack/ftrackRun.py
index e90530b3b2..7fddf171da 100644
--- a/pype/ftrack/ftrackRun.py
+++ b/pype/ftrack/ftrackRun.py
@@ -7,11 +7,19 @@ import time
 from app import style
 from app.vendor.Qt import QtCore, QtGui, QtWidgets
 from pype.ftrack import credentials, login_dialog as login_dialog
-from app.api import Logger
+
 from FtrackServer import FtrackServer
 
-log = Logger.getLogger(__name__)
+from pype import api as pype
+
+
+# load data from templates
+pype.load_data_from_templates()
+
+log = pype.Logger.getLogger(__name__, "ftrack")
 
 # Validation if already logged into Ftrack
+
+
 class FtrackRunner:
@@ -76,7 +84,7 @@ class FtrackRunner:
     def runActionServer(self):
         if self.actionThread is None:
             self.actionThread = threading.Thread(target=self.setActionServer)
-            self.actionThread.daemon=True
+            self.actionThread.daemon = True
             self.actionThread.start()
             log.info("Ftrack action server launched")
@@ -107,7 +115,7 @@ class FtrackRunner:
     def runEventServer(self):
         if self.eventThread is None:
             self.eventThread = threading.Thread(target=self.setEventServer)
-            self.eventThread.daemon=True
+            self.eventThread.daemon = True
self.eventThread.start() log.info("Ftrack event server launched") @@ -168,9 +176,9 @@ class FtrackRunner: self.smEventS.addAction(self.aStopEventS) # Actions - basic - self.aLogin = QtWidgets.QAction("Login",self.menu) + self.aLogin = QtWidgets.QAction("Login", self.menu) self.aLogin.triggered.connect(self.validate) - self.aLogout = QtWidgets.QAction("Logout",self.menu) + self.aLogout = QtWidgets.QAction("Logout", self.menu) self.aLogout.triggered.connect(self.logout) self.menu.addAction(self.aLogin) diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index db2b1f4982..371fe2a786 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -1,8 +1,29 @@ import os - +import sys from avalon import api as avalon from pyblish import api as pyblish +from .. import api as pype + +from pype.nuke import menu + +from .lib import ( + create_write_node +) + +import nuke + +# removing logger handler created in avalon_core +for name, handler in [(handler.get_name(), handler) + for handler in pype.Logger.logging.root.handlers[:]]: + if "pype" not in str(name).lower(): + pype.Logger.logging.root.removeHandler(handler) + + +log = pype.Logger.getLogger(__name__, "nuke") + +AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") + PARENT_DIR = os.path.dirname(__file__) PACKAGE_DIR = os.path.dirname(PARENT_DIR) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") @@ -12,9 +33,77 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "nuke", "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "nuke", "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "nuke", "inventory") +self = sys.modules[__name__] +self.nLogger = None + + +class NukeHandler(pype.Logger.logging.Handler): + ''' + Nuke Handler - emits logs into nuke's script editor. + warning will emit nuke.warning() + critical and fatal would popup msg dialog to alert of the error. + ''' + + def __init__(self): + pype.Logger.logging.Handler.__init__(self) + self.set_name("Pype_Nuke_Handler") + + def emit(self, record): + # Formated message: + msg = self.format(record) + + if record.levelname.lower() in [ + "warning", + "critical", + "fatal", + "error" + ]: + nuke.message(msg) + + +'''Adding Nuke Logging Handler''' +nuke_handler = NukeHandler() +if nuke_handler.get_name() \ + not in [handler.get_name() + for handler in pype.Logger.logging.root.handlers[:]]: + pype.Logger.logging.getLogger().addHandler(nuke_handler) + +if not self.nLogger: + self.nLogger = pype.Logger + + +def reload_config(): + """Attempt to reload pipeline at run-time. + + CAUTION: This is primarily for development and debugging purposes. 
+ + """ + + import importlib + + for module in ( + "app", + "app.api", + "{}.api".format(AVALON_CONFIG), + "{}.templates".format(AVALON_CONFIG), + "{}.nuke".format(AVALON_CONFIG), + "{}.nuke.lib".format(AVALON_CONFIG), + "{}.nuke.templates".format(AVALON_CONFIG), + "{}.nuke.menu".format(AVALON_CONFIG) + ): + log.info("Reloading module: {}...".format(module)) + module = importlib.import_module(module) + try: + reload(module) + except Exception: + importlib.reload(module) + def install(): - print("Registering Nuke plug-ins..") + pype.fill_avalon_workdir() + reload_config() + + log.info("Registering Nuke plug-ins..") pyblish.register_plugin_path(PUBLISH_PATH) avalon.register_plugin_path(avalon.Loader, LOAD_PATH) avalon.register_plugin_path(avalon.Creator, CREATE_PATH) @@ -23,48 +112,56 @@ def install(): pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) # Disable all families except for the ones we explicitly want to see - family_states = ["imagesequence", - "camera", - "pointcache"] + family_states = [ + "render", + "still" + "lifeGroup", + "backdrop", + "imagesequence", + "mov" + "camera", + "pointcache", + ] avalon.data["familiesStateDefault"] = False avalon.data["familiesStateToggled"] = family_states - # # work files start at app start - # workfiles.show( - # os.environ["AVALON_WORKDIR"] - # ) + menu.install() + + # load data from templates + pype.load_data_from_templates() def uninstall(): - print("Deregistering Nuke plug-ins..") + log.info("Deregistering Nuke plug-ins..") pyblish.deregister_plugin_path(PUBLISH_PATH) avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH) pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) + # reset data from templates + pype.reset_data_from_templates() -def on_pyblish_instance_toggled(instance, new_value, old_value): - """Toggle saver tool passthrough states on instance toggles.""" - from avalon.nuke import viewer_update_and_undo_stop, add_publish_knob, log +def on_pyblish_instance_toggled(instance, old_value, new_value): + """Toggle node passthrough states on instance toggles.""" + self.log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( + instance, old_value, new_value)) - writes = [n for n in instance if - n.Class() == "Write"] - if not writes: - return + from avalon.nuke import ( + viewer_update_and_undo_stop, + add_publish_knob + ) # Whether instances should be passthrough based on new value - passthrough = not new_value - with viewer_update_and_undo_stop(): - for n in writes: - try: - n["publish"].value() - except ValueError: - n = add_publish_knob(n) - log.info(" `Publish` knob was added to write node..") - current = n["publish"].value() - if current != passthrough: - n["publish"].setValue(passthrough) + with viewer_update_and_undo_stop(): + n = instance[0] + try: + n["publish"].value() + except ValueError: + n = add_publish_knob(n) + log.info(" `Publish` knob was added to write node..") + + n["publish"].setValue(new_value) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 3971b7c977..79c292b2ba 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -1,14 +1,254 @@ import sys - +from collections import OrderedDict +from pprint import pprint from avalon.vendor.Qt import QtGui import avalon.nuke - +import pype.api as pype import nuke +log = pype.Logger.getLogger(__name__, "nuke") self = sys.modules[__name__] self._project = None +def format_anatomy(data): + from .templates import ( + get_anatomy + ) + file = 
script_name() + + anatomy = get_anatomy() + + # TODO: perhaps should be in try! + padding = anatomy.render.padding + + data.update({ + "hierarchy": pype.get_hiearchy(), + "frame": "#"*padding, + "VERSION": pype.get_version_from_workfile(file) + }) + + # log.info("format_anatomy:anatomy: {}".format(anatomy)) + return anatomy.format(data) + + +def script_name(): + return nuke.root().knob('name').value() + + +def create_write_node(name, data): + from .templates import ( + get_dataflow, + get_colorspace + ) + nuke_dataflow_writes = get_dataflow(**data) + nuke_colorspace_writes = get_colorspace(**data) + try: + anatomy_filled = format_anatomy({ + "subset": data["avalon"]["subset"], + "asset": data["avalon"]["asset"], + "task": pype.get_task(), + "family": data["avalon"]["family"], + "project": {"name": pype.get_project_name(), + "code": pype.get_project_code()}, + "representation": nuke_dataflow_writes.file_type, + }) + except Exception as e: + log.error("problem with resolving anatomy tepmlate: {}".format(e)) + + log.debug("anatomy_filled.render: {}".format(anatomy_filled.render)) + + _data = OrderedDict({ + "file": str(anatomy_filled.render.path).replace("\\", "/") + }) + + # adding dataflow template + {_data.update({k: v}) + for k, v in nuke_dataflow_writes.items() + if k not in ["id", "previous"]} + + # adding dataflow template + {_data.update({k: v}) + for k, v in nuke_colorspace_writes.items()} + + _data = avalon.nuke.lib.fix_data_for_node_create(_data) + + log.debug(_data) + + _data["frame_range"] = data.get("frame_range", None) + + instance = avalon.nuke.lib.add_write_node( + name, + **_data + ) + instance = avalon.nuke.lib.imprint(instance, data["avalon"]) + add_rendering_knobs(instance) + return instance + + +def add_rendering_knobs(node): + if "render" not in node.knobs(): + knob = nuke.Boolean_Knob("render", "Render") + knob.setFlag(0x1000) + knob.setValue(False) + node.addKnob(knob) + if "render_farm" not in node.knobs(): + knob = nuke.Boolean_Knob("render_farm", "Render on Farm") + knob.setValue(False) + node.addKnob(knob) + return node + + +def update_frame_range(start, end, root=None): + """Set Nuke script start and end frame range + + Args: + start (float, int): start frame + end (float, int): end frame + root (object, Optional): root object from nuke's script + + Returns: + None + + """ + + knobs = { + "first_frame": start, + "last_frame": end + } + + with avalon.nuke.viewer_update_and_undo_stop(): + for key, value in knobs.items(): + if root: + root[key].setValue(value) + else: + nuke.root()[key].setValue(value) + + +def get_additional_data(container): + """Get Nuke's related data for the container + + Args: + container(dict): the container found by the ls() function + + Returns: + dict + """ + + node = container["_tool"] + tile_color = node['tile_color'].value() + if tile_color is None: + return {} + + hex = '%08x' % tile_color + rgba = [ + float(int(hex[0:2], 16)) / 255.0, + float(int(hex[2:4], 16)) / 255.0, + float(int(hex[4:6], 16)) / 255.0 + ] + + return {"color": QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])} + + +def set_viewers_colorspace(viewer): + assert isinstance(viewer, dict), log.error( + "set_viewers_colorspace(): argument should be dictionary") + + filter_knobs = [ + "viewerProcess", + "wipe_position" + ] + viewers = [n for n in nuke.allNodes() if n.Class() == 'Viewer'] + erased_viewers = [] + + for v in viewers: + v['viewerProcess'].setValue(str(viewer.viewerProcess)) + if str(viewer.viewerProcess) not in v['viewerProcess'].value(): + copy_inputs = 
v.dependencies() + copy_knobs = {k: v[k].value() for k in v.knobs() + if k not in filter_knobs} + pprint(copy_knobs) + # delete viewer with wrong settings + erased_viewers.append(v['name'].value()) + nuke.delete(v) + + # create new viewer + nv = nuke.createNode("Viewer") + + # connect to original inputs + for i, n in enumerate(copy_inputs): + nv.setInput(i, n) + + # set coppied knobs + for k, v in copy_knobs.items(): + print(k, v) + nv[k].setValue(v) + + # set viewerProcess + nv['viewerProcess'].setValue(str(viewer.viewerProcess)) + + if erased_viewers: + log.warning( + "Attention! Viewer nodes {} were erased." + "It had wrong color profile".format(erased_viewers)) + + +def set_root_colorspace(root_dict): + assert isinstance(root_dict, dict), log.error( + "set_root_colorspace(): argument should be dictionary") + for knob, value in root_dict.items(): + if nuke.root()[knob].value() not in value: + nuke.root()[knob].setValue(str(value)) + log.info("nuke.root()['{}'] changed to: {}".format(knob, value)) + + +def set_writes_colorspace(write_dict): + assert isinstance(write_dict, dict), log.error( + "set_root_colorspace(): argument should be dictionary") + log.info("set_writes_colorspace(): {}".format(write_dict)) + + +def set_colorspace(): + from pype import api as pype + + nuke_colorspace = getattr(pype.Colorspace, "nuke", None) + + try: + set_root_colorspace(nuke_colorspace.root) + except AttributeError: + log.error( + "set_colorspace(): missing `root` settings in template") + try: + set_viewers_colorspace(nuke_colorspace.viewer) + except AttributeError: + log.error( + "set_colorspace(): missing `viewer` settings in template") + try: + set_writes_colorspace(nuke_colorspace.write) + except AttributeError: + log.error( + "set_colorspace(): missing `write` settings in template") + + try: + for key in nuke_colorspace: + log.info("{}".format(key)) + except TypeError: + log.error("Nuke is not in templates! 
\n\n\n" + "contact your supervisor!") + + +def get_avalon_knob_data(node): + import toml + try: + data = toml.loads(node['avalon'].value()) + except: + return None + return data + +# TODO: bellow functions are wip and needs to be check where they are used +# ------------------------------------ + + def update_frame_range(start, end, root=None): """Set Nuke script start and end frame range diff --git a/pype/nuke/menu.py b/pype/nuke/menu.py new file mode 100644 index 0000000000..97e2432e16 --- /dev/null +++ b/pype/nuke/menu.py @@ -0,0 +1,12 @@ +import nuke +from avalon.api import Session + +from pype.nuke import lib + + +def install(): + menubar = nuke.menu("Nuke") + menu = menubar.findItem(Session["AVALON_LABEL"]) + + menu.addSeparator() + menu.addCommand("Set colorspace...", lib.set_colorspace) diff --git a/pype/nuke/templates.py b/pype/nuke/templates.py new file mode 100644 index 0000000000..16cb6062a2 --- /dev/null +++ b/pype/nuke/templates.py @@ -0,0 +1,41 @@ +from pype import api as pype + +log = pype.Logger.getLogger(__name__, "nuke") + + +def get_anatomy(**kwarg): + return pype.Anatomy + + +def get_dataflow(**kwarg): + log.info(kwarg) + host = kwarg.get("host", "nuke") + cls = kwarg.get("class", None) + preset = kwarg.get("preset", None) + assert any([host, cls]), log.error("nuke.templates.get_dataflow():" + "Missing mandatory kwargs `host`, `cls`") + + nuke_dataflow = getattr(pype.Dataflow, str(host), None) + nuke_dataflow_node = getattr(nuke_dataflow.nodes, str(cls), None) + if preset: + nuke_dataflow_node = getattr(nuke_dataflow_node, str(preset), None) + + log.info("Dataflow: {}".format(nuke_dataflow_node)) + return nuke_dataflow_node + + +def get_colorspace(**kwarg): + log.info(kwarg) + host = kwarg.get("host", "nuke") + cls = kwarg.get("class", None) + preset = kwarg.get("preset", None) + assert any([host, cls]), log.error("nuke.templates.get_colorspace():" + "Missing mandatory kwargs `host`, `cls`") + + nuke_colorspace = getattr(pype.Colorspace, str(host), None) + nuke_colorspace_node = getattr(nuke_colorspace, str(cls), None) + if preset: + nuke_colorspace_node = getattr(nuke_colorspace_node, str(preset), None) + + log.info("Colorspace: {}".format(nuke_colorspace_node)) + return nuke_colorspace_node diff --git a/pype/plugins/ftrack/collect_ftrack_api.py b/pype/plugins/ftrack/collect_ftrack_api.py new file mode 100644 index 0000000000..ce83652e9c --- /dev/null +++ b/pype/plugins/ftrack/collect_ftrack_api.py @@ -0,0 +1,22 @@ +import os + +import ftrack_api_old as ftrack_api +import pyblish.api + + +class CollectFtrackApi(pyblish.api.ContextPlugin): + """ Collects an ftrack session and the current task id. """ + + order = pyblish.api.CollectorOrder + label = "Collect Ftrack Api" + + def process(self, context): + + # Collect session + session = ftrack_api.Session() + context.data["ftrackSession"] = session + + # Collect task + task_id = os.environ.get("FTRACK_TASKID", "") + + context.data["ftrackTask"] = session.get("Task", task_id) diff --git a/pype/plugins/ftrack/integrate_ftrack_api.py b/pype/plugins/ftrack/integrate_ftrack_api.py new file mode 100644 index 0000000000..e6624dcf51 --- /dev/null +++ b/pype/plugins/ftrack/integrate_ftrack_api.py @@ -0,0 +1,288 @@ +import os + +import pyblish.api +import clique + + +class IntegrateFtrackApi(pyblish.api.InstancePlugin): + """ Commit components to server. 
""" + + order = pyblish.api.IntegratorOrder+0.499 + label = "Integrate Ftrack Api" + families = ["ftrack"] + + def query(self, entitytype, data): + """ Generate a query expression from data supplied. + + If a value is not a string, we'll add the id of the entity to the + query. + + Args: + entitytype (str): The type of entity to query. + data (dict): The data to identify the entity. + exclusions (list): All keys to exclude from the query. + + Returns: + str: String query to use with "session.query" + """ + queries = [] + for key, value in data.iteritems(): + if not isinstance(value, (basestring, int)): + if "id" in value.keys(): + queries.append( + "{0}.id is \"{1}\"".format(key, value["id"]) + ) + else: + queries.append("{0} is \"{1}\"".format(key, value)) + + query = ( + "select id from " + entitytype + " where " + " and ".join(queries) + ) + self.log.debug(query) + return query + + def process(self, instance): + + session = instance.context.data["ftrackSession"] + task = instance.context.data["ftrackTask"] + + info_msg = "Created new {entity_type} with data: {data}" + info_msg += ", metadata: {metadata}." + + # Iterate over components and publish + for data in instance.data.get("ftrackComponentsList", []): + + # AssetType + # Get existing entity. + assettype_data = {"short": "upload"} + assettype_data.update(data.get("assettype_data", {})) + + assettype_entity = session.query( + self.query("AssetType", assettype_data) + ).first() + + # Create a new entity if none exits. + if not assettype_entity: + assettype_entity = session.create("AssetType", assettype_data) + self.log.info( + "Created new AssetType with data: ".format(assettype_data) + ) + + # Asset + # Get existing entity. + asset_data = { + "name": task["name"], + "type": assettype_entity, + "parent": task["parent"], + } + asset_data.update(data.get("asset_data", {})) + + asset_entity = session.query( + self.query("Asset", asset_data) + ).first() + + # Extracting metadata, and adding after entity creation. This is + # due to a ftrack_api bug where you can't add metadata on creation. + asset_metadata = asset_data.pop("metadata", {}) + + # Create a new entity if none exits. + if not asset_entity: + asset_entity = session.create("Asset", asset_data) + self.log.info( + info_msg.format( + entity_type="Asset", + data=asset_data, + metadata=asset_metadata + ) + ) + + # Adding metadata + existing_asset_metadata = asset_entity["metadata"] + existing_asset_metadata.update(asset_metadata) + asset_entity["metadata"] = existing_asset_metadata + + # AssetVersion + # Get existing entity. + assetversion_data = { + "version": 0, + "asset": asset_entity, + "task": task + } + assetversion_data.update(data.get("assetversion_data", {})) + + assetversion_entity = session.query( + self.query("AssetVersion", assetversion_data) + ).first() + + # Extracting metadata, and adding after entity creation. This is + # due to a ftrack_api bug where you can't add metadata on creation. + assetversion_metadata = assetversion_data.pop("metadata", {}) + + # Create a new entity if none exits. 
+ if not assetversion_entity: + assetversion_entity = session.create( + "AssetVersion", assetversion_data + ) + self.log.info( + info_msg.format( + entity_type="AssetVersion", + data=assetversion_data, + metadata=assetversion_metadata + ) + ) + + # Adding metadata + existing_assetversion_metadata = assetversion_entity["metadata"] + existing_assetversion_metadata.update(assetversion_metadata) + assetversion_entity["metadata"] = existing_assetversion_metadata + + # Have to commit the version and asset, because location can't + # determine the final location without. + session.commit() + + # Component + # Get existing entity. + component_data = { + "name": "main", + "version": assetversion_entity + } + component_data.update(data.get("component_data", {})) + + component_entity = session.query( + self.query("Component", component_data) + ).first() + + component_overwrite = data.get("component_overwrite", False) + location = data.get("component_location", session.pick_location()) + + # Overwrite existing component data if requested. + if component_entity and component_overwrite: + + origin_location = session.query( + "Location where name is \"ftrack.origin\"" + ).one() + + # Removing existing members from location + components = list(component_entity.get("members", [])) + components += [component_entity] + for component in components: + for loc in component["component_locations"]: + if location["id"] == loc["location_id"]: + location.remove_component( + component, recursive=False + ) + + # Deleting existing members on component entity + for member in component_entity.get("members", []): + session.delete(member) + del(member) + + session.commit() + + # Reset members in memory + if "members" in component_entity.keys(): + component_entity["members"] = [] + + # Add components to origin location + try: + collection = clique.parse(data["component_path"]) + except ValueError: + # Assume its a single file + # Changing file type + name, ext = os.path.splitext(data["component_path"]) + component_entity["file_type"] = ext + + origin_location.add_component( + component_entity, data["component_path"] + ) + else: + # Changing file type + component_entity["file_type"] = collection.format("{tail}") + + # Create member components for sequence. + for member_path in collection: + + size = 0 + try: + size = os.path.getsize(member_path) + except OSError: + pass + + name = collection.match(member_path).group("index") + + member_data = { + "name": name, + "container": component_entity, + "size": size, + "file_type": os.path.splitext(member_path)[-1] + } + + component = session.create( + "FileComponent", member_data + ) + origin_location.add_component( + component, member_path, recursive=False + ) + component_entity["members"].append(component) + + # Add components to location. + location.add_component( + component_entity, origin_location, recursive=True + ) + + data["component"] = component_entity + msg = "Overwriting Component with path: {0}, data: {1}, " + msg += "location: {2}" + self.log.info( + msg.format( + data["component_path"], + component_data, + location + ) + ) + + # Extracting metadata, and adding after entity creation. This is + # due to a ftrack_api bug where you can't add metadata on creation. + component_metadata = component_data.pop("metadata", {}) + + # Create new component if none exists. 
+ new_component = False + if not component_entity: + component_entity = assetversion_entity.create_component( + data["component_path"], + data=component_data, + location=location + ) + data["component"] = component_entity + msg = "Created new Component with path: {0}, data: {1}" + msg += ", metadata: {2}, location: {3}" + self.log.info( + msg.format( + data["component_path"], + component_data, + component_metadata, + location + ) + ) + new_component = True + + # Adding metadata + existing_component_metadata = component_entity["metadata"] + existing_component_metadata.update(component_metadata) + component_entity["metadata"] = existing_component_metadata + + # Setting assetversion thumbnail + if data.get("thumbnail", False): + assetversion_entity["thumbnail_id"] = component_entity["id"] + + # Inform user about no changes to the database. + if (component_entity and not component_overwrite and + not new_component): + data["component"] = component_entity + self.log.info( + "Found existing component, and no request to overwrite. " + "Nothing has been changed." + ) + else: + # Commit changes. + session.commit() diff --git a/pype/plugins/ftrack/integrate_ftrack_instances.py b/pype/plugins/ftrack/integrate_ftrack_instances.py new file mode 100644 index 0000000000..9a0a36a413 --- /dev/null +++ b/pype/plugins/ftrack/integrate_ftrack_instances.py @@ -0,0 +1,67 @@ +import pyblish.api +import os + + +class IntegrateFtrackInstance(pyblish.api.InstancePlugin): + """Collect ftrack component data + + Add ftrack component list to instance. + + + """ + + order = pyblish.api.IntegratorOrder + 0.48 + label = 'Integrate Ftrack Component' + + family_mapping = {'camera': 'cam', + 'look': 'look', + 'mayaAscii': 'scene', + 'model': 'geo', + 'rig': 'rig', + 'setdress': 'setdress', + 'pointcache': 'cache', + 'review': 'mov'} + + def process(self, instance): + + self.log.debug('instance {}'.format(instance)) + + assumed_data = instance.data["assumedTemplateData"] + assumed_version = assumed_data["version"] + version_number = int(assumed_version) + family = instance.data['family'].lower() + asset_type = '' + + asset_type = self.family_mapping[family] + + componentList = [] + + transfers = instance.data["transfers"] + + ft_session = instance.context.data["ftrackSession"] + location = ft_session.query( + 'Location where name is "ftrack.unmanaged"').one() + self.log.debug('location {}'.format(location)) + + for src, dest in transfers: + filename, ext = os.path.splitext(src) + self.log.debug('source filename: ' + filename) + self.log.debug('source ext: ' + ext) + + componentList.append({"assettype_data": { + "short": asset_type, + }, + "assetversion_data": { + "version": version_number, + }, + "component_data": { + "name": ext[1:], # Default component name is "main". 
+ }, + "component_path": dest, + 'component_location': location, + "component_overwrite": False, + } + ) + + self.log.debug('componentsList: {}'.format(str(componentList))) + instance.data["ftrackComponentsList"] = componentList diff --git a/pype/plugins/global/publish/collect_deadline_user.py b/pype/plugins/global/_publish_unused/collect_deadline_user.py similarity index 100% rename from pype/plugins/global/publish/collect_deadline_user.py rename to pype/plugins/global/_publish_unused/collect_deadline_user.py diff --git a/pype/plugins/global/publish/collect_comment.py b/pype/plugins/global/publish/collect_comment.py index a246b7eaba..5bbd1da2a1 100644 --- a/pype/plugins/global/publish/collect_comment.py +++ b/pype/plugins/global/publish/collect_comment.py @@ -1,7 +1,7 @@ import pyblish.api -class CollectColorbleedComment(pyblish.api.ContextPlugin): +class CollectComment(pyblish.api.ContextPlugin): """This plug-ins displays the comment dialog box per default""" label = "Collect Comment" diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py index f2a3da7df4..48b6c448e3 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -18,6 +18,3 @@ class CollectTemplates(pyblish.api.ContextPlugin): type=["anatomy"] ) context.data['anatomy'] = templates.anatomy - for key in templates.anatomy: - self.log.info(str(key) + ": " + str(templates.anatomy[key])) - # return diff --git a/pype/plugins/global/publish/collect_time.py b/pype/plugins/global/publish/collect_time.py index d4fa658425..e0adc7dfc3 100644 --- a/pype/plugins/global/publish/collect_time.py +++ b/pype/plugins/global/publish/collect_time.py @@ -2,7 +2,7 @@ import pyblish.api from avalon import api -class CollectMindbenderTime(pyblish.api.ContextPlugin): +class CollectTime(pyblish.api.ContextPlugin): """Store global time at the time of publish""" label = "Collect Current Time" diff --git a/pype/plugins/global/publish/integrate.py b/pype/plugins/global/publish/integrate.py index 87ffa2aaa3..e20f59133c 100644 --- a/pype/plugins/global/publish/integrate.py +++ b/pype/plugins/global/publish/integrate.py @@ -233,6 +233,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): "root": root, "project": PROJECT, "projectcode": "prjX", + 'task': api.Session["AVALON_TASK"], "silo": asset['silo'], "asset": ASSET, "family": instance.data['family'], diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 1933713577..cb852f7c43 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -125,7 +125,7 @@ class SubmitDependentImageSequenceJobDeadline(pyblish.api.InstancePlugin): hosts = ["fusion", "maya", "nuke"] families = [ - "saver.deadline", + "render.deadline", "renderlayer", "imagesequence" ] diff --git a/pype/plugins/maya/publish/collect_look.py b/pype/plugins/maya/publish/collect_look.py index cbfecdc0ad..0daf3cc19c 100644 --- a/pype/plugins/maya/publish/collect_look.py +++ b/pype/plugins/maya/publish/collect_look.py @@ -107,6 +107,7 @@ def seq_to_glob(path): "": "" } + lower = path.lower() has_pattern = False for pattern, regex_pattern in patterns.items(): @@ -213,6 +214,9 @@ class CollectLook(pyblish.api.InstancePlugin): with lib.renderlayer(instance.data["renderlayer"]): self.collect(instance) + # make ftrack publishable + instance.data["families"] = ['ftrack'] + def collect(self, instance): 
self.log.info("Looking for look associations " diff --git a/pype/plugins/maya/publish/collect_model.py b/pype/plugins/maya/publish/collect_model.py index 47808934b3..393bb82910 100644 --- a/pype/plugins/maya/publish/collect_model.py +++ b/pype/plugins/maya/publish/collect_model.py @@ -7,7 +7,7 @@ class CollectModelData(pyblish.api.InstancePlugin): """Collect model data Ensures always only a single frame is extracted (current frame). - + Note: This is a workaround so that the `studio.model` family can use the same pointcache extractor implementation as animation and pointcaches. @@ -24,3 +24,6 @@ class CollectModelData(pyblish.api.InstancePlugin): frame = cmds.currentTime(query=True) instance.data['startFrame'] = frame instance.data['endFrame'] = frame + + # make ftrack publishable + instance.data["families"] = ['ftrack'] diff --git a/pype/plugins/nuke/_publish_unused/collect_render_target.py b/pype/plugins/nuke/_publish_unused/collect_render_target.py new file mode 100644 index 0000000000..86a38f26b6 --- /dev/null +++ b/pype/plugins/nuke/_publish_unused/collect_render_target.py @@ -0,0 +1,47 @@ +import pyblish.api + + +class CollectNukeRenderMode(pyblish.api.InstancePlugin): + # TODO: rewrite docstring to nuke + """Collect current comp's render Mode + + Options: + local + deadline + + Note that this value is set for each comp separately. When you save the + comp this information will be stored in that file. If for some reason the + available tool does not visualize which render mode is set for the + current comp, please run the following line in the console (Py2) + + comp.GetData("rendermode") + + This will return the name of the current render mode as seen above under + Options. + + """ + + order = pyblish.api.CollectorOrder + 0.4 + label = "Collect Render Mode" + hosts = ["nuke"] + families = ["write", "render.local"] + + def process(self, instance): + """Collect all image sequence tools""" + options = ["local", "deadline"] + + node = instance[0] + + if bool(node["render_local"].getValue()): + rendermode = "local" + else: + rendermode = "deadline" + + assert rendermode in options, "Must be supported render mode" + + # Append family + instance.data["families"].remove("render") + family = "render.{0}".format(rendermode) + instance.data["families"].append(family) + + self.log.info("Render mode: {0}".format(rendermode)) diff --git a/pype/plugins/nuke/_publish_unused/extract_nuke_write.py b/pype/plugins/nuke/_publish_unused/extract_nuke_write.py new file mode 100644 index 0000000000..155b5cf56d --- /dev/null +++ b/pype/plugins/nuke/_publish_unused/extract_nuke_write.py @@ -0,0 +1,116 @@ +import os + +import nuke +import pyblish.api + + +class Extract(pyblish.api.InstancePlugin): + """Super class for write and writegeo extractors.""" + + order = pyblish.api.ExtractorOrder + optional = True + label = "Extract Nuke [super]" + hosts = ["nuke"] + match = pyblish.api.Subset + + # targets = ["process.local"] + + def execute(self, instance): + # Get frame range + node = instance[0] + first_frame = nuke.root()["first_frame"].value() + last_frame = nuke.root()["last_frame"].value() + + if node["use_limit"].value(): + first_frame = node["first"].value() + last_frame = node["last"].value() + + # Render frames + nuke.execute(node.name(), int(first_frame), int(last_frame)) + + +class ExtractNukeWrite(Extract): + """ Extract output from write nodes. 
""" + + families = ["write", "local"] + label = "Extract Write" + + def process(self, instance): + + self.execute(instance) + + # Validate output + for filename in list(instance.data["collection"]): + if not os.path.exists(filename): + instance.data["collection"].remove(filename) + self.log.warning("\"{0}\" didn't render.".format(filename)) + + +class ExtractNukeCache(Extract): + + label = "Cache" + families = ["cache", "local"] + + def process(self, instance): + + self.execute(instance) + + # Validate output + msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) + assert os.path.exists(instance.data["output_path"]), msg + + +class ExtractNukeCamera(Extract): + + label = "Camera" + families = ["camera", "local"] + + def process(self, instance): + + node = instance[0] + node["writeGeometries"].setValue(False) + node["writePointClouds"].setValue(False) + node["writeAxes"].setValue(False) + + file_path = node["file"].getValue() + node["file"].setValue(instance.data["output_path"]) + + self.execute(instance) + + node["writeGeometries"].setValue(True) + node["writePointClouds"].setValue(True) + node["writeAxes"].setValue(True) + + node["file"].setValue(file_path) + + # Validate output + msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) + assert os.path.exists(instance.data["output_path"]), msg + + +class ExtractNukeGeometry(Extract): + + label = "Geometry" + families = ["geometry", "local"] + + def process(self, instance): + + node = instance[0] + node["writeCameras"].setValue(False) + node["writePointClouds"].setValue(False) + node["writeAxes"].setValue(False) + + file_path = node["file"].getValue() + node["file"].setValue(instance.data["output_path"]) + + self.execute(instance) + + node["writeCameras"].setValue(True) + node["writePointClouds"].setValue(True) + node["writeAxes"].setValue(True) + + node["file"].setValue(file_path) + + # Validate output + msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) + assert os.path.exists(instance.data["output_path"]), msg diff --git a/pype/plugins/nuke/_publish_unused/publish_image_sequences.py b/pype/plugins/nuke/_publish_unused/publish_image_sequences.py new file mode 100644 index 0000000000..e8b468e94a --- /dev/null +++ b/pype/plugins/nuke/_publish_unused/publish_image_sequences.py @@ -0,0 +1,98 @@ +import re +import os +import json +import subprocess + +import pyblish.api + +from pype.action import get_errored_plugins_from_data + + +def _get_script(): + """Get path to the image sequence script""" + + # todo: use a more elegant way to get the python script + + try: + from pype.fusion.scripts import publish_filesequence + except Exception: + raise RuntimeError("Expected module 'publish_imagesequence'" + "to be available") + + module_path = publish_filesequence.__file__ + if module_path.endswith(".pyc"): + module_path = module_path[:-len(".pyc")] + ".py" + + return module_path + + +class PublishImageSequence(pyblish.api.InstancePlugin): + """Publish the generated local image sequences.""" + + order = pyblish.api.IntegratorOrder + label = "Publish Rendered Image Sequence(s)" + hosts = ["fusion"] + families = ["saver.renderlocal"] + + def process(self, instance): + + # Skip this plug-in if the ExtractImageSequence failed + errored_plugins = get_errored_plugins_from_data(instance.context) + if any(plugin.__name__ == "FusionRenderLocal" for plugin in + errored_plugins): + raise RuntimeError("Fusion local render failed, " + "publishing images skipped.") + + subset = instance.data["subset"] + ext = 
instance.data["ext"] + + # Regex to match resulting renders + regex = "^{subset}.*[0-9]+{ext}+$".format(subset=re.escape(subset), + ext=re.escape(ext)) + + # The instance has most of the information already stored + metadata = { + "regex": regex, + "startFrame": instance.context.data["startFrame"], + "endFrame": instance.context.data["endFrame"], + "families": ["imagesequence"], + } + + # Write metadata and store the path in the instance + output_directory = instance.data["outputDir"] + path = os.path.join(output_directory, + "{}_metadata.json".format(subset)) + with open(path, "w") as f: + json.dump(metadata, f) + + assert os.path.isfile(path), ("Stored path is not a file for %s" + % instance.data["name"]) + + # Suppress any subprocess console + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + startupinfo.wShowWindow = subprocess.SW_HIDE + + process = subprocess.Popen(["python", _get_script(), + "--paths", path], + bufsize=1, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + startupinfo=startupinfo) + + while True: + output = process.stdout.readline() + # Break when there is no output or a return code has been given + if output == '' and process.poll() is not None: + process.stdout.close() + break + if output: + line = output.strip() + if line.startswith("ERROR"): + self.log.error(line) + else: + self.log.info(line) + + if process.returncode != 0: + raise RuntimeError("Process quit with non-zero " + "return code: {}".format(process.returncode)) diff --git a/pype/plugins/nuke/_publish_unused/submit_deadline.py b/pype/plugins/nuke/_publish_unused/submit_deadline.py new file mode 100644 index 0000000000..ffb298f75d --- /dev/null +++ b/pype/plugins/nuke/_publish_unused/submit_deadline.py @@ -0,0 +1,147 @@ +import os +import json +import getpass + +from avalon import api +from avalon.vendor import requests + +import pyblish.api + + +class NukeSubmitDeadline(pyblish.api.InstancePlugin): + # TODO: rewrite docstring to nuke + """Submit current Comp to Deadline + + Renders are submitted to a Deadline Web Service as + supplied via the environment variable AVALON_DEADLINE + + """ + + label = "Submit to Deadline" + order = pyblish.api.IntegratorOrder + hosts = ["nuke"] + families = ["write", "render.deadline"] + + def process(self, instance): + + context = instance.context + + key = "__hasRun{}".format(self.__class__.__name__) + if context.data.get(key, False): + return + else: + context.data[key] = True + + AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE", + "http://localhost:8082") + assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" + + # Collect all saver instances in context that are to be rendered + write_instances = [] + for instance in context[:]: + if not self.families[0] in instance.data.get("families"): + # Allow only saver family instances + continue + + if not instance.data.get("publish", True): + # Skip inactive instances + continue + self.log.debug(instance.data["name"]) + write_instances.append(instance) + + if not write_instances: + raise RuntimeError("No instances found for Deadline submittion") + + hostVersion = int(context.data["hostVersion"]) + filepath = context.data["currentFile"] + filename = os.path.basename(filepath) + comment = context.data.get("comment", "") + deadline_user = context.data.get("deadlineUser", getpass.getuser()) + + # Documentation for keys available at: + # https://docs.thinkboxsoftware.com + # /products/deadline/8.0/1_User%20Manual/manual + # /manual-submission.html#job-info-file-options + payload = 
{ + "JobInfo": { + # Top-level group name + "BatchName": filename, + + # Job name, as seen in Monitor + "Name": filename, + + # User, as seen in Monitor + "UserName": deadline_user, + + # Use a default submission pool for Nuke + "Pool": "nuke", + + "Plugin": "Nuke", + "Frames": "{start}-{end}".format( + start=int(instance.data["startFrame"]), + end=int(instance.data["endFrame"]) + ), + + "Comment": comment, + }, + "PluginInfo": { + # Input + "FlowFile": filepath, + + # Mandatory for Deadline + "Version": str(hostVersion), + + # Render in high quality + "HighQuality": True, + + # Whether saver output should be checked after rendering + # is complete + "CheckOutput": True, + + # Proxy: higher numbers smaller images for faster test renders + # 1 = no proxy quality + "Proxy": 1, + }, + + # Mandatory for Deadline, may be empty + "AuxFiles": [] + } + + # Enable going to rendered frames from Deadline Monitor + for index, instance in enumerate(write_instances): + path = instance.data["path"] + folder, filename = os.path.split(path) + payload["JobInfo"]["OutputDirectory%d" % index] = folder + payload["JobInfo"]["OutputFilename%d" % index] = filename + + # Include critical variables with submission + keys = [ + # TODO: This won't work if the slaves don't have accesss to + # these paths, such as if slaves are running Linux and the + # submitter is on Windows. + "PYTHONPATH", + "NUKE_PATH" + # "OFX_PLUGIN_PATH", + ] + environment = dict({key: os.environ[key] for key in keys + if key in os.environ}, **api.Session) + + payload["JobInfo"].update({ + "EnvironmentKeyValue%d" % index: "{key}={value}".format( + key=key, + value=environment[key] + ) for index, key in enumerate(environment) + }) + + self.log.info("Submitting..") + self.log.info(json.dumps(payload, indent=4, sort_keys=True)) + + # E.g. 
http://192.168.0.1:8082/api/jobs + url = "{}/api/jobs".format(AVALON_DEADLINE) + response = requests.post(url, json=payload) + if not response.ok: + raise Exception(response.text) + + # Store the response for dependent job submission plug-ins + for instance in write_instances: + instance.data["deadlineSubmissionJob"] = response.json() diff --git a/pype/plugins/nuke/publish/validate_nuke_settings.py b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py similarity index 100% rename from pype/plugins/nuke/publish/validate_nuke_settings.py rename to pype/plugins/nuke/_publish_unused/validate_nuke_settings.py diff --git a/pype/plugins/nuke/publish/validate_proxy_mode.py b/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py similarity index 100% rename from pype/plugins/nuke/publish/validate_proxy_mode.py rename to pype/plugins/nuke/_publish_unused/validate_proxy_mode.py diff --git a/pype/plugins/nuke/publish/validate_write_nodes.py b/pype/plugins/nuke/_publish_unused/validate_write_nodes.py similarity index 100% rename from pype/plugins/nuke/publish/validate_write_nodes.py rename to pype/plugins/nuke/_publish_unused/validate_write_nodes.py diff --git a/pype/plugins/nuke/create/create_backdrop b/pype/plugins/nuke/create/create_backdrop new file mode 100644 index 0000000000..2cdc222618 --- /dev/null +++ b/pype/plugins/nuke/create/create_backdrop @@ -0,0 +1,2 @@ +# creates backdrop which is published as separate nuke script +# it is versioned by major version diff --git a/pype/plugins/nuke/create/create_camera b/pype/plugins/nuke/create/create_camera new file mode 100644 index 0000000000..0d542b8ad7 --- /dev/null +++ b/pype/plugins/nuke/create/create_camera @@ -0,0 +1,3 @@ +# create vanilla camera if no camera is selected +# if camera is selected then it will convert it into containerized object +# it is major versioned in publish diff --git a/pype/plugins/nuke/create/create_read_plate b/pype/plugins/nuke/create/create_read_plate new file mode 100644 index 0000000000..90a47cb55e --- /dev/null +++ b/pype/plugins/nuke/create/create_read_plate @@ -0,0 +1,8 @@ +# create publishable read node usually used for enabling version tracking +# also useful for sharing across shots or assets + +# if read nodes are selected it will convert them to centainer +# if no read node selected it will create read node and offer browser to shot resource folder + +# type movie > mov or imagesequence +# type still > matpaint .psd, .tif, .png, diff --git a/pype/plugins/nuke/create/create_write b/pype/plugins/nuke/create/create_write new file mode 100644 index 0000000000..dcb132875a --- /dev/null +++ b/pype/plugins/nuke/create/create_write @@ -0,0 +1,17 @@ +# type: render +# if no render type node in script then first is having in name [master] for definition of main script renderer +# colorspace setting from templates +# dataflow setting from templates + +# type: mask_render +# created with shuffle gizmo for RGB separation into davinci matte +# colorspace setting from templates +# dataflow setting from templates + +# type: prerender +# backdrop with write and read +# colorspace setting from templates +# dataflow setting from templates + +# type: geo +# dataflow setting from templates diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py new file mode 100644 index 0000000000..c1b492ac2e --- /dev/null +++ b/pype/plugins/nuke/create/create_write.py @@ -0,0 +1,149 @@ +from collections import OrderedDict +import avalon.api +import avalon.nuke +from pype.nuke import ( + 
create_write_node +) +from pype import api as pype + +import nuke + + +log = pype.Logger.getLogger(__name__, "nuke") + + +def subset_to_families(subset, family, families): + subset_sufx = str(subset).replace(family, "") + new_subset = families + subset_sufx + return "{}.{}".format(family, new_subset) + + +class CrateWriteRender(avalon.nuke.Creator): + # change this to template preset + preset = "render" + + name = "WriteRender" + label = "Create Write Render" + hosts = ["nuke"] + family = "{}_write".format(preset) + families = preset + icon = "sign-out" + + def __init__(self, *args, **kwargs): + super(CrateWriteRender, self).__init__(*args, **kwargs) + + data = OrderedDict() + + data["family"] = self.family.split("_")[1] + data["families"] = self.families + + {data.update({k: v}) for k, v in self.data.items() + if k not in data.keys()} + self.data = data + + def process(self): + self.name = self.data["subset"] + + family = self.family.split("_")[0] + node = self.family.split("_")[1] + + instance = nuke.toNode(self.data["subset"]) + + if not instance: + write_data = { + "class": node, + "preset": family, + "avalon": self.data + } + + create_write_node(self.data["subset"], write_data) + + return + + +class CrateWritePrerender(avalon.nuke.Creator): + # change this to template preset + preset = "prerender" + + name = "WritePrerender" + label = "Create Write Prerender" + hosts = ["nuke"] + family = "{}_write".format(preset) + families = preset + icon = "sign-out" + + def __init__(self, *args, **kwargs): + super(CrateWritePrerender, self).__init__(*args, **kwargs) + + data = OrderedDict() + + data["family"] = self.family.split("_")[1] + data["families"] = self.families + + {data.update({k: v}) for k, v in self.data.items() + if k not in data.keys()} + self.data = data + + def process(self): + self.name = self.data["subset"] + + instance = nuke.toNode(self.data["subset"]) + + family = self.family.split("_")[0] + node = self.family.split("_")[1] + + if not instance: + write_data = { + "class": node, + "preset": family, + "avalon": self.data + } + + create_write_node(self.data["subset"], write_data) + + return + + +class CrateWriteStill(avalon.nuke.Creator): + # change this to template preset + preset = "still" + + name = "WriteStill" + label = "Create Write Still" + hosts = ["nuke"] + family = "{}_write".format(preset) + families = preset + icon = "image" + + def __init__(self, *args, **kwargs): + super(CrateWriteStill, self).__init__(*args, **kwargs) + + data = OrderedDict() + + data["family"] = self.family.split("_")[1] + data["families"] = self.families + + {data.update({k: v}) for k, v in self.data.items() + if k not in data.keys()} + self.data = data + + def process(self): + self.name = self.data["subset"] + + instance = nuke.toNode(self.data["subset"]) + + family = self.family.split("_")[0] + node = self.family.split("_")[1] + + if not instance: + write_data = { + "frame_range": [nuke.frame(), nuke.frame()], + "class": node, + "preset": family, + "avalon": self.data + } + + nuke.createNode("FrameHold", "first_frame {}".format(nuke.frame())) + create_write_node(self.data["subset"], write_data) + + return diff --git a/pype/plugins/nuke/create/create_write_exr.py b/pype/plugins/nuke/create/create_write_exr.py deleted file mode 100644 index 41cd528b15..0000000000 --- a/pype/plugins/nuke/create/create_write_exr.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import avalon.api -import avalon.nuke -import nuke - - -class CrateWriteExr(avalon.api.Creator): - name = "Write_exr" - label = "Create Write: 
exr" - hosts = ["nuke"] - family = "write" - icon = "sign-out" - - # def __init__(self, *args, **kwargs): - # super(CrateWriteExr, self).__init__(*args, **kwargs) - # self.data.setdefault("subset", "this") - - def process(self): - # nuke = getattr(sys.modules["__main__"], "nuke", None) - data = {} - ext = "exr" - - # todo: improve method of getting current environment - # todo: pref avalon.Session over os.environ - - workdir = os.path.normpath(os.environ["AVALON_WORKDIR"]) - - filename = "{}.####.exr".format(self.name) - filepath = os.path.join( - workdir, - "render", - ext, - filename - ).replace("\\", "/") - - with avalon.nuke.viewer_update_and_undo_stop(): - w = nuke.createNode( - "Write", - "name {}".format(self.name)) - # w.knob('colorspace').setValue() - w.knob('file').setValue(filepath) - w.knob('file_type').setValue(ext) - w.knob('datatype').setValue("16 bit half") - w.knob('compression').setValue("Zip (1 scanline)") - w.knob('create_directories').setValue(True) - w.knob('autocrop').setValue(True) - - return data diff --git a/pype/plugins/nuke/inventory/select_containers.py b/pype/plugins/nuke/inventory/select_containers.py index 89ac31d660..339e3a4992 100644 --- a/pype/plugins/nuke/inventory/select_containers.py +++ b/pype/plugins/nuke/inventory/select_containers.py @@ -1,7 +1,7 @@ from avalon import api -class NukeSelectContainers(api.InventoryAction): +class SelectContainers(api.InventoryAction): label = "Select Containers" icon = "mouse-pointer" diff --git a/pype/plugins/nuke/load/actions.py b/pype/plugins/nuke/load/actions.py index 94ae2999f6..f3b7748f01 100644 --- a/pype/plugins/nuke/load/actions.py +++ b/pype/plugins/nuke/load/actions.py @@ -5,7 +5,7 @@ from avalon import api -class NukeSetFrameRangeLoader(api.Loader): +class SetFrameRangeLoader(api.Loader): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", @@ -38,7 +38,7 @@ class NukeSetFrameRangeLoader(api.Loader): lib.update_frame_range(start, end) -class NukeSetFrameRangeWithHandlesLoader(api.Loader): +class SetFrameRangeWithHandlesLoader(api.Loader): """Specific loader of Alembic for the avalon.animation family""" families = ["animation", diff --git a/pype/plugins/nuke/load/load_alembic b/pype/plugins/nuke/load/load_alembic new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/plugins/nuke/load/load_backdrop b/pype/plugins/nuke/load/load_backdrop new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/plugins/nuke/load/load_camera_abc b/pype/plugins/nuke/load/load_camera_abc new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/plugins/nuke/load/load_camera_nk b/pype/plugins/nuke/load/load_camera_nk new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/pype/plugins/nuke/load/load_camera_nk @@ -0,0 +1 @@ + diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 8d89998aa8..0b771a7007 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -118,7 +118,7 @@ def loader_shift(node, frame, relative=True): return int(shift) -class NukeLoadSequence(api.Loader): +class LoadSequence(api.Loader): """Load image sequence into Nuke""" families = ["imagesequence"] diff --git a/pype/plugins/nuke/load/load_still b/pype/plugins/nuke/load/load_still new file mode 100644 index 0000000000..c2aa061c5a --- /dev/null +++ b/pype/plugins/nuke/load/load_still @@ -0,0 +1 @@ +# usually used for mattepainting diff --git 
a/pype/plugins/nuke/publish/collect_current_file.py b/pype/plugins/nuke/publish/collect_current_file.py index 0d4867f08b..96ec44d9d6 100644 --- a/pype/plugins/nuke/publish/collect_current_file.py +++ b/pype/plugins/nuke/publish/collect_current_file.py @@ -1,18 +1,18 @@ import pyblish.api -class CollectCurrentFile(pyblish.api.ContextPlugin): +class SelectCurrentFile(pyblish.api.ContextPlugin): """Inject the current working file into context""" - order = pyblish.api.CollectorOrder - 0.1 - label = "Collect Current File" + order = pyblish.api.CollectorOrder hosts = ["nuke"] - families = ["workfile"] def process(self, context): import os import nuke current_file = nuke.root().name() + normalised = os.path.normpath(current_file) + context.data["current_file"] = normalised context.data["currentFile"] = normalised diff --git a/pype/plugins/nuke/publish/collect_instances.py b/pype/plugins/nuke/publish/collect_instances.py new file mode 100644 index 0000000000..f1fa1276c2 --- /dev/null +++ b/pype/plugins/nuke/publish/collect_instances.py @@ -0,0 +1,58 @@ +import os + +import nuke +import pyblish.api +from pype.nuke.lib import get_avalon_knob_data + + +@pyblish.api.log +class CollectNukeInstances(pyblish.api.ContextPlugin): + """Collect all nodes with Avalon knob.""" + + order = pyblish.api.CollectorOrder + label = "Collect Instances" + hosts = ["nuke", "nukeassist"] + + def process(self, context): + instances = [] + # creating instances per write node + for node in nuke.allNodes(): + + try: + if node["disable"].value(): + continue + except Exception: + continue + + # get data from avalon knob + avalon_knob_data = get_avalon_knob_data(node) + if not avalon_knob_data: + continue + subset = avalon_knob_data["subset"] + + # Create instance + instance = context.create_instance(subset) + instance.add(node) + + instance.data.update({ + "asset": os.environ["AVALON_ASSET"], + "label": node.name(), + "name": node.name(), + "subset": subset, + "families": [avalon_knob_data["families"]], + "family": avalon_knob_data["family"], + "publish": node.knob("publish").value() + }) + self.log.info("collected instance: {}".format(instance.data)) + instances.append(instance) + + context.data["instances"] = instances + + # Sort/grouped by family (preserving local index) + context[:] = sorted(context, key=self.sort_by_family) + + self.log.debug("context: {}".format(context)) + + def sort_by_family(self, instance): + """Sort by family""" + return instance.data.get("families", instance.data.get("family")) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py new file mode 100644 index 0000000000..db966fd84d --- /dev/null +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -0,0 +1,90 @@ +import os + +import nuke +import pyblish.api +import logging +log = logging.getLogger(__name__) + + +@pyblish.api.log +class CollectNukeWrites(pyblish.api.ContextPlugin): + """Collect all write nodes.""" + + order = pyblish.api.CollectorOrder + 0.1 + label = "Collect Writes" + hosts = ["nuke", "nukeassist"] + + def process(self, context): + for instance in context.data["instances"]: + self.log.debug("checking instance: {}".format(instance)) + node = instance[0] + + if node.Class() != "Write": + continue + + # Determine defined file type + ext = node["file_type"].value() + + # Determine output type + output_type = "img" + if ext == "mov": + output_type = "mov" + + # Get frame range + first_frame = int(nuke.root()["first_frame"].getValue()) + last_frame = int(nuke.root()["last_frame"].getValue()) 
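+ # NOTE: the frame range defaults to the script (root) range read above; + # when the write node's "use_limit" knob is enabled, the node's own + # "first"/"last" knobs take precedence (handled below).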
+ + if node["use_limit"].getValue(): + first_frame = int(node["first"].getValue()) + last_frame = int(node["last"].getValue()) + + # get path + path = nuke.filename(node) + output_dir = os.path.dirname(path) + self.log.debug('output dir: {}'.format(output_dir)) + # Include start and end render frame in label + name = node.name() + + label = "{0} ({1}-{2})".format( + name, + int(first_frame), + int(last_frame) + ) + + # preredered frames + if not node["render"].value(): + families = "prerendered.frames" + collected_frames = os.listdir(output_dir) + self.log.debug("collected_frames: {}".format(label)) + if "files" not in instance.data: + instance.data["files"] = list() + instance.data["files"].append(collected_frames) + instance.data['transfer'] = False + else: + # dealing with local/farm rendering + if node["render_farm"].value(): + families = "{}.farm".format(instance.data["families"][0]) + else: + families = "{}.local".format(instance.data["families"][0]) + + self.log.debug("checking for error: {}".format(label)) + instance.data.update({ + "path": path, + "outputDir": output_dir, + "ext": ext, + "label": label, + "families": [families], + "firstFrame": first_frame, + "lastFrame": last_frame, + "outputType": output_type, + "stagingDir": output_dir, + + }) + + self.log.debug("instance.data: {}".format(instance.data)) + + self.log.debug("context: {}".format(context)) + + def sort_by_family(self, instance): + """Sort by family""" + return instance.data.get("families", instance.data.get("family")) diff --git a/pype/plugins/nuke/publish/extract_output_directory.py b/pype/plugins/nuke/publish/extract_output_directory.py index 3064fad3c5..36ddb35e30 100644 --- a/pype/plugins/nuke/publish/extract_output_directory.py +++ b/pype/plugins/nuke/publish/extract_output_directory.py @@ -20,7 +20,7 @@ class ExtractOutputDirectory(pyblish.api.InstancePlugin): path = instance.data["collection"].format() if "output_path" in instance.data.keys(): - path = instance.data["output_path"] + path = instance.data["path"] if not path: return diff --git a/pype/plugins/nuke/publish/extract_script_save.py b/pype/plugins/nuke/publish/extract_script_save.py deleted file mode 100644 index b0eeb47886..0000000000 --- a/pype/plugins/nuke/publish/extract_script_save.py +++ /dev/null @@ -1,15 +0,0 @@ -import nuke -import pyblish.api - - -class ExtractScriptSave(pyblish.api.InstancePlugin): - """ Saves the script before extraction. """ - - order = pyblish.api.ExtractorOrder - 0.49 - label = "Script Save" - hosts = ["nuke"] - families = ["saver"] - - def process(self, instance): - - nuke.scriptSave() diff --git a/pype/plugins/nuke/publish/integrate_rendered_frames.py b/pype/plugins/nuke/publish/integrate_rendered_frames.py new file mode 100644 index 0000000000..f482a48cda --- /dev/null +++ b/pype/plugins/nuke/publish/integrate_rendered_frames.py @@ -0,0 +1,361 @@ +import os +import logging +import shutil + +import errno +import pyblish.api +from avalon import api, io + + +log = logging.getLogger(__name__) + + +class IntegrateFrames(pyblish.api.InstancePlugin): + """Resolve any dependency issies + + This plug-in resolves any paths which, if not updated might break + the published file. + + The order of families is important, when working with lookdev you want to + first publish the texture, update the texture paths in the nodes and then + publish the shading network. Same goes for file dependent assets. 
+ """ + + label = "Integrate Frames" + order = pyblish.api.IntegratorOrder + families = ["prerendered.frames"] + + def process(self, instance): + + self.register(instance) + + self.log.info("Integrating Asset in to the database ...") + # self.integrate(instance) + + def register(self, instance): + + # Required environment variables + PROJECT = api.Session["AVALON_PROJECT"] + ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"] + LOCATION = api.Session["AVALON_LOCATION"] + + context = instance.context + # Atomicity + # + # Guarantee atomic publishes - each asset contains + # an identical set of members. + # __ + # / o + # / \ + # | o | + # \ / + # o __/ + # + assert all(result["success"] for result in context.data["results"]), ( + "Atomicity not held, aborting.") + + # Assemble + # + # | + # v + # ---> <---- + # ^ + # | + # + stagingdir = instance.data.get("stagingDir") + assert stagingdir, ("Incomplete instance \"%s\": " + "Missing reference to staging area." % instance) + + # extra check if stagingDir actually exists and is available + + self.log.debug("Establishing staging directory @ %s" % stagingdir) + + project = io.find_one({"type": "project"}, + projection={"config.template.publish": True}) + + asset = io.find_one({"type": "asset", + "name": ASSET, + "parent": project["_id"]}) + + assert all([project, asset]), ("Could not find current project or " + "asset '%s'" % ASSET) + + subset = self.get_subset(asset, instance) + + # get next version + latest_version = io.find_one({"type": "version", + "parent": subset["_id"]}, + {"name": True}, + sort=[("name", -1)]) + + next_version = 1 + if latest_version is not None: + next_version += latest_version["name"] + + self.log.info("Verifying version from assumed destination") + + assumed_data = instance.data["assumedTemplateData"] + assumed_version = assumed_data["version"] + if assumed_version != next_version: + raise AttributeError("Assumed version 'v{0:03d}' does not match" + "next version in database " + "('v{1:03d}')".format(assumed_version, + next_version)) + + self.log.debug("Next version: v{0:03d}".format(next_version)) + + version_data = self.create_version_data(context, instance) + version = self.create_version(subset=subset, + version_number=next_version, + locations=[LOCATION], + data=version_data) + + self.log.debug("Creating version ...") + version_id = io.insert_one(version).inserted_id + + # Write to disk + # _ + # | | + # _| |_ + # ____\ / + # |\ \ / \ + # \ \ v \ + # \ \________. 
+ # \|________| + # + root = api.registered_root() + # template_data = {"root": root, + # "project": PROJECT, + # "silo": asset['silo'], + # "asset": ASSET, + # "subset": subset["name"], + # "version": version["name"]} + hierarchy = io.find_one({"type":'asset', "name":ASSET})['data']['parents'] + if hierarchy: + # hierarchy = os.path.sep.join(hierarchy) + hierarchy = os.path.join(*hierarchy) + + template_data = {"root": root, + "project": {"name": PROJECT, + "code": "prjX"}, + "silo": asset['silo'], + "asset": ASSET, + "family": instance.data['family'], + "subset": subset["name"], + "VERSION": version["name"], + "hierarchy": hierarchy} + + template_publish = project["config"]["template"]["publish"] + anatomy = instance.context.data['anatomy'] + + # Find the representations to transfer amongst the files + # Each should be a single representation (as such, a single extension) + representations = [] + + for files in instance.data["files"]: + + # Collection + # _______ + # |______|\ + # | |\| + # | || + # | || + # | || + # |_______| + # + if isinstance(files, list): + collection = files + # Assert that each member has identical suffix + _, ext = os.path.splitext(collection[0]) + assert all(ext == os.path.splitext(name)[1] + for name in collection), ( + "Files had varying suffixes, this is a bug" + ) + + assert not any(os.path.isabs(name) for name in collection) + + template_data["representation"] = ext[1:] + + for fname in collection: + + src = os.path.join(stagingdir, fname) + anatomy_filled = anatomy.format(template_data) + dst = anatomy_filled.publish.path + + # if instance.data.get('transfer', True): + # instance.data["transfers"].append([src, dst]) + + else: + # Single file + # _______ + # | |\ + # | | + # | | + # | | + # |_______| + # + fname = files + assert not os.path.isabs(fname), ( + "Given file name is a full path" + ) + _, ext = os.path.splitext(fname) + + template_data["representation"] = ext[1:] + + src = os.path.join(stagingdir, fname) + anatomy_filled = anatomy.format(template_data) + dst = anatomy_filled.publish.path + + + # if instance.data.get('transfer', True): + # dst = src + # instance.data["transfers"].append([src, dst]) + + representation = { + "schema": "pype:representation-2.0", + "type": "representation", + "parent": version_id, + "name": ext[1:], + "data": {'path': src}, + "dependencies": instance.data.get("dependencies", "").split(), + + # Imprint shortcut to context + # for performance reasons. + "context": { + "root": root, + "project": PROJECT, + "projectcode": "prjX", + 'task': api.Session["AVALON_TASK"], + "silo": asset['silo'], + "asset": ASSET, + "family": instance.data['family'], + "subset": subset["name"], + "version": version["name"], + "hierarchy": hierarchy, + "representation": ext[1:] + } + } + representations.append(representation) + + self.log.info("Registering {} items".format(len(representations))) + + io.insert_many(representations) + + def integrate(self, instance): + """Move the files + + Through `instance.data["transfers"]` + + Args: + instance: the instance to integrate + """ + + transfers = instance.data["transfers"] + + for src, dest in transfers: + self.log.info("Copying file .. 
{} -> {}".format(src, dest)) + self.copy_file(src, dest) + + def copy_file(self, src, dst): + """ Copy given source to destination + + Arguments: + src (str): the source file which needs to be copied + dst (str): the destination of the sourc file + Returns: + None + """ + + dirname = os.path.dirname(dst) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno == errno.EEXIST: + pass + else: + self.log.critical("An unexpected error occurred.") + raise + + shutil.copy(src, dst) + + def get_subset(self, asset, instance): + + subset = io.find_one({"type": "subset", + "parent": asset["_id"], + "name": instance.data["subset"]}) + + if subset is None: + subset_name = instance.data["subset"] + self.log.info("Subset '%s' not found, creating.." % subset_name) + + _id = io.insert_one({ + "schema": "pype:subset-2.0", + "type": "subset", + "name": subset_name, + "data": {}, + "parent": asset["_id"] + }).inserted_id + + subset = io.find_one({"_id": _id}) + + return subset + + def create_version(self, subset, version_number, locations, data=None): + """ Copy given source to destination + + Args: + subset (dict): the registered subset of the asset + version_number (int): the version number + locations (list): the currently registered locations + + Returns: + dict: collection of data to create a version + """ + # Imprint currently registered location + version_locations = [location for location in locations if + location is not None] + + return {"schema": "pype:version-2.0", + "type": "version", + "parent": subset["_id"], + "name": version_number, + "locations": version_locations, + "data": data} + + def create_version_data(self, context, instance): + """Create the data collection for the version + + Args: + context: the current context + instance: the current instance being published + + Returns: + dict: the required information with instance.data as key + """ + + families = [] + current_families = instance.data.get("families", list()) + instance_family = instance.data.get("family", None) + + if instance_family is not None: + families.append(instance_family) + families += current_families + + # create relative source path for DB + relative_path = os.path.relpath(context.data["currentFile"], + api.registered_root()) + source = os.path.join("{root}", relative_path).replace("\\", "/") + + version_data = {"families": families, + "time": context.data["time"], + "author": context.data["user"], + "source": source, + "comment": context.data.get("comment")} + + # Include optional data if present in + optionals = ["startFrame", "endFrame", "step", "handles"] + for key in optionals: + if key in instance.data: + version_data[key] = instance.data[key] + + return version_data diff --git a/pype/plugins/nuke/publish/render_local.py b/pype/plugins/nuke/publish/render_local.py new file mode 100644 index 0000000000..55adedb9e5 --- /dev/null +++ b/pype/plugins/nuke/publish/render_local.py @@ -0,0 +1,48 @@ +import pyblish.api +import nuke + + +class NukeRenderLocal(pyblish.api.InstancePlugin): + # TODO: rewrite docstring to nuke + """Render the current Fusion composition locally. + + Extract the result of savers by starting a comp render + This will run the local render of Fusion. 
+ + """ + + order = pyblish.api.ExtractorOrder + label = "Render Local" + hosts = ["nuke"] + families = ["render.local", "prerender.local", "still.local"] + + def process(self, instance): + + # This should be a ContextPlugin, but this is a workaround + # for a bug in pyblish to run once for a family: issue #250 + context = instance.context + key = "__hasRun{}".format(self.__class__.__name__) + if context.data.get(key, False): + return + else: + context.data[key] = True + + self.log.debug("instance collected: {}".format(instance.data)) + + first_frame = instance.data.get("firstFrame", None) + last_frame = instance.data.get("lastFrame", None) + node_subset_name = instance.data.get("name", None) + + self.log.info("Starting render") + self.log.info("Start frame: {}".format(first_frame)) + self.log.info("End frame: {}".format(last_frame)) + + # Render frames + nuke.execute( + node_subset_name, + int(first_frame), + int(last_frame) + ) + # swith to prerendered.frames + instance[0]["render"].setValue(False) + self.log.info('Finished render') diff --git a/pype/plugins/nuke/publish/script_save.py b/pype/plugins/nuke/publish/script_save.py new file mode 100644 index 0000000000..472742f464 --- /dev/null +++ b/pype/plugins/nuke/publish/script_save.py @@ -0,0 +1,15 @@ +import nuke +import pyblish.api + + +class ExtractScriptSave(pyblish.api.Extractor): + """ + """ + label = 'Script Save' + order = pyblish.api.Extractor.order - 0.45 + hosts = ['nuke'] + + def process(self, instance): + + self.log.info('saving script') + nuke.scriptSave() diff --git a/pype/plugins/nuke/publish/validate_collection.py b/pype/plugins/nuke/publish/validate_collection.py new file mode 100644 index 0000000000..4088272bc4 --- /dev/null +++ b/pype/plugins/nuke/publish/validate_collection.py @@ -0,0 +1,53 @@ +import os +import pyblish.api +import clique + + +@pyblish.api.log +class RepairCollectionAction(pyblish.api.Action): + label = "Repair" + on = "failed" + icon = "wrench" + + def process(self, context, plugin): + + files_remove = [os.path.join(context[0].data["outputDir"], f) + for f in context[0].data["files"]] + for f in files_remove: + os.remove(f) + self.log.debug("removing file: {}".format(f)) + context[0][0]["render"].setValue(True) + self.log.info("Rendering toggled ON") + + +class ValidateCollection(pyblish.api.InstancePlugin): + """ Validates file output. """ + + order = pyblish.api.ValidatorOrder + # optional = True + families = ['prerendered.frames'] + label = "Check prerendered frames" + hosts = ["nuke"] + actions = [RepairCollectionAction] + + def process(self, instance): + self.log.debug('instance.data["files"]: {}'.format(instance.data['files'])) + collections, remainder = clique.assemble(*instance.data['files']) + self.log.info('collections: {}'.format(str(collections))) + + frame_length = instance.data["lastFrame"] \ + - instance.data["firstFrame"] + 1 + + if frame_length is not 1: + assert len(collections) == 1, self.log.info( + "There are multiple collections in the folder") + assert collections[0].is_contiguous(), self.log.info("Some frames appear to be missing") + + assert remainder is not None, self.log.info("There are some extra files in folder") + + self.log.info('frame_length: {}'.format(frame_length)) + self.log.info('len(list(instance.data["files"])): {}'.format( + len(list(instance.data["files"][0])))) + + assert len(list(instance.data["files"][0])) is frame_length, self.log.info( + "{} missing frames. 
Use repair to render all frames".format(__name__)) diff --git a/pype/plugins/nuke/publish/validate_prerenders_output.py b/pype/plugins/nuke/publish/validate_prerenders_output.py deleted file mode 100644 index 412c55ac0a..0000000000 --- a/pype/plugins/nuke/publish/validate_prerenders_output.py +++ /dev/null @@ -1,20 +0,0 @@ -import os -import pyblish.api - - -@pyblish.api.log -class ValidatePrerendersOutput(pyblish.api.Validator): - """Validates that the output directory for the write nodes exists""" - - families = ['write.prerender'] - hosts = ['nuke'] - label = 'Pre-renders output' - - def process(self, instance): - path = os.path.dirname(instance[0]['file'].value()) - - if 'output' not in path: - name = instance[0].name() - msg = 'Output directory for %s is not in an "output" folder.' % name - - raise ValueError(msg) diff --git a/pype/templates.py b/pype/templates.py new file mode 100644 index 0000000000..7e4b962d52 --- /dev/null +++ b/pype/templates.py @@ -0,0 +1,100 @@ +import os +import re +from avalon import io +from app.api import (Templates, Logger, format) +log = Logger.getLogger(__name__, + os.getenv("AVALON_APP", "pype-config")) + + +def load_data_from_templates(): + from . import api + if not any([ + api.Dataflow, + api.Anatomy, + api.Colorspace, + api.Metadata + ] + ): + # base = Templates() + t = Templates(type=["anatomy", "metadata", "dataflow", "colorspace"]) + api.Anatomy = t.anatomy + api.Metadata = t.metadata.format() + data = {"metadata": api.Metadata} + api.Dataflow = t.dataflow.format(data) + api.Colorspace = t.colorspace + log.info("Data from templates were Loaded...") + + +def reset_data_from_templates(): + from . import api + api.Dataflow = None + api.Anatomy = None + api.Colorspace = None + api.Metadata = None + log.info("Data from templates were Unloaded...") + + +def get_version_from_workfile(file): + pattern = re.compile(r"_v([0-9]*)") + try: + v_string = pattern.findall(file)[0] + return v_string + except IndexError: + log.error("templates:get_version_from_workfile:" + "`{}` missing version string." 
+ "Example `v004`".format(file)) + + +def get_project_code(): + return io.find_one({"type": "project"})["data"]["code"] + + +def get_project_name(): + project_name = os.getenv("AVALON_PROJECT", None) + assert project_name, log.error("missing `AVALON_PROJECT`" + "in environment variables") + return project_name + + +def get_asset(): + asset = os.getenv("AVALON_ASSET", None) + assert asset, log.error("missing `AVALON_ASSET`" + "in environment variables") + return asset + + +def get_task(): + task = os.getenv("AVALON_TASK", None) + assert task, log.error("missing `AVALON_TASK`" + "in environment variables") + return task + + +def get_hiearchy(): + hierarchy = io.find_one({ + "type": 'asset', + "name": get_asset()} + )['data']['parents'] + + if hierarchy: + # hierarchy = os.path.sep.join(hierarchy) + return os.path.join(*hierarchy) + + +def fill_avalon_workdir(): + awd = os.getenv("AVALON_WORKDIR", None) + assert awd, log.error("missing `AVALON_WORKDIR`" + "in environment variables") + if "{" not in awd: + return + + data = { + "hierarchy": get_hiearchy(), + "task": get_task(), + "asset": get_asset(), + "project": {"name": get_project_name(), + "code": get_project_code()}} + + awd_filled = os.path.normpath(format(awd, data)) + os.environ["AVALON_WORKDIR"] = awd_filled + log.info("`AVALON_WORKDIR` fixed to: {}".format(awd_filled)) diff --git a/pype/vendor/backports/__init__.py b/pype/vendor/backports/__init__.py new file mode 100644 index 0000000000..69e3be50da --- /dev/null +++ b/pype/vendor/backports/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/pype/vendor/backports/configparser/__init__.py b/pype/vendor/backports/configparser/__init__.py new file mode 100644 index 0000000000..06d7a0855f --- /dev/null +++ b/pype/vendor/backports/configparser/__init__.py @@ -0,0 +1,1390 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +"""Configuration file parser. + +A configuration file consists of sections, lead by a "[section]" header, +and followed by "name: value" entries, with continuations and such in +the style of RFC 822. + +Intrinsic defaults can be specified by passing them into the +ConfigParser constructor as a dictionary. + +class: + +ConfigParser -- responsible for parsing a list of + configuration files, and managing the parsed database. + + methods: + + __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, + delimiters=('=', ':'), comment_prefixes=('#', ';'), + inline_comment_prefixes=None, strict=True, + empty_lines_in_values=True, default_section='DEFAULT', + interpolation=, converters=): + Create the parser. When `defaults' is given, it is initialized into the + dictionary or intrinsic defaults. The keys must be strings, the values + must be appropriate for %()s string interpolation. + + When `dict_type' is given, it will be used to create the dictionary + objects for the list of sections, for the options within a section, and + for the default values. + + When `delimiters' is given, it will be used as the set of substrings + that divide keys from values. + + When `comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in empty lines. Comments can be + indented. + + When `inline_comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in non-empty lines. + + When `strict` is True, the parser won't allow for any section or option + duplicates while reading from a single source (file, string or + dictionary). Default is True. 
+ + When `empty_lines_in_values' is False (default: True), each empty line + marks the end of an option. Otherwise, internal empty lines of + a multiline option are kept as part of the value. + + When `allow_no_value' is True (default: False), options without + values are accepted; the value presented for these is None. + + sections() + Return all the configuration section names, sans DEFAULT. + + has_section(section) + Return whether the given section exists. + + has_option(section, option) + Return whether the given option exists in the given section. + + options(section) + Return list of configuration options for the named section. + + read(filenames, encoding=None) + Read and parse the list of named configuration files, given by + name. A single filename is also allowed. Non-existing files + are ignored. Return list of successfully read files. + + read_file(f, filename=None) + Read and parse one configuration file, given as a file object. + The filename defaults to f.name; it is only used in error + messages (if f has no `name' attribute, the string `' is used). + + read_string(string) + Read configuration from a given string. + + read_dict(dictionary) + Read configuration from a dictionary. Keys are section names, + values are dictionaries with keys and values that should be present + in the section. If the used dictionary type preserves order, sections + and their keys will be added in order. Values are automatically + converted to strings. + + get(section, option, raw=False, vars=None, fallback=_UNSET) + Return a string value for the named option. All % interpolations are + expanded in the return values, based on the defaults passed into the + constructor and the DEFAULT section. Additional substitutions may be + provided using the `vars' argument, which must be a dictionary whose + contents override any pre-existing defaults. If `option' is a key in + `vars', the value from `vars' is used. + + getint(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to an integer. + + getfloat(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a float. + + getboolean(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a boolean (currently case + insensitively defined as 0, false, no, off for False, and 1, true, + yes, on for True). Returns False or True. + + items(section=_UNSET, raw=False, vars=None) + If section is given, return a list of tuples with (name, value) for + each option in the section. Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. + + remove_section(section) + Remove the given file section and all its options. + + remove_option(section, option) + Remove the given option from the given section. + + set(section, option, value) + Set the given option. + + write(fp, space_around_delimiters=True) + Write the configuration state in .ini format. If + `space_around_delimiters' is True (the default), delimiters + between keys and values are surrounded by spaces. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from collections import MutableMapping +import functools +import io +import itertools +import re +import sys +import warnings + +from backports.configparser.helpers import OrderedDict as _default_dict +from backports.configparser.helpers import ChainMap as _ChainMap +from backports.configparser.helpers import from_none, open, str, PY2 + +__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", + "NoOptionError", "InterpolationError", "InterpolationDepthError", + "InterpolationMissingOptionError", "InterpolationSyntaxError", + "ParsingError", "MissingSectionHeaderError", + "ConfigParser", "SafeConfigParser", "RawConfigParser", + "Interpolation", "BasicInterpolation", "ExtendedInterpolation", + "LegacyInterpolation", "SectionProxy", "ConverterMapping", + "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] + +DEFAULTSECT = "DEFAULT" + +MAX_INTERPOLATION_DEPTH = 10 + + +# exception classes +class Error(Exception): + """Base class for ConfigParser exceptions.""" + + def __init__(self, msg=''): + self.message = msg + Exception.__init__(self, msg) + + def __repr__(self): + return self.message + + __str__ = __repr__ + + +class NoSectionError(Error): + """Raised when no section matches a requested option.""" + + def __init__(self, section): + Error.__init__(self, 'No section: %r' % (section,)) + self.section = section + self.args = (section, ) + + +class DuplicateSectionError(Error): + """Raised when a section is repeated in an input source. + + Possible repetitions that raise this exception are: multiple creation + using the API or in strict parsers when a section is found more than once + in a single input file, string or dictionary. + """ + + def __init__(self, section, source=None, lineno=None): + msg = [repr(section), " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": section ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Section ") + Error.__init__(self, "".join(msg)) + self.section = section + self.source = source + self.lineno = lineno + self.args = (section, source, lineno) + + +class DuplicateOptionError(Error): + """Raised by strict parsers when an option is repeated in an input source. + + Current implementation raises this exception only when an option is found + more than once in a single file, string or dictionary. 
+ """ + + def __init__(self, section, option, source=None, lineno=None): + msg = [repr(option), " in section ", repr(section), + " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": option ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Option ") + Error.__init__(self, "".join(msg)) + self.section = section + self.option = option + self.source = source + self.lineno = lineno + self.args = (section, option, source, lineno) + + +class NoOptionError(Error): + """A requested option was not found.""" + + def __init__(self, option, section): + Error.__init__(self, "No option %r in section: %r" % + (option, section)) + self.option = option + self.section = section + self.args = (option, section) + + +class InterpolationError(Error): + """Base class for interpolation-related exceptions.""" + + def __init__(self, option, section, msg): + Error.__init__(self, msg) + self.option = option + self.section = section + self.args = (option, section, msg) + + +class InterpolationMissingOptionError(InterpolationError): + """A string substitution required a setting which was not available.""" + + def __init__(self, option, section, rawval, reference): + msg = ("Bad value substitution: option {0!r} in section {1!r} contains " + "an interpolation key {2!r} which is not a valid option name. " + "Raw value: {3!r}".format(option, section, reference, rawval)) + InterpolationError.__init__(self, option, section, msg) + self.reference = reference + self.args = (option, section, rawval, reference) + + +class InterpolationSyntaxError(InterpolationError): + """Raised when the source text contains invalid syntax. + + Current implementation raises this exception when the source text into + which substitutions are made does not conform to the required syntax. + """ + + +class InterpolationDepthError(InterpolationError): + """Raised when substitutions are nested too deeply.""" + + def __init__(self, option, section, rawval): + msg = ("Recursion limit exceeded in value substitution: option {0!r} " + "in section {1!r} contains an interpolation key which " + "cannot be substituted in {2} steps. Raw value: {3!r}" + "".format(option, section, MAX_INTERPOLATION_DEPTH, + rawval)) + InterpolationError.__init__(self, option, section, msg) + self.args = (option, section, rawval) + + +class ParsingError(Error): + """Raised when a configuration file does not follow legal syntax.""" + + def __init__(self, source=None, filename=None): + # Exactly one of `source'/`filename' arguments has to be given. + # `filename' kept for compatibility. + if filename and source: + raise ValueError("Cannot specify both `filename' and `source'. " + "Use `source'.") + elif not filename and not source: + raise ValueError("Required argument `source' not given.") + elif filename: + source = filename + Error.__init__(self, 'Source contains parsing errors: %r' % source) + self.source = source + self.errors = [] + self.args = (source, ) + + @property + def filename(self): + """Deprecated, use `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. " + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + return self.source + + @filename.setter + def filename(self, value): + """Deprecated, user `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. 
" + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + self.source = value + + def append(self, lineno, line): + self.errors.append((lineno, line)) + self.message += '\n\t[line %2d]: %s' % (lineno, line) + + +class MissingSectionHeaderError(ParsingError): + """Raised when a key-value pair is found before any section header.""" + + def __init__(self, filename, lineno, line): + Error.__init__( + self, + 'File contains no section headers.\nfile: %r, line: %d\n%r' % + (filename, lineno, line)) + self.source = filename + self.lineno = lineno + self.line = line + self.args = (filename, lineno, line) + + +# Used in parser getters to indicate the default behaviour when a specific +# option is not found it to raise an exception. Created to enable `None' as +# a valid fallback value. +_UNSET = object() + + +class Interpolation(object): + """Dummy interpolation that passes the value through with no changes.""" + + def before_get(self, parser, section, option, value, defaults): + return value + + def before_set(self, parser, section, option, value): + return value + + def before_read(self, parser, section, option, value): + return value + + def before_write(self, parser, section, option, value): + return value + + +class BasicInterpolation(Interpolation): + """Interpolation as implemented in the classic ConfigParser. + + The option values can contain format strings which refer to other values in + the same section, or values in the special default section. + + For example: + + something: %(dir)s/whatever + + would resolve the "%(dir)s" to the value of dir. All reference + expansions are done late, on demand. If a user needs to use a bare % in + a configuration file, she can escape it by writing %%. Other % usage + is considered a user error and raises `InterpolationSyntaxError'.""" + + _KEYCRE = re.compile(r"%\(([^)]+)\)s") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('%%', '') # escaped percent signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '%' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('%'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("%") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "%": + accum.append("%") + rest = rest[2:] + elif c == "(": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + var = parser.optionxform(m.group(1)) + rest = rest[m.end():] + try: + v = map[var] + except KeyError: + raise from_none(InterpolationMissingOptionError( + option, section, rawval, var)) + if "%" in v: + self._interpolate_some(parser, option, accum, v, + section, map, depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'%%' must be followed by '%%' or '(', " + "found: %r" % (rest,)) + + +class ExtendedInterpolation(Interpolation): + """Advanced variant of interpolation, supports the syntax used by + 
`zc.buildout'. Enables interpolation between sections.""" + + _KEYCRE = re.compile(r"\$\{([^}]+)\}") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('$$', '') # escaped dollar signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '$' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('$'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("$") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "$": + accum.append("$") + rest = rest[2:] + elif c == "{": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + path = m.group(1).split(':') + rest = rest[m.end():] + sect = section + opt = option + try: + if len(path) == 1: + opt = parser.optionxform(path[0]) + v = map[opt] + elif len(path) == 2: + sect = path[0] + opt = parser.optionxform(path[1]) + v = parser.get(sect, opt, raw=True) + else: + raise InterpolationSyntaxError( + option, section, + "More than one ':' found: %r" % (rest,)) + except (KeyError, NoSectionError, NoOptionError): + raise from_none(InterpolationMissingOptionError( + option, section, rawval, ":".join(path))) + if "$" in v: + self._interpolate_some(parser, opt, accum, v, sect, + dict(parser.items(sect, raw=True)), + depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'$' must be followed by '$' or '{', " + "found: %r" % (rest,)) + + +class LegacyInterpolation(Interpolation): + """Deprecated interpolation used in old versions of ConfigParser. + Use BasicInterpolation or ExtendedInterpolation instead.""" + + _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") + + def before_get(self, parser, section, option, value, vars): + rawval = value + depth = MAX_INTERPOLATION_DEPTH + while depth: # Loop through this until it's done + depth -= 1 + if value and "%(" in value: + replace = functools.partial(self._interpolation_replace, + parser=parser) + value = self._KEYCRE.sub(replace, value) + try: + value = value % vars + except KeyError as e: + raise from_none(InterpolationMissingOptionError( + option, section, rawval, e.args[0])) + else: + break + if value and "%(" in value: + raise InterpolationDepthError(option, section, rawval) + return value + + def before_set(self, parser, section, option, value): + return value + + @staticmethod + def _interpolation_replace(match, parser): + s = match.group(1) + if s is None: + return match.group() + else: + return "%%(%s)s" % parser.optionxform(s) + + +class RawConfigParser(MutableMapping): + """ConfigParser that does not do interpolation.""" + + # Regular expressions for parsing section headers and options + _SECT_TMPL = r""" + \[ # [ + (?P
[^]]+) # very permissive! + \] # ] + """ + _OPT_TMPL = r""" + (?P