From 9073feb469c729cd1986f0aab3abc8b9fada3852 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Fri, 2 Aug 2019 10:49:25 +0100 Subject: [PATCH 1/9] Repair legacy write nodes. Grabs the existing data from the legacy write nodes, and creates a new write from it. --- .../nuke/publish/collect_legacy_write.py | 25 ++++++++ .../nuke/publish/validate_write_legacy.py | 62 +++++++++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 pype/plugins/nuke/publish/collect_legacy_write.py create mode 100644 pype/plugins/nuke/publish/validate_write_legacy.py diff --git a/pype/plugins/nuke/publish/collect_legacy_write.py b/pype/plugins/nuke/publish/collect_legacy_write.py new file mode 100644 index 0000000000..05dbe4216c --- /dev/null +++ b/pype/plugins/nuke/publish/collect_legacy_write.py @@ -0,0 +1,25 @@ +import nuke + +import pyblish.api + + +class CollectWriteLegacy(pyblish.api.ContextPlugin): + """Collect legacy write nodes.""" + + order = pyblish.api.CollectorOrder + label = "Collect Write Legacy" + hosts = ["nuke", "nukeassist"] + + def process(self, context): + + for node in nuke.allNodes(): + if node.Class() != "Write": + continue + + if "avalon" not in node.knobs().keys(): + continue + + instance = context.create_instance( + node.name(), family="write.legacy" + ) + instance.append(node) diff --git a/pype/plugins/nuke/publish/validate_write_legacy.py b/pype/plugins/nuke/publish/validate_write_legacy.py new file mode 100644 index 0000000000..b452e60ba4 --- /dev/null +++ b/pype/plugins/nuke/publish/validate_write_legacy.py @@ -0,0 +1,62 @@ +import toml +import os + +import nuke + +from avalon import api +import pyblish.api + + +class RepairWriteLegacyAction(pyblish.api.Action): + + label = "Repair" + icon = "wrench" + on = "failed" + + def process(self, context, plugin): + + # Get the errored instances + failed = [] + for result in context.data["results"]: + if (result["error"] is not None and result["instance"] is not None + and result["instance"] not in failed): + failed.append(result["instance"]) + + # Apply pyblish.logic to get the instances for the plug-in + instances = pyblish.api.instances_by_plugin(failed, plugin) + + for instance in instances: + data = toml.loads(instance[0]["avalon"].value()) + data["xpos"] = instance[0].xpos() + data["ypos"] = instance[0].ypos() + data["input"] = instance[0].input(0) + data["publish"] = instance[0]["publish"].value() + data["render"] = instance[0]["render"].value() + data["render_farm"] = instance[0]["render_farm"].value() + + nuke.delete(instance[0]) + + family = "render{}".format(os.environ["AVALON_TASK"].capitalize()) + api.create(data["subset"], data["asset"], family) + node = nuke.toNode(data["subset"]) + node.setXYpos(data["xpos"], data["ypos"]) + node.setInput(0, data["input"]) + node["publish"].setValue(data["publish"]) + node["render"].setValue(data["render"]) + node["render_farm"].setValue(data["render_farm"]) + + +class ValidateWriteLegacy(pyblish.api.InstancePlugin): + """Validate legacy write nodes.""" + + order = pyblish.api.ValidatorOrder + optional = True + families = ["write.legacy"] + label = "Write Legacy" + hosts = ["nuke"] + actions = [RepairWriteLegacyAction] + + def process(self, instance): + + msg = "Clean up legacy write node \"{}\"".format(instance) + assert False, msg From 8502cd48126b231b911874c6097b251554380f18 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 6 Sep 2019 18:44:22 +0200 Subject: [PATCH 2/9] add changelog --- changelog.md | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) 
diff --git a/changelog.md b/changelog.md index 159ff0baeb..02939896eb 100644 --- a/changelog.md +++ b/changelog.md @@ -1,7 +1,29 @@ # Pype changelog # Welcome to pype changelog -## 2.1 ## +## 2.2.0 ## + +**new**: +- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts +- _(nuke)_ option to choose deadline chunk size on write nodes +- _(nukestudio)_ added option to publish soft effects (subTrackItems) from NukeStudio as subsets including LUT files. these can then be loaded in nuke or NukeStudio +- _(nuke)_ option to build nuke script from previously published latest versions of plate and render subsets. +- _(nuke)_ nuke writes now have deadline tab. +- _(ftrack)_ Prepare Project action can now be used for creating the base folder structure on disk and in ftrack, setting up all the initial project attributes and it automatically prepares `pype_project_config` folder for the given project. +- _(clockify)_ Added support for time tracking in clockify. This currently in addition to ftrack time logs, but does not completely replace them. +- _(pype)_ any attributes in Creator and Loader plugins can now be customised using pype preset system + +**changed**: +- nukestudio now uses workio API for workfiles +- _(maya)_ `FIX FPS` prompt in maya now appears in the middle of the screen +- _(muster)_ can now be configured with custom templates + + +**fix**: +- wrong version retrieval from path in certain scenarios +- nuke reset resolution wasn't working in certain scenarios + +## 2.1.0 ## A large cleanup release. Most of the change are under the hood. From f2d1613bb004c88c2d6cb7a2b88ef790c538eabb Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 6 Sep 2019 18:55:01 +0200 Subject: [PATCH 3/9] add release dates to changelog --- changelog.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/changelog.md b/changelog.md index 02939896eb..46cceb9fdc 100644 --- a/changelog.md +++ b/changelog.md @@ -2,6 +2,7 @@ Welcome to pype changelog ## 2.2.0 ## +_release date: 8 Sept 2019_ **new**: - _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts @@ -15,8 +16,9 @@ Welcome to pype changelog **changed**: - nukestudio now uses workio API for workfiles -- _(maya)_ `FIX FPS` prompt in maya now appears in the middle of the screen +- _(maya)_ "FIX FPS" prompt in maya now appears in the middle of the screen - _(muster)_ can now be configured with custom templates +- _(pype)_ global publishing plugins can now be configured using presets as well as host specific ones **fix**: @@ -24,6 +26,7 @@ Welcome to pype changelog - nuke reset resolution wasn't working in certain scenarios ## 2.1.0 ## +_release date: 6 Aug 2019_ A large cleanup release. Most of the change are under the hood. 
From 3f1a05536813824e132355947a347b5d6a070354 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 11 Sep 2019 17:52:54 +0200 Subject: [PATCH 4/9] fixed icon name from "boxes" to "cubes" in 3 creator plugins --- pype/plugins/maya/create/create_assembly.py | 2 +- pype/plugins/maya/create/create_layout.py | 2 +- pype/plugins/maya/create/create_setdress.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/plugins/maya/create/create_assembly.py b/pype/plugins/maya/create/create_assembly.py index 2a00d4a29a..6d0321b718 100644 --- a/pype/plugins/maya/create/create_assembly.py +++ b/pype/plugins/maya/create/create_assembly.py @@ -7,5 +7,5 @@ class CreateAssembly(avalon.maya.Creator): name = "assembly" label = "Assembly" family = "assembly" - icon = "boxes" + icon = "cubes" defaults = ['Main'] diff --git a/pype/plugins/maya/create/create_layout.py b/pype/plugins/maya/create/create_layout.py index 3f6dd5d769..7f0c82d80e 100644 --- a/pype/plugins/maya/create/create_layout.py +++ b/pype/plugins/maya/create/create_layout.py @@ -7,5 +7,5 @@ class CreateLayout(avalon.maya.Creator): name = "layoutMain" label = "Layout" family = "layout" - icon = "boxes" + icon = "cubes" defaults = ["Main"] diff --git a/pype/plugins/maya/create/create_setdress.py b/pype/plugins/maya/create/create_setdress.py index 079ccbd029..d5fc001299 100644 --- a/pype/plugins/maya/create/create_setdress.py +++ b/pype/plugins/maya/create/create_setdress.py @@ -7,5 +7,5 @@ class CreateSetDress(avalon.maya.Creator): name = "setdressMain" label = "Set Dress" family = "setdress" - icon = "boxes" + icon = "cubes" defaults = ["Main", "Anim"] From 581ba78aff2061b81664f963fdfed003cd714512 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 12 Sep 2019 11:13:04 +0200 Subject: [PATCH 5/9] return original plugins if registered host is not set --- pype/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pype/__init__.py b/pype/__init__.py index bcbedc9a90..c2311dd528 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -35,6 +35,8 @@ def patched_discover(superclass): plugins = _original_discover(superclass) # determine host application to use for finding presets + if avalon.registered_host() is None: + return plugins host = avalon.registered_host().__name__.split(".")[-1] # map plugin superclass to preset json. 
Currenly suppoted is load and From 1cb83eadabfaeb905c75f7a43adb012860af5a1f Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Fri, 20 Sep 2019 10:13:43 +0200 Subject: [PATCH 6/9] fixed PEP and other cosmetic issues in maya.lib --- pype/maya/lib.py | 98 ++++++++++++++++++++++++++---------------------- 1 file changed, 53 insertions(+), 45 deletions(-) diff --git a/pype/maya/lib.py b/pype/maya/lib.py index e54dac78f2..bd48862721 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -39,19 +39,17 @@ SHAPE_ATTRS = {"castsShadows", "doubleSided", "opposite"} -RENDER_ATTRS = {"vray": - { +RENDER_ATTRS = {"vray": { "node": "vraySettings", "prefix": "fileNamePrefix", "padding": "fileNamePadding", "ext": "imageFormatStr" - }, - "default": - { + }, + "default": { "node": "defaultRenderGlobals", "prefix": "imageFilePrefix", "padding": "extensionPadding" - } + } } @@ -341,19 +339,6 @@ def undo_chunk(): cmds.undoInfo(closeChunk=True) -@contextlib.contextmanager -def renderlayer(layer): - """Set the renderlayer during the context""" - - original = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) - - try: - cmds.editRenderLayerGlobals(currentRenderLayer=layer) - yield - finally: - cmds.editRenderLayerGlobals(currentRenderLayer=original) - - @contextlib.contextmanager def evaluation(mode="off"): """Set the evaluation manager during context. @@ -832,7 +817,8 @@ def is_visible(node, # Display layers set overrideEnabled and overrideVisibility on members if cmds.attributeQuery('overrideEnabled', node=node, exists=True): override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node)) - override_visibility = cmds.getAttr('{}.overrideVisibility'.format(node)) + override_visibility = cmds.getAttr('{}.overrideVisibility'.format( + node)) if override_enabled and override_visibility: return False @@ -854,8 +840,8 @@ def extract_alembic(file, startFrame=None, endFrame=None, selection=True, - uvWrite= True, - eulerFilter= True, + uvWrite=True, + eulerFilter=True, dataFormat="ogawa", verbose=False, **kwargs): @@ -1470,8 +1456,8 @@ def apply_shaders(relationships, shadernodes, nodes): member_uuids = [member["uuid"] for member in data["members"]] filtered_nodes = list() - for uuid in member_uuids: - filtered_nodes.extend(nodes_by_id[uuid]) + for m_uuid in member_uuids: + filtered_nodes.extend(nodes_by_id[m_uuid]) id_shading_engines = shading_engines_by_id[shader_uuid] if not id_shading_engines: @@ -2110,6 +2096,7 @@ def bake_to_world_space(nodes, return world_space_nodes + def load_capture_preset(path=None, data=None): import capture_gui import capture @@ -2119,14 +2106,14 @@ def load_capture_preset(path=None, data=None): else: path = path preset = capture_gui.lib.load_json(path) - print preset + print(preset) options = dict() # CODEC id = 'Codec' for key in preset[id]: - options[str(key)]= preset[id][key] + options[str(key)] = preset[id][key] # GENERIC id = 'Generic' @@ -2142,7 +2129,6 @@ def load_capture_preset(path=None, data=None): options['height'] = preset[id]['height'] options['width'] = preset[id]['width'] - # DISPLAY OPTIONS id = 'Display Options' disp_options = {} @@ -2154,7 +2140,6 @@ def load_capture_preset(path=None, data=None): options['display_options'] = disp_options - # VIEWPORT OPTIONS temp_options = {} id = 'Renderer' @@ -2163,11 +2148,12 @@ def load_capture_preset(path=None, data=None): temp_options2 = {} id = 'Viewport Options' - light_options = { 0: "default", - 1: 'all', - 2: 'selected', - 3: 'flat', - 4: 'nolights'} + light_options = { + 0: "default", + 1: 'all', + 2: 
'selected', + 3: 'flat', + 4: 'nolights'} for key in preset[id]: if key == 'high_quality': temp_options2['multiSampleEnable'] = True @@ -2190,7 +2176,10 @@ def load_capture_preset(path=None, data=None): else: temp_options[str(key)] = preset[id][key] - for key in ['override_viewport_options', 'high_quality', 'alphaCut', "gpuCacheDisplayFilter"]: + for key in ['override_viewport_options', + 'high_quality', + 'alphaCut', + 'gpuCacheDisplayFilter']: temp_options.pop(key, None) for key in ['ssaoEnable']: @@ -2199,7 +2188,6 @@ def load_capture_preset(path=None, data=None): options['viewport_options'] = temp_options options['viewport2_options'] = temp_options2 - # use active sound track scene = capture.parse_active_scene() options['sound'] = scene['sound'] @@ -2363,31 +2351,51 @@ class shelf(): if item['type'] == 'button': self.addButon(item['name'], command=item['command']) if item['type'] == 'menuItem': - self.addMenuItem(item['parent'], item['name'], command=item['command']) + self.addMenuItem(item['parent'], + item['name'], + command=item['command']) if item['type'] == 'subMenu': - self.addMenuItem(item['parent'], item['name'], command=item['command']) + self.addMenuItem(item['parent'], + item['name'], + command=item['command']) - def addButon(self, label, icon="commandButton.png", command=_null, doubleCommand=_null): - '''Adds a shelf button with the specified label, command, double click command and image.''' + def addButon(self, label, icon="commandButton.png", + command=_null, doubleCommand=_null): + ''' + Adds a shelf button with the specified label, command, + double click command and image. + ''' cmds.setParent(self.name) if icon: icon = self.iconPath + icon - cmds.shelfButton(width=37, height=37, image=icon, l=label, command=command, dcc=doubleCommand, imageOverlayLabel=label, olb=self.labelBackground, olc=self.labelColour) + cmds.shelfButton(width=37, height=37, image=icon, label=label, + command=command, dcc=doubleCommand, + imageOverlayLabel=label, olb=self.labelBackground, + olc=self.labelColour) def addMenuItem(self, parent, label, command=_null, icon=""): - '''Adds a shelf button with the specified label, command, double click command and image.''' + ''' + Adds a shelf button with the specified label, command, + double click command and image. + ''' if icon: icon = self.iconPath + icon - return cmds.menuItem(p=parent, l=label, c=command, i="") + return cmds.menuItem(p=parent, label=label, c=command, i="") def addSubMenu(self, parent, label, icon=None): - '''Adds a sub menu item with the specified label and icon to the specified parent popup menu.''' + ''' + Adds a sub menu item with the specified label and icon to + the specified parent popup menu. + ''' if icon: icon = self.iconPath + icon - return cmds.menuItem(p=parent, l=label, i=icon, subMenu=1) + return cmds.menuItem(p=parent, label=label, i=icon, subMenu=1) def _cleanOldShelf(self): - '''Checks if the shelf exists and empties it if it does or creates it if it does not.''' + ''' + Checks if the shelf exists and empties it if it does + or creates it if it does not. 
+ ''' if cmds.shelfLayout(self.name, ex=1): if cmds.shelfLayout(self.name, q=1, ca=1): for each in cmds.shelfLayout(self.name, q=1, ca=1): From 621cfddc0d2b3c7a6e9b0b05c70ad379fa282f92 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Fri, 20 Sep 2019 13:40:51 +0200 Subject: [PATCH 7/9] show selection handle on load and offset it to loaded bbox center --- pype/plugins/maya/load/load_mayaascii.py | 18 +++++++++++++++++- pype/plugins/maya/load/load_reference.py | 24 +++++++++++++++++++++++- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/pype/plugins/maya/load/load_mayaascii.py b/pype/plugins/maya/load/load_mayaascii.py index 03a15b0524..b9a5de2782 100644 --- a/pype/plugins/maya/load/load_mayaascii.py +++ b/pype/plugins/maya/load/load_mayaascii.py @@ -45,7 +45,23 @@ class MayaAsciiLoader(pype.maya.plugin.ReferenceLoader): cmds.setAttr(groupName + ".useOutlinerColor", 1) cmds.setAttr(groupName + ".outlinerColor", c[0], c[1], c[2]) - + cmds.setAttr(groupName + ".displayHandle", 1) + # get bounding box + bbox = cmds.exactWorldBoundingBox(groupName) + # get pivot position on world space + pivot = cmds.xform(groupName, q=True, sp=True, ws=True) + # center of bounding box + cx = (bbox[0] + bbox[3]) / 2 + cy = (bbox[1] + bbox[4]) / 2 + cz = (bbox[2] + bbox[5]) / 2 + # add pivot position to calculate offset + cx = cx + pivot[0] + cy = cy + pivot[1] + cz = cz + pivot[2] + # set selection handle offset to center of bounding box + cmds.setAttr(groupName + ".selectHandleX", cx) + cmds.setAttr(groupName + ".selectHandleY", cy) + cmds.setAttr(groupName + ".selectHandleZ", cz) return nodes def switch(self, container, representation): diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index fb4b90a1cd..0a9796e5d7 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -1,8 +1,9 @@ -from avalon import api + import pype.maya.plugin import os from pypeapp import config import pymel.core as pm +from pprint import pprint reload(config) @@ -58,6 +59,9 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): for root in roots: root.setParent(groupNode) + cmds.setAttr(groupName + ".displayHandle", 1) + groupNode + presets = config.get_presets(project=os.environ['AVALON_PROJECT']) colors = presets['plugins']['maya']['load']['colors'] c = colors.get(family) @@ -67,6 +71,24 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): self[:] = nodes + cmds.setAttr(groupName + ".displayHandle", 1) + # get bounding box + bbox = cmds.exactWorldBoundingBox(groupName) + # get pivot position on world space + pivot = cmds.xform(groupName, q=True, sp=True, ws=True) + # center of bounding box + cx = (bbox[0] + bbox[3]) / 2 + cy = (bbox[1] + bbox[4]) / 2 + cz = (bbox[2] + bbox[5]) / 2 + # add pivot position to calculate offset + cx = cx + pivot[0] + cy = cy + pivot[1] + cz = cz + pivot[2] + # set selection handle offset to center of bounding box + cmds.setAttr(groupName + ".selectHandleX", cx) + cmds.setAttr(groupName + ".selectHandleY", cy) + cmds.setAttr(groupName + ".selectHandleZ", cz) + return nodes def switch(self, container, representation): From b145cfe6d29c4f2e0baada93217ca9a043dbe371 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Fri, 20 Sep 2019 13:45:34 +0200 Subject: [PATCH 8/9] fixed unused pprint import --- pype/plugins/maya/load/load_reference.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py 
index 0a9796e5d7..f855cb55f9 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -3,7 +3,6 @@ import pype.maya.plugin import os from pypeapp import config import pymel.core as pm -from pprint import pprint reload(config) @@ -94,6 +93,7 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): def switch(self, container, representation): self.update(container, representation) + # for backwards compatibility class AbcLoader(ReferenceLoader): label = "Deprecated loader (don't use)" @@ -101,6 +101,7 @@ class AbcLoader(ReferenceLoader): representations = ["abc"] tool_names = [] + # for backwards compatibility class ModelLoader(ReferenceLoader): label = "Deprecated loader (don't use)" From 16189b56918acebb51295b93f157f0d892e5497e Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 2 Oct 2019 13:45:01 +0200 Subject: [PATCH 9/9] fix: some changes improving publishing and loading luts --- pype/plugins/global/publish/integrate_new.py | 2 + pype/plugins/nuke/load/load_luts.py | 322 +++++++++++++++++ pype/plugins/nuke/load/load_luts_ip.py | 335 ++++++++++++++++++ .../nuke/publish/validate_active_viewer.py | 24 ++ 4 files changed, 683 insertions(+) create mode 100644 pype/plugins/nuke/load/load_luts.py create mode 100644 pype/plugins/nuke/load/load_luts_ip.py create mode 100644 pype/plugins/nuke/publish/validate_active_viewer.py diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index d9e4f3f533..e87ee97087 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -429,6 +429,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): Returns: None """ + src = os.path.normpath(src) + dst = os.path.normpath(dst) self.log.debug("Copying file .. 
{} -> {}".format(src, dst)) dirname = os.path.dirname(dst) diff --git a/pype/plugins/nuke/load/load_luts.py b/pype/plugins/nuke/load/load_luts.py new file mode 100644 index 0000000000..4f7c19a588 --- /dev/null +++ b/pype/plugins/nuke/load/load_luts.py @@ -0,0 +1,322 @@ +from avalon import api, style, io +import nuke +import json +from collections import OrderedDict + + +class LoadLuts(api.Loader): + """Loading colorspace soft effect exported from nukestudio""" + + representations = ["lutJson"] + families = ["lut"] + + label = "Load Luts - nodes" + order = 0 + icon = "cc" + color = style.colors.light + ignore_attr = ["useLifetime"] + + def load(self, context, name, namespace, data): + """ + Loading function to get the soft effects to particular read node + + Arguments: + context (dict): context of version + name (str): name of the version + namespace (str): asset name + data (dict): compulsory attribute > not used + + Returns: + nuke node: containerised nuke node object + """ + # import dependencies + from avalon.nuke import containerise + + # get main variables + version = context['version'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + workfile_first_frame = int(nuke.root()["first_frame"].getValue()) + namespace = namespace or context['asset']['name'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + # prepare data for imprinting + # add additional metadata from the version to imprint to Avalon knob + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = {"frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # getting file path + file = self.fname.replace("\\", "/") + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).iteritems()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f["effects"]) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + GN = nuke.createNode("Group") + + GN["name"].setValue(object_name) + + # adding content to the group node + with GN: + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for ef_name, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in self.ignore_attr: + continue + if isinstance(v, list) and len(v) > 4: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + (workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to find parent read node + self.connect_read_node(GN, namespace, json_f["assignTo"]) + + GN["tile_color"].setValue(int("0x3469ffff", 16)) + + self.log.info("Loaded lut setup: `{}`".format(GN["name"].value())) + + return containerise( + node=GN, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + 
def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. These automatic changes are to its + inputs: + + """ + + from avalon.nuke import ( + update_container + ) + # get main variables + # Get version from io + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) + # get corresponding node + GN = nuke.toNode(container['objectName']) + + file = api.get_representation_path(representation).replace("\\", "/") + name = container['name'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + workfile_first_frame = int(nuke.root()["first_frame"].getValue()) + namespace = container['namespace'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = {"representation": str(representation["_id"]), + "frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # Update the imprinted representation + update_container( + GN, + data_imprint + ) + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).iteritems()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f["effects"]) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + # adding content to the group node + with GN: + # first remove all nodes + [nuke.delete(n) for n in nuke.allNodes()] + + # create input node + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for ef_name, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in self.ignore_attr: + continue + if isinstance(v, list) and len(v) > 3: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + (workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + # create output node + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to find parent read node + self.connect_read_node(GN, namespace, json_f["assignTo"]) + + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') + + max_version = max(versions) + + # change color of node + if version.get("name") not in [max_version]: + GN["tile_color"].setValue(int("0xd84f20ff", 16)) + else: + GN["tile_color"].setValue(int("0x3469ffff", 16)) + + self.log.info("udated to version: {}".format(version.get("name"))) + + def connect_read_node(self, group_node, asset, subset): + """ + Finds read node and selects it + + Arguments: + asset (str): asset name + + Returns: + nuke node: node is selected + None: if nothing found + """ + search_name = "{0}_{1}".format(asset, subset) + node = [n for n in nuke.allNodes() if search_name in n["name"].value()] + if len(node) > 0: + rn = node[0] + else: + 
rn = None
+
+        # Parent read node has been found
+        # solving connections
+        if rn:
+            dep_nodes = rn.dependent()
+
+            if len(dep_nodes) > 0:
+                for dn in dep_nodes:
+                    dn.setInput(0, group_node)
+
+            group_node.setInput(0, rn)
+            group_node.autoplace()
+
+    def reorder_nodes(self, data):
+        new_order = OrderedDict()
+        trackNums = [v["trackIndex"] for k, v in data.items()]
+        subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
+
+        for trackIndex in range(
+                min(trackNums), max(trackNums) + 1):
+            for subTrackIndex in range(
+                    min(subTrackNums), max(subTrackNums) + 1):
+                item = self.get_item(data, trackIndex, subTrackIndex)
+                if item:
+                    new_order.update(item)
+        return new_order
+
+    def get_item(self, data, trackIndex, subTrackIndex):
+        return {key: val for key, val in data.items()
+                if subTrackIndex == val["subTrackIndex"]
+                if trackIndex == val["trackIndex"]}
+
+    def byteify(self, input):
+        """
+        Converts unicode strings to byte strings
+        It goes through the whole dictionary recursively
+
+        Arguments:
+            input (dict/str): input
+
+        Returns:
+            dict: with fixed values and keys
+
+        """
+
+        if isinstance(input, dict):
+            return {self.byteify(key): self.byteify(value)
+                    for key, value in input.iteritems()}
+        elif isinstance(input, list):
+            return [self.byteify(element) for element in input]
+        elif isinstance(input, unicode):
+            return input.encode('utf-8')
+        else:
+            return input
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def remove(self, container):
+        from avalon.nuke import viewer_update_and_undo_stop
+        node = nuke.toNode(container['objectName'])
+        with viewer_update_and_undo_stop():
+            nuke.delete(node)
diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py
new file mode 100644
index 0000000000..b30f84cc42
--- /dev/null
+++ b/pype/plugins/nuke/load/load_luts_ip.py
@@ -0,0 +1,335 @@
+from avalon import api, style, io
+import nuke
+import json
+from collections import OrderedDict
+from pype.nuke import lib
+
+class LoadLutsInputProcess(api.Loader):
+    """Loading colorspace soft effect exported from nukestudio"""
+
+    representations = ["lutJson"]
+    families = ["lut"]
+
+    label = "Load Luts - Input Process"
+    order = 0
+    icon = "eye"
+    color = style.colors.alert
+    ignore_attr = ["useLifetime"]
+
+    def load(self, context, name, namespace, data):
+        """
+        Loading function to get the soft effects to a particular read node
+
+        Arguments:
+            context (dict): context of version
+            name (str): name of the version
+            namespace (str): asset name
+            data (dict): compulsory attribute (not used)
+
+        Returns:
+            nuke node: containerised nuke node object
+        """
+        # import dependencies
+        from avalon.nuke import containerise
+
+        # get main variables
+        version = context['version']
+        version_data = version.get("data", {})
+        vname = version.get("name", None)
+        first = version_data.get("frameStart", None)
+        last = version_data.get("frameEnd", None)
+        workfile_first_frame = int(nuke.root()["first_frame"].getValue())
+        namespace = namespace or context['asset']['name']
+        colorspace = version_data.get("colorspace", None)
+        object_name = "{}_{}".format(name, namespace)
+
+        # prepare data for imprinting
+        # add additional metadata from the version to imprint to Avalon knob
+        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+                    "source", "author", "fps"]
+
+        data_imprint = {"frameStart": first,
+                        "frameEnd": last,
+                        "version": vname,
+                        "colorspaceInput": colorspace,
+                        "objectName": object_name}
+
+        for k in add_keys:
+            data_imprint.update({k:
version_data[k]}) + + # getting file path + file = self.fname.replace("\\", "/") + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).iteritems()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f["effects"]) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + GN = nuke.createNode("Group") + + GN["name"].setValue(object_name) + + # adding content to the group node + with GN: + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for ef_name, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in self.ignore_attr: + continue + if isinstance(v, list) and len(v) > 4: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + (workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to place it under Viewer1 + if not self.connect_active_viewer(GN): + nuke.delete(GN) + return + + GN["tile_color"].setValue(int("0x3469ffff", 16)) + + self.log.info("Loaded lut setup: `{}`".format(GN["name"].value())) + + return containerise( + node=GN, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. 
These automatic changes are to its + inputs: + + """ + + from avalon.nuke import ( + update_container + ) + # get main variables + # Get version from io + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) + # get corresponding node + GN = nuke.toNode(container['objectName']) + + file = api.get_representation_path(representation).replace("\\", "/") + name = container['name'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + workfile_first_frame = int(nuke.root()["first_frame"].getValue()) + namespace = container['namespace'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = {"representation": str(representation["_id"]), + "frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # Update the imprinted representation + update_container( + GN, + data_imprint + ) + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).iteritems()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f["effects"]) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + # adding content to the group node + with GN: + # first remove all nodes + [nuke.delete(n) for n in nuke.allNodes()] + + # create input node + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for ef_name, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in self.ignore_attr: + continue + if isinstance(v, list) and len(v) > 3: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + (workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + # create output node + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to place it under Viewer1 + if not self.connect_active_viewer(GN): + nuke.delete(GN) + return + + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') + + max_version = max(versions) + + # change color of node + if version.get("name") not in [max_version]: + GN["tile_color"].setValue(int("0xd84f20ff", 16)) + else: + GN["tile_color"].setValue(int("0x3469ffff", 16)) + + self.log.info("udated to version: {}".format(version.get("name"))) + + def connect_active_viewer(self, group_node): + """ + Finds Active viewer and + place the node under it, also adds + name of group into Input Process of the viewer + + Arguments: + group_node (nuke node): nuke group node object + + """ + group_node_name = group_node["name"].value() + + viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()] + if len(viewer) > 0: + viewer = viewer[0] + else: + self.log.error("Please create Viewer node before you run this action again") + return None + + # get coordinates of Viewer1 + xpos = 
viewer["xpos"].value() + ypos = viewer["ypos"].value() + + ypos += 150 + + viewer["ypos"].setValue(ypos) + + # set coordinates to group node + group_node["xpos"].setValue(xpos) + group_node["ypos"].setValue(ypos + 50) + + # add group node name to Viewer Input Process + viewer["input_process_node"].setValue(group_node_name) + + # put backdrop under + lib.create_backdrop(label="Input Process", layer=2, nodes=[viewer, group_node], color="0x7c7faaff") + + return True + + def reorder_nodes(self, data): + new_order = OrderedDict() + trackNums = [v["trackIndex"] for k, v in data.items()] + subTrackNums = [v["subTrackIndex"] for k, v in data.items()] + + for trackIndex in range( + min(trackNums), max(trackNums) + 1): + for subTrackIndex in range( + min(subTrackNums), max(subTrackNums) + 1): + item = self.get_item(data, trackIndex, subTrackIndex) + if item is not {}: + new_order.update(item) + return new_order + + def get_item(self, data, trackIndex, subTrackIndex): + return {key: val for key, val in data.items() + if subTrackIndex == val["subTrackIndex"] + if trackIndex == val["trackIndex"]} + + def byteify(self, input): + """ + Converts unicode strings to strings + It goes trought all dictionary + + Arguments: + input (dict/str): input + + Returns: + dict: with fixed values and keys + + """ + + if isinstance(input, dict): + return {self.byteify(key): self.byteify(value) + for key, value in input.iteritems()} + elif isinstance(input, list): + return [self.byteify(element) for element in input] + elif isinstance(input, unicode): + return input.encode('utf-8') + else: + return input + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from avalon.nuke import viewer_update_and_undo_stop + node = nuke.toNode(container['objectName']) + with viewer_update_and_undo_stop(): + nuke.delete(node) diff --git a/pype/plugins/nuke/publish/validate_active_viewer.py b/pype/plugins/nuke/publish/validate_active_viewer.py new file mode 100644 index 0000000000..618a7f1502 --- /dev/null +++ b/pype/plugins/nuke/publish/validate_active_viewer.py @@ -0,0 +1,24 @@ +import pyblish.api +import nuke + + +class ValidateActiveViewer(pyblish.api.ContextPlugin): + """Validate presentse of the active viewer from nodes + """ + + order = pyblish.api.ValidatorOrder + label = "Validate Active Viewer" + hosts = ["nuke"] + + def process(self, context): + viewer_process_node = context.data.get("ViewerProcess") + + assert viewer_process_node, ( + "Missing active viewer process! Please click on output write node and push key number 1-9" + ) + active_viewer = context.data["ActiveViewer"] + active_input = active_viewer.activeInput() + + assert active_input is not None, ( + "Missing active viewer input! Please click on output write node and push key number 1-9" + )