diff --git a/.flake8 b/.flake8
index 9de8d23bb2..67ed2d77a3 100644
--- a/.flake8
+++ b/.flake8
@@ -1,5 +1,7 @@
 [flake8]
 # ignore = D203
+ignore = BLK100
+max-line-length = 79
 exclude =
     .git,
     __pycache__,
diff --git a/.hound.yml b/.hound.yml
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/LICENSE b/LICENSE
index dfcd71eb3f..63249bb52b 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2018 orbi tools s.r.o
+Copyright (c) 2020 Orbi Tools s.r.o.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
diff --git a/README.md b/README.md
index e254b0ad87..8110887cbd 100644
--- a/README.md
+++ b/README.md
@@ -1,31 +1,11 @@
 Pype
 ====
 
-The base studio _config_ for [Avalon](https://getavalon.github.io/)
+Welcome to PYPE _config_ for [Avalon](https://getavalon.github.io/)
 
-Currently this config is dependent on our customised avalon instalation so it won't work with vanilla avalon core. We're working on open sourcing all of the necessary code though. You can still get inspiration or take our individual validators and scripts which should work just fine in other pipelines.
+To get all the key information about the project, go to [PYPE.club](http://pype.club)
+
+
+Currently this config is dependent on our customised avalon installation, so it won't work with vanilla avalon core. To install it you'll need to download [pype-setup](https://github.com/pypeclub/pype-setup), which is able to deploy everything for you if you follow the documentation.
 
 _This configuration acts as a starting point for all pype club clients wth avalon deployment._
-
-Code convention
----------------
-
-Below are some of the standard practices applied to this repositories.
-
-- **Etiquette: PEP8**
-  - All code is written in PEP8. It is recommended you use a linter as you work, flake8 and pylinter are both good options.
-- **Etiquette: Napoleon docstrings**
-  - Any docstrings are made in Google Napoleon format. See [Napoleon](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) for details.
-
-- **Etiquette: Semantic Versioning**
-  - This project follows [semantic versioning](http://semver.org).
-- **Etiquette: Underscore means private**
-  - Anything prefixed with an underscore means that it is internal to wherever it is used. For example, a variable name is only ever used in the parent function or class. A module is not for use by the end-user. In contrast, anything without an underscore is public, but not necessarily part of the API. Members of the API resides in `api.py`.
-
-- **API: Idempotence**
-  - A public function must be able to be called twice and produce the exact same result. This means no changing of state without restoring previous state when finishing. For example, if a function requires changing the current selection in Autodesk Maya, it must restore the previous selection prior to completing.
diff --git a/pype/maya/plugin.py b/pype/maya/plugin.py
index 327cf47cbd..ed244d56df 100644
--- a/pype/maya/plugin.py
+++ b/pype/maya/plugin.py
@@ -1,4 +1,5 @@
 from avalon import api
+from avalon.vendor import qargparse
 
 
 def get_reference_node_parents(ref):
@@ -33,11 +34,29 @@ class ReferenceLoader(api.Loader):
         `update` logic.
 
     """
-    def load(self,
-             context,
-             name=None,
-             namespace=None,
-             data=None):
+
+    options = [
+        qargparse.Integer(
+            "count",
+            label="Count",
+            default=1,
+            min=1,
+            help="How many times to load?"
+        ),
+        qargparse.Double3(
+            "offset",
+            label="Position Offset",
+            help="Offset loaded models for easier selection."
+ ) + ] + + def load( + self, + context, + name=None, + namespace=None, + options=None + ): import os from avalon.maya import lib @@ -46,29 +65,46 @@ class ReferenceLoader(api.Loader): assert os.path.exists(self.fname), "%s does not exist." % self.fname asset = context['asset'] + loaded_containers = [] - namespace = namespace or lib.unique_namespace( - asset["name"] + "_", - prefix="_" if asset["name"][0].isdigit() else "", - suffix="_", - ) + count = options.get("count") or 1 + for c in range(0, count): + namespace = namespace or lib.unique_namespace( + asset["name"] + "_", + prefix="_" if asset["name"][0].isdigit() else "", + suffix="_", + ) - self.process_reference(context=context, - name=name, - namespace=namespace, - data=data) + # Offset loaded subset + if "offset" in options: + offset = [i * c for i in options["offset"]] + options["translate"] = offset - # Only containerize if any nodes were loaded by the Loader - nodes = self[:] - if not nodes: - return + self.log.info(options) - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) + self.process_reference( + context=context, + name=name, + namespace=namespace, + options=options + ) + + # Only containerize if any nodes were loaded by the Loader + nodes = self[:] + if not nodes: + return + + loaded_containers.append(containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__ + )) + + c += 1 + namespace = None + return loaded_containers def process_reference(self, context, name, namespace, data): """To be implemented by subclass""" diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index f1f87e40c8..e775468996 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -93,11 +93,11 @@ def install(): # Set context settings. 
nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root") + nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root") menu.install() - def launch_workfiles_app(): '''Function letting start workfiles after start of host ''' diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index dedc42fa1d..8e241dad16 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -15,13 +15,12 @@ import nuke from .presets import ( get_colorspace_preset, get_node_dataflow_preset, - get_node_colorspace_preset -) - -from .presets import ( + get_node_colorspace_preset, get_anatomy ) +from .utils import set_context_favorites + from pypeapp import Logger log = Logger().get_logger(__name__, "nuke") @@ -620,7 +619,8 @@ class WorkfileSettings(object): # third set ocio custom path if root_dict.get("customOCIOConfigPath"): self._root_node["customOCIOConfigPath"].setValue( - str(root_dict["customOCIOConfigPath"]).format(**os.environ) + str(root_dict["customOCIOConfigPath"]).format( + **os.environ).replace("\\", "/") ) log.debug("nuke.root()['{}'] changed to: {}".format( "customOCIOConfigPath", root_dict["customOCIOConfigPath"])) @@ -944,6 +944,26 @@ class WorkfileSettings(object): # add colorspace menu item self.set_colorspace() + def set_favorites(self): + projects_root = os.getenv("AVALON_PROJECTS") + work_dir = os.getenv("AVALON_WORKDIR") + asset = os.getenv("AVALON_ASSET") + project = os.getenv("AVALON_PROJECT") + hierarchy = os.getenv("AVALON_HIERARCHY") + favorite_items = OrderedDict() + + # project + favorite_items.update({"Project dir": os.path.join( + projects_root, project).replace("\\", "/")}) + # shot + favorite_items.update({"Shot dir": os.path.join( + projects_root, project, + hierarchy, asset).replace("\\", "/")}) + # workdir + favorite_items.update({"Work dir": work_dir}) + + set_context_favorites(favorite_items) + def get_hierarchical_attr(entity, attr, default=None): attr_parts = attr.split('.') @@ -1350,8 +1370,8 @@ class ExporterReview: else: self.fname = os.path.basename(self.path_in) self.fhead = os.path.splitext(self.fname)[0] + "." 
-        self.first_frame = self.instance.data.get("frameStart", None)
-        self.last_frame = self.instance.data.get("frameEnd", None)
+        self.first_frame = self.instance.data.get("frameStartHandle", None)
+        self.last_frame = self.instance.data.get("frameEndHandle", None)
 
         if "#" in self.fhead:
             self.fhead = self.fhead.replace("#", "")[:-1]
diff --git a/pype/nuke/utils.py b/pype/nuke/utils.py
index 7583221696..aa5bc1077e 100644
--- a/pype/nuke/utils.py
+++ b/pype/nuke/utils.py
@@ -3,6 +3,23 @@
 import nuke
 from avalon.nuke import lib as anlib
 
+def set_context_favorites(favorites={}):
+    """ Adding favorite folders to Nuke's file browser
+
+    Arguments:
+        favorites (dict): pairs of {name: path}
+    """
+    dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+    icon_path = os.path.join(dir, 'res', 'icons', 'folder-favorite3.png')
+
+    for name, path in favorites.items():
+        nuke.addFavoriteDir(
+            name,
+            path,
+            nuke.IMAGE | nuke.SCRIPT | nuke.GEO,
+            icon=icon_path)
+
+
 def get_node_outputs(node):
     '''
     Return a dictionary of the nodes and pipes that are connected to node
diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py
index c71e2cb999..774a9d45bf 100644
--- a/pype/nukestudio/lib.py
+++ b/pype/nukestudio/lib.py
@@ -1,4 +1,5 @@
 import os
+import re
 import sys
 import hiero
 import pyblish.api
@@ -7,7 +8,6 @@ from avalon.vendor.Qt import (QtWidgets, QtGui)
 
 import pype.api as pype
 from pypeapp import Logger
-
 log = Logger().get_logger(__name__, "nukestudio")
 
 cached_process = None
@@ -361,3 +361,449 @@ def CreateNukeWorkfile(nodes=None,
         nodes=nuke_script.getNodes(),
         **kwargs
     )
+
+
+class ClipLoader:
+
+    active_bin = None
+
+    def __init__(self, plugin_cls, context, sequence=None, track=None, **kwargs):
+        """ Initialize object
+
+        Arguments:
+            plugin_cls (api.Loader): plugin object
+            context (dict): loader plugin context
+            sequence (hiero.core.Sequence): sequence object
+            track (hiero.core.Track): track object
+            kwargs (dict)[optional]: possible keys:
+                projectBinPath: "path/to/binItem"
+                hieroWorkfileName: "name_of_hiero_project_file_no_extension"
+
+        """
+        self.cls = plugin_cls
+        self.context = context
+        self.kwargs = kwargs
+        self.active_project = self._get_active_project()
+        self.project_bin = self.active_project.clipsBin()
+
+        self.data = dict()
+
+        assert self._set_data(), str("Cannot Load selected data, look into "
+                                     "database or call your supervisor")
+
+        # inject asset data to representation dict
+        self._get_asset_data()
+        log.debug("__init__ self.data: `{}`".format(self.data))
+
+        # add active components to class
+        self.active_sequence = self._get_active_sequence(sequence)
+        self.active_track = self._get_active_track(track)
+
+    def _set_data(self):
+        """ Get context and convert it to self.data
+        data structure:
+            {
+                "name": "assetName_subsetName_representationName"
+                "path": "path/to/file/created/by/get_repr..",
+                "binPath": "projectBinPath",
+            }
+        """
+        # create name
+        repr = self.context["representation"]
+        repr_cntx = repr["context"]
+        asset = str(repr_cntx["asset"])
+        subset = str(repr_cntx["subset"])
+        representation = str(repr_cntx["representation"])
+        self.data["clip_name"] = "_".join([asset, subset, representation])
+        self.data["track_name"] = "_".join([subset, representation])
+
+        # get file path
+        file = self.cls.fname
+        if not file:
+            repr_id = repr["_id"]
+            log.warning(
+                "Representation id `{}` is failing to load".format(repr_id))
+            return None
+        self.data["path"] = file.replace("\\", "/")
+
+        # convert to hashed path
+        if repr_cntx.get("frame"):
+            self._fix_path_hashes()
+
+        # solve project bin structure path
+        hierarchy = str("/".join((
+            "Loader",
+            repr_cntx["hierarchy"].replace("\\", "/"),
+            asset
+        )))
+
+        self.data["binPath"] = self.kwargs.get(
+            "projectBinPath",
+            hierarchy
+        )
+
+        return True
+
+    def _fix_path_hashes(self):
+        """ Convert the file path to hash-padded frame numbers where needed
+        """
+        file = self.data["path"]
+        if "#" not in file:
+            frame = self.context["representation"]["context"].get("frame")
+            padding = len(frame)
+            file = file.replace(frame, "#"*padding)
+            self.data["path"] = file
+
+    def _get_active_project(self):
+        """ Get hiero active project object
+        """
+        fname = self.kwargs.get("hieroWorkfileName", "")
+
+        return next((p for p in hiero.core.projects()
+                     if fname in p.name()),
+                    hiero.core.projects()[-1])
+
+    def _get_asset_data(self):
+        """ Get all available asset data
+
+        joins the `data` key of the asset with the representation data
+
+        """
+        asset_name = self.context["representation"]["context"]["asset"]
+        self.data["assetData"] = pype.get_asset(asset_name)["data"]
+
+    def _make_project_bin(self, hierarchy):
+        """ Create bins for the given hierarchy path
+
+        It will also make sure no duplicate bins are created
+
+        Arguments:
+            hierarchy (str): path divided by slashes "bin0/bin1/bin2"
+
+        Returns:
+            bin (hiero.core.BinItem): with the bin to be used for mediaItem
+        """
+        if self.active_bin:
+            return self.active_bin
+
+        assert hierarchy != "", "Please add hierarchy!"
+        log.debug("__ hierarchy1: `{}`".format(hierarchy))
+        if '/' in hierarchy:
+            hierarchy = hierarchy.split('/')
+        else:
+            hierarchy = [hierarchy]
+
+        parent_bin = None
+        for i, name in enumerate(hierarchy):
+            # if first index and list is more than one long
+            if i == 0:
+                bin = next((bin for bin in self.project_bin.bins()
+                            if name in bin.name()), None)
+                if not bin:
+                    bin = hiero.core.Bin(name)
+                    self.project_bin.addItem(bin)
+                log.debug("__ bin.name: `{}`".format(bin.name()))
+                parent_bin = bin
+
+            # from the second to the last
+            elif (i >= 1) and (i <= (len(hierarchy) - 1)):
+                bin = next((bin for bin in parent_bin.bins()
+                            if name in bin.name()), None)
+                if not bin:
+                    bin = hiero.core.Bin(name)
+                    parent_bin.addItem(bin)
+
+                parent_bin = bin
+
+        return parent_bin
+
+    def _make_track_item(self):
+        """ Create track item with """
+        pass
+
+    def _set_clip_color(self, last_version=True):
+        """ Sets color of clip on clip/track item
+
+        Arguments:
+            last_version (bool): True = green | False = red
+        """
+        pass
+
+    def _set_container_tag(self, item, metadata):
+        """ Sets container tag to given clip/track item
+
+        Arguments:
+            item (hiero.core.BinItem or hiero.core.TrackItem)
+            metadata (dict): data to be added to tag
+        """
+        pass
+
+    def _get_active_sequence(self, sequence):
+        if not sequence:
+            return hiero.ui.activeSequence()
+        else:
+            return sequence
+
+    def _get_active_track(self, track):
+        if not track:
+            track_name = self.data["track_name"]
+        else:
+            track_name = track.name()
+
+        track_pass = next(
+            (t for t in self.active_sequence.videoTracks()
+             if t.name() in track_name), None
+        )
+
+        if not track_pass:
+            track_pass = hiero.core.VideoTrack(track_name)
+            self.active_sequence.addTrack(track_pass)
+
+        return track_pass
+
+    def load(self):
+        log.debug("__ active_project: `{}`".format(self.active_project))
+        log.debug("__ active_sequence: `{}`".format(self.active_sequence))
+
+        # create project bin for the media to be imported into
+        self.active_bin = self._make_project_bin(self.data["binPath"])
+        log.debug("__ active_bin: `{}`".format(self.active_bin))
+
+        log.debug("__ version.data:
`{}`".format( + self.context["version"]["data"])) + + # create mediaItem in active project bin + # create clip media + media = hiero.core.MediaSource(self.data["path"]) + media_duration = int(media.duration()) + + handle_start = int(self.data["assetData"]["handleStart"]) + handle_end = int(self.data["assetData"]["handleEnd"]) + + clip_in = int(self.data["assetData"]["clipIn"]) + clip_out = int(self.data["assetData"]["clipOut"]) + + log.debug("__ media_duration: `{}`".format(media_duration)) + log.debug("__ handle_start: `{}`".format(handle_start)) + log.debug("__ handle_end: `{}`".format(handle_end)) + log.debug("__ clip_in: `{}`".format(clip_in)) + log.debug("__ clip_out: `{}`".format(clip_out)) + + # check if slate is included + # either in version data families or by calculating frame diff + slate_on = next( + (f for f in self.context["version"]["data"]["families"] + if "slate" in f), + None) or bool((( + clip_out - clip_in + 1) + handle_start + handle_end + ) - media_duration) + + log.debug("__ slate_on: `{}`".format(slate_on)) + + # calculate slate differences + if slate_on: + media_duration -= 1 + handle_start += 1 + + fps = self.data["assetData"]["fps"] + + # create Clip from Media + _clip = hiero.core.Clip(media) + _clip.setName(self.data["clip_name"]) + + # add Clip to bin if not there yet + if self.data["clip_name"] not in [ + b.name() + for b in self.active_bin.items()]: + binItem = hiero.core.BinItem(_clip) + self.active_bin.addItem(binItem) + + _source = next((item for item in self.active_bin.items() + if self.data["clip_name"] in item.name()), None) + + if not _source: + log.warning("Problem with created Source clip: `{}`".format( + self.data["clip_name"])) + + version = next((s for s in _source.items()), None) + clip = version.item() + + # add to track as clip item + track_item = hiero.core.TrackItem( + self.data["clip_name"], hiero.core.TrackItem.kVideo) + + track_item.setSource(clip) + + track_item.setSourceIn(handle_start) + track_item.setTimelineIn(clip_in) + + track_item.setSourceOut(media_duration - handle_end) + track_item.setTimelineOut(clip_out) + track_item.setPlaybackSpeed(1) + self.active_track.addTrackItem(track_item) + + log.info("Loading clips: `{}`".format(self.data["clip_name"])) + + +def create_nk_workfile_clips(nk_workfiles, seq=None): + ''' + nk_workfile is list of dictionaries like: + [{ + 'path': 'P:/Jakub_testy_pipeline/test_v01.nk', + 'name': 'test', + 'handleStart': 15, # added asymetrically to handles + 'handleEnd': 10, # added asymetrically to handles + "clipIn": 16, + "frameStart": 991, + "frameEnd": 1023, + 'task': 'Comp-tracking', + 'work_dir': 'VFX_PR', + 'shot': '00010' + }] + ''' + + proj = hiero.core.projects()[-1] + root = proj.clipsBin() + + if not seq: + seq = hiero.core.Sequence('NewSequences') + root.addItem(hiero.core.BinItem(seq)) + # todo will ned to define this better + # track = seq[1] # lazy example to get a destination# track + clips_lst = [] + for nk in nk_workfiles: + task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']]) + bin = create_bin_in_project(task_path, proj) + + if nk['task'] not in seq.videoTracks(): + track = hiero.core.VideoTrack(nk['task']) + seq.addTrack(track) + else: + track = seq.tracks(nk['task']) + + # create clip media + media = hiero.core.MediaSource(nk['path']) + media_in = int(media.startTime() or 0) + media_duration = int(media.duration() or 0) + + handle_start = nk.get("handleStart") + handle_end = nk.get("handleEnd") + + if media_in: + source_in = media_in + handle_start + else: + source_in = 
nk["frameStart"] + handle_start + + if media_duration: + source_out = (media_in + media_duration - 1) - handle_end + else: + source_out = nk["frameEnd"] - handle_end + + source = hiero.core.Clip(media) + + name = os.path.basename(os.path.splitext(nk['path'])[0]) + split_name = split_by_client_version(name)[0] or name + + # add to bin as clip item + items_in_bin = [b.name() for b in bin.items()] + if split_name not in items_in_bin: + binItem = hiero.core.BinItem(source) + bin.addItem(binItem) + + new_source = [ + item for item in bin.items() if split_name in item.name() + ][0].items()[0].item() + + # add to track as clip item + trackItem = hiero.core.TrackItem( + split_name, hiero.core.TrackItem.kVideo) + trackItem.setSource(new_source) + trackItem.setSourceIn(source_in) + trackItem.setSourceOut(source_out) + trackItem.setTimelineIn(nk["clipIn"]) + trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) + track.addTrackItem(trackItem) + clips_lst.append(trackItem) + + return clips_lst + + +def create_bin_in_project(bin_name='', project=''): + ''' + create bin in project and + if the bin_name is "bin1/bin2/bin3" it will create whole depth + ''' + + if not project: + # get the first loaded project + project = hiero.core.projects()[-1] + if not bin_name: + return None + if '/' in bin_name: + bin_name = bin_name.split('/') + else: + bin_name = [bin_name] + + clipsBin = project.clipsBin() + + done_bin_lst = [] + for i, b in enumerate(bin_name): + if i == 0 and len(bin_name) > 1: + if b in [bin.name() for bin in clipsBin.bins()]: + bin = [bin for bin in clipsBin.bins() if b in bin.name()][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + clipsBin.addItem(create_bin) + done_bin_lst.append(create_bin) + + elif i >= 1 and i < len(bin_name) - 1: + if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: + bin = [ + bin for bin in done_bin_lst[i - 1].bins() + if b in bin.name() + ][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + done_bin_lst[i - 1].addItem(create_bin) + done_bin_lst.append(create_bin) + + elif i == len(bin_name) - 1: + if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: + bin = [ + bin for bin in done_bin_lst[i - 1].bins() + if b in bin.name() + ][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + done_bin_lst[i - 1].addItem(create_bin) + done_bin_lst.append(create_bin) + # print [bin.name() for bin in clipsBin.bins()] + return done_bin_lst[-1] + + +def split_by_client_version(string): + regex = r"[/_.]v\d+" + try: + matches = re.findall(regex, string, re.IGNORECASE) + return string.split(matches[0]) + except Exception as e: + print(e) + return None + + +# nk_workfiles = [{ +# 'path': 'C:/Users/hubert/_PYPE_testing/projects/D001_projectx/episodes/ep120/ep120sq01/120sh020/publish/plates/platesMain/v023/prjx_120sh020_platesMain_v023.nk', +# 'name': '120sh020_platesMain', +# 'handles': 10, +# 'handleStart': 10, +# 'handleEnd': 10, +# "clipIn": 16, +# "frameStart": 991, +# "frameEnd": 1023, +# 'task': 'platesMain', +# 'work_dir': 'shots', +# 'shot': '120sh020' +# }] diff --git a/pype/nukestudio/menu.py b/pype/nukestudio/menu.py index 36ce4df34e..ee9af44e74 100644 --- a/pype/nukestudio/menu.py +++ b/pype/nukestudio/menu.py @@ -43,7 +43,7 @@ def install(): """ # here is the best place to add menu - from avalon.tools import publish + from avalon.tools import publish, cbloader from avalon.vendor.Qt import QtGui menu_name = os.environ['AVALON_LABEL'] @@ -86,6 +86,9 @@ def install(): lambda *args: 
publish.show(hiero.ui.mainWindow()) ) + loader_action = menu.addAction("Load...") + loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) + loader_action.triggered.connect(cbloader.show) menu.addSeparator() reload_action = menu.addAction("Reload pipeline...") @@ -93,11 +96,12 @@ def install(): reload_action.triggered.connect(reload_config) # Is this required? - hiero.ui.registerAction(context_label_action) - hiero.ui.registerAction(workfiles_action) - hiero.ui.registerAction(default_tags_action) - hiero.ui.registerAction(publish_action) - hiero.ui.registerAction(reload_action) + # hiero.ui.registerAction(context_label_action) + # hiero.ui.registerAction(workfiles_action) + # hiero.ui.registerAction(default_tags_action) + # hiero.ui.registerAction(publish_action) + # hiero.ui.registerAction(loader_action) + # hiero.ui.registerAction(reload_action) self.context_label_action = context_label_action self.workfile_actions = workfiles_action diff --git a/pype/nukestudio/precomp_clip.py b/pype/nukestudio/precomp_clip.py deleted file mode 100644 index b544b6e654..0000000000 --- a/pype/nukestudio/precomp_clip.py +++ /dev/null @@ -1,188 +0,0 @@ -import hiero.core -import hiero.ui - -import re -import os - - -def create_nk_script_clips(script_lst, seq=None): - ''' - nk_scripts is list of dictionaries like: - [{ - 'path': 'P:/Jakub_testy_pipeline/test_v01.nk', - 'name': 'test', - 'handles': 10, - 'handleStart': 15, # added asymetrically to handles - 'handleEnd': 10, # added asymetrically to handles - "clipIn": 16, - "frameStart": 991, - "frameEnd": 1023, - 'task': 'Comp-tracking', - 'work_dir': 'VFX_PR', - 'shot': '00010' - }] - ''' - - proj = hiero.core.projects()[-1] - root = proj.clipsBin() - - if not seq: - seq = hiero.core.Sequence('NewSequences') - root.addItem(hiero.core.BinItem(seq)) - # todo will ned to define this better - # track = seq[1] # lazy example to get a destination# track - clips_lst = [] - for nk in script_lst: - task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']]) - bin = create_bin_in_project(task_path, proj) - - if nk['task'] not in seq.videoTracks(): - track = hiero.core.VideoTrack(nk['task']) - seq.addTrack(track) - else: - track = seq.tracks(nk['task']) - - # create slip media - print("__ path: `{}`".format(nk['path'])) - - media = hiero.core.MediaSource(nk['path']) - media_in = int(media.startTime() or 0) - media_duration = int(media.duration() or 0) - - handle_start = nk.get("handleStart") or nk['handles'] - handle_end = nk.get("handleEnd") or nk['handles'] - - if media_in: - source_in = media_in + handle_start - else: - source_in = nk["frameStart"] + handle_start - - if media_duration: - source_out = (media_in + media_duration - 1) - handle_end - else: - source_out = nk["frameEnd"] - handle_end - - print("__ media: `{}`".format(media)) - print("__ media_in: `{}`".format(media_in)) - print("__ media_duration : `{}`".format(media_duration)) - print("__ source_in: `{}`".format(source_in)) - print("__ source_out : `{}`".format(source_out)) - - source = hiero.core.Clip(media) - print("__ source : `{}`".format(source)) - print("__ source.sourceIn(): `{}`".format(source.sourceIn())) - - name = os.path.basename(os.path.splitext(nk['path'])[0]) - split_name = split_by_client_version(name)[0] or name - - print("__ split_name: `{}`".format(split_name)) - - # add to bin as clip item - items_in_bin = [b.name() for b in bin.items()] - if split_name not in items_in_bin: - binItem = hiero.core.BinItem(source) - bin.addItem(binItem) - - print("__ bin.items(): 
`{}`".format(bin.items())) - - new_source = [ - item for item in bin.items() if split_name in item.name() - ][0].items()[0].item() - - print("__ new_source: `{}`".format(new_source)) - print("__ new_source: `{}`".format(new_source)) - - # add to track as clip item - trackItem = hiero.core.TrackItem(split_name, hiero.core.TrackItem.kVideo) - trackItem.setSource(new_source) - trackItem.setSourceIn(source_in) - trackItem.setSourceOut(source_out) - trackItem.setSourceIn(source_in) - trackItem.setTimelineIn(nk["clipIn"]) - trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) - track.addTrackItem(trackItem) - track.addTrackItem(trackItem) - clips_lst.append(trackItem) - - return clips_lst - - -def create_bin_in_project(bin_name='', project=''): - ''' - create bin in project and - if the bin_name is "bin1/bin2/bin3" it will create whole depth - ''' - - if not project: - # get the first loaded project - project = hiero.core.projects()[-1] - if not bin_name: - return None - if '/' in bin_name: - bin_name = bin_name.split('/') - else: - bin_name = [bin_name] - - clipsBin = project.clipsBin() - - done_bin_lst = [] - for i, b in enumerate(bin_name): - if i == 0 and len(bin_name) > 1: - if b in [bin.name() for bin in clipsBin.bins()]: - bin = [bin for bin in clipsBin.bins() if b in bin.name()][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - clipsBin.addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i >= 1 and i < len(bin_name) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i == len(bin_name) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - # print [bin.name() for bin in clipsBin.bins()] - return done_bin_lst[-1] - - -def split_by_client_version(string): - regex = r"[/_.]v\d+" - try: - matches = re.findall(regex, string, re.IGNORECASE) - return string.split(matches[0]) - except Exception as e: - print(e) - return None - - -script_lst = [{ - 'path': 'C:/Users/hubert/_PYPE_testing/projects/D001_projectx/episodes/ep120/ep120sq01/120sh020/publish/plates/platesMain/v023/prjx_120sh020_platesMain_v023.nk', - 'name': '120sh020_platesMain', - 'handles': 10, - 'handleStart': 10, - 'handleEnd': 10, - "clipIn": 16, - "frameStart": 991, - "frameEnd": 1023, - 'task': 'platesMain', - 'work_dir': 'shots', - 'shot': '120sh020' -}] diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index 78583b0a2f..591dcf0dc2 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -73,9 +73,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ''' start_frame = 0 end_frame = 1 - if 'endFrameReview' in comp and 'startFrameReview' in comp: + if 'frameEndFtrack' in comp and 'frameStartFtrack' in comp: end_frame += ( - comp['endFrameReview'] - comp['startFrameReview'] + comp['frameEndFtrack'] - comp['frameStartFtrack'] ) else: end_frame += ( @@ -127,7 +127,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Add 
custom attributes for AssetVersion assetversion_cust_attrs = {} - intent_val = instance.context.data.get("intent") + intent_val = instance.context.data.get("intent", {}).get("value") if intent_val: assetversion_cust_attrs["intent"] = intent_val diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py index 2621ca96ab..679010ca58 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_note.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -1,4 +1,5 @@ import sys +import json import pyblish.api import six @@ -18,6 +19,48 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): # - note label must exist in Ftrack note_labels = [] + def get_intent_label(self, session, intent_value): + if not intent_value: + return + + intent_configurations = session.query( + "CustomAttributeConfiguration where key is intent" + ).all() + if not intent_configurations: + return + + intent_configuration = intent_configurations[0] + if len(intent_configuration) > 1: + self.log.warning(( + "Found more than one `intent` custom attribute." + " Using first found." + )) + + config = intent_configuration.get("config") + if not config: + return + + configuration = json.loads(config) + items = configuration.get("data") + if not items: + return + + if sys.version_info[0] < 3: + string_type = basestring + else: + string_type = str + + if isinstance(items, string_type): + items = json.loads(items) + + intent_label = None + for item in items: + if item["value"] == intent_value: + intent_label = item["menu"] + break + + return intent_label + def process(self, instance): comment = (instance.context.data.get("comment") or "").strip() if not comment: @@ -26,17 +69,34 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): self.log.debug("Comment is set to `{}`".format(comment)) - intent = instance.context.data.get("intent") - if intent: - msg = "Intent is set to `{}` and was added to comment.".format( - intent - ) + session = instance.context.data["ftrackSession"] + + intent_val = instance.context.data.get("intent", {}).get("value") + intent_label = instance.context.data.get("intent", {}).get("label") + final_label = None + if intent_val: + final_label = self.get_intent_label(session, intent_val) + if final_label is None: + final_label = intent_label + + # if intent label is set then format comment + # - it is possible that intent_label is equal to "" (empty string) + if final_label: + msg = "Intent label is set to `{}`.".format(final_label) comment = self.note_with_intent_template.format(**{ - "intent": intent, + "intent": final_label, "comment": comment }) + + elif intent_val: + msg = ( + "Intent is set to `{}` and was not added" + " to comment because label is set to `{}`." + ).format(intent_val, final_label) + else: msg = "Intent is not set." 
+ self.log.debug(msg) asset_versions_key = "ftrackIntegratedAssetVersions" @@ -45,7 +105,6 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): self.log.info("There are any integrated AssetVersions") return - session = instance.context.data["ftrackSession"] user = session.query( "User where username is \"{}\"".format(session.api_user) ).first() diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy.py index ae83e39513..73ae3bb024 100644 --- a/pype/plugins/global/publish/collect_anatomy.py +++ b/pype/plugins/global/publish/collect_anatomy.py @@ -6,10 +6,6 @@ Requires: username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001) datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder) -Optional: - comment -> collect_comment *(pyblish.api.CollectorOrder) - intent -> collected in pyblish-lite - Provides: context -> anatomy (pypeapp.Anatomy) context -> anatomyData diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py index 20899361c5..103f5abd1a 100644 --- a/pype/plugins/global/publish/collect_avalon_entities.py +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -47,6 +47,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): context.data["assetEntity"] = asset_entity data = asset_entity['data'] - context.data['handles'] = int(data.get("handles", 0)) - context.data["handleStart"] = int(data.get("handleStart", 0)) - context.data["handleEnd"] = int(data.get("handleEnd", 0)) + handles = int(data.get("handles") or 0) + context.data["handles"] = handles + context.data["handleStart"] = int(data.get("handleStart", handles)) + context.data["handleEnd"] = int(data.get("handleEnd", handles)) diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index 010cf44c15..552fd49f6d 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -35,7 +35,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): def _process_path(self, data): # validate basic necessary data data_err = "invalid json file - missing data" - required = ["asset", "user", "intent", "comment", + required = ["asset", "user", "comment", "job", "instances", "session", "version"] assert all(elem in data.keys() for elem in required), data_err diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index faecbb47a7..086a1fdfb2 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -35,23 +35,29 @@ class ExtractBurnin(pype.api.Extractor): context_data.get("handleStart")) handle_end = instance.data.get("handleEnd", context_data.get("handleEnd")) - duration = frame_end - frame_start + 1 + + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end + duration = frame_end_handle - frame_start_handle + 1 prep_data = copy.deepcopy(instance.data["anatomyData"]) if "slate.farm" in instance.data["families"]: - frame_start += 1 + frame_start_handle += 1 duration -= 1 prep_data.update({ - "frame_start": frame_start, - "frame_end": frame_end, + "frame_start": frame_start_handle, + "frame_end": frame_end_handle, "duration": duration, "version": int(version), - "comment": instance.context.data.get("comment", ""), - "intent": instance.context.data.get("intent", "") + "comment": instance.context.data.get("comment", "") }) + 
intent = instance.context.data.get("intent", {}).get("label") + if intent: + prep_data["intent"] = intent + # get anatomy project anatomy = instance.context.data['anatomy'] @@ -99,13 +105,13 @@ class ExtractBurnin(pype.api.Extractor): _prep_data["anatomy"] = filled_anatomy.get_solved() # copy frame range variables - frame_start_cp = frame_start - frame_end_cp = frame_end + frame_start_cp = frame_start_handle + frame_end_cp = frame_end_handle duration_cp = duration if no_handles: - frame_start_cp = frame_start + handle_start - frame_end_cp = frame_end - handle_end + frame_start_cp = frame_start + frame_end_cp = frame_end duration_cp = frame_end_cp - frame_start_cp + 1 _prep_data.update({ "frame_start": frame_start_cp, diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 23e582edd2..c8a8510fb2 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -24,10 +24,10 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs = {} ext_filter = [] + to_width = 1920 + to_height = 1080 def process(self, instance): - to_width = 1920 - to_height = 1080 output_profiles = self.outputs or {} @@ -41,8 +41,8 @@ class ExtractReview(pyblish.api.InstancePlugin): handle_end = inst_data.get("handleEnd", context_data.get("handleEnd")) pixel_aspect = inst_data.get("pixelAspect", 1) - resolution_width = inst_data.get("resolutionWidth", to_width) - resolution_height = inst_data.get("resolutionHeight", to_height) + resolution_width = inst_data.get("resolutionWidth", self.to_width) + resolution_height = inst_data.get("resolutionHeight", self.to_height) self.log.debug("Families In: `{}`".format(inst_data["families"])) self.log.debug("__ frame_start: {}".format(frame_start)) self.log.debug("__ frame_end: {}".format(frame_end)) @@ -166,30 +166,33 @@ class ExtractReview(pyblish.api.InstancePlugin): # necessary input data # adds start arg only if image sequence + + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end if isinstance(repre["files"], list): - if frame_start != repre.get("detectedStart", frame_start): - frame_start = repre.get("detectedStart") + if frame_start_handle != repre.get("detectedStart", frame_start_handle): + frame_start_handle = repre.get("detectedStart") # exclude handle if no handles defined if no_handles: - frame_start_no_handles = frame_start + handle_start - frame_end_no_handles = frame_end - handle_end + frame_start_handle = frame_start + frame_end_handle = frame_end input_args.append( "-start_number {0} -framerate {1}".format( - frame_start, fps)) + frame_start_handle, fps)) else: if no_handles: start_sec = float(handle_start) / fps input_args.append("-ss {:0.2f}".format(start_sec)) - frame_start_no_handles = frame_start + handle_start - frame_end_no_handles = frame_end - handle_end + frame_start_handle = frame_start + frame_end_handle = frame_end input_args.append("-i {}".format(full_input_path)) for audio in instance.data.get("audio", []): offset_frames = ( - instance.data.get("startFrameReview") - + instance.data.get("frameStartFtrack") - audio["offset"] ) offset_seconds = offset_frames / fps @@ -223,30 +226,42 @@ class ExtractReview(pyblish.api.InstancePlugin): output_args.extend(profile.get('output', [])) # defining image ratios - resolution_ratio = float(resolution_width / ( - resolution_height * pixel_aspect)) - delivery_ratio = float(to_width) / float(to_height) - self.log.debug(resolution_ratio) - self.log.debug(delivery_ratio) + 
resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height + delivery_ratio = float(self.to_width) / float(self.to_height) + self.log.debug( + "__ resolution_ratio: `{}`".format(resolution_ratio)) + self.log.debug( + "__ delivery_ratio: `{}`".format(delivery_ratio)) # get scale factor - scale_factor = to_height / ( + scale_factor = float(self.to_height) / ( resolution_height * pixel_aspect) - self.log.debug(scale_factor) + + # shorten two decimals long float number for testing conditions + resolution_ratio_test = float( + "{:0.2f}".format(resolution_ratio)) + delivery_ratio_test = float( + "{:0.2f}".format(delivery_ratio)) + + if resolution_ratio_test < delivery_ratio_test: + scale_factor = float(self.to_width) / ( + resolution_width * pixel_aspect) + + self.log.debug("__ scale_factor: `{}`".format(scale_factor)) # letter_box lb = profile.get('letter_box', 0) if lb != 0: - ffmpet_width = to_width - ffmpet_height = to_height + ffmpeg_width = self.to_width + ffmpeg_height = self.to_height if "reformat" not in p_tags: lb /= pixel_aspect - if resolution_ratio != delivery_ratio: - ffmpet_width = resolution_width - ffmpet_height = int( + if resolution_ratio_test != delivery_ratio_test: + ffmpeg_width = resolution_width + ffmpeg_height = int( resolution_height * pixel_aspect) else: - if resolution_ratio != delivery_ratio: + if resolution_ratio_test != delivery_ratio_test: lb /= scale_factor else: lb /= pixel_aspect @@ -258,16 +273,14 @@ class ExtractReview(pyblish.api.InstancePlugin): "c=black,drawbox=0:ih-round((ih-(iw*(" "1/{2})))/2):iw:round((ih-(iw*(1/{2})))" "/2):t=fill:c=black").format( - ffmpet_width, ffmpet_height, lb)) + ffmpeg_width, ffmpeg_height, lb)) # In case audio is longer than video. output_args.append("-shortest") if no_handles: - duration_sec = float( - (frame_end - ( - frame_start + handle_start - ) + 1) - handle_end) / fps + duration_sec = float(frame_end_handle - frame_start_handle + 1) / fps + output_args.append("-t {:0.2f}".format(duration_sec)) # output filename @@ -284,24 +297,26 @@ class ExtractReview(pyblish.api.InstancePlugin): # scaling none square pixels and 1920 width if "reformat" in p_tags: - if resolution_ratio < delivery_ratio: + if resolution_ratio_test < delivery_ratio_test: self.log.debug("lower then delivery") - width_scale = int(to_width * scale_factor) + width_scale = int(self.to_width * scale_factor) width_half_pad = int(( - to_width - width_scale)/2) - height_scale = to_height + self.to_width - width_scale)/2) + height_scale = self.to_height height_half_pad = 0 else: self.log.debug("heigher then delivery") - width_scale = to_width + width_scale = self.to_width width_half_pad = 0 - scale_factor = float(to_width) / float( - resolution_width) - self.log.debug(scale_factor) + scale_factor = float(self.to_width) / (float( + resolution_width) * pixel_aspect) + self.log.debug( + "__ scale_factor: `{}`".format( + scale_factor)) height_scale = int( resolution_height * scale_factor) height_half_pad = int( - (to_height - height_scale)/2) + (self.to_height - height_scale)/2) self.log.debug( "__ width_scale: `{}`".format(width_scale)) @@ -319,7 +334,7 @@ class ExtractReview(pyblish.api.InstancePlugin): "scale={0}x{1}:flags=lanczos," "pad={2}:{3}:{4}:{5}:black,setsar=1" ).format(width_scale, height_scale, - to_width, to_height, + self.to_width, self.to_height, width_half_pad, height_half_pad ) @@ -383,7 +398,9 @@ class ExtractReview(pyblish.api.InstancePlugin): "codec": codec_args, "_profile": profile, "resolutionHeight": resolution_height, - 
"resolutionWidth": resolution_width + "resolutionWidth": resolution_width, + "frameStartFtrack": frame_start_handle, + "frameEndFtrack": frame_end_handle }) if is_sequence: repre_new.update({ @@ -393,8 +410,8 @@ class ExtractReview(pyblish.api.InstancePlugin): if no_handles: repre_new.update({ "outputName": name + "_noHandles", - "startFrameReview": frame_start_no_handles, - "endFrameReview": frame_end_no_handles + "frameStartFtrack": frame_start, + "frameEndFtrack": frame_end }) if repre_new.get('preview'): repre_new.pop("preview") @@ -409,6 +426,11 @@ class ExtractReview(pyblish.api.InstancePlugin): if "delete" in repre.get("tags", []): representations_new.remove(repre) + instance.data.update({ + "reviewToWidth": self.to_width, + "reviewToHeight": self.to_height + }) + self.log.debug( "new representations: {}".format(representations_new)) instance.data["representations"] = representations_new diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 699ed4a5eb..8c33a0d853 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -24,24 +24,36 @@ class ExtractReviewSlate(pype.api.Extractor): slate_path = inst_data.get("slateFrame") ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - to_width = 1920 - to_height = 1080 + # values are set in ExtractReview + to_width = inst_data["reviewToWidth"] + to_height = inst_data["reviewToHeight"] + resolution_width = inst_data.get("resolutionWidth", to_width) resolution_height = inst_data.get("resolutionHeight", to_height) pixel_aspect = inst_data.get("pixelAspect", 1) fps = inst_data.get("fps") # defining image ratios - resolution_ratio = float(resolution_width / ( - resolution_height * pixel_aspect)) + resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height delivery_ratio = float(to_width) / float(to_height) - self.log.debug(resolution_ratio) - self.log.debug(delivery_ratio) + self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio)) + self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio)) # get scale factor - scale_factor = to_height / ( + scale_factor = float(to_height) / ( resolution_height * pixel_aspect) - self.log.debug(scale_factor) + + # shorten two decimals long float number for testing conditions + resolution_ratio_test = float( + "{:0.2f}".format(resolution_ratio)) + delivery_ratio_test = float( + "{:0.2f}".format(delivery_ratio)) + + if resolution_ratio_test < delivery_ratio_test: + scale_factor = float(to_width) / ( + resolution_width * pixel_aspect) + + self.log.debug("__ scale_factor: `{}`".format(scale_factor)) for i, repre in enumerate(inst_data["representations"]): _remove_at_end = [] @@ -95,7 +107,7 @@ class ExtractReviewSlate(pype.api.Extractor): # scaling none square pixels and 1920 width if "reformat" in p_tags: - if resolution_ratio < delivery_ratio: + if resolution_ratio_test < delivery_ratio_test: self.log.debug("lower then delivery") width_scale = int(to_width * scale_factor) width_half_pad = int(( @@ -106,7 +118,8 @@ class ExtractReviewSlate(pype.api.Extractor): self.log.debug("heigher then delivery") width_scale = to_width width_half_pad = 0 - scale_factor = float(to_width) / float(resolution_width) + scale_factor = float(to_width) / (float( + resolution_width) * pixel_aspect) self.log.debug(scale_factor) height_scale = int( resolution_height * scale_factor) diff --git a/pype/plugins/global/publish/submit_publish_job.py 
b/pype/plugins/global/publish/submit_publish_job.py index 3ad7805fe7..dcf19ae32c 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -238,8 +238,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): ) i += 1 - # Avoid copied pools and remove secondary pool - payload["JobInfo"]["Pool"] = "none" + # remove secondary pool payload["JobInfo"].pop("SecondaryPool", None) self.log.info("Submitting Deadline job ...") @@ -355,8 +354,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): aov) staging = os.path.dirname(list(cols[0])[0]) - start = int(instance_data.get("frameStart")) - end = int(instance_data.get("frameEnd")) self.log.info("Creating data for: {}".format(subset_name)) @@ -377,8 +374,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(cols[0])], - "frameStart": start, - "frameEnd": end, + "frameStart": int(instance_data.get("frameStartHandle")), + "frameEnd": int(instance_data.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": staging, "anatomy_template": "render", @@ -413,8 +410,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ representations = [] - start = int(instance.get("frameStart")) - end = int(instance.get("frameEnd")) cols, rem = clique.assemble(exp_files) bake_render_path = instance.get("bakeRenderPath") @@ -442,8 +437,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(c)], - "frameStart": start, - "frameEnd": end, + "frameStart": int(instance.get("frameStartHandle")), + "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": os.path.dirname(list(c)[0]), "anatomy_template": "render", @@ -577,6 +572,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "frameEnd": end, "handleStart": handle_start, "handleEnd": handle_end, + "frameStartHandle": start - handle_start, + "frameEndHandle": end + handle_end, "fps": fps, "source": source, "extendFrames": data.get("extendFrames"), diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index cbd1da7cbd..797933300c 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -24,7 +24,7 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): icon = "code-fork" color = "orange" - def process_reference(self, context, name, namespace, data): + def process_reference(self, context, name, namespace, options): import maya.cmds as cmds from avalon import maya import pymel.core as pm @@ -101,16 +101,18 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): cmds.setAttr(groupName + ".selectHandleY", cy) cmds.setAttr(groupName + ".selectHandleZ", cz) - if data.get("post_process", True): - if family == "rig": - self._post_process_rig(name, namespace, context, data) + if family == "rig": + self._post_process_rig(name, namespace, context, options) + else: + if "translate" in options: + cmds.setAttr(groupName + ".t", *options["translate"]) return newNodes def switch(self, container, representation): self.update(container, representation) - def _post_process_rig(self, name, namespace, context, data): + def _post_process_rig(self, name, namespace, context, options): output = next((node for node in self if node.endswith("out_SET")), None) diff --git 
a/pype/plugins/maya/publish/collect_instances.py b/pype/plugins/maya/publish/collect_instances.py index 39d7bcd86d..5af717ba4d 100644 --- a/pype/plugins/maya/publish/collect_instances.py +++ b/pype/plugins/maya/publish/collect_instances.py @@ -103,16 +103,22 @@ class CollectInstances(pyblish.api.ContextPlugin): # Store the exact members of the object set instance.data["setMembers"] = members - # Define nice label name = cmds.ls(objset, long=False)[0] # use short name label = "{0} ({1})".format(name, data["asset"]) + if "handles" in data: + data["handleStart"] = data["handles"] + data["handleEnd"] = data["handles"] + # Append start frame and end frame to label if present if "frameStart" and "frameEnd" in data: - label += " [{0}-{1}]".format(int(data["frameStart"]), - int(data["frameEnd"])) + data["frameStartHandle"] = data["frameStart"] - data["handleStart"] + data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"] + + label += " [{0}-{1}]".format(int(data["frameStartHandle"]), + int(data["frameEndHandle"])) instance.data["label"] = label @@ -122,7 +128,6 @@ class CollectInstances(pyblish.api.ContextPlugin): # user interface interested in visualising it. self.log.info("Found: \"%s\" " % instance.data["name"]) self.log.debug("DATA: \"%s\" " % instance.data) - def sort_by_family(instance): """Sort by family""" diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index f31198448b..be3878e6bd 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -211,17 +211,19 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "attachTo": attachTo, "setMembers": layer_name, "publish": True, - "frameStart": int(self.get_render_attribute("startFrame", + "frameStart": int(context.data["assetEntity"]['data']['frameStart']), + "frameEnd": int(context.data["assetEntity"]['data']['frameEnd']), + "frameStartHandle": int(self.get_render_attribute("startFrame", layer=layer_name)), - "frameEnd": int(self.get_render_attribute("endFrame", + "frameEndHandle": int(self.get_render_attribute("endFrame", layer=layer_name)), "byFrameStep": int( self.get_render_attribute("byFrameStep", layer=layer_name)), "renderer": self.get_render_attribute("currentRenderer", layer=layer_name), - "handleStart": context.data["assetEntity"]['data']['handleStart'], - "handleEnd": context.data["assetEntity"]['data']['handleEnd'], + "handleStart": int(context.data["assetEntity"]['data']['handleStart']), + "handleEnd": int(context.data["assetEntity"]['data']['handleEnd']), # instance subset "family": "renderlayer", @@ -236,7 +238,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "expectedFiles": full_exp_files, "resolutionWidth": cmds.getAttr("defaultResolution.width"), "resolutionHeight": cmds.getAttr("defaultResolution.height"), - "pixelAspect": cmds.getAttr("defaultResolution.height") + "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect") } # Apply each user defined attribute as data @@ -259,8 +261,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin): # Define nice label label = "{0} ({1})".format(expected_layer_name, data["asset"]) - label += " [{0}-{1}]".format(int(data["frameStart"]), - int(data["frameEnd"])) + label += " [{0}-{1}]".format(int(data["frameStartHandle"]), + int(data["frameEndHandle"])) instance = context.create_instance(expected_layer_name) instance.data["label"] = label diff --git a/pype/plugins/maya/publish/collect_review.py b/pype/plugins/maya/publish/collect_review.py index 
18eee78a9c..9b6027b98d 100644 --- a/pype/plugins/maya/publish/collect_review.py +++ b/pype/plugins/maya/publish/collect_review.py @@ -54,8 +54,10 @@ class CollectReview(pyblish.api.InstancePlugin): self.log.debug('adding review family to {}'.format(reviewable_subset)) data['review_camera'] = camera # data["publish"] = False - data['startFrameReview'] = instance.data["frameStart"] - data['endFrameReview'] = instance.data["frameEnd"] + data['frameStartFtrack'] = instance.data["frameStartHandle"] + data['frameEndFtrack'] = instance.data["frameEndHandle"] + data['frameStartHandle'] = instance.data["frameStartHandle"] + data['frameEndHandle'] = instance.data["frameEndHandle"] data["frameStart"] = instance.data["frameStart"] data["frameEnd"] = instance.data["frameEnd"] data['handles'] = instance.data['handles'] @@ -69,8 +71,8 @@ class CollectReview(pyblish.api.InstancePlugin): else: instance.data['subset'] = task + 'Review' instance.data['review_camera'] = camera - instance.data['startFrameReview'] = instance.data["frameStart"] - instance.data['endFrameReview'] = instance.data["frameEnd"] + instance.data['frameStartFtrack'] = instance.data["frameStartHandle"] + instance.data['frameEndFtrack'] = instance.data["frameEndHandle"] # make ftrack publishable instance.data["families"] = ['ftrack'] diff --git a/pype/plugins/maya/publish/extract_quicktime.py b/pype/plugins/maya/publish/extract_quicktime.py index 94b5a716a2..29d6b78051 100644 --- a/pype/plugins/maya/publish/extract_quicktime.py +++ b/pype/plugins/maya/publish/extract_quicktime.py @@ -33,17 +33,13 @@ class ExtractQuicktime(pype.api.Extractor): # if start and end frames cannot be determined, get them # from Maya timeline - start = instance.data.get("startFrameReview") - end = instance.data.get("endFrameReview") + start = instance.data.get("frameStartFtrack") + end = instance.data.get("frameEndFtrack") if start is None: start = cmds.playbackOptions(query=True, animationStartTime=True) if end is None: end = cmds.playbackOptions(query=True, animationEndTime=True) self.log.info("start: {}, end: {}".format(start, end)) - handles = instance.data.get("handles", 0) - if handles: - start -= handles - end += handles # get cameras camera = instance.data['review_camera'] diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index bd8497152e..7547f34ba1 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -234,8 +234,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "Plugin": instance.data.get("mayaRenderPlugin", "MayaBatch"), "Frames": "{start}-{end}x{step}".format( - start=int(instance.data["frameStart"]), - end=int(instance.data["frameEnd"]), + start=int(instance.data["frameStartHandle"]), + end=int(instance.data["frameEndHandle"]), step=int(instance.data["byFrameStep"]), ), @@ -340,7 +340,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): def preflight_check(self, instance): """Ensure the startFrame, endFrame and byFrameStep are integers""" - for key in ("frameStart", "frameEnd", "byFrameStep"): + for key in ("frameStartHandle", "frameEndHandle", "byFrameStep"): value = instance.data[key] if int(value) == value: diff --git a/pype/plugins/nuke/publish/collect_workfile.py b/pype/plugins/nuke/publish/collect_workfile.py index 9c01a3ec97..b95edf0a93 100644 --- a/pype/plugins/nuke/publish/collect_workfile.py +++ b/pype/plugins/nuke/publish/collect_workfile.py @@ -23,11 +23,12 @@ class 
CollectWorkfile(pyblish.api.ContextPlugin): add_publish_knob(root) family = "workfile" + task = os.getenv("AVALON_TASK", None) # creating instances per write node file_path = context.data["currentFile"] staging_dir = os.path.dirname(file_path) base_name = os.path.basename(file_path) - subset = "{0}_{1}".format(os.getenv("AVALON_TASK", None), family) + subset = family + task.capitalize() # Get frame range first_frame = int(root["first_frame"].getValue()) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 993b8574f5..0dc7c81fae 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -36,7 +36,6 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): output_type = "mov" # Get frame range - handles = instance.context.data['handles'] handle_start = instance.context.data["handleStart"] handle_end = instance.context.data["handleEnd"] first_frame = int(nuke.root()["first_frame"].getValue()) @@ -46,7 +45,6 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): ) if node["use_limit"].getValue(): - handles = 0 first_frame = int(node["first"].getValue()) last_frame = int(node["last"].getValue()) @@ -134,8 +132,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "label": label, "handleStart": handle_start, "handleEnd": handle_end, - "frameStart": first_frame, - "frameEnd": last_frame, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "frameStartHandle": first_frame, + "frameEndHandle": last_frame, "outputType": output_type, "family": "write", "families": families, diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py index 9b8baa468b..5467d239c2 100644 --- a/pype/plugins/nuke/publish/extract_render_local.py +++ b/pype/plugins/nuke/publish/extract_render_local.py @@ -27,13 +27,13 @@ class NukeRenderLocal(pype.api.Extractor): self.log.debug("instance collected: {}".format(instance.data)) - first_frame = instance.data.get("frameStart", None) + first_frame = instance.data.get("frameStartHandle", None) # exception for slate workflow if "slate" in instance.data["families"]: first_frame -= 1 - last_frame = instance.data.get("frameEnd", None) + last_frame = instance.data.get("frameEndHandle", None) node_subset_name = instance.data.get("name", None) self.log.info("Starting render") diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py index 488f9bd31d..369cbe0496 100644 --- a/pype/plugins/nuke/publish/extract_slate_frame.py +++ b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -77,7 +77,7 @@ class ExtractSlateFrame(pype.api.Extractor): else: fname = os.path.basename(instance.data.get("path", None)) fhead = os.path.splitext(fname)[0] + "." 
-        first_frame = instance.data.get("frameStart", None) - 1
+        first_frame = instance.data.get("frameStartHandle", None) - 1
         last_frame = first_frame

         if "#" in fhead:
@@ -157,7 +157,7 @@ class ExtractSlateFrame(pype.api.Extractor):
             return

         comment = instance.context.data.get("comment")
-        intent = instance.context.data.get("intent")
+        intent = instance.context.data.get("intent", {}).get("value", "")

         try:
             node["f_submission_note"].setValue(comment)
diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py
index ee7432e241..0a9ef33398 100644
--- a/pype/plugins/nuke/publish/submit_nuke_deadline.py
+++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py
@@ -41,8 +41,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
         self._ver = re.search(r"\d+\.\d+", context.data.get("hostVersion"))
         self._deadline_user = context.data.get(
             "deadlineUser", getpass.getuser())
-        self._frame_start = int(instance.data["frameStart"])
-        self._frame_end = int(instance.data["frameEnd"])
+        self._frame_start = int(instance.data["frameStartHandle"])
+        self._frame_end = int(instance.data["frameEndHandle"])

         # get output path
         render_path = instance.data['path']
diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py
index 8a8bf3cc5e..6e9b91dd72 100644
--- a/pype/plugins/nuke/publish/validate_rendered_frames.py
+++ b/pype/plugins/nuke/publish/validate_rendered_frames.py
@@ -51,7 +51,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
         collection = collections[0]

         frame_length = int(
-            instance.data["frameEnd"] - instance.data["frameStart"] + 1
+            instance.data["frameEndHandle"] - instance.data["frameStartHandle"] + 1
         )

         if frame_length != 1:
diff --git a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py
new file mode 100644
index 0000000000..2ee2409b86
--- /dev/null
+++ b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py
@@ -0,0 +1,49 @@
+from avalon import api
+import hiero
+from pype.nukestudio import lib
+reload(lib)
+
+
+class LoadSequencesToTimelineAssetOrigin(api.Loader):
+    """Load an image sequence into the Hiero timeline.
+
+    Place the clip on the timeline at the asset-origin timings
+    collected while conforming to the project.
+    """
+
+    families = ["render2d", "source", "plate", "render"]
+    representations = ["exr", "dpx", "jpg", "jpeg", "png"]
+
+    label = "Load to timeline with shot origin timing"
+    order = -10
+    icon = "code-fork"
+    color = "orange"
+
+    def load(self, context, name, namespace, data):
+
+        data.update({
+            # "projectBinPath": "Loaded",
+            "hieroWorkfileName": hiero.ui.activeProject().name()
+        })
+
+        self.log.debug("_ context: `{}`".format(context))
+        self.log.debug("_ representation._id: `{}`".format(
+            context["representation"]["_id"]))
+
+        clip_loader = lib.ClipLoader(self, context, **data)
+        clip_loader.load()
+
+        self.log.info("Loader done: `{}`".format(name))
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def update(self, container, representation):
+        """ Updating previously loaded clips
+        """
+        pass
+
+    def remove(self, container):
+        """ Removing previously loaded clips
+        """
+        pass
diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index b8654b0784..6a1dad9a6d 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -18,7 +18,6 @@ class CollectClips(api.ContextPlugin):
         context.data["assetsShared"] = dict()

         projectdata = context.data["projectEntity"]["data"]
-        version = context.data.get("version", "001")
         sequence = context.data.get("activeSequence")
         selection = context.data.get("selection")

@@ -108,8 +107,7 @@ class CollectClips(api.ContextPlugin):
                 "family": "clip",
                 "families": [],
                 "handleStart": projectdata.get("handleStart", 0),
-                "handleEnd": projectdata.get("handleEnd", 0),
-                "version": int(version)})
+                "handleEnd": projectdata.get("handleEnd", 0)})

             instance = context.create_instance(**data)
diff --git a/pype/plugins/nukestudio/publish/collect_instance_version.py b/pype/plugins/nukestudio/publish/collect_instance_version.py
new file mode 100644
index 0000000000..b79ccbdf54
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_instance_version.py
@@ -0,0 +1,18 @@
+from pyblish import api
+
+
+class CollectInstanceVersion(api.InstancePlugin):
+    """Collect the Hiero project version into instances.
+
+    If activated, every published subset gets the version
+    of the actual Hiero project.
+    """
+
+    order = api.CollectorOrder + 0.011
+    label = "Collect Instance Version"
+
+    def process(self, instance):
+        version = instance.context.data.get("version", "001")
+        instance.data.update({
+            "version": int(version)
+        })
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index d08f69d4bb..4ed281f0ee 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -126,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin):
         transfer_data = [
             "handleStart", "handleEnd", "sourceIn", "sourceOut",
             "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
-            "clipInH", "clipOutH", "asset", "track", "version", "resolutionWidth", "resolutionHeight", "pixelAspect", "fps"
+            "clipInH", "clipOutH", "asset", "track", "resolutionWidth", "resolutionHeight", "pixelAspect", "fps"
         ]

         # pass data to version
@@ -141,6 +141,13 @@ class CollectPlatesData(api.InstancePlugin):
             "fps": instance.context.data["fps"]
         })

+        version = instance.data.get("version")
+        if version:
+            version_data.update({
+                "version": version
+            })
+
+
         try:
             basename, ext = os.path.splitext(source_file)
             head, padding = os.path.splitext(basename)
diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py
index af8fd4a0e7..7cf8d77de4 100644
--- a/pype/plugins/nukestudio/publish/collect_reviews.py
+++ b/pype/plugins/nukestudio/publish/collect_reviews.py
@@ -78,6 +78,8 @@ class CollectReviews(api.InstancePlugin):
         file_dir = os.path.dirname(file_path)
         file = os.path.basename(file_path)
         ext = os.path.splitext(file)[-1][1:]
+        handleStart = rev_inst.data.get("handleStart")
+        handleEnd = rev_inst.data.get("handleEnd")

         # change label
         instance.data["label"] = "{0} - {1} - ({2}) - review".format(
@@ -86,13 +88,14 @@ class CollectReviews(api.InstancePlugin):

         self.log.debug("Instance review: {}".format(rev_inst.data["name"]))

-        # adding representation for review mov
         representation = {
             "files": file,
             "stagingDir": file_dir,
             "frameStart": rev_inst.data.get("sourceIn"),
             "frameEnd": rev_inst.data.get("sourceOut"),
+            "frameStartFtrack": rev_inst.data.get("sourceIn") - handleStart,
+            "frameEndFtrack": rev_inst.data.get("sourceOut") + handleEnd,
             "step": 1,
             "fps": rev_inst.data.get("fps"),
             "preview": True,
diff --git a/pype/plugins/standalonepublisher/publish/extract_review.py b/pype/plugins/standalonepublisher/publish/extract_review.py
index 66cdcdf4df..36793d4c62 100644
--- a/pype/plugins/standalonepublisher/publish/extract_review.py
+++ b/pype/plugins/standalonepublisher/publish/extract_review.py
@@ -170,8 +170,8 @@ class ExtractReviewSP(pyblish.api.InstancePlugin):
                 "stagingDir": out_stagigng_dir,
                 "tags": new_tags,
                 "outputName": name,
-                "startFrameReview": 1,
-                "endFrameReview": video_len
+                "frameStartFtrack": 1,
+                "frameEndFtrack": video_len
             })
             # cleanup thumbnail from new repre
             if repre_new.get("thumbnail"):
diff --git a/res/icons/folder-favorite.png b/res/icons/folder-favorite.png
new file mode 100644
index 0000000000..198b289e9e
Binary files /dev/null and b/res/icons/folder-favorite.png differ
diff --git a/res/icons/folder-favorite2.png b/res/icons/folder-favorite2.png
new file mode 100644
index 0000000000..91bc3f0fbe
Binary files /dev/null and b/res/icons/folder-favorite2.png differ
diff --git a/res/icons/folder-favorite3.png b/res/icons/folder-favorite3.png
new file mode 100644
index 0000000000..ce1e6d7171
Binary files /dev/null and b/res/icons/folder-favorite3.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py b/setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py
new file mode 100644
index 0000000000..7e274bd0a3
--- /dev/null
+++ b/setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py
@@ -0,0 +1,235 @@
+try:
+    from PySide.QtGui import *
+    from PySide.QtCore import *
+except ImportError:
+    from PySide2.QtGui import *
+    from PySide2.QtWidgets import *
+    from PySide2.QtCore import *
+
+from hiero.core.util import uniquify, version_get, version_set
+import hiero.core
+import hiero.ui
+import nuke
+
+# A global variable for storing the current Project
+gTrackedActiveProject = None
+
+# This selection handler will track changes in items selected/deselected in the Bin/Timeline/Spreadsheet Views
+
+
+def __trackActiveProjectHandler(event):
+    global gTrackedActiveProject
+    selection = event.sender.selection()
+    binSelection = selection
+    if len(binSelection) > 0 and hasattr(binSelection[0], 'project'):
+        proj = binSelection[0].project()
+
+        # We only store this if it's a valid, active User Project
+        if proj in hiero.core.projects(hiero.core.Project.kUserProjects):
+            gTrackedActiveProject = proj
+
+
+hiero.core.events.registerInterest(
+    'kSelectionChanged/kBin', __trackActiveProjectHandler)
+hiero.core.events.registerInterest(
+    'kSelectionChanged/kTimeline', __trackActiveProjectHandler)
+hiero.core.events.registerInterest(
+    'kSelectionChanged/Spreadsheet', __trackActiveProjectHandler)
+
+
+def activeProject():
+    """hiero.ui.activeProject() -> returns the current Project
+
+    Note: There is not technically a notion of an 'active' Project in Hiero/NukeStudio, as it is a multi-project App.
+    This method determines what is 'active' by going down the following rules...
+
+    # 1 - If the current Viewer (hiero.ui.currentViewer) contains a Clip or Sequence, this item is assumed to give the active Project
+    # 2 - If nothing is currently in the Viewer, look to the active View, determine project from active selection
+    # 3 - If no current selection can be determined, fall back to a globally tracked last selection from trackActiveProjectHandler
+    # 4 - If all those rules fail, fall back to the last project in the list of hiero.core.projects()
+
+    @return: hiero.core.Project"""
+    global gTrackedActiveProject
+    activeProject = None
+
+    # Case 1 : Look for what the current Viewer tells us - this might not be what we want, and relies on hiero.ui.currentViewer() being robust.
+    cv = hiero.ui.currentViewer().player().sequence()
+    if hasattr(cv, 'project'):
+        activeProject = cv.project()
+    else:
+        # Case 2: We can't determine a project from the current Viewer, so try seeing what's selected in the activeView
+        # Note that currently, if you run activeProject from the Script Editor, the activeView is always None, so this will rarely get used!
+        activeView = hiero.ui.activeView()
+        if activeView:
+            # We can determine an active View... see what's being worked with
+            selection = activeView.selection()
+
+            # Handle the case where nothing is selected in the active view
+            if len(selection) == 0:
+                # It's possible that there is no selection in a Timeline/Spreadsheet, but these views have a 'sequence' method, so try that...
+                if isinstance(hiero.ui.activeView(), (hiero.ui.TimelineEditor, hiero.ui.SpreadsheetView)):
+                    activeSequence = activeView.sequence()
+                    if hasattr(activeSequence, 'project'):
+                        activeProject = activeSequence.project()
+
+            # The active view has a selection... assume that the first item in the selection has the active Project
+            else:
+                currentItem = selection[0]
+                if hasattr(currentItem, 'project'):
+                    activeProject = currentItem.project()
+
+    # Finally, Cases 3 and 4...
+    if not activeProject:
+        activeProjects = hiero.core.projects(hiero.core.Project.kUserProjects)
+        if gTrackedActiveProject in activeProjects:
+            activeProject = gTrackedActiveProject
+        else:
+            activeProject = activeProjects[-1]
+
+    return activeProject
+
+# Method to get all recent projects
+
+
+def recentProjects():
+    """hiero.core.recentProjects() -> Returns a list of paths to recently opened projects
+
+    Hiero stores up to 5 recent projects in uistate.ini with the [recentFile]/# key.
+
+    @return: list of paths to .hrox Projects"""
+
+    appSettings = hiero.core.ApplicationSettings()
+    recentProjects = []
+    for i in range(0, 5):
+        proj = appSettings.value('recentFile/%i' % i)
+        if len(proj) > 0:
+            recentProjects.append(proj)
+    return recentProjects
+
+# Method to get a recent project path by index
+
+
+def recentProject(k=0):
+    """hiero.core.recentProject(k) -> Returns the recent project path, specified by integer k (0-4)
+
+    @param: k (optional, default = 0) - an integer from 0-4, relating to the index of recent projects.
+
+    @return: str - path to the recent .hrox project"""
+
+    appSettings = hiero.core.ApplicationSettings()
+    proj = appSettings.value('recentFile/%i' % int(k), None)
+    return proj
+
+# Method to open a recent project by index
+
+
+def openRecentProject(k=0):
+    """hiero.core.openRecentProject(k) -> Opens the recent project, specified by integer k, as listed in the Open Recent list.
+
+    @param: k (optional, default = 0) - an integer from 0-4, relating to the index of recent projects.
+
+    @return: hiero.core.Project"""
+
+    appSettings = hiero.core.ApplicationSettings()
+    proj = appSettings.value('recentFile/%i' % int(k), None)
+    proj = hiero.core.openProject(proj)
+    return proj
+
+
+# Duck punch these methods into the relevant ui/core namespaces
+hiero.ui.activeProject = activeProject
+hiero.core.recentProjects = recentProjects
+hiero.core.recentProject = recentProject
+hiero.core.openRecentProject = openRecentProject
+
+
+# Action to save all currently open Projects
+class SaveAllProjects(QAction):
+
+    def __init__(self):
+        QAction.__init__(self, "Save All Projects", None)
+        self.triggered.connect(self.projectSaveAll)
+        hiero.core.events.registerInterest(
+            "kShowContextMenu/kBin", self.eventHandler)
+
+    def projectSaveAll(self):
+        allProjects = hiero.core.projects()
+        for proj in allProjects:
+            try:
+                proj.save()
+                print('Saved Project: %s to: %s ' % (proj.name(), proj.path()))
+            except Exception:
+                print('Unable to save Project: %s to: %s. Check file permissions.' % (proj.name(), proj.path()))
+
+    def eventHandler(self, event):
+        event.menu.addAction(self)
+
+
+# For projects with v# in the path name, saves out a new Project with v#+1
+class SaveNewProjectVersion(QAction):
+
+    def __init__(self):
+        QAction.__init__(self, "Save New Version...", None)
+        self.triggered.connect(self.saveNewVersion)
+        hiero.core.events.registerInterest(
+            "kShowContextMenu/kBin", self.eventHandler)
+        self.selectedProjects = []
+
+    def saveNewVersion(self):
+        if len(self.selectedProjects) > 0:
+            projects = self.selectedProjects
+        else:
+            projects = [hiero.ui.activeProject()]
+
+        if len(projects) < 1:
+            return
+
+        for proj in projects:
+            oldName = proj.name()
+            path = proj.path()
+            v = None
+            prefix = None
+            try:
+                (prefix, v) = version_get(path, 'v')
+            except ValueError as msg:
+                print(msg)
+
+            if (prefix is not None) and (v is not None):
+                v = int(v)
+                newPath = version_set(path, prefix, v, v + 1)
+                try:
+                    proj.saveAs(newPath)
+                    print('Saved new project version: %s to: %s ' % (oldName, newPath))
+                except Exception:
+                    print('Unable to save Project: %s. Check file permissions.' % (oldName))
+            else:
+                newPath = path.replace(".hrox", "_v01.hrox")
+                answer = nuke.ask(
+                    '%s does not contain a version number.\nDo you want to save as %s?' % (proj, newPath))
+                if answer:
+                    try:
+                        proj.saveAs(newPath)
+                        print('Saved new project version: %s to: %s ' % (oldName, newPath))
+                    except Exception:
+                        print('Unable to save Project: %s. Check file permissions.' % (oldName))
+
+    def eventHandler(self, event):
+        self.selectedProjects = []
+        if hasattr(event.sender, 'selection') and event.sender.selection() is not None and len(event.sender.selection()) != 0:
+            selection = event.sender.selection()
+            self.selectedProjects = uniquify(
+                [item.project() for item in selection])
+        event.menu.addAction(self)
+
+
+# Instantiate the actions
+saveAllAct = SaveAllProjects()
+saveNewAct = SaveNewProjectVersion()
+
+fileMenu = hiero.ui.findMenuAction("foundry.menu.file")
+importAct = hiero.ui.findMenuAction("foundry.project.importFiles")
+hiero.ui.insertMenuAction(saveNewAct, fileMenu.menu(),
+                          before="Import File(s)...")
+hiero.ui.insertMenuAction(saveAllAct, fileMenu.menu(),
+                          before="Import File(s)...")
+fileMenu.menu().insertSeparator(importAct)
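The frame-range hunks above (collect_review, extract_quicktime, submit_maya_deadline, collect_writes, submit_nuke_deadline, validate_rendered_frames) converge on one naming convention: "frameStart"/"frameEnd" hold the comp range without handles, "frameStartHandle"/"frameEndHandle" hold the handle-extended range, and the "*Ftrack" keys mirror the handle-extended range for review/Ftrack. A minimal sketch of how the keys relate; the literal numbers are invented for illustration and are not part of the patch:

    # Illustrative values only.
    frame_start = 1001      # first frame of the shot, without handles
    frame_end = 1100        # last frame of the shot, without handles
    handle_start = 10
    handle_end = 10

    data = {
        "frameStart": frame_start,                       # without handles
        "frameEnd": frame_end,                           # without handles
        "frameStartHandle": frame_start - handle_start,  # with handles
        "frameEndHandle": frame_end + handle_end,        # with handles
        # Ftrack/review keys follow the handle-extended range
        "frameStartFtrack": frame_start - handle_start,
        "frameEndFtrack": frame_end + handle_end,
    }

    # e.g. the Deadline "Frames" string is now built from the handle range:
    frames = "{0}-{1}".format(data["frameStartHandle"], data["frameEndHandle"])
    print(frames)  # 991-1110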
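Once project_helpers.py is picked up from the Hiero startup path, the helpers it duck-punches into hiero.ui / hiero.core can be called directly. A hypothetical snippet for the NukeStudio Script Editor, assuming the module above has been loaded at startup:

    import hiero.core
    import hiero.ui

    # Project resolved by rules 1-4 documented in activeProject()
    proj = hiero.ui.activeProject()
    print(proj.name())

    # Up to 5 paths read from uistate.ini ([recentFile]/# keys)
    print(hiero.core.recentProjects())

    # Re-open the most recent project (index 0)
    hiero.core.openRecentProject(0)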
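SaveNewProjectVersion relies on the version_get/version_set helpers imported from hiero.core.util. A rough sketch of the version bump it performs, with an invented example path; the exact return values are indicative only and this only runs inside Hiero/NukeStudio:

    from hiero.core.util import version_get, version_set

    path = "/projects/demo/edit_v01.hrox"        # hypothetical path
    prefix, v = version_get(path, 'v')           # e.g. ('v', '01')
    new_path = version_set(path, prefix, int(v), int(v) + 1)
    print(new_path)                              # e.g. /projects/demo/edit_v02.hrox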