From 5849a5eeeda1c0582dd5a2ffbd5e576f065e989d Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 10 Jul 2019 16:46:09 +0100 Subject: [PATCH 01/55] Support multiple items in selection. --- pype/ftrack/actions/action_rv.py | 562 ++++++++++--------------------- 1 file changed, 183 insertions(+), 379 deletions(-) diff --git a/pype/ftrack/actions/action_rv.py b/pype/ftrack/actions/action_rv.py index c41938ada1..d89b5ca73b 100644 --- a/pype/ftrack/actions/action_rv.py +++ b/pype/ftrack/actions/action_rv.py @@ -1,13 +1,14 @@ -from pype.ftrack import BaseAction import os import sys -import json import subprocess -from pype.vendor import ftrack_api import logging -import operator -import re +import traceback +import json + from pypeapp import Logger, config +from pype.ftrack import BaseAction +from pype.vendor import ftrack_api +from avalon import io, api log = Logger().get_logger(__name__) @@ -53,14 +54,7 @@ class RVAction(BaseAction): def discover(self, session, entities, event): """Return available actions based on *event*. """ - selection = event["data"].get("selection", []) - if len(selection) != 1: - return False - - entityType = selection[0].get("entityType", None) - if entityType in ["assetversion", "task"]: - return True - return False + return True def set_rv_path(self): self.rv_path = self.config_data.get("rv_path") @@ -72,141 +66,197 @@ class RVAction(BaseAction): ) super().register() + def get_components_from_entity(self, session, entity, components): + """Get components from various entity types. + + The components dictionary is modifid in place, so nothing is returned. + + Args: + entity (Ftrack entity) + components (dict) + """ + + if entity.entity_type.lower() == "assetversion": + for component in entity["components"]: + if component["file_type"][1:] not in self.allowed_types: + continue + + try: + components[entity["asset"]["parent"]["name"]].append( + component + ) + except KeyError: + components[entity["asset"]["parent"]["name"]] = [component] + + return + + if entity.entity_type.lower() == "task": + query = "AssetVersion where task_id is '{0}'".format(entity["id"]) + for assetversion in session.query(query): + self.get_components_from_entity( + session, assetversion, components + ) + + return + + if entity.entity_type.lower() == "shot": + query = "AssetVersion where asset.parent.id is '{0}'".format( + entity["id"] + ) + for assetversion in session.query(query): + self.get_components_from_entity( + session, assetversion, components + ) + + return + + raise NotImplementedError( + "\"{}\" entity type is not implemented yet.".format( + entity.entity_type + ) + ) + def interface(self, session, entities, event): if event['data'].get('values', {}): return - entity = entities[0] - versions = [] - - entity_type = entity.entity_type.lower() - if entity_type == "assetversion": - if ( - entity[ - 'components' - ][0]['file_type'][1:] in self.allowed_types - ): - versions.append(entity) - else: - master_entity = entity - if entity_type == "task": - master_entity = entity['parent'] - - for asset in master_entity['assets']: - for version in asset['versions']: - # Get only AssetVersion of selected task - if ( - entity_type == "task" and - version['task']['id'] != entity['id'] - ): - continue - # Get only components with allowed type - filetype = version['components'][0]['file_type'] - if filetype[1:] in self.allowed_types: - versions.append(version) - - if len(versions) < 1: - return { - 'success': False, - 'message': 'There are no Asset Versions to open.' 
- } - - items = [] - base_label = "v{0} - {1} - {2}" - default_component = self.config_data.get( - 'default_component', None - ) - last_available = None - select_value = None - for version in versions: - for component in version['components']: - label = base_label.format( - str(version['version']).zfill(3), - version['asset']['type']['name'], - component['name'] - ) - - try: - location = component[ - 'component_locations' - ][0]['location'] - file_path = location.get_filesystem_path(component) - except Exception: - file_path = component[ - 'component_locations' - ][0]['resource_identifier'] - - if os.path.isdir(os.path.dirname(file_path)): - last_available = file_path - if component['name'] == default_component: - select_value = file_path - items.append( - {'label': label, 'value': file_path} - ) - - if len(items) == 0: - return { - 'success': False, - 'message': ( - 'There are no Asset Versions with accessible path.' - ) - } - - item = { - 'label': 'Items to view', - 'type': 'enumerator', - 'name': 'path', - 'data': sorted( - items, - key=operator.itemgetter('label'), - reverse=True + user = session.query( + "User where username is '{0}'".format( + os.environ["FTRACK_API_USER"] ) - } - if select_value is not None: - item['value'] = select_value - else: - item['value'] = last_available + ).one() + job = session.create( + "Job", + { + "user": user, + "status": "running", + "data": json.dumps({ + "description": "RV: Collecting components." + }) + } + ) + # Commit to feedback to user. + session.commit() - return {'items': [item]} + try: + items = self.get_interface_items(session, entities) + except Exception: + log.error(traceback.format_exc()) + job["status"] = "failed" + else: + job["status"] = "done" + + # Commit to end job. + session.commit() + + return {"items": items} + + def get_interface_items(self, session, entities): + + components = {} + for entity in entities: + self.get_components_from_entity(session, entity, components) + + # Sort by version + for parent_name, entities in components.items(): + version_mapping = {} + for entity in entities: + try: + version_mapping[entity["version"]["version"]].append( + entity + ) + except KeyError: + version_mapping[entity["version"]["version"]] = [entity] + + components[parent_name] = [] + for version in reversed(sorted(version_mapping.keys())): + components[parent_name].extend(version_mapping[version]) + + # Items to present to user. 
+ items = [] + label = "{} - v{} - {}" + for parent_name, entities in components.items(): + data = [] + for entity in entities: + data.append( + { + "label": label.format( + entity["version"]["asset"]["name"], + str(entity["version"]["version"]).zfill(3), + entity["file_type"][1:] + ), + "value": entity["id"] + } + ) + + items.append( + { + "label": parent_name, + "type": "enumerator", + "name": parent_name, + "data": data, + "value": data[0]["value"] + } + ) + + return items def launch(self, session, entities, event): """Callback method for RV action.""" # Launching application if "values" not in event["data"]: return - filename = event['data']['values']['path'] - fps = entities[0].get('custom_attributes', {}).get('fps', None) + io.install() - cmd = [] - # change frame number to padding string for RV to play sequence - try: - frame = re.findall(r'(\d+).', filename)[-1] - except KeyError: - # we didn't detected frame number - pass - else: - padding = '#' * len(frame) - pos = filename.rfind(frame) - filename = filename[:pos] + padding + filename[ - filename.rfind('.'):] - - # RV path - cmd.append(os.path.normpath(self.rv_path)) - if fps is not None: - cmd.append("-fps {}".format(int(fps))) - cmd.append(os.path.normpath(filename)) - log.info('Running rv: {}'.format(' '.join(cmd))) - try: - # Run RV with these commands - subprocess.Popen(' '.join(cmd), shell=True) - except Exception as e: - return { - 'success': False, - 'message': 'File "{}" was not found.'.format( - e + paths = [] + for parent_name in sorted(event["data"]["values"].keys()): + component = session.get( + "Component", event["data"]["values"][parent_name] + ) + asset = io.find_one({"type": "asset", "name": parent_name}) + subset = io.find_one( + { + "type": "subset", + "name": component["version"]["asset"]["name"], + "parent": asset["_id"] + } + ) + version = io.find_one( + { + "type": "version", + "name": component["version"]["version"], + "parent": subset["_id"] + } + ) + representation = io.find_one( + { + "type": "representation", + "parent": version["_id"], + "name": component["file_type"][1:] + } + ) + if representation is None: + representation = io.find_one( + { + "type": "representation", + "parent": version["_id"], + "name": "preview" + } ) - } + paths.append(api.get_representation_path(representation)) + + args = [os.path.normpath(self.rv_path)] + + fps = entities[0].get("custom_attributes", {}).get("fps", None) + if fps is not None: + args.extend(["-fps", str(fps)]) + + args.extend(paths) + + log.info("Running rv: {}".format(args)) + + subprocess.Popen(args) return True @@ -257,249 +307,3 @@ def main(arguments=None): if __name__ == '__main__': raise SystemExit(main(sys.argv[1:])) - -""" -Usage: RV movie and image sequence viewer - - One File: rv foo.jpg - This Directory: rv . 
- Other Directory: rv /path/to/dir - Image Sequence w/Audio: rv [ in.#.tif in.wav ] - Stereo w/Audio: rv [ left.#.tif right.#.tif in.wav ] - Stereo Movies: rv [ left.mov right.mov ] - Stereo Movie (from rvio): rv stereo.mov - Cuts Sequenced: rv cut1.mov cut2.#.exr cut3.mov - Stereo Cuts Sequenced: rv [ l1.mov r1.mov ] [ l2.mov r2.mov ] - Forced Anamorphic: rv [ -pa 2.0 fullaperture.#.dpx ] - Compare: rv -wipe a.exr b.exr - Difference: rv -diff a.exr b.exr - Slap Comp Over: rv -over a.exr b.exr - Tile Images: rv -tile *.jpg - Cache + Play Movie: rv -l -play foo.mov - Cache Images to Examine: rv -c big.#.exr - Fullscreen on 2nd monitor: rv -fullscreen -screen 1 - Select Source View: rv [ in.exr -select view right ] - Select Source Layer: rv [ in.exr -select layer light1.diffuse ] - (single-view source) - Select Source Layer: rv [ in.exr -select layer left,light1.diffuse ] - (multi-view source) - Select Source Channel: rv [ in.exr -select channel R ] - (single-view, single-layer source) - Select Source Channel: rv [ in.exr -select channel left,Diffuse,R ] - (multi-view, multi-layer source) - -Image Sequence Numbering - - Frames 1 to 100 no padding: image.1-100@.jpg - Frames 1 to 100 padding 4: image.1-100#.jpg -or- image.1-100@@@@.jpg - Frames 1 to 100 padding 5: image.1-100@@@@@.jpg - Frames -100 to -200 padding 4: image.-100--200#jpg - printf style padding 4: image.%04d.jpg - printf style w/range: image.%04d.jpg 1-100 - printf no padding w/range: image.%d.jpg 1-100 - Complicated no pad 1 to 100: image_887f1-100@_982.tif - Stereo pair (left,right): image.#.%V.tif - Stereo pair (L,R): image.#.%v.tif - All Frames, padding 4: image.#.jpg - All Frames in Sequence: image.*.jpg - All Frames in Directory: /path/to/directory - All Frames in current dir: . - -Per-source arguments (inside [ and ] restricts to that source only) - --pa %f Per-source pixel aspect ratio --ro %d Per-source range offset --rs %d Per-source range start --fps %f Per-source or global fps --ao %f Per-source audio offset in seconds --so %f Per-source stereo relative eye offset --rso %f Per-source stereo right eye offset --volume %f Per-source or global audio volume (default=1) --fcdl %S Per-source file CDL --lcdl %S Per-source look CDL --flut %S Per-source file LUT --llut %S Per-source look LUT --pclut %S Per-source pre-cache software LUT --cmap %S Per-source channel mapping - (channel names, separated by ',') --select %S %S Per-source view/layer/channel selection --crop %d %d %d %d Per-source crop (xmin, ymin, xmax, ymax) --uncrop %d %d %d %d Per-source uncrop (width, height, xoffset, yoffset) --in %d Per-source cut-in frame --out %d Per-source cut-out frame --noMovieAudio Disable source movie's baked-in audio --inparams ... Source specific input parameters - - ... Input sequence patterns, images, movies, or directories --c Use region frame cache --l Use look-ahead cache --nc Use no caching --s %f Image scale reduction --ns Nuke style sequence notation - (deprecated and ignored -- no longer needed) --noRanges No separate frame ranges - (i.e. 
1-10 will be considered a file) --sessionType %S Session type (sequence, stack) (deprecated, use -view) --stereo %S Stereo mode - (hardware, checker, scanline, anaglyph, lumanaglyph, - left, right, pair, mirror, hsqueezed, vsqueezed) --stereoSwap %d Swap left and right eyes stereo display - (0 == no, 1 == yes, default=0) --vsync %d Video Sync (1 = on, 0 = off, default = 1) --comp %S Composite mode - (over, add, difference, replace, topmost) --layout %S Layout mode (packed, row, column, manual) --over Same as -comp over -view defaultStack --diff Same as -comp difference -view defaultStack --replace Same as -comp replace -view defaultStack --topmost Same as -comp topmost -view defaultStack --layer Same as -comp topmost -view defaultStack, with strict - frame ranges --tile Same as -layout packed -view defaultLayout --wipe Same as -over with wipes enabled --view %S Start with a particular view --noSequence Don't contract files into sequences --inferSequence Infer sequences from one file --autoRetime %d Automatically retime conflicting media fps in - sequences and stacks (1 = on, 0 = off, default = 1) --rthreads %d Number of reader threads (default=1) --fullscreen Start in fullscreen mode --present Start in presentation mode (using presentation device) --presentAudio %d Use presentation audio device in presentation mode - (1 = on, 0 = off) --presentDevice %S Presentation mode device --presentVideoFormat %S Presentation mode override video format - (device specific) --presentDataFormat %S Presentation mode override data format - (device specific) --screen %d Start on screen (0, 1, 2, ...) --noBorders No window manager decorations --geometry %d %d [%d %d] Start geometry X, Y, W, H --fitMedia Fit the window to the first media shown --init %S Override init script --nofloat Turn off floating point by default --maxbits %d Maximum default bit depth (default=32) --gamma %f Set display gamma (default=1) --sRGB Display using linear -> sRGB conversion --rec709 Display using linear -> Rec 709 conversion --dlut %S Apply display LUT --brightness %f Set display relative brightness in stops (default=0) --resampleMethod %S Resampling method - (area, linear, cubic, nearest, default=area) --eval %S Evaluate Mu expression at every session start --pyeval %S Evaluate Python expression at every session start --nomb Hide menu bar on start up --play Play on startup --playMode %d Playback mode (0=Context dependent, 1=Play all frames, - 2=Realtime, default=0) --loopMode %d Playback loop mode - (0=Loop, 1=Play Once, 2=Ping-Pong, default=0) --cli Mu command line interface --vram %f VRAM usage limit in Mb, default = 64.000000 --cram %f Max region cache RAM usage in Gb, - (6.4Gb available, default 1Gb) --lram %f Max look-ahead cache RAM usage in Gb, - (6.4Gb available, default 0.2Gb) --noPBO Prevent use of GL PBOs for pixel transfer --prefetch Prefetch images for rendering --useAppleClientStorage Use APPLE_client_storage extension --useThreadedUpload Use threading for texture uploading/downloading - if possible --bwait %f Max buffer wait time in cached seconds, default 5.0 --lookback %f Percentage of the lookahead cache reserved for - frames behind the playhead, default 25 --yuv Assume YUV hardware conversion --noaudio Turn off audio --audiofs %d Use fixed audio frame size - (results are hardware dependant ... 
try 512) --audioCachePacket %d Audio cache packet size in samples (default=2048) --audioMinCache %f Audio cache min size in seconds (default=0.300000) --audioMaxCache %f Audio cache max size in seconds (default=0.600000) --audioModule %S Use specific audio module --audioDevice %S Use specific audio device --audioRate %f Use specific output audio rate (default=ask hardware) --audioPrecision %d Use specific output audio precision (default=16) --audioNice %d Close audio device when not playing - (may cause problems on some hardware) default=0 --audioNoLock %d Do not use hardware audio/video syncronization - (use software instead, default=0) --audioPreRoll %d Preroll audio on device open (Linux only; default=0) --audioGlobalOffset %f Global audio offset in seconds --audioDeviceLatency %f Audio device latency compensation in milliseconds --bg %S Background pattern (default=black, white, grey18, - grey50, checker, crosshatch) --formats Show all supported image and movie formats --apple Use Quicktime and NSImage libraries (on OS X) --cinalt Use alternate Cineon/DPX readers --exrcpus %d EXR thread count (default=0) --exrRGBA EXR Always read as RGBA (default=false) --exrInherit EXR guess channel inheritance (default=false) --exrNoOneChannel EXR never use one channel planar images (default=false) --exrIOMethod %d [%d] EXR I/O Method (0=standard, 1=buffered, 2=unbuffered, - 3=MemoryMap, 4=AsyncBuffered, 5=AsyncUnbuffered, - default=1) and optional chunk size (default=61440) --exrReadWindowIsDisplayWindow - EXR read window is display window (default=false) --exrReadWindow %d EXR Read Window Method (0=Data, 1=Display, - 2=Union, 3=Data inside Display, default=3) --jpegRGBA Make JPEG four channel RGBA on read - (default=no, use RGB or YUV) --jpegIOMethod %d [%d] JPEG I/O Method (0=standard, 1=buffered, - 2=unbuffered, 3=MemoryMap, 4=AsyncBuffered, - 5=AsyncUnbuffered, default=1) and optional - chunk size (default=61440) --cinpixel %S Cineon pixel storage (default=RGB8_PLANAR) --cinchroma Use Cineon chromaticity values - (for default reader only) --cinIOMethod %d [%d] Cineon I/O Method (0=standard, 1=buffered, - 2=unbuffered, 3=MemoryMap, 4=AsyncBuffered, - 5=AsyncUnbuffered, default=1) and optional - chunk size (default=61440) --dpxpixel %S DPX pixel storage (default=RGB8_PLANAR) --dpxchroma Use DPX chromaticity values (for default reader only) --dpxIOMethod %d [%d] DPX I/O Method (0=standard, 1=buffered, 2=unbuffered, - 3=MemoryMap, 4=AsyncBuffered, 5=AsyncUnbuffered, - default=1) and optional chunk size (default=61440) --tgaIOMethod %d [%d] TARGA I/O Method (0=standard, 1=buffered, - 2=unbuffered, 3=MemoryMap, 4=AsyncBuffered, - 5=AsyncUnbuffered, default=1) - and optional chunk size (default=61440) --tiffIOMethod %d [%d] TIFF I/O Method (0=standard, 1=buffered, - 2=unbuffered, 3=MemoryMap, 4=AsyncBuffered, - 5=AsyncUnbuffered, default=1) and optional - chunk size (default=61440) --lic %S Use specific license file --noPrefs Ignore preferences --resetPrefs Reset preferences to default values --qtcss %S Use QT style sheet for UI --qtstyle %S Use QT style --qtdesktop %d QT desktop aware, default=1 (on) --xl Aggressively absorb screen space for large media --mouse %d Force tablet/stylus events to be treated as a - mouse events, default=0 (off) --network Start networking --networkPort %d Port for networking --networkHost %S Alternate host/address for incoming connections --networkTag %S Tag to mark automatically saved port file --networkConnect %S [%d] Start networking and connect to host at port 
--networkPerm %d Default network connection permission - (0=Ask, 1=Allow, 2=Deny, default=0) --reuse %d Try to re-use the current session for - incoming URLs (1 = reuse session, - 0 = new session, default = 1) --nopackages Don't load any packages at startup (for debugging) --encodeURL Encode the command line as - an rvlink URL, print, and exit --bakeURL Fully bake the command line as an - rvlink URL, print, and exit --sendEvent ... Send external events e.g. -sendEvent 'name' 'content' --flags ... Arbitrary flags (flag, or 'name=value') - for use in Mu code --debug ... Debug category --version Show RV version number --strictlicense Exit rather than consume an rv license if no rvsolo - licenses are available --prefsPath %S Alternate path to preferences directory --sleep %d Sleep (in seconds) before starting to - allow attaching debugger -""" From 22144c78282a1d73cfe423955e78c601bf4947f8 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 10 Jul 2019 17:52:17 +0100 Subject: [PATCH 02/55] Fix variable reference. --- pype/ftrack/actions/action_rv.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/ftrack/actions/action_rv.py b/pype/ftrack/actions/action_rv.py index d89b5ca73b..8bf0bcd239 100644 --- a/pype/ftrack/actions/action_rv.py +++ b/pype/ftrack/actions/action_rv.py @@ -138,6 +138,7 @@ class RVAction(BaseAction): # Commit to feedback to user. session.commit() + items = [] try: items = self.get_interface_items(session, entities) except Exception: From 3f85103990ef05ebdc5ff1cf020e1940b0320d7a Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 10 Jul 2019 17:56:11 +0100 Subject: [PATCH 03/55] Sort same versions by date. --- pype/ftrack/actions/action_rv.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pype/ftrack/actions/action_rv.py b/pype/ftrack/actions/action_rv.py index 8bf0bcd239..0476bb67cf 100644 --- a/pype/ftrack/actions/action_rv.py +++ b/pype/ftrack/actions/action_rv.py @@ -169,6 +169,12 @@ class RVAction(BaseAction): except KeyError: version_mapping[entity["version"]["version"]] = [entity] + # Sort same versions by date. + for version, entities in version_mapping.items(): + version_mapping[version] = sorted( + entities, key=lambda x: x["version"]["date"], reverse=True + ) + components[parent_name] = [] for version in reversed(sorted(version_mapping.keys())): components[parent_name].extend(version_mapping[version]) From ec97e66d97c0ba152fa3717b6efa6587312ebc76 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 10 Jul 2019 18:01:10 +0100 Subject: [PATCH 04/55] Feedback to user about collecting file paths. --- pype/ftrack/actions/action_rv.py | 61 +++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/pype/ftrack/actions/action_rv.py b/pype/ftrack/actions/action_rv.py index 0476bb67cf..b3cc53c662 100644 --- a/pype/ftrack/actions/action_rv.py +++ b/pype/ftrack/actions/action_rv.py @@ -214,6 +214,53 @@ class RVAction(BaseAction): if "values" not in event["data"]: return + user = session.query( + "User where username is '{0}'".format( + os.environ["FTRACK_API_USER"] + ) + ).one() + job = session.create( + "Job", + { + "user": user, + "status": "running", + "data": json.dumps({ + "description": "RV: Collecting file paths." + }) + } + ) + # Commit to feedback to user. + session.commit() + + paths = [] + try: + paths = self.get_file_paths(session, event) + except Exception: + log.error(traceback.format_exc()) + job["status"] = "failed" + else: + job["status"] = "done" + + # Commit to end job. 
+ session.commit() + + args = [os.path.normpath(self.rv_path)] + + fps = entities[0].get("custom_attributes", {}).get("fps", None) + if fps is not None: + args.extend(["-fps", str(fps)]) + + args.extend(paths) + + log.info("Running rv: {}".format(args)) + + subprocess.Popen(args) + + return True + + def get_file_paths(self, session, event): + """Get file paths from selected components.""" + io.install() paths = [] @@ -253,19 +300,7 @@ class RVAction(BaseAction): ) paths.append(api.get_representation_path(representation)) - args = [os.path.normpath(self.rv_path)] - - fps = entities[0].get("custom_attributes", {}).get("fps", None) - if fps is not None: - args.extend(["-fps", str(fps)]) - - args.extend(paths) - - log.info("Running rv: {}".format(args)) - - subprocess.Popen(args) - - return True + return paths def register(session): From 20878ec3cb469e51d263c6162be5c4d905f2e42d Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 12 Jul 2019 10:48:49 +0200 Subject: [PATCH 05/55] feat(pype, nuke): publishing with new review to ftrack with ffmpeg, wip --- .../ftrack/publish/collect_ftrack_api.py | 3 + .../global/_publish_unused/extract_review.py | 92 ------------ pype/plugins/global/publish/extract_review.py | 141 ++++++++++++++++++ .../_publish_unused/collect_render_target.py | 46 ++++++ .../extract_script.py | 4 +- .../plugins/nuke/publish/collect_instances.py | 2 +- pype/plugins/nuke/publish/collect_writes.py | 3 +- .../nuke/publish/extract_render_local.py | 1 - ...tract_review.py => extract_review_data.py} | 85 +++-------- 9 files changed, 213 insertions(+), 164 deletions(-) delete mode 100644 pype/plugins/global/_publish_unused/extract_review.py create mode 100644 pype/plugins/global/publish/extract_review.py create mode 100644 pype/plugins/nuke/_publish_unused/collect_render_target.py rename pype/plugins/nuke/{publish => _publish_unused}/extract_script.py (94%) rename pype/plugins/nuke/publish/{extract_review.py => extract_review_data.py} (69%) diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py index e4923cac98..d09baec676 100644 --- a/pype/plugins/ftrack/publish/collect_ftrack_api.py +++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py @@ -18,6 +18,9 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): ftrack_log = logging.getLogger('ftrack_api') ftrack_log.setLevel(logging.WARNING) + ftrack_log = logging.getLogger('ftrack_api_old') + ftrack_log.setLevel(logging.WARNING) + # Collect session session = ftrack_api.Session() context.data["ftrackSession"] = session diff --git a/pype/plugins/global/_publish_unused/extract_review.py b/pype/plugins/global/_publish_unused/extract_review.py deleted file mode 100644 index 885db1cfc9..0000000000 --- a/pype/plugins/global/_publish_unused/extract_review.py +++ /dev/null @@ -1,92 +0,0 @@ -# import os -# import pyblish.api -# import subprocess -# from pype.vendor import clique -# from pypeapp import config -# -# -# class ExtractReview(pyblish.api.InstancePlugin): -# """Resolve any dependency issies -# -# This plug-in resolves any paths which, if not updated might break -# the published file. -# -# The order of families is important, when working with lookdev you want to -# first publish the texture, update the texture paths in the nodes and then -# publish the shading network. Same goes for file dependent assets. 
-# """ -# -# label = "Extract Review" -# order = pyblish.api.ExtractorOrder -# # families = ["imagesequence", "render", "write", "source"] -# # hosts = ["shell"] -# -# def process(self, instance): -# # adding plugin attributes from presets -# publish_presets = config.get_presets()["plugins"]["global"]["publish"] -# plugin_attrs = publish_presets[self.__class__.__name__] -# -# -# fps = instance.data.get("fps") -# start = instance.data.get("startFrame") -# stagingdir = os.path.normpath(instance.data.get("stagingDir")) -# -# collected_frames = os.listdir(stagingdir) -# collections, remainder = clique.assemble(collected_frames) -# -# full_input_path = os.path.join( -# stagingdir, collections[0].format('{head}{padding}{tail}') -# ) -# self.log.info("input {}".format(full_input_path)) -# -# filename = collections[0].format('{head}') -# if not filename.endswith('.'): -# filename += "." -# movFile = filename + "mov" -# full_output_path = os.path.join(stagingdir, movFile) -# -# self.log.info("output {}".format(full_output_path)) -# -# config_data = instance.context.data['output_repre_config'] -# -# proj_name = os.environ.get('AVALON_PROJECT', '__default__') -# profile = config_data.get(proj_name, config_data['__default__']) -# -# input_args = [] -# # overrides output file -# input_args.append("-y") -# # preset's input data -# input_args.extend(profile.get('input', [])) -# # necessary input data -# input_args.append("-start_number {}".format(start)) -# input_args.append("-i {}".format(full_input_path)) -# input_args.append("-framerate {}".format(fps)) -# -# output_args = [] -# # preset's output data -# output_args.extend(profile.get('output', [])) -# # output filename -# output_args.append(full_output_path) -# mov_args = [ -# "ffmpeg", -# " ".join(input_args), -# " ".join(output_args) -# ] -# subprocess_mov = " ".join(mov_args) -# sub_proc = subprocess.Popen(subprocess_mov) -# sub_proc.wait() -# -# if not os.path.isfile(full_output_path): -# raise("Quicktime wasn't created succesfully") -# -# if "representations" not in instance.data: -# instance.data["representations"] = [] -# -# representation = { -# 'name': 'mov', -# 'ext': 'mov', -# 'files': movFile, -# "stagingDir": stagingdir, -# "preview": True -# } -# instance.data["representations"].append(representation) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py new file mode 100644 index 0000000000..8c570d0c73 --- /dev/null +++ b/pype/plugins/global/publish/extract_review.py @@ -0,0 +1,141 @@ +import os +import pyblish.api +import subprocess +from pype.vendor import clique +from pypeapp import config + + +class ExtractReview(pyblish.api.InstancePlugin): + """Resolve any dependency issies + + This plug-in resolves any paths which, if not updated might break + the published file. + + The order of families is important, when working with lookdev you want to + first publish the texture, update the texture paths in the nodes and then + publish the shading network. Same goes for file dependent assets. 
+ """ + + label = "Extract Review" + order = pyblish.api.ExtractorOrder + 0.02 + families = ["review"] + + def process(self, instance): + # adding plugin attributes from presets + publish_presets = config.get_presets()["plugins"]["global"]["publish"] + plugin_attrs = publish_presets[self.__class__.__name__] + output_profiles = plugin_attrs.get("outputs", {}) + + inst_data = instance.data + fps = inst_data.get("fps") + start_frame = inst_data.get("startFrame") + + # get representation and loop them + representations = instance.data["representations"] + + # filter out mov and img sequences + representations_new = list() + for repre in representations: + if repre['ext'] in plugin_attrs["ext_filter"]: + tags = repre.get("tags", []) + + self.log.info("Try repre: {}".format(repre)) + + if "review" in tags: + + repre_new = repre.copy() + del(repre) + + staging_dir = repre_new["stagingDir"] + + if "mov" not in repre_new['ext']: + # get output presets and loop them + collected_frames = os.listdir(staging_dir) + collections, remainder = clique.assemble( + collected_frames) + + full_input_path = os.path.join( + staging_dir, collections[0].format( + '{head}{padding}{tail}') + ) + + filename = collections[0].format('{head}') + if not filename.endswith('.'): + filename += "." + mov_file = filename + "mov" + + else: + full_input_path = os.path.join( + staging_dir, repre_new["files"]) + + filename = repre_new["files"].split(".")[0] + mov_file = filename + ".mov" + # test if the file is not the input file + if not os.path.isfile(os.path.join( + staging_dir, mov_file)): + mov_file = filename + "_.mov" + + full_output_path = os.path.join(staging_dir, mov_file) + + self.log.info("input {}".format(full_input_path)) + self.log.info("output {}".format(full_output_path)) + + for name, profile in output_profiles.items(): + self.log.debug("Profile name: {}".format(name)) + new_tags = tags + profile.get('tags', []) + input_args = [] + + # overrides output file + input_args.append("-y") + + # preset's input data + input_args.extend(profile.get('input', [])) + + # necessary input data + # adds start arg only if image sequence + if "mov" not in repre_new['ext']: + input_args.append("-start_number {}".format( + start_frame)) + + input_args.append("-i {}".format(full_input_path)) + input_args.append("-framerate {}".format(fps)) + + output_args = [] + # preset's output data + output_args.extend(profile.get('output', [])) + + # output filename + output_args.append(full_output_path) + mov_args = [ + "ffmpeg", + " ".join(input_args), + " ".join(output_args) + ] + subprocess_mov = " ".join(mov_args) + + # run subprocess + sub_proc = subprocess.Popen(subprocess_mov) + sub_proc.wait() + + if not os.path.isfile(full_output_path): + self.log.error( + "Quicktime wasn't created succesfully") + + # create representation data + repre_new.update({ + 'name': name, + 'ext': 'mov', + 'files': mov_file, + "thumbnail": False, + "preview": True, + "tags": new_tags + }) + + # adding representation + representations_new.append(repre_new) + else: + representations_new.append(repre) + + self.log.debug( + "new representations: {}".format(representations_new)) + instance.data["representations"] = representations_new diff --git a/pype/plugins/nuke/_publish_unused/collect_render_target.py b/pype/plugins/nuke/_publish_unused/collect_render_target.py new file mode 100644 index 0000000000..6c04414f69 --- /dev/null +++ b/pype/plugins/nuke/_publish_unused/collect_render_target.py @@ -0,0 +1,46 @@ +import pyblish.api + + +@pyblish.api.log +class 
CollectRenderTarget(pyblish.api.InstancePlugin): + """Collect families for all instances""" + + order = pyblish.api.CollectorOrder + 0.2 + label = "Collect Render Target" + hosts = ["nuke", "nukeassist"] + families = ['write'] + + def process(self, instance): + + node = instance[0] + + self.log.info('processing {}'.format(node)) + + families = [] + if instance.data.get('families'): + families += instance.data['families'] + + # set for ftrack to accept + # instance.data["families"] = ["ftrack"] + + if node["render"].value(): + # dealing with local/farm rendering + if node["render_farm"].value(): + families.append("render.farm") + else: + families.append("render.local") + else: + families.append("render.frames") + # to ignore staging dir op in integrate + instance.data['transfer'] = False + + families.append('ftrack') + + instance.data["families"] = families + + # Sort/grouped by family (preserving local index) + instance.context[:] = sorted(instance.context, key=self.sort_by_family) + + def sort_by_family(self, instance): + """Sort by family""" + return instance.data.get("families", instance.data.get("family")) diff --git a/pype/plugins/nuke/publish/extract_script.py b/pype/plugins/nuke/_publish_unused/extract_script.py similarity index 94% rename from pype/plugins/nuke/publish/extract_script.py rename to pype/plugins/nuke/_publish_unused/extract_script.py index d0be98b93e..7d55ea0da4 100644 --- a/pype/plugins/nuke/publish/extract_script.py +++ b/pype/plugins/nuke/_publish_unused/extract_script.py @@ -27,8 +27,8 @@ class ExtractScript(pype.api.Extractor): shutil.copy(current_script, path) if "representations" not in instance.data: - instance.data["representations"] = [] - + instance.data["representations"] = list() + representation = { 'name': 'nk', 'ext': '.nk', diff --git a/pype/plugins/nuke/publish/collect_instances.py b/pype/plugins/nuke/publish/collect_instances.py index 7f119f9a1e..35673c5ff3 100644 --- a/pype/plugins/nuke/publish/collect_instances.py +++ b/pype/plugins/nuke/publish/collect_instances.py @@ -68,7 +68,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): "avalonKnob": avalon_knob_data, "publish": node.knob('publish').value(), "step": 1, - "fps": int(nuke.root()['fps'].value()) + "fps": nuke.root()['fps'].value() }) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 2dae39a1fc..216160616b 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -11,7 +11,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.1 label = "Collect Writes" hosts = ["nuke", "nukeassist"] - families = ["render.local", "render", "render.farm"] + families = ["render", "render.local", "render.farm"] def process(self, instance): @@ -96,5 +96,4 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "colorspace": node["colorspace"].value(), }) - self.log.debug("instance.data: {}".format(instance.data)) diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py index f424bf1200..1d6550024f 100644 --- a/pype/plugins/nuke/publish/extract_render_local.py +++ b/pype/plugins/nuke/publish/extract_render_local.py @@ -21,7 +21,6 @@ class NukeRenderLocal(pype.api.Extractor): def process(self, instance): node = instance[0] - context = instance.context self.log.debug("instance collected: {}".format(instance.data)) diff --git a/pype/plugins/nuke/publish/extract_review.py 
b/pype/plugins/nuke/publish/extract_review_data.py similarity index 69% rename from pype/plugins/nuke/publish/extract_review.py rename to pype/plugins/nuke/publish/extract_review_data.py index bdbd3d17a6..552aa0cdb0 100644 --- a/pype/plugins/nuke/publish/extract_review.py +++ b/pype/plugins/nuke/publish/extract_review_data.py @@ -2,10 +2,9 @@ import os import nuke import pyblish.api import pype -from pype.vendor import ffmpeg -class ExtractDataForReview(pype.api.Extractor): +class ExtractReviewData(pype.api.Extractor): """Extracts movie and thumbnail with baked in luts must be run after extract_render_local.py @@ -13,8 +12,7 @@ class ExtractDataForReview(pype.api.Extractor): """ order = pyblish.api.ExtractorOrder + 0.01 - label = "Extract Review" - optional = True + label = "Extract Review Data" families = ["review"] hosts = ["nuke"] @@ -35,63 +33,15 @@ class ExtractDataForReview(pype.api.Extractor): if "still" not in instance.data["families"]: self.render_review_representation(instance, representation="mov") - self.log.debug("review mov:") - self.transcode_mov(instance) - self.log.debug("instance.data: {}".format(instance.data)) self.render_review_representation(instance, representation="jpeg") else: - self.log.debug("instance: {}".format(instance)) self.render_review_representation(instance, representation="jpeg") # Restore selection [i["selected"].setValue(False) for i in nuke.allNodes()] [i["selected"].setValue(True) for i in selection] - def transcode_mov(self, instance): - collection = instance.data["collection"] - stagingDir = instance.data["stagingDir"].replace("\\", "/") - file_name = collection.format("{head}mov") - - review_mov = os.path.join(stagingDir, file_name).replace("\\", "/") - - self.log.info("transcoding review mov: {0}".format(review_mov)) - if instance.data.get("baked_colorspace_movie"): - input_movie = instance.data["baked_colorspace_movie"] - out, err = ( - ffmpeg - .input(input_movie) - .output( - review_mov, - pix_fmt='yuv420p', - crf=18, - timecode="00:00:00:01" - ) - .overwrite_output() - .run() - ) - - self.log.debug("Removing `{0}`...".format( - instance.data["baked_colorspace_movie"])) - os.remove(instance.data["baked_colorspace_movie"]) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'review', - 'ext': 'mov', - 'files': file_name, - "stagingDir": stagingDir, - "anatomy_template": "render", - "thumbnail": False, - "preview": True, - 'startFrameReview': instance.data['startFrame'], - 'endFrameReview': instance.data['endFrame'], - 'frameRate': instance.context.data["framerate"] - } - instance.data["representations"].append(representation) - def render_review_representation(self, instance, representation="mov"): @@ -172,6 +122,7 @@ class ExtractDataForReview(pype.api.Extractor): temporary_nodes.append(write_node) thumbnail = False preview = True + tags = ["review"] elif representation in "jpeg": file = fhead + "jpeg" @@ -184,29 +135,31 @@ class ExtractDataForReview(pype.api.Extractor): temporary_nodes.append(write_node) thumbnail = True preview = False + tags = ["thumbnail"] # retime for first_frame = int(last_frame) / 2 last_frame = int(last_frame) / 2 - # add into files for integration as representation - if "representations" not in instance.data: - instance.data["representations"] = [] - - repre = { - 'name': representation, - 'ext': representation, - 'files': file, - "stagingDir": stagingDir, - "anatomy_template": "render", - "thumbnail": thumbnail, - "preview": preview - } - 
instance.data["representations"].append(repre) + repre = { + 'name': representation, + 'ext': representation, + 'files': file, + "stagingDir": stagingDir, + "startFrame": first_frame, + "endFrame": last_frame, + "anatomy_template": "render", + "thumbnail": thumbnail, + "preview": preview, + "tags": tags + } + instance.data["representations"].append(repre) # Render frames nuke.execute(write_node.name(), int(first_frame), int(last_frame)) + self.log.debug("representations: {}".format(instance.data["representations"])) + # Clean up for node in temporary_nodes: nuke.delete(node) From f45986a1ce5395c000c0a047194ae01b3d7d58a9 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 12 Jul 2019 16:30:13 +0200 Subject: [PATCH 06/55] feat(pype): adding burnin plugin to function with reviews --- .../publish/integrate_ftrack_instances.py | 9 +- pype/plugins/global/publish/extract_burnin.py | 41 +++++-- pype/plugins/global/publish/extract_review.py | 103 ++++++++++-------- 3 files changed, 91 insertions(+), 62 deletions(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index a79e1f8ce5..9d8bd653d7 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -49,14 +49,14 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): for comp in instance.data['representations']: self.log.debug('component {}'.format(comp)) - if comp.get('thumbnail'): + if comp.get('thumbnail') or ("thumbnail" in comp.get('tags', [])): location = self.get_ftrack_location( 'ftrack.server', ft_session ) component_data = { "name": "thumbnail" # Default component name is "main". } - elif comp.get('preview'): + elif comp.get('preview') or ("preview" in comp.get('tags', [])): ''' Ftrack bug requirement: - Start frame must be 0 @@ -120,7 +120,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): componentList.append(component_item) # Create copy with ftrack.unmanaged location if thumb or prev - if comp.get('thumbnail') or comp.get('preview'): + if comp.get('thumbnail') or comp.get('preview') \ + or ("preview" in comp.get('tags', [])) \ + or ("thumbnail" in comp.get('tags', [])): unmanaged_loc = self.get_ftrack_location( 'ftrack.unmanaged', ft_session ) @@ -148,7 +150,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): componentList.append(component_item_src) - self.log.debug('componentsList: {}'.format(str(componentList))) instance.data["ftrackComponentsList"] = componentList diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 34ee33f602..b2956e330e 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -2,6 +2,7 @@ import os import subprocess import pype.api import json +import pyblish class ExtractBurnin(pype.api.Extractor): @@ -14,7 +15,8 @@ class ExtractBurnin(pype.api.Extractor): """ label = "Quicktime with burnins" - families = ["burnin"] + order = pyblish.api.ExtractorOrder + 0.03 + families = ["review", "burnin"] optional = True def process(self, instance): @@ -29,25 +31,30 @@ class ExtractBurnin(pype.api.Extractor): "start_frame": int(instance.data['startFrame']), "version": "v" + str(instance.context.data['version']) } + self.log.debug("__ burnin_data1: {}".format(burnin_data)) + for i, repre in enumerate(instance.data["representations"]): + self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre)) - for repre in 
instance.data["representations"]: - if (not repre.get("burnin", False) or - "burnin" not in repre.get("tags", [])): + if "burnin" not in repre.get("tags", []): continue - stagingdir = self.staging_dir(instance) + stagingdir = repre["stagingDir"] filename = "{0}".format(repre["files"]) - movieFileBurnin = filename + "Burn" + ".mov" + name = "_burnin" + movieFileBurnin = filename.replace(".mov", "") + name + ".mov" full_movie_path = os.path.join(stagingdir, repre["files"]) full_burnin_path = os.path.join(stagingdir, movieFileBurnin) + self.log.debug("__ full_burnin_path: {}".format(full_burnin_path)) burnin_data = { "input": full_movie_path.replace("\\", "/"), "output": full_burnin_path.replace("\\", "/"), "burnin_data": burnin_data - } + } + + self.log.debug("__ burnin_data2: {}".format(burnin_data)) json_data = json.dumps(burnin_data) scriptpath = os.path.join(os.environ['PYPE_MODULE_ROOT'], @@ -55,9 +62,19 @@ class ExtractBurnin(pype.api.Extractor): "scripts", "otio_burnin.py") - p = subprocess.Popen( - ['python', scriptpath, json_data] - ) - p.wait() + self.log.debug("__ scriptpath: {}".format(scriptpath)) - repre['files']: movieFileBurnin + try: + p = subprocess.Popen( + [os.getenv("PYPE_PYTHON_EXE"), scriptpath, json_data] + ) + p.wait() + except Exception as e: + raise RuntimeError("Burnin script didn't work: `{}`".format(e)) + + if os.path.exists(full_burnin_path): + repre_update = { + "files": movieFileBurnin, + "name": repre["name"] + name + } + instance.data["representations"][i].update(repre_update) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 8c570d0c73..af6d59d798 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -6,14 +6,14 @@ from pypeapp import config class ExtractReview(pyblish.api.InstancePlugin): - """Resolve any dependency issies + """Extracting Review mov file for Ftrack - This plug-in resolves any paths which, if not updated might break - the published file. + Compulsory attribute of representation is tags list with "review", + otherwise the representation is ignored. - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. + All new represetnations are created and encoded by ffmpeg following + presets found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`. To change the file extension + filter values use preset's attributes `ext_filter` """ label = "Extract Review" @@ -30,6 +30,8 @@ class ExtractReview(pyblish.api.InstancePlugin): fps = inst_data.get("fps") start_frame = inst_data.get("startFrame") + self.log.debug("Families In: `{}`".format(instance.data["families"])) + # get representation and loop them representations = instance.data["representations"] @@ -43,46 +45,50 @@ class ExtractReview(pyblish.api.InstancePlugin): if "review" in tags: - repre_new = repre.copy() - del(repre) - - staging_dir = repre_new["stagingDir"] - - if "mov" not in repre_new['ext']: - # get output presets and loop them - collected_frames = os.listdir(staging_dir) - collections, remainder = clique.assemble( - collected_frames) - - full_input_path = os.path.join( - staging_dir, collections[0].format( - '{head}{padding}{tail}') - ) - - filename = collections[0].format('{head}') - if not filename.endswith('.'): - filename += "." 
- mov_file = filename + "mov" - - else: - full_input_path = os.path.join( - staging_dir, repre_new["files"]) - - filename = repre_new["files"].split(".")[0] - mov_file = filename + ".mov" - # test if the file is not the input file - if not os.path.isfile(os.path.join( - staging_dir, mov_file)): - mov_file = filename + "_.mov" - - full_output_path = os.path.join(staging_dir, mov_file) - - self.log.info("input {}".format(full_input_path)) - self.log.info("output {}".format(full_output_path)) + staging_dir = repre["stagingDir"] for name, profile in output_profiles.items(): + if "mov" not in repre['ext']: + # get output presets and loop them + collections, remainder = clique.assemble( + repre["files"]) + + full_input_path = os.path.join( + staging_dir, collections[0].format( + '{head}{padding}{tail}') + ) + + filename = collections[0].format('{head}') + if filename.endswith('.'): + filename = filename[:-1] + else: + full_input_path = os.path.join( + staging_dir, repre["files"]) + filename = repre["files"].split(".")[0] + + mov_file = filename + "_{0}.{1}".format(name, "mov") + + full_output_path = os.path.join(staging_dir, mov_file) + + self.log.info("input {}".format(full_input_path)) + self.log.info("output {}".format(full_output_path)) + + repre_new = repre.copy() + self.log.debug("Profile name: {}".format(name)) - new_tags = tags + profile.get('tags', []) + + new_tags = tags[:] + p_tags = profile.get('tags', []) + self.log.info("p_tags: `{}`".format(p_tags)) + # add families + [instance.data["families"].append(t) for t in p_tags + if t not in instance.data["families"]] + # add to + [new_tags.append(t) for t in p_tags + if t not in new_tags] + + self.log.info("new_tags: `{}`".format(new_tags)) + input_args = [] # overrides output file @@ -126,16 +132,21 @@ class ExtractReview(pyblish.api.InstancePlugin): 'name': name, 'ext': 'mov', 'files': mov_file, - "thumbnail": False, - "preview": True, - "tags": new_tags + "tags": new_tags, + "outputName": name }) + repre_new.pop("preview") + repre_new.pop("thumbnail") # adding representation representations_new.append(repre_new) else: representations_new.append(repre) + else: + representations_new.append(repre) self.log.debug( "new representations: {}".format(representations_new)) instance.data["representations"] = representations_new + + self.log.debug("Families Out: `{}`".format(instance.data["families"])) From 37fd304026de383b399fab8a55afa9785e748cd1 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 12 Jul 2019 16:30:50 +0200 Subject: [PATCH 07/55] feat(pype): addig additional path element for representation preset names --- pype/plugins/global/publish/integrate_new.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index e70657eef9..c03e66c670 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -342,6 +342,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): template_data["representation"] = repre['ext'] + if repre.get("outputName"): + template_data["output"] = repre['outputName'] + src = os.path.join(stagingdir, fname) anatomy_filled = anatomy.format(template_data) dst = os.path.normpath( From bc277d5ad27a6e470e4aa0b8565a2ecd2497d666 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 16 Jul 2019 18:49:45 +0200 Subject: [PATCH 08/55] fix(nks): distributing handles didnt respect individual clip with tag, renaming collect_tag_types to *_main --- 
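[editor's note, not part of the patch] A minimal sketch, assuming Pyblish-style instance.data dictionaries, of what the `or` -> `and` change in collect_handles.py below amounts to: with `or`, a clip that carried its own handle tag but no "main" flag was still overwritten by the synchronized handles; with `and`, only clips that have neither flag are synchronized. The helper name `should_sync_handles` is hypothetical and used only for illustration.

def should_sync_handles(instance_data):
    # New behaviour (patch below): synchronize handles only when the clip is
    # neither the main plate nor explicitly tagged with its own handles.
    # The pre-patch code used `or` here, which also matched tagged clips.
    return (not instance_data.get("main")
            and not instance_data.get("handleTag"))

assert should_sync_handles({}) is True                    # plain clip: gets synced handles
assert should_sync_handles({"handleTag": True}) is False  # tagged clip keeps its own handles
assert should_sync_handles({"main": True}) is False       # main plate keeps its own handles
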
pype/plugins/nukestudio/publish/collect_handles.py | 2 +- .../publish/{collect_tag_types.py => collect_tag_main.py} | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) rename pype/plugins/nukestudio/publish/{collect_tag_types.py => collect_tag_main.py} (86%) diff --git a/pype/plugins/nukestudio/publish/collect_handles.py b/pype/plugins/nukestudio/publish/collect_handles.py index 104a60d02c..03652989b8 100644 --- a/pype/plugins/nukestudio/publish/collect_handles.py +++ b/pype/plugins/nukestudio/publish/collect_handles.py @@ -41,7 +41,7 @@ class CollectClipHandles(api.ContextPlugin): }) for instance in filtered_instances: - if not instance.data.get("main") or not instance.data.get("handleTag"): + if not instance.data.get("main") and not instance.data.get("handleTag"): self.log.debug("Synchronize handles on: `{}`".format( instance.data["name"])) name = instance.data["asset"] diff --git a/pype/plugins/nukestudio/publish/collect_tag_types.py b/pype/plugins/nukestudio/publish/collect_tag_main.py similarity index 86% rename from pype/plugins/nukestudio/publish/collect_tag_types.py rename to pype/plugins/nukestudio/publish/collect_tag_main.py index fad9e54735..36d9b95554 100644 --- a/pype/plugins/nukestudio/publish/collect_tag_types.py +++ b/pype/plugins/nukestudio/publish/collect_tag_main.py @@ -5,7 +5,7 @@ class CollectClipTagTypes(api.InstancePlugin): """Collect Types from Tags of selected track items.""" order = api.CollectorOrder + 0.012 - label = "Collect Plate Type from Tag" + label = "Collect main flag" hosts = ["nukestudio"] families = ['clip'] @@ -25,7 +25,8 @@ class CollectClipTagTypes(api.InstancePlugin): t_subset.capitalize()) if "plateMain" in subset_name: - instance.data["main"] = True + if not instance.data.get("main"): + instance.data["main"] = True self.log.info("`plateMain` found in instance.name: `{}`".format( instance.data["name"])) return From b07bf3d20f4e5340f90132f1c12f96b70cefcd1c Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 16 Jul 2019 18:50:35 +0200 Subject: [PATCH 09/55] fix(global): debug print after loop --- pype/plugins/global/publish/integrate_new.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index e70657eef9..1b6c203343 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -286,8 +286,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): os.path.normpath( anatomy_filled[template_name]["path"]) ) - self.log.debug( - "test_dest_files: {}".format(str(test_dest_files))) + + self.log.debug( + "test_dest_files: {}".format(str(test_dest_files))) dst_collections, remainder = clique.assemble(test_dest_files) dst_collection = dst_collections[0] From 37a4a4af3d6657e21072ba781d412f109eace60b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 17 Jul 2019 10:21:39 +0200 Subject: [PATCH 10/55] fix(pype): supporting search for version with `.v001` --- pype/templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/templates.py b/pype/templates.py index 7d12801a00..0bc5fc9d99 100644 --- a/pype/templates.py +++ b/pype/templates.py @@ -85,7 +85,7 @@ def get_version_from_path(file): v: version number in string ('001') """ - pattern = re.compile(r"_v([0-9]*)") + pattern = re.compile(r"[\.\_]v([0-9]*)") try: v = pattern.findall(file)[0] return v From 460abbd74e6fa75bb29a6c67386b8e829cd03832 Mon Sep 17 00:00:00 2001 From: jezschaj Date: Wed, 17 Jul 2019 11:34:08 
+0200 Subject: [PATCH 11/55] fix(nuke): get version with dot --- pype/templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/templates.py b/pype/templates.py index 7d12801a00..596bf8d661 100644 --- a/pype/templates.py +++ b/pype/templates.py @@ -85,7 +85,7 @@ def get_version_from_path(file): v: version number in string ('001') """ - pattern = re.compile(r"_v([0-9]*)") + pattern = re.compile(r"[\._]v([0-9]*)") try: v = pattern.findall(file)[0] return v From 3ee587c6998327f6d5b57bd9287cdd0eabf77fc2 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 17 Jul 2019 13:11:27 +0100 Subject: [PATCH 12/55] Ensure AVALON_PROJECT is set. --- pype/ftrack/actions/action_rv.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pype/ftrack/actions/action_rv.py b/pype/ftrack/actions/action_rv.py index b3cc53c662..158ddc63e8 100644 --- a/pype/ftrack/actions/action_rv.py +++ b/pype/ftrack/actions/action_rv.py @@ -261,6 +261,12 @@ class RVAction(BaseAction): def get_file_paths(self, session, event): """Get file paths from selected components.""" + link = session.get( + "Component", list(event["data"]["values"].values())[0] + )["version"]["asset"]["parent"]["link"][0] + project = session.get(link["type"], link["id"]) + os.environ["AVALON_PROJECT"] = project["name"] + api.Session["AVALON_PROJECT"] = project["name"] io.install() paths = [] From 615afe938267ccc9ac263b2650cc9902e9451a59 Mon Sep 17 00:00:00 2001 From: jezschaj Date: Wed, 17 Jul 2019 14:44:23 +0200 Subject: [PATCH 13/55] fix(glob): burnin and review didn't work properly --- pype/plugins/global/publish/extract_burnin.py | 5 ++++- pype/plugins/global/publish/extract_review.py | 12 ++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index b2956e330e..425db087d3 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -62,13 +62,16 @@ class ExtractBurnin(pype.api.Extractor): "scripts", "otio_burnin.py") - self.log.debug("__ scriptpath: {}".format(scriptpath)) + self.log.debug("Burnin scriptpath: {}".format(scriptpath)) try: p = subprocess.Popen( [os.getenv("PYPE_PYTHON_EXE"), scriptpath, json_data] ) p.wait() + if not os.path.isfile(full_burnin_path): + self.log.error( + "Burnin file wasn't created succesfully") except Exception as e: raise RuntimeError("Burnin script didn't work: `{}`".format(e)) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index af6d59d798..98013c518a 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -48,6 +48,10 @@ class ExtractReview(pyblish.api.InstancePlugin): staging_dir = repre["stagingDir"] for name, profile in output_profiles.items(): + ext = profile.get("ext", None) + if not ext: + ext = "mov" + self.log.warning("`ext` attribute not in output profile. 
Setting to default ext: `mov`") if "mov" not in repre['ext']: # get output presets and loop them collections, remainder = clique.assemble( @@ -66,9 +70,9 @@ class ExtractReview(pyblish.api.InstancePlugin): staging_dir, repre["files"]) filename = repre["files"].split(".")[0] - mov_file = filename + "_{0}.{1}".format(name, "mov") + repr_file = filename + "_{0}.{1}".format(name, ext) - full_output_path = os.path.join(staging_dir, mov_file) + full_output_path = os.path.join(staging_dir, repr_file) self.log.info("input {}".format(full_input_path)) self.log.info("output {}".format(full_output_path)) @@ -130,8 +134,8 @@ class ExtractReview(pyblish.api.InstancePlugin): # create representation data repre_new.update({ 'name': name, - 'ext': 'mov', - 'files': mov_file, + 'ext': ext, + 'files': repr_file, "tags": new_tags, "outputName": name }) From c4c3252b33adf477c5b1bf02fa8728105bfdb332 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Thu, 18 Jul 2019 09:59:57 +0200 Subject: [PATCH 14/55] adding maya support --- .../_publish_unused/extract_quicktime.py | 86 +++++++++++++++++++ .../global/publish/extract_quicktime.py | 86 ------------------- .../plugins/maya/publish/extract_quicktime.py | 49 +---------- 3 files changed, 89 insertions(+), 132 deletions(-) create mode 100644 pype/plugins/global/_publish_unused/extract_quicktime.py delete mode 100644 pype/plugins/global/publish/extract_quicktime.py diff --git a/pype/plugins/global/_publish_unused/extract_quicktime.py b/pype/plugins/global/_publish_unused/extract_quicktime.py new file mode 100644 index 0000000000..6a33d825d0 --- /dev/null +++ b/pype/plugins/global/_publish_unused/extract_quicktime.py @@ -0,0 +1,86 @@ +import os +import pyblish.api +import subprocess +from pype.vendor import clique + + +class ExtractQuicktimeEXR(pyblish.api.InstancePlugin): + """Resolve any dependency issies + + This plug-in resolves any paths which, if not updated might break + the published file. + + The order of families is important, when working with lookdev you want to + first publish the texture, update the texture paths in the nodes and then + publish the shading network. Same goes for file dependent assets. + """ + + label = "Extract Quicktime" + order = pyblish.api.ExtractorOrder + families = ["imagesequence", "render", "write", "source"] + hosts = ["shell"] + + def process(self, instance): + # fps = instance.data.get("fps") + # start = instance.data.get("startFrame") + # stagingdir = os.path.normpath(instance.data.get("stagingDir")) + # + # collected_frames = os.listdir(stagingdir) + # collections, remainder = clique.assemble(collected_frames) + # + # full_input_path = os.path.join( + # stagingdir, collections[0].format('{head}{padding}{tail}') + # ) + # self.log.info("input {}".format(full_input_path)) + # + # filename = collections[0].format('{head}') + # if not filename.endswith('.'): + # filename += "." 
+ # movFile = filename + "mov" + # full_output_path = os.path.join(stagingdir, movFile) + # + # self.log.info("output {}".format(full_output_path)) + # + # config_data = instance.context.data['output_repre_config'] + # + # proj_name = os.environ.get('AVALON_PROJECT', '__default__') + # profile = config_data.get(proj_name, config_data['__default__']) + # + # input_args = [] + # # overrides output file + # input_args.append("-y") + # # preset's input data + # input_args.extend(profile.get('input', [])) + # # necessary input data + # input_args.append("-start_number {}".format(start)) + # input_args.append("-i {}".format(full_input_path)) + # input_args.append("-framerate {}".format(fps)) + # + # output_args = [] + # # preset's output data + # output_args.extend(profile.get('output', [])) + # # output filename + # output_args.append(full_output_path) + # mov_args = [ + # "ffmpeg", + # " ".join(input_args), + # " ".join(output_args) + # ] + # subprocess_mov = " ".join(mov_args) + # sub_proc = subprocess.Popen(subprocess_mov) + # sub_proc.wait() + # + # if not os.path.isfile(full_output_path): + # raise("Quicktime wasn't created succesfully") + # + # if "representations" not in instance.data: + # instance.data["representations"] = [] + # + # representation = { + # 'name': 'mov', + # 'ext': 'mov', + # 'files': movFile, + # "stagingDir": stagingdir, + # "preview": True + # } + # instance.data["representations"].append(representation) diff --git a/pype/plugins/global/publish/extract_quicktime.py b/pype/plugins/global/publish/extract_quicktime.py deleted file mode 100644 index b6ccf38385..0000000000 --- a/pype/plugins/global/publish/extract_quicktime.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import pyblish.api -import subprocess -from pype.vendor import clique - - -class ExtractQuicktimeEXR(pyblish.api.InstancePlugin): - """Resolve any dependency issies - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. - """ - - label = "Extract Quicktime" - order = pyblish.api.ExtractorOrder - families = ["imagesequence", "render", "write", "source"] - hosts = ["shell"] - - def process(self, instance): - fps = instance.data.get("fps") - start = instance.data.get("startFrame") - stagingdir = os.path.normpath(instance.data.get("stagingDir")) - - collected_frames = os.listdir(stagingdir) - collections, remainder = clique.assemble(collected_frames) - - full_input_path = os.path.join( - stagingdir, collections[0].format('{head}{padding}{tail}') - ) - self.log.info("input {}".format(full_input_path)) - - filename = collections[0].format('{head}') - if not filename.endswith('.'): - filename += "." 
- movFile = filename + "mov" - full_output_path = os.path.join(stagingdir, movFile) - - self.log.info("output {}".format(full_output_path)) - - config_data = instance.context.data['output_repre_config'] - - proj_name = os.environ.get('AVALON_PROJECT', '__default__') - profile = config_data.get(proj_name, config_data['__default__']) - - input_args = [] - # overrides output file - input_args.append("-y") - # preset's input data - input_args.extend(profile.get('input', [])) - # necessary input data - input_args.append("-start_number {}".format(start)) - input_args.append("-i {}".format(full_input_path)) - input_args.append("-framerate {}".format(fps)) - - output_args = [] - # preset's output data - output_args.extend(profile.get('output', [])) - # output filename - output_args.append(full_output_path) - mov_args = [ - "ffmpeg", - " ".join(input_args), - " ".join(output_args) - ] - subprocess_mov = " ".join(mov_args) - sub_proc = subprocess.Popen(subprocess_mov) - sub_proc.wait() - - if not os.path.isfile(full_output_path): - raise("Quicktime wasn't created succesfully") - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'mov', - 'ext': 'mov', - 'files': movFile, - "stagingDir": stagingdir, - "preview": True - } - instance.data["representations"].append(representation) diff --git a/pype/plugins/maya/publish/extract_quicktime.py b/pype/plugins/maya/publish/extract_quicktime.py index ff08799c0a..87608af641 100644 --- a/pype/plugins/maya/publish/extract_quicktime.py +++ b/pype/plugins/maya/publish/extract_quicktime.py @@ -99,7 +99,6 @@ class ExtractQuicktime(pype.api.Extractor): playblast = capture_gui.lib.capture_scene(preset) self.log.info("file list {}".format(playblast)) - # self.log.info("Calculating HUD data overlay") collected_frames = os.listdir(stagingdir) collections, remainder = clique.assemble(collected_frames) @@ -107,61 +106,19 @@ class ExtractQuicktime(pype.api.Extractor): stagingdir, collections[0].format('{head}{padding}{tail}')) self.log.info("input {}".format(input_path)) - movieFile = filename + ".mov" - movieFileBurnin = filename + "Burn" + ".mov" - - full_movie_path = os.path.join(stagingdir, movieFile) - full_burnin_path = os.path.join(stagingdir, movieFileBurnin) - self.log.info("output {}".format(full_movie_path)) - with avalon.maya.suspended_refresh(): - try: - ( - ffmpeg - .input(input_path, framerate=fps, start_number=int(start)) - .output(full_movie_path) - .run(overwrite_output=True, - capture_stdout=True, - capture_stderr=True) - ) - except ffmpeg.Error as e: - ffmpeg_error = 'ffmpeg error: {}'.format(e.stderr) - self.log.error(ffmpeg_error) - raise RuntimeError(ffmpeg_error) - - version = instance.context.data['version'] - - burnin_data = { - "input": full_movie_path.replace("\\", "/"), - "output": full_burnin_path.replace("\\", "/"), - "burnin_data": { - "username": instance.context.data['user'], - "asset": os.environ['AVALON_ASSET'], - "task": os.environ['AVALON_TASK'], - "start_frame": int(start), - "version": "v" + str(version) - } - } - - json_data = json.dumps(burnin_data) - scriptpath = os.path.join(os.environ['PYPE_MODULE_ROOT'], "pype", "scripts", "otio_burnin.py") - - p = subprocess.Popen( - ['python', scriptpath, json_data] - ) - p.wait() - if "representations" not in instance.data: instance.data["representations"] = [] representation = { 'name': 'mov', 'ext': 'mov', - 'files': movieFileBurnin, + 'files': collected_frames, "stagingDir": stagingdir, 'startFrame': start, 'endFrame': end, 
'frameRate': fps, - 'preview': True + 'preview': True, + 'tags': ['review'] } instance.data["representations"].append(representation) From f77ba219d92de4bbaa8e9f062ddb49a4037b09c3 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Thu, 18 Jul 2019 10:00:20 +0200 Subject: [PATCH 15/55] addin deadline support for the new review plugins --- .../global/publish/collect_filesequences.py | 15 +++++++++++---- pype/plugins/global/publish/submit_publish_job.py | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index ad128c099b..5c3914aa41 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -6,6 +6,7 @@ from pprint import pformat import pyblish.api from avalon import api +import pype.api as pype def collect(root, @@ -64,7 +65,7 @@ def collect(root, return collections -class CollectFileSequences(pyblish.api.ContextPlugin): +class CollectRenderedFrames(pyblish.api.ContextPlugin): """Gather file sequences from working directory When "FILESEQUENCE" environment variable is set these paths (folders or @@ -87,7 +88,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder targets = ["filesequence"] - label = "File Sequences" + label = "RenderedFrames" def process(self, context): if os.environ.get("PYPE_PUBLISH_PATHS"): @@ -128,6 +129,9 @@ class CollectFileSequences(pyblish.api.ContextPlugin): self.log.info("setting session using metadata") api.Session.update(session) os.environ.update(session) + + version = data.get("version") + context.data['version'] = version else: # Search in directory data = dict() @@ -161,6 +165,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin): assert isinstance(families, (list, tuple)), "Must be iterable" assert families, "Must have at least a single family" families.append("ftrack") + families.append("review") for collection in collections: instance = context.create_instance(str(collection)) self.log.info("Collection: %s" % list(collection)) @@ -191,7 +196,8 @@ class CollectFileSequences(pyblish.api.ContextPlugin): "startFrame": start, "endFrame": end, "fps": fps, - "source": data.get('source', '') + "source": data.get('source', ''), + "version": version }) instance.append(collection) instance.context.data['fps'] = fps @@ -205,7 +211,8 @@ class CollectFileSequences(pyblish.api.ContextPlugin): 'files': list(collection), "stagingDir": root, "anatomy_template": "render", - "frameRate": fps + "frameRate": fps, + "tags": ['review'] } instance.data["representations"].append(representation) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 992553cc7e..057fd2362c 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -276,7 +276,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "families": ["render"], "source": source, "user": context.data["user"], - + "version": context.data["version"], # Optional metadata (for debugging) "metadata": { "instance": data, From 8b4dc6332e6e79fecaecb43e542a03ac3afdd395 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Thu, 18 Jul 2019 10:00:47 +0200 Subject: [PATCH 16/55] making sure we're sending clean paths to otio --- pype/plugins/global/publish/extract_burnin.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git 
a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index b2956e330e..2e9e61ad82 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -44,8 +44,8 @@ class ExtractBurnin(pype.api.Extractor): name = "_burnin" movieFileBurnin = filename.replace(".mov", "") + name + ".mov" - full_movie_path = os.path.join(stagingdir, repre["files"]) - full_burnin_path = os.path.join(stagingdir, movieFileBurnin) + full_movie_path = os.path.join(os.path.normpath(stagingdir), repre["files"]) + full_burnin_path = os.path.join(os.path.normpath(stagingdir), movieFileBurnin) self.log.debug("__ full_burnin_path: {}".format(full_burnin_path)) burnin_data = { @@ -57,12 +57,13 @@ class ExtractBurnin(pype.api.Extractor): self.log.debug("__ burnin_data2: {}".format(burnin_data)) json_data = json.dumps(burnin_data) - scriptpath = os.path.join(os.environ['PYPE_MODULE_ROOT'], + scriptpath = os.path.normpath(os.path.join(os.environ['PYPE_MODULE_ROOT'], "pype", "scripts", - "otio_burnin.py") + "otio_burnin.py")) self.log.debug("__ scriptpath: {}".format(scriptpath)) + self.log.debug("__ EXE: {}".format(os.getenv("PYPE_PYTHON_EXE"))) try: p = subprocess.Popen( From 35ebde3a419863cec6c29de505880db4975f4a7d Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Thu, 18 Jul 2019 10:01:51 +0200 Subject: [PATCH 17/55] add option to filter by family in the review presets --- pype/plugins/global/publish/extract_review.py | 159 +++++++++--------- 1 file changed, 82 insertions(+), 77 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index af6d59d798..62a2eb0bd4 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -36,7 +36,7 @@ class ExtractReview(pyblish.api.InstancePlugin): representations = instance.data["representations"] # filter out mov and img sequences - representations_new = list() + representations_new = representations.copy() for repre in representations: if repre['ext'] in plugin_attrs["ext_filter"]: tags = repre.get("tags", []) @@ -44,106 +44,111 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.info("Try repre: {}".format(repre)) if "review" in tags: - staging_dir = repre["stagingDir"] - for name, profile in output_profiles.items(): - if "mov" not in repre['ext']: - # get output presets and loop them - collections, remainder = clique.assemble( - repre["files"]) + if any(item in instance.data['families'] for item in profile['families']): + if isinstance(repre["files"], list): + # if "mov" not in repre['ext']: + # get output presets and loop them + collections, remainder = clique.assemble( + repre["files"]) - full_input_path = os.path.join( - staging_dir, collections[0].format( - '{head}{padding}{tail}') - ) + full_input_path = os.path.join( + staging_dir, collections[0].format( + '{head}{padding}{tail}') + ) - filename = collections[0].format('{head}') - if filename.endswith('.'): - filename = filename[:-1] - else: - full_input_path = os.path.join( - staging_dir, repre["files"]) - filename = repre["files"].split(".")[0] + filename = collections[0].format('{head}') + if filename.endswith('.'): + filename = filename[:-1] + else: + self.log.info("1: {}".format(full_input_path)) + full_input_path = os.path.join( + staging_dir, repre["files"]) + filename = repre["files"].split(".")[0] - mov_file = filename + "_{0}.{1}".format(name, "mov") + mov_file = filename + "_{0}.{1}".format(name, "mov") 
- full_output_path = os.path.join(staging_dir, mov_file) + full_output_path = os.path.join(staging_dir, mov_file) - self.log.info("input {}".format(full_input_path)) - self.log.info("output {}".format(full_output_path)) + self.log.info("input {}".format(full_input_path)) + self.log.info("output {}".format(full_output_path)) - repre_new = repre.copy() + repre_new = repre.copy() - self.log.debug("Profile name: {}".format(name)) + self.log.debug("Profile name: {}".format(name)) - new_tags = tags[:] - p_tags = profile.get('tags', []) - self.log.info("p_tags: `{}`".format(p_tags)) - # add families - [instance.data["families"].append(t) for t in p_tags - if t not in instance.data["families"]] - # add to - [new_tags.append(t) for t in p_tags - if t not in new_tags] + new_tags = tags[:] + p_tags = profile.get('tags', []) + self.log.info("p_tags: `{}`".format(p_tags)) + # add families + [instance.data["families"].append(t) for t in p_tags + if t not in instance.data["families"]] + # add to + [new_tags.append(t) for t in p_tags + if t not in new_tags] - self.log.info("new_tags: `{}`".format(new_tags)) + self.log.info("new_tags: `{}`".format(new_tags)) - input_args = [] + input_args = [] - # overrides output file - input_args.append("-y") + # overrides output file + input_args.append("-y") - # preset's input data - input_args.extend(profile.get('input', [])) + # preset's input data + input_args.extend(profile.get('input', [])) - # necessary input data - # adds start arg only if image sequence - if "mov" not in repre_new['ext']: - input_args.append("-start_number {}".format( - start_frame)) + # necessary input data + # adds start arg only if image sequence + if "mov" not in repre_new['ext']: + input_args.append("-start_number {}".format( + start_frame)) - input_args.append("-i {}".format(full_input_path)) - input_args.append("-framerate {}".format(fps)) + input_args.append("-i {}".format(full_input_path)) + input_args.append("-framerate {}".format(fps)) - output_args = [] - # preset's output data - output_args.extend(profile.get('output', [])) + output_args = [] + # preset's output data + output_args.extend(profile.get('output', [])) - # output filename - output_args.append(full_output_path) - mov_args = [ - "ffmpeg", - " ".join(input_args), - " ".join(output_args) - ] - subprocess_mov = " ".join(mov_args) + # output filename + output_args.append(full_output_path) + mov_args = [ + "ffmpeg", + " ".join(input_args), + " ".join(output_args) + ] + subprocess_mov = " ".join(mov_args) - # run subprocess - sub_proc = subprocess.Popen(subprocess_mov) - sub_proc.wait() + # run subprocess + sub_proc = subprocess.Popen(subprocess_mov) + sub_proc.wait() - if not os.path.isfile(full_output_path): - self.log.error( - "Quicktime wasn't created succesfully") + if not os.path.isfile(full_output_path): + self.log.error( + "Quicktime wasn't created succesfully") - # create representation data - repre_new.update({ - 'name': name, - 'ext': 'mov', - 'files': mov_file, - "tags": new_tags, - "outputName": name - }) - repre_new.pop("preview") - repre_new.pop("thumbnail") + # create representation data + repre_new.update({ + 'name': name, + 'ext': 'mov', + 'files': mov_file, + "tags": new_tags, + "outputName": name + }) - # adding representation - representations_new.append(repre_new) + if repre_new.get('preview'): + repre_new.pop("preview") + if repre_new.get('thumbnail'): + repre_new.pop("thumbnail") + + # adding representation + representations_new.append(repre_new) else: - representations_new.append(repre) + continue else: 
- representations_new.append(repre) + continue + self.log.debug( "new representations: {}".format(representations_new)) From 5c819475c6edc6c18d32d032b806b1f760b9e03f Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Thu, 18 Jul 2019 14:29:27 +0200 Subject: [PATCH 18/55] fix allwo burnin without explicit verson --- pype/plugins/global/publish/collect_filesequences.py | 5 +---- pype/plugins/global/publish/extract_burnin.py | 7 ++++++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 5c3914aa41..ed48404a98 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -130,8 +130,6 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): api.Session.update(session) os.environ.update(session) - version = data.get("version") - context.data['version'] = version else: # Search in directory data = dict() @@ -196,8 +194,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "startFrame": start, "endFrame": end, "fps": fps, - "source": data.get('source', ''), - "version": version + "source": data.get('source', '') }) instance.append(collection) instance.context.data['fps'] = fps diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 2e9e61ad82..721b7e75f8 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -24,13 +24,18 @@ class ExtractBurnin(pype.api.Extractor): raise RuntimeError("Burnin needs already created mov to work on.") # TODO: expand burnin data list to include all usefull keys + version = '' + if instance.context.data.get('version'): + version = "v" + str(instance.context.data['version']) + burnin_data = { "username": instance.context.data['user'], "asset": os.environ['AVALON_ASSET'], "task": os.environ['AVALON_TASK'], "start_frame": int(instance.data['startFrame']), - "version": "v" + str(instance.context.data['version']) + "version": version } + self.log.debug("__ burnin_data1: {}".format(burnin_data)) for i, repre in enumerate(instance.data["representations"]): self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre)) From 39d03919341eb5b129c751c51ec5192bd4a9ea30 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 18 Jul 2019 16:19:21 +0200 Subject: [PATCH 19/55] fix(nuke): review workflow and other fixes --- pype/nuke/lib.py | 23 +++++--- .../publish/integrate_ftrack_instances.py | 1 + pype/plugins/global/publish/extract_burnin.py | 16 +++--- pype/plugins/global/publish/extract_review.py | 55 ++++++++++++------- pype/plugins/global/publish/integrate_new.py | 48 ++++++++-------- .../plugins/nuke/publish/collect_instances.py | 2 +- pype/plugins/nuke/publish/collect_writes.py | 9 +-- .../nuke/publish/extract_render_local.py | 15 ++--- .../nuke/publish/extract_review_data.py | 25 +++++---- .../nuke/publish/validate_rendered_frames.py | 9 ++- pype/plugins/nuke/publish/validate_script.py | 2 +- 11 files changed, 115 insertions(+), 90 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index a9aac47228..6a57704fff 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -343,8 +343,6 @@ def reset_frame_range_handles(): """Set frame range to current asset""" root = nuke.root() - fps = float(api.Session.get("AVALON_FPS", 25)) - root["fps"].setValue(fps) name = api.Session["AVALON_ASSET"] asset = io.find_one({"name": name, "type": "asset"}) @@ -356,7 +354,7 @@ def 
reset_frame_range_handles(): data = asset["data"] missing_cols = [] - check_cols = ["fstart", "fend", "handle_start", "handle_end"] + check_cols = ["fps", "fstart", "fend", "handle_start", "handle_end"] for col in check_cols: if col not in data: @@ -373,20 +371,29 @@ def reset_frame_range_handles(): handles = avalon.nuke.get_handles(asset) handle_start, handle_end = pype.get_handle_irregular(asset) - log.info("__ handles: `{}`".format(handles)) - log.info("__ handle_start: `{}`".format(handle_start)) - log.info("__ handle_end: `{}`".format(handle_end)) - + fps = asset["data"]["fps"] edit_in = int(asset["data"]["fstart"]) - handle_start edit_out = int(asset["data"]["fend"]) + handle_end + root["fps"].setValue(fps) root["first_frame"].setValue(edit_in) root["last_frame"].setValue(edit_out) + log.info("__ handles: `{}`".format(handles)) + log.info("__ handle_start: `{}`".format(handle_start)) + log.info("__ handle_end: `{}`".format(handle_end)) + log.info("__ edit_in: `{}`".format(edit_in)) + log.info("__ edit_out: `{}`".format(edit_out)) + log.info("__ fps: `{}`".format(fps)) + # setting active viewers nuke.frame(int(asset["data"]["fstart"])) - vv = nuke.activeViewer().node() + try: + vv = nuke.activeViewer().node() + except AttributeError: + log.error("No active viewer. Select any node and hit num `1`") + return range = '{0}-{1}'.format( int(asset["data"]["fstart"]), diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index ef2ea6f6ca..02455454bb 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -56,6 +56,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): component_data = { "name": "thumbnail" # Default component name is "main". 
} + comp['thumbnail'] = True elif comp.get('preview') or ("preview" in comp.get('tags', [])): ''' Ftrack bug requirement: diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 0559325ff2..5f16cc91f2 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -28,15 +28,14 @@ class ExtractBurnin(pype.api.Extractor): if instance.context.data.get('version'): version = "v" + str(instance.context.data['version']) - burnin_data = { + prep_data = { "username": instance.context.data['user'], "asset": os.environ['AVALON_ASSET'], "task": os.environ['AVALON_TASK'], "start_frame": int(instance.data['startFrame']), "version": version } - - self.log.debug("__ burnin_data1: {}".format(burnin_data)) + self.log.debug("__ prep_data: {}".format(prep_data)) for i, repre in enumerate(instance.data["representations"]): self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre)) @@ -56,7 +55,7 @@ class ExtractBurnin(pype.api.Extractor): burnin_data = { "input": full_movie_path.replace("\\", "/"), "output": full_burnin_path.replace("\\", "/"), - "burnin_data": burnin_data + "burnin_data": prep_data } self.log.debug("__ burnin_data2: {}".format(burnin_data)) @@ -76,14 +75,17 @@ class ExtractBurnin(pype.api.Extractor): ) p.wait() if not os.path.isfile(full_burnin_path): - self.log.error( - "Burnin file wasn't created succesfully") + raise RuntimeError("File not existing: {}".format(full_burnin_path)) except Exception as e: raise RuntimeError("Burnin script didn't work: `{}`".format(e)) if os.path.exists(full_burnin_path): repre_update = { "files": movieFileBurnin, - "name": repre["name"] + name + "name": repre["name"] } instance.data["representations"][i].update(repre_update) + + # removing the source mov file + os.remove(full_movie_path) + self.log.debug("Removed: `{}`".format(full_movie_path)) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 62a2eb0bd4..3a764b19c3 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -36,7 +36,7 @@ class ExtractReview(pyblish.api.InstancePlugin): representations = instance.data["representations"] # filter out mov and img sequences - representations_new = representations.copy() + representations_new = representations[:] for repre in representations: if repre['ext'] in plugin_attrs["ext_filter"]: tags = repre.get("tags", []) @@ -46,10 +46,19 @@ class ExtractReview(pyblish.api.InstancePlugin): if "review" in tags: staging_dir = repre["stagingDir"] for name, profile in output_profiles.items(): + self.log.debug("Profile name: {}".format(name)) + + ext = profile.get("ext", None) + if not ext: + ext = "mov" + self.log.warning( + "`ext` attribute not in output profile. 
Setting to default ext: `mov`") + + self.log.debug("instance.families: {}".format(instance.data['families'])) + self.log.debug("profile.families: {}".format(profile['families'])) + if any(item in instance.data['families'] for item in profile['families']): if isinstance(repre["files"], list): - # if "mov" not in repre['ext']: - # get output presets and loop them collections, remainder = clique.assemble( repre["files"]) @@ -62,27 +71,26 @@ class ExtractReview(pyblish.api.InstancePlugin): if filename.endswith('.'): filename = filename[:-1] else: - self.log.info("1: {}".format(full_input_path)) full_input_path = os.path.join( staging_dir, repre["files"]) filename = repre["files"].split(".")[0] - mov_file = filename + "_{0}.{1}".format(name, "mov") + repr_file = filename + "_{0}.{1}".format(name, ext) - full_output_path = os.path.join(staging_dir, mov_file) + full_output_path = os.path.join( + staging_dir, repr_file) self.log.info("input {}".format(full_input_path)) self.log.info("output {}".format(full_output_path)) repre_new = repre.copy() - self.log.debug("Profile name: {}".format(name)) - new_tags = tags[:] p_tags = profile.get('tags', []) self.log.info("p_tags: `{}`".format(p_tags)) # add families - [instance.data["families"].append(t) for t in p_tags + [instance.data["families"].append(t) + for t in p_tags if t not in instance.data["families"]] # add to [new_tags.append(t) for t in p_tags @@ -101,16 +109,22 @@ class ExtractReview(pyblish.api.InstancePlugin): # necessary input data # adds start arg only if image sequence if "mov" not in repre_new['ext']: - input_args.append("-start_number {}".format( - start_frame)) + input_args.append("-start_number {0} -framerate {1}".format( + start_frame, fps)) input_args.append("-i {}".format(full_input_path)) - input_args.append("-framerate {}".format(fps)) output_args = [] # preset's output data output_args.extend(profile.get('output', [])) + # letter_box + # TODO: add to documentation + lb = profile.get('letter_box', None) + if lb: + output_args.append( + "-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb)) + # output filename output_args.append(full_output_path) mov_args = [ @@ -118,25 +132,25 @@ class ExtractReview(pyblish.api.InstancePlugin): " ".join(input_args), " ".join(output_args) ] - subprocess_mov = " ".join(mov_args) + subprcs_cmd = " ".join(mov_args) # run subprocess - sub_proc = subprocess.Popen(subprocess_mov) + self.log.debug("{}".format(subprcs_cmd)) + sub_proc = subprocess.Popen(subprcs_cmd) sub_proc.wait() if not os.path.isfile(full_output_path): - self.log.error( + raise FileExistsError( "Quicktime wasn't created succesfully") # create representation data repre_new.update({ 'name': name, - 'ext': 'mov', - 'files': mov_file, + 'ext': ext, + 'files': repr_file, "tags": new_tags, "outputName": name }) - if repre_new.get('preview'): repre_new.pop("preview") if repre_new.get('thumbnail'): @@ -144,12 +158,15 @@ class ExtractReview(pyblish.api.InstancePlugin): # adding representation representations_new.append(repre_new) + # if "delete" in tags: + # if "mov" in full_input_path: + # os.remove(full_input_path) + # self.log.debug("Removed: `{}`".format(full_input_path)) else: continue else: continue - self.log.debug( "new representations: {}".format(representations_new)) instance.data["representations"] = representations_new diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py 
index d9e4f3f533..e758789c37 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -99,18 +99,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # \ / # o __/ # - for result in context.data["results"]: - if not result["success"]: - self.log.debug(result) - exc_type, exc_value, exc_traceback = result["error_info"] - extracted_traceback = traceback.extract_tb(exc_traceback)[-1] - self.log.debug( - "Error at line {}: \"{}\"".format( - extracted_traceback[1], result["error"] - ) - ) - assert all(result["success"] for result in context.data["results"]), ( - "Atomicity not held, aborting.") + # for result in context.data["results"]: + # if not result["success"]: + # self.log.debug(result) + # exc_type, exc_value, exc_traceback = result["error_info"] + # extracted_traceback = traceback.extract_tb(exc_traceback)[-1] + # self.log.debug( + # "Error at line {}: \"{}\"".format( + # extracted_traceback[1], result["error"] + # ) + # ) + # assert all(result["success"] for result in context.data["results"]), ( + # "Atomicity not held, aborting.") # Assemble # @@ -225,17 +225,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # hierarchy = os.path.sep.join(hierarchy) hierarchy = os.path.join(*parents) - template_data = {"root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - "silo": asset['silo'], - "task": TASK, - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": int(version["name"]), - "hierarchy": hierarchy} - anatomy = instance.context.data['anatomy'] # Find the representations to transfer amongst the files @@ -257,6 +246,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # | || # |_______| # + # create template data for Anatomy + template_data = {"root": root, + "project": {"name": PROJECT, + "code": project['data']['code']}, + "silo": asset['silo'], + "task": TASK, + "asset": ASSET, + "family": instance.data['family'], + "subset": subset["name"], + "version": int(version["name"]), + "hierarchy": hierarchy} files = repre['files'] if repre.get('stagingDir'): @@ -286,7 +286,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): os.path.normpath( anatomy_filled[template_name]["path"]) ) - + self.log.debug( "test_dest_files: {}".format(str(test_dest_files))) diff --git a/pype/plugins/nuke/publish/collect_instances.py b/pype/plugins/nuke/publish/collect_instances.py index 35673c5ff3..cca5a861ff 100644 --- a/pype/plugins/nuke/publish/collect_instances.py +++ b/pype/plugins/nuke/publish/collect_instances.py @@ -64,7 +64,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): "name": node.name(), "subset": subset, "family": avalon_knob_data["family"], - "families": [family], + "families": [avalon_knob_data["family"], family], "avalonKnob": avalon_knob_data, "publish": node.knob('publish').value(), "step": 1, diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 216160616b..f98a3a0f7d 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -66,19 +66,20 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): instance.data['families'].append('ftrack') if "representations" not in instance.data: instance.data["representations"] = list() - try: - collected_frames = os.listdir(output_dir) representation = { 'name': ext, 'ext': ext, - 'files': collected_frames, "stagingDir": output_dir, "anatomy_template": "render" } - 
instance.data["representations"].append(representation) + try: + collected_frames = os.listdir(output_dir) + representation['files'] = collected_frames + instance.data["representations"].append(representation) except Exception: + instance.data["representations"].append(representation) self.log.debug("couldn't collect frames: {}".format(label)) if 'render.local' in instance.data['families']: diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py index 1d6550024f..2b185720a6 100644 --- a/pype/plugins/nuke/publish/extract_render_local.py +++ b/pype/plugins/nuke/publish/extract_render_local.py @@ -28,12 +28,6 @@ class NukeRenderLocal(pype.api.Extractor): last_frame = instance.data.get("endFrame", None) node_subset_name = instance.data.get("name", None) - # swap path to stageDir - temp_dir = self.staging_dir(instance).replace("\\", "/") - output_dir = instance.data.get("outputDir") - path = node['file'].value() - node['file'].setValue(path.replace(output_dir, temp_dir)) - self.log.info("Starting render") self.log.info("Start frame: {}".format(first_frame)) self.log.info("End frame: {}".format(last_frame)) @@ -45,27 +39,26 @@ class NukeRenderLocal(pype.api.Extractor): int(last_frame) ) - # swap path back to publish path path = node['file'].value() - node['file'].setValue(path.replace(temp_dir, output_dir)) + out_dir = os.path.dirname(path) ext = node["file_type"].value() if "representations" not in instance.data: instance.data["representations"] = [] - collected_frames = os.listdir(temp_dir) + collected_frames = os.listdir(out_dir) repre = { 'name': ext, 'ext': ext, 'files': collected_frames, - "stagingDir": temp_dir, + "stagingDir": out_dir, "anatomy_template": "render" } instance.data["representations"].append(repre) self.log.info("Extracted instance '{0}' to: {1}".format( instance.name, - temp_dir + out_dir )) instance.data['family'] = 'render' diff --git a/pype/plugins/nuke/publish/extract_review_data.py b/pype/plugins/nuke/publish/extract_review_data.py index 552aa0cdb0..69df0ab31f 100644 --- a/pype/plugins/nuke/publish/extract_review_data.py +++ b/pype/plugins/nuke/publish/extract_review_data.py @@ -82,10 +82,15 @@ class ExtractReviewData(pype.api.Extractor): temporary_nodes.append(node) reformat_node = nuke.createNode("Reformat") - reformat_node["format"].setValue("HD_1080") - reformat_node["resize"].setValue("fit") - reformat_node["filter"].setValue("Lanczos6") - reformat_node["black_outside"].setValue(True) + + ref_node = self.nodes.get("Reformat", None) + if ref_node: + for k, v in ref_node: + self.log.debug("k,v: {0}:{1}".format(k,v)) + if isinstance(v, unicode): + v = str(v) + reformat_node[k].setValue(v) + reformat_node.setInput(0, previous_node) previous_node = reformat_node temporary_nodes.append(reformat_node) @@ -112,6 +117,7 @@ class ExtractReviewData(pype.api.Extractor): if representation in "mov": file = fhead + "baked.mov" + name = "baked" path = os.path.join(stagingDir, file).replace("\\", "/") self.log.debug("Path: {}".format(path)) instance.data["baked_colorspace_movie"] = path @@ -120,12 +126,11 @@ class ExtractReviewData(pype.api.Extractor): write_node["raw"].setValue(1) write_node.setInput(0, previous_node) temporary_nodes.append(write_node) - thumbnail = False - preview = True - tags = ["review"] + tags = ["review", "delete"] elif representation in "jpeg": file = fhead + "jpeg" + name = "thumbnail" path = os.path.join(stagingDir, file).replace("\\", "/") instance.data["thumbnail"] = path 
write_node["file"].setValue(path) @@ -133,8 +138,6 @@ class ExtractReviewData(pype.api.Extractor): write_node["raw"].setValue(1) write_node.setInput(0, previous_node) temporary_nodes.append(write_node) - thumbnail = True - preview = False tags = ["thumbnail"] # retime for @@ -142,15 +145,13 @@ class ExtractReviewData(pype.api.Extractor): last_frame = int(last_frame) / 2 repre = { - 'name': representation, + 'name': name, 'ext': representation, 'files': file, "stagingDir": stagingDir, "startFrame": first_frame, "endFrame": last_frame, "anatomy_template": "render", - "thumbnail": thumbnail, - "preview": preview, "tags": tags } instance.data["representations"].append(repre) diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py index 841001ef43..93eb84f304 100644 --- a/pype/plugins/nuke/publish/validate_rendered_frames.py +++ b/pype/plugins/nuke/publish/validate_rendered_frames.py @@ -11,9 +11,12 @@ class RepairCollectionAction(pyblish.api.Action): icon = "wrench" def process(self, context, plugin): - + self.log.info(context[0]) files_remove = [os.path.join(context[0].data["outputDir"], f) - for f in context[0].data["files"]] + for r in context[0].data.get("representations", []) + for f in r.get("files", []) + ] + self.log.info(files_remove) for f in files_remove: os.remove(f) self.log.debug("removing file: {}".format(f)) @@ -38,7 +41,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): if not repre.get('files'): msg = ("no frames were collected, " "you need to render them") - self.log.error(msg) + self.log.warning(msg) raise ValidationException(msg) collections, remainder = clique.assemble(repre["files"]) diff --git a/pype/plugins/nuke/publish/validate_script.py b/pype/plugins/nuke/publish/validate_script.py index 4ad76b898b..efb0537246 100644 --- a/pype/plugins/nuke/publish/validate_script.py +++ b/pype/plugins/nuke/publish/validate_script.py @@ -24,7 +24,7 @@ class ValidateScript(pyblish.api.InstancePlugin): # These attributes will be checked attributes = [ "fps", "fstart", "fend", - "resolution_width", "resolution_height", "pixel_aspect", "handle_start", "handle_end" + "resolution_width", "resolution_height", "handle_start", "handle_end" ] # Value of these attributes can be found on parents From 0e0dea31237bc611e6319dee8855aabe4b036e0f Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Thu, 18 Jul 2019 18:02:52 +0200 Subject: [PATCH 20/55] (hotfix) project modules were not loading into shelfs --- setup/maya/userSetup.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/setup/maya/userSetup.py b/setup/maya/userSetup.py index 7b06fe7f33..b419e9d27e 100644 --- a/setup/maya/userSetup.py +++ b/setup/maya/userSetup.py @@ -1,18 +1,25 @@ import os -import sys from pypeapp import config -from pype.maya import lib +import pype.maya.lib as mlib from maya import cmds -def build_shelf(): - presets = config.get_presets() - shelf_preset = presets['maya'].get('project_shelf') - if shelf_preset: - project = os.environ["AVALON_PROJECT"] - for k, v in shelf_preset['imports'].items(): - sys.modules[k] = __import__(v, fromlist=[project]) +print("starting PYPE usersetup") - lib.shelf(name=shelf_preset['name'], preset=shelf_preset) +# build a shelf +presets = config.get_presets() +shelf_preset = presets['maya'].get('project_shelf') -cmds.evalDeferred("build_shelf()") + +if shelf_preset: + project = os.environ["AVALON_PROJECT"] + + for i in shelf_preset['imports']: + import_string = "from 
{} import {}".format(project, i) + print(import_string) + exec(import_string) + +cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], preset=shelf_preset)") + + +print("finished PYPE usersetup") From c183f044545331fc0f343e539c3d5b0174bb5509 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 19 Jul 2019 16:31:06 +0200 Subject: [PATCH 21/55] PYPE-253_fix_aov_publishing_from_maya --- pype/plugins/maya/publish/validate_rendersettings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py index 0450cb83b5..a41fe7b5f2 100644 --- a/pype/plugins/maya/publish/validate_rendersettings.py +++ b/pype/plugins/maya/publish/validate_rendersettings.py @@ -35,7 +35,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): DEFAULT_PADDING = 4 RENDERER_PREFIX = {"vray": "//"} - DEFAULT_PREFIX = "//" + DEFAULT_PREFIX = "//_" def process(self, instance): From 6f5913e027d09b329f62f88293b6b1765aea3b17 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 19 Jul 2019 17:41:27 +0200 Subject: [PATCH 22/55] fix(global): integrate new didnt create correct padding for image sequences --- pype/plugins/global/publish/integrate_new.py | 23 ++++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index e758789c37..2d04c3ec1a 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -271,15 +271,20 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug( "src_tail_collections: {}".format(str(src_collections))) src_collection = src_collections[0] + # Assert that each member has identical suffix src_head = src_collection.format("{head}") src_tail = src_collection.format("{tail}") + + # fix dst_padding + padd_len = len(files[0].replace(src_head, "").replace(src_tail, "")) + src_padding_exp = "%0{}d".format(padd_len) + test_dest_files = list() for i in [1, 2]: template_data["representation"] = repre['ext'] - template_data["frame"] = src_collection.format( - "{padding}") % i + template_data["frame"] = src_padding_exp % i anatomy_filled = anatomy.format(template_data) test_dest_files.append( @@ -295,24 +300,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): dst_head = dst_collection.format("{head}") dst_tail = dst_collection.format("{tail}") - repre['published_path'] = dst_collection.format() - index_frame_start = None if repre.get('startFrame'): frame_start_padding = len(str( repre.get('endFrame'))) index_frame_start = repre.get('startFrame') + dst_padding_exp = src_padding_exp for i in src_collection.indexes: - src_padding = src_collection.format("{padding}") % i + src_padding = src_padding_exp % i src_file_name = "{0}{1}{2}".format( src_head, src_padding, src_tail) - dst_padding = dst_collection.format("{padding}") % i + dst_padding = src_padding_exp % i if index_frame_start: - dst_padding = "%0{}d".format( - frame_start_padding) % index_frame_start + dst_padding_exp = "%0{}d".format(frame_start_padding) + dst_padding = dst_padding_exp % index_frame_start index_frame_start += 1 dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail) @@ -321,6 +325,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("source: {}".format(src)) instance.data["transfers"].append([src, dst]) + repre['published_path'] = "{0}{1}{2}".format(dst_head, dst_padding_exp, dst_tail) # for imagesequence version 
data hashes = '#' * len(dst_padding) dst = os.path.normpath("{0}{1}{2}".format( @@ -380,7 +385,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "representation": repre['ext'] } } - self.log.debug("__ _representation: {}".format(representation)) + self.log.debug("__ representation: {}".format(representation)) destination_list.append(dst) self.log.debug("__ destination_list: {}".format(destination_list)) instance.data['destination_list'] = destination_list From ec359430d9b2e72432c6f07432ed608074a612c0 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Sun, 21 Jul 2019 16:21:42 +0100 Subject: [PATCH 23/55] Update flags --- pype/scripts/publish_filesequence.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index 123c35cf70..53b63bd428 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -51,8 +51,11 @@ def __main__(): elif platform.system().lower() == "windows": pype_command = "pype.bat" - args = [os.path.join(pype_root, pype_command), - "--node", "--publish", "--paths", " ".join(paths)] + args = [ + os.path.join(pype_root, pype_command), + "publish", + " ".join(paths) + ] print("Pype command: {}".format(" ".join(args))) subprocess.call(args, shell=True) From 272461316b37ffb76c2b7a589f5dc6ae34d5110e Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Sun, 21 Jul 2019 16:37:37 +0100 Subject: [PATCH 24/55] Create model needs to be "Main" by default. --- pype/plugins/maya/create/create_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/maya/create/create_model.py b/pype/plugins/maya/create/create_model.py index f9ba229c89..241e2be7f9 100644 --- a/pype/plugins/maya/create/create_model.py +++ b/pype/plugins/maya/create/create_model.py @@ -8,7 +8,7 @@ class CreateModel(avalon.maya.Creator): label = "Model" family = "model" icon = "cube" - defaults = [ "_MD", "_HD", "_LD", "Main", "Proxy",] + defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"] def __init__(self, *args, **kwargs): super(CreateModel, self).__init__(*args, **kwargs) From 5c0463f58fbeb931683a8e9e44f00b93c8c11575 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Sun, 21 Jul 2019 16:39:46 +0100 Subject: [PATCH 25/55] Fix shading engine validation Validation was falsely failing because the same shading engine was returned multiple times. --- pype/plugins/maya/publish/validate_look_single_shader.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/plugins/maya/publish/validate_look_single_shader.py b/pype/plugins/maya/publish/validate_look_single_shader.py index 1b9ebffced..a60d1f1817 100644 --- a/pype/plugins/maya/publish/validate_look_single_shader.py +++ b/pype/plugins/maya/publish/validate_look_single_shader.py @@ -40,6 +40,10 @@ class ValidateSingleShader(pyblish.api.InstancePlugin): shading_engines = cmds.listConnections(shape, destination=True, type="shadingEngine") or [] + + # Only interested in unique shading engines. 
+ shading_engines = list(set(shading_engines)) + if not shading_engines: no_shaders.append(shape) elif len(shading_engines) > 1: From 02453f3a5c0e71b928b8524d6e2d930c2f889a81 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Sun, 21 Jul 2019 16:40:21 +0100 Subject: [PATCH 26/55] Add repair for image rule --- pype/plugins/maya/publish/validate_render_image_rule.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pype/plugins/maya/publish/validate_render_image_rule.py b/pype/plugins/maya/publish/validate_render_image_rule.py index 377dbfeadc..c05a15ab77 100644 --- a/pype/plugins/maya/publish/validate_render_image_rule.py +++ b/pype/plugins/maya/publish/validate_render_image_rule.py @@ -1,4 +1,5 @@ import maya.mel as mel +import pymel.core as pm import pyblish.api import pype.api @@ -18,9 +19,15 @@ class ValidateRenderImageRule(pyblish.api.InstancePlugin): label = "Images File Rule (Workspace)" hosts = ["maya"] families = ["renderlayer"] + actions = [pype.api.RepairAction] def process(self, instance): assert get_file_rule("images") == "renders", ( "Workspace's `images` file rule must be set to: renders" ) + + @classmethod + def repair(cls, instance): + pm.workspace.fileRules["images"] = "renders" + pm.system.Workspace.save() From 0799cf1d04010ddb96cae54b373ed8de14c133da Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 22 Jul 2019 11:52:05 +0200 Subject: [PATCH 27/55] fix(nks): hierarchical custom attributes, starting frame frame start distribute to other instances, resolution_width/height,pixel_aspect to asset custom attributes --- .../publish/collect_hierarchy_context.py | 74 ++++++++++++------- .../publish/collect_tag_framestart.py | 3 +- 2 files changed, 48 insertions(+), 29 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index bbae365fa6..fd646451b6 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -34,6 +34,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): def process(self, context): for instance in context[:]: + assets_shared = context.data.get("assetsShared") tags = instance.data.get("tags", None) clip = instance.data["item"] asset = instance.data.get("asset") @@ -139,19 +140,28 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): "Clip: `{}`".format(asset) ) - assetsShared = { - asset: { - "asset": instance.data["asset"], - "hierarchy": hierarchy, - "parents": parents, - "tasks": instance.data['tasks'] - }} - self.log.debug("__ assetsShared: {}".format(assetsShared)) # add formated hierarchy path into instance data instance.data["hierarchy"] = hierarchy instance.data["parents"] = parents - context.data["assetsShared"].update( - assetsShared) + + # adding to asset shared dict + if assets_shared.get(asset): + self.log.debug("Adding to shared assets: `{}`".format( + asset)) + assets_shared[asset].update({ + "asset": instance.data["asset"], + "hierarchy": hierarchy, + "parents": parents, + "tasks": instance.data["tasks"] + }) + + # adding frame start if any on instance + start_frame = instance.data.get("frameStart") + if start_frame: + assets_shared[asset].update({ + "frameStart": start_frame + }) + class CollectHierarchyContext(pyblish.api.ContextPlugin): @@ -176,6 +186,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): def process(self, context): instances = context[:] + sequence = context.data['activeSequence'] # create hierarchyContext attr if 
context has none temp_context = {} @@ -201,6 +212,12 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): instance.data["hierarchy"] = s_asset_data["hierarchy"] instance.data["tasks"] = s_asset_data["tasks"] + # adding frame start if any on instance + start_frame = s_asset_data.get("frameStart") + if start_frame: + instance.data["frameStart"] = start_frame + + self.log.debug( "__ instance.data[parents]: {}".format( instance.data["parents"] @@ -226,8 +243,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): # get custom attributes of the shot if instance.data.get("main"): - start_frame = instance.data.get("frameStart", 0) - in_info['custom_attributes'] = { 'handles': int(instance.data.get('handles')), 'handle_start': handle_start, @@ -238,27 +253,30 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): "edit_in": int(instance.data["startFrame"]), "edit_out": int(instance.data["endFrame"]) } - if start_frame is not 0: - in_info['custom_attributes'].update({ - 'fstart': start_frame, - 'fend': start_frame + ( - instance.data["endFrame"] - instance.data["startFrame"]) - }) + # adding SourceResolution if Tag was present - s_res = instance.data.get("sourceResolution") - if s_res and instance.data.get("main"): - item = instance.data["item"] - self.log.debug("TrackItem: `{0}`".format( - item)) - width = int(item.source().mediaSource().width()) - height = int(item.source().mediaSource().height()) - self.log.info("Source Width and Height are: `{0} x {1}`".format( - width, height)) + if instance.data.get("main"): + width = int(sequence.format().width()) + height = int(sequence.format().height()) + pixel_aspect = sequence.format().pixelAspect() + self.log.info("Sequence Width,Height,PixelAspect are: `{0},{1},{2}`".format( + width, height, pixel_aspect)) + in_info['custom_attributes'].update({ "resolution_width": width, - "resolution_height": height + "resolution_height": height, + "pixel_aspect": pixel_aspect }) + start_frame = instance.data.get("frameStart") + if start_frame: + in_info['custom_attributes'].update({ + 'fstart': start_frame, + 'fend': start_frame + ( + instance.data["endFrame"] - + instance.data["startFrame"]) + }) + in_info['tasks'] = instance.data['tasks'] parents = instance.data.get('parents', []) diff --git a/pype/plugins/nukestudio/publish/collect_tag_framestart.py b/pype/plugins/nukestudio/publish/collect_tag_framestart.py index 244a86e9f4..256350b2a4 100644 --- a/pype/plugins/nukestudio/publish/collect_tag_framestart.py +++ b/pype/plugins/nukestudio/publish/collect_tag_framestart.py @@ -20,4 +20,5 @@ class CollectClipTagFrameStart(api.InstancePlugin): # gets only task family tags and collect labels if "frameStart" in t_family: t_number = t_metadata.get("tag.number", "") - instance.data["frameStart"] = int(t_number) + start_frame = int(t_number) + instance.data["frameStart"] = start_frame From 8e862e5c887eac00b80f840fd1e03203f25f8c3f Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Tue, 23 Jul 2019 08:41:56 +0100 Subject: [PATCH 28/55] collect_renderable_camera needs to run after collect_renderlayers. 
--- pype/plugins/maya/publish/collect_renderable_camera.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/maya/publish/collect_renderable_camera.py b/pype/plugins/maya/publish/collect_renderable_camera.py index 9bfc010204..6b1732c3cb 100644 --- a/pype/plugins/maya/publish/collect_renderable_camera.py +++ b/pype/plugins/maya/publish/collect_renderable_camera.py @@ -8,7 +8,8 @@ from pype.maya import lib class CollectRenderableCamera(pyblish.api.InstancePlugin): """Collect the renderable camera(s) for the render layer""" - order = pyblish.api.CollectorOrder + 0.01 + # Offset to be after renderlayer collection. + order = pyblish.api.CollectorOrder + 0.02 label = "Collect Renderable Camera(s)" hosts = ["maya"] families = ["vrayscene", From ee11908a33a337f446972e29213a89dd9864dc0d Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 23 Jul 2019 10:19:52 +0200 Subject: [PATCH 29/55] fix(nks): asset share handles --- .../nukestudio/publish/collect_handles.py | 19 +++-- .../publish/collect_hierarchy_context.py | 31 ++++---- .../nukestudio/publish/collect_shots.py | 77 ++++++++++--------- 3 files changed, 71 insertions(+), 56 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_handles.py b/pype/plugins/nukestudio/publish/collect_handles.py index 03652989b8..ed13691daf 100644 --- a/pype/plugins/nukestudio/publish/collect_handles.py +++ b/pype/plugins/nukestudio/publish/collect_handles.py @@ -32,13 +32,18 @@ class CollectClipHandles(api.ContextPlugin): if instance.data.get("main"): name = instance.data["asset"] if assets_shared.get(name): - self.log.debug("Adding to shared assets: `{}`".format( - instance.data["name"])) - assets_shared[name].update({ - "handles": handles, - "handleStart": handle_start, - "handleEnd": handle_end - }) + asset_shared = assets_shared.get(name) + else: + asset_shared = assets_shared[name] + + self.log.debug("Adding to shared assets: `{}`".format( + instance.data["name"])) + asset_shared.update({ + "handles": handles, + "handleStart": handle_start, + "handleEnd": handle_end + }) + for instance in filtered_instances: if not instance.data.get("main") and not instance.data.get("handleTag"): diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index fd646451b6..28b007b109 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -145,22 +145,27 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): instance.data["parents"] = parents # adding to asset shared dict + self.log.debug("__ assets_shared: {}".format(assets_shared)) if assets_shared.get(asset): self.log.debug("Adding to shared assets: `{}`".format( - asset)) - assets_shared[asset].update({ - "asset": instance.data["asset"], - "hierarchy": hierarchy, - "parents": parents, - "tasks": instance.data["tasks"] - }) + instance.data["name"])) + asset_shared = assets_shared.get(asset) + else: + asset_shared = assets_shared[asset] - # adding frame start if any on instance - start_frame = instance.data.get("frameStart") - if start_frame: - assets_shared[asset].update({ - "frameStart": start_frame - }) + asset_shared.update({ + "asset": instance.data["asset"], + "hierarchy": hierarchy, + "parents": parents, + "tasks": instance.data["tasks"] + }) + + # adding frame start if any on instance + start_frame = instance.data.get("frameStart") + if start_frame: + asset_shared.update({ + "frameStart": start_frame + }) 
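A note on the shared-assets lookup in the hunk above: both branches resolve the same per-asset dictionary, and the `else` branch (`assets_shared[asset]`) only avoids a KeyError when `context.data["assetsShared"]` is pre-seeded or behaves like a `collections.defaultdict(dict)`. A minimal sketch (not part of this patch) of the same get-or-create-then-update intent with a plain dict, reusing the names from the hunk:

    # assets_shared, asset, hierarchy, parents and instance come from the
    # surrounding collector code; setdefault() returns the existing per-asset
    # dict or creates an empty one, so the update below never raises KeyError.
    asset_shared = assets_shared.setdefault(asset, {})
    asset_shared.update({
        "asset": instance.data["asset"],
        "hierarchy": hierarchy,
        "parents": parents,
        "tasks": instance.data["tasks"],
    })
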
diff --git a/pype/plugins/nukestudio/publish/collect_shots.py b/pype/plugins/nukestudio/publish/collect_shots.py index 26a4c7fb6b..506020bbc3 100644 --- a/pype/plugins/nukestudio/publish/collect_shots.py +++ b/pype/plugins/nukestudio/publish/collect_shots.py @@ -25,50 +25,55 @@ class CollectShots(api.ContextPlugin): ) continue - # Collect data. - data = {} - for key, value in instance.data.iteritems(): - data[key] = value + if instance.data.get("main"): + # Collect data. + data = {} + for key, value in instance.data.iteritems(): + if key in "main": + continue + data[key] = value - data["family"] = "shot" - data["families"] = [] - data["frameStart"] = instance.data.get("frameStart", 1) + data["family"] = "shot" + data["families"] = [] + data["frameStart"] = instance.data.get("frameStart", 1) - data["subset"] = data["family"] + "Main" + data["subset"] = data["family"] + "Main" - data["name"] = data["subset"] + "_" + data["asset"] + data["name"] = data["subset"] + "_" + data["asset"] - data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format( - data["tasks"], [x["name"] for x in data.get("assetbuilds", [])] - ) + data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format( + data["tasks"], [x["name"] for x in data.get("assetbuilds", [])] + ) - # Get handles. - data["handleStart"] = instance.data["handleStart"] - data["handleEnd"] = instance.data["handleEnd"] + # Get handles. + data["handleStart"] = instance.data["handleStart"] + data["handleEnd"] = instance.data["handleEnd"] - # Frame-ranges with handles. - data["sourceInH"] = data["sourceIn"] - data["handleStart"] - data["sourceOutH"] = data["sourceOut"] + data["handleEnd"] + # Frame-ranges with handles. + data["sourceInH"] = data["sourceIn"] - data["handleStart"] + data["sourceOutH"] = data["sourceOut"] + data["handleEnd"] - # Get timeline frames. - data["timelineIn"] = int(data["item"].timelineIn()) - data["timelineOut"] = int(data["item"].timelineOut()) + # Get timeline frames. + data["timelineIn"] = int(data["item"].timelineIn()) + data["timelineOut"] = int(data["item"].timelineOut()) - # Frame-ranges with handles. - data["timelineInHandles"] = data["timelineIn"] - data["timelineInHandles"] -= data["handleStart"] - data["timelineOutHandles"] = data["timelineOut"] - data["timelineOutHandles"] += data["handleEnd"] + # Frame-ranges with handles. + data["timelineInHandles"] = data["timelineIn"] + data["timelineInHandles"] -= data["handleStart"] + data["timelineOutHandles"] = data["timelineOut"] + data["timelineOutHandles"] += data["handleEnd"] - # Creating comp frame range. - data["endFrame"] = ( - data["frameStart"] + (data["sourceOut"] - data["sourceIn"]) - ) + # Creating comp frame range. + data["endFrame"] = ( + data["frameStart"] + (data["sourceOut"] - data["sourceIn"]) + ) - # Get fps. - sequence = instance.context.data["activeSequence"] - data["fps"] = sequence.framerate() + # Get fps. + sequence = instance.context.data["activeSequence"] + data["fps"] = sequence.framerate() - # Create instance. - self.log.debug("Creating instance with: {}".format(data["name"])) - instance.context.create_instance(**data) + # Create instance. 
+ self.log.debug("Creating instance with: {}".format(data["name"])) + instance.context.create_instance(**data) + + self.log.debug("_ context: {}".format(context[:])) From 6f5a80bf1b2379e560c06648a106308febda951e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 23 Jul 2019 18:10:57 +0200 Subject: [PATCH 30/55] apps actions wont show on project task --- pype/ftrack/lib/ftrack_app_handler.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py index 2d1d88f7d4..29d478b10f 100644 --- a/pype/ftrack/lib/ftrack_app_handler.py +++ b/pype/ftrack/lib/ftrack_app_handler.py @@ -94,6 +94,9 @@ class AppAction(BaseHandler): ): return False + if entities[0]['parent'].entity_type.lower() == 'project': + return False + ft_project = entities[0]['project'] database = pypelib.get_avalon_database() From cef42b84b7ea76463446a37046fd340e6c08e3fa Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 23 Jul 2019 18:14:51 +0200 Subject: [PATCH 31/55] timer service wont try to launch timers on project tasks --- pype/services/timers_manager/timers_manager.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pype/services/timers_manager/timers_manager.py b/pype/services/timers_manager/timers_manager.py index e1980d3d90..72140de323 100644 --- a/pype/services/timers_manager/timers_manager.py +++ b/pype/services/timers_manager/timers_manager.py @@ -79,6 +79,14 @@ class TimersManager(metaclass=Singleton): } ''' self.last_task = data + + if len(input_data['hierarchy']) < 1: + self.log.error(( + 'Timer has been launched on task which is child of Project.' + ' That is not allowed in Pype!' + )) + return + for module in self.modules: module.start_timer_manager(data) self.is_running = True From e1cc291605562985030bbddb75c00d34777072ac Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 23 Jul 2019 18:18:20 +0200 Subject: [PATCH 32/55] fixed variable naming --- pype/services/timers_manager/timers_manager.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pype/services/timers_manager/timers_manager.py b/pype/services/timers_manager/timers_manager.py index 72140de323..2259dfc34d 100644 --- a/pype/services/timers_manager/timers_manager.py +++ b/pype/services/timers_manager/timers_manager.py @@ -78,15 +78,15 @@ class TimersManager(metaclass=Singleton): 'task_name': 'Lookdev BG' } ''' - self.last_task = data - - if len(input_data['hierarchy']) < 1: + if len(data['hierarchy']) < 1: self.log.error(( - 'Timer has been launched on task which is child of Project.' - ' That is not allowed in Pype!' + 'Not allowed action in Pype!!' + ' Timer has been launched on task which is child of Project.' 
)) return + self.last_task = data + for module in self.modules: module.start_timer_manager(data) self.is_running = True From bcbbef1c35acaf49a0e15648db98b08eaf197ce2 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Tue, 23 Jul 2019 21:53:26 +0100 Subject: [PATCH 33/55] Setting default mapping for "animation" and "workfile" --- pype/plugins/ftrack/publish/integrate_ftrack_instances.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index 02455454bb..c595178361 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -26,7 +26,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): 'write': 'render', 'review': 'mov', 'plate': 'img', - 'audio': 'audio' + 'audio': 'audio', + 'workfile': 'scene', + 'animation': 'cache' } def process(self, instance): From 590f953564f4a81d6efabd1bf0b308d8da5d6409 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 24 Jul 2019 10:55:54 +0200 Subject: [PATCH 34/55] hotfix(global) if handles were not found on render it couldn't be loaded to nuke, also we need to make sure they are integers --- pype/plugins/nuke/load/load_sequence.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 9dd83de064..fd733f7c87 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -94,9 +94,9 @@ class LoadSequence(api.Loader): first = version_data.get("startFrame", None) last = version_data.get("endFrame", None) - handles = version_data.get("handles", None) - handle_start = version_data.get("handleStart", None) - handle_end = version_data.get("handleEnd", None) + handles = version_data.get("handles", 0) + handle_start = version_data.get("handleStart", 0) + handle_end = version_data.get("handleEnd", 0) # fix handle start and end if none are available if not handle_start and not handle_end: @@ -130,10 +130,10 @@ class LoadSequence(api.Loader): r["colorspace"].setValue(str(colorspace)) loader_shift(r, first, relative=True) - r["origfirst"].setValue(first) - r["first"].setValue(first) - r["origlast"].setValue(last) - r["last"].setValue(last) + r["origfirst"].setValue(int(first)) + r["first"].setValue(int(first)) + r["origlast"].setValue(int(last)) + r["last"].setValue(int(last)) # add additional metadata from the version to imprint to Avalon knob add_keys = ["startFrame", "endFrame", "handles", From 0bfdd3668c1a5787802b02dee8ae2acbbb3c2dcb Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 24 Jul 2019 10:56:33 +0200 Subject: [PATCH 35/55] hotfix(maya) making maketx argument in look collection overridable from presets. 
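PATCH 35/55 promotes the hard-coded maketx flag to a class attribute precisely so a studio preset can override it without touching the plugin. A minimal, self-contained sketch of that idea follows; the preset shape and the plain setattr application are assumptions for illustration, the real preset loading lives elsewhere in pype:

    class CollectLook(object):          # stand-in for the pyblish plugin
        maketx = True                   # default added by the patch below

    presets = {"CollectLook": {"maketx": False}}    # hypothetical preset data

    for attr, value in presets.get("CollectLook", {}).items():
        setattr(CollectLook, attr, value)

    assert CollectLook.maketx is False  # instance.data["maketx"] then follows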
--- pype/plugins/maya/publish/collect_look.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/plugins/maya/publish/collect_look.py b/pype/plugins/maya/publish/collect_look.py index 614e5b44a4..8482369407 100644 --- a/pype/plugins/maya/publish/collect_look.py +++ b/pype/plugins/maya/publish/collect_look.py @@ -211,6 +211,7 @@ class CollectLook(pyblish.api.InstancePlugin): families = ["look"] label = "Collect Look" hosts = ["maya"] + maketx = True def process(self, instance): """Collect the Look in the instance with the correct layer settings""" @@ -220,7 +221,8 @@ class CollectLook(pyblish.api.InstancePlugin): # make ftrack publishable instance.data["families"] = ['ftrack'] - instance.data['maketx'] = True + instance.data['maketx'] = self.maketx + self.log.info('maketx: {}'.format(self.maketx)) def collect(self, instance): From 60a7739d8bfb282f843ba55782ef4b4ee5d7a333 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 24 Jul 2019 23:05:38 +0100 Subject: [PATCH 36/55] Use review source component if present. --- pype/ftrack/actions/action_rv.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pype/ftrack/actions/action_rv.py b/pype/ftrack/actions/action_rv.py index 158ddc63e8..223fd0a94b 100644 --- a/pype/ftrack/actions/action_rv.py +++ b/pype/ftrack/actions/action_rv.py @@ -269,11 +269,28 @@ class RVAction(BaseAction): api.Session["AVALON_PROJECT"] = project["name"] io.install() + location = ftrack_api.Session().pick_location() + paths = [] for parent_name in sorted(event["data"]["values"].keys()): component = session.get( "Component", event["data"]["values"][parent_name] ) + + # Newer publishes have the source referenced in Ftrack. + online_source = False + for neighbour_component in component["version"]["components"]: + if neighbour_component["name"] != "ftrackreview-mp4_src": + continue + + paths.append( + location.get_filesystem_path(neighbour_component) + ) + online_source = True + + if online_source: + continue + asset = io.find_one({"type": "asset", "name": parent_name}) subset = io.find_one( { From 89784a6e8cb9f99078153e3236ec7258a5ac4dd0 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 24 Jul 2019 23:11:13 +0100 Subject: [PATCH 37/55] Support EXR renders. - User can frame hold the first frame with pop dialog. --- pype/plugins/maya/load/load_image_plane.py | 28 ++++++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/pype/plugins/maya/load/load_image_plane.py b/pype/plugins/maya/load/load_image_plane.py index 5534cce0ee..dd7b7ea812 100644 --- a/pype/plugins/maya/load/load_image_plane.py +++ b/pype/plugins/maya/load/load_image_plane.py @@ -7,9 +7,9 @@ from Qt import QtWidgets class ImagePlaneLoader(api.Loader): """Specific loader of plate for image planes on selected camera.""" - families = ["plate"] + families = ["plate", "render"] label = "Create imagePlane on selected camera." - representations = ["mov"] + representations = ["mov", "exr"] icon = "image" color = "orange" @@ -58,12 +58,10 @@ class ImagePlaneLoader(api.Loader): camera=camera, showInAllViews=False ) image_plane_shape.depth.set(image_plane_depth) - # Need to get "type" by string, because its a method as well. 
- pc.Attribute(image_plane_shape + ".type").set(2) + image_plane_shape.imageName.set( context["representation"]["data"]["path"] ) - image_plane_shape.useFrameExtension.set(1) start_frame = pc.playbackOptions(q=True, min=True) end_frame = pc.playbackOptions(q=True, max=True) @@ -71,6 +69,26 @@ class ImagePlaneLoader(api.Loader): image_plane_shape.frameOffset.set(1 - start_frame) image_plane_shape.frameIn.set(start_frame) image_plane_shape.frameOut.set(end_frame) + image_plane_shape.useFrameExtension.set(1) + + if context["representation"]["name"] == "mov": + # Need to get "type" by string, because its a method as well. + pc.Attribute(image_plane_shape + ".type").set(2) + + # Ask user whether to use sequence or still image. + if context["representation"]["name"] == "exr": + reply = QtWidgets.QMessageBox.information( + None, + "Frame Hold.", + "Hold image sequence on first frame?", + QtWidgets.QMessageBox.Ok, + QtWidgets.QMessageBox.Cancel + ) + if reply == QtWidgets.QMessageBox.Ok: + pc.delete( + image_plane_shape.listConnections(type="expression")[0] + ) + image_plane_shape.frameExtension.set(start_frame) new_nodes.extend( [image_plane_transform.name(), image_plane_shape.name()] From 5da5e1201419b9fbce30ea0261137df868f88f95 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 24 Jul 2019 23:25:14 +0100 Subject: [PATCH 38/55] Start/End frame can be floats. Enforce integer. --- pype/plugins/nuke/load/load_sequence.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 9dd83de064..c5d8513146 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -130,10 +130,10 @@ class LoadSequence(api.Loader): r["colorspace"].setValue(str(colorspace)) loader_shift(r, first, relative=True) - r["origfirst"].setValue(first) - r["first"].setValue(first) - r["origlast"].setValue(last) - r["last"].setValue(last) + r["origfirst"].setValue(int(first)) + r["first"].setValue(int(first)) + r["origlast"].setValue(int(last)) + r["last"].setValue(int(last)) # add additional metadata from the version to imprint to Avalon knob add_keys = ["startFrame", "endFrame", "handles", From 2a174ef69d8c2d32bb14db12eb27ec1d50e32f2c Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 24 Jul 2019 23:25:52 +0100 Subject: [PATCH 39/55] Handles can be non-existent, in which case they are 0. --- pype/plugins/nuke/load/load_sequence.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index c5d8513146..fd733f7c87 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -94,9 +94,9 @@ class LoadSequence(api.Loader): first = version_data.get("startFrame", None) last = version_data.get("endFrame", None) - handles = version_data.get("handles", None) - handle_start = version_data.get("handleStart", None) - handle_end = version_data.get("handleEnd", None) + handles = version_data.get("handles", 0) + handle_start = version_data.get("handleStart", 0) + handle_end = version_data.get("handleEnd", 0) # fix handle start and end if none are available if not handle_start and not handle_end: From 61d78192a4a92c3ca842cb2e7fb92e981bb67f5b Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 24 Jul 2019 23:29:16 +0100 Subject: [PATCH 40/55] Submit which write node to render on Deadline. 
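For context, this submitter appears to post a JSON payload of job and plugin info to the Deadline web service; the hunk below only adds the WriteNode entry. A rough sketch of the payload shape, where every value except ProjectPath and WriteNode is a placeholder:

    payload = {
        "JobInfo": {
            "Plugin": "Nuke",
            "Name": "shot010_compMain_v001",    # placeholder job name
        },
        "PluginInfo": {
            "SceneFile": "/path/to/script.nk",  # placeholder paths
            "ProjectPath": "/path/to/workspace",
            # New in this patch: only this write node is rendered on the farm.
            "WriteNode": "Write_compMain",
        },
        # Mandatory for Deadline, may be empty.
        "AuxFiles": [],
    }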
--- pype/plugins/nuke/publish/submit_nuke_deadline.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 3d854f66e9..fb5d0fea75 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -100,6 +100,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # Resolve relative references "ProjectPath": workspace, + + # Only the specific write node is rendered. + "WriteNode": instance[0].name() }, # Mandatory for Deadline, may be empty From 460c601e1a64e15db4581e8110b0fb06172daa8a Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 24 Jul 2019 23:36:12 +0100 Subject: [PATCH 41/55] Padding can get confused when there are other files present. Validate against source collection. --- pype/plugins/global/publish/integrate_new.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 2d04c3ec1a..0d077ca65e 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -276,9 +276,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): src_head = src_collection.format("{head}") src_tail = src_collection.format("{tail}") - # fix dst_padding - padd_len = len(files[0].replace(src_head, "").replace(src_tail, "")) + valid_files = [x for x in files if src_collection.match(x)] + padd_len = len( + valid_files[0].replace(src_head, "").replace(src_tail, "") + ) src_padding_exp = "%0{}d".format(padd_len) test_dest_files = list() From 5cf11dd64aab79790c4a0bec6df5e55387dcfb1c Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Wed, 24 Jul 2019 23:38:34 +0100 Subject: [PATCH 42/55] Log output from ffmpeg process. --- pype/plugins/global/publish/extract_review.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 3a764b19c3..1b66b4e9d2 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -136,12 +136,21 @@ class ExtractReview(pyblish.api.InstancePlugin): # run subprocess self.log.debug("{}".format(subprcs_cmd)) - sub_proc = subprocess.Popen(subprcs_cmd) - sub_proc.wait() + sub_proc = subprocess.Popen( + subprcs_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + stdin=subprocess.PIPE, + cwd=os.path.dirname(output_args[-1]) + ) + + output = sub_proc.communicate()[0] if not os.path.isfile(full_output_path): - raise FileExistsError( - "Quicktime wasn't created succesfully") + raise ValueError( + "Quicktime wasn't created succesfully: " + "{}".format(output) + ) # create representation data repre_new.update({ From 2a0977e6da93d243161af26c965f79b1dbb74c1b Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Thu, 25 Jul 2019 23:21:13 +0100 Subject: [PATCH 43/55] Improve logging with stating which attributes are compared. - Code cosmetics. 
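The padding guard introduced in PATCH 41/55 above is easiest to see on a small example. Assuming the source collection is a clique collection, which the format and match calls suggest, a stray non-sequence file can otherwise end up as the name the padding is measured from:

    import clique

    files = ["beauty.1001.exr", "beauty.1002.exr", "beauty_preview.mov"]
    collections, remainder = clique.assemble(files)
    src_collection = collections[0]

    src_head = src_collection.format("{head}")   # "beauty."
    src_tail = src_collection.format("{tail}")   # ".exr"

    # Measuring padding on files[0] breaks as soon as a file like the .mov
    # sorts first; filtering through the collection first guarantees the
    # measured name really is a frame of the sequence.
    valid_files = [f for f in files if src_collection.match(f)]
    padd_len = len(valid_files[0].replace(src_head, "").replace(src_tail, ""))
    assert padd_len == 4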
--- pype/plugins/nuke/publish/validate_script.py | 23 +++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/pype/plugins/nuke/publish/validate_script.py b/pype/plugins/nuke/publish/validate_script.py index efb0537246..ae298de209 100644 --- a/pype/plugins/nuke/publish/validate_script.py +++ b/pype/plugins/nuke/publish/validate_script.py @@ -23,12 +23,24 @@ class ValidateScript(pyblish.api.InstancePlugin): # These attributes will be checked attributes = [ - "fps", "fstart", "fend", - "resolution_width", "resolution_height", "handle_start", "handle_end" + "fps", + "fstart", + "fend", + "resolution_width", + "resolution_height", + "handle_start", + "handle_end" ] # Value of these attributes can be found on parents - hierarchical_attributes = ["fps", "resolution_width", "resolution_height", "pixel_aspect", "handle_start", "handle_end"] + hierarchical_attributes = [ + "fps", + "resolution_width", + "resolution_height", + "pixel_aspect", + "handle_start", + "handle_end" + ] missing_attributes = [] asset_attributes = {} @@ -84,8 +96,9 @@ class ValidateScript(pyblish.api.InstancePlugin): # Compare asset's values Nukescript X Database not_matching = [] for attr in attributes: - self.log.debug("asset vs script attribute: {0}, {1}".format( - asset_attributes[attr], script_attributes[attr])) + self.log.debug("asset vs script attribute \"{}\": {}, {}".format( + attr, asset_attributes[attr], script_attributes[attr]) + ) if asset_attributes[attr] != script_attributes[attr]: not_matching.append(attr) From df3da5e3c561e39fceee8c0a9b070fdd2e40875a Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Thu, 25 Jul 2019 23:22:38 +0100 Subject: [PATCH 44/55] Some entities has None as keys data member. --- pype/ftrack/events/event_sync_to_avalon.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index f6b2b48a1f..6753bb2413 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -16,7 +16,7 @@ class Sync_to_Avalon(BaseEvent): # If mongo_id textfield has changed: RETURN! # - infinite loop for ent in event['data']['entities']: - if 'keys' in ent: + if ent.get('keys') is not None: if ca_mongoid in ent['keys']: return @@ -109,7 +109,7 @@ class Sync_to_Avalon(BaseEvent): ' for more information.' ) items = [ - {'type': 'label', 'value':'# Fatal Error'}, + {'type': 'label', 'value': '# Fatal Error'}, {'type': 'label', 'value': '
{}
'.format(ftrack_message)} ] self.show_interface(event, items, title) From 873e5cb35b8135382e07302322abb8b362d07564 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Thu, 25 Jul 2019 23:23:49 +0100 Subject: [PATCH 45/55] Improve logging with stacktrace of errors. --- pype/ftrack/lib/ftrack_event_handler.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pype/ftrack/lib/ftrack_event_handler.py b/pype/ftrack/lib/ftrack_event_handler.py index c6c91e7428..0e1a2ecb5d 100644 --- a/pype/ftrack/lib/ftrack_event_handler.py +++ b/pype/ftrack/lib/ftrack_event_handler.py @@ -43,9 +43,20 @@ class BaseEvent(BaseHandler): self.session.rollback() self.session._local_cache.clear() - self.launch( - self.session, event - ) + try: + self.launch( + self.session, event + ) + except Exception as e: + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + log_message = "{}/{}/Line: {}".format( + exc_type, fname, exc_tb.tb_lineno + ) + self.log.error( + 'Error during syncToAvalon: {}'.format(log_message), + exc_info=True + ) return From 43ab84721fd75e3cae617b73d55a7f7ea0d13eb1 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Thu, 25 Jul 2019 23:25:00 +0100 Subject: [PATCH 46/55] Raise errors from subprocess to get proper failed jobs. --- pype/scripts/publish_filesequence.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index 53b63bd428..705a21c8c2 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -58,7 +58,9 @@ def __main__(): ] print("Pype command: {}".format(" ".join(args))) - subprocess.call(args, shell=True) + exit_code = subprocess.call(args, shell=True) + if exit_code != 0: + raise ValueError("Publishing failed.") if __name__ == '__main__': From e36f289f44ba48c1a7d508ade9b78b7b161cfcde Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Fri, 26 Jul 2019 08:11:48 +0100 Subject: [PATCH 47/55] Set context settings on startup. --- pype/nuke/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index b64f728771..982724e899 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -23,6 +23,7 @@ from pypeapp import Logger # for handler in Logger.logging.root.handlers[:]]: # if "pype" not in str(name).lower(): # Logger.logging.root.removeHandler(handler) +from . import lib self = sys.modules[__name__] @@ -138,6 +139,9 @@ def install(): if launch_workfiles: nuke.addOnCreate(launch_workfiles_app, nodeClass="Root") + # Set context settings. + nuke.addOnCreate(lib.set_context_settings, nodeClass="Root") + def launch_workfiles_app(): if not self.workfiles_launched: From 6999c2a00ac8ca728ea0776b1720da96d68fce66 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Fri, 26 Jul 2019 08:12:17 +0100 Subject: [PATCH 48/55] Remove redundant code and code cosmetics. --- pype/nuke/__init__.py | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index 982724e899..94f591b2e8 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -1,28 +1,14 @@ import os import sys +import logging + +import nuke from avalon import api as avalon from avalon.tools import workfiles from pyblish import api as pyblish - -from .. 
import api - from pype.nuke import menu -import logging - -from .lib import ( - create_write_node -) - -import nuke - from pypeapp import Logger - -# #removing logger handler created in avalon_core -# for name, handler in [(handler.get_name(), handler) -# for handler in Logger.logging.root.handlers[:]]: -# if "pype" not in str(name).lower(): -# Logger.logging.root.removeHandler(handler) from . import lib From 414d6a888d55a28778f2c398603195884f88d627 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Fri, 26 Jul 2019 08:15:18 +0100 Subject: [PATCH 49/55] Remove active viewer warning. - Because we set the context on the creation of the Root, the log error pop up appear unnecessarily. - Working with viewer is Nuke workflow behaviour which should not be explained by the pipeline. - Supporting finding the first viewer node in the scene. --- pype/nuke/lib.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 6a57704fff..eae812ec26 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -392,8 +392,11 @@ def reset_frame_range_handles(): try: vv = nuke.activeViewer().node() except AttributeError: - log.error("No active viewer. Select any node and hit num `1`") - return + viewer_nodes = nuke.allNodes(filter="Viewer") + if viewer_nodes: + vv = viewer_nodes[0] + else: + return range = '{0}-{1}'.format( int(asset["data"]["fstart"]), From cffd0c498c92ef0fc00782304bdf29fdf084a7f4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 26 Jul 2019 11:03:55 +0200 Subject: [PATCH 50/55] (hotfix) event sync to avalon wont crash if 'keys' key is in entity and is set to None --- pype/ftrack/events/event_sync_to_avalon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index f6b2b48a1f..e647d9f940 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -16,7 +16,7 @@ class Sync_to_Avalon(BaseEvent): # If mongo_id textfield has changed: RETURN! 
# - infinite loop for ent in event['data']['entities']: - if 'keys' in ent: + if ent.get('keys') is not None: if ca_mongoid in ent['keys']: return From eea84c9e7a1b0a055dbd8689b91c0dd31b5c0e5b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 26 Jul 2019 11:05:37 +0200 Subject: [PATCH 51/55] (hotfix) missing application config toml file is more specifically logged --- pype/ftrack/lib/avalon_sync.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index 4eaf28eae4..94de0172af 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -507,11 +507,17 @@ def get_project_apps(entity): apps = [] for app in entity['custom_attributes']['applications']: try: - app_config = {} - app_config['name'] = app - app_config['label'] = toml.load(avalon.lib.which_app(app))['label'] + toml_path = avalon.lib.which_app(app) + if not toml_path: + log.warning(( + 'Missing config file for application "{}"' + ).format(app)) + continue - apps.append(app_config) + apps.append({ + 'name': app, + 'label': toml.load(toml_path)['label'] + }) except Exception as e: log.warning('Error with application {0} - {1}'.format(app, e)) From ec510c9c317f5591f728f2bba2b599457bc4143c Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 26 Jul 2019 12:38:35 +0200 Subject: [PATCH 52/55] (hotfix) maketx set in presets --- pype/plugins/maya/publish/collect_look.py | 1 - pype/plugins/maya/publish/collect_review.py | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/maya/publish/collect_look.py b/pype/plugins/maya/publish/collect_look.py index 8482369407..5d5cb6f886 100644 --- a/pype/plugins/maya/publish/collect_look.py +++ b/pype/plugins/maya/publish/collect_look.py @@ -220,7 +220,6 @@ class CollectLook(pyblish.api.InstancePlugin): self.collect(instance) # make ftrack publishable - instance.data["families"] = ['ftrack'] instance.data['maketx'] = self.maketx self.log.info('maketx: {}'.format(self.maketx)) diff --git a/pype/plugins/maya/publish/collect_review.py b/pype/plugins/maya/publish/collect_review.py index 52aff1d459..c434bda49d 100644 --- a/pype/plugins/maya/publish/collect_review.py +++ b/pype/plugins/maya/publish/collect_review.py @@ -56,6 +56,8 @@ class CollectReview(pyblish.api.InstancePlugin): # data["publish"] = False data['startFrameReview'] = instance.data['startFrame'] data['endFrameReview'] = instance.data['endFrame'] + data['startFrame'] = instance.data['startFrame'] + data['endFrame'] = instance.data['endFrame'] data['handles'] = instance.data['handles'] data['step'] = instance.data['step'] data['fps'] = instance.data['fps'] From 7facb8faea192652ddd1c84b91eef8eecd4c4d7b Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Sat, 27 Jul 2019 09:32:08 +0100 Subject: [PATCH 53/55] Iterate over all viewer nodes to set frame range. 
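The 'keys' guard rewritten in PATCH 44/55 and PATCH 50/55 above matters because an event entity can carry 'keys' set to None: the old membership test still passes and the follow-up lookup raises TypeError. A standalone illustration with a fabricated entity dict (the custom attribute name is assumed):

    ca_mongoid = "avalon_mongo_id"      # assumed custom attribute name
    ent = {"keys": None}                # fabricated event entity

    if "keys" in ent:
        # Old guard: True even though the value is None, so this raises.
        try:
            ca_mongoid in ent["keys"]
        except TypeError:
            pass

    if ent.get("keys") is not None:
        # New guard: only entered when "keys" exists and is not None,
        # so this branch is safely skipped here.
        print(ca_mongoid in ent["keys"])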
--- pype/nuke/lib.py | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index eae812ec26..dd543c63e6 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -389,27 +389,19 @@ def reset_frame_range_handles(): # setting active viewers nuke.frame(int(asset["data"]["fstart"])) - try: - vv = nuke.activeViewer().node() - except AttributeError: - viewer_nodes = nuke.allNodes(filter="Viewer") - if viewer_nodes: - vv = viewer_nodes[0] - else: - return - range = '{0}-{1}'.format( int(asset["data"]["fstart"]), int(asset["data"]["fend"])) - vv['frame_range'].setValue(range) - vv['frame_range_lock'].setValue(True) + for node in nuke.allNodes(filter="Viewer"): + node['frame_range'].setValue(range) + node['frame_range_lock'].setValue(True) - log.info("_frameRange: {}".format(range)) - log.info("frameRange: {}".format(vv['frame_range'].value())) + log.info("_frameRange: {}".format(range)) + log.info("frameRange: {}".format(node['frame_range'].value())) - vv['frame_range'].setValue(range) - vv['frame_range_lock'].setValue(True) + node['frame_range'].setValue(range) + node['frame_range_lock'].setValue(True) # adding handle_start/end to root avalon knob if not avalon.nuke.set_avalon_knob_data(root, { From 8cc3099c79aa98e70bc830a69ea465ae95e33b54 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Sun, 28 Jul 2019 21:23:32 +0100 Subject: [PATCH 54/55] Ensure EXR plugin is loaded. --- pype/plugins/maya/load/load_image_plane.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/plugins/maya/load/load_image_plane.py b/pype/plugins/maya/load/load_image_plane.py index dd7b7ea812..e2d94ac82e 100644 --- a/pype/plugins/maya/load/load_image_plane.py +++ b/pype/plugins/maya/load/load_image_plane.py @@ -90,6 +90,9 @@ class ImagePlaneLoader(api.Loader): ) image_plane_shape.frameExtension.set(start_frame) + # Ensure OpenEXRLoader plugin is loaded. + pc.loadPlugin("OpenEXRLoader.mll", quiet=True) + new_nodes.extend( [image_plane_transform.name(), image_plane_shape.name()] ) From 09d51c52fb581fc0f4e673228e0b26e49e7f1300 Mon Sep 17 00:00:00 2001 From: Toke Jepsen Date: Mon, 29 Jul 2019 09:09:32 +0100 Subject: [PATCH 55/55] Explicit absolute import of create_write_node. --- pype/plugins/nuke/create/create_write.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py index ff1fde6638..a5b0c204bd 100644 --- a/pype/plugins/nuke/create/create_write.py +++ b/pype/plugins/nuke/create/create_write.py @@ -1,11 +1,8 @@ from collections import OrderedDict import avalon.api import avalon.nuke -from pype.nuke import ( - create_write_node -) +from pype.nuke.lib import create_write_node from pype import api as pype -# from pypeapp import Logger import nuke
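A slightly more defensive variant of the plugin load in PATCH 54/55, shown here only as an optional sketch: query first and load only when needed. The plugin file name matches the patch; the extension is platform specific (.mll on Windows, .so on Linux, .bundle on macOS).

    import pymel.core as pc

    # Load the EXR reader only when it is not already available.
    if not pc.pluginInfo("OpenEXRLoader", query=True, loaded=True):
        pc.loadPlugin("OpenEXRLoader.mll", quiet=True)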